diff --git "a/3997.jsonl" "b/3997.jsonl"
new file mode 100644
--- /dev/null
+++ "b/3997.jsonl"
@@ -0,0 +1,654 @@
+{"seq_id":"239525647","text":"__author__ = 'alexandre'\nimport time\nimport RPi.GPIO as io\nio.setmode(io.BOARD)\nio.setup(7, io.IN)\n\ndef motion_detection():\n    try:\n        while True:\n            time.sleep(2)\n            if (io.input (7) == 1):\n                print(\"Motion\")\n            else:\n                print(\"No motion\")\n    except KeyboardInterrupt:\n        io.cleanup()\n        exit()\n\nmotion_detection()","sub_path":"basic/cir_motion_sensor.py","file_name":"cir_motion_sensor.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"480741535","text":"# Setting\nimport os\nimport re\nimport numpy as np\nimport pandas as pd\nfrom konlpy.tag import Twitter\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.cluster import KMeans\n\n\n\ndef load_data(file_path):\n    # 데이터 로드 및 간단한 전처리\n    path = \"C:/Users/YY/Desktop/TB/Week05/NLP/\"\n    file = pd.read_csv(os.path.join(path, file_path), encoding='utf-8', index_col=0)\n\n    # 필요 없는 칼럼 삭제 및 이름 변경\n    file.drop(['from', 'Date'], axis=1, inplace=True)\n    file.rename(columns={'x':'contents'}, inplace=True)\n    print(\"loading done\")\n\n    return file\n\n\n\ndef make_stopwords(file):\n    # stopwords 준비\n    lines = []\n\n    f = open(os.path.join(path, file), 'r')\n    while True:\n        line = f.readline()\n        if not line:\n            break\n        lines.append(line)\n    f.close()\n\n    stopwords = set(re.sub('\n', '', word) for word in lines)\n    print(list(stopwords)[0:10])\n    print(\"making stopwords done\")\n\n    return stopwords\n\n\n\ndef remove_id():\n    # 트위터 아이디를 제거해준다.\n    pattern = re.compile('.@+[A-Za-z0-9\_]*:*')\n    tweets = [re.sub(pattern, ' ', sentence) for sentence in list(file['contents'])]\n    print(\"removing id done\")\n\n    return tweets\n\n\n\nclass TweetTokenizer:\n    # 트윗을 토큰화함.\n    def __init__(self):\n        self.twitter = Twitter()\n        self.stopwords = stopwords\n\n    def nominalize(tweets, start, end):\n        nouns = []\n        for tweet in tweets[start:end]:\n            nouns.append(' '.join([noun for noun in twitter.nouns(str(tweet)) if not noun in stopwords]))\n        # print(len(nouns))\n        # document = ' '.join(nouns)\n        print(\"tokenizing done\")\n\n        return nouns\n\n\n\ndef embedding_clustering():\n    vect = CountVectorizer(min_df=0.001, encoding='utf-8', max_features=50, ngram_range=(1, 1))\n    bow = vect.fit_transform(nouns)\n    print(\"사전 길이: \", len(vect.vocabulary_))\n\n    X = bow.toarray()\n    print(\"X shape: \", X.shape)\n    vect.get_feature_names()\n\n    dict = {'문재인':0, '남북정상회담':1, '지방선거':2, '자유한국당':3, '안철수':4, '더불어민주당':5,\n            '미투':6, '바른미래당':7, '보수':8, '서울시장':9, '진보':10, '박원순':11, '김문수':12}\n\n    Y = np.array(file['Keyword'].map(dict)).astype(int).reshape(-1, 1)\n\n    kmeans = KMeans(n_clusters=13)\n    kmeans.fit(X)\n    pred = kmeans.predict(X).reshape(-1, 1)\n\n    result = np.concatenate([pred, Y], axis=1)\n\n    print(pd.Series(pred.reshape(-1, )).value_counts())\n    print(pd.Series(Y.reshape(-1, )).value_counts())\n\n    return result\n\n\n\ndef main():\n    file = load_data('tweet.csv')\n    stopwords = make_stopwords('korean_stopwords.txt')\n    twitter = Twitter()\n    tweets = remove_id()\n    nouns = TweetTokenizer.nominalize(tweets, 0, 118570)\n    result = embedding_clustering()\n\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Machine_Learning/Assignments/TB/Week05_nlp.py","file_name":"Week05_nlp.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"271874183","text":"from 
linkedlistnode import linkedlist\n\ndef return_kth_to_last(ll,k):\n p1= ll.head\n p2= ll.head\n #we dont know the length of ll\n for i in range(k):\n if p1 is None:\n return None\n p1=p1.next\n while p1.next != None:\n p1=p1.next\n p2=p2.next\n\n return p2 \n","sub_path":"linkedlist/returnk2last.py","file_name":"returnk2last.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"335394749","text":"##################### HEAD #######################\n\nfrom time import sleep\nfrom random import randint\nfrom output import *\n\n#################### CONFIG ######################\n\ndifficulties = {\"1\":(100, 8), \"2\":(1000, 10), \"3\":(10000, 12),\\\n\"4\":(100000, 14), \"5\":(1000000, 16), \"6\":(10000000, 18)} \n#key denotes difficulty and tuple denotes max_num and max_tries\ndifficulty = \"2\"\nsimulation_games = 100\n#number of tries to simulate \nsimulation_delay = 0.05\n#simulation delay in seconds\nsimulation_threshold = 1000\n#bypasses print screens and delays above\n\n################### FUNCTIONS ####################\n\n\ndef get_difficulty(difficulty):\n\n output(\"difficulty\", difficulty)\n sleep(0.25)\n new_difficulty = raw_input(\"> \")\n\n if new_difficulty in difficulties:\n difficulty = new_difficulty\n\n output(\"difficulty\", difficulty)\n sleep(1.0)\n raw_input(\"Press any key to continue. \")\n\n return difficulty\n\n\ndef game(max_num, max_tries, simulation_games = None):\n\n if simulation_games != None:\n output(\"simulation\")\n sleep(1.0)\n raw_input(\"Press any key to continue... \")\n simulation_successes = 0\n games = simulation_games\n else: \n output(\"intro2\")\n games = 1 \n\n for i in range(games):\n\n rnum = randint(1,max_num)\n num = 0 \n tries = 0\n\n if simulation_games != None:\n simulation_min = 1\n simulation_max = max_num\n\n while tries <= max_tries and num != rnum:\n\n if simulation_games != None:\n num = (simulation_min + simulation_max) // 2\n else:\n while True:\n num = (raw_input(\"Guess a number between 1 and %d > \" % max_num))\n try:\n num = int(num)\n except:\n output(\"try again\")\n continue\n else:\n break\n\n if len(str(num)) < 1 or num <= 0 or num >= max_num:\n output(\"try again\")\n elif num < rnum:\n tries += 1\n if simulation_games != None: simulation_min = num\n else: output(\"higher\", tries)\n elif num > rnum:\n tries += 1\n if simulation_games != None: simulation_max = num\n else: output(\"lower\", tries)\n\n if num == rnum:\n if simulation_games != None: simulation_successes += 1\n if simulation_games == None or simulation_games <= simulation_threshold: output(\"win\", tries-1, rnum)\n else:\n if simulation_games == None or simulation_games <= simulation_threshold: output(\"lose\", tries-1, rnum)\n\n if simulation_games != None and simulation_games <= simulation_threshold: sleep(simulation_delay)\n\n if simulation_games != None: \n output(\"success rate\", float(simulation_successes)/float(simulation_games) * 100)\n\n sleep(1.0)\n raw_input(\"Press any key to continue. \")\n\n\n##################### MAIN #######################\n\noutput(\"intro\")\nsleep(1.0)\nraw_input(\"Press any key to continue... 
\")\n\nwhile True:\n output(\"menu\")\n sleep(0.25)\n selection = raw_input(\"> \")\n\n if selection == \"2\": \n difficulty = get_difficulty(difficulty)\n elif selection == \"3\":\n max_num, max_tries = difficulties[difficulty]\n game(max_num, max_tries, simulation_games)\n elif selection == \"4\":\n break \n else:\n max_num, max_tries = difficulties[difficulty]\n game(max_num, max_tries)\n\noutput(\"quit\")\nsleep(1.0)\n","sub_path":"guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"282072765","text":"from .document import Document\nfrom .remote import _quote_id\nfrom .view import View\n\n\nclass DesignDocument(Document):\n _allowed_keys = [\n \"language\",\n \"options\",\n \"filters\",\n \"lists\",\n \"rewrites\",\n \"shows\",\n \"updates\",\n \"validate_doc_update\",\n \"views\",\n ]\n\n @property\n def endpoint(self):\n return f\"{self._database.endpoint}/_design/{_quote_id(self.id)}\"\n\n def __setitem__(self, key, value):\n if key in self._allowed_keys:\n super().__setitem__(key, value)\n else:\n raise KeyError(f\"The key '{key}' is not allowed in an design document.\")\n\n def view(self, view):\n return View(self._database, self.id, view)\n\n async def create_view(\n self, view, map_function, reduce_function=None, exists_ok=False\n ):\n if \"views\" not in self:\n self[\"views\"] = {}\n\n if view in self[\"views\"] and not exists_ok:\n raise KeyError(\n f\"The view '{view}' does already exist in the design document {self.id}\"\n )\n\n self[\"views\"][view] = {\"map\": map_function}\n if reduce_function is not None:\n self[\"views\"][view][\"reduce\"] = reduce_function\n self[\"language\"] = \"javascript\"\n\n await self.save()\n\n return self.view(view)\n","sub_path":"aiocouch/design_document.py","file_name":"design_document.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"537699464","text":"from django.db import models\nimport datetime\n\n\nclass Attendance(models.Model):\n employee_number = models.IntegerField()\n department = models.CharField(max_length=16)\n clock_in_time = models.DateTimeField()\n clock_out_time = models.DateTimeField(null=True, blank=True)\n shift = models.CharField(max_length=16)\n\n @staticmethod\n def get_manhours_during(start, stop=None, department='all', shift='all'):\n if stop is None:\n stop = datetime.datetime.now()\n if shift == 'all':\n this_shift = Attendance.objects.all()\n else:\n this_shift = Attendance.objects.filter(shift=shift)\n if department == 'all' or department == 'Plant':\n in_department = this_shift\n else:\n in_department = this_shift.filter(department=department)\n were_clocked_in = in_department.filter(clock_in_time__lt=start).exclude(clock_out_time__lt=start)\n clocked_in_after_start = in_department.filter(clock_in_time__gte=start)\n clocked_in_during = clocked_in_after_start.filter(clock_in_time__lt=stop)\n clocked_out_after_start = in_department.filter(clock_out_time__gte=start)\n clocked_out_during = clocked_out_after_start.filter(clock_out_time__lt=stop)\n all_relevent = were_clocked_in | clocked_in_during | clocked_out_during\n manhours = 0\n for employee in all_relevent:\n begin = max(employee.clock_in_time, start)\n if employee.clock_out_time == None:\n end = stop\n else:\n end = min(employee.clock_out_time, stop)\n manhours += ((end - begin).total_seconds())/3600\n return manhours\n\n\n 
@staticmethod\n def get_active_at(active_time=None, department='all', shift='all'):\n if active_time is None:\n active_time = datetime.datetime.now()\n if shift == 'all':\n this_shift = Attendance.objects.all()\n else:\n this_shift = Attendance.objects.filter(shift=shift)\n if department == 'all' or department == 'Plant':\n in_department = this_shift\n else:\n in_department = this_shift.filter(department=department)\n have_clocked_in = in_department.filter(clock_in_time__lt=active_time)\n not_clocked_out_yet = have_clocked_in.filter(clock_out_time__gt=active_time)\n never_clocked_out = have_clocked_in.filter(clock_out_time=None)\n not_clocked_out = not_clocked_out_yet | never_clocked_out\n return not_clocked_out.count()\n\n def is_ot(self, time_in_question=None):\n if time_in_question is None:\n time_in_question = datetime.datetime.now().time()\n if self.shift == 0:\n if not self.clock_out_time and time_in_question > datetime.time(14, 30):\n return True\n else:\n return False\n else:\n if not self.clock_out_time and time_in_question > datetime.time(22, 30):\n return True\n else:\n return False\n\n\nclass Complete(models.Model):\n serial_number = models.CharField(max_length=10)\n completed = models.DateTimeField()\n\n @staticmethod\n def claims_by_time(time_in_question, hour=None):\n day = time_in_question.date()\n return Complete.objects.filter(completed__gt=datetime.datetime.combine(day,\n datetime.time(0))).filter(completed__lt=time_in_question).count()\n","sub_path":"hpv/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"60869092","text":"# This sample tests the case where super().__new__(cls) is called\n# and there is an inferred return type based on the cls type.\n\nfrom typing import Literal, NamedTuple\n\nFooBase = NamedTuple(\"FooBase\", [(\"x\", int)])\n\n\nclass Foo(FooBase):\n def __new__(cls):\n obj = super().__new__(cls, x=1)\n t1: Literal[\"Self@Foo\"] = reveal_type(obj)\n return obj\n\n\nf = Foo()\nt2: Literal[\"Foo\"] = reveal_type(f)\n","sub_path":"packages/pyright-internal/src/tests/samples/super6.py","file_name":"super6.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"498746878","text":"#Universidad del Valle de Guatemala\n#Raul Alejandro Monzon Solis 17014\n#Bases de Datos\n#Laboratorio 12\n\nimport Funciones\n\n \n#Menu para Interaccion\nprint (\"Raul Alejandro Monzon Solis 17014\\nBases de Datos\\nLaboratorio 12\\nIngresar valores enteros durante use el programa\")\nopcion = 1\nwhile opcion != 6: \n print(\"Seleccione el ejercicio que deseee:\\nSi desea realizar el ejercicio a escriba 1\\nSi desea realizar el ejercicio b escriba 2\\nSi desea realizar el ejercicio c escriba 3\\nSi desea realizar el ejercicio d escriba 4\\nSi desea realizar el ejercicio e escriba 5\\nIngrese 6 para salir\")\n opcion = input(\"INGRESAR NUMERO\")\n try:\n opcion = int(opcion)\n if opcion == 1:\n precio = input(\"Precio: \")\n Funciones.ejercicioA(precio)\n elif opcion == 2:\n velocidad = input(\"speed: \")\n ram = input(\"RAM: \")\n hd = input(\"HD: \")\n Funciones.ejercicioB(velocidad,ram,hd)\n elif opcion == 3:\n presupuesto = input(\"Presupuesto maximo: \")\n speed = input(\"Speed minima: \")\n color = raw_input(\"Desea imprimir a color? 
(S/N) (Ingresar sin comillas la letra): \")\n Funciones.ejercicioC(presupuesto, speed, color)\n elif opcion == 4:\n Model = input(\"Model: \")\n Speed = input(\"Speed: \")\n RAM = input(\"RAM: \")\n HD = input(\"HD: \")\n Precio = input(\"Precio: \")\n Funciones.ejercicioD(Model, Speed, RAM, HD, Precio)\n elif opcion == 5:\n Precio = input(\"Precio: \")\n Funciones.ejercicioE(Precio)\n else: \n print(\"INGRESE UNA DE LAS OPCIONES (NUMEROS)\")\n except ValueError:\n print(\"INGRESE UNA DE LAS OPCIONES (NUMEROS\")\nprint(\"Ha finalizado el programa correctamente :)\")\n","sub_path":"lab012-17014-Parte1.py","file_name":"lab012-17014-Parte1.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"232250941","text":"# -*- coding: utf-8 -*-\n\n# Como funciona as burlações de votações online. Parte 2\n# Material abaixo absorvido do curso Pythonicos, para estudos e conheicmento\n\nimport requests \n\ndef proxy():\n\turl = \"http://gimmeproxy.com/api/getProxy?protocol=http\"\n\tr = requests.get(url).json()\n\treturn {r['protocol']:r['curl']}\n\n\nurl = \"http://www.ferendum.com/pt/votarpost2.php\"\n\nproxies = proxy()\t\n\n# Edit and Resend, Request Body (pega o que foi enviado)\n# Substitui & por , e = por :\n# Ficando na estrutura do dicionário\ndata = {\"record1\":\"\",\"record2\":\"\",\"pregunta_ID\":\"45561\",\"sec_digit\":\"91791\",\"config_anonimo\":\"1\",\"config_aut_req\":\"0\",\"titulo\":\"Votaria+em+Jair+messias+bolsonaro\"}\n\ntry:\n\tr = requests.post(url,data=data,proxies=proxies)\n\tprint(r.status_code)\n\tif \"Obrigado por participar desta enquete\" in r.content.decode():\n\t\tprint(\"Voto Realizado com Sucesso!\")\nexcept:\n\tprint(\"Error!\")","sub_path":"pythonicos/Proxy Votacoes Online/proxy_votacoes3.py","file_name":"proxy_votacoes3.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"541840227","text":"\"\"\"\nThis file may not be shared/redistributed without permission. Please read copyright notice in the git repo. 
If this file contains other copyright notices disregard this text.\n\"\"\"\nimport numpy as np\nfrom irlc.ex06.ilqr_agent import ILQRAgent\nfrom irlc import train\nfrom irlc import savepdf\nimport matplotlib.pyplot as plt\nfrom irlc.ex04.model_cartpole import GymSinCosCartpoleEnvironment\nfrom irlc import VideoMonitor\n\ndef cartpole_experiment(N=12, use_linesearch=True, figex=\"\", animate=True):\n np.random.seed(2)\n Tmax = .9\n dt = Tmax/N\n\n env = GymSinCosCartpoleEnvironment(dt=dt, Tmax=Tmax, supersample_trajectory=True)\n agent = ILQRAgent(env, env.discrete_model, N=N, ilqr_iterations=200, use_linesearch=use_linesearch)\n if animate:\n env =VideoMonitor(env)\n stats, trajectories = train(env, agent, num_episodes=1, return_trajectory=True)\n\n agent.use_ubar = True\n stats2, trajectories2 = train(env, agent, num_episodes=1, return_trajectory=True)\n env.close()\n\n xb = agent.xbar\n tb = np.arange(N+1)*dt\n plt.figure(figsize=(8,6))\n F = 3\n plt.plot(trajectories[0].time, trajectories[0].state[:,F], 'k-', label='Closed-loop $\\\\pi$')\n plt.plot(trajectories2[0].time, trajectories2[0].state[:,F], '-', label='Open-loop $\\\\bar{u}_k$')\n\n plt.plot(tb, xb[:,F], '.-', label=\"iLQR rediction $\\\\bar{x}_k$\")\n plt.xlabel(\"Time/seconds\")\n plt.ylabel(\"$\\cos(\\\\theta)$\")\n plt.title(f\"Pendulum environment $T={N}$\")\n\n plt.grid()\n plt.legend()\n ev = \"pendulum\"\n savepdf(f\"irlc_cartpole_theta_N{N}_{use_linesearch}{figex}\")\n plt.show()\n\ndef plt_cartpole():\n cartpole_experiment(N=50, use_linesearch=True, animate=True)\n\nif __name__ == '__main__':\n plt_cartpole()\n","sub_path":"irlc/ex06/ilqr_cartpole_agent.py","file_name":"ilqr_cartpole_agent.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148500294","text":"import responses\nfrom payconiq import Transaction\n\nfrom .testcase import PayconiqTestCase\n\n\nclass TransactionTestCase(PayconiqTestCase):\n\n REMOTE_ID = '5e14137fe51905040b202c04'\n\n @responses.activate\n def test_resource_start(self):\n responses.add(\n responses.POST,\n Transaction.get_base_url(),\n json={\n 'transactionId': self.REMOTE_ID\n },\n status=200\n )\n\n remote_id = Transaction.start(\n amount=1,\n webhook_url=''\n )\n self.assertEqual(\n remote_id,\n self.REMOTE_ID\n )\n","sub_path":"lunchbreak/payconiq/tests/test_transaction.py","file_name":"test_transaction.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"473946954","text":"from datetime import date\nfrom datetime import datetime\nfrom datetime import timedelta\nimport urllib.request\nimport json\nimport pytz\nimport hockey_scraper\n\ntoday = date.today()\nnow = datetime.now()\nest = pytz.timezone(\"America/New_York\")\npst = pytz.timezone(\"America/Los_Angeles\")\nloc_dt = est.localize(now)\npacific = loc_dt.astimezone(pst)\nstringnow = pacific.strftime(\"%Y-%m-%d\")\nformattednow = datetime.strptime(stringnow, \"%Y-%m-%d\")\nltrVenues = [\n \"Honda Center\",\n \"Gila River Arena\",\n \"Jobing.com Arena\",\n \"TD Garden\",\n \"KeyBank Center\",\n \"First Niagara Center\",\n \"Nationwide Arena\",\n \"Rogers Place\",\n \"Bridgestone Arena\",\n \"Prudential Center\",\n \"NYCB Live/Nassau Coliseum\",\n \"Barclays Center\",\n \"Canadian Tire Centre\",\n \"PPG Paints Arena\",\n \"CONSOL Energy Center\",\n \"SAP Center at San Jose\",\n \"Enterprise Center\",\n \"Scottrade Center\",\n \"Amalie 
Arena\",\n \"Tampa Bay Times Forum\"]\nrtlVenues = [\n \"Scotiabank Saddledome\",\n \"PNC Arena\",\n \"United Center\",\n \"Pepsi Center\",\n \"American Airlines Center\",\n \"Little Caesars Arena\",\n \"Joe Louis Arena\",\n \"Rexall Place\",\n \"BB&T Center\",\n \"STAPLES Center\", \"Staples Center\",\n \"Xcel Energy Center\",\n \"Centre Bell\",\n \"Nassau Coliseum\",\n \"Madison Square Garden\",\n \"Wells Fargo Center\",\n \"Scotiabank Arena\",\n \"Air Canada Centre\",\n \"Rogers Arena\",\n \"T-Mobile Arena\",\n \"Bell MTS Place\",\n \"MTS Centre\"]\nhomeLtRVenues = [\"Navy-Marine Corps Memorial Stadium\"]\nhomeRtLVenues = [\"Capital One Arena\",\"Verizon Center\",\"Nationals Park\"]\nhomeVenues = [\"Navy-Marine Corps Memorial Stadium\",\"Capital One Arena\",\"Verizon Center\",\"Nationals Park\"]\n\ndef getGame(theJSON, i):\n gamefeed = \"\"\n gamePk = \"\"\n gameslice = slice(4,6)\n gamedate = datetime.strptime(theJSON[\"dates\"][i][\"date\"], \"%Y-%m-%d\")\n if gamedate == formattednow:\n gamePk = str(theJSON[\"dates\"][i][\"games\"][0][\"gamePk\"])\n if gamePk[gameslice] == \"02\":\n gamefeed = \"https://statsapi.web.nhl.com\" + theJSON[\"dates\"][i][\"games\"][0][\"link\"]\n return [gamefeed, int(gamePk)]\n else:\n pass\n\n# def getOldGame(theJSON, i):\n# gamefeed = \"\"\n# gamePk = \"\"\n# gameslice = slice(4,6)\n# gamePk = str(theJSON[\"dates\"][i][\"games\"][0][\"gamePk\"])\n# if gamePk[gameslice] == \"02\":\n# gamefeed = \"https://statsapi.web.nhl.com\" + theJSON[\"dates\"][i][\"games\"][0][\"link\"]\n# return [gamefeed, int(gamePk)]\n\ndef parseGame(data, gameID):\n theJSON = json.loads(data)\n minutesslice = slice(0,2)\n secondsslice = slice(3,5)\n # print(theJSON)\n if theJSON[\"gameData\"][\"status\"][\"detailedState\"] == \"Final\": # Check to make sure the game is finished\n # Next Steps:\n # check to see if ovechkin played\n if \"ID8471214\" in theJSON[\"gameData\"][\"players\"]:\n print(\"Ovechkin on roster\")\n if (theJSON[\"gameData\"][\"venue\"][\"name\"] in homeVenues):\n if not theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"home\"][\"players\"][\"ID8471214\"][\"stats\"]:\n print(\"Ovechkin rostered but did not play\")\n else:\n plays = dataScrape(gameID, \"home\", theJSON)\n # print(plays[1],plays[4])\n stringtoi = theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"home\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n toisplit = stringtoi.split(\":\")\n minutes = int(toisplit[0])\n seconds = int(toisplit[1])\n toi = (minutes * 60) + seconds\n plays.append(toi)\n\n evstringtoi = theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"home\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"evenTimeOnIce\"]\n evtoisplit = evstringtoi.split(\":\")\n evminutes = int(evtoisplit[0])\n evseconds = int(evtoisplit[1])\n evtoi = (evminutes * 60) + evseconds\n plays.append(evtoi)\n\n ppstringtoi = theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"home\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"powerPlayTimeOnIce\"]\n pptoisplit = ppstringtoi.split(\":\")\n ppminutes = int(pptoisplit[0])\n ppseconds = int(pptoisplit[1])\n pptoi = (ppminutes * 60) + ppseconds\n plays.append(pptoi)\n return plays\n else:\n if not theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"away\"][\"players\"][\"ID8471214\"][\"stats\"]:\n print(\"Ovechkin rostered but did not play\")\n else:\n plays = dataScrape(gameID, \"away\", theJSON)\n # print(plays[1],plays[4])\n stringtoi = 
theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"away\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n toisplit = stringtoi.split(\":\")\n minutes = int(toisplit[0])\n seconds = int(toisplit[1])\n toi = (minutes * 60) + seconds\n plays.append(toi)\n\n evstringtoi = theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"away\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"evenTimeOnIce\"]\n evtoisplit = evstringtoi.split(\":\")\n evminutes = int(evtoisplit[0])\n evseconds = int(evtoisplit[1])\n evtoi = (evminutes * 60) + evseconds\n plays.append(evtoi)\n\n ppstringtoi = theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"away\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"powerPlayTimeOnIce\"]\n pptoisplit = ppstringtoi.split(\":\")\n ppminutes = int(pptoisplit[0])\n ppseconds = int(pptoisplit[1])\n pptoi = (ppminutes * 60) + ppseconds\n plays.append(pptoi)\n return plays\n else:\n print(\"Ovechkin not on roster\")\n else:\n print(\"Game not finished!\")\n\ndef dataScrape(gameID, venue, gamefeed):\n if venue == \"home\":\n print(\"Ovechkin played at home\")\n elif venue == \"away\":\n print(\"Ovechkin played on the road\")\n else:\n print(\"venue error\")\n\n shotsgoals = []\n fenwickgoals = []\n # corsigoals = []\n shots = []\n fenwick = []\n # corsi = []\n evshotsgoals = []\n evfenwickgoals = []\n # evcorsigoals = []\n evshots = []\n evfenwick = []\n # evcorsi = []\n ppshotsgoals = []\n ppfenwickgoals = []\n # ppcorsigoals = []\n ppshots = []\n ppfenwick = []\n # ppcorsi = []\n # print(gamefeed)\n scrape = hockey_scraper.scrape_games([gameID], False, data_format=\"Pandas\")\n bigData = scrape[\"pbp\"]\n eventlist = [\"GOAL\",\"SHOT\",\"MISS\"]\n shotdata = bigData.loc[bigData[\"Event\"].isin(eventlist)]\n roshotdata = shotdata.loc[shotdata[\"Period\"] <= 4]\n ovishotdata = roshotdata.loc[roshotdata[\"p1_ID\"] == 8471214]\n # print(ovishotdata)\n for i, j in ovishotdata.iterrows():\n coordinates = convertCoord(gamefeed,venue,j[\"Period\"],j[\"xC\"],j[\"yC\"])\n # print(i, j[\"Description\"], newcoord)\n strength = j[\"Strength\"]\n strsplit = strength.split(\"x\")\n if venue == \"home\":\n if int(strsplit[0]) >= 5 and int(strsplit[1]) >= 5:\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(1)\n evfenwickgoals.append(1)\n # evcorsigoals.append(1)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(0)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif int(strsplit[1]) == int(strsplit[0]):\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(1)\n evfenwickgoals.append(1)\n # evcorsigoals.append(1)\n 
evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(0)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif int(strsplit[1]) < int(strsplit[0]):\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppshotsgoals.append(1)\n ppfenwickgoals.append(1)\n # ppcorsigoals.append(1)\n ppshots.append(coordinates)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppshotsgoals.append(0)\n ppfenwickgoals.append(0)\n # ppcorsigoals.append(0)\n ppshots.append(coordinates)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppfenwickgoals.append(0)\n # ppcorsigoals.append(0)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n else:\n pass\n elif venue == \"away\":\n if int(strsplit[0]) >= 5 and int(strsplit[1]) >= 5:\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(1)\n evfenwickgoals.append(1)\n # evcorsigoals.append(1)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(0)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif int(strsplit[0]) == int(strsplit[1]):\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(1)\n evfenwickgoals.append(1)\n # evcorsigoals.append(1)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(0)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n 
evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif int(strsplit[0]) < int(strsplit[1]):\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppshotsgoals.append(1)\n ppfenwickgoals.append(1)\n # ppcorsigoals.append(1)\n ppshots.append(coordinates)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppshotsgoals.append(0)\n ppfenwickgoals.append(0)\n # ppcorsigoals.append(0)\n ppshots.append(coordinates)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppfenwickgoals.append(0)\n # ppcorsigoals.append(0)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n else:\n pass\n avgx = 0\n totalx = 0\n for i in range(0, len(fenwick)):\n totalx = totalx + fenwick[i][0]\n if len(fenwick) > 0:\n avgx = totalx / len(fenwick)\n if avgx > 100:\n # print(\"-------------------- INVERT HERE --------------------\")\n for fen in range(0, len(fenwick)):\n fenwick[fen][0] = -fenwick[fen][0] + 178\n fenwick[fen][1] = -fenwick[fen][1]\n print(shots)\n print(fenwick)\n print(evshots)\n print(evfenwick)\n print(ppshots)\n print(ppfenwick)\n # print(\"---------------- INVERSION COMPLETE ----------------\")\n return [shotsgoals, fenwickgoals, shots, fenwick, evshotsgoals, evfenwickgoals, evshots, evfenwick, ppshotsgoals, ppfenwickgoals, ppshots, ppfenwick]\n\ndef constructJSON(plays,gameno):\n # print(plays)\n gamedata = {}\n if plays != None:\n gamedata[\"Game\"] = \"G\" + str(gameno)\n gamedata[\"Goals\"] = {}\n gamedata[\"Goals\"][\"Shots\"] = plays[0]\n gamedata[\"Goals\"][\"Fenwick\"] = plays[1]\n # gamedata[\"Goals\"][\"Corsi\"] = plays[2]\n gamedata[\"TOI\"] = plays[12]\n gamedata[\"Shots\"] = plays[2]\n gamedata[\"Fenwick\"] = plays[3]\n # gamedata[\"Corsi\"] = plays[5]\n gamedata[\"Even Strength\"] = {}\n gamedata[\"Even Strength\"][\"Goals\"] = {}\n gamedata[\"Even Strength\"][\"Goals\"][\"Shots\"] = plays[4]\n gamedata[\"Even Strength\"][\"Goals\"][\"Fenwick\"] = plays[5]\n # gamedata[\"Even Strength\"][\"Goals\"][\"Corsi\"] = plays[8]\n gamedata[\"Even Strength\"][\"TOI\"] = plays[13]\n gamedata[\"Even Strength\"][\"Shots\"] = plays[6]\n gamedata[\"Even Strength\"][\"Fenwick\"] = plays[7]\n # gamedata[\"Even Strength\"][\"Corsi\"] = plays[11]\n gamedata[\"Power Play\"] = {}\n gamedata[\"Power Play\"][\"Goals\"] = {}\n gamedata[\"Power Play\"][\"Goals\"][\"Shots\"] = plays[8]\n gamedata[\"Power Play\"][\"Goals\"][\"Fenwick\"] = plays[9]\n # gamedata[\"Power Play\"][\"Goals\"][\"Corsi\"] = plays[14]\n gamedata[\"Power Play\"][\"TOI\"] = plays[14]\n gamedata[\"Power Play\"][\"Shots\"] = plays[10]\n gamedata[\"Power Play\"][\"Fenwick\"] = plays[11]\n # gamedata[\"Power Play\"][\"Corsi\"] = plays[17]\n return gamedata\n\ndef convertCoord(gamefeed, venue, period, x, y):\n if venue == \"home\":\n 
if gamefeed[\"gameData\"][\"venue\"][\"name\"] in homeLtRVenues:\n if period == 1 or period == 3:\n # print(\"Caps shooting left to right at home\")\n newX = -x + 89\n newY = y\n return [newX, newY]\n elif period == 2 or period == 4:\n # print(\"Caps shooting right to left at home\")\n newX = x + 89\n newY = -y\n return [newX, newY]\n else:\n pass\n elif gamefeed[\"gameData\"][\"venue\"][\"name\"] in homeRtLVenues:\n if period == 1 or period == 3:\n # print(\"Caps shooting right to left at home\")\n newX = x + 89\n newY = -y\n return [newX, newY]\n elif period == 2 or period == 4:\n # print(\"Caps shooting left to right at home\")\n newX = -x + 89\n newY = y\n return [newX, newY]\n else:\n pass\n else:\n print(\"venue error\")\n elif venue == \"away\":\n if gamefeed[\"gameData\"][\"venue\"][\"name\"] in ltrVenues:\n if period == 1 or period == 3:\n # print(\"Caps shooting right to left on the road\")\n newX = x + 89\n newY = -y\n return [newX, newY]\n elif period == 2 or period == 4:\n # print(\"Caps shooting left to right on the road\")\n newX = -x + 89\n newY = y\n return [newX, newY]\n else:\n pass\n elif gamefeed[\"gameData\"][\"venue\"][\"name\"] in rtlVenues:\n if period == 1 or period == 3:\n # print(\"Caps shooting left to right on the road\")\n newX = -x + 89\n newY = y\n return [newX, newY]\n elif period == 2 or period == 4:\n # print(\"Caps shooting right to left on the road\")\n newX = x + 89\n newY = -y\n return [newX, newY]\n else:\n pass\n else:\n print(\"venue error\")\n else:\n print(\"venue error\")\n\ndef main():\n if (today.month >= 8):\n currentseason = today.year\n else:\n currentseason = today.year - 1\n\n with open('advdata.json') as json_file:\n finalJSON = json.load(json_file)\n print(finalJSON)\n scheduleUrl = str(\"https://statsapi.web.nhl.com/api/v1/schedule?teamId=15&startDate=\" + str(currentseason) + \"-09-01&endDate=\" + str(currentseason + 1) + \"-05-01\")\n print(scheduleUrl)\n webUrl = urllib.request.urlopen(scheduleUrl)\n print (\"result code: \" + str(webUrl.getcode()))\n if (webUrl.getcode() == 200):\n data = webUrl.read()\n newgameJSON = json.loads(data)\n for i,d in enumerate(newgameJSON[\"dates\"]):\n gameJSON = getGame(newgameJSON, i)\n # print(gameJSON[0])\n if gameJSON != None:\n gameUrl = urllib.request.urlopen(gameJSON[0])\n print (\"result code: \" + str(gameUrl.getcode()))\n if (gameUrl.getcode() == 200):\n gamedata = gameUrl.read()\n shotinfo = parseGame(gamedata, gameJSON[1])\n if shotinfo != []:\n exportdict = constructJSON(shotinfo,gameJSON[1])\n print(exportdict)\n finalJSON.append(exportdict)\n else:\n print(\"Received error, cannot parse results\")\n else:\n print(\"Received error, cannot parse results\")\n with open('advdata.json', 'w+') as outfile:\n json.dump(finalJSON, outfile)\n\nif __name__ == \"__main__\":\n main()","sub_path":"PyTodaysData.py","file_name":"PyTodaysData.py","file_ext":"py","file_size_in_byte":23453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"213542859","text":"import pygame\nimport os\n\n# Special code to center window\nos.environ['SDL_VIDEO_CENTERED'] = '1'\n\n\nclass Snake(object):\n def __init__(self):\n self.snake = [(World.GRID_W/2-1, World.GRID_H/2), (World.GRID_W/2, World.GRID_H/2)]\n\n def draw(self, screen):\n block_size = World.BLOCK_SIZE\n block_color = World.WHITE\n for x, y in self.snake:\n block_pos = (x * World.BLOCK_W, y * World.BLOCK_H)\n rect = pygame.Rect(block_pos, block_size)\n pygame.draw.rect(screen, block_color, 
rect)\n\n\nclass World(object):\n FRAMES_PER_SECOND = 30\n RESOLUTION = WIDTH, HEGIHT = 800, 600\n GRID_SIZE = GRID_W, GRID_H = (40, 40)\n BLOCK_SIZE = BLOCK_W, BLOCK_H = (WIDTH / GRID_W, HEGIHT / GRID_H)\n WINDOWS_TITLE = 'Snake v1.0 by raz'\n BLACK = (0, 0, 0)\n WHITE = (255, 255, 255)\n\n def __init__(self):\n self.player = Snake()\n\n def draw(self, screen):\n screen.fill(self.BLACK)\n self.player.draw(screen)\n pygame.display.flip()\n\n\ndef main():\n # Initial Setup\n pygame.init()\n screen_surface = pygame.display.set_mode(World.RESOLUTION)\n pygame.display.set_caption(World.WINDOWS_TITLE)\n\n world = World()\n done = False\n clock = pygame.time.Clock()\n while not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n world.draw(screen_surface)\n # Delays until next frame\n clock.tick(World.FRAMES_PER_SECOND)\n pygame.quit()\n\nmain()","sub_path":"snake/snake02.py","file_name":"snake02.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"290972520","text":"#!/usr/bin/env python3\nimport random\n\n\ndef read_roster():\n with open('roster.txt', 'r') as f:\n line = f.readline()\n count = 0\n for line in f:\n x = line.split()\n if len(x) == 3:\n if 'e' in x[0]+x[1] or 'E' in x[0]+x[1]:\n print(x[0] + ' '+ x[1])\n count += 1\n else:\n if 'e' in x[0] or 'E' in x[0]:\n print(x[0])\n count += 1\n\n\n print(\"Total number of names that contain letter 'e' : {}\".format(count))\n\n\ndef main():\n \n read_roster()\n\nif __name__ == '__main__':\n main()\n","sub_path":"D06ex03.py","file_name":"D06ex03.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"201674344","text":"# coding=utf-8\nfrom src.testcase.GN_Y201H.WidgetOperation import *\n\n\nclass GNY201HNormalTimer2(WidgetOperation):\n @case_run(False)\n def run(self):\n self.case_module = u\"普通定时(#246)\" # 用例所属模块\n self.case_title = u'在线状态,4组开与4组关按自定义方式执行的定时执行状态检查' # 用例名称\n self.zentao_id = \"2064\" # 禅道ID\n\n # 用例动作\n def case(self):\n self.choose_home_device(conf[\"MAC\"][\"HW\"][0])\n\n self.delete_normal_timer()\n\n self.delete_delay_timer()\n\n self.set_power(\"power_off\")\n\n self.widget_click(self.page[\"control_device_page\"][\"normal_timer\"],\n self.page[\"normal_timer_page\"][\"title\"])\n\n now = time.strftime(\"%H:%M\")\n\n time_1 = [\"point\", \"09:00\"]\n start_time_1, set_time_1, cycle1 = self.create_normal_timer(now, time_on=time_1, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_2 = [\"point\", \"10:00\"]\n start_time_2, set_time_2, cycle2 = self.create_normal_timer(now, time_off=time_2, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_3 = [\"point\", \"11:00\"]\n start_time_3, set_time_3, cycle3 = self.create_normal_timer(now, time_on=time_3, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_4 = [\"point\", \"12:00\"]\n start_time_4, set_time_4, cycle4 = self.create_normal_timer(now, time_off=time_4, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_5 = [\"point\", \"13:00\"]\n start_time_5, set_time_5, cycle5 = self.create_normal_timer(now, time_on=time_5, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_6 = [\"point\", \"14:00\"]\n start_time_6, set_time_6, cycle6 = self.create_normal_timer(now, time_off=time_6, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_7 = [\"point\", \"15:00\"]\n start_time_7, set_time_7, cycle7 = self.create_normal_timer(now, time_on=time_7, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_8 = [\"point\", 
\"16:00\"]\n start_time_8, set_time_8, cycle8 = self.create_normal_timer(now, time_off=time_8, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n self.widget_click(self.page[\"normal_timer_page\"][\"to_return\"],\n self.page[\"control_device_page\"][\"title\"])\n\n self.check_timer(start_time_1, set_time_1, u\"电源已开启\", cycle1)\n self.check_timer(start_time_2, set_time_2, u\"电源已关闭\", cycle2)\n self.check_timer(start_time_3, set_time_3, u\"电源已开启\", cycle3)\n self.check_timer(start_time_4, set_time_4, u\"电源已关闭\", cycle4)\n self.check_timer(start_time_5, set_time_5, u\"电源已开启\", cycle5)\n self.check_timer(start_time_6, set_time_6, u\"电源已关闭\", cycle6)\n self.check_timer(start_time_7, set_time_7, u\"电源已开启\", cycle7)\n self.check_timer(start_time_8, set_time_8, u\"电源已关闭\", cycle8)\n","sub_path":"src/testcase/GN_Y201H/case/GN_Y201H_NORMAL_TIMER/GN_Y201H_NORMAL_TIMER_002.py","file_name":"GN_Y201H_NORMAL_TIMER_002.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"575455459","text":"'''\nAuthor: Viveque Ramji\nPurpose: Module to clean camera data and provide an open direction to move in\n\n'''\nimport numpy as np\nfrom scipy import sparse\nimport matplotlib.pyplot as plt\n\nimport adaptive_grid_sizing as ags\nimport voronoi\nimport sparse_interpolation as si\nimport obstacle_avoid as oa\n\nimport time\nimport logging\n\nclass Navigation:\n\t\"\"\"\n Object to use depth images to find gap to move in to.\n \"\"\"\n\n\tdef __init__(self, debug=False):\n\t\t\"\"\"\n\t Intitalize Navigation module\n\t \"\"\"\n\t\tself.debug = debug\n\n\n\tdef reconstructFrame(self, depth, perc_samples=0.005, min_sigma=0.5, min_h=10, algorithm_type='voronoi'):\n\t\t\"\"\"\n\t Givena partial depth image, will return an interpolated version filling\n\t all missing data.\n\t \"\"\"\n\n\n\t\tif algorithm_type == 'voronoi':\n\t\t\tsamples, measured_vector = si.createSamples(depth, perc_samples)\n\t\t\tif len(samples) <= 1:\n\t\t\t\treturn None\n\t\t\tfilled = voronoi.getVoronoi(depth.shape, samples, measured_vector)\n\t\telif algorithm_type == 'rbf':\n\t\t\tsamples, measured_vector = si.createSamples(depth, perc_samples)\n\t\t\tif len(samples) <= 1:\n\t\t\t\treturn None\n\t\t\tfilled = si.interpolateDepthImage(depth.shape,samples, measured_vector)\n\t\telif algorithm_type == 'ags_only':\n\t\t\tfilled = depth\n\n\t\tadapted = ags.depthCompletion(filled, min_sigma, min_h)\n\n\t\tif self.debug:\n\t\t\tsamples, measured_vector = si.createSamples(depth, perc_samples)\n\t\t\tsample_img = np.zeros((depth.shape)).flatten()\n\t\t\tsample_img[samples] = depth.flatten()[samples]\n\t\t\tsample_img = sample_img.reshape(depth.shape)\n\n\t\t\tself.plot(depth, sample_img, filled, adapted)\n\n\t\treturn adapted\n\tdef obstacleAvoid(self, depth, max_dist=1.2,barrier_h=.5):\n\t\t\"\"\"\n\t Given a depth image and a threshold value, will find the largest gap\n\t that can be used, returning the fraction along the images width where\n\t this is and the degrees rotation from the center. 
\n\t \"\"\"\n\t\tpos = oa.findLargestGap(depth, max_dist, barrier_h,DEBUG=self.debug)\n\t\treturn pos\n\n\tdef plot(self, depth, sample_img, filled, ags, cmap='viridis', b=True):\n\t\t\"\"\"\n\t Will plot the rgb image, original depth, interpolated depth and the\n\t position of where the algorithm recommends to move.\n\t \"\"\"\n\t\tplt.subplot(2, 2, 1)\n\t\tplt.title('Depth')\n\t\tplt.imshow(depth)\n\t\tplt.xticks(visible=False)\n\t\tplt.yticks(visible=False)\n\n\t\tplt.subplot(2, 2, 2)\n\t\tplt.imshow(sample_img, cmap=cmap)\n\t\tplt.title('Samples')\n\t\tplt.xticks(visible=False)\n\t\tplt.yticks(visible=False)\n\n\t\tplt.subplot(2, 2, 3)\n\t\tplt.imshow(filled, cmap=cmap)\n\t\tplt.title('RBF, Voronoi, or None')\n\t\tplt.xticks(visible=False)\n\t\tplt.yticks(visible=False)\n\n\t\tplt.subplot(2, 2, 4)\n\t\tplt.imshow(ags, cmap=cmap)\n\t\tplt.title('AGS')\n\t\tplt.xticks(visible=False)\n\t\tplt.yticks(visible=False)\n\n\n\t\tplt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)\n\t\tcax = plt.axes([0.85, 0.1, 0.075, 0.8])\n\t\tplt.colorbar(cax=cax)\n\n\t\tplt.show(block=~b)\n\t\tif b:\n\t\t\ttime.sleep(b)\n\t\t\tplt.close()\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Application example with visualization.\n \"\"\"\n depth = np.random.rand(10, 5)\n depth = np.hstack((depth*4, depth*0.9))\n depth[0, 5] = np.nan\n depth[0, 6] = np.nan\n depth[depth>4.0] = 0.0\n\n nav = Navigation(True)\n adapted = nav.reconstructFrame(depth, .1, .5, 10)\n frac, pos = nav.obstacleAvoid(adapted, 1.3)\n","sub_path":"main/navigation/nav.py","file_name":"nav.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"77644713","text":"from django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, HttpRequest, HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom accounts.models import Account\nfrom compute import views as compute_views\nfrom files.forms import DocumentForm\nfrom files.models import Document#, SummaryStats\nfrom files.views import *\n\nfrom forms import CreateForm\nimport json\nimport urllib2\n\ndef home_page(request):\n return redirect('home')\n\ndef list_panes(request):\n \"\"\"\n Entry handler for user's home page!\n \"\"\"\n # redirect to login page if not signed in\n if not request.user.is_authenticated():\n return redirect('top')\n\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n owner = Account.objects.get(id=request.user.id).username\n Document.upload_file(\n request.user.id,\n request.FILES['csv_file'].name,\n request.FILES['csv_file'],\n owner\n )\n # Redirect to the document list after POST\n return HttpResponseRedirect(reverse('app.views.list_panes'))\n\n elif request.method == 'GET':\n return render_to_response(\n 'home.html',\n get_panes_data(request.user.id),\n context_instance=RequestContext(request)\n )\n\ndef get_panes_data(acct_id):\n \"\"\"\n Fetches relevant data needed for template rendering on GET request\n :return: dict\n \"\"\"\n documents = Document.select_user_files(acct_id)\n shared_files = Document.select_shared_files(acct_id)\n user_name = Account.objects.get(id=acct_id).username\n upload = DocumentForm()\n form = CreateForm()\n panes = compute_views.handle_get_panes(acct_id)\n return {\n 'user_name': user_name,\n 'documents': documents,\n 'shared_documents': shared_files,\n 
'form': form,\n 'panes': panes,\n 'upload': upload\n }\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"274483350","text":"from mock import Mock, call\nfrom sprinter.testtools import FormulaTest\n\nsource_config = \"\"\"\n\"\"\"\n\ntarget_config = \"\"\"\n[simple_example]\nformula = sprinter.formulas.egg\negg = jedi\n\n[simple_multiple_eggs]\nformula = sprinter.formulas.egg\neggs = jedi, epc=0.5\n pelican\n\n[simple_multiple_and_single_eggs]\nformula = sprinter.formulas.egg\negg = sprinter\neggs = jedi, epc=0.5\n pelican\n\n[sprinter]\nformula = sprinter.formulas.egg\negg = http://github.com/toumorokoshi/sprinter/tarball/master\n\"\"\"\n\n\nclass TestEggFormula(FormulaTest):\n \"\"\" Tests for the egg formula \"\"\"\n\n def setup(self):\n super(TestEggFormula, self).setup(source_config=source_config,\n target_config=target_config)\n\n def test_simple_example(self):\n \"\"\" The egg formula should install a single egg \"\"\"\n self.environment.install_feature(\"simple_example\")\n self.lib.call.assert_called_with(\"pip install jedi\")\n\n def test_simple_multiple_eggs(self):\n \"\"\" The egg formula should install multiple eggs \"\"\"\n self.environment.install_feature(\"simple_multiple_eggs\")\n self.lib.call.assert_any_call(\"pip install jedi\")\n self.lib.call.assert_any_call(\"pip install epc=0.5\")\n self.lib.call.assert_any_call(\"pip install pelican\")\n\n def test_simple_multiple_and_single_eggs(self):\n \"\"\" The egg formula should install single and multiple eggs \"\"\"\n self.environment.install_feature(\"simple_multiple_and_single_eggs\")\n self.lib.call.assert_any_call(\"pip install jedi\")\n self.lib.call.assert_any_call(\"pip install epc=0.5\")\n self.lib.call.assert_any_call(\"pip install pelican\")\n self.lib.call.assert_any_call(\"pip install sprinter\")\n\n def test_sprinter(self):\n \"\"\" The sprinter egg formula should install sprinter from a remote protocol \"\"\"\n self.environment.install_feature(\"sprinter\")\n self.lib.call.assert_called_with(\"pip install http://github.com/toumorokoshi/sprinter/tarball/master\")\n","sub_path":"sprinter/formulas/egg_tests.py","file_name":"egg_tests.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"347448175","text":"\"\"\"DB re-creation\n\nRevision ID: 4eb4bae04a02\nRevises: \nCreate Date: 2018-11-15 14:13:50.929199\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4eb4bae04a02'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('music',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=150), nullable=False),\n sa.Column('source', sa.String(length=150), nullable=False),\n sa.Column('duration', sa.Integer(), nullable=False),\n sa.Column('loop', sa.Boolean(), nullable=False),\n sa.Column('vote', sa.Integer(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_music_title'), 'music', ['title'], unique=False)\n op.create_table('scene',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=32), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_scene_name'), 'scene', ['name'], unique=False)\n op.create_table('style',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=32), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_style_name'), 'style', ['name'], unique=False)\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('login', sa.String(length=64), nullable=False),\n sa.Column('password', sa.String(length=128), nullable=False),\n sa.Column('email', sa.String(length=256), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_login'), 'user', ['login'], unique=False)\n op.create_table('music_scene',\n sa.Column('scene_id', sa.Integer(), nullable=False),\n sa.Column('music_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['music_id'], ['music.id'], ),\n sa.ForeignKeyConstraint(['scene_id'], ['scene.id'], ),\n sa.PrimaryKeyConstraint('scene_id', 'music_id')\n )\n op.create_table('music_style',\n sa.Column('style_id', sa.Integer(), nullable=False),\n sa.Column('music_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['music_id'], ['music.id'], ),\n sa.ForeignKeyConstraint(['style_id'], ['style.id'], ),\n sa.PrimaryKeyConstraint('style_id', 'music_id')\n )\n op.create_table('playlist',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=32), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('playlist_music',\n sa.Column('playlist_id', sa.Integer(), nullable=False),\n sa.Column('music_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['music_id'], ['music.id'], ),\n sa.ForeignKeyConstraint(['playlist_id'], ['playlist.id'], ),\n sa.PrimaryKeyConstraint('playlist_id', 'music_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('playlist_music')\n op.drop_table('playlist')\n op.drop_table('music_style')\n op.drop_table('music_scene')\n op.drop_index(op.f('ix_user_login'), table_name='user')\n op.drop_table('user')\n op.drop_index(op.f('ix_style_name'), table_name='style')\n op.drop_table('style')\n op.drop_index(op.f('ix_scene_name'), table_name='scene')\n op.drop_table('scene')\n op.drop_index(op.f('ix_music_title'), table_name='music')\n op.drop_table('music')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/4eb4bae04a02_db_re_creation.py","file_name":"4eb4bae04a02_db_re_creation.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"563514035","text":"from networks.model_builder import ModelBuilder\nfrom networks.prepare_real_data import RealPreparer\nimport matplotlib.pyplot as plt\n\nmodel_name = 'rnn_gru_lstm_dence_ru_ru'\npath = 'models/' + model_name\n\ndata_builder, model_builder = RealPreparer(), ModelBuilder()\nx, y = data_builder.Run()\nmodel = model_builder.Build(x, y, test_part=0.1)\nmodel_builder.Save(path)\nmodel = model_builder.Load(path)\n\npredict = model.predict(x)\n\n\npredict = [predict[0][0]]*62 + [p[0] for p in predict]\n\nplt.plot(data_builder.y_plot, c='r')\nplt.plot(data_builder.x_plot)\nplt.plot(predict, c='b')\nplt.savefig(path + f'/{model_name}.png')\nplt.show()\n\n\n","sub_path":"networks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"367637235","text":"'画图'\nimport tworoundattack.utils as utils\nimport matplotlib.pyplot as plt\n\n\n#画有和没有aes操作时间直方图\ndef drawhistogram():\n aestime = utils.readtime()\n aesntime = utils.readntime()\n for i in range(128):\n plt.subplot(16, 8, 1 + i)\n if i < 64:\n plt.hist(aestime[0][i], 100)\n else:\n plt.hist(aesntime[0][i - 64], 100)\n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"aesattackpy/build/lib/tworoundattack/drawhistogram.py","file_name":"drawhistogram.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"78362397","text":"from ert.enkf.plot import EnsembleDataFetcher, ObservationDataFetcher, RefcaseDataFetcher, BlockObservationDataFetcher, EnsembleGenKWFetcher, EnsembleGenDataFetcher, ObservationGenDataFetcher\nfrom ert.enkf.plot.ensemble_block_data_fetcher import EnsembleBlockDataFetcher\nfrom ert_gui.tools.plot.data import PlotData, ObservationPlotData, EnsemblePlotData, RefcasePlotData, HistogramPlotDataFactory, ReportStepLessHistogramPlotDataFactory\nfrom ert_gui.models import ErtConnector\nfrom ert_gui.models.mixins import ModelMixin\n\n\nclass PlotDataFetcher(ErtConnector, ModelMixin):\n\n def getPlotDataForKeyAndCases(self, key, cases):\n observation_data_fetcher = ObservationDataFetcher(self.ert())\n block_observation_data_fetcher = BlockObservationDataFetcher(self.ert())\n gen_kw_fetcher = EnsembleGenKWFetcher(self.ert())\n gen_data_fetcher = EnsembleGenDataFetcher(self.ert())\n\n if self.isBlockObservationKey(key):\n return self.fetchBlockObservationData(block_observation_data_fetcher, key, cases)\n\n elif self.isSummaryKey(key):\n return self.fetchSummaryData(observation_data_fetcher, key, cases)\n\n elif self.isGenKWKey(key):\n return self.fetchGenKWData(gen_kw_fetcher, key, cases)\n\n elif self.isGenDataKey(key):\n return self.fetchGenData(gen_data_fetcher, key, 
cases)\n\n else:\n raise NotImplementedError(\"Key %s not supported.\" % key)\n\n\n def isSummaryKey(self, key):\n ensemble_data_fetcher = EnsembleBlockDataFetcher(self.ert())\n return ensemble_data_fetcher.supportsKey(key)\n\n\n def isBlockObservationKey(self, key):\n block_observation_data_fetcher = BlockObservationDataFetcher(self.ert())\n return block_observation_data_fetcher.supportsKey(key)\n\n\n def isGenKWKey(self, key):\n gen_kw_fetcher = EnsembleGenKWFetcher(self.ert())\n return gen_kw_fetcher.supportsKey(key)\n\n\n def isGenDataKey(self, key):\n obs_gen_data_fetcher = ObservationGenDataFetcher(self.ert())\n return obs_gen_data_fetcher.supportsKey(key)\n\n\n def fetchGenData(self, gen_data_fetcher, key, cases):\n plot_data = PlotData(key)\n\n ensemble_data = ObservationGenDataFetcher(self.ert()).fetchData(key, cases)\n\n if len(ensemble_data) > 0:\n observation_plot_data = ObservationPlotData(key)\n\n observation_plot_data.setObservationData(ensemble_data[\"x\"], ensemble_data[\"y\"], ensemble_data[\"std\"], ensemble_data[\"continuous\"])\n observation_plot_data.updateBoundaries(ensemble_data[\"min_x\"], ensemble_data[\"max_x\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n plot_data.setObservationData(observation_plot_data)\n\n for case in cases:\n ensemble_data = gen_data_fetcher.fetchData(key, case)\n\n if len(ensemble_data) > 0:\n ensemble_plot_data = EnsemblePlotData(key, case)\n ensemble_plot_data.setEnsembleData(ensemble_data[\"x\"], ensemble_data[\"y\"], ensemble_data[\"min_y_values\"], ensemble_data[\"max_y_values\"])\n ensemble_plot_data.updateBoundaries(ensemble_data[\"min_x\"], ensemble_data[\"max_x\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n plot_data.addEnsembleData(ensemble_plot_data)\n\n return plot_data\n\n\n def fetchGenKWData(self, gen_kw_fetcher, key, cases):\n plot_data = PlotData(key)\n\n histogram_factory = ReportStepLessHistogramPlotDataFactory(key)\n\n for case in cases:\n ensemble_data = gen_kw_fetcher.fetchData(key, case)\n\n plot_data.setShouldUseLogScale(ensemble_data[\"use_log_scale\"])\n\n ensemble_plot_data = EnsemblePlotData(key, case)\n ensemble_plot_data.setEnsembleData(ensemble_data[\"x\"], ensemble_data[\"y\"], [], [])\n ensemble_plot_data.updateBoundaries(ensemble_data[\"min_x\"], ensemble_data[\"max_x\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n\n plot_data.addEnsembleData(ensemble_plot_data)\n\n histogram_factory.addEnsembleData(case, ensemble_data[\"x\"], ensemble_data[\"y\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n\n plot_data.setHistogramFactory(histogram_factory)\n\n return plot_data\n\n\n def fetchBlockObservationData(self, block_observation_data_fetcher, key, cases):\n plot_data = PlotData(key)\n\n data = block_observation_data_fetcher.fetchData(key)\n block_observation_plot_data = ObservationPlotData(key)\n selected_report_step_index = 0\n\n if len(data) > 0:\n data = data[selected_report_step_index]\n block_observation_plot_data.setObservationData(data[\"x\"], data[\"y\"], data[\"std\"], False)\n block_observation_plot_data.updateBoundaries(data[\"min_x\"], data[\"max_x\"], data[\"min_y\"], data[\"max_y\"])\n\n plot_data.setObservationData(block_observation_plot_data)\n\n for case in cases:\n ensemble_data = EnsembleBlockDataFetcher(self.ert()).fetchData(key, case)\n\n if len(ensemble_data) > 0:\n ensemble_data = ensemble_data[selected_report_step_index]\n ensemble_plot_data = EnsemblePlotData(key, case)\n ensemble_plot_data.setEnsembleData(ensemble_data[\"x\"], ensemble_data[\"y\"], 
ensemble_data[\"min_x_values\"], ensemble_data[\"max_x_values\"])\n ensemble_plot_data.updateBoundaries(ensemble_data[\"min_x\"], ensemble_data[\"max_x\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n plot_data.addEnsembleData(ensemble_plot_data)\n\n return plot_data\n\n\n def fetchSummaryData(self, observation_data_fetcher, key, cases):\n plot_data = PlotData(key)\n\n histogram_factory = HistogramPlotDataFactory(key)\n\n observation_data = observation_data_fetcher.fetchData(key)\n observation_plot_data = ObservationPlotData(key)\n observation_plot_data.setObservationData(observation_data[\"x\"], observation_data[\"y\"], observation_data[\"std\"], observation_data[\"continuous\"])\n observation_plot_data.updateBoundaries(observation_data[\"min_x\"], observation_data[\"max_x\"], observation_data[\"min_y\"], observation_data[\"max_y\"])\n plot_data.setObservationData(observation_plot_data)\n\n histogram_factory.setObservations(observation_data[\"x\"], observation_data[\"y\"], observation_data[\"std\"], observation_data[\"min_y\"], observation_data[\"max_y\"])\n\n\n\n refcase_data = RefcaseDataFetcher(self.ert()).fetchData(key)\n refcase_plot_data = RefcasePlotData(key)\n refcase_plot_data.setRefcaseData(refcase_data[\"x\"], refcase_data[\"y\"])\n refcase_plot_data.updateBoundaries(refcase_data[\"min_x\"], refcase_data[\"max_x\"], refcase_data[\"min_y\"], refcase_data[\"max_y\"])\n plot_data.setRefcaseData(refcase_plot_data)\n\n histogram_factory.setRefcase(refcase_data[\"x\"], refcase_data[\"y\"], refcase_data[\"min_y\"], refcase_data[\"max_y\"])\n\n for case in cases:\n ensemble_data = EnsembleDataFetcher(self.ert()).fetchData(key, case)\n\n ensemble_plot_data = EnsemblePlotData(key, case)\n ensemble_plot_data.setEnsembleData(ensemble_data[\"x\"], ensemble_data[\"y\"], ensemble_data[\"min_y_values\"], ensemble_data[\"max_y_values\"])\n ensemble_plot_data.updateBoundaries(ensemble_data[\"min_x\"], ensemble_data[\"max_x\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n plot_data.addEnsembleData(ensemble_plot_data)\n\n histogram_factory.addEnsembleData(case, ensemble_data[\"x\"], ensemble_data[\"y\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n\n plot_data.setHistogramFactory(histogram_factory)\n\n return plot_data\n\n","sub_path":"devel/python/python/ert_gui/tools/plot/data/plot_data_fetcher.py","file_name":"plot_data_fetcher.py","file_ext":"py","file_size_in_byte":7705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"639219275","text":"# -*- coding: utf-8 -*-\n\nfrom flask import render_template\nimport mysql.connector\n\ndef select(_SQL, cursor, conn, params=None):\n try:\n cursor.execute(_SQL, params)\n result = cursor.fetchall()\n except mysql.connector.Error as e:\n cursor.close()\n conn.close()\n err_output = \"Невозможно выполнить запрос к базе данных.\" + \" \" + str(e.errno) + \" \" + e.msg\n return None, render_template('err_output.html', err_output=err_output, nav_buttons=True, back='back')\n \n return result, None\n","sub_path":"includes/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"103609438","text":"#!/usr/bin/python\n\"\"\"Syslog Replay Tool.\n\nAuthor: Nicholas Albright\nThis is a quick and dirty tool that will replay messages found in a text file to a remote syslog server.\nGeneric use example:\n MSSP Walks into a customer environment to do monitoring post 
security event.\n    Customer had no central logging prior to the security event.\n    On-box logging is still in place.\n    Running this tool on existing syslog messages -> forward to SIEM will give us historical logs.\n* NOTE: Timestamps need to be 'generated time', not ingested time.\n\"\"\"\nimport sys\nimport time\nimport socket\nimport logging\nimport argparse\n\nlogging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\nlog = logging.getLogger(__name__)\n\n\ndef sendlog(filename, server, port, rate):\n    \"\"\"Send our Syslog Data to remote server.\"\"\"\n    remote = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    row_count = 0\n    rate_count = 0\n    try:\n        with open(filename, 'r') as fh:\n            for line in fh:\n                row_count += 1\n                rate_count += 1\n                if rate_count >= rate:\n                    time.sleep(1)\n                    rate_count = 0\n                # UDP sockets expect bytes, so encode the stripped line\n                remote.sendto(line.rstrip().encode(), (server, int(port)))\n    except Exception as err:\n        log.exception(err)\n\n    return row_count\n\n\ndef optionparse():\n    \"\"\"Parse Options.\"\"\"\n    opts = argparse.ArgumentParser(description='Nicholas\\' Syslog Replay Tool')\n    opts.add_argument('filename', help='File containing syslog formatted messages')\n    opts.add_argument('-p', '--port', help='Syslog port (UDP), Default: 514', default=514)\n    opts.add_argument('-r', '--rate', help='Rate Per Second (or events per second), Default: 300', default=300)\n    opts.add_argument('-s', '--server', help='Server to send messages to.')\n    parsed_args = opts.parse_args()\n    if not parsed_args.server:\n        opts.print_help()\n        sys.exit()\n    return parsed_args\n\n\nif __name__ == '__main__':\n    args = optionparse()\n    start = time.time()\n    row_count = sendlog(args.filename, args.server, args.port, int(args.rate))\n    end = time.time()\n    log.info('Addressed %d messages in %s seconds' % (row_count, str(end - start)))\n","sub_path":"syslog_replay.py","file_name":"syslog_replay.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"606160272","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\n# python std lib\nimport copy\nimport logging\nimport os\nimport re\n\n# phabfive imports\nfrom phabfive.exceptions import PhabfiveConfigException, PhabfiveRemoteException\n\n\n# 3rd party imports\nimport anyconfig\nimport appdirs\nfrom phabricator import Phabricator, APIError\n\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(__name__)\nlogging.getLogger(\"anyconfig\").setLevel(logging.ERROR)\n\n\nCONFIGURABLES = [\"PHABFIVE_DEBUG\", \"PHAB_TOKEN\", \"PHAB_URL\"]\nDEFAULTS = {\"PHABFIVE_DEBUG\": False, \"PHAB_TOKEN\": \"\", \"PHAB_URL\": \"\"}\nREQUIRED = [\"PHAB_TOKEN\", \"PHAB_URL\"]\nVALIDATORS = {\n    \"PHAB_URL\": \"^http(s)?://[a-zA-Z0-9._-]+/api/$\",\n    \"PHAB_TOKEN\": \"^[a-zA-Z0-9-]{32}$\",\n}\nVALID_EXAMPLES = {\"PHAB_URL\": \"example: http://127.0.0.1/api/\"}\nCONFIG_EXAMPLES = {\n    \"PHAB_TOKEN\": \"example: export PHAB_TOKEN=cli-RANDOMRANDOMRANDOMRANDOMRAND\",\n    \"PHAB_URL\": \"example: echo PHAB_URL: https://dynamist.phacility.com/api/ >> ~/.config/phabfive.yaml\",\n}\n\n\nclass Phabfive(object):\n    def __init__(self):\n\n        # Get super-early debugging by `export PHABFIVE_DEBUG=1`\n        if \"PHABFIVE_DEBUG\" in os.environ:\n            log.setLevel(logging.DEBUG)\n            log.info(\n                \"Loglevel is: {}\".format(logging.getLevelName(log.getEffectiveLevel()))\n            )\n\n        self.conf = self.load_config()\n\n        maxlen = 8 + len(max(dict(self.conf).keys(), key=len))\n        for k, v in dict(self.conf).items():\n            
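# each configurable is echoed at debug level; the dots pad the key for alignment\n            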
log.debug(\"{} {} {}\".format(k, \".\" * (maxlen - len(k)), v))\n\n # check for required configurables\n for k, v in dict(self.conf).items():\n if k in REQUIRED and not v:\n error = \"{} is not configured\".format(k)\n example = CONFIG_EXAMPLES.get(k)\n if example:\n error += \", \" + example\n raise PhabfiveConfigException(error)\n\n # check validity of configurables\n for k, v in VALIDATORS.items():\n if not re.match(VALIDATORS[k], self.conf[k]):\n error = \"{} is malformed\".format(k)\n example = VALID_EXAMPLES.get(k)\n if example:\n error += \", \" + example\n raise PhabfiveConfigException(error)\n self.phab = Phabricator(\n host=self.conf.get(\"PHAB_URL\"), token=self.conf.get(\"PHAB_TOKEN\")\n )\n\n self.verify_connection()\n\n def verify_connection(self):\n \"\"\"\n \"\"\"\n try:\n result = self.phab.user.whoami()\n except APIError as e:\n raise PhabfiveRemoteException(e.message)\n\n def load_config(self):\n \"\"\"\n Load configuration from configuration files and environment variables.\n\n Search order, latest has presedence:\n\n 1. hard coded defaults\n 2. /etc/phabfive.yaml\n 3. /etc/phabfive.d/*.yaml\n 4. ~/.config/phabfive.yaml\n 5. ~/.config/phabfive.d/*.yaml\n 6. environment variables\n \"\"\"\n environ = os.environ.copy()\n\n log.debug(\"Loading configuration defaults\")\n conf = copy.deepcopy(DEFAULTS)\n\n os.environ[\"XDG_CONFIG_DIRS\"] = \"/etc\"\n\n site_conf_file = os.path.join(appdirs.site_config_dir(\"phabfive\") + \".yaml\")\n log.debug(\"Loading configuration file: {}\".format(site_conf_file))\n anyconfig.merge(\n conf,\n {\n k: v\n for k, v in dict(\n anyconfig.load(site_conf_file, ignore_missing=True)\n ).items()\n if k in CONFIGURABLES\n },\n )\n\n site_conf_dir = os.path.join(\n appdirs.site_config_dir(\"phabfive\") + \".d\", \"*.yaml\"\n )\n log.debug(\"Loading configuration files: {}\".format(site_conf_dir))\n anyconfig.merge(\n conf,\n {\n k: v\n for k, v in dict(anyconfig.load(site_conf_dir)).items()\n if k in CONFIGURABLES\n },\n )\n\n user_conf_file = os.path.join(appdirs.user_config_dir(\"phabfive\")) + \".yaml\"\n log.debug(\"Loading configuration file: {}\".format(user_conf_file))\n anyconfig.merge(\n conf,\n {\n k: v\n for k, v in dict(\n anyconfig.load(user_conf_file, ignore_missing=True)\n ).items()\n if k in CONFIGURABLES\n },\n )\n\n user_conf_dir = os.path.join(\n appdirs.user_config_dir(\"phabfive\") + \".d\", \"*.yaml\"\n )\n log.debug(\"Loading configuration files: {}\".format(user_conf_dir))\n anyconfig.merge(\n conf,\n {\n k: v\n for k, v in dict(anyconfig.load(user_conf_dir)).items()\n if k in CONFIGURABLES\n },\n )\n\n log.debug(\"Loading configuration from environment\")\n anyconfig.merge(conf, {k: v for k, v in environ.items() if k in CONFIGURABLES})\n\n return conf\n","sub_path":"phabfive/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"405948679","text":"# from python\n\n# Django modules\nfrom django.shortcuts import render_to_response\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage\nfrom django.template import RequestContext\nfrom django.db import transaction\nfrom django.db.models import get_model\nfrom django.views.decorators.csrf import csrf_protect\n\n# Our modules\nfrom infra.forms.audit_log import 
SearchAuditLogForm\nfrom infra.models import AuditLog\nfrom bin.constants import PAGINATE_BY\nfrom infra.custom.build_query_filter import build_query_filter\nimport infra.custom.custom_json as custom_json\n\n# Decorators to ensure that user is logged in and\n# is a staff member\n@login_required\n@staff_member_required\n@csrf_protect\n# Have to fix all exceptions before enabling commit manually.\n# When turned on, it will hide any exception behind its TransactionManagementError,\n# so comment it out when debugging...\n@transaction.commit_manually\ndef show_audit_log(request, app_label, object_name, object_id):\n    \"\"\"\n    Display audit log for an app.model and id.\n    Allows further search by username, fieldname\n    and changed date.\n\n    Uses template infra/show_audit_log.html\n\n    \"\"\"\n    # Initialise Error Messages\n    error_messages = []\n\n    # Model and Object Id are mandatory\n    app_model = get_model(app_label, object_name)\n    if app_model is None:\n        error_messages.append(_(\"App.Model %(app_label)s.%(object_name)s passed in does not exist in this Application\") % \n            {'app_label': app_label, 'object_name': object_name}) \n    # Get the model instance description (its unicode())\n    try:\n        model_instance = app_model.objects.get(pk=object_id)\n        instance_desc = unicode(model_instance)\n    except app_model.DoesNotExist:\n        # Deleted instance?\n        instance_desc = \" with Primary Key \" + str(object_id) \n\n    # Always query audit header for this model and id\n    queryset_criteria = ['and', ['iexact', 'audit_header__model_name', app_label + '.' + object_name],\n        ['exact', 'audit_header__primary_key_id', object_id]]\n\n    # Post happens when user performs a search\n    if request.method == 'POST':\n\n        # Decrement the number of levels we have to go back (so that we\n        # will return to the Browse page)\n        back_levels = int(request.POST['back_levels']) - 1\n\n        search_form = SearchAuditLogForm(request.POST)\n        # User pressed search button \n        if request.POST.has_key('search'):\n            if search_form.is_valid():\n                # Perform Search\n                if search_form.cleaned_data['changed_by']:\n                    queryset_criteria.append(['exact', 'audit_header__changed_by', search_form.cleaned_data['changed_by']])\n                if search_form.cleaned_data['changed_on']:\n                    queryset_criteria.append(['gte', 'audit_header__changed_on', search_form.cleaned_data['changed_on']])\n                if search_form.cleaned_data['field_name']:\n                    queryset_criteria.append(['exact', 'field_name', search_form.cleaned_data['field_name']])\n        else:\n            # else user is doing paging\n            # We need to restore previously saved search criteria\n            queryset_criteria = custom_json.loads(request.POST.get('queryset_criteria'))\n    else:\n        # GET Method (1st time this page is called)\n        search_form = SearchAuditLogForm()\n        back_levels = -1\n\n    # Perform query based on search criteria\n    audit_logs = AuditLog.objects.filter(build_query_filter(queryset_criteria)).order_by('audit_header__changed_on', 'id')\n\n    # Pass the records found to the paginator\n    paginator = Paginator(audit_logs, PAGINATE_BY)\n    # Page requested, default to page 1 when not requested\n    try:\n        page = int(request.POST.get('page', '1'))\n        audit_log_page = paginator.page(page)\n    except (InvalidPage, EmptyPage):\n        # Display last page when invalid or empty page\n        audit_log_page = paginator.page(paginator.num_pages)\n\n    # Even reads need to be committed\n    transaction.commit()\n    # Display page to user. 
We have to commit again, possibly because render to response will \n # dirty trx buffer again\n with transaction.commit_on_success(): return render_to_response('infra/show_audit_log.html', {'search_form': search_form, \n 'audit_log_page' : audit_log_page, 'queryset_criteria': custom_json.dumps(queryset_criteria),\n 'object_desc': app_model._meta.verbose_name, 'instance_desc': instance_desc, 'back_levels': back_levels,\n 'error_messages': error_messages, 'media': search_form.media,\n }, context_instance=RequestContext(request))\n","sub_path":"infra/views/show_audit_log.py","file_name":"show_audit_log.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"612001103","text":"# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-\n### BEGIN LICENSE\n# Copyright (C) 2020 \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n### END LICENSE\n\nimport optparse, sys\nfrom locale import gettext as _\n\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk # pylint: disable=E0611\n\nfrom remarkable import RemarkableWindow\n\nfrom remarkable_lib import set_up_logging, get_version\n\ndef parse_options():\n \"\"\"Support for command line options\"\"\"\n parser = optparse.OptionParser(version=\"%%prog %s\" % get_version())\n parser.add_option(\n \"-v\", \"--verbose\", action=\"count\", dest=\"verbose\",\n help=_(\"Show debug messages (-vv debugs remarkable_lib also)\"))\n (options, args) = parser.parse_args()\n\n set_up_logging(options)\n\ndef main():\n 'constructor for your class instances'\n parse_options()\n\n # Run the application. 
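RemarkableWindow below is the Gtk main window; settings are checked only after it is shown (see the inline note).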
\n    window = RemarkableWindow.RemarkableWindow()\n\n    window.show_all()\n    window.check_settings() # Load settings after app displayed to fix bugs!\n    Gtk.main()\n","sub_path":"remarkable/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"24588015","text":"\"\"\"\r\nThis is an adaptation of the Ion module of Numworks.\r\nPlease don't use the keyboard and this module at the same time.\r\n\"\"\"\r\n### v All keys of Numworks v ###\r\nKEY_LEFT = 0\r\nKEY_UP = 1\r\nKEY_DOWN = 2\r\nKEY_RIGHT = 3\r\nKEY_OK = 4\r\nKEY_BACK = 5\r\nKEY_HOME = 6\r\nKEY_ONOFF = 7\r\nKEY_SHIFT = 12\r\nKEY_ALPHA = 13\r\nKEY_XNT = 14\r\nKEY_VAR = 15\r\nKEY_TOOLBOX = 16\r\nKEY_BACKSPACE = 17\r\nKEY_EXP = 18\r\nKEY_LN = 19\r\nKEY_LOG = 20\r\nKEY_IMAGINARY = 21\r\nKEY_COMMA = 22\r\nKEY_POWER = 23\r\nKEY_SINE = 24\r\nKEY_COSINE = 25\r\nKEY_TANGENT = 26\r\nKEY_PI = 27\r\nKEY_SQRT = 28\r\nKEY_SQUARE = 29\r\nKEY_SEVEN = 30\r\nKEY_EIGHT = 31\r\nKEY_NINE = 32\r\nKEY_LEFTPARENTHESIS = 33\r\nKEY_RIGHTPARENTHESIS = 34\r\nKEY_FOUR = 36\r\nKEY_FIVE = 37\r\nKEY_SIX = 38\r\nKEY_MULTIPLICATION = 39\r\nKEY_DIVISION = 40\r\nKEY_ONE = 42\r\nKEY_TWO = 43\r\nKEY_THREE = 44\r\nKEY_PLUS = 45\r\nKEY_MINUS = 46\r\nKEY_ZERO = 48\r\nKEY_DOT = 49\r\nKEY_EE = 50\r\nKEY_ANS = 51\r\nKEY_EXE = 52\r\nKEYS = [\r\n    \"left\", \"up\", \"down\", \"right\", \"return\", \"del\", \"home\", \"end\", None, None, \r\n    None, None, \"shift\", \"ctrl\", \":\", \";\", \"\\\"\", \"backspace\", \"[\", \"]\", \r\n    \"{\", \"}\", \", \", \"^\", \"s\", \"c\", \"t\", \"p\", \"<\", \"²\", \r\n    \"7\", \"8\", \"9\", \"(\", \")\", None, \"4\", \"5\", \"6\", \"*\", \r\n    \"/\", None, \"1\", \"2\", \"3\", \"+\", \"-\", None, \"0\", \".\", \r\n    \"insert\", \"@\", \"enter\"\r\n]\r\n### ^ All keys of Numworks ^ ###\r\n\r\nfrom keyboard import is_pressed\r\n\r\ndef keydown(key):\r\n    if key < 0 or key > 52 or KEYS[key] == None: return False\r\n    else: return is_pressed(KEYS[key])\r\n\r\ndef get_keys(): return KEYS\r\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"383737730","text":"import numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n#matplotlib.use('Agg')\r\nimport time\r\nimport datetime\r\nimport math\r\nimport random\r\nimport copy\r\nimport pandas as pd\r\n#from pandas.tools import plotting # import advanced plotting tools\r\nfrom scipy.cluster.hierarchy import linkage, dendrogram, fcluster\r\nfrom sklearn.cluster import KMeans # perform K-means clustering\r\nimport xlrd\r\n\r\nplt.style.use('ggplot')\r\nfig = plt.figure(figsize=(5, 5))\r\nax = fig.add_subplot(111)\r\n\r\nvn = 6 #vehicle number\r\n#cp = 50 #collection place+1(include depot)\r\n#cp_list = [i for i in range(1,cp)]\r\n#MAP_SIZE = 10\r\n\r\n'''\r\ncp_xy = np.random.rand(cp, 2) * MAP_SIZE #generate the matrix: rand(rows, cols)\r\nk = 0\r\nfor j in range(vn):\r\n    print('!!!',j)\r\n    if j != 0:\r\n        k += math.ceil(cp/vn)-1\r\n    p = 10*MAP_SIZE*np.random.rand()*np.random.choice([-1,1])\r\n    q = 10*MAP_SIZE*np.random.rand()*np.random.choice([-1,1])\r\n    for i in range(math.ceil(cp/vn)-1):\r\n        print(i+k)\r\n        cp_xy[i+k][0] += p\r\n        cp_xy[i+k][1] += q\r\n\r\ncp_xy[0][0] = 0.\r\ncp_xy[0][1] = 0.\r\n\r\n\r\n#print(cp_xy)\r\n'''\r\n\r\n#load the delivery-point position data from the Excel file-------------------------\r\nbook = 
xlrd.open_workbook('city_position2.xlsx')\r\nsheet = book.sheet_by_name('Sheet1')\r\ndef get_list(sheet):\r\n    return [sheet.row_values(row) for row in range(sheet.nrows)]\r\ncp_xy = get_list(sheet)\r\ncp_xy = np.array(cp_xy) #convert the list to a numpy array\r\n\r\nx = cp_xy[:, 0] #column 0 of every row\r\ny = cp_xy[:, 1] #column 1 of every row\r\n\r\ncp = sheet.nrows #number of cities\r\n\r\nprint(cp)\r\nprint(x)\r\nprint(y)\r\n#-------------------------------------------------------------------\r\n\r\nd_matrix = np.sqrt((x[:, np.newaxis] - x[np.newaxis, :]) ** 2 +\r\n                   (y[:, np.newaxis] - y[np.newaxis, :]) ** 2) #distance matrix\r\n\r\norder = [[] for i in range(vn)] #holds each vehicle's visit order; the first stop of the second vehicle is total_order[1][0]\r\n\r\n\r\n\r\n#print(cp_list)\r\n#print(cp_list[0:3])\r\n\r\n\r\ndef separate_solution(cp_list, vn, order):\r\n    #build the initial order, dealing one city to each vehicle in turn\r\n    #random.shuffle(cp_list)\r\n    k = 0\r\n    while k != len(cp_list):\r\n        for i in range(vn):\r\n            if k == len(cp_list):\r\n                break\r\n            order[i].append(cp_list[k])\r\n            k += 1\r\n    #print('randam_order = ',t_order)\r\n    for i in range(vn): #add the depot\r\n        order[i].insert(0, 0)\r\n\r\n\r\ndef calculate_total_distance(order, d_matrix):\r\n    \"\"\"Calculate total distance traveled for given visit order\"\"\"\r\n    total_distance = 0\r\n    for i in range(vn):\r\n        idx_from = np.array(order[i]) #visit order\r\n        idx_to = np.array(order[i][1:] + [order[i][0]]) #visit order shifted by one\r\n        distance_arr = d_matrix[idx_from, idx_to] #element k is d_matrix[idx_from[k], idx_to[k]]\r\n        total_distance += np.sum(distance_arr)\r\n\r\n    return total_distance\r\n\r\ndef calculate_each_distance(i, order, d_matrix):\r\n    \"\"\"Calculate each distance traveled for given visit order\"\"\"\r\n    idx_from = np.array(order[i]) #visit order\r\n    idx_to = np.array(order[i][1:] + [order[i][0]]) #visit order shifted by one\r\n    #print(idx_from)\r\n    #print(idx_to)\r\n    distance_arr = d_matrix[idx_from, idx_to] #element k is d_matrix[idx_from[k], idx_to[k]]\r\n\r\n    return np.sum(distance_arr)\r\n\r\n#improve cluster-wise by reassigning delivery points\r\ndef k_means_method(order, d_matrix, vn):\r\n    #d_matrix_del = np.delete(d_matrix, [0, 0], 1)\r\n    d_matrix_pd = pd.DataFrame(d_matrix)\r\n    kmeans_model = KMeans(n_clusters=vn, random_state=10).fit(d_matrix_pd.iloc[:, 1:])\r\n    labels = kmeans_model.labels_\r\n\r\n    print(labels)\r\n\r\n    for i in range(cp):\r\n        order[labels[i]].append(i)\r\n\r\n    for i in range(vn): #add the depot\r\n        order[i].insert(0, 0)\r\n\r\n    return order\r\n\r\n\r\ndef visualize_visit_order(order, cp_xy, vn):\r\n    \"\"\"Visualize traveling path for given visit order\"\"\"\r\n    for i in range(vn):\r\n        route = np.array(order[i] + [order[i][0]])  # add point of departure\r\n        x_arr = cp_xy[:, 0][route] #reorder along the route\r\n        y_arr = cp_xy[:, 1][route] #reorder along the route\r\n\r\n        plt.plot(x_arr, y_arr, 'o-') #note the argument order\r\n\r\n    plt.show()\r\n    #plt.savefig('VRP_myself'+str(datetime.datetime.now())+'.png')\r\n\r\n\r\n\r\n\r\n#below: local search, specifically the 2-opt method-------------------------------------------------------------\r\n\r\ndef calculate_2opt_total_distance(order, distance_matrix):\r\n    \"\"\"Calculate total distance traveled for given visit order\"\"\"\r\n    idx_from = np.array(order) #visit order\r\n    idx_to = np.array(order[1:] + [order[0]]) #visit order shifted by one\r\n    distance_arr = distance_matrix[idx_from, idx_to] #element k is distance_matrix[idx_from[k], idx_to[k]]\r\n\r\n    return np.sum(distance_arr)\r\n\r\n#compute the change in travel distance\r\ndef calculate_2opt_exchange_cost(visit_order, i, j, distance_matrix):\r\n    \"\"\"Calculate the difference of cost by applying given 2-opt exchange\"\"\"\r\n    n_cities = len(visit_order)\r\n    a, b = visit_order[i], visit_order[(i + 1) % n_cities] #only the largest city index wraps around; otherwise this is the neighboring city\r\n    c, 
d = visit_order[j], visit_order[(j + 1) % n_cities]\r\n\r\n    cost_before = distance_matrix[a, b] + distance_matrix[c, d]\r\n    cost_after = distance_matrix[a, c] + distance_matrix[b, d]\r\n    return cost_after - cost_before\r\n\r\n#compute the visit order after the exchange\r\ndef apply_2opt_exchange(visit_order, i, j):\r\n    \"\"\"Apply 2-opt exchanging on visit order\"\"\"\r\n\r\n    tmp = visit_order[i + 1: j + 1]\r\n    tmp.reverse()\r\n    visit_order[i + 1: j + 1] = tmp\r\n\r\n    return visit_order\r\n\r\n#Neighborhood search: try every pairwise edge exchange on the current route.\r\n#All candidates are evaluated and the exchange that cuts total distance the most is applied; returns None when no improvement is possible.\r\ndef improve_with_2opt(visit_order, distance_matrix):\r\n    \"\"\"Check all 2-opt neighbors and improve the visit order\"\"\"\r\n    n_cities = len(visit_order)\r\n    cost_diff_best = 0.0\r\n    i_best, j_best = None, None\r\n\r\n    #for-loop that picks the single most effective exchange among all combinations\r\n    for i in range(0, n_cities - 2):\r\n        for j in range(i + 2, n_cities):\r\n            if i == 0 and j == n_cities - 1: #do not swap the first and last positions??\r\n                continue #skip the rest when this condition holds (run it otherwise)\r\n\r\n            cost_diff = calculate_2opt_exchange_cost(\r\n                visit_order, i, j, distance_matrix)\r\n            #keep the exchange with the smallest (most negative) cost difference\r\n            if cost_diff < cost_diff_best:\r\n                cost_diff_best = cost_diff\r\n                i_best, j_best = i, j\r\n    #if the best difference is below zero (any improvement at all), swap the two positions\r\n    if cost_diff_best < 0.0:\r\n        visit_order_new = apply_2opt_exchange(visit_order, i_best, j_best)\r\n        return visit_order_new\r\n    else:\r\n        return None\r\n\r\n#repeat the neighborhood search above for as long as it keeps improving\r\ndef local_search(visit_order, distance_matrix, improve_func): #improve_func = improve_with_2opt\r\n    \"\"\"Main procedure of local search\"\"\"\r\n    cost_total = calculate_2opt_total_distance(visit_order, distance_matrix)\r\n\r\n    while True:\r\n        improved = improve_func(visit_order, distance_matrix)\r\n        if not improved: #break when no further improvement #not sure\r\n            break\r\n\r\n        visit_order = improved\r\n\r\n    return visit_order\r\n\r\n\r\ndef opt_improve(vn, improve_order, d_matrix, improve_with_2opt):\r\n    opt_improve_order = [[] for i in range(vn)]\r\n    for i in range(vn):\r\n        opt_improve_order[i] = local_search(improve_order[i], d_matrix, improve_with_2opt)\r\n\r\n    return opt_improve_order\r\n\r\n#end of the improvement methods-----------------------------------------------------------------------------------------\r\n\r\n\r\n#improve to some extent--------------------------------------------------------------------\r\n#total_distance = calculate_total_distance(order, d_matrix) #total distance before improvement\r\n\r\n#1. assign delivery points cluster-wise\r\nimprove_order = k_means_method(order, d_matrix,vn)\r\nprint('order = ' ,improve_order)\r\nt = calculate_total_distance(improve_order, d_matrix)\r\n#print(total_distance, '-->', t)\r\n\r\n\r\n#2. improve each vehicle's route with 2-opt\r\nopt_improve_order = opt_improve(vn, improve_order, d_matrix, improve_with_2opt)\r\nt_opt = calculate_total_distance(opt_improve_order, d_matrix)\r\nprint(t, '-->', t_opt)\r\norder = opt_improve_order\r\n\r\nvisualize_visit_order(order, cp_xy, vn)\r\n\r\n'''\r\n#below: run the delivery (collection) along the routes found above--------------------------------------\r\n\r\nfor i in range(cp): #plot the delivery points\r\n    ax.scatter(x[i], y[i], c='red')\r\nax.scatter(x[0], y[0], marker=',', s=200, c='b')\r\n\r\nstate = [0 for i in range(vn)] #stores each vehicle's current location\r\nnext_state = []\r\nfor i in range(vn):\r\n    if len(order[i]) > 1:\r\n        next_state.append(order[i][1])\r\n    else:\r\n        next_state.append(0)\r\n\r\nims =[]\r\nfor i in range(max([len(order[i]) for i in range(vn)])):\r\n\r\n    for j in range(vn):\r\n        if len(order[j]) > i+1:\r\n            state[j] = order[j][i]\r\n            next_state[j] = order[j][i+1]\r\n        else:\r\n            state[j] = next_state[j]\r\n            next_state[j] = 0\r\n\r\n    
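# each animation frame advances every vehicle one stop along its route\r\n    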
#print('state = ',state,'next_state = ', next_state)\r\n\r\n    im1=[0 for i in range(vn)]\r\n    im2=[0 for i in range(vn)]\r\n    each_vn = []\r\n    tot_vn = []\r\n    for k in range(vn):\r\n        im1[k] = ax.plot([x[state[k]], x[next_state[k]]], [y[state[k]], y[next_state[k]]], 'r' '-',lw=7)\r\n        each_vn += im1[k]\r\n\r\n        route = np.array(order[k] + [order[k][0]])  # add point of departure\r\n        x_arr = cp_xy[:, 0][route] #reorder along the route\r\n        y_arr = cp_xy[:, 1][route] #reorder along the route\r\n        im2[k] = ax.plot(x_arr, y_arr, 'k', '--')\r\n        tot_vn += im2[k]\r\n\r\n    ims.append(each_vn + tot_vn)\r\n\r\n\r\nani = animation.ArtistAnimation(fig, ims, interval=500, repeat_delay=1500)\r\n#ani.save('')\r\nplt.show()\r\n'''\r\n","sub_path":"VRP/VRP_myself4_cluster_k-mean_from_excel.py","file_name":"VRP_myself4_cluster_k-mean_from_excel.py","file_ext":"py","file_size_in_byte":10549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"499544703","text":"# ../scripts/included/gg_welcome_msg/gg_welcome_msg.py\r\n\r\n'''\r\n$Rev$\r\n$LastChangedBy$\r\n$LastChangedDate$\r\n'''\r\n\r\n# =============================================================================\r\n# >> IMPORTS\r\n# =============================================================================\r\n# Python Imports\r\nfrom __future__ import with_statement\r\nimport time\r\n\r\n# Eventscripts Imports\r\nimport es\r\nfrom cmdlib import registerSayCommand\r\nfrom cmdlib import unregisterSayCommand\r\nimport popuplib\r\n\r\n# GunGame Imports\r\nfrom gungame51.core.addons.shortcuts import AddonInfo\r\nfrom gungame51.core import get_game_dir\r\nfrom gungame51.core import gungame_info\r\nfrom gungame51.core.addons.shortcuts import get_loaded_addon_list\r\n\r\n\r\n# =============================================================================\r\n# >> ADDON REGISTRATION/INFORMATION\r\n# =============================================================================\r\ninfo = AddonInfo()\r\ninfo.name = 'gg_welcome_msg'\r\ninfo.title = 'GG Welcome Message'\r\ninfo.author = 'GG Dev Team'\r\ninfo.version = \"5.1.%s\" % \"$Rev$\".split('$Rev: ')[1].split()[0]\r\n\r\n\r\n# =============================================================================\r\n# >> GLOBAL VARIABLES\r\n# =============================================================================\r\n# Store the title of the menu\r\ntitle = 'GunGame%s -- Welcome Message' % gungame_info('version')\r\ngg_welcome_msg_timeout = es.ServerVar('gg_welcome_msg_timeout')\r\n\r\n# Create an empty list for detecting if a player just joined the server\r\nmessageQueue = []\r\n\r\n\r\n# =============================================================================\r\n# >> LOAD & UNLOAD\r\n# =============================================================================\r\ndef load():\r\n    # Register !welcome\r\n    registerSayCommand('!welcome', welcome, 'Displays a !welcome menu.')\r\n\r\n    # Build the main gg_welcome popup\r\n    buildPopups()\r\n\r\n\r\ndef unload():\r\n    # Unregister !welcome\r\n    unregisterSayCommand('!welcome')\r\n\r\n    # Clean up existing popups\r\n    if popuplib.exists('gg_welcome'):\r\n        popuplib.delete('gg_welcome')\r\n    if popuplib.exists('gg_welcome_include'):\r\n        popuplib.delete('gg_welcome_include')\r\n    if popuplib.exists('gg_welcome_custom'):\r\n        popuplib.delete('gg_welcome_custom')\r\n\r\n\r\n# =============================================================================\r\n# >> GAME EVENTS\r\n# =============================================================================\r\ndef 
player_activate(event_var):\r\n userid = event_var['userid']\r\n\r\n # If the user is already in the que to receive the welcome message, stop\r\n # here\r\n if userid in messageQueue:\r\n return\r\n\r\n # Add the user to the welcome message queue\r\n messageQueue.append(userid)\r\n\r\n\r\ndef player_team(event_var):\r\n userid = event_var['userid']\r\n\r\n # If the user is in the queue\r\n if userid in messageQueue:\r\n # Send them the welcome message\r\n welcome(userid, '')\r\n # Remove them from the queue\r\n messageQueue.remove(userid)\r\n\r\n\r\n# =============================================================================\r\n# >> CUSTOM/HELPER FUNCTIONS\r\n# =============================================================================\r\ndef buildPopups():\r\n # Get the custom text for the popup\r\n with get_game_dir('cfg/gungame51/' +\r\n 'included_addon_configs/gg_welcome_msg.txt').open() as customFile:\r\n customText = customFile.readlines()\r\n\r\n # Remove unnecessary characters\r\n customText = [x.strip() for x in customText]\r\n # Ignore commented lines\r\n customText = filter(lambda x: x[:2] != '//', customText)\r\n\r\n # Create a new popuplib instance\r\n menu = popuplib.create('gg_welcome')\r\n menu.addline(title)\r\n menu.addline('-' * 30)\r\n\r\n # For each line of custom text\r\n for line in customText:\r\n # If there is nothing on the line, make it a single space to show up\r\n # on the menu\r\n if not line:\r\n line = ' '\r\n\r\n # Replace variables in the line\r\n line = line.replace('$server', str(es.ServerVar('hostname')))\r\n line = line.replace('$date', time.strftime('%d/%m/%Y'))\r\n line = line.replace('$time', time.strftime('%H:%M:%S'))\r\n\r\n # Add the line to the menu\r\n menu.addline(line)\r\n\r\n # Create the rest of the menu\r\n menu.addline('-' * 30)\r\n menu.addline('->1. Included Addons')\r\n menu.select(1, welcome_handler)\r\n menu.addline('->2. Custom Addons')\r\n menu.select(2, welcome_handler)\r\n menu.addline('-' * 30)\r\n menu.addline('0. 
Cancel')\r\n\r\n    # Set the timeout for the menu\r\n    menu.timeout('send', int(gg_welcome_msg_timeout))\r\n    menu.timeout('view', int(gg_welcome_msg_timeout))\r\n\r\n\r\ndef welcome(userid, args):\r\n    # Do not send to bots or non-existent players\r\n    if es.getplayersteamid(userid) == 'BOT' or not es.exists('userid', userid):\r\n        return\r\n\r\n    # If the user has the popup open, remove it\r\n    popuplib.unsendname('gg_welcome', userid)\r\n    # Send the popup\r\n    popuplib.send('gg_welcome', userid)\r\n\r\n\r\ndef welcome_handler(userid, choice, popupname):\r\n    # If they selected to see the included addons list\r\n    if choice == 1:\r\n        # If the menu exists, delete it\r\n        if popuplib.exists('gg_welcome_include'):\r\n            popuplib.delete('gg_welcome_include')\r\n        # Create an easylist instance\r\n        menu = popuplib.easylist('gg_welcome_include',\r\n            get_loaded_addon_list('included'))\r\n    elif choice == 2:\r\n        # If the menu exists, delete it\r\n        if popuplib.exists('gg_welcome_custom'):\r\n            popuplib.delete('gg_welcome_custom')\r\n        # Create an easylist instance\r\n        menu = popuplib.easylist('gg_welcome_custom',\r\n            get_loaded_addon_list('custom'))\r\n\r\n    # Set the menu's title\r\n    menu.settitle(title)\r\n    # When the menu is closed, go back to the welcome message\r\n    menu.submenu(0, 'gg_welcome')\r\n    # Set the timeout for the menu\r\n    menu.timeout('send', int(gg_welcome_msg_timeout))\r\n    menu.timeout('view', int(gg_welcome_msg_timeout))\r\n    # Send the popup\r\n    menu.send(userid)\r\n","sub_path":"cstrike/addons/eventscripts/gungame51/scripts/included/gg_welcome_msg/gg_welcome_msg.py","file_name":"gg_welcome_msg.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"179017616","text":"\n\nchromo_set = set()\n\nwith open (\"p8/blast/pred34inyeast.txt\", \"r\") as fh:\n    for line in fh:\n        if \"sp|\" in line:\n            get_line = line.rstrip().split(\"_YEAST\")\n            chromo_set.add(get_line[0].split(\"|\")[2])\n\nwith open (\"p8/overlapsets_uniref.txt\", \"w\") as wh:\n    with open (\"p8/experiments.txt\", \"r\") as fh2:\n        for line in fh2: \n            expset = set(line.strip().split(\" \"))\n            overlen = len(chromo_set.intersection(expset))\n            wh.write(\"----------------\" + \"\\n\")\n            wh.write(\"Overlapping genes:\" + \"\\n\")\n            wh.write(\" \".join(chromo_set.intersection(expset)) +\"\\n\" )\n            wh.write(\"Length of overlap:\" + \"\\n\" + str(overlen) + \"\\n\" + \"\\n\")\n            wh.write(\"Full set:\" + \"\\n\")\n            wh.write(\" \".join(expset) + \"\\n\" + \"\\n\")\n","sub_path":"p8/scripts (copy)/parse_blast.py","file_name":"parse_blast.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"277844991","text":"import numpy as np\r\nimport cv2\r\nimport math\r\ndef waveleteTransform(img):\r\n    # convert the image pixels to float\r\n    image = img.astype(float)\r\n    height, width = image.shape[:2]\r\n    result = np.zeros((height, width, 3), float)\r\n\r\n    # first pass, horizontal direction\r\n    width2 = int(width / 2)\r\n    for i in range(height):\r\n\r\n        for j in range(0, width - 1, 2):\r\n            # handle the odd and even sequences separately\r\n            j1 = (int)(j + 1)\r\n            j2 = (int)(j / 2)\r\n            # round down\r\n            width3 = width2 + j2\r\n            # use the lifting scheme\r\n            # xc=(xo + xe)/2\r\n            # xd=(x0 - xd)/2\r\n            # [xc xd]\r\n            result[i, j2] = ((image[i, j] + image[i, j1]) / 2)\r\n            result[i, width3] = ((image[i, j] - image[i, j1]) / 2)\r\n\r\n\r\n    # copy array\r\n    image = np.copy(result)\r\n    result=np.zeros((height, width, 3), float)\r\n    # first pass, vertical direction\r\n    height2 = int(height / 2)\r\n    for i in 
range(0, height - 1, 2):\r\n        for j in range(0, width):\r\n            i1 = (int)(i + 1)\r\n            i2 = (int)(i / 2)\r\n            height3 = height2 + i2\r\n\r\n            result[i2, j] = (image[i, j] + image[i1, j]) / 2\r\n            result[height3, j] = (image[i, j] - image[i1, j]) / 2\r\n\r\n    image = np.copy(result).astype(np.uint8)\r\n    HH = np.copy(image[height2+1:,width2+1:])\r\n    LH = np.copy(image[height2+1:,:width2+1])\r\n    HL = np.copy(image[:height2+1,width2+1:])\r\n    LL = np.copy(image[:height2+1,:width2+1])\r\n    return [image,HH,LH,HL,LL]\r\n\r\n\r\ndef denoise(img):\r\n    # denoise with soft thresholding\r\n    image = img.astype(float)\r\n    #sigma = abs(np.median(image))/0.6745\r\n    #threshold = math.sqrt(sigma*(2*math.log(len(image))))\r\n    #image[abs(image) < threshold] = 0.0\r\n    #image[image > threshold] -= threshold\r\n    #image[image < (-threshold)] += threshold\r\n    image[(abs(image) < 256)] = 0.0\r\n    image = image.astype(np.uint8)\r\n    return image\r\n\r\ndef inverseWaveleteTransform(img):\r\n    image = img.astype(float)\r\n    nr, nc = image.shape[:2]\r\n    result = np.zeros((nr, nc, 3), float)\r\n    nr2 = nr / 2\r\n\r\n    for i in range(0, nr - 1, 2):\r\n        for j in range(0, nc):\r\n\r\n            i1 = (int) (i + 1)\r\n            i2 = (int) (i / 2)\r\n            nr3 = (int) (nr2 + i2)\r\n\r\n            result[i, j] = ((image[i2, j] / 2) + (image[nr3, j] / 2)) * 2\r\n            result[i1, j] = ((image[i2, j] / 2) - (image[nr3, j] / 2)) * 2\r\n\r\n    # //copy array\r\n    image = np.copy(result)\r\n\r\n    # // Horizontal processing:\r\n    nc2 = nc / 2\r\n    for i in range(0, nr):\r\n        for j in range(0, nc - 1, 2):\r\n\r\n            j1 = (int) (j + 1)\r\n            j2 = (int) (j / 2)\r\n            nc3 = (int) (j2 + nc2)\r\n            result[i, j] = ((image[i, j2] / 2) + (image[i, nc3] / 2)) * 2\r\n            result[i, j1] = ((image[i, j2] / 2) - (image[i, nc3] / 2)) * 2\r\n\r\n    resultimg = result.astype(np.uint8)\r\n    return resultimg\r\n\r\n\r\nif __name__ == '__main__':\r\n    # loadImage & copy image\r\n    image = cv2.imread(\"./image/image_noise.jpg\")\r\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n    cv2.imwrite('./image/gray_image_noise.jpg', image)\r\n    height, width = image.shape[:2]\r\n    image2, HH, LH, HL, LL = waveleteTransform(image)\r\n    cv2.imwrite('./image/DWT.jpg', image2)\r\n    HH_d = denoise(HH)\r\n    HL_d = denoise(HL)\r\n    LH_d = denoise(LH)\r\n    image3 = np.copy(image2)\r\n    #image3 is the decomposition after denoising\r\n\r\n    image3[int(height/2)+1:,int(width/2)+1:] = HH_d;\r\n    image3[int(height/2)+1:,:int(width/2)+1] = LH_d;\r\n    image3[:int(height/2)+1,int(width/2)+1:] = HL_d;\r\n    cv2.imwrite('./image/DWT_denoise.jpg', image3)\r\n    image4 = inverseWaveleteTransform(image2)\r\n    #cv2.imwrite('./image/IDWT.jpg', image4)\r\n    #image4 is the reconstruction of the original\r\n    image5 = inverseWaveleteTransform(image3)\r\n    cv2.imwrite('./image/IDWT_denoise.jpg', image5)\r\n    #image5 is the reconstructed denoised image\r\n","sub_path":"DWT_denoising.py","file_name":"DWT_denoising.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"82599203","text":"sites = [\"pythonworld.ru\",\"telegram.org\",\"wikipedia.org\"]\r\n\r\nname = input(\"Enter what you want to add \\n\")\r\nsites.append(name)\r\n\r\n\r\nname_1 = input(\"Enter what you want to remove *in digits* \\n\") \r\nsites.remove(name_1) \r\n\r\nsites_copy = sites[:]\r\n\r\nsites_copy.reverse()\r\nsites.clear()\r\n\r\nprint(sites)\r\nprint(sites_copy)\r\n\r\nwhile True: \r\n\tprint(sites_copy)","sub_path":"HW_8/DZ_1.py","file_name":"DZ_1.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"254174658","text":"#!/usr/bin/env python\n# -*- coding: utf-8 
-*-\n\"\"\"\n@Author : Joshua\n@Time : 2019/2/14 12:01\n@File : lightlda2model.py\n@Desc : \n\"\"\"\n\n\nimport re\nimport sys\nimport gzip\nimport svmlight\nimport numpy as np\nimport pickle\n\ndef nlexicon(data):\n    vmax = 0\n    for datum in data:\n        if datum[0] > vmax:\n            vmax = datum[0]\n    return vmax + 1\n\ndef match(r, s):\n    m = re.search(r, s)\n    if m:\n        return m.group(1)\n    else:\n        sys.stderr.write('match(): invalid content.\\n')\n        sys.exit(0)\n\ndef read_param(file):\n    with open(file, 'r') as fh:\n        content = fh.read()\n    alpha0 = float(match(r'alpha\s*=\s*([0-9.+\-]+)', content))\n    beta0 = float(match(r'beta\s*=\s*([0-9.+\-]+)', content))\n    topics = int(match(r'topics\s*=\s*([0-9]+)', content))\n    iters = int(match(r'iters\s*=\s*([0-9]+)', content))\n    return alpha0, beta0, topics, iters\n\ndef lightlda2beta(dir, topics, beta0):\n    data = svmlight.loadex(dir + '/' + 'server_0_table_0.model')\n    nlex = nlexicon(data)\n    matrix = np.zeros((nlex, topics)) + beta0\n    for w, doc in data:\n        L = len(doc.id)\n        for j in range(L):\n            k = doc.id[j]\n            c = doc.cnt[j]\n            matrix[w][k] += c\n    s = np.sum(matrix, axis=0)\n    return np.dot(matrix, np.diag(1.0/s))\n\ndef lightlda2gamma(dir, topics, alpha0):\n    data = svmlight.loadex(dir + '/' + 'doc_topic.0')\n    gamma = []\n    for n, doc in data:\n        v = np.zeros(topics) + alpha0\n        L = len(doc.id)\n        for j in range(L):\n            k = doc.id[j]\n            c = doc.cnt[j]\n            v[k] += c\n        gamma.append(v)\n    return np.array(gamma)\n\ndef lightlda2model(dir):\n    model = {}\n    alpha0, beta0, topics, iters = read_param(dir + '/' + 'param')\n    # print(alpha0,beta0,topics,iters)\n    model['alpha'] = alpha0\n    model['beta'] = lightlda2beta(dir, topics, beta0)\n    model['gamma'] = lightlda2gamma(dir, topics, alpha0)\n    return model\n\ndef usage ():\n    print('usage: lightlda2model dir')\n    sys.exit(0)\n\ndef main ():\n    if len(sys.argv) < 2:\n        usage()\n    dir = sys.argv[1]\n    print('reading model..')\n    model = lightlda2model(dir)\n    print('saving model..')\n    with gzip.open(dir + '/' + 'model', 'wb') as gf:\n        pickle.dump(model, gf, 2)\n    print('done.')\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"ba_lda/lightlda2model.py","file_name":"lightlda2model.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"43360728","text":"from collections import *\nimport sys \nimport string\nfrom math import inf\n\nN = 1000\n\ninput=sys.stdin.readline\n\n\n# \"\". 
join(strings) \n \ndef ri():\n    return int(input())\n \ndef rl():\n    return list(map(int, input().split()))\n \nn = ri()\npp = []\nvv = []\nfor i in range(n):\n\tprice, vitamins = input().split()\n\tpp.append(int(price))\n\tvv.append(vitamins)\n\n#dp = [juice i][maskVitamins] = min price to pay if we can choose within the first i-1 juices to have the vitamins in the mask\n\n#this is the same as the dp in the other solution, except that we regroup all the dps into one big matrix\ndp = [[inf] * 8 for i in range((N + 5))]\n\n#base case:\n\n#create a bitmask from the juice\njuice_mask = 0\nif \"C\" in vv[0]:\n\tjuice_mask += 1 \nif \"B\" in vv[0]:\n\tjuice_mask += 2 \t\nif \"A\" in vv[0]:\n\tjuice_mask += 4\nfor mask in range(8):\n\tif mask & juice_mask == mask:\n\t\tdp[0][mask] = pp[0]\n\n\n#transitions:\nfor i in range(n - 1):\n\n\t#create a bitmask from the juice\n\tjuice_mask = 0\n\tif \"C\" in vv[i + 1]:\n\t\tjuice_mask += 1 \n\tif \"B\" in vv[i + 1]:\n\t\tjuice_mask += 2 \n\tif \"A\" in vv[i + 1]:\n\t\tjuice_mask += 4\n\n\tfor mask in range(8):\n\t\t#if we want, we don't take the juice at rank i + 1\n\t\tdp[i + 1][mask] = min(dp[i][mask], dp[i + 1][mask])\n\n\t\t#we can also choose to only take the juice at i + 1:\n\t\tif mask & juice_mask == mask:\n\t\t\tdp[i + 1][mask] = min(pp[i+1], dp[i + 1][mask])\n\n\t\t#or we take a mix\n\t\tdp[i + 1][mask | juice_mask] = min(dp[i][mask] + pp[i + 1], dp[i + 1][mask | juice_mask])\n\nans = dp[n - 1][7]\n\nif ans == inf:\n\tprint(-1)\nelse:\n\tprint(ans)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"DP_mashup_list_Colin_Galen/Problem_C/C_bitmask.py","file_name":"C_bitmask.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"396058306","text":"\n#Description\n#Move a ball around with UP/DOWN/LEFT/RIGHT continuously\n#Part of the pygame series at https://github.com/tomwh2010/PythonPhysics\n#Public domain by tomwh2010@hotmail.com\n\nimport pygame, sys\nfrom pygame.locals import *\n\n\n#color of the ball\nSHAPE_COLOR=pygame.Color(\"red\")\n\n#style=0 => filled, style=1 => thin line, style=4 => thick line\nFILLSTYLE=0\n\n#Frames per second\nFPS=40\n\n#window size\nWIDTH=800\nHEIGHT=500\n\n#initialize the pygame environment\npygame.init()\n\n# set up the window with size and caption\nscreen=pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('Move a ball')\n\n# you have to call this at the start if you want to use this module.\npygame.font.init()\n\n#choose font for later use\nmyfont=pygame.font.SysFont('Times New Roman', 24)\n\n#create text buffer\nstrBuffer=\"Move cursor with arrow keys\"\n\n#render buffer as picture\ntextsurface=myfont.render(strBuffer, 1, pygame.Color(\"black\"))\n\n# creates a clock\nclock=pygame.time.Clock()\n\n#initial location of the ball; center\nmyball=[WIDTH//2, HEIGHT//2]\n\nwhile True:\n    #limit updates to FPS\n    clock.tick(FPS)\n\n    #get events from the event queue\n    for event in pygame.event.get():\n        if event.type == QUIT:\n            pygame.quit()\n            sys.exit()\n\n    #by using this construct instead of event we will get\n    #continuous flow + 45 degree angles if we want to\n    keys_pressed = pygame.key.get_pressed()\n\n    if keys_pressed[K_LEFT]:\n        myball[0]-=5\n\n    if keys_pressed[K_RIGHT]:\n        myball[0]+=5\n\n    if keys_pressed[K_UP]:\n        myball[1]-=5\n\n    if keys_pressed[K_DOWN]:\n        myball[1]+=5\n\n    #draw background color to blank the screen\n    screen.fill(pygame.Color(\"gray69\"))\n\n    #paint picture to screen at location 10,10\n    
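#blit copies the rendered text Surface onto the display Surface\n    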
screen.blit(textsurface,(10, 10))\n\n #circle(screen, color, coords(x,y), radius, fillstyle\n pygame.draw.circle(screen, SHAPE_COLOR, myball, 10, FILLSTYLE)\n\n #update display\n pygame.display.flip()\n","sub_path":"Python/movetheball_continuous.py","file_name":"movetheball_continuous.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"151017911","text":"def neighbours(cell):\n position = cell.split(\",\")\n for x in range(-1, 2):\n for y in range(-1, 2):\n if not (x == y == 0):\n yield \"%i,%i\" % (int(position[0]) + x, int(position[1]) + y)\n\nclass GameOfLife:\n #Board tracks every relevant cell, AKA all live cells + every dead cell with live neighbours\n board = {}\n\n def __init__(self, seed=()):\n self.board = {}\n for cell in seed:\n self.revive(cell)\n \n\n #Make dictionary of cells\n #{'x,y':Bool}\n #At every iteration:\n #Check each live and dead cell in the dic\n #If the cell lives, add its neighbours to the dic\n #if it dies, and it has no neighbours, remove from dic\n\n\n #Count the number of neighbouring cells that are alive\n #next to the given cell\n def neighbour_count(self, cell):\n count = 0\n for neighbour in neighbours(cell):\n if self.is_alive(neighbour):\n count += 1\n return count\n\n #Very self descriptive\n def is_alive(self, cell):\n if cell in self.board:\n return self.board[cell]\n return False\n\n #Set the life status of a cell to alive\n ##Also adds neighbouring cells to the board\n def revive(self, cell):\n self.board[cell] = True\n for neighbour in neighbours(cell):\n if neighbour not in self.board:\n self.board[neighbour] = False\n\n #Set the life status of a cell to dead\n ##Also removes the cell from the board if it has no live neighbours\n def kill(self, cell):\n if self.neighbour_count(cell) == 0:\n del self.board[cell]\n else:\n self.board[cell] = False\n\n #go forwards one turn\n def step(self):\n next_step = GameOfLife()\n next_step.board = self.board.copy()\n\n for cell, alive in self.board.items():\n count = self.neighbour_count(cell)\n if alive and ((count > 3) or (count < 2)):\n next_step.kill(cell)\n elif not alive and (count == 3):\n next_step.revive(cell)\n self.board = next_step.board\n\n #Print to terminal\n #80 x 23\n #-40 <===> 39\n #-11 <===> 11\n def render(self):\n for y in range(11, -12, -1):\n line = \"\"\n for x in range(-40, 40):\n if self.is_alive(\"%i,%i\" % (x, y)):\n line += \"x\"\n else:\n line += \" \"\n print(line)\n","sub_path":"dict/life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"414946563","text":"import tensorflow as tf\nimport numpy as np\nimport os\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom anaconda_navigator.utils.encoding import write\ntf.app.flags.DEFINE_string('data_dir', '.', \"\"\"the default data dirs\"\"\")\n\nFLAGS=tf.app.flags.FLAGS\nmnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n\nIMAGE_SIZE = 28\nNUM_CHANNELS = 1\nBATCH_SIZE=32\nnum_epochs=1\n\ntrain_data = mnist.train.images\ntrain_labels = np.asarray(mnist.train.labels, dtype=np.int32)\neval_data = mnist.test.images\neval_labels = np.asarray(mnist.test.labels, dtype=np.int32)\ntrain_size = train_labels.shape[0]\ncheckPointPath=\"C:\\\\tmp\\\\mnistckp\"\npbPath=\"C:\\\\tmp\\\\mnistckp\\\\model.pb\"\n\n\ndef inference(input, l2_regularizer=None):\n\n input_layer = tf.reshape(input, [-1, 
IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS], name=\"inputLayer\")\n tf.summary.image(\"inputImageSummary\", input_layer)\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n kernel_regularizer=l2_regularizer,\n activation=tf.nn.relu,\n name=\"convww\")\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5, 5],\n padding=\"same\",\n kernel_regularizer=l2_regularizer,\n activation=tf.nn.relu)\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])\n\n dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n dropout = tf.layers.dropout(inputs=dense, rate=0.4)\n logits = tf.layers.dense(inputs=dropout, units=10)\n return logits\n\n\ndef train():\n\n train_data_node = tf.placeholder(tf.float32, shape=(None, IMAGE_SIZE*IMAGE_SIZE* NUM_CHANNELS),name=\"inputdataName\")\n train_labels_node = tf.placeholder(tf.int64, shape=(None,10))\n regularizer = tf.contrib.layers.l2_regularizer(scale=0.0)\n logits = inference(train_data_node, regularizer)\n trainableVars=tf.trainable_variables()\n for var in trainableVars:\n if var.name.startswith('convww'):\n mean = tf.reduce_mean(var)\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar(var.name+'_mean', mean)\n tf.summary.scalar(var.name+'_stddev', stddev)\n tf.summary.histogram(var.name+'_histogram', var)\n print(var.name)\n prediction = tf.nn.softmax(logits,name=\"predictionName\")\n l2_loss = tf.losses.get_regularization_loss()\n entropyloss=tf.nn.softmax_cross_entropy_with_logits(labels=train_labels_node, logits=logits)\n loss = tf.reduce_mean([l2_loss+entropyloss])\n tf.summary.scalar('lossVal', loss)\n learning_rate=0.01\n optimizer = tf.train.MomentumOptimizer(learning_rate,momentum=0.9).minimize(loss)\n labels = tf.argmax(train_labels_node, 1)\n top_k_op = tf.nn.in_top_k(logits, labels, 1)\n accuracy = tf.reduce_mean(tf.cast(top_k_op, \"float\"), name=\"accuracy\")\n tf.summary.scalar('accuracyVal', accuracy)\n merged = tf.summary.merge_all()\n\n saver = tf.train.Saver()\n initAll=tf.global_variables_initializer()\n\n graph_def = tf.get_default_graph().as_graph_def()\n with tf.gfile.GFile(pbPath, 'wb') as f:\n f.write(graph_def.SerializeToString())\n\n with tf.Session() as sess:\n sess.run(initAll)\n sumwriter = tf.summary.FileWriter(checkPointPath, sess.graph)\n for step in range(int(num_epochs * train_size) // BATCH_SIZE):\n offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)\n batch_data = train_data[offset:(offset + BATCH_SIZE), ...]\n batch_labels = train_labels[offset:(offset + BATCH_SIZE)]\n feed_dict = {train_data_node: batch_data,\n train_labels_node: batch_labels}\n _, lossVal, accuracyVal=sess.run([optimizer, loss, accuracy], feed_dict=feed_dict)\n print('Iter %d, lossVal %.3f, accuracyVal %.3f' % (step, lossVal, accuracyVal))\n\n if step % 10 == 0:\n summaryVal=sess.run(merged,feed_dict=feed_dict)\n sumwriter.add_summary(summaryVal, step)\n\n sumwriter.close()\n saver.save(sess=sess,save_path=os.path.join(checkPointPath,\"model\"))\n\n\nif __name__ == '__main__':\n train()","sub_path":"book/chapter4/tensorboard/mnisttensorboard.py","file_name":"mnisttensorboard.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"633884276","text":"def quick_sort(arr):\n left = []\n right = 
[]\n    if len(arr) <= 1:\n        return arr\n    ref = arr[0]\n    ref_count = 0\n    for ele in arr:\n        if ele < ref:\n            left.append(ele)\n        elif ele > ref:\n            right.append(ele)\n        else:\n            ref_count += 1\n    left = quick_sort(left)\n    right = quick_sort(right)\n    return right + [ref] * ref_count + left\n\nN = int(input())\nA = list(map(int, input().split()))\n\nA = [x for x in A if A.count(x) == 1]\nA = quick_sort(A)\n\nans = 0\nif len(A) == 0:\n    print(ans)\n    exit()\n# A shrank after the duplicate filter, so iterate over len(A) rather than N\nfor i in range(len(A)):\n    flag = True\n    for j in range(i+1, len(A)):\n        if A[i] % A[j] == 0:\n            flag = False\n            break\n    if flag: ans += 1\nprint(ans)","sub_path":"atcoder/abc/170/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"109665559","text":"# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution(object):\n    \"\"\"\nGiven the head node of a linked list, return the value of each node from tail to head (as an array).\nInput: head = [1,3,2]\nOutput: [2,3,1]\nConstraints:\n0 <= list length <= 10000\nLink: https://leetcode-cn.com/problems/cong-wei-dao-tou-da-yin-lian-biao-lcof\n    \"\"\"\n    def reversePrint(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: List[int]\n        \"\"\"\n        rec = []\n        while head:\n            rec[0:0] = [head.val]\n            head = head.next\n        return rec\n\n\ndef create(nums):\n    aux = p = ListNode(-1)\n    for x in nums:\n        p.next = ListNode(x)\n        p = p.next\n    return aux.next\n\n\ndef main():\n    head = [1, 3, 2]\n    test = Solution()\n    ret = test.reversePrint(create(head))\n    print(ret)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"剑指offer-面试题06. 从尾到头打印链表.py","file_name":"剑指offer-面试题06. 从尾到头打印链表.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"262439263","text":"from collections import defaultdict\n\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom scrapy import signals\n\n\ndef gen_table_name(item_type):\n    table_name = item_type.lower()\n    if table_name.endswith(\"item\") and len(table_name) > 4:\n        table_name = table_name[:-4]\n    return table_name\n\n\nclass ScraperWikiPipeline(object):\n    \"\"\"\n    A pipeline for saving to the Scraperwiki datastore\n\n    If the scraper returns different kinds of items they are stored in\n    different tables\n    \"\"\"\n    def __init__(self):\n        self.buff = 20\n        self.data = defaultdict(list)\n        self.counter = 0\n        dispatcher.connect(self.spider_closed, signals.spider_closed)\n\n    def process_item(self, item, spider):\n        item_type = item.__class__.__name__\n        self.data[item_type].append(dict(item))\n        if len(self.data[item_type]) >= self.buff:\n            self.write_data(spider, item_type)\n        return item\n\n    def spider_closed(self, spider):\n        for item_type in self.data:\n            if self.data[item_type]:\n                self.write_data(spider, item_type)\n\n    def write_data(self, spider, item_type):\n        import scraperwiki\n\n        table_name = gen_table_name(item_type)\n        unique_keys = spider.settings.get(\n            'SW_UNIQUE_KEYS', {item_type: ['id']}\n        )\n        scraperwiki.sqlite.save(\n            table_name=table_name,\n            unique_keys=unique_keys[item_type],\n            data=self.data[item_type]\n        )\n        self.data[item_type] = []\n","sub_path":"scrapyrwiki/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"66971501","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Problem Statement\n# \n# \n# Given an integer array, find and return all the subsets of the array.\n# The 
order of subsets in the output array is not important. However the order of elements in a particular subset should remain the same as in the input array.\n# \n# *Note: An empty set will be represented by an empty list*\n# \n# **Example 1**\n# \n# ```\n# arr = [9]\n# \n# output = [[]\n# [9]]\n# ```\n# \n# **Example 2**\n# \n# ```\n# arr = [9, 12, 15]\n# \n# output = [[],\n# [15],\n# [12],\n# [12, 15],\n# [9],\n# [9, 15],\n# [9, 12],\n# [9, 12, 15]]\n# ```\n\n# In[4]:\n\n\ndef subsets(arr):\n \"\"\"\n :param: arr - input integer array\n Return - list of lists (two dimensional array) where each list represents a subset\n TODO: complete this method to return subsets of an array\n \"\"\"\n return subsets_index(arr,0)\n\ndef subsets_index(arr,index):\n if index >= len(arr):\n return [[]]\n \n temp = subsets_index(arr,index+1)\n out = list()\n for ele in temp:\n out.append(ele)\n \n for ele in temp:\n cur = list()\n cur.append(arr[index])\n cur.extend(ele)\n out.append(cur)\n return out\n\n\n# \n\n# In[5]:\n\n\ndef test_function(test_case):\n arr = test_case[0]\n solution = test_case[1]\n \n output = subsets(arr)\n \n output.sort()\n solution.sort()\n \n if output == solution:\n print(\"Pass\")\n else:\n print(\"Fail\") \n\n\n# In[6]:\n\n\narr = [9]\nsolution = [[], [9]]\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\n\n# In[7]:\n\n\narr = [5, 7]\nsolution = [[], [7], [5], [5, 7]]\ntest_case = [arr, solution]\ntest_function(test_case)\n\n\n# In[8]:\n\n\narr = [9, 12, 15]\nsolution = [[], [15], [12], [12, 15], [9], [9, 15], [9, 12], [9, 12, 15]]\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\n\n# In[9]:\n\n\narr = [9, 8, 9, 8]\nsolution = [[],\n[8],\n[9],\n[9, 8],\n[8],\n[8, 8],\n[8, 9],\n[8, 9, 8],\n[9],\n[9, 8],\n[9, 9],\n[9, 9, 8],\n[9, 8],\n[9, 8, 8],\n[9, 8, 9],\n[9, 8, 9, 8]]\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Recursion/Return-Subsets.py","file_name":"Return-Subsets.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"108966100","text":"#O(n*2^n) \n#def powerSet(array):\n# subsets = [[]]\n# for ele in array:\n# for i in range(len(subsets)):\n# currentSubset = subsets[i]\n# subsets.append(currentSubset+[ele])\n# return susbsets\n#O(n*2^n)time | O(n*2^n) space\ndef powerSet(array, idx = None):\n if idx is None:\n idx = len(array) - 1\n elif idx < 0:\n return [[]]\n ele = array[idx]\n subsets = powerSet(array,idx -1)\n for i in range(len(subsets)):\n currentSubset = subsets[i]\n subsets.append(currentSubset+[ele])\n return subsets\n \npowerSet([1,2,3,4])\n \n","sub_path":"powerSet.py","file_name":"powerSet.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"191868693","text":"\nfrom weakref import WeakKeyDictionary\n\n\nclass StatefulProperty(object):\n\n def __init__(self, initial_condition = None):\n self.default = initial_condition\n self.data = WeakKeyDictionary()\n\n def __get__(self, instance, owner):\n if instance is None:\n return self\n\n try:\n return self.data[instance]\n except KeyError:\n val = StatePerSimulation(self.default)\n self.data[instance] = val\n return val\n\n def __set__(self, instance, value):\n raise AttributeError(\"Property is read-only. 
\"\n \"Did you mean to access via a simultation?\")\n\n\n\nclass StatePerSimulation(object):\n\n def __init__(self, initial_condition = None):\n self.default = initial_condition\n self._sim_data = WeakKeyDictionary()\n\n def __getitem__(self, key):\n from .controlsystem import ControlSystemSimulation\n assert isinstance(key, ControlSystemSimulation)\n try:\n return self._sim_data[key]\n except KeyError:\n if isinstance(self.default, dict) and len(self.default) == 0:\n # Create a new empty dictionary and remember it\n result = dict()\n self._sim_data[key] = result\n return result\n else:\n return self.default\n\n\n def __setitem__(self, key, value):\n from .controlsystem import ControlSystemSimulation\n assert isinstance(key, ControlSystemSimulation)\n self._sim_data[key] = value\n\n\n","sub_path":"skfuzzy/control/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"11054538","text":"import os\r\nimport sys\r\nimport git\r\nimport subprocess\r\nfrom apscheduler import events\r\n\r\n\r\nclass ContinuousDeployment:\r\n def __init__(self, scheduler):\r\n self.scheduler = scheduler\r\n self.jobs_running = 0\r\n # self.scheduler.add_listener(self.job_started, mask=events.EVENT_JOB_SUBMITTED)\r\n # self.scheduler.add_listener(self.job_ended, mask=events.EVENT_JOB_EXECUTED)\r\n # self.scheduler.add_listener(self.job_ended, mask=events.EVENT_JOB_ERROR)\r\n # self.scheduler.add_job(self.check_for_updates, \"cron\", minute=\"*\")\r\n #\r\n self.repo = git.Repo(\".\")\r\n print(self.repo.active_branch.commit)\r\n\r\n def job_started(self, _):\r\n self.jobs_running += 1\r\n\r\n def job_ended(self, _):\r\n self.jobs_running -= 1\r\n\r\n def check_for_updates(self):\r\n if self.jobs_running != 1:\r\n return\r\n if not self.up_to_date():\r\n print(\"Not up to date. 
Pulling...\")\r\n self.repo.remotes.origin.pull()\r\n restart_program()\r\n else:\r\n print(\"Up to date.\")\r\n\r\n def up_to_date(self):\r\n status = self.repo.remotes.origin.fetch()\r\n for branch in status:\r\n if str(branch.ref) == \"origin/master\":\r\n return branch.commit == self.repo.active_branch.commit\r\n return False\r\n\r\n\r\ndef restart_program():\r\n python = sys.executable\r\n os.execl(python, python, *sys.argv)\r\n\r\n\r\nfrom apscheduler.schedulers.blocking import BlockingScheduler\r\nscheduler = BlockingScheduler()\r\n\r\n\r\nimport time\r\n\r\nprint(\"HELLO AGAIN!!\")\r\nx = ContinuousDeployment(scheduler)\r\n\r\n\r\n@scheduler.scheduled_job(\"cron\", second=\"*/5\")\r\ndef test_job():\r\n x.check_for_updates()\r\n\r\n\r\nscheduler.start()\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"490690323","text":"import re\nimport unittest\nimport os.path\nimport grokcore.view\n\nfrom pkg_resources import resource_listdir\n\nfrom zope.app.wsgi.testlayer import BrowserLayer, http\nfrom zope.testing import doctest, renormalizing\n\n\nFunctionalLayer = BrowserLayer(grokcore.view)\n\nchecker = renormalizing.RENormalizing([\n # Accommodate to exception wrapping in newer versions of mechanize\n (re.compile(r'httperror_seek_wrapper:', re.M), 'HTTPError:'),\n ])\n\ndef suiteFromPackage(name):\n files = resource_listdir(__name__, name)\n suite = unittest.TestSuite()\n getRootFolder = FunctionalLayer.getRootFolder\n globs = dict(http=http,\n getRootFolder=getRootFolder)\n optionflags = (doctest.ELLIPSIS+\n doctest.NORMALIZE_WHITESPACE+\n doctest.REPORT_NDIFF)\n\n for filename in files:\n if filename == '__init__.py':\n continue\n\n test = None\n if filename.endswith('.py'):\n dottedname = 'grokcore.view.ftests.%s.%s' % (name, filename[:-3])\n test = doctest.DocTestSuite(\n dottedname,\n checker=checker,\n extraglobs=globs,\n optionflags=optionflags)\n test.layer = FunctionalLayer\n elif filename.endswith('.txt'):\n test = doctest.DocFileSuite(\n os.path.join(name, filename),\n optionflags=optionflags,\n globs=globs)\n test.layer = FunctionalLayer\n if test is not None:\n suite.addTest(test)\n return suite\n\ndef test_suite():\n suite = unittest.TestSuite()\n for name in [\n 'contentprovider',\n 'directoryresource',\n 'static',\n 'url',\n 'view',\n ]:\n suite.addTest(suiteFromPackage(name))\n return suite\n","sub_path":"buildout-cache--/eggs/grokcore.view-2.8-py2.7.egg/grokcore/view/ftests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"141983749","text":"import pygame\nfrom mymath import *\n\nWHITE = (255, 255, 255)\nclass Triangle3D:\n def __init__(self, p1, p2, p3, width, height, focal_length, camera):\n self.points = [0, 0, 0]\n self.points[0] = TranslateTo2D(p1, width, height, focal_length, camera)\n self.points[1] = TranslateTo2D(p2, width, height, focal_length, camera)\n self.points[2] = TranslateTo2D(p3, width, height, focal_length, camera)\n # print(self.points)\n def draw3D(self, graphic):\n p = self.points\n pygame.draw.line(graphic, WHITE, p[0], p[1])\n pygame.draw.line(graphic, WHITE, p[1], p[2])\n pygame.draw.line(graphic, WHITE, p[2], p[0])\n\nclass Square3D:\n def __init__(self, p1, p2, p3, p4, width, height, focal_length, camera):\n self.points = [0, 0, 0, 0]\n # 
print(self.points)\n \n self.points[0] = TranslateTo2D(p1, width, height, focal_length, camera)\n self.points[1] = TranslateTo2D(p2, width, height, focal_length, camera)\n self.points[2] = TranslateTo2D(p3, width, height, focal_length, camera)\n self.points[3] = TranslateTo2D(p4, width, height, focal_length, camera)\n\n def draw3D(self, graphic):\n p = self.points\n pygame.draw.line(graphic, WHITE, p[0], p[1])\n pygame.draw.line(graphic, WHITE, p[1], p[2])\n pygame.draw.line(graphic, WHITE, p[2], p[3])\n pygame.draw.line(graphic, WHITE, p[3], p[0])\n\nclass mesh:\n def __init__(self, string):\n self.points = vertexLoad(string)\n self.faces = faceLoad(string)\n\n def meshDraw(self, window, width, height, focal_length, angle, camera):\n m = self\n\n for i in m.faces:\n if (len(i) == 3):\n # Find the points\n p1 = m.points[i[0]-1]\n p2 = m.points[i[1]-1]\n p3 = m.points[i[2]-1]\n \n # Multiply by the rotation matrix \n p1 = rotateX(angle[0], p1)\n p2 = rotateX(angle[0], p2)\n p3 = rotateX(angle[0], p3)\n\n p1 = rotateY(angle[1], p1)\n p2 = rotateY(angle[1], p2)\n p3 = rotateY(angle[1], p3)\n \n p1 = rotateZ(angle[2], p1)\n p2 = rotateZ(angle[2], p2)\n p3 = rotateZ(angle[2], p3)\n\n v1 = [p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]\n v2 = [p3[0] - p1[0], p3[1] - p1[1], p3[2] - p1[2]]\n n = crossProd(v1, v2)\n if (n[2]*camera[2] + n[1]*camera[1] + n[0]*camera[0] < 0):\n\n tr = Triangle3D(p1, p2, p3, width, height, focal_length, camera)\n tr.draw3D(window)\n \n elif(len(i) == 4):\n p1 = m.points[i[0]-1]\n p2 = m.points[i[1]-1]\n p3 = m.points[i[2]-1]\n p4 = m.points[i[3]-1]\n \n p1 = rotateX(angle[0], p1)\n p2 = rotateX(angle[0], p2)\n p3 = rotateX(angle[0], p3)\n p4 = rotateX(angle[0], p4) \n\n p1 = rotateY(angle[1], p1)\n p2 = rotateY(angle[1], p2)\n p3 = rotateY(angle[1], p3)\n p4 = rotateY(angle[1], p4)\n \n p1 = rotateZ(angle[2], p1)\n p2 = rotateZ(angle[2], p2)\n p3 = rotateZ(angle[2], p3)\n p4 = rotateZ(angle[2], p4)\n\n v1 = [p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]\n v2 = [p3[0] - p1[0], p3[1] - p1[1], p3[2] - p1[2]]\n n = crossProd(v1, v2)\n if (n[2]*camera[2] + n[1]*camera[1] + n[0]*camera[0]< 0):\n sq = Square3D(p1, p2, p3, p4, width, height, focal_length, camera)\n sq.draw3D(window) \n\ndef vertexLoad(string):\n vertices = []\n f = open(string)\n for x in f:\n if(x[0] == 'v' and x[1] != 'n' and x[1] != 't'):\n a = x.split()\n a.remove('v')\n for i in range(len(a)):\n a[i] = float(a[i])\n vertices.append(a)\n return vertices\n\ndef faceLoad(string):\n f = open(string)\n faces = []\n for x in f:\n if (x[0] == 'f'):\n a = x.split()\n a.remove('f')\n for i in range(len(a)):\n a[i] = int((a[i][:a[i].index('/')]))\n faces.append(a)\n return faces\n","sub_path":"main/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"540607621","text":"\"\"\"\nThis is where the implementation of the plugin code goes.\nThe GD_2-class is imported from both run_plugin.py and run_debug.py\n\"\"\"\n\nimport json\nimport sys\nimport logging\nfrom webgme_bindings import PluginBase\n\n# Setting up a logger\nlogger = logging.getLogger('GD_2')\nlogger.setLevel(logging.INFO)\nhandler = logging.StreamHandler(sys.stdout) # By default it logs to stderr..\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\nlibNode = None\n\ndef search_in_library(self, core, target): 
#meta_type(string): {Device, Country, Vendor, Region} | target(string) is the node's name we're searching\n childrens = core.load_children(self.META[\"Countries\"]) #loading all children from library\n for child in childrens:\n node = core.load_by_path(self.root_node, child['nodePath']) #loading the node by path\n get_child(self, core, node, target)\n\n\ndef get_child(self, core, node, target):\n global libNode\n # logger.info(\" Device: '{0}'\".format(core.get_attribute(node,'name'))) #logging the found child's name\n children = core.load_children(node) #getting all the children of a node\n for child in children:\n if core.get_attribute(child, 'name') == target: #if we found the searched country's node\n # logger.info(\"Found node: '{0}'\".format(core.get_attribute(child, 'name')))\n libNode = child\n break #return it\n else: #else\n get_child(self, core, child, target) #search in the child's children\n\n\nclass GD_2(PluginBase):\n def main(self):\n global libNode\n core = self.core\n root_node = self.root_node\n active_node = self.active_node\n\n with open(\"./imports/costumRegions.json\") as f: #loading regions json file into data list\n regions = json.load(f)\n\n with open(\"./imports/countries2.json\") as f: #loading countries json file into data list\n countries = json.load(f)\n\n name = core.get_attribute(active_node, 'name') #get the name of active_node\n logger.info('ActiveNode at \"{0}\" has name {1}'.format(core.get_path(active_node), name)) #logging the active_node's name\n\n #getting the meta of target file which we want to create\n region_meta = self.META[\"CustomRegion\"]\n\n i = 1 #temporary variable that helps to push the new child off from the old child so they does not stack\n for region in regions:\n child1 = core.create_child(active_node, region_meta) #creating a child node of the active node\n position_item = core.get_registry(active_node,'position') #getting position of the active node\n position_item['y'] = position_item['y'] + 50 * i #changing the position variable\n position_item['x'] += 400\n core.set_registry(child1, 'position', position_item) #changing the child's position\n core.set_attribute(child1, 'name', region['name']) #changing the child's name\n i+=1 #update the temporary variable\n\n for country in countries:\n if country[\"region\"] == core.get_attribute(child1, 'name'): #check if the selected country is in it's region\n search_in_library(self,core,country[\"country\"])\n core.add_member(child1, \"MemberCountries\", libNode)\n\n # creating a commit for the update\n commit_info = self.util.save(root_node, self.commit_hash, 'test', 'Python plugin created regions with countries')\n logger.info('commited:{0}'.format(commit_info))\n","sub_path":"src/plugins/GD_2/GD_2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"524643802","text":"\"\"\"Function to get the config id\"\"\"\nimport argparse\n\n\ndef argument_parser() -> str:\n \"\"\"Function to get the config id\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Argument parser to obtain the name of the config file\"\n )\n parser.add_argument(\n \"--config_id\",\n default=\"sample_config\",\n help=\"config id to use\",\n )\n args = parser.parse_args()\n assert isinstance(args.config_id, str)\n return 
args.config_id\n","sub_path":"src/utils/argument_parser.py","file_name":"argument_parser.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"237898984","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('realtime', '0010_radarconvertparams'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='radarsnapshot',\n name='save_datetime',\n field=models.DateTimeField(default=datetime.datetime(2017, 7, 7, 17, 19, 51, 255485), verbose_name=b'salvataggio', auto_now=True),\n preserve_default=False,\n ),\n ]\n","sub_path":"torinometeo/realtime/migrations/0011_radarsnapshot_save_datetime.py","file_name":"0011_radarsnapshot_save_datetime.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"195244041","text":"import tensorflow as tf\nimport numpy as np\n\nclass Optimizer:\n\n\tdef __init__(self,lr):\n\t\tself.lr= lr\n\t\tself.opt = None\n\n\tdef get_optimizer(self):\n\t\treturn self.opt\n\n\tdef minimize(self,inp):\n\t\treturn self.get_optimizer().minimize(inp)\n\n\tdef init_variables(self,session):\n\t\tsession.run(tf.variables_initializer(self.get_optimizer().variables()))\n\nclass MomentumOpt(Optimizer):\n\n\tdef __init__(self,lr=10e-4,momentum=0.9,decay=0.99,nesterov=True):\n\t\tOptimizer.__init__(self,lr)\n\t\tself.momentum = momentum\n\t\tself.nesterov = nesterov\n\t\tself.opt = tf.train.MomentumOptimizer(learning_rate=self.lr,\n\t\t\tmomentum=self.momentum,\n\t\t\tuse_nesterov=self.nesterov,\n\t\t)\n\nclass RMSPropOpt(Optimizer):\n\n\tdef __init__(self,lr=10e-4,decay=0.9,momentum=0.0,centered=False):\n\t\tOptimizer.__init__(self,lr)\n\t\tself.decay = decay\n\t\tself.momentum = momentum\n\t\tself.centered = centered\n\t\tself.opt = tf.train.RMSPropOptimizer(learning_rate=self.lr,\n\t\t\tdecay=self.decay,\n\t\t\tmomentum=self.momentum,\n\t\t\tcentered=self.centered,\n\t\t)\n\nclass AdamOpt(Optimizer):\n\n\tdef __init__(self,lr=10e-4,beta1=0.9,beta2=0.999):\n\t\tOptimizer.__init__(self,lr)\n\t\tself.beta1=beta1\n\t\tself.beta2 = beta2\n\t\tself.opt = tf.train.AdamOptimizer(learning_rate=self.lr,\n\t\t\tbeta1=self.beta1,\n\t\t\tbeta2=self.beta2,\n\t\t)\n\nclass GradientDOpt(Optimizer):\n\n\tdef __init__(self,lr=10e-4):\n\t\tOptimizer.__init__(self,lr)\n\t\tself.opt = tf.train.GradientDescentOptimizer(learning_rate=self.lr)","sub_path":"wolFikaM/tf_opt.py","file_name":"tf_opt.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"440797422","text":"#!/usr/bin/env python3\n# -*- coding : utf-8 -*-\n\nimport os\nCONFIG_FILE = 'config.txt'\nCUR_DIR = os.path.abspath(os.path.dirname(__file__))\nCF_PATH = os.path.join(CUR_DIR, CONFIG_FILE)\n\n#You need a baidu translate and \n#config.txt -> \n#APPID = xxxxxxx\n#SECRET_KEY = xxxxxx\n\ndef load_config(fn=CF_PATH):\n config = {}\n \n with open(fn, 'r', encoding='utf-8') as f:\n for line in f:\n #allow some notes in config file.\n ignore = line.find('#')\n if ignore >= 0: line = line[:ignore]\n \n kv = line.split('=', 1)\n if len(kv) != 2: continue\n k, v = kv[0].strip(), kv[1].strip()\n config[k] = v\n \n return config\n \nif __name__ == '__main__':\n config = load_config()\n print(config['APPID'], 
config['SECRET_KEY'])","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"36234478","text":"import matplotlib\nmatplotlib.use(\"TkAgg\")\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\n\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\n\nLARGE_FONT=(\"Verdana\",12)\n\n\nclass MyApp(tk.Tk):\n\n def __init__(self,*args,**kwargs):\n tk.Tk.__init__(self,*args , **kwargs)\n tk.Tk.wm_title(self,'TEST')\n #tk.Tk.iconbitmap(self, default=\"MGava.bmp\")\n container = tk.Frame(self)\n container.pack(side=\"top\",fill=\"both\",expand=True)\n container.grid_rowconfigure(0,weight=1) # minsize, priority\n container.grid_columnconfigure(0,weight=1)\n\n self.frames = {}\n for F in (StartPage, PageOne, PageTwo , PageThree): \n frame = F(container,self)\n self.frames[F] = frame\n frame.grid(row=0, column=0, sticky=\"nsew\") # sticky expand on all direction\n\n self.show_frame(StartPage)\n\n def show_frame(self, cont):\n frame = self.frames[cont]\n frame.tkraise()\n \n\n\nclass StartPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = tk.Label(self, text=\"Start Page\", font=LARGE_FONT)\n label.pack(pady=10,padx=10)\n button1 = ttk.Button(self, text=\"Visit Page1\",\n command=lambda: controller.show_frame(PageOne))\n button1.pack()\n button2 = ttk.Button(self, text=\"Visit Page2\",\n command=lambda: controller.show_frame(PageTwo))\n button2.pack()\n button3 = ttk.Button(self, text=\"Visit Page3\",\n command=lambda: controller.show_frame(PageThree))\n button3.pack()\n\n \nclass PageOne(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = tk.Label(self, text=\"Page One\", font=LARGE_FONT)\n label.pack(pady=10,padx=10)\n \n User=StringVar()\n User.set(\"user\")\n user = ttk.Entry(self, textvariable=User)\n user.pack()\n \n button = ttk.Button(self, text=\"Back\",\n command=lambda: controller.show_frame(StartPage))\n button.pack()\n button2 = ttk.Button(self, text=\"Visit Page2\",\n command=lambda: controller.show_frame(PageTwo))\n button2.pack()\n\nclass PageTwo(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = tk.Label(self, text=\"Page Two\", font=LARGE_FONT)\n label.pack(pady=10,padx=10)\n button = ttk.Button(self, text=\"Back\",\n command=lambda: controller.show_frame(StartPage))\n button.pack()\n button1 = ttk.Button(self, text=\"Visit Page1\",\n command=lambda: controller.show_frame(PageOne))\n button1.pack()\n\nclass PageThree(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = tk.Label(self, text=\"Page THree\", font=LARGE_FONT)\n label.pack(pady=10,padx=10)\n button = ttk.Button(self, text=\"Back\",\n command=lambda: controller.show_frame(StartPage))\n button.pack()\n\n f = Figure(figsize=(5,5), dpi=100)\n a = f.add_subplot(111)\n a.plot([1,2,3,4,5,6],[3,4,7,1,5,7])\n\n canvas = FigureCanvasTkAgg(f,self)\n canvas.show()\n canvas.get_tk_widget().pack(side=tk.TOP,fill=tk.BOTH, expand = True)\n\n toolbar= NavigationToolbar2TkAgg(canvas,self)\n toolbar.update()\n canvas._tkcanvas.pack(side=tk.TOP,fill=tk.BOTH, expand = True)\n\n \n\napp = 
MyApp()\napp.mainloop()\n","sub_path":"Python3/suunto_xml/tkinter_app.py","file_name":"tkinter_app.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"65328666","text":"# '''\n# This deep learning model comes from network resources. We refer to it Mask-r-cnn,\n# and the citation information shows below:\n#\n# @misc{matterport_maskrcnn_2017,\n# title={Mask R-CNN for object detection and instance segmentation on Keras and TensorFlow},\n# author={Waleed Abdulla},\n# year={2017},\n# publisher={Github},\n# journal={GitHub repository},\n# howpublished={\\url{https://github.com/matterport/Mask_RCNN}},\n# }\n# @article{ward2020scalable,\n# title={Scalable learning for bridging the species gap in image-based plant phenotyping},\n# author={Ward, Daniel and Moghadam, Peyman},\n# journal={Computer Vision and Image Understanding},\n# pages={103009},\n# year={2020},\n# publisher={Elsevier}\n# }\n#\n# So before running the code, you must download the model from link, get the initial weights from train.py.\n#\n# This python file focus on predicting dataset and getting output images using trained model.\n#\n# '''\n\n\nimport os\nfrom glob import glob\nimport argparse\nimport task3_config_cvppp as config_cvppp\nfrom mrcnn import model, visualize\nimport numpy as np\nimport cv2 as cv\nfrom skimage import io\nfrom matplotlib import pyplot as plt\nimport pylab\n\n\n# Converts a mask to RGB Format\ndef mask_to_rgb(mask):\n\n colours = visualize.random_colors(mask.shape[2])\n rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))\n\n for i in range(mask.shape[2]):\n for c in range(3):\n rgb_mask[:, :, c] = np.where(mask[:, :, i] != 0, int(colours[i][c] * 255), rgb_mask[:, :, c])\n\n return rgb_mask\n\n\n### 333\ndef load_image(im_path):\n\n image = cv.imread(im_path, 1)\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n # Check for alpha channel\n if not image.shape[2] <= 3:\n image = image[:, :, :3]\n\n return image\n\n\n# gain arguments from keyboard\ndef arguments():\n parser = argparse.ArgumentParser(description='Performs inference using a Mask RCNN Model')\n parser.add_argument('--dataPattern', type=str, required=True,\n help=\"A glob file path pattern in quotations. e.g. 
'path/*_rgb.png'\")\n parser.add_argument('--outputPath', type=str, required=True,\n help='Directory to save all outputs')\n parser.add_argument('--weightsPath', type=str, required=True,\n help='Path to model weights (.h5)')\n\n return parser.parse_args()\n\n\n# The main prediction function\ndef predict_segmentations():\n\n args = arguments()\n\n image_pattern = args.dataPattern\n\n print(\"Image Pattern:\", image_pattern)\n\n # Create output dir\n assert not os.path.isdir(args.outputPath), \"output dir already exists, please try again\"\n os.mkdir(args.outputPath)\n\n # Init config\n configuration = config_cvppp.InferenceConfig()\n\n # Init model\n inference_model = model.MaskRCNN(mode=\"inference\",\n config=configuration,\n model_dir=args.outputPath)\n\n inference_model.load_weights(args.weightsPath, by_name=True)\n\n # Predict Images\n with open(os.path.join(args.outputPath, 'leafCounts.csv'), 'a') as count_file:\n count_file.write(\"Image, Count\\n\")\n for im_path in glob(image_pattern):\n out_path = os.path.join(args.outputPath, os.path.basename(im_path))\n\n print(\"Saving prediction for\", im_path, \"at\", out_path)\n\n try:\n image = load_image(im_path)\n except:\n print(\"Bad File for prediction:\", im_path)\n continue\n\n # blur images\n # image = cv.GaussianBlur(image, (101, 101), 92, 0)\n\n # predict images\n results = inference_model.detect([image])\n # import matplotlib.pyplot as plt\n # plt.imshow(results)\n\n # convert images to RGB format\n rgb_mask = mask_to_rgb(results[0]['masks'])\n\n # store images\n # cv.imwrite(out_path, rgb_mask.astype(np.uint8), cv.IMWRITE_PNG_COMPRESSION)\n io.imsave(out_path, rgb_mask.astype(np.uint8))\n io.imshow(rgb_mask.astype(np.uint8))\n # view.show()\n plt.show()\n # sore result of leaf-counting\n\n count_file.write(os.path.basename(im_path) + \", \" + str(results[0]['masks'].shape[2]) + \"\\n\")\n\nif __name__ == '__main__':\n predict_segmentations()","sub_path":"9517_Group_Submision/Codes/task3_inference.py","file_name":"task3_inference.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"212861158","text":"def evaluate(expression):\n OPERATORS = {\n '+': lambda y, x : x + y,\n '-': lambda y, x : x - y,\n '*': lambda y, x : x * y,\n '/': lambda y, x : int(float(x) / y)\n }\n \n stack = []\n \n for token in expression:\n if token in OPERATORS:\n stack.append(OPERATORS[token](stack.pop(), stack.pop()))\n else:\n stack.append(int(token))\n \n return stack[-1]\n \n","sub_path":"Python/150_evaluate_reverse_polish_notation.py","file_name":"150_evaluate_reverse_polish_notation.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"245043952","text":"from django.urls import path, re_path, include\nfrom .views import add_to_cart, order_summary, delete_from_cart\n\napp_name = 'shopping_cart'\n\nurlpatterns = [\n re_path(r\"^add-to-cart/(?P[-\\w]+)/$\", add_to_cart, name=\"add_to_cart\"),\n path('order-summary/', order_summary, name=\"order-summary\"),\n re_path(r'^item/delete/(?P[-\\w]+)/$', delete_from_cart, name='delete_item'),\n]","sub_path":"src/shopping_cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"8978613","text":"from urllib.parse import urlparse, urljoin\n\nfrom .core import *\nfrom .vparsers import *\nfrom .utils 
import attributeerror_wrapper\n\n\nclass OsiedlePlatiniumParser(MultipleRequestsGeneratorMixin, MultipleWebpageParser): \n url = \"http://www.osiedleplatinum.pl/katalog-mieszkan\"\n method = \"GET\"\n var_params = [ dict(page=i) for i in range(1, 15) ]\n headers = {\n \"Host\": \"www.osiedleplatinum.pl\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Referer\": \"http://www.osiedleplatinum.pl/katalog-mieszkan\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Upgrade-Insecure-Requests\": \"1\"\n }\n\n schema = [\n DataUnit(label=\"Plan\", parser=LinkParser(DOMElementExtractor(\"a\")), id=\"plan\"),\n DataUnit(label=\"Typ\", parser=DOMTextExtractor(), id=\"_type\"),\n DataUnit(label=\"Number\", parser=DOMTextExtractor(), id=\"number\"),\n DataUnit(label=\"Pokoje\", parser=IntParser(DOMTextExtractor()), id=\"rooms\"),\n DataUnit(label=\"Metraż\", parser=AreaParser(DOMTextExtractor()), id=\"area\"),\n DataUnit(label=\"Piętro\", parser=IntParser(DOMTextExtractor()), id=\"floor\"),\n DataUnit(label=\"Cena m^2\", parser=PriceParser(DOMTextExtractor()), id=\"price_m2\"),\n DataUnit(label=\"Cena\", parser=PriceParser(DOMTextExtractor()), id=\"price\"),\n DataUnit(label=\"Szczegóły\", parser=NoneParser(), id=\"details_none\")\n ]\n \n @attributeerror_wrapper(return_value=[])\n def find_records(self, soup):\n return soup.find(\"table\", {\"class\": \"my-table\"})\\\n .find(\"tbody\").find_all(\"tr\")\n\n def split_record(self, record):\n return record.find_all(\"td\")\n \n def modify_record(self, record, soup=None):\n record[\"plan\"] = urljoin(self.url, record[\"plan\"])\n record[\"status\"] = self.deduce_status(record[\"price\"])\n record[\"fid\"] = self.create_fid(record)\n return record\n\n def deduce_status(self, price):\n if price is None:\n return StatusParser.SOLD\n return StatusParser.AVAILABLE\n\n def create_fid(self, record):\n fid_form = \"{floor}/{number}\"\n return fid_form.format(**record)","sub_path":"parsers/osiedleplatinium.py","file_name":"osiedleplatinium.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"13266530","text":"from health_check.backends import BaseHealthCheckBackend\nfrom health_check.exceptions import (\n ServiceReturnedUnexpectedResult, ServiceUnavailable\n)\n\nfrom sso.utils import sso_api_client\n\n\nclass SingleSignOnBackend(BaseHealthCheckBackend):\n\n message_bad_status = 'SSO proxy returned {0.status_code} status code'\n\n def check_status(self):\n try:\n response = sso_api_client.ping()\n except Exception as error:\n raise ServiceUnavailable('(SSO proxy) ' + str(error))\n else:\n if response.status_code != 200:\n raise ServiceReturnedUnexpectedResult(\n self.message_bad_status.format(response)\n )\n return True\n","sub_path":"healthcheck/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"173091434","text":"from ichnaea.api.exceptions import (\n RegionNotFoundV0,\n RegionNotFoundV0JS,\n)\nfrom ichnaea.api.locate.tests.base import BaseLocateTest\nfrom ichnaea.tests.base import AppTestCase\n\n\nclass RegionBase(BaseLocateTest, AppTestCase):\n\n apikey_metrics = False\n default_apikey = None\n 
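# metrics for these endpoints are reported under the 'region' type\n 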
metric_type = 'region'\n track_connection_events = True\n\n\nclass CommonRegionTests(object):\n\n def test_geoip(self):\n res = self._call(ip=self.test_ip)\n self.check_response(res, 'ok')\n self.check_db_calls(rw=0, ro=0)\n self.check_stats(counter=[\n ('request', [self.metric_path, 'method:post', 'status:200']),\n ], timer=[\n ('request', [self.metric_path, 'method:post']),\n ])\n\n def test_geoip_miss(self):\n res = self._call(ip='127.0.0.1', status=404)\n self.check_response(res, 'not_found')\n self.check_db_calls(rw=0, ro=0)\n self.check_stats(counter=[\n ('request', [self.metric_path, 'method:post', 'status:404']),\n ], timer=[\n ('request', [self.metric_path, 'method:post']),\n ])\n\n def test_get(self):\n res = self._call(ip=self.test_ip, method='get', status=200)\n self.check_response(res, 'ok')\n self.check_stats(counter=[\n ('request', [self.metric_path, 'method:get', 'status:200']),\n ], timer=[\n ('request', [self.metric_path, 'method:get']),\n ])\n\n def test_options(self):\n res = self._call(method='options', status=200)\n self.assertEqual(res.headers['Access-Control-Allow-Origin'], '*')\n self.assertEqual(res.headers['Access-Control-Max-Age'], '2592000')\n\n def test_unsupported_methods(self):\n self._call(method='delete', status=405)\n self._call(method='patch', status=405)\n self._call(method='put', status=405)\n\n def test_cache(self):\n res = self._call(ip=self.test_ip, method='get', status=200)\n cache = res.cache_control\n self.assertFalse(cache.public)\n self.assertTrue(cache.private)\n self.assertTrue(cache.proxy_revalidate)\n self.assertEqual(cache.max_age, 60)\n self.assertEqual(cache.s_max_age, 0)\n\n def test_api_key(self):\n res = self._call(ip=self.test_ip, api_key='test')\n self.check_response(res, 'ok')\n self.check_db_calls(rw=0, ro=0)\n # we don't log any additional API-key specific metrics\n self.check_stats(total=2)\n\n\nclass TestJSONView(CommonRegionTests, RegionBase):\n\n url = '/country.json'\n metric_path = 'path:country.json'\n not_found = RegionNotFoundV0\n\n @property\n def ip_response(self):\n return {\n 'country_code': 'GB',\n 'country_name': 'United Kingdom',\n }\n\n def check_response(self, response, status):\n self.assertEqual(response.content_type, 'application/json')\n self.assertEqual(response.charset, 'UTF-8')\n self.assertEqual(response.headers['Access-Control-Allow-Origin'], '*')\n self.assertEqual(response.headers['Access-Control-Max-Age'], '2592000')\n if status == 'ok':\n self.assertEqual(response.json, self.ip_response)\n elif status == 'not_found':\n self.assertEqual(response.json, self.not_found.json_body())\n\n\nclass TestJSView(CommonRegionTests, RegionBase):\n\n url = '/country.js'\n metric_path = 'path:country.js'\n not_found = RegionNotFoundV0JS\n\n @property\n def ip_response(self):\n return \"\"\"\\\nfunction geoip_country_code() { return 'GB'; }\nfunction geoip_country_name() { return 'United Kingdom'; }\n\"\"\"\n\n def check_response(self, response, status):\n self.assertEqual(response.content_type, 'text/javascript')\n self.assertEqual(response.charset, 'UTF-8')\n self.assertEqual(response.headers['Access-Control-Allow-Origin'], '*')\n self.assertEqual(response.headers['Access-Control-Max-Age'], '2592000')\n if status == 'ok':\n self.assertEqual(response.text, self.ip_response)\n elif status == 'not_found':\n self.assertEqual(response.text, 
self.not_found().text)\n","sub_path":"ichnaea/api/locate/tests/test_region_v0.py","file_name":"test_region_v0.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"502178683","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/11/26 14:53\n# @Author : qkwu\n# @File : leetcode10RegularExpressionMatching.py\n\n# Naive recursion - times out (TLE)\n# class Solution(object):\n# def isMatch(self, s, p):\n# if not p: return not s\n# first_match = bool(s) and p[0] in [s[0],'.']\n#\n# if len(p) >= 2 and p[1] == '*':\n# return self.isMatch(s, p[2:]) or (first_match and self.isMatch(s[1:], p))\n#\n# return first_match and self.isMatch(s[1:], p[1:])\n\n# DP\nclass Solution(object):\n def isMatch(self, s, p):\n # The DP table and the string s and p use the same indexes i and j, but\n # table[i][j] means the match status between p[:i] and s[:j], i.e.\n # table[0][0] means the match status of two empty strings, and\n # table[1][1] means the match status of p[0] and s[0]. Therefore, when\n # referring to the i-th and the j-th characters of p and s for updating\n # table[i][j], we use p[i - 1] and s[j - 1].\n m, n = len(s) + 1, len(p) + 1\n matches = [[False] * n for _ in range(m)]\n\n # Match empty string with empty pattern\n matches[0][0] = True\n\n # Match empty string with .*\n for i, element in enumerate(p[1:], 2):\n matches[0][i] = matches[0][i - 2] and element == '*'\n\n for i, ss in enumerate(s, 1):\n for j, pp in enumerate(p, 1):\n if pp != '*':\n # The previous character has matched and the current one\n # has to be matched. Two possible matches: the same or .\n matches[i][j] = matches[i - 1][j - 1] and \\\n (ss == pp or pp == '.')\n else:\n # Horizontal look up [j - 2].\n # Do not use the character before *.\n matches[i][j] |= matches[i][j - 2]\n\n # Vertical look up [i - 1].\n # Use at least one character before *.\n # p a b *\n # s 1 0 0 0\n # a 0 1 0 1\n # b 0 0 1 1\n # b 0 0 0 ?\n # Since j starts from 1 in enumerate, p[j - 2] is just the preceding character, so the line below lets '*' match as many characters as possible (e.g. bbbbb matching b*)\n if ss == p[j - 2] or p[j - 2] == '.':\n matches[i][j] |= matches[i - 1][j]\n\n return matches[-1][-1]\ns = \"bbbacbcbcbbbbabbbab\"\np = \"b*c*c*.*.*.*ab*c\"\n# Output: true\n\nsl = Solution()\nprint(sl.isMatch(s, p))\n\n# '.' 
Matches any single character.\n# '*' Matches zero or more of the preceding element.\n# s = \"aa\"\n# p = \"a*\"\n# Output: true\n#\n#\n# s = \"aab\"\n# p = \"c*a*b\"\n# Output: true\n#\n# s = \"mississippi\"\n# p = \"mis*is*p*.\"\n# Output: false","sub_path":"leetcode10RegularExpressionMatching.py","file_name":"leetcode10RegularExpressionMatching.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"600653635","text":"# imports #\nimport json\nimport xml.etree.ElementTree as et\nimport os\nfrom strippers import strip_space\n\n# Directory stuff #\ndir_path = os.path.dirname(os.path.realpath(__file__))\ndir_path = dir_path.replace('/Documentation/Max Documentation', '/Current Test Version/FrameLib')\nref_dir = f'{dir_path}/docs/refpages'\nobj_lookup = f'{dir_path}/interfaces/FrameLib-obj-dlookup.json'\n\n# A class to parse the XML files and build a JSON file from it #\nclass ParseAndBuild():\n def __init__(self):\n self.tree = 0\n self.root = 0\n self.digest = 'none'\n self.d_master_dict = dict({})\n \n # Assign values to construct the json #\n def build_json_file(self):\n self.d_inner_data = dict({\n 'digest' : self.digest\n })\n self.d_outer_data = dict({self.object_name:self.d_inner_data})\n self.d_master_dict.update(self.d_outer_data)\n\n # Extract the info from the refpages #\n def extract_from_refpage(self, x):\n self.tree = et.parse(x)\n self.root = self.tree.getroot() #c74object\n\n # Find Information # \n self.object_name = self.root.get('name') #finds the name so you don't have to do regex\n\n for child in self.root:\n if child.tag == 'digest':\n self.digest = child.text\n\n # Strip whitespace #\n self.digest = strip_space(self.digest)\n\n # Call the build function #\n self.build_json_file()\n\n#----------- THE GUTS ----------- #\ndef main():\n worker = ParseAndBuild()\n for filename in os.listdir(ref_dir):\n if filename != '.DS_Store':\n if filename != '_c74_ref_modules.xml':\n current_category = filename\n source_file_name = f'{ref_dir}/{filename}'\n\n for filename in os.listdir(source_file_name):\n if filename != '.DS_Store':\n source_file = f'{ref_dir}/{current_category}/{filename}'\n worker.extract_from_refpage(source_file)\n\n with open(obj_lookup, 'w') as fp:\n json.dump(worker.d_master_dict, fp, indent=4)\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n","sub_path":"Documentation/Max Documentation/parse_to_dlookup.py","file_name":"parse_to_dlookup.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"404816243","text":"# tower of hanoi problem implementation\n# for problem statement search it\n\ndef toh(n,frm, to, aux):\n '''\n toh == tower of hanoi, n= number of disks (numbered 1,2,...n),\n 'frm' is source disk, 'to' is destination disk and 'aux' is auxillary disk\n '''\n # if only 1 disk, transfer to destination\n if n==1:\n print(\"Move disk {} from {} to {}\\n\".format(n,frm,to))\n else:\n # move n-1 disks from source to auxillary using destination\n toh(n-1,frm, aux, to)\n # move last disk from source to destination\n print(\"Move disk {} from {} to {}\\n\".format(n,frm, to))\n # move n-1 disks back from auxillary to destination using source\n toh(n-1, aux, to, frm)\n\n\nif __name__ == '__main__':\n n = int(input(\"enter number of disks \"))\n print(\"We are using A, B and C tower, where A initially contains all the disks\\n and need to be transferred to C\\n\")\n toh(n, 'A', 'C', 'B')\n 
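# n disks always require 2**n - 1 moves in total\n 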
print(\"Completed..\")","sub_path":"towerOfHanoi.py","file_name":"towerOfHanoi.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148894043","text":"\"\"\"\nSprites with texture transformations\n\nArtwork from http://kenney.nl\n\nIf Python and Arcade are installed, this example can be run from the command line with:\npython -m arcade.examples.sprite_texture_transform\n\"\"\"\n\nimport arcade\nfrom arcade import Matrix3x3\nimport math\nimport os\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nSHIP_SPEED = 5\nASPECT = SCREEN_HEIGHT / SCREEN_WIDTH\nSCREEN_TITLE = \"Texture transformations\"\n\n\nclass MyGame(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n\n # Set the working directory (where we expect to find files) to the same\n # directory this .py file is in. You can leave this out of your own\n # code, but it is needed to easily run the examples using \"python -m\"\n # as mentioned at the top of this program.\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n\n self.ship = None\n self.camera_x = 0\n self.t = 0\n self.stars = None\n self.xy_square = None\n\n def setup(self):\n \"\"\" Setup \"\"\"\n self.ship = arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\", 0.5)\n self.ship.center_x = SCREEN_WIDTH / 2\n self.ship.center_y = SCREEN_HEIGHT / 2\n self.ship.angle = 270\n self.stars = arcade.load_texture(\":resources:images/backgrounds/stars.png\")\n self.xy_square = arcade.load_texture(\":resources:images/test_textures/xy_square.png\")\n\n # Set the background color\n arcade.set_background_color(arcade.color.BLACK)\n\n def on_update(self, delta_time: float):\n \"\"\" Update \"\"\"\n self.ship.update()\n self.camera_x += 2\n self.t += delta_time * 60\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n\n # This command has to happen before we start drawing\n arcade.start_render()\n\n for z in [300, 200, 150, 100]:\n opacity = int(math.exp(-z / 1000) * 255)\n angle = z\n scale = 150 / z\n translate = scale / 500\n self.stars.draw_transformed(\n 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, opacity,\n Matrix3x3().rotate(angle).scale(scale * ASPECT, scale).translate(-self.camera_x * translate, 0))\n self.ship.draw()\n\n for i, pair in enumerate([\n ['identity', Matrix3x3()],\n ['rotate(30)', Matrix3x3().rotate(30)],\n ['scale(0.8, 0.5)', Matrix3x3().scale(0.8, 0.5)],\n ['translate(0.3, 0.1)', Matrix3x3().translate(0.3, 0.1)],\n ['rotate(10).\\nscale(0.33, 0.33)', Matrix3x3().rotate(10).scale(0.7, 0.7)],\n ['scale(-1, 1)', Matrix3x3().scale(-1, 1)],\n ['shear(0.3, 0.1)', Matrix3x3().shear(0.3, 0.1)],\n [f'rotate({int(self.t) % 360})', Matrix3x3().rotate(self.t)],\n ]):\n x = 80 + 180 * (i % 4)\n y = 420 - (i // 4) * 320\n arcade.draw_text(pair[0], x, y - 20 - pair[0].count('\\n') * 10, arcade.color.WHITE, 10)\n self.xy_square.draw_transformed(x, y, 100, 100, 0, 255, pair[1])\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"arcade/examples/texture_transform.py","file_name":"texture_transform.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"291100772","text":"from tkinter import 
*\n\n\ndef doNothing():\n print(\"ok ok I won't...\")\n\nroot = Tk()\n\nroot.option_add('*tearOff',False) # hide the dotted line below the window\n\n# Tkinter puts menus at the top by default\nmenu = Menu(root)\nroot.config(menu=menu)\n\nsubMenu = Menu(menu)\n# Adds a drop down when \"File\" is clicked\nmenu.add_cascade(label=\"File\", menu=subMenu)\n\nsubMenu.add_command(label=\"New Project...\", command=doNothing)\nsubMenu.add_command(label=\"New...\", command=doNothing)\nsubMenu.add_separator()\nsubMenu.add_command(label=\"Exit\", command=doNothing)\n\neditMenu = Menu(menu)\nmenu.add_cascade(label=\"Edit\", menu=editMenu)\neditMenu.add_command(label=\"Redo\", command=doNothing)\n\nroot.mainloop()","sub_path":"6-menu/menu_template.py","file_name":"menu_template.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"654130196","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('building', '0012_sponsor'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='sponsor',\n name='condition_des',\n field=models.TextField(verbose_name=b'\\xe6\\x9d\\xa1\\xe4\\xbb\\xb6\\xe8\\xaf\\xb4\\xe6\\x98\\x8e', blank=True),\n ),\n migrations.AddField(\n model_name='sponsor',\n name='income',\n field=models.IntegerField(default=0, verbose_name=b'\\xe6\\xaf\\x8f\\xe5\\xa4\\xa9\\xe6\\x94\\xb6\\xe5\\x85\\xa5\\xe8\\xbd\\xaf\\xe5\\xa6\\xb9\\xe5\\xb8\\x81'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='sponsor',\n name='income_des',\n field=models.TextField(verbose_name=b'\\xe6\\x94\\xb6\\xe5\\x85\\xa5\\xe8\\xaf\\xb4\\xe6\\x98\\x8e', blank=True),\n ),\n ]\n","sub_path":"apps/building/migrations/0013_auto_20151023_1631.py","file_name":"0013_auto_20151023_1631.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"623018423","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/contrib/operators/redis_publish_operator.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 2191 bytes\nfrom airflow.utils.decorators import apply_defaults\nfrom airflow.contrib.hooks.redis_hook import RedisHook\nfrom airflow.models import BaseOperator\n\nclass RedisPublishOperator(BaseOperator):\n __doc__ = '\\n Publish a message to Redis.\\n\\n :param channel: redis channel to which the message is published (templated)\\n :type channel: str\\n :param message: the message to publish (templated)\\n :type message: str\\n :param redis_conn_id: redis connection to use\\n :type redis_conn_id: str\\n '\n template_fields = ('channel', 'message')\n\n @apply_defaults\n def __init__(self, channel, message, redis_conn_id='redis_default', *args, **kwargs):\n (super(RedisPublishOperator, self).__init__)(*args, **kwargs)\n self.redis_conn_id = redis_conn_id\n self.channel = channel\n self.message = message\n\n def execute(self, context):\n \"\"\"\n Publish the message to Redis channel\n\n :param context: the context object\n :type context: dict\n \"\"\"\n redis_hook = RedisHook(redis_conn_id=(self.redis_conn_id))\n self.log.info('Sending messsage %s to Redis on channel %s', self.message, self.channel)\n result = 
redis_hook.get_conn().publish(channel=(self.channel), message=(self.message))\n self.log.info('Result of publishing %s', result)","sub_path":"pycfiles/apache_airflow_arup-1.10.5-py3.6/redis_publish_operator.cpython-36.py","file_name":"redis_publish_operator.cpython-36.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"434184823","text":"import os,time\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\nimport googlemaps\nfrom datetime import datetime,timedelta\nfrom time import mktime\nfrom math import ceil\n\n# setting the timezone for proper dates\nos.environ['TZ'] = 'Europe/Amsterdam'\ntime.tzset()\n\n# Setting up the Google Maps client with the key stored in the environment variables\ngmaps = googlemaps.Client(key=os.environ['GMAPS_KEY'])\n\n# Setting up the logger for logging\nimport logging\nlogger = logging.getLogger('qlinkplanner')\n\n\n# View for the index. Just a simple form.\ndef index(request):\n\treturn render(request,'planner/index.html')\n\t\n# View for the car modality\ndef car(request):\n\t# Form validation\n\tif(request.POST.get(\"from\",\"\") == \"\" or request.POST.get(\"to\",\"\") == \"\"):\n\t\tlogger.info(\"Empty from or to\")\n\t\treturn\n\t# Get current time\n\tnow = datetime.now()\n\t# Fetch data from Google\n\tcar_directions = gmaps.directions(request.POST.get(\"from\", \"\"),request.POST.get(\"to\", \"\"),mode=\"driving\",departure_time=now,language=\"nl_NL\")\n\t# Calculae arrival time\n\tarrival = now + timedelta(seconds = car_directions[0][\"legs\"][0][\"duration\"][\"value\"])\n\t# Log request\n\tlogger.info(\"Car\")\n\t# Render the car modality\n\treturn render(request,'planner/trips/car.html',{'directions':car_directions,'arrival':arrival.strftime(\"%H:%M\")})\n\t\ndef parkride(request):\n\t# Form validation\n\tif(request.POST.get(\"from\",\"\") == \"\" or request.POST.get(\"to\",\"\") == \"\"):\n\t\tlogger.info(\"Empty from or to\")\n\t\treturn\n\t\t\n\t# Get current time\n\tnow = datetime.now()\n\t\n\t# Set place-id and name for the park and rides\n\tif(request.POST.get(\"via\",\"\") == \"prhgk\"):\n\t\tparkride = \"ChIJvdszPSczyEcRhdw9i5NkIqA\"\n\t\tprname = \"Park+Ride Hoogkerk\"\n\telif(request.POST.get(\"via\",\"\") == \"prhrn\"):\n\t\tparkride = \"ChIJf4OcMgstyEcRO1iKAgXkFPQ\"\n\t\tprname = \"Park+Ride Haren\"\n\telif(request.POST.get(\"via\",\"\") == \"prreit\"):\n\t\tparkride = \"ChIJX1933-DMyUcRFxqjP6DI8ow\"\n\t\tprname = \"Park+Ride Reitdiep\"\n\telif(request.POST.get(\"via\",\"\") == \"prkar\"):\n\t\tparkride = \"ChIJ61-7iZrSyUcRp09q_RrMv_U\"\n\t\tprname = \"Park+Ride Kardinge\"\n\telif(request.POST.get(\"via\",\"\") == \"prhs\"):\n\t\tparkride = \"ChIJPSpLYU3NyUcR2XAniPw6Yuk\"\n\t\tprname = \"Park+Ride Hoofdstation\"\n\telif(request.POST.get(\"via\",\"\") == \"prp3\"):\n\t\tparkride = \"ChIJ54zXTkstyEcR8dO-LV1HdxI\"\n\t\tprname = \"Park+Ride P3 (Europapark/Boumaboulevard)\"\n\n\t# first car then ride\n\tif(request.POST.get(\"direction\",\"\") == \"heen\"):\n\t\t# Get car directions\n\t\tcar_directions = gmaps.directions(request.POST.get(\"from\", \"\"),\"place_id:\"+parkride,mode=\"driving\",departure_time=now,language=\"nl_NL\")\n\t\t# Calculate arrival at park+ride and add 5 minutes to that\n\t\tarrival_at_park_ride = now + timedelta(seconds = 
car_directions[0][\"legs\"][0][\"duration\"][\"value\"])\n\t\tarrival_at_park_ride_parked = arrival_at_park_ride + timedelta(minutes = 5)\n\t\t# Get transit directions\n\t\ttransit_directions = gmaps.directions(\"place_id:\"+parkride,request.POST.get(\"to\", \"\"),mode=\"transit\",departure_time=arrival_at_park_ride_parked,language=\"nl_NL\")\n\t\t# log the request\n\t\tlogger.info(\"Park and Ride heen\")\n\t\t# Get arrival time and transform it into a timestamp\n\t\tarrival = transit_directions[0][\"legs\"][0][\"arrival_time\"][\"text\"]\n\t\tarrival_timestamp = datetime.strptime(now.strftime(\"%Y-%m-%d\")+\" \"+transit_directions[0][\"legs\"][0][\"arrival_time\"][\"text\"],\"%Y-%m-%d %H:%M\")\n\t\t# Convert it into an UNIX timestamp to calculate duration time\n\t\tarrival_timestamp = mktime(arrival_timestamp.timetuple())\n\t\tnow_unix = mktime(now.timetuple())\n\t\tduration = ceil((arrival_timestamp-now_unix)/60)\n\t\t# Render results\n\t\treturn render(request,'planner/trips/parkrideheen.html',{'car_directions':car_directions,'transit_directions':transit_directions,'arrival':arrival,'prname':prname,'duration':duration})\n\t\n\t# First ride then car (return trip)\n\telif(request.POST.get(\"direction\",\"\") == \"terug\"):\n\t\t# Get transit directions\n\t\ttransit_directions = gmaps.directions(request.POST.get(\"from\", \"\"),\"place_id:\"+parkride,mode=\"transit\",departure_time=now,language=\"nl_NL\")\n\t\t# Calculate the arrival time at the park+ride and add 5 minutes to that\n\t\tarrival_at_park_ride = datetime.strptime(now.strftime(\"%Y-%m-%d\")+\" \"+transit_directions[0][\"legs\"][0][\"arrival_time\"][\"text\"],\"%Y-%m-%d %H:%M\")\n\t\tarrival_at_park_ride_parked = arrival_at_park_ride + timedelta(minutes = 5)\n\t\t# Get the car directions\n\t\tcar_directions = gmaps.directions(\"place_id:\"+parkride,request.POST.get(\"to\", \"\"),mode=\"driving\",departure_time=arrival_at_park_ride_parked,language=\"nl_NL\")\n\t\t# Calculate the duration based on the durations + 5 minutes\n\t\tduration = ceil((transit_directions[0][\"legs\"][0][\"duration\"][\"value\"] + (5*60) + car_directions[0][\"legs\"][0][\"duration\"][\"value\"])/60)\n\t\t# Calculate the final arrival time\n\t\tarrival = arrival_at_park_ride_parked + timedelta(seconds = car_directions[0][\"legs\"][0][\"duration\"][\"value\"])\n\t\tlogger.info(\"Park and Ride terug\")\n\t\t# Extract the departure time\n\t\tdeparture = transit_directions[0][\"legs\"][0][\"departure_time\"][\"text\"]\n\t\t# Render the results\n\t\treturn render(request,'planner/trips/parkrideterug.html',{'car_directions':car_directions,'transit_directions':transit_directions,'arrival':arrival.strftime(\"%H:%M\"),'prname':prname,'duration':duration,'departure':departure})\n\t\t\n\t\n\t\ndef transit(request):\n\t# Form validation\n\tif(request.POST.get(\"from\",\"\") == \"\" or request.POST.get(\"to\",\"\") == \"\"):\n\t\tlogger.info(\"Empty from or to\")\n\t\treturn\n\t# Get current date\n\tnow = datetime.now()\n\t#Fetch directions from Google\n\ttransit_directions = gmaps.directions(request.POST.get(\"from\", \"\"),request.POST.get(\"to\", \"\"),mode=\"transit\",departure_time=now,language=\"nl_NL\")\n\t# Extract arrival time\n\tarrival = transit_directions[0][\"legs\"][0][\"arrival_time\"][\"text\"]\n\t# Extract departure time\n\tdeparture = transit_directions[0][\"legs\"][0][\"departure_time\"][\"text\"]\n\t# Log request\n\tlogger.info(\"Transit\")\n\t# Render page\n\treturn 
render(request,'planner/trips/transit.html',{'directions':transit_directions,'arrival':arrival,'departure':departure})","sub_path":"planner/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"231654686","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 25 17:11:09 2018\n\n@author: yuanjihuang\n\"\"\"\nimport csv\nimport Adafruit_ADS1x15\nimport Adafruit_ADXL345\n\n\nclass Sensor:\n def __init__(self, max_len=20):\n self.adc = Adafruit_ADS1x15.ADS1115(address=0x48)\n self.acel = Adafruit_ADXL345.ADXL345(address=0x53)\n self.cur_time = 0\n self.time = [0]\n self.muscle = []\n self.acc = []\n self.max_len = max_len\n\n def check_len(self):\n if len(self.time) > self.max_len:\n self.time.pop(0)\n if len(self.muscle) > self.max_len:\n self.muscle.pop(0)\n if len(self.acc) > self.max_len:\n self.acc.pop(0)\n\n def read(self):\n # add time\n self.cur_time += 1\n self.time.append(self.cur_time)\n # add muscle\n a = self.adc.read_adc(0, gain=(2 / 3))\n self.muscle.append(a)\n # add acc\n x, y, z = self.acel.read()\n self.acc.append((x, y, z))\n # check length\n self.check_len()\n\n def save_csv(self, name):\n # print(self.cur_time)\n with open(name, 'w') as output:\n writer = csv.writer(output, delimiter=',', lineterminator='\\n')\n for i in range(len(self.muscle)):\n writer.writerow([self.time[i], self.muscle[i], self.acc[i][0],\\\n self.acc[i][1], self.acc[i][2]])\n","sub_path":"sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"499973017","text":"import read\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report\n\nprint(\"Reading data ...\")\nx_all, y_all = read.read(LOAD_DATA=False)\nx_train, x_test, y_train, y_test = train_test_split(x_all, y_all, test_size=0.3, random_state=42)\nprint(x_train.shape, y_train.shape)\nprint(x_test.shape, y_test.shape)\n\nmodels = [RandomForestClassifier(),\n RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',\n max_depth=None, max_features='sqrt', max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=1000, n_jobs=1,\n oob_score=False, random_state=None, verbose=0,\n warm_start=False)]\n\nfor model in models:\n print(\"Fitting RF ...\")\n model.fit(x_train, y_train)\n\n print(\"Evaluating ...\")\n y_pred = model.predict(x_test)\n\n print(\"Accuracy is %f.\" % accuracy_score(y_test, y_pred))\n print(confusion_matrix(y_test, y_pred))\n print(\"Precision score is %f.\" % precision_score(y_test, y_pred))\n print(\"Recall score is %f.\" % recall_score(y_test, y_pred))\n print(\"F1 score is %f.\" % f1_score(y_test, y_pred))\n print(\"-----------------------------------\")\n","sub_path":"model/train/train_randomForenst.py","file_name":"train_randomForenst.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"103343009","text":"from tkinter import *\r\nimport pandas as pd\r\nimport pickle\r\nfrom tkinter 
import messagebox\r\nfrom tkinter import font\r\n\r\n# loading dumped data files (pickles from managing_inventory.py)\r\ndata = pickle.load(open('pcat.p', 'rb'))\r\nlast3 = pickle.load(open('last3mon.p', 'rb'))\r\nlis_inv = pickle.load(open(\"lis_inv.p\", \"rb\"))\r\ninv_mon_name = pickle.load(open(\"inv_mon_name.p\", \"rb\"))\r\npred = pickle.load(open(\"predsales.p\", \"rb\"))\r\noff = pickle.load(open('pcat.p', 'rb'))\r\noff_prod = pickle.load(open(\"off_prod.p\", \"rb\"))\r\nfur_prod = pickle.load(open(\"fur_prod.p\", \"rb\"))\r\ntec_prod = pickle.load(open(\"tec_prod.p\", \"rb\"))\r\n\r\n# window configuration\r\nroot = Tk()\r\nroot.geometry(\"900x600\")\r\nroot.configure(background='RoyalBlue3')\r\nroot.title(\"DM-Inventory Management\")\r\nroot.resizable(0,0)\r\nMonth = list(range(1,13))\r\nYear = [2014,2015,2016,2017]\r\nCategory = ['Office Supplies','Furniture','Technology']\r\nBMW = ['Best', 'Moderate', 'Worst']\r\noffsub = ['Binders', 'Storage', 'Appliances', 'Paper', 'Art', 'Envelopes', 'Fasteners', 'Labels', 'Supplies']\r\nfursub = ['Chairs', 'Bookcases', 'Furnishings', 'Tables']\r\ntecsub = ['Copiers', 'Accessories', 'Machines', 'Phones']\r\nsub = [offsub, fursub, tecsub]\r\n\r\nrate = StringVar(root)\r\nrate.set('a')\r\nrateDrop = OptionMenu(root, rate, *BMW)\r\n\r\nsubname = StringVar(root)\r\nsub_sel = ['a']\r\nsubname.set('a')\r\nscnameDropDown = OptionMenu(root, subname, *sub_sel)\r\n\r\np = StringVar(root)\r\nproduct_list = [0]\r\npDropDown = OptionMenu(root, p, *product_list)\r\n\r\n\r\ndef gsummary(*ar):\r\n v = p.get()\r\n print(v)\r\n h1=list(last3[:370]['d_product'])\r\n h2=list(last3[370:1500]['d_product'])\r\n h3=list(last3[1500:]['d_product'])\r\n val = 0\r\n z = 0\r\n \r\n if v in h1:\r\n for j in range(len(last3)):\r\n if last3['d_product'][j] == v:\r\n z=j\r\n ltq= last3['d_quantity'][z] \r\n print(\"Quantity sold in last 3 months: \", ltq)\r\n val = lis_inv[inv_mon_name[-1]][v]\r\n print(\"Remaining quantity in inventory\", val)\r\n print('Best Product')\r\n pre = pred[v]\r\n print(\"Predicted sales:\", pre)\r\n listi.delete(0, END)\r\n listi.insert(END, ' Best Product')\r\n listi.itemconfig(END, foreground=\"green\")\r\n listi.insert(END, 'Name of the product :')\r\n listi.insert(END, v)\r\n listi.insert(END, 'Quantity sold in last 3 months : {}'.format(ltq))\r\n listi.insert(END, 'Remaining quantity in inventory : {}'.format(val))\r\n listi.insert(END, 'Predicted sales : {}'.format(pre))\r\n if(val < 15):\r\n messagebox.showinfo(\"Add inventory alert\", \"This product quantity is running low. Increase the inventory for this product!!!\")\r\n \r\n \r\n elif v in h2:\r\n for j in range(len(last3)):\r\n if last3['d_product'][j] == v:\r\n z=j\r\n ltq= last3['d_quantity'][z] \r\n print(\"Quantity sold in last 3 months: \", ltq)\r\n val = lis_inv[inv_mon_name[-1]][v]\r\n print(\"Remaining quantity in inventory\", val)\r\n print('Moderate Product')\r\n pre = pred[v]\r\n print(\"Predicted sales:\", pre)\r\n listi.delete(0, END)\r\n listi.insert(END, ' Moderate Product')\r\n listi.itemconfig(END, foreground=\"blue\")\r\n listi.insert(END, 'Name of the product :')\r\n listi.insert(END, v)\r\n listi.insert(END, 'Quantity sold in last 3 months : {}'.format(ltq))\r\n listi.insert(END, 'Remaining quantity in inventory : {}'.format(val))\r\n listi.insert(END, 'Predicted sales : {}'.format(pre))\r\n if(val < 15):\r\n messagebox.showinfo(\"Add inventory alert\", \"This product quantity is running low. 
Increase the inventory for this product!!!\")\r\n \r\n elif v in h3:\r\n for j in range(len(last3)):\r\n if last3['d_product'][j] == v:\r\n z=j\r\n ltq= last3['d_quantity'][z] \r\n print(\"Quantity sold in last 3 months: \", ltq)\r\n val = lis_inv[inv_mon_name[-1]][v]\r\n print(\"Remaining quantity in inventory\", val)\r\n print('Worst Product')\r\n pre = pred[v]\r\n print(\"Predicted sales:\", pre)\r\n listi.delete(0, END)\r\n listi.insert(END, ' Worst Product')\r\n listi.itemconfig(END, foreground=\"red\")\r\n listi.insert(END, 'Name of the product :')\r\n listi.insert(END, v)\r\n listi.insert(END, 'Quantity sold in last 3 months : {}'.format(ltq))\r\n listi.insert(END, 'Remaining quantity in inventory : {}'.format(val))\r\n listi.insert(END, 'Predicted sales : {}'.format(pre))\r\n if(val < 15):\r\n messagebox.showinfo(\"Add inventory alert\", \"This product quantity is running low. Increase the inventory for this product!!!\")\r\n\r\n elif v == 'Nothing to select':\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n \r\n else:\r\n val = lis_inv[inv_mon_name[-1]][v]\r\n print(\"Remaining quantity in inventory\", val)\r\n print('Not Sold in last THREE MONTHS!!!')\r\n pre = pred[v]\r\n print(\"Predicted sales:\", pre)\r\n listi.delete(0, END)\r\n listi.insert(END, ' Not Sold in last THREE MONTHS!!!')\r\n listi.itemconfig(END, foreground=\"purple\")\r\n listi.insert(END, 'Name of the product :')\r\n listi.insert(END, v)\r\n listi.insert(END, 'Remaining quantity in inventory : {}'.format(val))\r\n listi.insert(END, 'Predicted sales : {}'.format(pre))\r\n if(val < 15):\r\n messagebox.showinfo(\"Add inventory alert\", \"This product quantity is running low. Increase the inventory for this product!!!\")\r\n \r\n \r\n# function to get product summary on selecting a product\r\ndef getpname(*val):\r\n c = cat.get()\r\n v = subname.get()\r\n r = rate.get()\r\n global p\r\n global pDropDown\r\n pDropDown.destroy()\r\n print(c, v, r)\r\n p = StringVar(root)\r\n if c == 'Office Supplies':\r\n if r == 'Best':\r\n for i in range(len(sub[0])):\r\n if v == sub[0][i]:\r\n product_list = off_prod[0][i]\r\n if len(product_list) > 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n elif r == 'Moderate':\r\n for i in range(len(sub[0])):\r\n if v == sub[0][i]:\r\n product_list = off_prod[1][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n else:\r\n for i in range(len(sub[0])):\r\n if v == sub[0][i]:\r\n product_list = off_prod[2][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n \r\n elif c == 'Furniture':\r\n if r 
== 'Best':\r\n for i in range(len(sub[1])):\r\n if v == sub[1][i]:\r\n product_list = fur_prod[0][i]\r\n if len(product_list) > 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n elif r == 'Moderate':\r\n for i in range(len(sub[1])):\r\n if v == sub[1][i]:\r\n product_list = fur_prod[1][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n else:\r\n for i in range(len(sub[1])):\r\n if v == sub[1][i]:\r\n product_list = fur_prod[2][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n \r\n else:\r\n if r == 'Best':\r\n for i in range(len(sub[2])):\r\n if v == sub[2][i]:\r\n product_list = tec_prod[0][i]\r\n if len(product_list) > 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n elif r == 'Moderate':\r\n for i in range(len(sub[2])):\r\n if v == sub[2][i]:\r\n product_list = tec_prod[1][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n else:\r\n for i in range(len(sub[2])):\r\n if v == sub[2][i]:\r\n product_list = tec_prod[2][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n lisdis.delete(0, END)\r\n j = 1 \r\n if product_list[0] == 'Nothing to select':\r\n lisdis.insert(END, \"*Nothing to display*\")\r\n else:\r\n for i in product_list:\r\n lisdis.insert(END, '{}. 
{}'.format(j, i))\r\n j +=1\r\n \r\n\r\ndef getrate(*val):\r\n print(subname.get())\r\n global rate\r\n global rateDrop\r\n rateDrop.destroy()\r\n rate = StringVar(root)\r\n rate.set(BMW[0])\r\n rate.trace(\"w\", getpname)\r\n rateDrop = OptionMenu(root, rate, *BMW)\r\n rateDrop.configure(width = 10, anchor = 'w')\r\n rateDrop.place(relx = 0.21, rely = 0.285)\r\n \r\n# function to get a list of products on selecting category \r\ndef getYM(val):\r\n global s\r\n global subname\r\n global sub_sel\r\n global scnameDropDown\r\n scnameDropDown.destroy()\r\n \r\n c = cat.get()\r\n if c == 'Office Supplies':\r\n sub_sel = sub[0]\r\n elif c == 'Furniture':\r\n sub_sel = sub[1]\r\n else:\r\n sub_sel = sub[2]\r\n print(c)\r\n subname = StringVar(root)\r\n subname.set(sub_sel[0])\r\n subname.trace(\"w\", getrate) # calls getpname() when a product is chosen from the list\r\n scnameDropDown = OptionMenu(root, subname, *sub_sel)\r\n scnameDropDown.configure(width = 10, anchor = 'w')\r\n scnameDropDown.place(relx = 0.21, rely = 0.165)\r\n\r\ncatLabel = Label(root,text='Select Category : ', background='RoyalBlue3', fg = 'white')\r\ncatLabel.place(relx = 0.05, rely = 0.05)\r\n\r\npLabel = Label(root,text='Select Sub-Category : ', background='RoyalBlue3', fg = 'white')\r\npLabel.place(relx = 0.05, rely = 0.17)\r\n\r\nproL = Label(root,text='Select Rating : ', background='RoyalBlue3', fg = 'white')\r\nproL.place(relx = 0.05, rely = 0.295)\r\n\r\nLabel(root, text = 'Select Product : ', background='RoyalBlue3', fg = 'white').place(relx = 0.05, rely = 0.415)\r\n \r\ncat = StringVar(root)\r\ncat.set(Category[0])\r\ncatDropdown = OptionMenu(root,cat,*Category, command=getYM)\r\ncatDropdown.configure(width = 13, anchor = 'w')\r\ncatDropdown.place(relx = 0.21, rely = 0.05)\r\n\r\ncatLabel = Label(root,text='Summary of selected product : ', background='RoyalBlue3', fg = 'white')\r\ncatLabel.place(relx = 0.62, rely = 0.55) \r\n\r\nLabel(root, text = 'List of products : ', background='RoyalBlue3', fg = 'white').place(relx = 0.62, rely = 0.06)\r\n\r\nlisdis = Listbox(root, width = 60, height = 15)\r\nlisdis.place(relx = 0.545, rely = 0.1)\r\n\r\nlisti = Listbox(root, width = 45, height = 7)\r\nlisti.place(relx = 0.6, rely = 0.59)\r\n\r\n\r\nroot.mainloop()","sub_path":"Final Folder/GUI/in_gui.py","file_name":"in_gui.py","file_ext":"py","file_size_in_byte":14928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"587491843","text":"'''\r\nCreated on Sep 3, 2012\r\n \r\n@author: Daniel J. 
Rivers\r\n'''\r\nfrom DataAccess.DBInt import DBInt\r\nfrom Utilities import OutputUtils\r\n\r\nclass TableHandler:\r\n\r\n sep = \"~\"\r\n\r\n @staticmethod\r\n def writeRecord( l, td ):\r\n values = []\r\n where = []\r\n for i, v in enumerate( l ):\r\n values.append( ( td.columnNames[ i ][ 0 ], v ) )\r\n if i <= td.where:\r\n where.append( ( td.columnNames[ i ][ 0 ], v ) )\r\n ret = DBInt().merge( tuple( values ), tuple( where ), td )\r\n OutputUtils.debug( \"Merged Record: \" + str( values ) )\r\n return ret\r\n\r\n @staticmethod\r\n def getRecordByID( i, td ):\r\n try:\r\n return DBInt().get( ( ( \"ID\", i ), ), td )[ 0 ]\r\n except Exception as e:\r\n OutputUtils.exception( \"No row found\", e )\r\n\r\n @staticmethod\r\n def getAllRecords( td ):\r\n return DBInt().getAll( td )\r\n\r\n @staticmethod\r\n def getColumnHeaders( td ):\r\n ret = []\r\n for i in td.columnNames [:len( td.columnNames ) - 1]:\r\n ret.append( i[ 0 ] )\r\n return ret\r\n\r\n @classmethod\r\n def getTableSetup( cls, td, l ):\r\n return [ cls.getColumnHeaders( td ), cls.getValuesForTable( td, l )]\r\n\r\n @staticmethod\r\n def getValuesForTable( td, l ):\r\n records = TableHandler.getAllRecords( td )\r\n ret = []\r\n for i in records:\r\n record = i[1:len( td.columnNames )]\r\n if not l:\r\n ret.append( record )\r\n else:\r\n add = True\r\n for j in l:\r\n if j in record:\r\n add = False\r\n if add:\r\n ret.append( record )\r\n return ret\r\n\r\n @staticmethod\r\n def getData():\r\n return None\r\n","sub_path":"FileInventory/DataAccess/TableHandler.py","file_name":"TableHandler.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"137859702","text":"from sys import argv\nimport numpy as np\nfrom scipy.stats import entropy as KLdivergence\n\ndef prob_indep(counts, n):\n# counts: (matrix) contingency table\n# n: (int) sum of counts\n\tR = [sum(row) for row in counts]\n\tC = [sum([counts[i][j] for i in range(len(counts))]) for j in range(len(counts[0]))]\n\tprob = []\n\tfor i in range(len(counts)):\n\t\tprob.append([])\n\t\tfor j in range(len(counts[i])):\n\t\t\tprob[-1].append(0)\n\t\t\tprob[i][j] = R[i]*C[j]/n**2\n\treturn prob\n\ndef total(counts):\n# counts: (matrix) contingency table\n\tif len(counts)==0:\n\t\treturn 0\n\treturn sum(counts[0])+total(counts[1:])\n\ndef flatten(M):\n\tL = []\n\tfor row in M:\n\t\tL = L + row\n\treturn np.array(L)\n\ndef printv(str, flag=\"-v\"):\n\tif flag in argv:\n\t\tprint(str)\n\ndef MonteCarlo(KL, prob, n, sample_size=1E6, seed=0):\n\tnp.random.seed(seed)\n\tprintv(\"Sampling from H0...\")\n\tsample = np.random.multinomial(n, prob, int(sample_size))\n\tprintv(\"Calculating KL for samples...\")\n\tKL_sample = np.array([KLdivergence(obs, prob, 2) for obs in sample])\n\tprintv(\"Computing p-value..\")\n\tp = ((KL_sample >= KL).sum()+1)/(int(sample_size)+1)\n\treturn p\n\ndef receive_cont_table(path):\n\tM = []\n\twith open(path, \"r\") as F:\n\t\tfor line in F:\n\t\t\tM.append([int(x) for x in line.strip().split(\"\\t\")])\n\treturn M\n\n# receive input\nprintv(\"Receiving input...\")\ncounts = receive_cont_table(argv[1])\nn = total(counts)\n# put expected (H0) and observed distributions in numpy arrays\nprintv(\"Calculating H0 distribution...\")\nexp = flatten(prob_indep(counts, n))\nobs = flatten(counts)/n\n# remove counts[i][j] where row i and col j are empty\nprintv(\"Removing rows/cols without counts...\")\nmask = np.argwhere(exp>0).flatten()\nexp, obs = exp[mask], obs[mask]\n# 
calculate KL-divergence and assess significance\nprintv(\"Calculating KL divergence...\")\nKL = KLdivergence(obs, exp, 2)\np = MonteCarlo(KL, exp, n)\n\nprint(p)\nprintv(\"KL=\"+str(KL), flag=\"-kl\")\n","sub_path":"montecarlo.py","file_name":"montecarlo.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"112633282","text":"import urllib\nimport lxml.html as html\nimport io, json\n\nlink = \"https://tv.yandex.ru/213?grid=main&period=all-day\"\npage = html.fromstring(urllib.urlopen(link).read())\nchannels = page.cssselect('.tv-channel')\n\ndata = []\n\nfor row in channels:\n\titem = dict()\n\titem['channel'] = row.cssselect('.tv-channel-title__text')[0].text_content()\n\tstyle = row.cssselect('.b-tv-image__picture')[0].get(\"style\").replace(\"url(\", \"url(http:\")\n\titem['icon'] = \"http://\" + style[style.find(\"avatars\"):-1]\n\tpr = row.cssselect('.tv-channel-events__items')[0]\n\ttime = pr.cssselect('.tv-event__time-text')\n\tprog = pr.cssselect('.tv-event__title-inner')\n\titem['programs'] = []\n\tfor p in range(0, len(time)):\n\t \titem['programs'].append([time[p].text_content(), prog[p].text_content()])\n\tdata.append(item)\n\nwith open('data.json', 'w') as outfile:\n\tjson_string = json.dumps({'channels':data}, indent=4, ensure_ascii = False).encode('utf-8')\n\toutfile.write(json_string)\n\n","sub_path":"TV.app/www/PY/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"56695332","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.preprocessing import PolynomialFeatures\nimport matplotlib.pyplot as plt\n\n\nX = 2 * np.random.rand(100, 1)\ny = 4 + 3 * X + np.random.randn(100, 1)\n\nX_b = np.c_[np.ones((100, 1)), X]\n\ntheta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)\n\nprint(\"Computed using numpy\")\nprint(theta_best)\n\n# using Scikit-learn\n\n\nprint(\"Computed using Scikit-Learn LinearRegression\")\nlin_reg = LinearRegression()\nlin_reg.fit(X, y)\nprint(lin_reg.intercept_, lin_reg.coef_)\n\n\n# prediction and plotting\n\n\nX_new = np.array([[0], [2]])\nX_new_b = np.c_[np.ones((2, 1)), X_new]\ny_predict = X_new_b.dot(theta_best)\nplt.plot(X_new, y_predict, \"r-\") # red line through two points, based on the coefficients already computed\nplt.plot(X, y, \"b.\")\nplt.axis([0, 2, 0, 15])\nplt.show()\n\nprint(\"Computed using Scikit-Learn SGDRegressor\")\nsgd_reg = SGDRegressor(max_iter=50, penalty=None, eta0=0.1)\nsgd_reg.fit(X, y.ravel())  # SGDRegressor expects a 1-D target array\nprint(sgd_reg.intercept_, sgd_reg.coef_)\n\n# Polynomial Regression\nprint(\"Computing polynomial regression\")\nm = 100\nX = 6 * np.random.rand(m, 1) -3\ny = 3 * X **2 + 8*X + 5 + np.random.randn(m, 1)\n\npoly_features = PolynomialFeatures(degree=2, include_bias=False)\nX_poly = poly_features.fit_transform(X)\nlin_reg.fit(X_poly, y)\nprint(lin_reg.intercept_, lin_reg.coef_)\n\nplt.plot(X, y, 'b.')\nplt.show()\n\n\n# Elastic Net\nprint(\"Elastic Net\")\n\nX = 2 * np.random.rand(100, 1)\ny = 4 + 3 * X + np.random.randn(100, 1)\n\nelastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5)\nelastic_net.fit(X,y)\nprint(elastic_net.intercept_, 
elastic_net.coef_)\n\nprint(elastic_net.predict([[1.5]]))\n","sub_path":"TestesIniciais/cp04.py","file_name":"cp04.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"653222737","text":"# experiment to test the relationship between accuracy and\n# number of choices in the puzzle\n\nimport csv\n\n# Import the model and the generator\nfrom model.trainer import Trainer\nfrom model.generator import Generator\n\n\n# Define experiment parameters\nBASE_SIZES = [16]\nTRAINING_SIZES = [500000]\nALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmno'\nNUM_CHOICES = [2,3,4,5,6,7,8]\n\ndef train_model(base, training_size, length=2, choice=5, epochs=2000, batch_size=1000, dimension = 100, testing_size=100):\n    # initialise generator\n    alphabet = ALPHABET[:base]\n    g = Generator(alphabet, length, choice)\n\n    # generate data\n    train_data = g.generate_data(training_size)\n    test_data = g.generate_data(testing_size)\n\n    # initialise model\n    trainer = Trainer(train_data, test_data, epochs, dimension)\n\n    # run model on generated data\n    model = trainer.batch_train()\n\n    # evaluate the model on training and testing data\n    train_acc = trainer.evaluate(model, train_data[:200])\n    test_acc = trainer.evaluate(model, test_data)\n\n    # return the choice and the accuracy\n    return (choice, train_acc, test_acc)\n\ndef run_experiment():\n    print('experimenting with number of choices')\n    # iterate through training sizes\n    for training_size in TRAINING_SIZES:\n        results = []\n        # iterate through all base sizes\n        for base_size in BASE_SIZES:\n            # iterate through all choices\n            for choice in NUM_CHOICES:\n                result = train_model(base=base_size, training_size = training_size, choice=choice)\n                # Add result to results list\n                results.append(result)\n        # Save the list as a csv\n        with open(\"results/num_choices_experiment/num_choices_experiment.csv\", \"w\") as f:\n            writer = csv.writer(f)\n            writer.writerows(results)\n\n# do not run when imported\nif __name__ == '__main__':\n    run_experiment()\n","sub_path":"experiments/num_choices_experiment.py","file_name":"num_choices_experiment.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"474699862","text":"#\n# [64] Minimum Path Sum\n#\n# https://leetcode.com/problems/minimum-path-sum/description/\n#\n# algorithms\n# Medium (41.29%)\n# Total Accepted: 153.7K\n# Total Submissions: 371.7K\n# Testcase Example: '[[1,3,1],[1,5,1],[4,2,1]]'\n#\n# Given an m x n grid filled with non-negative numbers, find a path from top\n# left to bottom right which minimizes the sum of all numbers along its path.\n#\n# Note: You can only move either down or right at any point in time.\n#\n# Example:\n#\n#\n# Input:\n# [\n#   [1,3,1],\n#   [1,5,1],\n#   [4,2,1]\n# ]\n# Output: 7\n# Explanation: Because the path 1→3→1→1→1 minimizes the sum.\n#\n\n\nclass Solution:\n    def minPathSum(self, grid):\n        \"\"\"\n        :type grid: List[List[int]]\n        :rtype: int\n        \"\"\"\n        if len(grid) == 0:\n            return 0\n        m, n = len(grid), len(grid[0])\n        dp = [[0 for x in range(n)] for y in range(2)]\n        for i in range(m):\n            for j in range(n):\n                c, d = i & 1, (i+1) & 1\n                dp[c][j] = grid[i][j]\n                if i >= 1 and j >= 1:\n                    dp[c][j] += min(dp[d][j], dp[c][j-1])\n                elif i >= 1:\n                    dp[c][j] += dp[d][j]\n                elif j >= 1:\n                    dp[c][j] += dp[c][j-1]\n        return dp[(m-1) & 
1][n-1]\n","sub_path":"64.minimum-path-sum.python3.py","file_name":"64.minimum-path-sum.python3.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"217666969","text":"import os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + \"/../python\")\n\nimport pytest\nfrom unittest.mock import MagicMock, call\nimport random\nimport python.infotiv_launcher\nfrom python.infotiv_launcher import LaunchCMD\n\n\n@pytest.fixture()\ndef launcher():\n print('\\n*********Start*********')\n launcher = python.infotiv_launcher.Launcher()\n launcher.rc = MagicMock()\n yield launcher\n print('\\n**********End**********')\n\n# ---------------------------------------------------------------------------------\n# ------------------------ set_launch_position ------------------------------------\n# ---------------------------------------------------------------------------------\n\ndef test_encoders_ready_check_encoders_not_ready(launcher):\n # GIVEN\n launcher.encoders_ready = 0\n\n # WHEN\n return_value = launcher.encoder_ready_check()\n\n # THEN\n assert return_value == 0\n\n\ndef test_set_launch_position_encoders_not_ready(launcher):\n # GIVEN\n with pytest.raises(Exception) as err:\n launcher.encoders_ready = 0\n # WHEN\n launcher.set_launch_position(1)\n # THEN\n assert err.match('Encoder Not Ready')\n\n\n@pytest.mark.parametrize(\"invalid_data\", [(-1), 112])\ndef test_set_launch_position_encoders_ready_launch_position_invalid_type_of_error(launcher, invalid_data):\n # GIVEN\n with pytest.raises(Exception, match='out of bounds') as err:\n launcher.encoders_ready = 1\n # WHEN\n launcher.set_launch_position(invalid_data)\n # THEN\n assert err.type is ValueError\n\n\n@pytest.mark.parametrize(\"invalid_data\", [(-1), 112])\ndef test_set_launch_position_encoders_ready_launch_position_invalid_message(launcher, invalid_data):\n # GIVEN\n with pytest.raises(ValueError) as err:\n launcher.encoders_ready = 1\n # WHEN\n launcher.set_launch_position(invalid_data)\n # THEN\n err.match('out of bounds')\n\n\ndef test_set_launch_position_encoders_ready_launch_position_zero(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM2.return_value = (1, 2, 2)\n launcher.rc.ReadBuffers.return_value = (0, 0, 0x80)\n\n # WHEN\n launcher.set_launch_position(0)\n\n # THEN\n calls = [call(129, -2500, 2, 1),\n call(129, 0, 0, 0),\n call(129, 2500, 2188, 0),\n call(129, 0, 0, 0)]\n launcher.rc.SpeedDistanceM2.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM2.call_count == 4\n\n\ndef test_set_launch_position_encoders_ready_launch_position_higher_than_zero(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM2.return_value = (1, 4, 2) # launch_actual = 4 , launch_increment = -4\n launcher.rc.ReadBuffers.return_value = (0, 0, 0x80)\n\n # WHEN\n launcher.set_launch_position(1)\n\n # THEN\n\n calls = [call(129, -2500, 4, 1),\n call(129, 0, 0, 0),\n call(129, 2500, 2319, 0),\n call(129, 0, 0, 0)]\n launcher.rc.SpeedDistanceM2.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM2.call_count == 4\n\n\ndef test_set_launch_position_encoders_ready_launch_position_max(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM2.return_value = (1, -1.5, 2)\n launcher.rc.ReadBuffers.return_value = (0, 0, 0x80)\n\n # WHEN\n launcher.set_launch_position(111)\n\n # THEN\n\n calls = [call(129, 2500, 1.5, 1),\n call(129, 0, 0, 0),\n call(129, 
2500, 16991.5, 0),\n call(129, 0, 0, 0)]\n launcher.rc.SpeedDistanceM2.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM2.call_count == 4\n\n\ndef test_stop(launcher):\n # GIVEN\n launcher.rc.ForwardM1 = MagicMock(return_value=True)\n launcher.rc.ForwardM2 = MagicMock(return_value=True)\n\n # WHEN\n launcher.stop()\n\n # THEN\n launcher.rc.ForwardM1.assert_any_call(launcher.address, 0)\n launcher.rc.ForwardM2.assert_any_call(launcher.address, 0)\n launcher.rc.ForwardM1.assert_any_call(launcher.address_2,0)\n launcher.rc.ForwardM2.assert_any_call(launcher.address_2, 0)\n\n\ndef test_max_pitch_zero_increment(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM1.return_value = (1, 355000) # pitch increment = 0\n\n # WHEN\n launcher.max_pitch()\n\n # THEN\n\n calls = [call(launcher.address, launcher.pitch_speed_pulses, 0, 1),\n call(launcher.address, 0, 0, 0)]\n launcher.rc.SpeedDistanceM1.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM1.call_count == 2\n\n\ndef test_max_pitch_higher_than_zero_increment(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM1.return_value = (1, 2) # pitch increment = 354998\n\n # WHEN\n launcher.max_pitch()\n\n # THEN\n\n calls = [call(launcher.address, launcher.pitch_speed_pulses, 354998, 1),\n call(launcher.address, 0, 0, 0)]\n launcher.rc.SpeedDistanceM1.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM1.call_count == 2\n\n\ndef test_max_pitch_lower_than_zero_increment(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM1.return_value = (1, 355020) # pitch increment = -20\n\n # WHEN\n launcher.max_pitch()\n\n # THEN\n\n calls = [call(launcher.address, -launcher.pitch_speed_pulses, 20, 1),\n call(launcher.address, 0, 0, 0)]\n launcher.rc.SpeedDistanceM1.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM1.call_count == 2\n\n\n@pytest.mark.parametrize(\"test_input, expected\", [(12345.6789, 1234.57), (10, 1), (123, 12.3), (0, 0), (-10, -1)])\ndef test_battery_voltage_decimal_value(launcher, test_input, expected):\n # GIVEN\n launcher.rc.ReadMainBatteryVoltage.return_value = (128, test_input)\n # WHEN\n return_value = launcher.battery_voltage()\n # THEN\n assert return_value == expected\n\n# ---------------------------------------------------------------------------------\n# ------------------------ set_launch_variables -----------------------------------\n# ---------------------------------------------------------------------------------\n\ndef test_set_launch_variables_valid_positions_called(launcher):\n # GIVEN\n launcher.change_pitch = MagicMock()\n launcher.change_rotation = MagicMock()\n launcher.change_lift = MagicMock()\n\n # WHEN\n pitch_position = random.randint(0, launcher.pitch_length)\n rotation_position = random.randint(0, launcher.rotation_length)\n lift_position = random.randint(0, launcher.lift_length)\n launcher.set_launch_variables(pitch_position, rotation_position, lift_position)\n\n # THEN\n launcher.change_pitch.assert_called_with(pitch_position)\n launcher.change_rotation.assert_called_with(rotation_position)\n launcher.change_lift.assert_called_with(lift_position)\n\n\n# ---------------------------------------------------------------------------------\n# ------------------------ launch_control------------------------------------------\n# ---------------------------------------------------------------------------------\ndef test_launch_control_LaunchCMD_up(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n # WHEN\n 
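# Editor's note: a minimal stub, an assumption mirroring the \"down\" and \"stop\" tests\n    # below, so the mocked RoboClaw call has an explicit return value:\n    launcher.rc.ForwardM2.return_value = True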
\n    launcher.launch_control(LaunchCMD(1))\n\n    # THEN\n    launcher.rc.ForwardM2.assert_called_with(launcher.address_2, launcher.launch_speed_manual)\n    launcher.rc.BackwardM2.assert_not_called()\n\n\ndef test_launch_control_LaunchCMD_down(launcher):\n    # GIVEN\n    launcher.encoders_ready = 1\n\n    # WHEN\n    launcher.rc.BackwardM2.return_value = True\n    launcher.launch_control(LaunchCMD(2))\n\n    # THEN\n    launcher.rc.BackwardM2.assert_called_with(launcher.address_2, launcher.launch_speed_manual)\n    launcher.rc.ForwardM2.assert_not_called()\n\n\ndef test_launch_control_LaunchCMD_stop(launcher):\n    # GIVEN\n    launcher.encoders_ready = 1\n\n    # WHEN\n    launcher.rc.ForwardM2.return_value = True\n    launcher.launch_control(LaunchCMD(3))\n\n    # THEN\n    launcher.rc.ForwardM2.assert_called_with(launcher.address_2, 0)\n    launcher.rc.BackwardM2.assert_not_called()\n","sub_path":"test/infotiv_launcher/test_launch_functions.py","file_name":"test_launch_functions.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"61031067","text":"from sierra.base_parameters import BaseParameter\nfrom datetime import datetime, timedelta\nimport numpy as np\nfrom sierra.utilities.converter import convert\nimport math\n\n\nclass Millerton_Lake_Flood_Release_Requirement(BaseParameter):\n\n    should_drawdown = None\n\n    def setup(self):\n        super().setup()\n        num_scenarios = len(self.model.scenarios.combinations)\n        self.should_drawdown = np.empty(num_scenarios, bool)\n\n    def _value(self, timestep, scenario_index):\n\n        if self.model.mode == 'planning':\n            return 0\n\n        sid = scenario_index.global_id\n\n        # Note: the following logic follows the U.S. Army Corps of Engineers 1980 Water Control Manual for Friant Dam\n\n        month = self.datetime.month\n        day = self.datetime.day\n        month_day = '{}-{}'.format(month, day)\n\n        # Get flood curve\n        flood_curves = self.model.tables[\"Millerton Lake flood curve\"]\n        rainflood_curve_mcm = flood_curves.at[month_day, 'rainflood']\n        conditional_curve_mcm = flood_curves.at[month_day, 'conditional']\n\n        # Get previous storage\n        NML = self.model.nodes[\"Millerton Lake\"]\n        millerton_storage_mcm = NML.volume[scenario_index.global_id]\n\n        # Load base ag demand info\n        WYT = self.get('San Joaquin Valley WYT' + self.month_suffix, timestep, scenario_index)\n        Madera_df = self.model.tables[\"CVP Madera Canal demand\"][WYT]\n        Friant_Kern_df = self.model.tables[\"CVP Friant-Kern Canal demand\"][WYT]\n\n        # Set the default release to zero\n        release_mcm = 0.0\n\n        # 1. Conservation space release\n        if millerton_storage_mcm < conditional_curve_mcm:\n            pass\n\n        # 2. Rainflood space release\n        max_storage = NML.get_max_volume(scenario_index)\n        above_85_taf_mcm = max_storage - rainflood_curve_mcm - 104.85\n        if above_85_taf_mcm > 0.0 and (month >= 10 or month <= 3):  # 85 TAF\n            MPL = self.model.nodes[\"Mammoth Pool Reservoir\"]\n            mammoth_pool_space_mcm = MPL.max_volume - MPL.volume[sid]\n            rainflood_curve_mcm += min(above_85_taf_mcm, mammoth_pool_space_mcm)\n\n        if millerton_storage_mcm >= rainflood_curve_mcm:\n            release_mcm = millerton_storage_mcm - rainflood_curve_mcm\n\n        # 3. Conditional space release\n        elif 2 <= month <= 7:\n            # Note: Here, we are calculating forecasts directly as able, rather than using the USACE manual diagram.\n\n            # 3.1. 
Calculate forecasted unimpaired runoff into Millerton Lake, through July 31.\n # For now, assume perfect forecast.\n # TODO: update to use imperfect forecast?\n fnf_start = timestep.datetime\n fnf_end = datetime(timestep.year, 7, 31)\n forecasted_inflow_mcm = self.model.parameters[\"Full Natural Flow\"].dataframe[fnf_start:fnf_end].sum()\n\n # 3.2. Calculate today's and forecasted irrigation demand.\n forecast_days = 14\n ag_start = (month, day)\n if (month, day) <= (5, 31):\n ag_end = (6, 15)\n else:\n # min of +15 days (1-15 = today + 14 days)\n ag_end_date = timestep.datetime + timedelta(days=forecast_days)\n ag_end = min((ag_end_date.month, ag_end_date.day), (8, 1))\n\n # today_ag_demand = Madera_df[ag_start] + Friant_Kern_df[ag_start]\n\n # option 1: use actual forecasted demand\n # madera_forcasted_demand_mcm = Madera_df[ag_start:ag_end].sum() / 35.315 * 0.0864\n\n # option 2: use Madera canal capacity (i.e., assume we can release at capacity)\n madera_fcst_dem_mcm = self.model.nodes['CVP Madera Canal'].max_flow * forecast_days\n\n friant_kern_fcst_dem_mcm = Friant_Kern_df[ag_start:ag_end].sum() / 35.315 * 0.0864\n forecasted_ag_demand_mcm = madera_fcst_dem_mcm + friant_kern_fcst_dem_mcm\n\n # 3.3. Calculate total space required for flood control\n # slope from flood control chart = 1 / 1.6\n total_space_required_mcm = forecasted_inflow_mcm * 0.625 - forecasted_ag_demand_mcm\n\n # 3.4. Calculate upstream space, adjusted\n\n # 3.4.1. Get total previous storage in upstream reservoirs\n upstream_storage_space_mcm = 0.0\n for node in self.model.nodes:\n # TODO: make this more efficient\n if hasattr(node, 'volume') and node.name != 'Millerton Lake':\n upstream_storage_space_mcm += node.volume[scenario_index.global_id]\n\n # 3.4.2. Calculate adjustment to storage space\n # Note: this is approximated from the upper right of the Flood Control Diagram (Fig. A-11)\n days_since_feb1 = (timestep.datetime - datetime(timestep.year, 2, 1)).days\n adjustment_to_upstream_space_taf = 100 - 3.1623e-9 * math.exp(0.13284 * days_since_feb1)\n adjustment_to_upstream_space_mcm = adjustment_to_upstream_space_taf * 1.2335\n\n # 3.4.3. Subtract adjustment from upstream space\n adjusted_upstream_storage_space_mcm = upstream_storage_space_mcm - adjustment_to_upstream_space_mcm\n\n # 3.5. Calculate conditional reservation required\n # Note: It does not appear that this is actually used in the Flood Control Diagram\n conditional_space_required_mcm = total_space_required_mcm - adjusted_upstream_storage_space_mcm\n\n # 3.6. Compute total space available for flood control\n millerton_storage_space_mcm = NML.max_volume - millerton_storage_mcm\n total_space_available_mcm = millerton_storage_space_mcm + adjusted_upstream_storage_space_mcm\n # total_space_available_mcm = millerton_storage_space_mcm\n\n # 3.7. Finally, compute the supplemental release\n # Note that the goal is to spread the release out over time\n storage_difference_mcm = max(conditional_space_required_mcm - total_space_available_mcm, 0.0)\n\n # if storage_difference_mcm > 0.0:\n # print('{}: conditional; release: {} taf'.format(timestep.datetime, storage_difference_mcm / 1.2335))\n\n # if (month, day) <= (5, 5):\n #\n #\n # elif (month, day) <= (6, 5):\n #\n #\n # elif (month, day) <= (6, 30):\n #\n #\n # else:\n\n supplemental_release_mcm = storage_difference_mcm\n\n # 3.8. Calculate total release\n # Note that this differs from the example in the USACE manual, since we are only calculating instream\n # release here. 
In the manual, \"total release\" is instream release + ag. release\n release_mcm = max(release_mcm, supplemental_release_mcm)\n\n # This is our overall target release, without accounting for max downstream releases\n release_mcm = float(max(release_mcm, 0.0))\n\n # Assume Madera Canal can absorb some flood control capacity\n # Note that we cannot calculate Madera demand from the demand node/parameter, since that node depends on this.\n if release_mcm > 0.0:\n madera_canal_cfs = Madera_df[(month, day)]\n madera_canal_mcm = madera_canal_cfs / 35.315 * 0.0864\n madera_canal_max_mcm = self.model.nodes[\"Madera Canal.1\"].max_flow\n adjusted_release_mcm = release_mcm - (madera_canal_max_mcm - madera_canal_mcm)\n release_mcm = max(adjusted_release_mcm, 0.0)\n\n # ...reduce to limit flow to <= 8000 cfs (19.57 mcm)\n little_dry_creek_max_mcm = 19.57\n release_mcm = min(release_mcm, little_dry_creek_max_mcm)\n\n # DRAWDOWN OPERATIONS\n\n # Release slowly to a target storage of 350 TAF by Oct 31.\n\n if (7, 1) <= (month, day) <= (10, 31):\n nov1_target = 431.725 # 350 TAF\n\n # Stop this if we've hit the target\n if millerton_storage_mcm < nov1_target:\n self.should_drawdown[sid] = False\n\n # Check if New Melones filled\n if millerton_storage_mcm > nov1_target and not self.should_drawdown[sid]:\n day_before_yesterday = self.datetime + timedelta(days=-2)\n millerton_storage_df = self.model.recorders[\"Millerton Lake/storage\"].to_dataframe()\n prev_millerton_storage_mcm = millerton_storage_df.loc[day_before_yesterday].values[sid]\n if millerton_storage_mcm <= prev_millerton_storage_mcm:\n self.should_drawdown[sid] = True\n\n if self.should_drawdown[sid]:\n drawdown_release_mcm = millerton_storage_mcm - nov1_target\n # drawdown_release_mcm = millerton_storage_mcm - nov1_target\n prev_inflow_mcm = 0.0\n for node in ['Kerckhoff 1 PH', 'Kerckhoff 2 PH', 'IFR bl Kerckhoff Lake', 'Millerton Lake Inflow']:\n prev_inflow_mcm += self.model.nodes[node].prev_flow[sid]\n\n drawdown_release_mcm += prev_inflow_mcm\n\n drawdown_days = (datetime(timestep.year, 11, 1) - timestep.datetime).days\n release_mcm = drawdown_release_mcm / drawdown_days\n # release_mcm = max(release_mcm, drawdown_release_mcm)\n\n # Let's also limit ramping (for both instream flow and reservoir management reasons)\n prev_release_mcm = self.model.nodes[\"Millerton Lake Flood Release\"].prev_flow[scenario_index.global_id]\n # if (month, day) == (7, 1):\n # release_mcm = min(release_mcm, 3)\n if release_mcm > prev_release_mcm:\n release_mcm = min(release_mcm, prev_release_mcm * 1.2)\n elif release_mcm < prev_release_mcm:\n release_mcm = max(release_mcm, prev_release_mcm * 0.8)\n\n else:\n self.should_drawdown[sid] = False\n\n # if self.res_name == 'Millerton Flood Release':\n # release_mcm -= self.model.nodes['CVP Madera Canal'].max_flow\n # release_mcm = max(release_mcm, 0.0)\n\n release_cms = release_mcm / 0.0864\n\n if millerton_storage_mcm < 250:\n release_cms *= 0.5\n\n return release_cms\n\n def value(self, *args, **kwargs):\n try:\n val = self._value(*args, **kwargs)\n return convert(val, \"m^3 s^-1\", \"m^3 day^-1\", scale_in=1, scale_out=1000000.0)\n except Exception as err:\n print('\\nERROR for parameter {}'.format(self.name))\n print('File where error occurred: {}'.format(__file__))\n print(err)\n raise\n\n @classmethod\n def load(cls, model, data):\n return cls(model, 
**data)\n\n\nMillerton_Lake_Flood_Release_Requirement.register()\n","sub_path":"sierra/models/upper_san_joaquin/_parameters/Millerton_Lake_Flood_Release_Requirement.py","file_name":"Millerton_Lake_Flood_Release_Requirement.py","file_ext":"py","file_size_in_byte":10745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"185735617","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport os\nimport math\n\n\n#________________________________\n#matlab memoria\n\npath = '../results/windows/matlab/'\n#files = os.listdir(path)\n#print(files)\nfiles = ['ex15', 'cfd1', 'shallowwater', 'cfd2', 'parabolicfem', 'apache2', 'G3circuit']#, 'stoc-f']\n#files = ['ex15']\nresult_matlab_win = [0] * files.__len__()\nresult_matlab_ubuntu = [0] * files.__len__()\nj = 0\nfor name in files:\n perc = path + name + '.mat.txt'\n df = pd.read_table(perc, delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_matlab_win[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\npath = '../results/linux/matlab/'\nj = 0\nfor name in files:\n perc = path + name + '.mat.txt'\n df = pd.read_table(perc, delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_matlab_ubuntu[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\n# print(result_matlab)\n# print(result_python)\n\n#________________________________\n#python memoria\n\npath = '../results/windows/python/'\n#files = os.listdir(path)\n#print(files)\nfiles = ['ex15', 'cfd1', 'shallowwater', 'cfd2', 'parabolicfem', 'apache2', 'G3circuit']#, 'stoc-f']\n#files = ['ex15']\nresult_python_win = [0] * files.__len__()\nresult_python_ubuntu = [0] * files.__len__()\nj = 0\nfor name in files:\n perc = path + name + '.mat.txt'\n df = pd.read_table(perc, delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_python_win[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\npath = '../results/linux/python/'\nj = 0\nfor name in files:\n perc = path + name + '.mat.txt'\n df = pd.read_table(perc, delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_python_ubuntu[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\n#________________________________\n#R memoria\n\npath = '../results/windows/r/'\n#files = os.listdir(path)\n#print(files)\nfiles_r_win = ['ex15', 'cfd1', 'shallowwater', 'cfd2', 'parabolicfem']#, 'stoc-f']\n#files = ['ex15']\nresult_r_win = [0] * files_r_win.__len__()\n\nj = 0\nfor name in files_r_win:\n perc = path + name + '.mtx.txt'\n df = pd.read_table(perc, delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_r_win[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\nfiles_r_ubuntu = ['ex15', 'cfd1', 'shallowwater', 'cfd2', 'parabolicfem', 'apache2']#, 'stoc-f']\nresult_r_ubuntu = [0] * files_r_ubuntu.__len__()\npath = '../results/linux/r/'\nj = 0\nfor name in files_r_ubuntu:\n perc = path + name + '.mtx.txt'\n df = pd.read_table(perc, 
delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_r_ubuntu[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\n\n\n# Create bars matlab\nbarWidth = 0.20\nmem_bef_matlab_win = [0] * files.__len__()\nmem_after_matlab_win = [0] * files.__len__()\nmem_bef_matlab_ubuntu = [0] * files.__len__()\nmem_after_matlab_ubuntu = [0] * files.__len__()\n\nfor i in range(files.__len__()):\n mem_bef_matlab_win[i] = result_matlab_win[i][2]\n mem_after_matlab_win[i] = result_matlab_win[i][1]\n\n mem_bef_matlab_ubuntu[i] = result_matlab_ubuntu[i][2]\n mem_after_matlab_ubuntu[i] = result_matlab_ubuntu[i][1]\n\n# Create bars python\nbarWidth = 0.20\nmem_bef_python_win = [0] * files.__len__()\nmem_after_python_win = [0] * files.__len__()\nmem_bef_python_ubuntu = [0] * files.__len__()\nmem_after_python_ubuntu = [0] * files.__len__()\n\nfor i in range(files.__len__()):\n mem_bef_python_win[i] = result_python_win[i][2]\n mem_after_python_win[i] = result_python_win[i][1]\n\n mem_bef_python_ubuntu[i] = result_python_ubuntu[i][2]\n mem_after_python_ubuntu[i] = result_python_ubuntu[i][1]\n\n# Create bars r\nbarWidth = 0.15\nmem_bef_r_win = [0] * files.__len__()\nmem_after_r_win = [0] * files.__len__()\nmem_bef_r_ubuntu = [0] * files.__len__()\nmem_after_r_ubuntu = [0] * files.__len__()\n\nfor i in range(files_r_win.__len__()):\n mem_bef_r_win[i] = result_r_win[i][2]\n mem_after_r_win[i] = result_r_win[i][1]\n\nfor i in range(files_r_ubuntu.__len__()):\n mem_bef_r_ubuntu[i] = result_r_ubuntu[i][2]\n mem_after_r_ubuntu[i] = result_r_ubuntu[i][1]\n\n\n# position bars\nn = max(len(mem_bef_r_ubuntu),len(mem_after_r_ubuntu),len(mem_bef_r_win),len(mem_after_r_win))\npos = np.arange(n)\n\nbar1 = plt.bar(pos, mem_bef_r_ubuntu, width = barWidth, color = 'r', label='R before ubuntu')\nbar2 = plt.bar(pos+barWidth, mem_after_r_ubuntu, width = barWidth, color = 'b', label='R after ubuntu')\n\nbar3 = plt.bar(pos+barWidth+barWidth, mem_bef_python_ubuntu, width = barWidth, color = 'g', label='Python before ubuntu')\nbar4 = plt.bar(pos+barWidth+barWidth+barWidth, mem_after_python_ubuntu, width = barWidth, color = 'y', label='Python after ubuntu')\n\nbar5 = plt.bar(pos+barWidth * 4, mem_bef_matlab_ubuntu, width = barWidth, color = 'purple', label='Matlab before ubuntu')\nbar6 = plt.bar(pos+barWidth * 5, mem_after_matlab_ubuntu, width = barWidth, color = 'darkslategrey', label='Matlab after ubuntu')\n\nplt.legend(ncol=4,loc='upper left')\n\nplt.xticks([m + barWidth * 2 + barWidth/2 for m in range(len(mem_bef_r_ubuntu))], ['ex15', 'cfd1', 'shallow_water', 'cfd2', 'parabolic_fem', 'apache2', 'G3_circuit'])\nplt.yscale('log')\n\nfor rect in bar1 + bar2 + bar3 + bar4 + bar5 + bar6:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_width()/2.0, height, '%d' % int(height), ha='center', va='bottom')\n'''\nfor i in range(len(pos)):\n plt.text(x = pos[i] - 0.015, y = mem_bef_matlab_win[i], s = round(mem_bef_matlab_win[i],0), size = 7)\n plt.text(x = pos[i] + barWidth - 0.015, y = mem_after_matlab_win[i], s = round(mem_after_matlab_win[i],0), size = 7)\n plt.text(x = pos[i] + barWidth + barWidth - 0.015, y = mem_bef_matlab_ubuntu[i], s = round(mem_bef_matlab_ubuntu[i],0), size = 7)\n plt.text(x = pos[i] + barWidth + barWidth + barWidth - 0.015, y = mem_after_matlab_ubuntu[i], s = round(mem_after_matlab_ubuntu[i],0), size = 7)\n'''\n'''\n plt.text(x = pos[i] - 0.015, y = mem_bef_matlab_win[i] + alfa * 
mem_bef_matlab_win[i], s = round(mem_bef_matlab_win[i],2), size = 7, rotation=90)\n plt.text(x = pos[i] + barWidth - 0.015, y = mem_after_matlab_win[i] + alfa * mem_after_matlab_win[i], s = round(mem_after_matlab_win[i],2), size = 7, rotation=90)\n plt.text(x = pos[i] + barWidth + barWidth - 0.015, y = mem_bef_matlab_ubuntu[i] + alfa * mem_bef_matlab_ubuntu[i], s = round(mem_bef_matlab_ubuntu[i],2), size = 7, rotation=90)\n plt.text(x = pos[i] + barWidth + barWidth + barWidth - 0.015, y = mem_after_matlab_ubuntu[i] + alfa * mem_after_matlab_ubuntu[i], s = round(mem_after_matlab_ubuntu[i],2), size = 7, rotation=90)\n'''\n#plt.title('Python Ubuntu vs Python Windows with UMF_PACK FALSE')\nplt.ylabel('Memory')\nplt.xlabel('Matrix Name')\n\nfig = plt.gcf()\nfig.set_size_inches(18.5, 10.5)\n\n#plt.show()\nfig.savefig('../../Grafici/' + os.path.basename(__file__) + '.png', dpi=1000)\n","sub_path":"plot/5.3_Matlab vs python vs R ubuntu memoria.py","file_name":"5.3_Matlab vs python vs R ubuntu memoria.py","file_ext":"py","file_size_in_byte":7588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"292696093","text":"from fractions import Fraction\nfrom wick.expression import AExpression\nfrom wick.wick import apply_wick\nfrom wick.convenience import one_e, two_e, E1, E2, braE1, commute\n\nH1 = one_e(\"f\", [\"occ\", \"vir\"], norder=True)\nH2 = two_e(\"I\", [\"occ\", \"vir\"], norder=True)\nH = H1 + H2\n\nbra = braE1(\"occ\", \"vir\")\nT1 = E1(\"t\", [\"occ\"], [\"vir\"])\nT2 = E2(\"t\", [\"occ\"], [\"vir\"])\nT = T1 + T2\n\nHT = commute(H, T)\nHTT = commute(HT, T)\nHTTT = commute(commute(commute(H2, T1), T1), T1)\n\nS = bra*(H + HT + Fraction('1/2')*HTT + Fraction('1/6')*HTTT)\nout = apply_wick(S)\nout.resolve()\nfinal = AExpression(Ex=out)\nprint(final)\n","sub_path":"examples/ccsd_T1.py","file_name":"ccsd_T1.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"546735396","text":"from bs4 import BeautifulSoup\r\nfrom lxml import html\r\nimport requests\r\nfrom textblob import TextBlob\r\nimport urllib, json\r\n\r\n# Global Variables\r\n# list of results\r\nresults = []\r\n# which search methods are available\r\nsearchA = True\r\nsearchB = True\r\nsearchC = True\r\nsearchD = True\r\n\r\n\r\n# Generic google search\r\n# Basic search results\r\n# unstable / unpredictable / numerous\r\ndef searchMethodA(topic):\r\n global results\r\n results = []\r\n USER_AGENT = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}\r\n response = requests.get('https://www.google.com/search?q={}&num={}&hl={}'.format(topic, 100, 'en'),\r\n headers=USER_AGENT)\r\n response.raise_for_status()\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n page = soup.find_all('div', attrs={'class': 'g'})\r\n for a in page:\r\n source = a.find('a', href=True)\r\n name = a.find('h3')\r\n info = a.find('span', attrs={'class': 'st'})\r\n if source and name:\r\n source = source['href']\r\n title = name.get_text()\r\n if info:\r\n info = info.get_text()\r\n if source != '#':\r\n results.append({topic, name, info})\r\n\r\n\r\n# Google search\r\n# Collects related titles at top of search\r\n# Sometimes works well depending on input\r\n# unstable / unreliable / inconsistent\r\ndef searchMethodB(topic):\r\n USER_AGENT = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 
(KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}\r\n response = requests.get('https://www.google.com/search?q={}&num={}&hl={}'.format(topic, 100, 'en'),\r\n headers=USER_AGENT)\r\n response.raise_for_status()\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n scrape = soup.find_all('div', attrs={'class': 'kltat'})\r\n for a in scrape:\r\n results.append(a.find('span'))\r\n\r\n\r\n# Database of popular short crossword puzzle words based on year\r\n# Results available for years 1942 - 2020\r\n# First test search method\r\n# Intended as a extra side search method\r\n# stable / reliable / limited\r\n# https://www.xwordinfo.com/popular\r\ndef searchMethodC(year):\r\n global results\r\n if not (1942 <= int(year) <= 2020):\r\n print() # do nothing\r\n else:\r\n page = requests.get(\"https://www.xwordinfo.com/Popular?year=\" + str(year))\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n table = soup.table\r\n tableRows = table.find_all('tr')\r\n for row in tableRows:\r\n t = row.find_all('td')\r\n info = [i.text for i in t]\r\n results.append(info)\r\n\r\n\r\ndef organizeMethodDOutput():\r\n output_File = open(\"output.txt\", \"r\")\r\n toBeCleaned = output_File.read()\r\n output_File.close()\r\n uncleanList = list(toBeCleaned)\r\n output_File = open(\"output.txt\", \"w\")\r\n preWordCounter = 0\r\n lineCounter = 0\r\n pre = list('\"word\":\"')\r\n cur = ''\r\n for c in uncleanList:\r\n if preWordCounter >= 8 and uncleanList[lineCounter] != '\"':\r\n cur = cur + uncleanList[lineCounter]\r\n elif preWordCounter >= 8 and uncleanList[lineCounter] == '\"':\r\n results.append(cur)\r\n output_File.write(cur + '\\n')\r\n cur = ''\r\n preWordCounter = 0\r\n elif pre[preWordCounter] == uncleanList[lineCounter]:\r\n preWordCounter = preWordCounter + 1\r\n lineCounter = lineCounter + 1\r\n output_File.close()\r\n\r\n\r\n# Search a web site that finds related words\r\n# Still being worked on\r\n# Issues with flexbox\r\n# stable / effective / numerous\r\n# https://relatedwords.org/\r\ndef searchMethodD(topic):\r\n page = requests.get('https://relatedwords.org/api/related?term=' + topic)\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n output_File = open(\"output.txt\", \"w\")\r\n output_File.write(str(soup))\r\n output_File.close()\r\n organizeMethodDOutput()\r\n\r\n\r\n# Determines if results are sufficient\r\n# Currently only return true for testing purposes\r\n# Incomplete\r\ndef areResultsGood():\r\n return True\r\n\r\n\r\n# Prints current results\r\ndef printResults():\r\n for a in results:\r\n print(a)\r\n\r\n\r\n# Clean results array of extra unnecessary characters\r\n# Incomplete\r\n# Waiting until search methods are complete\r\ndef cleanResults():\r\n print()\r\n\r\n\r\n# Selects next search method\r\ndef search(sTopic, sType, sInput):\r\n global searchA\r\n global searchB\r\n global searchC\r\n global searchD\r\n resultsFound = False\r\n if sType == 'a' and searchA:\r\n searchMethodA(sTopic)\r\n resultsFound = areResultsGood()\r\n searchA = False\r\n elif sType == 'b' and searchB:\r\n searchMethodB(sTopic)\r\n resultsFound = areResultsGood()\r\n searchB = False\r\n elif sType == 'c' and searchC:\r\n searchMethodC(sInput)\r\n resultsFound = areResultsGood()\r\n searchC = False\r\n elif sType == 'd' and searchD:\r\n searchMethodD(sTopic)\r\n resultsFound = areResultsGood()\r\n searchD = False\r\n elif searchA:\r\n searchMethodA(sTopic)\r\n resultsFound = areResultsGood()\r\n searchA = False\r\n elif searchB:\r\n searchMethodB(sTopic)\r\n resultsFound = 
areResultsGood()\r\n        searchB = False\r\n    elif searchC:\r\n        searchMethodC(sInput)\r\n        resultsFound = areResultsGood()\r\n        searchC = False\r\n    elif searchD:\r\n        searchMethodD(sTopic)\r\n        resultsFound = areResultsGood()\r\n        searchD = False\r\n    if (not searchA and not searchB and not searchC and not searchD) or resultsFound:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\n# Start of program\r\ndef programStart(searchTopic, searchType, searchInput):\r\n    searchesDone = False\r\n    while not searchesDone:\r\n        searchesDone = search(searchTopic, searchType, searchInput)\r\n\r\n\r\n# Tests\r\n# programStart(\"pizza\", 'a', 0)\r\n# programStart(\"movies\", 'b', 0)\r\n# programStart(\"\", 'c', 2006)\r\ninput_file = open(\"input.txt\", \"r\")\r\naSearchTerm = input_file.read()\r\ninput_file.close()\r\nprogramStart(aSearchTerm, 'd', 0)\r\n# printResults()\r\n","sub_path":"bin/scraperRoughDraft.py","file_name":"scraperRoughDraft.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"335078815","text":"list1 = ['张三', '李四', '刘老二', '王麻子', '王达成', '隔壁老王']\n# Write code to count how many of the names in the list are three characters long\n\n# Approach: first, iterate over every name\nnum1 = 0  # stores how many names have 3 characters\nfor n in list1:  # n is each member of the list; the loop runs once per member of list1\n    sum = 0\n    for a in n:  # a is each character of the string n; the inner loop runs once per character of n\n        sum += 1\n    if sum == 3:\n        num1 += 1\nprint(num1)\n# The answer is simply how many times sum comes out as 3\n\n# 1st iteration: n is 张三\n# 2nd iteration: n is 李四\n# 3rd iteration: n is 刘老二\n# 4th iteration: n is 王麻子\n# 5th iteration: n is 隔壁老王\n# n = \"刘二\"\n# sum = 0\n# for a in n:\n#     sum += 1\n# print(sum)\n","sub_path":"04、 python编程/day04/3-code/10-课堂练习-判断三个字的名字.py","file_name":"10-课堂练习-判断三个字的名字.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"22349005","text":"#code adapted from http://stackoverflow.com/questions/19560498/faster-way-to-remove-stop-words-in-python\nimport sys\nimport os\n\ndef stripStopWords(text, i):\n\t#print(text)\n\t#testtext = \"hi the me the bicycle maryeileen a the them hi\"\n\twith open('stop_words.txt', 'r') as myfile:\n\t\tdata=myfile.read()\n\n\tstop_words = data.split()\n\t#print(testtext)\n\t#print(stop_words)\n\n\t#text = text.decode('unicode_escape').encode('ascii','ignore')\n\t#tokenized_text = word_tokenize(text)\n\t#print(tokenized_text)\n\tclean_text = ' '.join([word for word in text.lower().split() if word not in stop_words])\n\t#print(clean_text)\n\t#print(clean_text)\n\t#path = 'noStopWords_files'\n\t#if not os.path.exists(path):\n\t#\tos.makedirs(path)\n\t#f = str(i)\n\t#clean_text.decode('utf-8')\n\t#with open(os.path.join(path, f), 'wb') as temp_file:\n\t#\ttemp_file.write(clean_text)\n\treturn clean_text\n\ndef main():\n\tpath = 'strippedHTML_files'\n\ti = 0 \n\tfor filename in os.listdir(path):\n\t\twith open(os.path.join(path, filename), 'r') as myfile:\n\t\t\t\n\t\t\tdata=myfile.read()\n\t\t\tdata = str(data)\n\t\t\tstripStopWords(data, i)\n\t\t\ti = i+1\nif __name__ == '__main__':\n\tmain()","sub_path":"removeStopWords.py","file_name":"removeStopWords.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"452247154","text":"from TestbedManagementSystem.Files.Database.configurationDB import configurationDB\r\n\r\nclass NetworkAdapterTable:\r\n    def __init__(self, configuration):\r\n        self.__NAstring = configuration['NAstring']\r\n        self.__NAvmName= configuration['NAvmName']\r\n        self.__dictVm = configuration['dictVM']\r\n        \r\n\r\n    def insertNetAdapt(self,configuration):
\r\n        conn = configurationDB.connect(self)\r\n        cur = conn.cursor() \r\n        insert = \"INSERT INTO NETWORK_ADAPTER (NAstring,NAvmName) VALUES ('\" + configuration['NAstring'] + \"','\" + configuration['NAvmName'] + \"')\"\r\n        try:\r\n            cur.execute(insert)\r\n            conn.commit()\r\n            print('Add Network Adapter\\n')\r\n        except:\r\n            conn.rollback()\r\n        cur.close()\r\n        conn.close()\r\n    \r\n    def selectRefeNetAdapt(self,configuration):\r\n        conn = configurationDB.connect(self)\r\n        cur = conn.cursor() \r\n        select = \"SELECT * FROM NETWORK_ADAPTER WHERE NAstring= '\" + configuration['NAstring'] +\"' AND NAvmName = '\" + configuration['NAvmName']+\"'\"\r\n        try:\r\n            cur.execute(select) \r\n            conn.commit()\r\n            print('Selected Network Adapter\\n')\r\n        except:\r\n            conn.rollback()\r\n        \r\n        for row in cur:\r\n            print()\r\n            print (row)\r\n        cur.close()\r\n        conn.close()\r\n    \r\n    \r\n    def updateNetAdapt(self,configuration):\r\n        conn = configurationDB.connect(self)\r\n        cur = conn.cursor()\r\n        \r\n        update = \"UPDATE NETWORK_ADAPTER SET NAstring= %s , NAvmName= %s WHERE NAstring= %s AND NAvmName= %s \"\r\n        try:\r\n            cur.execute(update,(configuration['NAstring'], configuration['NAvmName'],configuration['NAstring'],configuration['NAvmName']))\r\n            conn.commit()\r\n        except:\r\n            conn.rollback()\r\n        print('Network Adapters Updated\\n')\r\n        cur.close()\r\n        conn.close()\r\n        \r\n        \r\n    def removeNetAdapt(self,configuration):\r\n        conn = configurationDB.connect(self)\r\n        cur = conn.cursor()\r\n        remove =\"DELETE FROM NETWORK_ADAPTER WHERE NAstring= '\" + configuration['NAstring'] +\"' AND NAvmName = '\" + configuration['NAvmName']+\"'\"\r\n        try:\r\n            cur.execute(remove)\r\n            conn.commit()\r\n        except:\r\n            conn.rollback()\r\n        print('Network Adapter deleted\\n')\r\n        \r\n    def __toDict(self):\r\n        dict = {'NAstring':self.__NAstring,\r\n                'NAvmName':self.__NAvmName, \r\n                'dictVM':self.__dictVm}\r\n        return dict","sub_path":"TestbedManagementSystem/Files/JesusChavez/stargate/binary/Database/NetworkAdapterTable.py","file_name":"NetworkAdapterTable.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"467153811","text":"from django.contrib import admin\nfrom sispos.report.models import Report, ParecerOrientadorMestrado\n\n\nclass ParecerInline(admin.StackedInline):\n    model = ParecerOrientadorMestrado\n    extra = 1\n    fieldsets = (\n        ('primeiro semestre', {\n            'classes': ('collapse',),\n            'fields': ('s1_desempenho', 's1_projeto', 's1_outras_atividades')\n        }\n        ),\n        ('segundo semestre', {\n            'classes': ('collapse',),\n            'fields': ('s2_desempenho', 's2_metodologia', 's2_abordagem', 's2_outras_atividades')\n        }\n        ),\n        ('terceiro semestre', {\n            'classes': ('collapse',),\n            'fields': ('s3_resultados', 's3_perspectivas', 's3_resumo', 's3_outras_atividades')\n        }\n        )\n    )\n\n\nclass ReportModelAdmin(admin.ModelAdmin):\n    list_display = ['aluno_name']\n    inlines = [ParecerInline]\n\n    def aluno_name(self, obj):\n        return obj.aluno.get_full_name()\n\n    aluno_name.short_description = 'Aluno'\n\n\nclass ParecerOrientadorMestradoModelAdmin(admin.ModelAdmin):\n    pass\n\nadmin.site.register(Report, ReportModelAdmin)\nadmin.site.register(ParecerOrientadorMestrado, ParecerOrientadorMestradoModelAdmin)","sub_path":"sispos/report/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"527287512","text":"from tkinter import *\r\nfrom functools import partial # To prevent unwanted windows
partial  # To prevent unwanted windows\n\nimport random\n\nclass Quiz:\n    def __init__(self):\n\n        # Formatting variables...\n        background_color = \"#8FF7A7\"\n        btn_color = \"#51BBFE\"\n        font_color = \"black\"\n\n        self.quiz_frame = Frame(width=500, height=500,bg=background_color)\n        self.quiz_frame.grid()\n\n        self.starting_questoin = IntVar()\n        self.starting_questoin.set(0)\n\n        self.heading_frame = Frame(self.quiz_frame,bg=background_color)\n        self.heading_frame.grid(row=0)\n\n        # Quiz (row 0)\n        self.Quiz_label = Label(self.heading_frame,text=\"MATH QUIZ\",\n                                font=(\"arial 20 bold\"),\n                                fg=font_color,bg=background_color)\n        self.Quiz_label.grid(row=0)\n\n        # Quiz (row 1)\n        self.choicing_frame = Frame(self.quiz_frame,bg=background_color)\n        self.choicing_frame.grid(row=1)\n\n        self.amount_error_label = Label(self.choicing_frame, font=\"arial 10 italic\",\n                                        text=\"\", bg=background_color)\n        self.amount_error_label.grid(row=0)\n\n        self.cho_num_label = Label(self.choicing_frame,text=\"How many questions would you like?\",\n                                   font=(\"arial 10 bold\"),\n                                   fg=font_color,bg=background_color)\n        self.cho_num_label.grid(row=1)\n\n        self.cho_num_entry = Entry(self.choicing_frame,\n                                   font=\"arial 10 bold\", width=5)\n        self.cho_num_entry.grid(row=1,column=1)\n\n        self.low_num_label = Label(self.choicing_frame,text=\"lowest number\",\n                                   font=\"arial 10 bold\",\n                                   fg=font_color,bg=background_color)\n        self.low_num_label.grid(row=2)\n\n        self.low_num_entry = Entry(self.choicing_frame,\n                                   font=\"arial 10 bold\", width=5)\n        self.low_num_entry.grid(row=2,column=1)\n\n        self.high_num_label = Label(self.choicing_frame,text=\"highest number\",\n                                    font=\"arial 10 bold\",\n                                    fg=font_color,bg=background_color)\n        self.high_num_label.grid(row=3)\n\n        self.high_num_entry = Entry(self.choicing_frame,\n                                    font=\"arial 10 bold\", width=5)\n        self.high_num_entry.grid(row=3,column=1)\n\n        self.question_amount_btn = Button(self.choicing_frame,text=\"Enter\",bg=btn_color,\n                                          font=\"arial 14 bold\",command=self.check_question)\n        self.question_amount_btn.grid(row=4)\n\n        self.cho_btn__frame = Frame(self.quiz_frame, width=300, bg=background_color)\n        self.cho_btn__frame.grid(row=2)\n\n        self.addition_btn = Button(self.cho_btn__frame,text=\"Addition\", font=\"arial 20 bold\", fg=font_color,\n                                   bg=btn_color,padx=35,command=self.addition)\n        self.addition_btn.grid(row=1)\n\n        self.addition_label = Label(self.cho_btn__frame,\n                                    text=\"this is a place holder\",\n                                    font=\"arial 10 bold\", fg=font_color,bg=\"#F7FE72\")\n        self.addition_label.grid(row=1,column=1)\n\n        self.division_btn = Button(self.cho_btn__frame,\n                                   text=\"Division\", font=\"arial 20 bold\", fg=font_color,\n                                   bg=btn_color,padx=36,command=self.division)\n        self.division_btn.grid(row=2)\n\n        self.division_label = Label(self.cho_btn__frame,\n                                    text=\"this is a place holder\",\n                                    font=\"arial 10 bold\", fg=font_color,bg=\"#F7FE72\")\n        self.division_label.grid(row=2,column=1)\n\n        self.multiplication_btn = Button(self.cho_btn__frame,\n                                         text=\"Multiplication\", font=\"arial 20 bold\", fg=font_color,\n                                         bg=btn_color,command=self.multiplication)\n        self.multiplication_btn.grid(row=3)\n\n        # named multiplication_label so it does not shadow the multiplication() method\n        self.multiplication_label = Label(self.cho_btn__frame,\n                                    text=\"this is a place holder\",\n                                    font=\"arial 10 bold\", fg=font_color,bg=\"#F7FE72\")\n        self.multiplication_label.grid(row=3,column=1)\n\n        self.help_frame = Frame(self.quiz_frame)\n        self.help_frame.grid(row=3)\n\n        self.addition_btn.config(state=DISABLED)\n        self.division_btn.config(state=DISABLED)\n        self.multiplication_btn.config(state=DISABLED)\n\n        # Help Button (row 2)\n        
self.help_button = Button(self.help_frame,\n                                  text=\"Help\", font=\"arial 14 bold\", fg=\"black\",\n                                  bg=\"green\", command=self.help)\n        self.help_button.grid(row=2)\n\n    def help(self):\n        get_help = Help(self)\n        get_help.help_text.configure(text=\"You have to pick a topic and you will be tested on it\")\n\n    def multiplication(self):\n        get_multiplication = Multiplication(self)\n        get_multiplication.multiplication_text.configure(text=\"Fill in the boxes\")\n\n    def addition(self):\n        get_addition = Addition(self)\n        get_addition.addition_text.configure(text=\"Fill in the boxes\")\n\n    def check_question(self):\n        starting_questoin = self.cho_num_entry.get()\n\n        # Set error background colour (and assume that there are no\n        # errors at the start)\n        error_back = \"#ffafaf\"\n        has_error = \"no\"\n        error_feedback = \"\"\n\n        # change background to white (for testing purposes) ...\n        self.cho_num_entry.config(bg=\"white\")\n        self.amount_error_label.config(text=\"\")\n\n        self.addition_btn.config(state=DISABLED)\n        self.division_btn.config(state=DISABLED)\n        self.multiplication_btn.config(state=DISABLED)\n\n        try:\n            starting_questoin = int(starting_questoin)\n\n            if starting_questoin < 1:\n                has_error = \"yes\"\n                error_feedback = \"You need to enter a number greater than zero\"\n            elif starting_questoin > 20:\n                has_error = \"yes\"\n                error_feedback = \"unfortunately that's too high\"\n            else:\n                # between 1 and 20 inclusive - unlock the topic buttons\n                self.addition_btn.config(state=NORMAL)\n                self.division_btn.config(state=NORMAL)\n                self.multiplication_btn.config(state=NORMAL)\n\n        except ValueError:\n            has_error = \"yes\"\n            error_feedback = \"Please fill the boxes with whole numbers\"\n\n        if has_error == \"yes\":\n            self.cho_num_entry.config(bg=error_back)\n            self.amount_error_label.config(text=error_feedback)\n\n        else:\n            self.starting_questoin.set(starting_questoin)\n\n    def division(self):\n        starting_questoin = self.cho_num_entry.get()\n\n        Division(self, starting_questoin)\n\n        # hide start up window\n        root.withdraw()\n\n\n\nclass Division:\n    def __init__(self, partner,starting_questoin):\n        starting_questoin = int(starting_questoin)\n        background_color = \"#8FF7A7\"\n        low_number = 1\n        high_number = 10\n        number_enter = [low_number,high_number]\n\n        # disable button\n        partner.addition_btn.config(state=DISABLED)\n        partner.division_btn.config(state=DISABLED)\n        partner.multiplication_btn.config(state=DISABLED)\n        partner.help_button.config(state=DISABLED)\n\n        # Set up the Division window\n        self.division_box = Toplevel()\n\n        # Set up GUI Frame\n        self.division_frame = Frame(self.division_box, width=300, bg=background_color)\n        self.division_frame.grid()\n        # Set up Division heading (row 0)\n        self.heading = 
Label(self.division_frame,\n                             text=\"Division\",\n                             font=\"arial 20 bold\",bg=background_color)\n        self.heading.grid(row=0)\n\n        self.questions_label = Label(self.division_frame,\n                                     text=starting_questoin,\n                                     font=\"arial 10 bold\", fg=\"black\",bg=background_color)\n        self.questions_label.grid(row=1)\n\n        # Division text (label, row 2)\n        self.division_text = Label(self.division_frame,\n                                   text=\"Fill the boxes\",\n                                   justify=LEFT,width=50, bg=background_color,wrap=200)\n        self.division_text.grid(row=2)\n\n        # Dismiss button (row 3)\n        self.dismiss_btn = Button(self.division_frame,text=\"Dismiss\",width=10,bg=\"red\",\n                                  font=\"arial 10 bold\",\n                                  command=partial(self.close_division, partner))\n        self.dismiss_btn.grid(row=3)\n\n    def close_division(self, partner):\n        # Put help button back to normal\n        partner.addition_btn.config(state=NORMAL)\n        partner.division_btn.config(state=NORMAL)\n        partner.multiplication_btn.config(state=NORMAL)\n        partner.help_button.config(state=NORMAL)\n        self.division_box.destroy()\n\n\nclass Addition:\n    def __init__(self, partner):\n        background_color = \"#8FF7A7\"\n\n        # disable button\n        partner.addition_btn.config(state=DISABLED)\n        partner.division_btn.config(state=DISABLED)\n        partner.multiplication_btn.config(state=DISABLED)\n        partner.help_button.config(state=DISABLED)\n\n        # Set up the Addition window\n        self.addition_box = Toplevel()\n\n        # Set up GUI Frame\n        self.addition_frame = Frame(self.addition_box, width=300, bg=background_color)\n        self.addition_frame.grid()\n        # Set up Addition heading (row 0)\n        self.heading = Label(self.addition_frame,\n                             text=\"Addition\",\n                             font=\"arial 20 bold\",bg=background_color)\n        self.heading.grid(row=0)\n        # Addition text (label, row 1)\n        self.addition_text = Label(self.addition_frame,\n                                   text=\"Fill the boxes\",\n                                   justify=LEFT,width=50, bg=background_color,wrap=200)\n        self.addition_text.grid(column=0,row=1)\n\n        # Dismiss button (row 2)\n        self.dismiss_btn = Button(self.addition_frame,text=\"Dismiss\",width=10,bg=\"red\",\n                                  font=\"arial 10 bold\",\n                                  command=partial(self.close_addition, partner))\n        self.dismiss_btn.grid(row=2, pady=10)\n\n    def close_addition(self, partner):\n        # Put help button back to normal\n        partner.addition_btn.config(state=NORMAL)\n        partner.division_btn.config(state=NORMAL)\n        partner.multiplication_btn.config(state=NORMAL)\n        partner.help_button.config(state=NORMAL)\n        self.addition_box.destroy()\n\n\nclass Multiplication:\n    def __init__(self, partner):\n        background_color = \"#8FF7A7\"\n\n        # disable button\n        partner.addition_btn.config(state=DISABLED)\n        partner.division_btn.config(state=DISABLED)\n        partner.multiplication_btn.config(state=DISABLED)\n        partner.help_button.config(state=DISABLED)\n\n        # Set up the Multiplication window\n        self.Multiplication_box = Toplevel()\n\n        # Set up GUI Frame\n        self.Multiplication_frame = Frame(self.Multiplication_box, width=300, bg=background_color)\n        self.Multiplication_frame.grid()\n        # Set up Multiplication heading (row 0)\n        self.heading = Label(self.Multiplication_frame,\n                             text=\"Multiplication\",\n                             font=\"arial 20 bold\",bg=background_color)\n        self.heading.grid(row=0)\n        # Multiplication text (label, row 1)\n        self.multiplication_text = Label(self.Multiplication_frame,\n                                         text=\"Fill the boxes\",\n                                         justify=LEFT,width=50, bg=background_color,wrap=200)\n        self.multiplication_text.grid(column=0,row=1)\n\n        # Dismiss button (row 2)\n        self.dismiss_btn = Button(self.Multiplication_frame,text=\"Dismiss\",width=10,bg=\"red\",\n                                  font=\"arial 10 bold\",\n                                  command=partial(self.close_multiplication, partner))\n        self.dismiss_btn.grid(row=2, pady=10)\n\n    
def close_multiplication(self, partner):\n # Put help button back to normal\n partner.addition_btn.config(state=NORMAL)\n partner.division_btn.config(state=NORMAL)\n partner.multiplication_btn.config(state=NORMAL)\n partner.help_button.config(state=NORMAL)\n self.Multiplication_box.destroy()\nclass Help:\n def __init__(self, partner):\n background_color = \"#8FF7A7\"\n\n # disable help button\n partner.addition_btn.config(state=DISABLED)\n partner.division_btn.config(state=DISABLED)\n partner.multiplication_btn.config(state=DISABLED)\n partner.help_button.config(state=DISABLED)\n\n # Set up child window (ie: help box)\n self.help_box = Toplevel()\n\n # Set up GUI Frame\n self.help_frame = Frame(self.help_box, width=300, bg=background_color)\n self.help_frame.grid()\n # Set up help heading (row 0)\n self.how_heading = Label(self.help_frame,\n text=\"Help / Instruction\",\n font=\"arial 20 bold\",bg=background_color)\n self.how_heading.grid(row=0)\n # Help text (label, row 1)\n self.help_text = Label(self.help_frame,\n text=\"i have no idea where this is going to be\",\n justify=LEFT,width=50, bg=background_color,wrap=200)\n self.help_text.grid(column=0,row=1)\n\n # Dismiss button (row 2)\n self.dismiss_btn = Button(self.help_frame,text=\"Dismiss\",width=10,bg=\"red\",\n font=\"arial 10 bold\",\n command=partial(self.close_help, partner))\n self.dismiss_btn.grid(row=2, pady=10)\n\n def close_help(self, partner):\n # Put help button back to normal\n partner.addition_btn.config(state=NORMAL)\n partner.division_btn.config(state=NORMAL)\n partner.multiplication_btn.config(state=NORMAL)\n partner.help_button.config(state=NORMAL)\n self.help_box.destroy()\n\n# main routine\nif __name__ == \"__main__\":\n root = Tk()\n root.title(\"Quiz\")\n something = Quiz()\n root.mainloop()\n","sub_path":"04_page.py","file_name":"04_page.py","file_ext":"py","file_size_in_byte":15357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"531464269","text":"'''\nTEST SCRIPT FOR TRIPLE HADAMARD WITH MASK UNIFORM NORMALIZATION AND CLIPPING\nBy Filipe Chagas\n'''\n\nimport sys\nsys.path.append('../src')\nsys.path.append('../src/plugins')\n\nimport pipeline as pl\nimport hadamard as hd\nimport numpy as np\nimport random as rd\nimport cv2 as cv\n\n#pipeline\nmy_pipeline = pl.Pipeline()\n\n#buses\nmy_pipeline.create_bus('input', pl.BusFormat.Triple)\nmy_pipeline.create_bus('mask', pl.BusFormat.Triple)\nmy_pipeline.create_bus('output', pl.BusFormat.Triple)\n\n#pipes\nmy_had = hd.TripleHadamardPipe()\n\n#params\nmy_had.set_param('normalize_mask', True)\nmy_had.set_param('uniform_normalization', True)\nmy_had.set_param('clipping', True)\n\n#build\nmy_pipeline.insert_pipe('my_had', my_had, ['input', 'mask'], ['output'])\nmy_pipeline.set_pipe_layer('my_had', 0)\n\nmy_pipeline.set_layers_sequence([0])\n\n#test\ntolerance = 1\nclip = np.vectorize(lambda x : 255 if x > 255 else (0 if x < 0 else (x)))\n\nif __name__ == '__main__':\n for n in range(100):\n print('TEST ' + str(n)) \n \n #input data\n in_data = np.array(np.random.rand(rd.randint(1,500),rd.randint(1,500), 3)*255, np.uint8)\n print('input')\n print(in_data)\n\n #expected output\n mask = np.array(np.random.rand(rd.randint(1,500),rd.randint(1,500), 3)*255, np.uint8)\n mask_rs = cv.resize(mask, (in_data.shape[1], in_data.shape[0])).astype(np.float)\n mask_unorm = mask_rs / np.max(mask_rs)\n\n expected_out = clip(in_data.astype(np.float) * mask_unorm)\n expected_out = expected_out.astype(np.uint8)\n \n print('expected 
out')\n        print(expected_out)\n\n        #process and get output\n        my_pipeline.buses['input'].set_data(in_data)\n        my_pipeline.buses['mask'].set_data(mask)\n        my_pipeline.process()\n        out_data = my_pipeline.buses['output'].get_data()\n\n        print('out')\n        print(out_data)\n\n        #check: the output must match the expected result within `tolerance` per element\n        difference = out_data.astype(np.int64) - expected_out.astype(np.int64)\n        \n        if (np.abs(difference) <= tolerance).all():\n            print('SUCCESS')\n        else:\n            print('FAILURE')\n            break\n\n        my_pipeline.reset_buses()\n\n    ","sub_path":"tests/test8.py","file_name":"test8.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"410366146","text":"from urllib.request import Request, urlopen\nfrom bs4 import BeautifulSoup\nfrom discord.ext import commands\n\nstates = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DE\", \"FL\", \"GA\", \"HI\", \"ID\", \"IL\", \"IN\", \"IA\",\n          \"KS\", \"KY\", \"LA\", \"ME\", \"MD\", \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n          \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\", \"SD\", \"TN\", \"TX\", \"UT\", \"VT\",\n          \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\n\n\nclass Election:\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command()\n    async def gainz(self, race_type):\n        \"\"\"Say a list of states gained and lost by the dems in a given race on Discord\n\n        Keyword arguments:\n        race_type -- the type of race to detect\n        \"\"\"\n        await self.bot.say('Generating gainz (this may take a while).')\n        pos_gainz = []\n        neg_gainz = []\n        for state in states:\n            state_gainz = self.get_state_gainz(state, race_type)\n            if state_gainz is not None:\n                # check if races are + or -\n                try:\n                    if state_gainz.index('+') > -1:\n                        print(state_gainz)\n                        pos_gainz.append(state_gainz)\n                except ValueError:\n                    print(state_gainz)\n                    neg_gainz.append(state_gainz)\n                # check for primary and break\n                if state_gainz == 'Primary':\n                    await self.bot.say('Race is currently primary, cannot compute gainz.')\n                    return\n        await self.bot.say('Gainz for ' + race_type.upper() + ': ' + ','.join(pos_gainz) +\n                           '\\nLosses for ' + race_type.upper() + ': ' + ', '.join(neg_gainz))\n\n    @staticmethod\n    def get_state_gainz(state, race_type):\n        \"\"\"Return whether a state has been gained or lost by the dems\n\n        Keyword arguments:\n        state -- the state to search\n        race_type -- the type of race to detect wins and losses for\n        \"\"\"\n        race_index = {\n            's1': 0,\n            's2': 1,\n            'gov': 2\n        }.get(race_type, 's1')\n        incumbent_index = {\n            's1': 1,\n            's2': 2,\n            'gov': 0\n        }.get(race_type, 's1')\n        # retrieve state data\n        url = \"http://oppressive.games/power/state.php?state=\" + state\n        request = Request(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) '\n                                                      'AppleWebKit/537.75.14 (KHTML, like Gecko) '\n                                                      'Version/7.0.3 Safari/7046A194A'})\n        html = str(urlopen(request).read())\n        soup = BeautifulSoup(html, \"lxml\")\n        politicians_raw = soup.find_all('table')[1]\n        politicians = [[cell.text for cell in row(\"td\")]\n                       for row in politicians_raw(\"tr\")]\n        # retrieve incumbent list\n        incumbents_raw = soup.find_all('table')[0]\n        incumbents = [[cell.text for cell in row(\"td\")]\n                      for row in incumbents_raw(\"tr\")]\n        incumbents = [[cell.replace('\\\\n', '') for cell in row]\n                      for row in incumbents]\n        # retrieve state race as specified in race_type\n        container_raw = soup.find_all(\"div\", {\"class\": \"col-md-6 well\"})[race_index]\n        race_raw = 
container_raw.find('table')\n race_data = [[cell.text for cell in row(\"td\")]\n for row in race_raw(\"tr\")[1:]]\n race_data = [[cell.strip() for cell in row]\n for row in race_data]\n race_data = [[cell.replace(\"\\\\n\",\"\") for cell in row]\n for row in race_data]\n # retrieve democratic politician data\n dem_raw = [x for x in race_data if 'Democratic Party' in x]\n # retrieve incumbent politician\n incumbent_pol = [row for row in politicians if row[0] in incumbents[1][incumbent_index]]\n incumbent_pol = [j for i in incumbent_pol for j in i]\n # race data is empty or primary break\n if not race_data:\n return None\n if race_data[0].__len__() < 3:\n return 'Primary'\n # get index of dem in race\n try:\n dem_index = race_data.index(dem_raw[0])\n except IndexError:\n return None\n for row in race_data:\n row[2] = float(row[2][:row[2].find(\"%\")])\n # get highest polling politician\n max_value = ['test', 'test', float(0)]\n for row in race_data:\n if row[2] > max_value[2]:\n max_value = row\n # determine gains or losses\n try:\n if max_value[1] == 'Democratic Party' and incumbent_pol[1] != 'Democratic Party':\n return '+' + state\n elif max_value[1] != 'Democratic Party' and race_data[dem_index][0] in incumbents[1]:\n return '-' + state\n except IndexError:\n if max_value[1] == 'Democratic Party':\n return '+' + state\n return None\n\n\ndef setup(bot):\n bot.add_cog(Election(bot))\n","sub_path":"election.py","file_name":"election.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"328876663","text":"# -*- coding: utf-8 -*-\n\"\"\"\nЗадание 6.2b\n\nСделать копию скрипта задания 6.2a.\n\nДополнить скрипт:\nЕсли адрес был введен неправильно, запросить адрес снова.\n\nОграничение: Все задания надо выполнять используя только пройденные темы.\n\"\"\"\n#start\nwhile True:\n ip = input(\"Enter IP address: \")\n octets = ip.split(\".\")\n\n valid_ip = len(octets) == 4\n\n for i in octets:\n if i.isdigit() and 0 <= int(i) < 256 and valid_ip:\n valid_ip = True\n break\n else :\n valid_ip = False\n if valid_ip:\n break\n else:\n print(\"Wrong ip address\")\n \n\nif valid_ip: \n if int(octets[0]) in range (1,224):\n print(\"unicast\")\n elif int(octets [0]) in range (224, 240):\n print(\"multicast\")\n elif ip == \"255.255.255.255\":\n print(\"local broadcast\")\n elif ip == \"0.0.0.0\":\n print(\"unassigned\")\n else:\n print(\"unused\")\n\n \n \n","sub_path":"exercises/06_control_structures/task_6_2b.py","file_name":"task_6_2b.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"4002284","text":"import requests\nimport re\nimport json\nimport time\nfrom requests.exceptions import RequestException\n\ndef getHtml(url):\n try:\n headers = {\n 'User-Agent': 'Mozilla/5.0(Macintosh;Intel Mac OS X 10_13+3) '\n + 'Applewebit/537.36(KHTML,like Gecko) Chorme/65.0.3325.162 Safari/537.36'\n }\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n return response.text\n return None\n except RequestException:\n return None\n\ndef Write_To_Text(content):\n with open(\"E:\\豆瓣影评\\差评.txt\",'a',encoding='utf-8') as f:\n f.write(json.dumps(content,ensure_ascii=False)+'\\n\\n')\n\ndef Parse_One_Page(html):\n partern='(.*?)'\n items =re.findall(partern, html)\n for item in items:\n Write_To_Text(item)\n\ndef main(offset):\n url='https://movie.douban.com/' \\\n 
+'subject/25882296/comments?start='+str(offset)\\\n + '&limit=20&sort=new_score&status=P&percent_type=l'\n html = getHtml(url)\n Parse_One_Page(html)\n\nif __name__ == '__main__':\n for i in range(10):\n main(offset=i*20)\n time.sleep(0)\n\n","sub_path":"getComments.py","file_name":"getComments.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"139485226","text":"\nimport argparse\nimport logging\nimport sys\n\n\nsys.path.append('../../../sr2018')\nfrom components.evaluator.syn_evaluator import SynEvaluator\n\n\ndef test(mode='single'):\n\n ref11 = ['I', 'like', 'fresh', 'green', 'apples']\n ref12 = ['I', 'like', 'green', 'apples']\n hyp1 = ['I', 'like', 'fresh', 'spring', 'apples']\n\n ref21 = ['He', 'is', 'my', 'glorious', 'father']\n ref22 = ['He', 'is', 'my', 'glorious', 'brother']\n hyp2 = ['He', 'is', 'my', 'beloved', 'father']\n\n all_hyps = [hyp1, hyp2]\n\n if mode == 'single':\n logging.debug('Single-reference mode')\n all_refs = [[ref12], [ref22]]\n\n elif mode == 'multi':\n logging.debug('Multi-reference mode')\n all_refs = [[ref11, ref12], [ref21, ref22]]\n\n else:\n raise NotImplementedError()\n\n scores = SynEvaluator.compute_metric_scores(all_refs, all_hyps)\n SynEvaluator.output_scores(scores)\n\n\ndef parse_args():\n argparser = argparse.ArgumentParser()\n argparser.add_argument('--ref', help='References file')\n argparser.add_argument('--hyp', help='Hypotheses file')\n argparser.add_argument('--test', help='Run tests', action='store_true')\n argparser.add_argument('--mode', help='Mode: single or multi-reference', choices=['single', 'multi'])\n\n args = argparser.parse_args()\n return args\n\ndef main():\n logging.basicConfig(level=logging.DEBUG)\n argvs = parse_args()\n\n if argvs.test:\n logging.info('Running tests')\n mode = argvs.mode\n test(mode)\n\n else:\n ref_fn = argvs.ref\n hyp_fn = argvs.hyp\n\n scores = SynEvaluator.read_predictions_and_eval(ref_fn, hyp_fn, normalise=True)\n SynEvaluator.output_scores(scores)\n\nif __name__ == '__main__':\n main()","sub_path":"components/evaluator/syn_eval_script.py","file_name":"syn_eval_script.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"339479929","text":"# -*- coding: utf8 -*-\nfrom datetime import datetime\nimport re\n\nfrom scrapy.http import Request, HtmlResponse,FormRequest\nfrom scrapy.selector import Selector\n\nfrom alascrapy.spiders.base_spiders.ala_spider import AlaSpider\nfrom alascrapy.spiders.base_spiders.bazaarvoice_spider import BVNoSeleniumSpider\nfrom alascrapy.lib.generic import get_full_url, date_format\nimport alascrapy.lib.dao.incremental_scraping as incremental_utils\nfrom alascrapy.items import CategoryItem, ProductItem, ReviewItem, ProductIdItem\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom alascrapy.lib.selenium_browser import SeleniumBrowser\nimport json\n\nclass Gamona_deSpider(AlaSpider):\n name = 'gamona_de'\n allowed_domains = ['gamona.de']\n start_urls = ['http://www.gamona.de/games/reviews.html']\n\n \n def parse(self, response):\n original_url = response.url\n url0='http://www.gamona.de/$invoke/games/reviews.html'\n for i in range(1,100):\n data0={\"name\":\"more\",\"arguments\":[{\"page\":i}]}\n #r=FormRequest(url=url0,formdata=json.dumps(data0),callback=self.level_xhr)\n r = Request(url0, 
method='POST',body=json.dumps(data0),headers={'Content-Type': 'application/json'},callback=self.level_xhr)\n yield r\n product = response.meta.get(\"product\", {})\n review = response.meta.get(\"review\", {})\n \n url_xpath = \"//li[@class='game']/a/@href\"\n single_url = self.extract(response.xpath(url_xpath))\n if single_url:\n matches = None\n if \"\":\n matches = re.search(\"\", single_url, re.IGNORECASE)\n if matches:\n single_url = matches.group(0)\n else:\n return\n single_url = get_full_url(original_url, single_url)\n \n request = Request(single_url, callback=self.level_2)\n try:\n request.meta[\"product\"] = product\n except:\n pass\n try:\n request.meta[\"review\"] = review\n except:\n pass\n yield request\n def level_xhr(self,response):\n #print 'enter level_xhr'\n t=response.body\n pattern = re.compile(r'(?<=href)(.*?)(?=data\\-)')\n anses = pattern.findall(t)\n for ans in anses:\n url='http://www.gamona.de'+ans[3:-3]\n yield Request(url=url,callback=self.level_2)\n def level_2(self, response):\n \n original_url = response.url\n product = response.meta.get(\"product\", {})\n review = response.meta.get(\"review\", {})\n \n product_xpaths = { \n \n \n \"ProductName\":\"//span[@itemprop='name']/h1/a/text()\",\n \n \n \n \n }\n product = self.init_item_by_xpaths(response, \"product\", product_xpaths)\n product['TestUrl'] = original_url\n picurl = product.get(\"PicURL\", \"\")\n if picurl and picurl[:2] == \"//\":\n product[\"PicURL\"] = \"https:\" + product[\"PicURL\"]\n if picurl and picurl[:1] == \"/\":\n product[\"PicURL\"] = get_full_url(original_url, picurl)\n manuf = product.get(\"ProductManufacturer\", \"\")\n if manuf == \"\" and \"\"[:2] != \"//\":\n product[\"ProductManufacturer\"] = \"\"\n try:\n product[\"OriginalCategoryName\"] = category['category_path']\n except:\n pass\n ocn = product.get(\"OriginalCategoryName\", \"\")\n if ocn == \"\" and \"\"[:2] != \"//\":\n product[\"OriginalCategoryName\"] = \"\"\n review_xpaths = { \n \n \n \"ProductName\":\"//span[@itemprop='name']/h1/a/text()\",\n \n \n \n \"TestDateText\":\"//span[@class='date']/text()\",\n \n \n \n \n \"TestSummary\":\"//div[contains(@class,'articlebody')]/p[1]//text()\",\n \n \n \n \"Author\":\"//a[@rel='author']//span[@itemprop='name']//text()\",\n \n \n \"TestTitle\":\"//span[@itemprop='name']/h2/text()\",\n \n \n \n }\n review = self.init_item_by_xpaths(response, \"review\", review_xpaths)\n review['TestUrl'] = original_url\n try:\n review['ProductName'] = product['ProductName']\n review['source_internal_id'] = product['source_internal_id']\n except:\n pass\n awpic_link = review.get(\"AwardPic\", \"\")\n if awpic_link and awpic_link[:2] == \"//\":\n review[\"AwardPic\"] = \"https:\" + review[\"AwardPic\"]\n if awpic_link and awpic_link[:1] == \"/\":\n review[\"AwardPic\"] = get_full_url(original_url, awpic_link)\n\n matches = None\n field_value = review.get(\"TestDateText\", \"\")\n if field_value:\n matches = re.search(\"(\\d{2}\\.\\d{2}\\.\\d{4})\", field_value, re.IGNORECASE)\n if matches:\n review[\"TestDateText\"] = matches.group(1)\n \n\n if review[\"TestDateText\"]:\n \n review[\"TestDateText\"] = review[\"TestDateText\"].strip()\n review[\"TestDateText\"] = date_format(review[\"TestDateText\"], \"%d.%B.%Y\", [\"en\"])\n \n\n review[\"DBaseCategoryName\"] = \"PRO\"\n \n\n review[\"SourceTestScale\"] = \"10\"\n \n\n yield product\n\n in_another_page_xpath = \"//p[@class='nextpage']/a/@href\"\n pros_xpath = \"//div[@class='box pro']/ul//text()\"\n cons_xpath = \"//div[@class='box 
contra']/ul//text()\"\n rating_xpath = \"//span[@class='award']/img/@alt\"\n award_xpath = \"\"\n award_pic_xpath = \"\"\n \n test_verdict_xpath_1 = '//span[@class=\"content\"]/h3/text()'\n \n review[\"TestVerdict\"] = None\n in_another_page_url = None\n if in_another_page_xpath:\n in_another_page_url = self.extract(response.xpath(in_another_page_xpath))\n if in_another_page_url:\n in_another_page_url = get_full_url(response, in_another_page_url)\n request = Request(in_another_page_url, callback=self.parse_fields_page)\n request.meta['review'] = review\n \n request.meta['test_verdict_xpath_1'] = test_verdict_xpath_1\n \n request.meta['pros_xpath'] = pros_xpath\n request.meta['cons_xpath'] = cons_xpath\n request.meta['rating_xpath'] = rating_xpath\n request.meta['award_xpath'] = award_xpath\n request.meta['award_pic_xpath'] = award_pic_xpath\n yield request\n else:\n \n if not review[\"TestVerdict\"]:\n review[\"TestVerdict\"] = self.extract(response.xpath(test_verdict_xpath_1))\n \n yield review\n \n def parse_fields_page(self, response):\n review = response.meta['review']\n \n test_verdict_xpath_1 = response.meta['test_verdict_xpath_1']\n \n \n if not review[\"TestVerdict\"]:\n review[\"TestVerdict\"] = self.extract(response.xpath(test_verdict_xpath_1))\n \n pros_xpath = response.meta['pros_xpath']\n cons_xpath = response.meta['cons_xpath']\n rating_xpath = response.meta['rating_xpath']\n award_xpath = response.meta['award_xpath']\n award_pic_xpath = response.meta['award_pic_xpath']\n if pros_xpath:\n review[\"TestPros\"] = self.extract_all(response.xpath(pros_xpath), ' ; ')\n if cons_xpath:\n review[\"TestCons\"] = self.extract_all(response.xpath(cons_xpath), ' ; ')\n if rating_xpath:\n review['SourceTestRating'] = self.extract(response.xpath(rating_xpath), '%')\n if award_xpath:\n review['award'] = self.extract(response.xpath(award_xpath))\n if award_pic_xpath:\n review['AwardPic'] = self.extract(response.xpath(award_pic_xpath))\n yield review\n\nif __name__=='__main__':\n from scrapy.crawler import CrawlerProcess\n\n process = CrawlerProcess({})\n process.crawl(Gamona_deSpider)\n process.start()","sub_path":"alascrapy/spiders/gamona_de.py","file_name":"gamona_de.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"9258834","text":"num=[]\r\n\"\"\"user input\"\"\"\r\n\r\nnumcount=int(input(\"how many numbers do you want to input?\"))\r\n\r\nfor i in range(numcount):\r\n element= int(input(\"enter number:\"))\r\n num.append(element)\r\n \r\nuserNum=[]\r\n\r\nfor i in num:\r\n if i not in userNum:\r\n userNum.append(i)\r\n \r\nprint(num)\r\nprint(userNum)\r\n\r\npoppedElement=userNum.pop()\r\nprint(\"popped elment is :\",poppedElement)\r\nprint(userNum)","sub_path":"Popped element.py","file_name":"Popped element.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"452933662","text":"\nimport itertools\n\ndef isprime(number):\n if number == 0 or number == 1:\n return False\n \n for i in range(2, int(number ** 0.5) + 1):\n if number % i == 0:\n return False\n \n return True\n\n\ndef solution(numbers):\n check_dict = dict()\n answer = 0\n for i in range(1, len(numbers) + 1):\n for item in list(itertools.permutations(numbers, i)):\n number = int(''.join(item))\n if number not in check_dict:\n check_dict[number] = number\n \n if isprime(number):\n answer += 1\n\n return 
answer\n","sub_path":"programmers/42839.py","file_name":"42839.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"311771257","text":"import numpy as np\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.animation import FFMpegWriter\n\nY1 = np.genfromtxt('./Data/2_1_random.txt')\n\nnSeconds1 = 10\ncol1=len(Y1[0])\nrow1=len(Y1)//col1\nfps1=row1//nSeconds1\n\nfig1 = plt.figure( figsize=(8,8) )\n\nim1 = plt.imshow(Y1[0:col1,0:col1])\n\ndef animate_func1(i):\n im1.set_array(Y1[i*col1:(i+1)*col1,0:col1].copy())\n return [im1]\n\nanim1 = animation.FuncAnimation(fig1, animate_func1, frames = row1, interval = 1000 /fps1 )\n\nanim1.save('Animations/Animation_1_random.mp4', fps=fps1, extra_args=['-vcodec', 'libx264'])\n\nY3 = np.genfromtxt('./Data/2_3_random.txt')\n\nnSeconds3 = 10\ncol3=len(Y3[0])\nrow3=len(Y3)//col3\nfps3=row3//nSeconds3\n\nfig3 = plt.figure( figsize=(8,8) )\n\nim3 = plt.imshow(Y3[0:col3,0:col3])\n\ndef animate_func3(i):\n im3.set_array(Y3[i*col3:(i+1)*col3,0:col3].copy())\n return [im3]\n\nanim3 = animation.FuncAnimation(fig3, animate_func3, frames = row3, interval = 1000 /fps3 )\n\nanim3.save('Animations/Animation_3_random.mp4', fps=fps3, extra_args=['-vcodec', 'libx264'])\n","sub_path":"Blatt9/Animation_Programs/Animate_random.py","file_name":"Animate_random.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"40989472","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = r'''\n---\nmodule: online_user_info\nshort_description: Gather information about Online user.\ndescription:\n - Gather information about the user.\nversion_added: \"2.9\"\nauthor:\n - \"Remy Leone (@sieben)\"\nextends_documentation_fragment: online\n'''\n\nEXAMPLES = r'''\n- name: Gather Online user info\n online_user_info:\n register: result\n\n- debug:\n msg: \"{{ result.online_user_info }}\"\n'''\n\nRETURN = r'''\n---\nonline_user_info:\n description: Response from Online API\n returned: success\n type: complex\n sample:\n \"online_user_info\": {\n \"company\": \"foobar LLC\",\n \"email\": \"foobar@example.com\",\n \"first_name\": \"foo\",\n \"id\": 42,\n \"last_name\": \"bar\",\n \"login\": \"foobar\"\n }\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.online import (\n Online, OnlineException, online_argument_spec\n)\n\n\nclass OnlineUserInfo(Online):\n\n def __init__(self, module):\n super(OnlineUserInfo, self).__init__(module)\n self.name = 'api/v1/user'\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=online_argument_spec(),\n supports_check_mode=True,\n )\n\n try:\n module.exit_json(\n online_user_info=OnlineUserInfo(module).get_resources()\n )\n except OnlineException as exc:\n module.fail_json(msg=exc.message)\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"env/lib/python3.9/site-packages/ansible/modules/cloud/online/online_user_info.py","file_name":"online_user_info.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"245264892","text":"import math\n\n\nf = open(\"dane_ulamki.txt\")\n\nsuma = 0\nfor line in f.readlines():\n a, b = map(int, line.split())\n suma += a // math.gcd(a, b)\n\nprint(suma)\nf.close()\n","sub_path":"zbiór inf/Coding/65/65-3.py","file_name":"65-3.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"18494842","text":"import threading\nimport requests\nimport time\n\nthings = []\nthings_size_threshold = 1000\nthings_size_limit = 10\n\n# Collector\nnum_requests_made = 0\ndef collect():\n global num_requests_made\n while True:\n if len(things) < things_size_threshold:\n print(f'Making request number: {num_requests_made}. Length of things: {len(things)}')\n response = requests.get('https://jolynclothing.com/products/printed-jackson-3-onesie-ruggle.json')\n body = response.json\n things.append(body)\n num_requests_made += 1\n else:\n print(f'Size limit reached. Sleeping for 5 secs')\n time.sleep(5)\n\ndef write():\n global things\n while True:\n print('Writer checking if things can be written ... ')\n if len(things) > things_size_limit:\n print('Writing to disk ... ')\n # file_rows = things[:things_size_limit]\n remaining_rows = things[things_size_limit:]\n things = remaining_rows\n time.sleep(2)\n else:\n print('Too early for writer, sleeping ...')\n time.sleep(2)\n\ncollector = threading.Thread(target=collect)\nwriter = threading.Thread(target=write)\n\ncollector.start()\nwriter.start()\ncollector.join()\nwriter.join()\n\n\n\n","sub_path":"PythonMultithreading.py","file_name":"PythonMultithreading.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"130692539","text":"from random import randint\nfrom euler import *\nimport itertools\n\n#####################################\n# CONSTRUCTION OF INTERESTING WORDS #\n#####################################\n\ndef prefixes(x):\n \"\"\" If x is a word, prefixes(x) is a word which is the concatenation\n of the prefixes of x, i.e \"x[:1]x[:2]...x[:len(x)] \"\"\"\n pre = \"\"\n for i in range(len(x)+1):\n pre += x[:i+1]\n return(pre)\n\ndef random(n):\n \"\"\" Construct a random binary word of length n \"\"\"\n res = \"\"\n for i in range(n):\n res+=str(randint(0,1))\n return res\n\n\ndef deBruijn(n, cyclic = True):\n \"\"\"Construct a de Bruijn word of order n over the alphabet {0,1}.\n\n By default, the word is considered cyclic. 
If the parameter `cyclic`\n    is set to False, then the word is not cyclic anymore.\n    \"\"\"\n    gr = constructionGraph(n-1)\n    tour = eulerian_tour(gr,'0'*(n-1))\n    if cyclic:\n        res = \"\" # cyclic-word version\n    else:\n        res = '0'*(n-2) # non-cyclic-word version\n    for e in tour:\n        res += e[-1]\n    if cyclic:\n        return res[1:] # cyclic version\n    else:\n        return res # non-cyclic version\n\n\ndef TrianglePath(p,l):\n    \"\"\"Construct a triangle-paths word (of depth p and chain-length l).\n\n    That is: the concatenation of every sequence of length <= p,\n    followed by 2**p random chains of length l (where each chain is the\n    concatenation of the prefixes of a random word).\n\n    \"\"\"\n    res = \"\"\n    #Triangles\n    for i in range(p):\n        for x in [\"\".join(seq) for seq in itertools.product(\"01\", repeat=i)]:\n            res += x\n\n    chains = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=p)]\n    for x in chains:\n        res += x\n    for i in range(l):\n        for i in range(len(chains)):\n            chains[i] += str(randint(0,1))\n            res += chains[i]\n    return res\n\n\n\n################\n# USEFUL TOOLS #\n################\n\ndef constructionGraph(n):\n    \"\"\"Construct a graph where nodes are labeled by all sequences of size\n    n over the alphabet {0,1}. We add an edge labeled by the letter \"a\"\n    between a node w and w' if w[1:] = w'[0:-1] and w'[-1] = a.\n\n    An eulerian tour in such a graph with parameter (n-1) corresponds\n    more or less to a de Bruijn sequence of order n.\n    \"\"\"\n    g = {}\n    nodes = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=n)]\n    for no in nodes:\n        g[no] = set([no[1:n] + '0', no[1:n] + '1'])\n    # g['0'*n].remove('0'*n)\n    # g['1'*n].remove('1'*n)\n    \n    return g\n","sub_path":"src/Clean/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"428475141","text":"import os\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\nimport tarfile\r\nimport gzip\r\nimport shutil\r\nimport bz2\r\nimport binascii\r\nimport lzma\r\nimport plotly.plotly as py\r\n\r\ndef printstat():\r\n    print(\"Enter the type of compression you want to do: \")\r\n    print(\"1.tar archiving\")\r\n    print(\"2.bz2 compression\")\r\n    print(\"3.gzip compression\")\r\n    print(\"4.lzma compression\")\r\n    print(\"5.Want to see user selection stats (y/n)? 
\")\r\n\r\ndef userstat():\r\n y=[]\r\n with open('output.csv', newline='') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\r\n for row in spamreader:\r\n y.append(int(', '.join(row)))\r\n plt.plot(y)\r\n plt.ylabel('User choice')\r\n plt.xlabel('Every new entry')\r\n plt.show()\r\n\r\n\r\n \r\ndef manualcall():\r\n y=[]\r\n printstat()\r\n x=input(\"Choose your option : \")\r\n if(x=='y' or x=='n'):\r\n if(x=='y'):\r\n userstat()\r\n else:\r\n manualcall()\r\n else:\r\n y.append(x)\r\n resultFyle = open(\"output.csv\",'a+')\r\n for r in y:\r\n resultFyle.write(r + \"\\n\")\r\n resultFyle.close()\r\n if(int(x)==1):\r\n #tar = input(\"Enter the filename as 'fortar.py' for archieving: \")\r\n #os.system('python ' +str(tar))\r\n for_tar()\r\n \r\n elif(int(x)==2):\r\n #bz = input(\"Enter the filename as 'forbz2.py' for bz2 compression: \")\r\n #os.system('python ' +str(bz))\r\n for_bz()\r\n elif(int(x)==3):\r\n #gzip = input(\"Enter the filename as 'forgzip.py' for gzip compression: \")\r\n #os.system('python ' +str(gzip))\r\n for_gzip()\r\n elif(int(x)==4):\r\n #lzma = input(\"Enter the filename as 'forlzma.py' for lzma compression: \")\r\n #os.system('python ' +str(lzma))\r\n for_lzma()\r\n\r\ndef for_tar():\r\n print(\"tar Archieving\")\r\n flag=True\r\n y=[]\r\n while(flag):\r\n print(\"1.Archieve a file\")\r\n print(\"2.(de)Archieve a file\")\r\n print(\"3.Upload file to drive\")\r\n print(\"4.exit\")\r\n ch=input(\"Enter your choice.\")\r\n if(int(ch) == 1):\r\n #Archive\r\n while(True):\r\n z=input(\"Want to add more file(y/n) : \")\r\n if(z=='y'):\r\n x=input(\"Enter file name : \")\r\n y.append(x)\r\n else:\r\n break\r\n\r\n print ('creating archive')\r\n print (y)\r\n\r\n out = tarfile.open('tarfile_add.tar', mode='w')\r\n for c in y:\r\n out.add(str(c))\r\n\r\n t = tarfile.open('tarfile_add.tar', mode='r')\r\n for member_info in t.getmembers():\r\n print (member_info.name)\r\n\r\n elif(int(ch)==2):\r\n tar = tarfile.open(\"tarfile_add.tar\")\r\n tar.extractall()\r\n tar.close()\r\n elif(int(ch)==3):\r\n r=input(\"Enter file name to upload\")\r\n os.system('python ' +'driveupload.py '+r)\r\n else:\r\n break\r\n\r\n\r\ndef for_gzip():\r\n print(\"compression and deccompression using gzip\")\r\n flag=True\r\n while(flag):\r\n print(\"1.compress a file\")\r\n print(\"2.(de)compress a file\")\r\n print(\"3.Upload file to drive\")\r\n print(\"4.exit\")\r\n ch=input(\"Enter your choice.\")\r\n if(int(ch) == 1):\r\n #compress\r\n x=input(\"Enter file name : \")\r\n z=str(x).split('.')\r\n inF = open(x, 'rb')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF=gzip.GzipFile(\"compressedByGZIP.gz\",'wb')\r\n outF.write(s)\r\n outF.close()\r\n elif(int(ch) == 2):\r\n #decompress\r\n inF = gzip.GzipFile(\"compressedByGZIP.gz\", 'rb')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF = open(\"x1.\"+str(z[1]), 'wb')\r\n outF.write(s)\r\n outF.close()\r\n elif(int(ch)==3):\r\n r=input(\"Enter file name to upload\")\r\n os.system('python ' +'driveupload.py '+r)\r\n else:\r\n break\r\n\r\n \r\n c=os.path.getsize(x)\r\n d=os.path.getsize(\"compressedByGZIP.gz\")\r\n\r\n\r\n import matplotlib.pyplot as plt; plt.rcdefaults()\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n \r\n objects = (\"original\",\"compressed\")\r\n y_pos = np.arange(len(objects))\r\n performance = [c,d]\r\n plt.barh(y_pos, performance, align='center')\r\n plt.yticks(y_pos, objects)\r\n plt.xlabel('ratio')\r\n plt.title('Compression')\r\n \r\n plt.show()\r\n\r\n\r\n\r\ndef for_bz2():\r\n 
print(\"compression and deccompression using bzip2\")\r\n flag=True\r\n while(flag):\r\n print(\"1.compress a file\")\r\n print(\"2.(de)compress a file\")\r\n print(\"3.Upload file to drive\")\r\n print(\"4.exit\")\r\n ch=input(\"Enter your choice.\")\r\n if(int(ch) == 1):\r\n #compress\r\n x=input(\"Enter file name : \")\r\n z=str(x).split('.')\r\n inF = open(x, 'rb')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF=bz2.BZ2File(\"compressedByBZ2.bz\",'wb')\r\n outF.write(s)\r\n outF.close()\r\n elif(int(ch) == 2):\r\n #decompress\r\n inF = bz2.BZ2File(\"compressedByBZ2.bz\", 'rb')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF = open(\"x1.\"+str(z[1]), 'wb')\r\n outF.write(s)\r\n outF.close()\r\n\r\n elif(int(ch)==3):\r\n r=input(\"Enter file name to upload\")\r\n os.system('python ' +'driveupload.py '+r)\r\n\r\n else:\r\n break\r\n\r\n c=os.path.getsize(x)\r\n d=os.path.getsize(\"compressedByBZ2.bz\")\r\n\r\n\r\n import matplotlib.pyplot as plt; plt.rcdefaults()\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n \r\n objects = (\"original\",\"compressed\")\r\n y_pos = np.arange(len(objects))\r\n performance = [c,d]\r\n plt.barh(y_pos, performance, align='center')\r\n plt.yticks(y_pos, objects)\r\n plt.xlabel('ratio')\r\n plt.title('Compression')\r\n \r\n plt.show()\r\n\r\n\r\ndef for_lzma():\r\n print(\"compression and decompression using LZma 2\")\r\n flag=True\r\n while(flag):\r\n print(\"1.compress a file\")\r\n print(\"2.(de)compress a file\")\r\n print(\"3.Upload file to drive\")\r\n print(\"4.exit\")\r\n ch=input(\"Enter your choice.\")\r\n if(int(ch) == 1):\r\n #compress\r\n x=input(\"Enter file name : \")\r\n inF = open(x, 'rb')\r\n z=str(x).split('.')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF=lzma.LZMAFile(\"compressedByXZ.xz\",'wb')\r\n outF.write(s)\r\n outF.close()\r\n\r\n elif(int(ch) == 2):\r\n #decompress\r\n inF = lzma.LZMAFile(\"compressedByXZ.xz\", 'rb')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF = open(\"x1.\"+str(z[1]), 'wb')\r\n outF.write(s)\r\n outF.close()\r\n elif(int(ch)==3):\r\n r=input(\"Enter file name to upload\")\r\n os.system('python ' +'driveupload.py '+r)\r\n else:\r\n break\r\n\r\n c=os.path.getsize(x)\r\n d=os.path.getsize(\"compressedByXZ.xz\")\r\n\r\n\r\n import matplotlib.pyplot as plt; plt.rcdefaults()\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n \r\n objects = (\"original\",\"compressed\")\r\n y_pos = np.arange(len(objects))\r\n performance = [c,d]\r\n plt.barh(y_pos, performance, align='center')\r\n plt.yticks(y_pos, objects)\r\n plt.xlabel('ratio')\r\n plt.title('Compression')\r\n \r\n plt.show()\r\n\r\n\r\n\r\n \r\nif(os.path.getsize(\"output.csv\") == 0):\r\n manualcall()\r\n\r\nelse:\r\n resultFyle = open(\"output.csv\",'r+')\r\n v1=0\r\n v2=0\r\n v3=0\r\n v4=0\r\n print(\"\"\"\r\n1.Preffered one\r\n2.Suggested one(lzma2 Compression)\r\n3.Manual\r\n4.user stats\r\n \"\"\")\r\n ch2 = input(\"choice : \")\r\n \r\n with open('output.csv', newline='') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\r\n for row in spamreader:\r\n if(int(', '.join(row))==1):\r\n v1=v1+1\r\n elif(int(', '.join(row))==2):\r\n v2=v2+1\r\n elif(int(', '.join(row))==3):\r\n v3=v3+1\r\n elif(int(', '.join(row))==4):\r\n v4=v4+1\r\n if(int(ch2)==1):\r\n if(v1>=v2 and v1>=v3 and v1>=v4):\r\n print(\"the prefered one is tar archieving \")\r\n rt=input(\"Do you want to continue (y/n) : \")\r\n if(rt=='y'):\r\n tar = 'fortar.py'\r\n #os.system('python ' +tar)\r\n for_tar()\r\n else:\r\n manualcall()\r\n 
\r\n        elif(v2>=v1 and v2>=v3 and v2>=v4):\r\n            print(\"the preferred one is bz2 compression \")\r\n            rt=input(\"Do you want to continue (y/n) : \")\r\n            if(rt=='y'):\r\n                #bz = 'forbz2.py'\r\n                #os.system('python ' +bz)\r\n                for_bz()\r\n            else:\r\n                manualcall()\r\n            \r\n        elif(v3>=v2 and v3>=v1 and v3>=v4):\r\n            print(\"the preferred one is gzip compression \")\r\n            rt=input(\"Do you want to continue (y/n) : \")\r\n            if(rt=='y'):\r\n                #gzip = 'forgzip.py'  # assigning here would shadow the gzip module\r\n                #os.system('python ' +gzip)\r\n                for_gzip()\r\n            else:\r\n                manualcall()\r\n\r\n            \r\n        elif(v4>=v2 and v4>=v3 and v4>=v1):\r\n            print(\"the preferred one is lzma compression \")\r\n            rt=input(\"Do you want to continue (y/n) : \")\r\n            if(rt=='y'):\r\n                #lzma = 'forlzma.py'  # assigning here would shadow the lzma module\r\n                #os.system('python ' +lzma)\r\n                for_lzma()\r\n            else:\r\n                manualcall()\r\n\r\n        \r\n    elif(int(ch2)==2):\r\n        #lzma = 'forlzma.py'\r\n        #os.system('python ' +lzma)\r\n        for_lzma()\r\n\r\n    elif(int(ch2)==3):\r\n        manualcall()\r\n    elif(int(ch2)==4):\r\n        userstat()\r\n\r\n","sub_path":"first2.py","file_name":"first2.py","file_ext":"py","file_size_in_byte":10245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"468392038","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Exercise link: https://www.hackerrank.com/challenges/missing-stock-prices\n\n#%% Useful links\n# https://stackoverflow.com/questions/24588437/convert-date-to-float-for-linear-regression-on-pandas-data-frame\n# https://scikit-learn.org/stable/modules/impute.html#impute\n# https://scikit-learn.org/stable/modules/generated/sklearn.impute.IterativeImputer.html#sklearn.impute.IterativeImputer\n\n#%% get sample data from file on localdir\nwith open('/home/pefs/Desktop/missing_values.txt', 'r') as f:\n    raw = f.read().split('\\n')\nraw = raw[:-2]\nn_quotes = raw.pop(0)\n\n\n#%% get sample data from stdin\nn_quotes = int(input())\nraw = []\nfor _ in range(n_quotes):\n    raw.append(input())\n\n#%% data wrangling\nimport pandas as pd\nfrom datetime import datetime\nimport numpy as np\n\nquotes = [el.split('\\t') for el in raw]\n \nfor i in range(len(quotes)):\n    if quotes[i][1][0] == 'M':\n        quotes[i][1] = ''\n        \ndate = list(list(zip(*quotes))[0])\nprice = list(list(zip(*quotes))[1])\n\nidx = []\nfor i in range(len(price)):\n    if price[i]=='':\n        price[i] = float('NaN')\n        idx.append(i)\n    else:\n        price[i] = float(price[i])\n\ndate = [datetime.strptime(el, '%m/%d/%Y %H:%M:%S') for el in date]\n\ndf = pd.Series(price, index=date)\ndf.index.name = 'date'\ndf = df.reset_index(name='price')\n\nmiss = df[df.price.isnull()].date.values\n#miss = miss.astype('datetime64[D]').astype(int)\nmiss = [[x] for x in miss]\nmiss = np.asarray(miss)\n\ndf.dropna(inplace=True)\ndate, price = [[x] for x in df.date.values], df.price.values\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(date, price, \n                                                    test_size=0.01, \n                                                    shuffle=False)\nx_train, y_train = np.asarray(x_train), np.asarray(y_train)\n\n#from sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import SGDRegressor\nmdl = SGDRegressor(shuffle=False, max_iter=5000, learning_rate='optimal', \n                   random_state=0, n_iter_no_change=30).fit(x_train, y_train)\n\nprint(*mdl.predict(miss), sep='\\n')\n# filling missing values with interpolation methods, didn't get good results\n'''\nprint(*df.price.interpolate(method='linear').loc[idx].values, sep='\\n') #1.04\nprint(*df.price.interpolate(method='krogh').loc[idx].values, sep='\\n') 
#7.80\n'''\n\n","sub_path":"statistics_machine_learning/Missing Stock Prices.py","file_name":"Missing Stock Prices.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"397106352","text":"from flask import Flask,request,jsonify\nfrom flask_cors import *\nimport json\n\napp = Flask(__name__)\nCORS(app, supports_credentials=True) \n\n@app.route('/',methods=['GET','POST'])\ndef getpiclist():\n from functions import ZipExtract,MoveFile,DelZipFile,RenameFile,ReturnJpgList\n from PilFunctions import xqimglogo\n # 构建path\n dir_id = request.args.get('id')\n if int(dir_id) <= 1000:\n path = 'G:/1-1000/' + str(dir_id)\n elif int(dir_id) >1000 and int(dir_id) <=2000:\n path = 'G:/1000-2000/' + str(dir_id)\n elif int(dir_id) >2000 and int(dir_id) <=3000:\n path = 'G:/2000-3000/' + str(dir_id)\n\n print('\\n\\n--------解压ZIP文件--------\\n\\n')\n ZipExtract(path)\n print('\\n\\n--------移动文件到根目录--------\\n\\n')\n MoveFile(path)\n print('\\n\\n--------重命名文件--------\\n\\n')\n RenameFile(path)\n print('\\n\\n--------删除ZIP文件--------\\n\\n')\n DelZipFile(path)\n\n print('\\n\\n--------完成ALL DOWN--------\\n\\n')\n\n print(jsonify(ReturnJpgList(path)))\n return jsonify(ReturnJpgList(path))\n\n@app.route('/tim',methods=['GET','POST'])\ndef tim():\n from PilFunctions import st,xq,xqimglogo\n from functions import ClearUpload\n body = request.get_json()\n\n dir_id = body['id']\n if int(dir_id) <= 1000:\n path = 'G:/1-1000/' + str(dir_id)\n elif int(dir_id) >1000 and int(dir_id) <=2000:\n path = 'G:/1000-2000/' + str(dir_id)\n elif int(dir_id) >2000 and int(dir_id) <=3000:\n path = 'G:/2000-3000/' + str(dir_id)\n \n print('\\n\\n--------清空桌面--------\\n\\n')\n ClearUpload()\n\n print('\\n\\n--------制作首图--------\\n\\n')\n piclist=body['piclist']\n ststyle=body['ststyle']\n st(piclist,ststyle)\n\n print('\\n\\n--------制作详情图片--------\\n\\n')\n xq(path)\n\n print('\\n\\n--------图片添加水印--------\\n\\n')\n color = body['color']\n if color != '':\n xqimglogo(path,color)\n\n return 'success'\n\nif __name__ == '__main__':\n # app.debug = True\n app.run()","sub_path":"py/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"645189826","text":"'''\r\nCopyright 2020 Flexera Software LLC\r\nSee LICENSE.TXT for full license text\r\nSPDX-License-Identifier: MIT\r\n\r\nCreated on Nov 19, 2019\r\n\r\n@author: SGeary\r\n'''\r\nimport logging\r\nimport requests\r\nimport sys\r\nimport config\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n#######################################################################\r\n# If the calling app is a flask app then we can use\r\n# the flask abort function to catch exceptions\r\n# so see if its defined in a common config file\r\ntry: \r\n FLASKAPP = config.FLASKAPP\r\nexcept:\r\n FLASKAPP = False\r\n\r\nif FLASKAPP:\r\n from flask import abort\r\n#######################################################################\r\n\r\nFNCI_API = \"FNCI v6 Change Project Owner API\"\r\nENDPOINT_URL = config.v6_BASEURL + \"project/changeProjectOwner/\"\r\n\r\n#-----------------------------------------------------------------------#\r\ndef change_project_owner(projectID, ownerID, authToken):\r\n logger.debug(\"Entering change_project_owner\")\r\n \r\n changeProjectOwnerBody = get_changeProjectOwnerBody(projectID, ownerID) \r\n logger.debug(\"changeProjectOwnerBody: %s\" %changeProjectOwnerBody)\r\n 
\r\n headers = {'Content-Type': 'application/json', 'Authorization': authToken} \r\n RESTAPI_URL = ENDPOINT_URL\r\n logger.debug(\" RESTAPI_URL: %s\" %RESTAPI_URL)\r\n logger.debug(\" headers: %s\" %headers) \r\n \r\n try:\r\n response = requests.post(RESTAPI_URL, data=changeProjectOwnerBody, headers=headers)\r\n response.raise_for_status()\r\n except requests.exceptions.ConnectionError:\r\n # Connection Error - Is the server up and running?\r\n abort_message = FNCI_API + \" - Error Connecting to FNCI Server - \" + (ENDPOINT_URL).split(\"palamida\")[0] # Get rid of everything after palamida in url\r\n logger.error(\" %s\" %(abort_message))\r\n\r\n if FLASKAPP: \r\n # Using error code 500 (Internal Server Error) to cover connection errors\r\n # in the flask apps\r\n abort(500, FNCI_API + \" - %s\" %abort_message) \r\n else:\r\n print(abort_message)\r\n print(\"Is the FNCI server running?\")\r\n print(\"Exiting script\")\r\n sys.exit() \r\n except requests.exceptions.RequestException as e: # Catch the exception for the logs but process below\r\n logger.error(e)\r\n \r\n # We at least received a response from FNCI so check the status to see\r\n # what happened if there was an error or the expected data\r\n if response.status_code == 200:\r\n logger.debug(\" Call to %s was successful.\" %FNCI_API)\r\n \r\n elif response.status_code == 400:\r\n # Bad Request\r\n logger.error(\"Response code 400 - %s\" %response.text)\r\n if FLASKAPP: \r\n abort(400, FNCI_API + \" - Bad Request - Look at debug log for more details\") \r\n else:\r\n print(\"%s - Error: %s - Bad Request.\" %(FNCI_API, response.status_code ))\r\n print(\" Exiting script\")\r\n sys.exit() \r\n\r\n elif response.status_code == 401:\r\n # Unauthorized Access\r\n logger.error(\" %s - Error: %s - Authentication Failed: JWT token is not valid or user does not have correct permissions.\" %(FNCI_API, response.status_code ))\r\n if FLASKAPP: \r\n abort(401, FNCI_API + \" - Authentication Failed: JWT token is not valid or user does not have correct permissions.\")\r\n else:\r\n print(\"%s - Error: %s - Authentication Failed: JWT token is not valid or user does not have correct permissions.\" %(FNCI_API, response.status_code ))\r\n print(\" Exiting script\")\r\n sys.exit() \r\n\r\n elif response.status_code == 404:\r\n # Not Found\r\n logger.error(\" %s - Error: %s - URL endpoint not found: %s\" %(FNCI_API, response.status_code, RESTAPI_URL ))\r\n if FLASKAPP: \r\n abort(400, FNCI_API + \" - Bad Request - URL endpoint not found\") \r\n else:\r\n print(\" %s - Error: %s - URL endpoint not found: %s\" %(FNCI_API, response.status_code, RESTAPI_URL ))\r\n print(\" Exiting script\")\r\n sys.exit() \r\n\r\n elif response.status_code == 405:\r\n # Method Not Allowed\r\n logger.error(\" %s - Error: %s - Method (GET/POST/PUT//DELETE/ETC) Not Allowed.\" %(FNCI_API, response.status_code ))\r\n if FLASKAPP: \r\n abort(405, FNCI_API + \" - Method Not Allowed.\")\r\n else:\r\n print(\" %s - Error: %s - Method (GET/POST/PUT//DELETE/ETC) Not Allowed.\" %(FNCI_API, response.status_code ))\r\n print(\" Exiting script\")\r\n sys.exit() \r\n \r\n elif response.status_code == 500:\r\n # Internal Server Error\r\n logger.error(\" %s - Error: %s - Internal Server Error.\" %(FNCI_API, response.status_code ))\r\n if FLASKAPP: \r\n abort(500, FNCI_API + \" - Internal Server Error.\")\r\n else:\r\n print(\" %s - Error: %s - Internal Server Error.\" %(FNCI_API, response.status_code ))\r\n print(\" Exiting script\")\r\n sys.exit() 
\r\n#-----------------------------------------------------------#\r\n\r\n \r\ndef get_changeProjectOwnerBody(projectID, ownerID) : \r\n\r\n \r\n changeProjectOwnerBody = ''' {\r\n \"projectId\" :\"''' + str(projectID) + '''\",\r\n \"userId\" :\"''' + str(ownerID) + '''\"\r\n}'''\r\n \r\n return changeProjectOwnerBody \r\n ","sub_path":"FNCI/v6/project/changeProjectOwner.py","file_name":"changeProjectOwner.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"240298070","text":"import sys\nimport os\nimport shutil\nimport io\nimport math\nimport base64\nimport json\nimport lzma\nimport jinja2\nimport falcon\nimport tempfile\nimport subprocess\n\nfrom contextlib import closing\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom urllib.parse import urlencode\n\nimport pyparsing as pp\nimport browser_json\n\nclass ImageGenerationFailed(Exception):\n pass\n\ndef render_image_error(text):\n text = text.strip()\n with closing(Image.new(\"RGBA\", (600, 300), (0, 0, 0, 0))) as image:\n max_width = 100\n max_height = 0\n \n draw = ImageDraw.Draw(image)\n try:\n font = ImageFont.truetype(font=\"Arial\", size=20)\n except OSError:\n font = ImageFont.truetype(font=\"DejaVuSansMono\", size=20)\n \n x_offset, _ = draw.textsize(\"Error: \", font=font)\n draw.text((0, 0), \"Error: \", fill=(255, 0, 0, 255))\n \n w, h = draw.textsize(text, font=font)\n max_width = min(max(max_width, w + x_offset), image.width)\n max_height = min(max(max_height, h), image.height)\n \n draw.text((x_offset, 0), text, fill=(0, 0, 0, 255))\n \n with closing(image.crop((0, 0, max_width, max_height))) as subimage:\n with closing(io.BytesIO()) as io_out:\n subimage.save(io_out, \"PNG\")\n return io_out.getvalue()\n\ndef generate_image(image_type, scale, code):\n if code.strip() == \"\":\n return render_image_error(\"No code supplied\")\n \n #print(\"------- code start ----------\")\n #print(type(code))\n #print(code)\n \n #print(\"------- code normalized ----------\")\n try:\n code = json.dumps(browser_json.parse_browser_json(code.decode() if isinstance(code, bytes) else code))\n except pp.ParseException as e:\n return render_image_error(\"invalid json syntax\")\n #errMsg = str(e)\n #return render_image_error(\"\\n\".join(errMsg[i*80:(i+1)*80] for i in range(math.ceil(len(errMsg) / 80))))\n #print(code)\n #print(\"------- code end ----------\")\n \n try:\n tmpdir = tempfile.mkdtemp()\n\n filename = os.path.join(tmpdir, \"out\")\n process_result = subprocess.run(\n [\"phantomjs\", \"server/serverside-renderer.js\", \"--silent\", \"--format={}\".format(image_type), \"--scale={}\".format(scale), filename],\n cwd=os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"..\"),\n input=code if isinstance(code, bytes) else code.encode(),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n \n #print(process_result.args)\n #print(process_result.returncode)\n #print(process_result.stdout)\n #print(process_result.stderr)\n\n if process_result.returncode == 0:\n with open(filename, \"rb\") as f:\n return f.read()\n else:\n return render_image_error(process_result.stdout.decode())\n finally:\n shutil.rmtree(tmpdir)\n return None\n\ndef derive_host_url(req):\n port_ext = \"\"\n if req.port != 80:\n port_ext = \":{}\".format(req.port)\n return req.scheme + \"://\" + req.host + port_ext\n\nclass HTMLContent:\n def __init__(self):\n self.resource_path = os.path.join(\n os.path.dirname(__file__),\n \"templates\")\n self.jinja_env = 
jinja2.Environment(\n            loader=jinja2.FileSystemLoader(self.resource_path))\n\n    def on_get(self, req, response, filename=None):\n        if filename is None:\n            filename = \"index.html\"\n\n        extension = os.path.splitext(filename.lower())[1]\n        inferred_mediatype = falcon.MEDIA_TEXT\n        if extension == \".html\":\n            inferred_mediatype = falcon.MEDIA_HTML\n        elif extension == \".css\":\n            inferred_mediatype = \"text/css\"\n        elif extension == \".js\":\n            inferred_mediatype = falcon.MEDIA_JS\n        elif extension == \".png\":\n            inferred_mediatype = falcon.MEDIA_PNG\n        elif extension in (\".jpg\", \".jpeg\"):\n            inferred_mediatype = falcon.MEDIA_JPEG\n\n        try:\n            template = self.jinja_env.get_template(filename)\n        except jinja2.exceptions.TemplateNotFound:\n            raise falcon.HTTPNotFound(\n                title=\"Page not found\",\n                description=\"The page '{}' does not exist\".format(filename))\n        else:\n            port_ext = \"\"\n            if req.port != 80:\n                port_ext = \":{}\".format(req.port)\n            rendered_page = template.render(\n                hostname=req.host + port_ext,\n                protocol=req.scheme)\n            response.body = rendered_page\n            response.content_type = inferred_mediatype\n\n\ndef verify_image_type(image_type):\n    if image_type not in (\"svg\", \"png\"):\n        raise falcon.HTTPInternalServerError(\n            title=\"Invalid type parameter\",\n            description=\"type parameter must be svg or png\")\n\ndef parse_image_scale(scale):\n    try:\n        scale = float(scale)\n        if not (.1 <= scale <= 10.0):\n            raise ValueError()\n    except ValueError:\n        raise falcon.HTTPInternalServerError(\n            title=\"Invalid scale parameter\",\n            description=\"Scale must be a valid float between .1 and 10.0\")\n    else:\n        return scale\n\n\ndef compute_compression_prepostfixes():\n    single1_compression = lzma.compress(b'{default: [ \"more things!\" ]}')\n    single2_compression = lzma.compress(b'{x: \"asdf\", diferetn: 9, [2 3 4 5 1012 ]}' * 5)\n    \n    global STANDARD_COMPRESSION_PREFIX, STANDARD_COMPRESSION_POSTFIX\n    \n    l = 0\n    for i in range(min(len(single1_compression), len(single2_compression))):\n        if single1_compression[i] == single2_compression[i]:\n            l += 1\n        else:\n            break\n    STANDARD_COMPRESSION_PREFIX = single1_compression[:l]\n    \n    l = 0\n    for i in range(min(len(single1_compression), len(single2_compression))):\n        if single1_compression[-1 - i] == single2_compression[-1 - i]:\n            l -= 1\n        else:\n            break\n    # keep the postfix a bytes value in both branches so later concatenation with compressed data works\n    STANDARD_COMPRESSION_POSTFIX = b\"\" if l == 0 else single1_compression[l:]\ncompute_compression_prepostfixes()\n\ndef compress_text(text):\n    compressed_bytes = lzma.compress(\n        text.encode() if isinstance(text, str) \n        else text)\n\n    compression_mode = 0\n    if compressed_bytes[:len(STANDARD_COMPRESSION_PREFIX)] == STANDARD_COMPRESSION_PREFIX:\n        compression_mode |= 1\n        compressed_bytes = compressed_bytes[len(STANDARD_COMPRESSION_PREFIX):]\n    if len(STANDARD_COMPRESSION_POSTFIX) > 0:\n        if compressed_bytes[-len(STANDARD_COMPRESSION_POSTFIX):] == STANDARD_COMPRESSION_POSTFIX:\n            compression_mode |= 2\n            compressed_bytes = compressed_bytes[:-len(STANDARD_COMPRESSION_POSTFIX)]\n    \n    return str(compression_mode) + base64.b64encode(compressed_bytes).decode()\n\ndef decompress_text(text):\n    mode = chr(text[0])\n    try:\n        mode = int(mode)\n        if not (0 <= mode < (1 << 2)):\n            raise ValueError(\"...\")\n    except ValueError:\n        raise falcon.HTTPInternalServerError(\n            title=\"Invalid code\",\n            description=\"The supplied code is missing the encoding mode byte.\")\n    \n    try:\n        binary_lzma_code = base64.b64decode(text[1:])\n    except ValueError:\n        raise falcon.HTTPInternalServerError(\n            title=\"Code is not base64 encoded\",\n            
description=\"The provided code is not encoded in the base64 format\")\n\n if (mode & 1) != 0:\n binary_lzma_code = STANDARD_COMPRESSION_PREFIX + binary_lzma_code\n if (mode & 2) != 0:\n binary_lzma_code = binary_lzma_code + STANDARD_COMPRESSION_POSTFIX\n \n try:\n plain_code = lzma.decompress(binary_lzma_code)\n except lzma.LZMAError:\n raise falcon.HTTPInternalServerError(\n title=\"Code is not compressed\",\n description=\"The provided code is note compressed using the correct lzma compression technique\")\n\n if isinstance(plain_code, str):\n plain_code = plain_code.encode()\n\n return plain_code\n\nclass RestAPI:\n def on_get(self, req, response, cmd=\"[None]\"):\n cmd = cmd.lower()\n if cmd == \"gen_image\":\n image_type = req.get_param(\"type\", required=True).lower()\n verify_image_type(image_type)\n\n scale = req.get_param(\"scale\", default=1.0)\n scale = parse_image_scale(scale)\n\n code = req.get_param(\"c\", required=True)\n \n plain_code = decompress_text(code.encode())\n\n image = generate_image(image_type, scale, plain_code)\n if image is None:\n raise falcon.HTTPInternalServerError(\n title=\"Image generation failed\",\n description=\"The code you submitted could not be used to render a wavedrom image\")\n\n response.content_type = {\n \"svg\": \"image/svg+xml\",\n \"png\": falcon.MEDIA_PNG\n }[image_type]\n response.body = image\n else:\n raise falcon.HTTPNotFound(\n title=\"invalid command\",\n description=\"get command {} does not exist\".format(cmd))\n\n def on_post(self, req, response, cmd=\"[None]\"):\n cmd = cmd.lower()\n if cmd == \"gen_image\":\n return self.on_get(req, response, cmd=cmd)\n if cmd == \"generate_link\":\n scale = req.get_param(\"scale\", default=1.0)\n scale = parse_image_scale(scale)\n\n image_type = req.get_param(\"type\", default=\"png\").lower()\n verify_image_type(image_type)\n\n auto_redirect = req.get_param_as_bool(\"redirect\")\n\n code = req.get_param(\"code\")\n if code is None:\n code = req.bounded_stream.read(32768)\n try:\n generate_image(\"svg\", 1.0, code)\n except ImageGenerationFailed:\n raise falcon.HTTPInternalServerError(\n title=\"Invalid WaveDrom code\",\n description=\"The WaveDrom code you submitted cannot be parsed by the WaveDrom generator\")\n\n compressed_code = compress_text((code.decode() if isinstance(code, bytes) else code).strip())\n\n url = \"{hosturl}/rest/gen_image?{options}\".format(\n hosturl=derive_host_url(req),\n options=urlencode({\n \"type\": image_type,\n \"scale\": scale,\n \"c\": compressed_code}))\n\n if auto_redirect:\n raise falcon.HTTPTemporaryRedirect(url)\n\n response.content_type = falcon.MEDIA_TEXT\n response.body = url\n else:\n raise falcon.HTTPNotFound(\n title=\"invalid command\",\n description=\"post command {} does not exist\".format(cmd))\n\nclass StaticRedirect:\n def __init__(self, path):\n self.path = path\n\n def on_get(self, req, response):\n raise falcon.HTTPTemporaryRedirect(self.path)\n\n def on_post(self, req, response):\n raise falcon.HTTPTemporaryRedirect(self.path)\n\nif __name__ in (\"__main__\", \"main\"):\n html_content = HTMLContent()\n rest_api = RestAPI()\n \n fapi = falcon.API()\n\n fapi.req_options.auto_parse_form_urlencoded = True\n\n fapi.add_route(\"/\", StaticRedirect(\"/html/\"))\n fapi.add_route(\"/html/\", StaticRedirect(\"index.html\"))\n\n fapi.add_route(\"/html/{filename}\", html_content)\n\n fapi.add_route(\"/rest/\", rest_api)\n fapi.add_route(\"/rest/{cmd}\", 
rest_api)\n\n","sub_path":"python/render_server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"240298070","text":"import logging\nimport logging.config\nimport os\nimport re\nimport shutil\nimport sys\nimport wave\n\nfrom process_script.check_noise_annotation import check_noise_annotation_old_norm\nfrom process_script.metada_update import AudioMetadata, write_meta, read_supplement\n\nlogger = logging.getLogger(\"yueyu\")\nlog_path = os.path.dirname(os.getcwd()) + '/Logs/'\nlog_name = log_path + 'log.log'\nfh = logging.FileHandler(log_name, mode='a', encoding=\"utf8\")\nlogger.addHandler(fh)\n\n\nclass Check(object):\n    def __init__(self, src, dst, workbook):\n        self.src = src\n        self.dst = dst\n        self.workbook = workbook\n\n    def move(self, src_path):\n        dst_path = os.path.join(self.dst, os.path.relpath(src_path, self.src))\n        dirname = os.path.dirname(dst_path)\n        if not os.path.exists(dirname):\n            os.makedirs(dirname)\n        shutil.move(src_path, dst_path)\n        shutil.move(src_path.replace('.wav', '.txt'), dst_path.replace('.wav', '.txt'))\n        shutil.move(src_path.replace('.wav', '.metadata'), dst_path.replace('.wav', '.metadata'))\n\n    def spain(self, userinfo):\n        userinfos = {}\n        for group, infos in userinfo.items():\n            if infos['city'] in userinfos:\n                userinfos[infos['city']].update({group: infos})\n            else:\n                userinfos.update({infos['city']: {group: infos}})\n        return userinfos\n\n    def checkers(self, option):\n        logger.error(\"Start\")\n\n        # 用户信息,\n        userinfo = read_supplement(self.workbook)\n\n        errors = []\n        for path, dirs, files in os.walk(self.src):\n            for file in files:\n                if os.path.splitext(file)[-1] == '.wav':\n                    audio_f = os.path.join(path, file)\n                    txt_f = audio_f.replace('.wav', '.txt')\n                    meta_f = audio_f.replace('.wav', '.metadata')\n\n                    wav_checker = WAV(audio_f)\n                    if os.path.exists(txt_f):\n                        txt_checker = TXT(txt_f)\n                    else:\n                        logger.error(\"{}\\t Don't have txt file \".format(audio_f))\n                        continue\n                    if os.path.exists(meta_f):\n                        meta_checker = Metadata(meta_f)\n                    else:\n                        logger.error(\"{}\\t Don't have meta file \".format(audio_f))\n                        continue\n\n                    if option == 'update':\n                        # lines = txt_checker.update()\n                        # txt_checker.check(lines)\n                        meta_checker.update(userinfo, self.src, self.dst, errors)\n                        meta_checker.check()\n                        # wav_checker.check()\n\n                    elif option == 'check':\n                        txt_checker.check()\n                        meta_checker.check()\n                        wav_checker.check()\n\n                    # if not txt_checker.flag or not meta_checker.flag or not wav_checker.flag:\n                    #     self.move(audio_f)\n\n        logger.error(\"End\")\n\n\nclass File(object):\n    # the angle brackets of the named groups were lost in the source; 'sid' is an assumed name for the second group, which is never referenced\n    GROUP_REGEX = re.compile('(?P<group>[G|Z]\\\\d+)[A-F\\\\d_]*(?P<sid>S\\\\d+)\\\\.')\n\n    def __init__(self, filepath):\n        self.filepath = filepath\n        self.flag = True\n        r = self.GROUP_REGEX.search(os.path.basename(filepath))\n        if r:\n            self.group = r.group('group').strip()\n        else:\n            self.group = os.path.basename(filepath)\n\n    def read_file(self):\n        \"\"\"\n        读取文件,捕获编码,如果不是utf8 抛出异常\n        :return:\n        \"\"\"\n        try:\n            with open(self.filepath, 'r', encoding='utf-8') as f:\n                return f.readlines()\n        except UnicodeDecodeError as e:\n            logger.error(\"{}\\t not encode utf-8\".format(self.filepath))\n            self.flag = False\n\n    def is_has_ch(self, lines):\n        # 是否含有中文\n        z = re.compile(u'[\\\\u4e00-\\\\u9fa5]')\n        for line in lines:\n            if z.search(line):\n                self.flag = False\n                logger.error(\"{}\\t has chinese\".format(self.filepath))\n                return\n\n    def write_file(self, lines):\n        with open(self.filepath, 'w', encoding='utf8') as f:\n            for line in lines:\n                f.write(line)\n\n\nclass TXT(File):\n    def ch_to_en(self, lines):\n        \"\"\"中文标点转英文\"\"\"\n        table = {ord(f): ord(t) for f, t in zip('【】;‘’:“”《》,。、?', '[];\\'\\':\"\"<>,. ?')}\n        return [text.translate(table) for text in lines]\n\n    def remove(self, lines):\n        # 去除斜杠\n        new_lines = []\n        for line in lines:\n            new_lines.append(re.sub('/|~', '', line))\n        return new_lines\n\n    def is_double_str(self, lines):\n        \"\"\"\n        是否包含全角\n        :param lines:\n        :return:\n        \"\"\"\n        double_s = []\n        double_str = lambda x: ord(x) == 0x3000 or 0xFF01 <= ord(x) <= 0xFF5E\n        for line in lines:\n            for x in line:\n                if double_str(x):\n                    double_s.append(x)\n        if double_s:\n            self.flag = False\n            logger.error(\"{}\\t Has double str(quan jiao) is {}\".format(self.filepath, double_s))\n\n    def dbc2sbc(self, lines):\n        \"\"\"全角转半角\"\"\"\n        new_lines = []\n        for line in lines:\n            rstring = ''\n            for uchar in line:\n                inside_code = ord(uchar)\n                if inside_code == 0x3000:\n                    inside_code = 0x0020\n                else:\n                    inside_code -= 0xfee0\n                if not (0x0021 <= inside_code and inside_code <= 0x7e):\n                    rstring += uchar\n                    continue\n                rstring += chr(inside_code)\n            new_lines.append(rstring)\n\n        return new_lines\n\n    def is_one_line(self, lines: list):\n        \"\"\"\n        判断是否为一行\n        :param lines: 文本行\n        :return:\n        \"\"\"\n        if len(lines) == 0:\n            self.flag = False\n            logger.error(\"{}\\t the file is empty\".format(self.filepath))\n        elif len(lines) > 1:\n            self.flag = False\n            logger.error(\"{}\\t the file is Multi-line\".format(self.filepath))\n        else:\n            content = lines[0].strip()\n            if not content:\n                self.flag = False\n                logger.error(\"{}\\t the file is line break\".format(self.filepath))\n\n    def is_have_digit(self, lines):\n        \"\"\"\n        是否包含数字\n        :param lines:\n        :return:\n        \"\"\"\n        P_DIGIT = re.compile(u'\\\\d+')\n        digit = P_DIGIT.findall(lines[0])\n        if digit:\n            self.flag = False\n            logger.error(\"{}\\t contains numbers is {}\".format(self.filepath, digit))\n\n    def is_have_symbol(self, lines):\n        \"\"\"\n        判断是否有特殊字符\n        :param lines: 行内容\n        :return:\n        \"\"\"\n        P_SYMBOL_FULL = re.compile('[#¥{}【】;‘’:“”《》,。、?·&*$^]')\n        special_symbol = P_SYMBOL_FULL.findall(lines[0])\n        if special_symbol:\n            self.flag = False\n            logger.error(\"{}\\t contains special symbol is {}\".format(self.filepath, special_symbol))\n\n    def update(self):\n        # 更新\n        lines = self.read_file()\n        # for updater in [self.ch_to_en, self.dbc2sbc, self.remove]:\n        for updater in [self.ch_to_en, self.dbc2sbc]: #\n            lines = updater(lines)\n        self.write_file(lines)\n        return lines\n\n    def check(self, lines=None):\n        # 检查\n        if not lines:\n            lines = self.read_file()\n        self.is_one_line(lines)\n\n        # 如果不存在空行和多行的情况进入的特殊字符的检查\n        if self.flag:\n            self.is_have_digit(lines)\n            self.is_have_symbol(lines)\n            self.is_double_str(lines)\n            check_noise_annotation_old_norm(self.filepath, lines[0])\n\n\nclass Metadata(File):\n    meta_map = {\n        'SES': 'dirname',\n        'DIR': 'dirpath',\n        'FIP': 'dirpath',\n        'SAM': 'frame',\n        'SNB': 'sample_width',\n        'SBF': 'lohi',\n        'SSB': 'per_bits',\n        'QNT': 'type',\n        'NCH': 'channels',\n        'SCD': 'dirname',\n        'LBD': 'mark_file',\n        'LBR': 'length',\n        'ORS': 'text'\n    }\n\n    def read_meta(self):\n        infos = {}\n        with open(self.filepath, 'r', encoding='utf-8') as f:\n            for line in f:\n                info = line.strip().split('\\t')\n                if len(info) == 2:\n                    k, v = info\n                    infos.update({k: v})\n                elif len(info) == 1:\n                    k, v = info[0], ''\n                    infos.update({k: v})\n                else:\n                    k, v = info[0], \"\\t\".join(info[1:])\n                    infos.update({k: v})\n\n        return infos\n\n    def update(self, userinfo, src, dst, errors):\n        if userinfo is not None: # 这里是df 不能直接用bool判断\n            infos = self.read_meta()\n            group = 
infos.get(\"SES\")\n if not group:\n self.flag = False\n logger.error(\"{}\\t SES key is null\".format(self.filepath))\n else:\n if group in userinfo.index:\n update_info = userinfo.loc[group, :].to_dict()\n # 修改文件\n tem = 1\n new_content = \"\"\n with open(self.filepath, 'r+', encoding='utf8') as f:\n for line in f:\n type_name = line.strip().split(\"\\t\")[0]\n if type_name in update_info:\n line_content = type_name + \"\\t\" + update_info[type_name] + \"\\n\"\n new_content += line_content\n else:\n new_content += line\n f.seek(0)\n f.truncate()\n f.write(new_content)\n\n\n def check(self):\n z = re.compile(u'[\\u4e00-\\u9fa5]')\n meta_no_null = ['SEX', 'AGE', 'ACC', 'ACT', \"BIR\"]\n lines = self.read_file()\n meta = {}\n\n for line in lines:\n line = line.strip()\n if z.search(line) and 'ORS' not in line:\n self.flag = False\n logger.error(\"{}\\t content contains chinese\".format(self.filepath))\n\n if len(line.split('\\t')) > 3:\n self.flag = False\n logger.error(\"{}\\t content redundant TAB keys\".format(self.filepath))\n elif len(line.split('\\t')) == 3:\n if \"LBR\" in line or \"LBO\" in line:\n pass\n else:\n self.flag = False\n logger.error(\"{}\\t content redundant TAB keys, {}\".format(self.filepath, line.split('\\t')[0]))\n\n elif len(line.split('\\t')) == 1:\n if line.split('\\t')[0] in meta_no_null:\n self.flag = False\n logger.error(\"{}\\t {}\\t key is null\".format(self.filepath, line.split('\\t')[0]))\n else:\n key = line.split('\\t')[0]\n valve = line.split('\\t')[1]\n meta[key] = valve\n # print(meta)\n for m in meta_no_null:\n # print(meta[m])\n if not m in meta.keys():\n self.flag = False\n logger.error(\"{}\\t {}\\t key is null\".format(self.filepath, m))\n else:\n if not meta['SEX'] in ['Male', 'Female']:\n self.flag = False\n logger.error(\"{}\\t value format is err\".format(self.filepath))\n\n\nclass WAV(File):\n min_length = 15\n audio_channel = 1\n sample_width = 2\n framerate = [16000, 22050, 44100]\n\n def check(self):\n fsize = os.path.getsize(self.filepath)\n txt_file = self.filepath.replace('.wav', '.txt')\n meta_file = self.filepath.replace('.wav', '.metadata')\n\n if not os.path.exists(txt_file) or not os.path.exists(meta_file):\n self.flag = False\n logger.error(\"{}\\t missing files\".format(self.filepath))\n\n if fsize / float(1024) < self.min_length:\n self.flag = False\n logger.error(\"{}\\t size error\".format(self.filepath))\n else:\n with wave.open(self.filepath, 'rb') as f:\n if not f.getnchannels() == self.audio_channel:\n self.flag = False\n logger.error(\"{}\\t channel error\".format(self.filepath))\n\n if not f.getframerate() in self.framerate:\n self.flag = False\n logger.error(\"{}\\t sample error\".format(self.filepath))\n if not f.getsampwidth() == self.sample_width:\n self.flag = False\n logger.error(\"{}\\t sample width error\".format(self.filepath))\n\n\nif __name__ == '__main__':\n # root, audio_size, audio_sample, audio_channel, meta_key, sy_list = read_ini('config.txt')\n\n try:\n # 脚本使用\n src_path = sys.argv[1]\n dst_path = sys.argv[2]\n workbook = sys.argv[3]\n option = sys.argv[4]\n except Exception as e:\n # 集成环境使用\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101028_g_351人意大利语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101028_r_215小时意大利语手机采集语音数据_朗读\\完整数据包_加密后数据\\data'\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101033_g_405人法语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = 
r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101033_r_232小时法语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101034_g_343人西班牙语手机采集语音数据\\完整数据包_processed\\data\\category'\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101034_r_227小时西班牙语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy170801048_338小时西班牙语手机采集语音数据\\完整数据包_processed\\data'\n src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy170901049_347小时意大利语手机采集语音数据\\完整数据包_加密后数据\\data'\n\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\apy161101031_r_215小时美式英语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\apy161101031_g_344人美式英语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\apy161101032_g_357人英式英语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\apy161101032_r_199小时英式英语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\APY161101011_201小时北美英语手机采集语音数据\\完整数据包_processed\\data\\androidcategory'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\APY161101011_201小时北美英语手机采集语音数据\\完整数据包_processed\\data\\desktopcategory'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\APY161101011_201小时北美英语手机采集语音数据\\完整数据包_processed\\data\\iphonecategory'\n\n # src_path = r'\\\\10.10.30.14\\杨明明\\修改测试demo\\data'\n dst_path = ''\n workbook = r''\n # option = 'update'\n option = 'check'\n\n print(src_path)\n c = Check(src_path, dst_path, workbook)\n c.checkers(option)\n","sub_path":"process_script/audio_check.py","file_name":"audio_check.py","file_ext":"py","file_size_in_byte":15971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"122037939","text":"from math import gcd\n\nfractions = []\nfor d in range(999900, 1000001):\n n = int(3 * d / 7)\n fractions.append([n / d, n, d])\nfor res in sorted(fractions)[::-1]:\n if gcd(res[1], res[2]) == 1:\n print(res[1])\n break\n","sub_path":"problems/71.py","file_name":"71.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"72448481","text":"import boto3\nimport json\nimport decimal\n\ndynamodb = boto3.resource('dynamodb')\n\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n if o % 1 > 0:\n return float(o)\n else:\n return int(o)\n return super(DecimalEncoder, self).default(o)\n\ndef lambda_handler(event, context):\n usertable = dynamodb.Table(\"user\")\n response = usertable.scan()\n\n return {\n 'statusCode' : 200,\n 'body' : json.dumps(response['Items'], cls=DecimalEncoder),\n 'headers' : {\n 'Content-Type' : 'application/json'\n }\n }","sub_path":"dynamoDBread.py","file_name":"dynamoDBread.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"44062206","text":"m = 0\nmn = 0\nfor c in range(1, 6):\n p = float(input(f\"Peso {c}ª Pessoa: \"))\n if c == 1:\n m = p\n mn = p\n else:\n if p >= m:\n m = p\n if p <= mn:\n mn = p\nprint(f\"Maior = {m} Kg\\nMenor = {mn} Kg\")\n","sub_path":"ExCursoEmVídeo(Python)/ex55.py","file_name":"ex55.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"84366026","text":"import torch\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef diagonala( tensor2d):\n x = 
tensor2d.size()\n if x[0]!=x[1]:\n return -1\n for x in range (x[0]):\n print(\"[\", x , \",\", x, \"]: \", tensor2d[x, x], sep='')\n return 0\n\na = [ [1,2,3], [1,1,2], [1,2,4]] #basic 2d tensor initialization\nA = torch.tensor(a)\nc = A.size()\nprint(A, A.ndimension(), A.shape)\n\ndiagonala(A) #index testing function\n\nB = 2*A\nC = torch.mm(A, B)\nC[1:3, 1] = 0\nprint(C)\n\na = np.lin\n\n\n","sub_path":"1 Tensor and Gradients/2d_tensors.py","file_name":"2d_tensors.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"250876217","text":"\n\"\"\"\n\n어간 추출, 원형 복원\n • PorterStemmer 어간 추출 알고리즘\n • LancasterStemmer 어간 추출 알고리즘\n • 원형 복원(lemmatizing)\n\n\"\"\"\n\n\n\nwords = ['lives', 'dies', 'flies', 'died']\n\n\n# ------------------------------------------------------------\n# PorterStemmer 어간 추출 알고리즘\n\nfrom nltk.stem import PorterStemmer\nst = PorterStemmer()\nresult = [st.stem(w) for w in words]\nprint(result)\n# ['live', 'die', 'fli', 'die']\n\n\n# ------------------------------------------------------------\n# LancasterStemmer 어간 추출 알고리즘\nfrom nltk.stem import LancasterStemmer\nst = LancasterStemmer()\nresult = [st.stem(w) for w in words]\nprint(result)\n# ['liv', 'die', 'fli', 'died']\n\n\n# 결과를 보면\n# 두 어간 추출 알고리즘으로 인한 결과가 다른 것을 알 수 있습니다.\n\n\n\n# ------------------------------------------------------------\n# 원형 복원(lemmatizing)\n\nfrom nltk.stem import WordNetLemmatizer\nlm = WordNetLemmatizer()\nresult = [lm.lemmatize(w) for w in words]\nprint(result)\n# ['life', 'dy', 'fly', 'died']\n\n\n# \"died\"의 pos=\"v\" 옵션으로 접미사나 어미를 제거한 형태로 원형복원\nprint(lm.lemmatize(\"died\", pos=\"v\"))\n# die\n","sub_path":"PythonPackages/src/NLTK/ex03.py","file_name":"ex03.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"620491101","text":"from django.conf.urls import url\nfrom . import views\nurlpatterns = [url(r'^index/$', views.index, name='index'),\n url(r'^(?P[0-9]+)/$', views.question, name='question'),\n url(r'^first/$', views.first, name='first'),\n url(r'^second/$', views.update, name='second'),\n url(r'^image/$', views.image, name='image'),\n url(r'^check/$', views.check, name='check'),\n url(r'^background/$', views.background, name='background'),\n\n ]","sub_path":"styles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"511958441","text":"from pprint import pprint\n\ntrainings = { \"course1\":{\"title\":\"Python Training Course for Beginners\", \n \"location\":\"Frankfurt\", \n \"trainer\":\"Steve G. Snake\"},\n \"course2\":{\"title\":\"Intermediate Python Training\",\n \"location\":\"Berlin\",\n \"trainer\":\"Ella M. Charming\"},\n \"course3\":{\"title\":\"Python Text Processing Course\",\n \"location\":\"München\",\n \"trainer\":\"Monica A. Snowdon\"}\n }\n\ntrainings2 = trainings.copy()\n\ntrainings[\"course2\"] = {\"title\":\"Perl Seminar for Beginners\",\n \"location\":\"Ulm\",\n \"trainer\":\"James D. 
Morgan\"}\npprint(trainings2)","sub_path":"shallowcopy_pprint.py","file_name":"shallowcopy_pprint.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"595670283","text":"from cloud.aws import *\nfrom cloud.response import Response\nimport cloud.shortuuid as shortuuid\n\n\n# Define the input output format of the function.\n# This information is used when creating the *SDK*.\ninfo = {\n 'input_format': {\n 'guest_id': 'str?',\n },\n 'output_format': {\n 'guest_id': 'str',\n 'session_id': 'str',\n }\n}\n\n\ndef do(data, boto3):\n body = {}\n recipe = data['recipe']\n params = data['params']\n app_id = data['app_id']\n\n guest_id = params.get('guest_id', None)\n table_name = 'auth-{}'.format(app_id)\n\n login_conf = recipe['login_method']['guest_login']\n default_group_name = login_conf['default_group_name']\n enabled = login_conf['enabled']\n if enabled == 'true':\n enabled = True\n elif enabled == 'false':\n enabled = False\n\n if not enabled:\n body['error'] = '6'\n body['message'] = '게스트 로그인이 비활성화 상태입니다.'\n return Response(body)\n\n dynamo = DynamoDB(boto3)\n\n if guest_id:\n result = dynamo.get_item(table_name, guest_id)\n if result.get('Item', None):\n session_item = {\n 'userId': guest_id\n }\n dynamo.put_item(table_name, 'session', session_item)\n body['message'] = '게스트 로그인 성공'\n return Response(body)\n else:\n body['error'] = '7'\n body['message'] = '해당 게스트가 없습니다'\n return Response(body)\n else:\n guest_id = shortuuid.uuid()\n email = '{}@guest.com'.format(shortuuid.uuid())\n item = {\n 'email': email,\n 'group': default_group_name,\n 'extra': {},\n 'loginMethod': 'guest_login',\n }\n dynamo.put_item(table_name, 'user', item, item_id=guest_id)\n session_id = shortuuid.uuid()\n session_item = {\n 'userId': guest_id\n }\n dynamo.put_item(table_name, 'session', session_item, item_id=session_id)\n body['session_id'] = session_id\n body['guest_id'] = guest_id\n body['message'] = '게스트 로그인 성공'\n return Response(body)\n\n","sub_path":"aws_interface/cloud/auth/guest.py","file_name":"guest.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"256482171","text":"''' This example shows how WakeUpBehaviour works. 
The agent\nprints a message in the screen.\n'''\n\n# Needed imports\nfrom pade.behaviours.types import WakeUpBehaviour\nfrom pade.core.agent import Agent\nfrom pade.misc.utility import display_message, start_loop\n\n# Defining the LaterAgent (inherits from Agent class)\nclass LateAgent(Agent):\n\n\t# This method will execute at agent startup\n\tdef setup(self):\n\t\t# The behaviour is created with two args, where\n\t\t# the second is a time (in seconds) to behaviour\n\t\t# waits.\n\t\tbehaviour = AmILate(self, 5)\n\t\t# This adds a behaviour in the agent\n\t\tself.add_behaviour(behaviour)\n\n\n# Defining the AmILate behaviour\nclass AmILate(WakeUpBehaviour):\n\n\t# This method executes the main actions of behaviour\n\tdef on_wake(self):\n\t\tdisplay_message(self.agent, 'Am I late?')\n\n\n# This starts the agents with PADE\nif __name__ == '__main__':\n\t# Defining a LateAgent object\n\tlateagent = LateAgent('late')\n\t# Creating a list with agents that will be executed\n\tagents_list = [lateagent]\n\t# Passing the agent list to main loop of PADE\n\tstart_loop(agents_list)","sub_path":"examples/behaviours-and-messages/LateAgent.py","file_name":"LateAgent.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"570274507","text":"\n\nfrom xai.brain.wordbase.nouns._frailty import _FRAILTY\n\n#calss header\nclass _FRAILTIES(_FRAILTY, ):\n\tdef __init__(self,): \n\t\t_FRAILTY.__init__(self)\n\t\tself.name = \"FRAILTIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"frailty\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_frailties.py","file_name":"_frailties.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"289169908","text":"#!/usr/bin/env python3\n\nimport skilstak.colors as c\nimport random\n\nanswers = [\n \"yes\",\n \"no\",\n \"maybe\"\n]\n\ndef main():\n print(c.clear + c.multi(\"Welcome to the magical eightball\"))\n while True:\n answer = random.choice(answers)\n input(c.rc() + \">>> \" + c.reset)\n print(c.rc() + answer)\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print(c.clear)\n exit()\n\n\n","sub_path":"py/eightball/eightball.py","file_name":"eightball.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"630778203","text":"from numpy import *\r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\n \r\n\r\ndef euclDistance(vector1, vector2): \r\n return sqrt(sum(power(vector2 - vector1, 2))) \r\n \r\ndef Centroids(dataSet, k): \r\n numpixel, attr = dataSet.shape \r\n centroids = zeros((k, attr)) \r\n for j in range(k): \r\n i = random.randint(0, numpixel)\r\n centroids[j , :] = dataSet[i, :] \r\n return centroids \r\n \r\ndef kmeans(dataSet, k): \r\n numpixel = dataSet.shape[0] \r\n Assign = mat(zeros((numpixel, 2))) \r\n Changed = True \r\n centroids =Centroids(dataSet, k) \r\n \r\n while Changed: \r\n Changed = False \r\n for i in range(numpixel): \r\n minDistance = 1000000.0 \r\n minIndex = 0 \r\n for j in range(k): \r\n distance = euclDistance( dataSet[i, :], centroids[j, :]) \r\n if minDistance > distance: \r\n minDistance = distance \r\n minIndex = j \r\n if Assign[i, 0] != minIndex: \r\n Changed = True \r\n Assign[i, :] = minIndex,minDistance**2\r\n \r\n for j in range(k):\r\n centroidj=Assign[:,0].A==j\r\n dataj=nonzero(centroidj)\r\n 
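# gather the points currently assigned to centroid j and recompute the centroid as their mean\r\n            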
clusterj=dataSet[dataj[0]] \r\n centroids[j, :] = mean(clusterj, axis = 0)\r\n \r\n return centroids, Assign\r\n\r\n \r\ndef result(file_name, data):\r\n \r\n m, n = np.shape(data)\r\n f = open(file_name, \"w\")\r\n for i in range(m):\r\n pixel = []\r\n for j in range(n):\r\n pixel.append(str(data[i, j]))\r\n f.write(\"\\t\".join(pixel) + \"\\n\")\r\n f.close()\r\n","sub_path":"Kmeans.py","file_name":"Kmeans.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"292457335","text":"from pypokerengine.players import BasePokerPlayer\nfrom pypokerengine.utils.card_utils import gen_cards, estimate_hole_card_win_rate\nimport random as rand\nimport pprint\n\nclass Honest22Player(BasePokerPlayer):\n\n\t# Number of simulation\n\tNB_SIMULATION = 250\n\n\t# Minumum rounds needed to collect base data for 2nd heuristic\n\tMIN_NUM_DATA_COLLECTED = 100\n\n\t# Psuedo infinite\n\tPOS_INF = 10000.0\n\n\t# Street name constant\n\tSTREET_ZERO_CARD = \"preflop\"\n\tSTREET_THREE_CARD = \"flop\"\n\tSTREET_FOUR_CARD = \"turn\"\n\tSTREET_FIVE_CARD = \"river\"\n\n\t# List of streets\n\tLIST_OF_STREET = [STREET_ZERO_CARD, STREET_THREE_CARD, STREET_FOUR_CARD, STREET_FIVE_CARD]\n\n\t# Action name constant\n\tFOLD = \"fold\"\n\tCALL = \"call\"\n\tRAISE = \"raise\"\n\n\t# Action index constant\n\tFOLD_INDEX = 0\n\tCALL_INDEX = 1\n\tRAISE_INDEX = 2\n\n\t# Game constant (declared here as it is difficult to check during game)\n\t# ENGINE BUG: The engine uses 20 for preflop, flop and 40 for turn, river \n\t# instead of 10 like the project specification stated\n\tRAISE_AMOUNT_PREFLOP = 20\n\tRAISE_AMOUNT_FLOP = 20\n\tRAISE_AMOUNT_TURN = 40\n\tRAISE_AMOUNT_RIVER = 40\n\n\t# Limited poker constant\n\tNUM_STREET_PER_ROUND = 4\n\tNUM_RAISE_PER_STREET = 4\n\tNUM_RAISE_PER_ROUND_PER_PLAYER = 4\n\n\t# Player turn constant\n\tPLAYER_TURN = True\n\tOPPONENT_TURN = False\n\n\t# Street index\n\tSTREET_INDEX_DICT = {\n\t\tSTREET_ZERO_CARD: 0,\n\t\tSTREET_THREE_CARD: 1,\n\t\tSTREET_FOUR_CARD: 2,\n\t\tSTREET_FIVE_CARD: 3\n\t}\n\n\t# Raise amount dictionary\n\tRAISE_AMOUNT_DICT = {\n\t\tSTREET_ZERO_CARD: RAISE_AMOUNT_PREFLOP,\n\t\tSTREET_THREE_CARD: RAISE_AMOUNT_FLOP,\n\t\tSTREET_FOUR_CARD: RAISE_AMOUNT_TURN,\n\t\tSTREET_FIVE_CARD: RAISE_AMOUNT_RIVER,\n\t\t0: RAISE_AMOUNT_PREFLOP,\n\t\t1: RAISE_AMOUNT_FLOP,\n\t\t2: RAISE_AMOUNT_TURN,\n\t\t3: RAISE_AMOUNT_RIVER\n\t}\n\n\t# Convert card letter to number\n\tCARD_NUM_DICT = {\n\t\t\"2\": 2,\n\t\t\"3\": 3,\n\t\t\"4\": 4,\n\t\t\"5\": 5,\n\t\t\"6\": 6,\n\t\t\"7\": 7,\n\t\t\"8\": 8,\n\t\t\"9\": 9,\n\t\t\"T\": 10,\n\t\t\"J\": 11,\n\t\t\"Q\": 12,\n\t\t\"K\": 13,\n\t\t\"A\": 14\n\t}\n\n\n\t# Hold history of Opponent's action and outcome\n\t# To search: self.RAISE_HISTORY[did_player_win][is_player_big_blind][street][num_raises]\n\tRAISE_HISTORY = {\n\t\t# Roundts won\n\t\tTrue: {\n\t\t\t# Player is big blind\n\t\t\tTrue: {\n\t\t\t\t#Streets history (index: number_of_raises, value: number_of_rounds)\n\t\t\t\tSTREET_ZERO_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_THREE_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FOUR_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FIVE_CARD: [0, 0, 0, 0, 0]\n\t\t\t},\n\t\t\t# Opponent is big blind\n\t\t\tFalse: {\n\t\t\t\t#Streets history (index: number_of_raises, value: number_of_rounds)\n\t\t\t\tSTREET_ZERO_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_THREE_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FOUR_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FIVE_CARD: [0, 0, 0, 0, 0]\n\t\t\t}\n\t\t},\n\t\t# Rounds 
lost\n\t\tFalse: {\n\t\t\t# Player is big blind\n\t\t\tTrue: {\n\t\t\t\t# Streets history (index: number_of_raises, value: number_of_rounds)\n\t\t\t\tSTREET_ZERO_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_THREE_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FOUR_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FIVE_CARD: [0, 0, 0, 0, 0]\n\t\t\t},\n\t\t\t# Opponent is big blind\n\t\t\tFalse: {\n\t\t\t\t# Streets history (index: number_of_raises, value: number_of_rounds)\n\t\t\t\tSTREET_ZERO_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_THREE_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FOUR_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FIVE_CARD: [0, 0, 0, 0, 0]\n\t\t\t}\n\t\t}\n\t}\n\n\t# Hold win rates history given the player who is big blind this round\n\t# To search: self.WIN_RATES_FROM_RAISE_HISTORY[street][num_raises]\n\tWIN_RATES_FROM_RAISE_HISTORY = {\n\t\t# Streets history (index: number_of_raises, value: win_rates)\n\t\tSTREET_ZERO_CARD: [0.0, 0.0, 0.0, 0.0, 0.0],\n\t\tSTREET_THREE_CARD: [0.0, 0.0, 0.0, 0.0, 0.0],\n\t\tSTREET_FOUR_CARD: [0.0, 0.0, 0.0, 0.0, 0.0],\n\t\tSTREET_FIVE_CARD: [0.0, 0.0, 0.0, 0.0, 0.0]\n\t}\n\n\tBET_AVG_KEY = \"avg\"\n\tBET_NUM_KEY = \"num\"\n\tBET_TOT_KEY = \"total\"\n\n\tBET_AMOUNT_HISTORY = {\n\t\t# Streets history (current_amount: {BET_AVG_KEY: avg, BET_NUM_KEY: num, BET_TOT_KEY: total})\n\t\tSTREET_ZERO_CARD: {},\n\t\tSTREET_THREE_CARD: {},\n\t\tSTREET_FOUR_CARD: {}\n\t}\n\n\t# Hold card probability look up table\n\t# To search: self.PREFLOP_EXPECTED_VALUE[is_same_shape][lower_card_number][higher_card_number]\n\t\"\"\" TO BE FILLED HERE \"\"\"\n\tPREFLOP_EXPECTED_VALUE = {\n\t\t# Same shape (suited)\n\t\tTrue: {\n\t\t\t# Lower card number\n\t\t\t\"2\": {\n\t\t\t\t# Higher card number\n\t\t\t\t\"3\": -0.2803, \"4\": -0.2634 , \"5\": -0.2430 , \"6\": -0.2466 , \"7\": -0.2369 ,\n\t\t\t\t\"8\": -0.1946, \"9\": -0.1517 , \"T\": -0.1032 , \"J\": -0.0524 , \"Q\": 0.0034 ,\n\t\t\t\t\"K\": 0.0642 , \"A\": 0.1476\n\t\t\t},\n\t\t\t\"3\": {\n\t\t\t\t\"4\": -0.2272, \"5\": -0.2061, \"6\": -0.2093, \"7\": -0.1993, \"8\": -0.1825,\n\t\t\t\t\"9\": -0.1347, \"T\": -0.0861, \"J\": -0.0354, \"Q\": 0.0204, \"K\": 0.0811,\n\t\t\t\t\"A\": 0.1644\n\t\t\t},\n\t\t\t\"4\": {\n\t\t\t\t\"5\": -0.1709, \"6\": -0.1733, \"7\": -0.1630, \"8\": -0.1460, \"9\": -0.1228,\n\t\t\t\t\"T\": -0.0694, \"J\": -0.0186, \"Q\": 0.0371, \"K\": 0.0977, \"A\": 0.1807\n\t\t\t},\n\t\t\t\"5\": {\n\t\t\t\t\"6\": -0.1373, \"7\": -0.1265, \"8\": -0.1091, \"9\": -0.0856, \"T\": -0.0557,\n\t\t\t\t\"J\": -0.0003, \"Q\": 0.0554, \"K\": 0.1159, \"A\": 0.1985\n\t\t\t},\n\t\t\t\"6\": {\n\t\t\t\t\"7\": -0.0926, \"8\": -0.0751, \"9\": -0.0514, \"T\": -0.0212, \"Q\": 0.0723,\n\t\t\t\t\"J\": 0.0121, \"K\": 0.1328, \"A\": 0.1981\n\t\t\t},\n\t\t\t\"7\": {\n\t\t\t\t\"8\": -0.0413, \"9\": -0.0174, \"T\": 0.0128, \"J\": 0.0465, \"Q\": 0.0860,\n\t\t\t\t\"K\": 0.1580, \"A\": 0.2197\n\t\t\t},\n\t\t\t\"8\": {\n\t\t\t\t\"9\": 0.0016, \"T\": 0.0467, \"J\": 0.0803, \"Q\": 0.1204, \"K\": 0.1662,\n\t\t\t\t\"A\": 0.2389\n\t\t\t},\n\t\t\t\"9\": {\n\t\t\t\t\"T\": 0.0806, \"J\": 0.1132, \"Q\": 0.1533, \"K\": 0.1998, \"A\": 0.2556\n\t\t\t},\n\t\t\t\"T\": {\n\t\t\t\t\"J\": 0.1506, \"Q\": 0.1894, \"K\": 0.2358, \"A\": 0.2920\n\t\t\t},\n\t\t\t\"J\": { \n\t\t\t\t\"Q\": 0.2052, \"K\": 0.2513, \"A\": 0.3079\n\t\t\t},\n\t\t\t\"Q\": {\n\t\t\t\t\"K\": 0.2680, \"A\": 0.3242\n\t\t\t},\n\t\t\t\"K\": {\n\t\t\t\t\"A\": 0.3409\n\t\t\t}\n\t\t},\n\t\t# Different shape (unsuited)\n\t\tFalse: {\n\t\t\t# Lower card number\n\t\t\t\"2\": {\n\t\t\t\t# Higher card number\n\t\t\t\t\"2\": 0.0067 , 
\"3\": -0.3539 , \"4\": -0.3360 , \"5\": -0.3143 , \"6\": -0.3185 ,\n\t\t\t\t\"7\": -0.3083, \"8\": -0.2634 , \"9\": -0.2180 , \"T\": -0.1666 , \"J\": -0.1130 ,\n\t\t\t\t\"Q\": -0.0541, \"K\": 0.0102 , \"A\": 0.0986\n\t\t\t},\n\t\t\t\"3\": {\n\t\t\t\t\"3\": 0.0739, \"4\": -0.2971, \"5\": -0.2747, \"6\": -0.2784, \"7\": -0.2680,\n\t\t\t\t\"8\": -0.2503, \"9\": -0.1996, \"T\": -0.1481, \"J\": -0.0945, \"Q\": -0.0356,\n\t\t\t\t\"K\": -0.0285, \"A\": 0.1169\n\t\t\t},\n\t\t\t\"4\": {\n\t\t\t\t\"4\": 0.1405, \"5\": -0.2369, \"6\": -0.2398, \"7\": -0.2290, \"8\": -0.2111, \n\t\t\t\t\"9\": -0.1866, \"T\": -0.1299, \"J\": -0.0763, \"Q\": -0.0174, \"K\": 0.0465, \n\t\t\t\t\"A\": 0.1346\n\t\t\t},\n\t\t\t\"5\": {\n\t\t\t\t\"5\": 0.2065, \"6\": -0.2011, \"7\": -0.1898, \"8\": -0.1714, \"9\": -0.1466, \n\t\t\t\t\"T\": -0.1150, \"J\": -0.0564, \"Q\": 0.0024, \"K\": 0.0663, \"A\": 0.1539\n\t\t\t},\n\t\t\t\"6\": {\n\t\t\t\t\"6\": 0.2657, \"7\": -0.1535, \"8\": -0.1353, \"9\": -0.1102, \"T\": -0.0782, \n\t\t\t\t\"J\": -0.0431, \"Q\": 0.0205, \"K\": 0.0845, \"A\": 0.1536\n\t\t\t},\n\t\t\t\"7\": {\n\t\t\t\t\"7\": 0.3247, \"8\": -0.0990, \"9\": -0.0740, \"T\": -0.0418, \"J\": -0.0064, \n\t\t\t\t\"Q\": 0.0353, \"K\": 0.1037, \"A\": 0.1768 \n\t\t\t},\n\t\t\t\"8\": {\n\t\t\t\t\"8\": 0.3833, \"9\": -0.0381, \"T\": -0.0056, \"J\": 0.0298, \"Q\": 0.0720, \n\t\t\t\t\"K\": 0.1204, \"A\": 0.1975\n\t\t\t},\n\t\t\t\"9\": {\n\t\t\t\t\"9\": 0.4411, \"T\": 0.0306, \"J\": 0.0650, \"Q\": 0.1072, \"K\": 0.1562, \n\t\t\t\t\"A\": 0.2155\n\t\t\t},\n\t\t\t\"T\": {\n\t\t\t\t\"T\":0.5002, \"J\": 0.1050, \"Q\": 0.1458, \"K\": 0.1948, \"A\": 0.2544\n\t\t\t},\n\t\t\t\"J\": { \n\t\t\t\t\"J\": 0.5494, \"Q\": 0.1627, \"K\": 0.2114, \"A\": 0.2713\n\t\t\t},\n\t\t\t\"Q\": {\n\t\t\t\t\"Q\": 0.5985, \"K\": 0.2291, \"A\": 0.2886\n\t\t\t},\n\t\t\t\"K\": {\n\t\t\t\t\"K\": 0.6479, \"A\": 0.3064\n\t\t\t},\n\t\t\t\"A\": {\n\t\t\t\t\"A\": 0.7041\n\t\t\t}\n\t\t}\n\t}\n\n\n\tdef __init__(self):\n\t\tBasePokerPlayer.__init__(self)\n\t\t# To be re-initialized at the start of each game\n\t\t# Agent info\n\t\t# self.uuid is already initialized by the engine\n\t\tself.name = \"agent22\"\n\t\tself.seat_pos = 0\n\t\t# Rule info\n\t\tself.small_blind_amount = 0\n\t\tself.big_blind_amount = 0\n\t\tself.max_round = 1000\n\t\t# To be re-initialized at the start of each round\n\t\t# Round info\n\t\tself.round_count = 0\n\t\tself.big_blind_seat_pos = 0\n\t\tself.is_player_big_blind = True\n\t\tself.hole_card = []\n\t\tself.player_stack_at_start_of_round = 10000\n\t\tself.opponent_stack_at_start_of_round = 10000\n\t\tself.prev_outcome = 0\n\t\t# To be re-initialized at the start of each street\n\t\t# Street info\n\t\tself.street = self.STREET_ZERO_CARD\n\t\tself.is_start_of_street = True\n\t\tself.community_card = []\n\t\tself.player_bet_at_start_of_street = 0\n\t\tself.opponent_bet_at_start_of_street = 0\n\t\tself.bet_at_end_of_street = []\n\t\tself.remaining_raise_this_street = self.NUM_RAISE_PER_STREET\t#set to 4\n\t\tself.remaining_player_raise_this_round = self.NUM_RAISE_PER_ROUND_PER_PLAYER\t#set to 4\n\t\tself.remaining_opponent_raise_this_round = self.NUM_RAISE_PER_ROUND_PER_PLAYER\t#set to 4\n\t\tself.preflop_expected_value = 0.0\n\t\tself.winning_probability = 0.5\n\t\tself.opp_heuristic_weight = 0.0\n\t\t# To be re-initialized at the start of each update\n\t\t# Current info (will be reinitialize from game info)\n\t\tself.player_stack = 10000\n\t\tself.opponent_stack = 10000\n\t\tself.player_bet = 0 \n\t\tself.opponent_bet = 0\n\t\tself.last_action = 
{}\n\t\t# Pre-calculating and populating data for estimating the future raising amount\n\t\tself.avg_raise_amount_remaining_street = []\n\t\tself.pre_calculate_avg_raise_amount_remaining_street()\n\n\tdef declare_action(self, valid_actions, hole_card, round_state):\n\t\tcall_action_info = valid_actions[self.best_action(valid_actions)]\n\t\taction = call_action_info[\"action\"]\n\t\treturn action\t# action returned here is sent to the poker engine\n\n\tdef receive_game_start_message(self, game_info):\n\t\t# initialize game infomation\n\t\tself.small_blind_amount = game_info[\"rule\"][\"small_blind_amount\"]\n\t\tself.big_blind_amount = 2 * self.small_blind_amount\n\t\tself.max_round = game_info[\"rule\"][\"max_round\"]\n\t\t# initialize personal data\n\t\tfor i in range(0, len(game_info[\"seats\"])):\n\t\t\tif game_info[\"seats\"][i][\"uuid\"] == self.uuid:\n\t\t\t\tself.seat_pos = i\n\t\t\t\tself.name = game_info[\"seats\"][i][\"name\"]\n\t\t# initialize money stack\n\t\tself.player_stack_at_start_of_round = game_info[\"rule\"][\"initial_stack\"]\n\t\tself.opponent_stack_at_start_of_round = game_info[\"rule\"][\"initial_stack\"]\n\t\tself.player_stack = self.player_stack_at_start_of_round\n\t\tself.opponent_stack = self.opponent_stack_at_start_of_round\n\t\t# DEBUG\n\t\t# pprint.pprint(game_info)\n\t\t# print(\"-----GAME START-----\")\n\t\t# print(\"name: \" + str(self.name))\n\t\t# print(\"uuid: \" + str(self.uuid))\n\t\t# print(\"small_blind_amount: \" + str(self.small_blind_amount))\n\t\t# print(\"big_blind_amount: \" + str(self.big_blind_amount))\n\t\t# print(\"max_round: \" + str(self.max_round))\n\t\t# print(\"seat_pos: \" + str(self.seat_pos))\n\t\t# print(\"--------------------\")\n\t\t# pass\n\n\tdef receive_round_start_message(self, round_count, hole_card, seats):\n\t\t# Initialize round info\n\t\tself.round_count = round_count\n\t\tself.hole_card = list(hole_card)\n\t\tself.remaining_player_raise_this_round = self.NUM_RAISE_PER_ROUND_PER_PLAYER\n\t\tself.remaining_opponent_raise_this_round = self.NUM_RAISE_PER_ROUND_PER_PLAYER\n\t\t# Initialize last action\n\t\tself.last_action = {}\n\t\t# Initialize bet amount at end of street\n\t\tself.bet_at_end_of_street = []\n\t\t# Initialize for preflop street\n\t\tself.player_bet_at_start_of_street = 0\n\t\tself.opponent_bet_at_start_of_street = 0\n\t\tself.remaining_raise_this_street = self.NUM_RAISE_PER_STREET\n\t\t# Update stack and initialize small/big blind bet\n\t\tfor i in range(0, len(seats)):\n\t\t\tif seats[i][\"uuid\"] == self.uuid:\n\t\t\t\tself.player_stack = seats[i][\"stack\"]\n\t\t\t\tself.player_bet = self.player_stack_at_start_of_round - self.player_stack\n\t\t\t\t# big blind/all in is considered as a raise\n\t\t\t\tif self.player_bet > self.small_blind_amount:\n\t\t\t\t\tself.remaining_raise_this_street -= 1\n\t\t\t\t\tself.big_blind_seat_pos = i\n\t\t\telse:\n\t\t\t\tself.opponent_stack = seats[i][\"stack\"]\n\t\t\t\tself.opponent_bet = self.opponent_stack_at_start_of_round - self.opponent_stack\n\t\t\t\t# big blind/all in is considered as a raise\n\t\t\t\tif self.opponent_bet > self.small_blind_amount:\n\t\t\t\t\tself.remaining_raise_this_street -= 1\n\t\t\t\t\tself.big_blind_seat_pos = i\n\t\tself.is_player_big_blind = (self.seat_pos == self.big_blind_seat_pos)\n\t\t# DEBUG\n\t\t# pprint.pprint(hole_card)\n\t\t# pprint.pprint(seats)\n\t\t# print(\"-----ROUND START-----\")\n\t\t# print(\"round_count: \" + str(self.round_count))\n\t\t# print(\"big_blind_seat_pos: \" + str(self.big_blind_seat_pos))\n\t\t# 
print(\"hole_card: \")\n\t\t# pprint.pprint(self.hole_card)\n\t\t# print(\"player_stack_at_start_of_round: \" + str(self.player_stack_at_start_of_round))\n\t\t# print(\"opponent_stack_at_start_of_round: \" + str(self.opponent_stack_at_start_of_round))\n\t\t# print(\"---------------------\")\n\t\t# pass\n\n\tdef receive_street_start_message(self, street, round_state):\n\t\t# Initialize street info\n\t\tself.street = street\n\t\tself.is_start_of_street = True\n\t\tself.community_card = list(round_state[\"community_card\"])\n\t\t# Initialize for non-preflop street\n\t\tif self.street != self.STREET_ZERO_CARD:\n\t\t\tself.remaining_raise_this_street = self.NUM_RAISE_PER_STREET\n\t\t\tself.player_bet_at_start_of_street = self.player_bet\n\t\t\tself.opponent_bet_at_start_of_street = self.opponent_bet\n\t\t\tself.bet_at_end_of_street.append(self.player_bet_at_start_of_street)\n\t\t# Update stacks\n\t\tfor i in range(0, len(round_state[\"seats\"])):\n\t\t\tif round_state[\"seats\"][i][\"uuid\"] == self.uuid:\n\t\t\t\tself.player_stack = round_state[\"seats\"][i][\"stack\"]\n\t\t\telse:\n\t\t\t\tself.opponent_stack = round_state[\"seats\"][i][\"stack\"]\n\t\t# Recalculate winning probability\n\t\tself.re_calculate_probability()\n\t\t# DEBUG\n\t\t# pprint.pprint(round_state)\n\t\t# print(\"-----STREET START-----\")\n\t\t# print(\"street: \" + str(self.street))\n\t\t# print(\"raise_amount: \" + str(self.raise_amount(self.street)))\n\t\t# print(\"avg_raise_amount_remaining_street: \" \n\t\t#\t\t+ str(self.avg_raise_amount_remaining_street[self.STREET_INDEX_DICT[self.street]]))\n\t\t# pprint.pprint(self.community_card)\n\t\t# print(\"player_bet_at_start_of_street: \" + str(self.player_bet_at_start_of_street))\n\t\t# print(\"opponent_bet_at_start_of_street: \" + str(self.opponent_bet_at_start_of_street))\n\t\t# print(\"player_bet: \" + str(self.player_bet))\n\t\t# print(\"opponent_bet: \" + str(self.opponent_bet))\n\t\t# print(\"----------------------\")\n\t\t# pass\n\n\t# ENGINE BUG (RARE): Poker game might send update message 2 times\n\t# In order to update number of remaining raise while dealing with this bug,\n\t# we make use of the fact that a player cannot preform 2 identical raise action \n\t# subsequently withour letting the opponent do anything\n\tdef receive_game_update_message(self, action, round_state):\n\t\t# Check for repeating bug\n\t\tif ((action != self.last_action) or (self.is_start_of_street == True)):\n\t\t\t# Update bet amount\n\t\t\tif action[\"player_uuid\"] == self.uuid:\n\t\t\t\tif action[\"action\"] != self.FOLD:\n\t\t\t\t\tself.player_bet = self.player_bet_at_start_of_street + action[\"amount\"]\n\t\t\t\tif action[\"action\"] == self.RAISE:\n\t\t\t\t\tself.remaining_raise_this_street -= 1\n\t\t\t\t\tself.remaining_player_raise_this_round -= 1\n\t\t\telse:\n\t\t\t\tif action[\"action\"] != self.FOLD:\n\t\t\t\t\tself.opponent_bet = self.opponent_bet_at_start_of_street + action[\"amount\"]\n\t\t\t\tif action[\"action\"] == self.RAISE:\n\t\t\t\t\tself.remaining_raise_this_street -= 1\n\t\t\t\t\tself.remaining_opponent_raise_this_round -= 1\n\t\t\t# Update stacks\n\t\t\tfor i in range(0, len(round_state[\"seats\"])):\n\t\t\t\tif round_state[\"seats\"][i][\"uuid\"] == self.uuid:\n\t\t\t\t\tself.player_stack = round_state[\"seats\"][i][\"stack\"]\n\t\t\t\telse:\n\t\t\t\t\tself.opponent_stack = round_state[\"seats\"][i][\"stack\"]\n\t\t\t# An action has been preformed since the start of the street\n\t\t\tself.is_start_of_street = False\n\t\t# Update last action\n\t\tself.last_action = 
dict(action)\n\t\t# DEBUG\n\t\t# pprint.pprint(action)\n\t\t# pprint.pprint(round_state)\n\t\t# print(\"-----GAME UPDATE-----\")\n\t\t# print(\"player_bet: \" + str(self.player_bet))\n\t\t# print(\"opponent_bet: \" + str(self.opponent_bet))\n\t\t# print(str(self.player_stack))\n\t\t# print(\"opponent_stack: \" + str(self.opponent_stack))\n\t\t# print(\"remaining_raise_this_street: \" + str(self.remaining_raise_this_street))\n\t\t# print(\"remaining_player_raise_this_round: \" + str(self.remaining_player_raise_this_round))\n\t\t# print(\"remaining_opponent_raise_this_round: \" + str(self.remaining_opponent_raise_this_round))\n\t\t# print(\"---------------------\")\n\t\t# pass\n\n\tdef receive_round_result_message(self, winners, hand_info, round_state):\n\t\t# Update bet amount history\n\t\tif (self.last_action[\"action\"] != self.FOLD):\n\t\t\tfinal_bet = self.player_bet\n\t\t\tif (len(self.bet_at_end_of_street) == (self.NUM_STREET_PER_ROUND - 1)):\n\t\t\t\tfor i in range(0, self.NUM_STREET_PER_ROUND - 1):\n\t\t\t\t\tstreet = self.LIST_OF_STREET[i]\n\t\t\t\t\tbet = self.bet_at_end_of_street[i]\n\t\t\t\t\tif bet in self.BET_AMOUNT_HISTORY[street].keys():\n\t\t\t\t\t\tself.BET_AMOUNT_HISTORY[street][bet][self.BET_TOT_KEY] += final_bet\n\t\t\t\t\t\tself.BET_AMOUNT_HISTORY[street][bet][self.BET_NUM_KEY] += 1\n\t\t\t\t\t\tself.BET_AMOUNT_HISTORY[street][bet][self.BET_AVG_KEY] = (\n\t\t\t\t\t\t\tself.BET_AMOUNT_HISTORY[street][bet][self.BET_TOT_KEY]\n\t\t\t\t\t\t\t/ float(self.BET_AMOUNT_HISTORY[street][bet][self.BET_NUM_KEY]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.BET_AMOUNT_HISTORY[street][bet] = {\n\t\t\t\t\t\t\tself.BET_AVG_KEY: final_bet,\n\t\t\t\t\t\t\tself.BET_NUM_KEY: 1,\n\t\t\t\t\t\t\tself.BET_TOT_KEY: final_bet\n\t\t\t\t\t\t}\n\n\t\t# Update stacks\n\t\tfor i in range(0, len(round_state[\"seats\"])):\n\t\t\tif round_state[\"seats\"][i][\"uuid\"] == self.uuid:\n\t\t\t\tself.player_stack = round_state[\"seats\"][i][\"stack\"]\n\t\t\telse:\n\t\t\t\tself.opponent_stack = round_state[\"seats\"][i][\"stack\"]\n\n\t\tself.prev_outcome = self.player_stack - self.player_stack_at_start_of_round\n\t\tself.player_stack_at_start_of_round = self.player_stack\n\t\tself.opponent_stack_at_start_of_round = self.opponent_stack\n\n\t\thas_won = (winners[0][\"name\"] == self.name)\n\t\t\n\t\t# We can only evaluate how powerful the opponent is given their action if we do not fold\n\t\tif not ((has_won == False) and (self.last_action[\"action\"] == self.FOLD)):\n\t\t\tnum_raises = [0, 0, 0, 0]\n\t\t\tfor street in round_state[\"action_histories\"].keys():\n\t\t\t\t# Calculate number of raise per street\n\t\t\t\tfor turn in round_state[\"action_histories\"][street]:\n\t\t\t\t\tif turn[\"action\"] == \"RAISE\" and turn[\"uuid\"] != self.uuid:\n\t\t\t\t\t\tnum_raises[self.STREET_INDEX_DICT[street]] += 1\n\n\t\t\t# Calculate number of raise by the end of certain street\n\t\t\tfor i in range(self.NUM_STREET_PER_ROUND - 1, 0, -1):\n\t\t\t\tfor j in range(0, i):\n\t\t\t\t\tnum_raises[i] += num_raises[j]\n\n\t\t\tfor street in self.LIST_OF_STREET:\n\t\t\t\tnum_opp_raises = num_raises[self.STREET_INDEX_DICT[street]]\n\t\t\t\tself.RAISE_HISTORY[has_won][self.is_player_big_blind][street][num_opp_raises] += 1\n\n\t\t\t# Note: it can be mathematically proven that we only need to update \n\t\t\t# entries with number of raises by the end of each street of this round\n\t\t\tfor street in self.WIN_RATES_FROM_RAISE_HISTORY.keys():\n\t\t\t\tif street in round_state[\"action_histories\"].keys():\n\t\t\t\t\traises = 
num_raises[self.STREET_INDEX_DICT[street]]\n\t\t\t\t\tself.WIN_RATES_FROM_RAISE_HISTORY[street][raises] = (\n\t\t\t\t\t\tself.win_chance_from_raise_history(street, raises))\n\n\t\t# DEBUG\n\t\t# print(self.RAISE_HISTORY)\n\t\t# pprint.pprint(winners)\n\t\t# pprint.pprint(hand_info)\n\t\t# pprint.pprint(round_state)\n\t\t# print(\"-----ROUND RESULT-----\")\n\t\tprint(str(self.player_stack - 10000))\n\t\t# print(\"opponent_stack: \" + str(self.opponent_stack))\n\t\t# print(\"----------------------\")\n\t\t# pass\n\n\tdef raise_amount(self, street):\n\t\t# Default result: RAISE_AMOUNT_PREFLOP\n\t\tresult = self.RAISE_AMOUNT_DICT.get(street, self.RAISE_AMOUNT_PREFLOP)\n\t\treturn result\n\n\t# First call of the heuristic minimax search, return best action index\n\t# Special case: call in the very first turn of the street will not end the street\n\tdef best_action(self, valid_actions):\n\t\tbet_diff = self.opponent_bet - self.player_bet\n\t\t# Initialize with fold action\n\t\tbest_outcome = (-1) * self.player_bet\n\t\tbest_action_index = self.FOLD_INDEX\n\t\t# Check call action\n\t\tcall_outcome = 0\n\t\tif self.is_start_of_street == True:\n\t\t\tcall_outcome = self.heuristic_minimax(\t#player calls, opponent turn\n\t\t\t\t\t\t\t\tself.OPPONENT_TURN,\n\t\t\t\t\t\t\t\tself.opponent_bet,\n\t\t\t\t\t\t\t\tself.opponent_bet,\n\t\t\t\t\t\t\t\tself.player_stack - bet_diff,\n\t\t\t\t\t\t\t\tself.opponent_stack,\n\t\t\t\t\t\t\t\tself.remaining_player_raise_this_round,\n\t\t\t\t\t\t\t\tself.remaining_opponent_raise_this_round,\n\t\t\t\t\t\t\t\tself.remaining_raise_this_street,\n\t\t\t\t\t\t\t\tbest_outcome,\n\t\t\t\t\t\t\t\tself.POS_INF)\n\t\telse:\n\t\t\tcall_outcome = self.expected_outcome(\t#player calls, street ends\n\t\t\t\t\t\t\t\tself.opponent_bet,\n\t\t\t\t\t\t\t\tself.player_stack - bet_diff,\n\t\t\t\t\t\t\t\tself.opponent_stack,\n\t\t\t\t\t\t\t\tself.remaining_player_raise_this_round,\n\t\t\t\t\t\t\t\tself.remaining_opponent_raise_this_round)\n\t\tif call_outcome >= best_outcome:\n\t\t\tbest_outcome = call_outcome\n\t\t\tbest_action_index = self.CALL_INDEX\n\t\t# Check raise action\n\t\tif len(valid_actions) == 3:\n\t\t\traise_outcome = 0\n\t\t\traise_amount_this_street = self.raise_amount(self.street)\n\t\t\tplayer_has_enough_money = (self.player_stack >= (bet_diff + raise_amount_this_street))\t#flag\n\t\t\topponent_has_enough_money = (self.opponent_stack >= raise_amount_this_street)\t#flag\n\t\t\tif (player_has_enough_money and opponent_has_enough_money):\n\t\t\t\traise_outcome = self.heuristic_minimax(\t#player raises\n\t\t\t\t\t\t\t\t\tself.OPPONENT_TURN,\n\t\t\t\t\t\t\t\t\tself.opponent_bet + raise_amount_this_street,\n\t\t\t\t\t\t\t\t\tself.opponent_bet,\n\t\t\t\t\t\t\t\t\tself.player_stack - bet_diff - raise_amount_this_street,\n\t\t\t\t\t\t\t\t\tself.opponent_stack,\n\t\t\t\t\t\t\t\t\tself.remaining_player_raise_this_round - 1,\n\t\t\t\t\t\t\t\t\tself.remaining_opponent_raise_this_round,\n\t\t\t\t\t\t\t\t\tself.remaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\tbest_outcome,\n\t\t\t\t\t\t\t\t\tself.POS_INF)\n\t\t\telse:\n\t\t\t\tlast_raise_amount = min(self.player_stack - bet_diff, self.opponent_stack)\n\t\t\t\traise_outcome = self.heuristic_minimax(\t#player raises to the highest possible remaining amount\n\t\t\t\t\t\t\t\t\tself.OPPONENT_TURN,\n\t\t\t\t\t\t\t\t\tself.opponent_bet + last_raise_amount,\n\t\t\t\t\t\t\t\t\tself.opponent_bet,\n\t\t\t\t\t\t\t\t\tself.player_stack - bet_diff - 
last_raise_amount,\n\t\t\t\t\t\t\t\t\tself.opponent_stack,\n\t\t\t\t\t\t\t\t\tself.remaining_player_raise_this_round - 1,\n\t\t\t\t\t\t\t\t\tself.remaining_opponent_raise_this_round,\n\t\t\t\t\t\t\t\t\tself.remaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\tbest_outcome,\n\t\t\t\t\t\t\t\t\tself.POS_INF)\n\t\t\tif raise_outcome >= best_outcome:\n\t\t\t\tbest_outcome = raise_outcome\n\t\t\t\tbest_action_index = self.RAISE_INDEX\n\t\treturn best_action_index\n\n\t# Alpha-beta prunning heuristic minimax algorithm\n\t# Special case in the first turn of the street is already checked in best_action\n\tdef heuristic_minimax(self,\n\t\t\t\t\t\tplayer_turn,\n\t\t\t\t\t\tplayer_bet,\n\t\t\t\t\t\topponent_bet,\n\t\t\t\t\t\tplayer_stack,\n\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\tremaining_opponent_raise_this_round,\n\t\t\t\t\t\tremaining_raise_this_street,\n\t\t\t\t\t\talpha,\n\t\t\t\t\t\tbeta\n\t\t\t\t\t\t):\n\t\tbest_outcome = 0\n\t\tcall_outcome = 0\n\t\traise_outcome = 0\n\t\traise_amount_this_street = self.raise_amount(self.street)\n\t\t# Max player\n\t\tif player_turn == self.PLAYER_TURN: \t# if it is player's turn\n\t\t\tbest_outcome = (-1) * player_bet \t# initialize with folding\n\t\t\tif (best_outcome >= beta):\n\t\t\t\treturn best_outcome\t\t\t\t# prunning\n\t\t\tbet_diff = opponent_bet - player_bet\n\t\t\t# Check call outcome\n\t\t\tcall_outcome = self.expected_outcome(\n\t\t\t\t\t\t\t\topponent_bet,\n\t\t\t\t\t\t\t\tplayer_stack - bet_diff,\n\t\t\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round)\n\t\t\tif call_outcome >= best_outcome:\n\t\t\t\tbest_outcome = call_outcome\n\t\t\tif (best_outcome >= beta):\n\t\t\t\treturn best_outcome\t\t\t\t# prunning\n\t\t\t# Check raise outcome\n\t\t\tif remaining_raise_this_street > 0:\t#make sure there is eligible number of raises left in street\n\t\t\t\tif remaining_player_raise_this_round > 0:\t#check if player is still eligible to raise\n\t\t\t\t\tif ((player_stack > bet_diff) and (opponent_stack > 0)):\n\t\t\t\t\t\tplayer_has_enough_money = (player_stack >= (bet_diff + raise_amount_this_street))\n\t\t\t\t\t\topponent_has_enough_money = (opponent_stack >= raise_amount_this_street)\n\t\t\t\t\t\tif (player_has_enough_money and opponent_has_enough_money):\n\t\t\t\t\t\t\traise_outcome = self.heuristic_minimax(\t#recursive miniMax\n\t\t\t\t\t\t\t\t\t\t\t\tself.OPPONENT_TURN,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_bet + raise_amount_this_street,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_bet,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_stack - bet_diff - raise_amount_this_street,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_player_raise_this_round - 1,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\t\t\t\tmax(alpha, best_outcome),\n\t\t\t\t\t\t\t\t\t\t\t\tbeta)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlast_raise_amount = min(player_stack - bet_diff, opponent_stack)\n\t\t\t\t\t\t\traise_outcome = self.heuristic_minimax(\t#recursive miniMax\n\t\t\t\t\t\t\t\t\t\t\t\tself.OPPONENT_TURN,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_bet + last_raise_amount,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_bet,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_stack - bet_diff - last_raise_amount,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_player_raise_this_round - 
1,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\t\t\t\tmax(alpha, best_outcome),\n\t\t\t\t\t\t\t\t\t\t\t\tbeta)\n\t\t\t\t\t\tif raise_outcome >= best_outcome:\n\t\t\t\t\t\t\tbest_outcome = raise_outcome\n\t\t# Min player\n\t\telse:\n\t\t\tbest_outcome = opponent_bet\n\t\t\tif (best_outcome <= alpha):\n\t\t\t\treturn best_outcome\t\t\t\t# prunning\n\t\t\tbet_diff = player_bet - opponent_bet\n\t\t\t# Check call outcome\n\t\t\tcall_outcome = self.expected_outcome(\n\t\t\t\t\t\t\t\tplayer_bet,\n\t\t\t\t\t\t\t\tplayer_stack,\n\t\t\t\t\t\t\t\topponent_stack - bet_diff,\n\t\t\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round)\n\t\t\tif call_outcome <= best_outcome:\n\t\t\t\tbest_outcome = call_outcome\n\t\t\tif (best_outcome <= alpha):\n\t\t\t\treturn best_outcome\t\t\t\t# prunning\n\t\t\t# Check raise outcome\n\t\t\tif remaining_raise_this_street > 0:\n\t\t\t\tif remaining_opponent_raise_this_round > 0:\n\t\t\t\t\tif ((opponent_stack > bet_diff) and (player_stack > 0)):\n\t\t\t\t\t\topponent_has_enough_money = (opponent_stack >= (bet_diff + raise_amount_this_street))\n\t\t\t\t\t\tplayer_has_enough_money = (player_stack >= raise_amount_this_street)\n\t\t\t\t\t\tif (player_has_enough_money and opponent_has_enough_money):\n\t\t\t\t\t\t\traise_outcome = self.heuristic_minimax(\n\t\t\t\t\t\t\t\t\t\t\t\tself.PLAYER_TURN,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_bet,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_bet + raise_amount_this_street,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_stack,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_stack - bet_diff - raise_amount_this_street,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round - 1,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\t\t\t\talpha,\n\t\t\t\t\t\t\t\t\t\t\t\tmin(beta, best_outcome))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlast_raise_amount = min(opponent_stack - bet_diff, player_stack)\n\t\t\t\t\t\t\traise_outcome = self.heuristic_minimax(\n\t\t\t\t\t\t\t\t\t\t\t\tself.PLAYER_TURN,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_bet,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_bet + last_raise_amount,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_stack,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_stack - bet_diff - last_raise_amount,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round - 1,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\t\t\t\talpha,\n\t\t\t\t\t\t\t\t\t\t\t\tmin(beta, best_outcome))\n\t\t\t\t\t\tif raise_outcome <= best_outcome:\n\t\t\t\t\t\t\tbest_outcome = raise_outcome\n\t\treturn best_outcome\n\n\tdef expected_outcome(self, \n\t\t\t\t\t\tbet_amount,\n\t\t\t\t\t\tplayer_stack, \n\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\tremaining_player_raise_this_round, \n\t\t\t\t\t\tremaining_opponent_raise_this_round):\n\t\tnum_opponent_raise = self.NUM_RAISE_PER_ROUND_PER_PLAYER - remaining_opponent_raise_this_round\n\t\tif self.street == self.STREET_FIVE_CARD:\n\t\t\texpected_bet = bet_amount\n\t\telse:\n\t\t\tif ((self.round_count > self.MIN_NUM_DATA_COLLECTED)\n\t\t\t\tand (bet_amount in self.BET_AMOUNT_HISTORY[self.street].keys())):\n\t\t\t\texpected_bet = self.BET_AMOUNT_HISTORY[self.street][bet_amount][self.BET_AVG_KEY]\n\t\t\telse:\n\t\t\t\texpected_bet = self.naive_expected_bet_amount(\n\t\t\t\t\t\t\t\t\tbet_amount, \n\t\t\t\t\t\t\t\t\tplayer_stack, 
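heuristic_minimax above is an alpha-beta search specialized to the betting tree: the maximizing (player) branch prunes as soon as its best outcome reaches beta, and the minimizing (opponent) branch prunes once it reaches alpha. The same control flow in a generic, self-contained form (the toy tree and values are made up for illustration):

```python
# Leaves are numbers (heuristic values); inner nodes are lists of children.
NEG_INF, POS_INF = float("-inf"), float("inf")

def alphabeta(node, maximizing, alpha, beta):
    if isinstance(node, (int, float)):          # leaf: return its value
        return node
    if maximizing:
        best = NEG_INF
        for child in node:
            best = max(best, alphabeta(child, False, alpha, beta))
            alpha = max(alpha, best)
            if best >= beta:                    # opponent never allows this line
                break
        return best
    best = POS_INF
    for child in node:
        best = min(best, alphabeta(child, True, alpha, beta))
        beta = min(beta, best)
        if best <= alpha:                       # we already have a better option
            break
    return best

tree = [[3, 5], [2, [9, 1]], [0, 4]]
print(alphabeta(tree, True, NEG_INF, POS_INF))  # 3
```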
\n\t\t\t\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round)\n\t\texpected_value = self.evaluate_value(expected_bet, num_opponent_raise)\n\t\treturn expected_value\n\n\tdef naive_expected_bet_amount(self, \n\t\t\t\t\t\t\t\tbet_amount, \n\t\t\t\t\t\t\t\tplayer_stack, \n\t\t\t\t\t\t\t\topponent_stack, \n\t\t\t\t\t\t\t\tremaining_player_raise_this_round, \n\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round):\n\t\tnum_player_raise = self.NUM_RAISE_PER_ROUND_PER_PLAYER - remaining_player_raise_this_round\n\t\tnum_opponent_raise = self.NUM_RAISE_PER_ROUND_PER_PLAYER - remaining_opponent_raise_this_round\n\t\tstreet_index = self.STREET_INDEX_DICT[self.street]\n\t\tnum_street = street_index + 1\n\t\tnum_remaining_street = self.NUM_STREET_PER_ROUND - num_street\n\t\tavg_raise_amount_remaining_street = self.avg_raise_amount_remaining_street[street_index]\n\t\texpected_num_player_future_raise = min(remaining_player_raise_this_round, \n\t\t\t\t\t\t\t\t\t\t\t\tfloat(num_player_raise) / num_street * num_remaining_street)\n\t\texpected_num_opponent_future_raise = min(remaining_opponent_raise_this_round, \n\t\t\t\t\t\t\t\t\t\t\t\tfloat(num_opponent_raise) / num_street * num_remaining_street)\n\t\tremaining_possible_total_raise = num_remaining_street * self.NUM_RAISE_PER_STREET\n\t\texpected_increase_bet = min(player_stack, \n\t\t\t\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\t\t\t\t((expected_num_player_future_raise + expected_num_opponent_future_raise) \n\t\t\t\t\t\t\t\t\t\t* avg_raise_amount_remaining_street),\n\t\t\t\t\t\t\t\t\tremaining_possible_total_raise * avg_raise_amount_remaining_street)\n\t\texpected_bet = bet_amount + expected_increase_bet\n\t\treturn expected_bet\n\n\tdef evaluate_value(self, bet_amount, num_opponent_raise):\n\t\tfirst_card = self.hole_card[0]\n\t\tsecond_card = self.hole_card[1]\n\t\tvalue = 0\n\t\tif self.street == self.STREET_ZERO_CARD:\n\t\t\tcard_heuristic = bet_amount * self.preflop_expected_value\n\t\t\t# card_heuristic = bet_amount * (2 * self.winning_probability - 1)\n\t\telse:\t#not in PREFLOP\n\t\t\t# E = P(W) * B - (1 - P(W)) * B\n\t\t\tcard_heuristic = bet_amount * (2 * self.winning_probability - 1)\n\t\twin_rates_from_raise_history = self.WIN_RATES_FROM_RAISE_HISTORY[self.street][num_opponent_raise]\n\t\topp_heuristic = bet_amount * (2 * win_rates_from_raise_history - 1)\n\t\tif ((win_rates_from_raise_history >= 0.0) \n\t\t\tand (win_rates_from_raise_history <= 1.0) \n\t\t\tand (self.round_count > self.MIN_NUM_DATA_COLLECTED)):\n\t\t\tvalue = (1 - self.opp_heuristic_weight) * card_heuristic + self.opp_heuristic_weight * opp_heuristic\n\t\telse:\n\t\t\tvalue = card_heuristic\n\t\treturn value\n\n\tdef re_calculate_probability(self):\n\t\t# if in PREFLOP, we check against expected value and reverse the equation \n\t\tif self.street == self.STREET_ZERO_CARD:\n\t\t\tfirst_card = self.hole_card[0]\n\t\t\tsecond_card = self.hole_card[1]\n\t\t\t\n\t\t\tif self.CARD_NUM_DICT[first_card[1]] > self.CARD_NUM_DICT[second_card[1]]: #check number\n\t\t\t\tlower_card_number = second_card[1]\n\t\t\t\thigher_card_number = first_card[1]\n\t\t\telse:\n\t\t\t\tlower_card_number = first_card[1]\n\t\t\t\thigher_card_number = second_card[1]\n\t\t\tif first_card[0] == second_card[0]: #check same shape\n\t\t\t\tis_same_shape = True\n\t\t\telse:\n\t\t\t\tis_same_shape = False\n\t\t\t#reverse engineer equation, 2*Pr(win) = (Expected Value Per Bet) + 1\n\t\t\tself.preflop_expected_value = 
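evaluate_value relies on the identity noted in its comment: for a symmetric bet of size B won with probability p, E = p*B - (1 - p)*B, which simplifies to B(2p - 1). A two-line check of that arithmetic:

```python
def expected_profit(bet, p_win):
    # E = p*B - (1 - p)*B  ==  B * (2p - 1)
    return bet * (2.0 * p_win - 1.0)

assert expected_profit(100, 0.5) == 0.0    # a fair bet breaks even
assert expected_profit(100, 0.75) == 50.0  # 75 won on average, 25 lost
```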
(\n\t\t\t\tself.PREFLOP_EXPECTED_VALUE[is_same_shape][lower_card_number][higher_card_number])\n\t\t\tself.winning_probability = (self.preflop_expected_value + 1) / 2\n\n\t\t#when not in PREFLOP\n\t\telse:\n\t\t\tself.winning_probability = estimate_hole_card_win_rate(\n\t\t\t\t\t\t\t\t\t\t\tnb_simulation = self.NB_SIMULATION,\n\t\t\t\t\t\t\t\t\t\t\tnb_player = 2,\n\t\t\t\t\t\t\t\t\t\t\thole_card = gen_cards(list(self.hole_card)),\n\t\t\t\t\t\t\t\t\t\t\tcommunity_card = gen_cards(list(self.community_card)))\n\n\tdef pre_calculate_avg_raise_amount_remaining_street(self):\n\t\ttotal_raise_amount_value = 0\n\t\tfor i in range(0, self.NUM_STREET_PER_ROUND):\t#calculating raise amount sum of all 4 streets\n\t\t\ttotal_raise_amount_value += self.RAISE_AMOUNT_DICT[i]\n\t\tnum_remaining_street = self.NUM_STREET_PER_ROUND\t#set to 4\n\t\tfor i in range(0, self.NUM_STREET_PER_ROUND):\t#calculate avg remaining raise for each street\n\t\t\ttotal_raise_amount_value -= self.RAISE_AMOUNT_DICT[i]\n\t\t\tnum_remaining_street -= 1\n\t\t\tavg_raise_value = 0\n\t\t\tif num_remaining_street != 0:\n\t\t\t\tavg_raise_value = float(total_raise_amount_value) / num_remaining_street\n\t\t\tself.avg_raise_amount_remaining_street.append(avg_raise_value)\n\n\tdef rounds_won(self, street_name):\n\t\twon = 0\n\t\tfor x in self.RAISE_HISTORY[True][self.is_player_big_blind][street_name]:\n\t\t\twon += x\n\t\treturn won\n\n\tdef rounds_lost(self, street_name):\n\t\tlost = 0\n\t\tfor x in self.RAISE_HISTORY[False][self.is_player_big_blind][street_name]:\n\t\t\tlost += x\n\t\treturn lost\n\n\tdef rounds_with_specific_raises(self, street_name, num_raises):\n\t\tresult = (self.RAISE_HISTORY[False][self.is_player_big_blind][street_name][num_raises] \n\t\t\t\t\t+ self.RAISE_HISTORY[True][self.is_player_big_blind][street_name][num_raises])\n\t\treturn result\n \n\t# Use definition of conditional probability to calculate prob of winning given the num of raises made by opponent\n\t# Return a probability between 0.0 and 1.0 if success, and return -1.0 if cannot compute\n\tdef win_chance_from_raise_history(self, street, num_raises):\n\t\tnum_wins = self.rounds_won(street)\n\t\tnum_lost = self.rounds_lost(street)\n\t\tprob_player_win_given_opp_raises = -1.0\t#sentinel value; the caller falls back to the card winning probability when this cannot be computed\n\t\tif (num_wins + num_lost != 0):\n\t\t\tprob_cur_opp_raises_and_player_win = (self.RAISE_HISTORY[True][self.is_player_big_blind][street][num_raises] \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t/ float(num_wins + num_lost))\n\t\t\tprob_opp_raises = self.rounds_with_specific_raises(street, num_raises) / float(num_wins + num_lost)\n\t\t\tif prob_opp_raises != 0:\n\t\t\t\tprob_player_win_given_opp_raises = prob_cur_opp_raises_and_player_win / prob_opp_raises\n\t\treturn prob_player_win_given_opp_raises\n\ndef setup_ai():\n\treturn Honest22Player()\n","sub_path":"honest22player.py","file_name":"honest22player.py","file_ext":"py","file_size_in_byte":32960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"306589208","text":"#!/usr/bin/env python3\n\n\"\"\"\nCreated on 14 Mar 2019\n\n@author: Bruno Beloff (bruno.beloff@southcoastscience.com)\n\nsource repo: scs_airnow\n\nDESCRIPTION\nThe airnow_downloader utility is used to download the environment and status data recorded for an AirNow\nmapping task over a given period, aggregate both streams, and join them into a single CSV file.\n\nSYNOPSIS\nairnow_downloader.py -t ORG GROUP LOC TOPIC -s START -e END [-d DIR] [-v]\n\nEXAMPLES\n./airnow_downloader.py -t unep ethiopia 1 particulates -s 2019-03-20T00:00:00Z -e 2019-03-21T00:00:00Z -d data -v\n\nSEE 
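win_chance_from_raise_history applies the definition of conditional probability to the tallies: P(win | k opponent raises) = P(win and k raises) / P(k raises). With raw counts the shared normalization cancels, which this toy version makes explicit (the data is made up):

```python
def p_win_given_raises(wins_by_k, losses_by_k, k):
    """P(win | k raises) from per-raise-count win/loss tallies."""
    total = sum(wins_by_k) + sum(losses_by_k)
    if total == 0:
        return -1.0                                  # no data yet, mirror the sentinel above
    p_joint = wins_by_k[k] / float(total)            # P(win and k raises)
    p_raises = (wins_by_k[k] + losses_by_k[k]) / float(total)
    return p_joint / p_raises if p_raises else -1.0

wins   = [4, 2, 1]   # rounds won with 0, 1, 2 opponent raises
losses = [1, 3, 2]
print(p_win_given_raises(wins, losses, 1))           # 2 / (2 + 3) = 0.4
```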
ALSO\nscs_analysis/aqcsv_mapper\nscs_analysis/aqcsv_task_manager\n\"\"\"\n\nimport os\nimport sys\n\nfrom scs_airnow.cmd.cmd_airnow_downloader import CmdAirNowDownloader\nfrom scs_airnow.helper.airnow_availability import AirNowAvailability\n\nfrom scs_core.aqcsv.connector.airnow_mapping_task import AirNowMappingTaskList\n\nfrom scs_core.data.datum import Datum\n\nfrom scs_core.sys.filesystem import Filesystem\nfrom scs_core.sys.subprocess import Pipe\n\nfrom scs_host.sys.host import Host\n\n\n# TODO: fix the issue of locality for external scripts\n\n# --------------------------------------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\n # ----------------------------------------------------------------------------------------------------------------\n # cmd...\n\n cmd = CmdAirNowDownloader()\n\n if not cmd.is_valid_start():\n print(\"airnow_downloader: invalid format for start datetime.\", file=sys.stderr)\n exit(2)\n\n if not cmd.is_valid_end():\n print(\"airnow_downloader: invalid format for end datetime.\", file=sys.stderr)\n exit(2)\n\n if not Datum.is_numeric(cmd.task_loc):\n print(\"airnow_downloader: the loc value %s should be an integer.\" % cmd.task_loc, file=sys.stderr)\n exit(2)\n\n if not cmd.is_valid():\n cmd.print_help(sys.stderr)\n exit(2)\n\n if cmd.verbose:\n print(\"airnow_downloader: %s\" % cmd, file=sys.stderr)\n sys.stderr.flush()\n\n\n try:\n # ------------------------------------------------------------------------------------------------------------\n # resources...\n\n # MappingTask...\n tasks = AirNowMappingTaskList.load(Host)\n task = tasks.item((cmd.task_org, cmd.task_group, int(cmd.task_loc), cmd.task_topic))\n\n if task is None:\n print(\"airnow_downloader: task not found.\", file=sys.stderr)\n exit(1)\n\n if cmd.verbose:\n print(\"airnow_downloader: %s\" % task, file=sys.stderr)\n sys.stderr.flush()\n\n # files...\n task_prefix = task.file_prefix()\n dir_name = task.site_code if cmd.dir is None else os.path.join(cmd.dir, task.site_code)\n file_prefix = task_prefix if cmd.file_prefix is None else cmd.file_prefix\n\n file_path = os.path.join(dir_name, file_prefix)\n\n if cmd.verbose:\n print(\"airnow_downloader: file group: %s\" % file_path, file=sys.stderr)\n sys.stderr.flush()\n\n\n # ------------------------------------------------------------------------------------------------------------\n # validation...\n\n # datetimes...\n start = cmd.start.as_iso8601()\n end = cmd.end.as_iso8601()\n\n # data availability...\n if cmd.check:\n result = AirNowAvailability.check(\"airnow_downloader\", task, cmd.end, cmd.verbose)\n\n if result != AirNowAvailability.OK:\n exit(result)\n\n\n # ------------------------------------------------------------------------------------------------------------\n # run: directories...\n\n if cmd.verbose:\n print(\"airnow_downloader: making directories...\", end='', file=sys.stderr)\n sys.stderr.flush()\n\n Filesystem.mkdir(dir_name)\n\n if cmd.verbose:\n print(\"done.\", file=sys.stderr)\n\n\n # ------------------------------------------------------------------------------------------------------------\n # run: env download...\n\n env_filename = file_path + '-' + task.topic + '.csv'\n\n if cmd.verbose:\n print(\"airnow_downloader: downloading %s data...\" % task.topic, end='', file=sys.stderr)\n sys.stderr.flush()\n\n p = Pipe(('./aws_topic_history.py', task.environment_path(), '-s', start, '-e', end),\n ['./node.py', 'rec', 'tag', 'src'] + ['val.' 
+ param for param in task.parameters],\n ('./sample_aggregate.py', '-c', task.checkpoint),\n ('./csv_writer.py', env_filename))\n\n return_code = p.wait()\n\n if return_code > 0:\n print(\"airnow_downloader: %s download failed with exit code %s.\" % (task.topic, return_code),\n file=sys.stderr)\n exit(return_code)\n\n if cmd.verbose:\n print(\"done.\", file=sys.stderr)\n\n\n # ------------------------------------------------------------------------------------------------------------\n # run: status download...\n\n status_filename = file_path + '-status.csv'\n\n if cmd.verbose:\n print(\"airnow_downloader: downloading status data...\", end='', file=sys.stderr)\n sys.stderr.flush()\n\n p = Pipe(('./aws_topic_history.py', task.status_path(), '-s', start, '-e', end),\n ('./node.py', 'rec', 'tag', 'val.tz', 'val.sch', 'val.gps', 'val.airnow'),\n ('./sample_aggregate.py', '-c', task.checkpoint),\n ('./csv_writer.py', status_filename))\n\n return_code = p.wait()\n\n if return_code > 0:\n print(\"airnow_downloader: status download failed with exit code %s.\" % return_code, file=sys.stderr)\n exit(return_code)\n\n if cmd.verbose:\n print(\"done.\", file=sys.stderr)\n\n\n # ------------------------------------------------------------------------------------------------------------\n # run: join...\n\n joined_filename = file_path + '-joined.csv'\n\n if cmd.verbose:\n print(\"airnow_downloader: joining...\", end='', file=sys.stderr)\n sys.stderr.flush()\n\n p = Pipe(('./csv_join.py', '-i', '-l', task.topic, 'rec', env_filename, '-r', 'status', 'rec', status_filename),\n ('./csv_writer.py', joined_filename))\n\n return_code = p.wait()\n\n if return_code > 0:\n print(\"airnow_downloader: join failed with exit code %s.\" % return_code, file=sys.stderr)\n exit(return_code)\n\n if cmd.verbose:\n print(\"done.\", file=sys.stderr)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n # end...\n\n except KeyboardInterrupt:\n print(file=sys.stderr)\n","sub_path":"src/scs_airnow/airnow_downloader.py","file_name":"airnow_downloader.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"204392930","text":"import numpy as np\n\ndef unity_lut_1d():\n lutmono = np.arange(0, 32768, 32, dtype=np.uint16)\n lut = np.stack([lutmono]*3, axis=0)\n return lut\n\ndef unity_lut_3d(n=33):\n spacing = complex(0,n)\n lut = np.mgrid[0.:4095.:spacing,0.:4095.:spacing,0.:4095.:spacing]\n lut = np.rint(lut).astype(np.uint16)\n lut = np.transpose(lut, axes=(1,2,3,0))\n lut = np.flip(lut, axis=-1)\n return lut\n\ndef read_cube_file(filename):\n nheader = 0\n lut_1d_size = None\n lut_3d_size = None\n domain_min = None\n domain_max = None\n with open(filename, \"r\") as f:\n for line in f:\n icomment = line.find(\"#\")\n if icomment>=0:\n line = line[:icomment]\n \n splitline = line.split()\n if splitline:\n keyword = splitline[0]\n else:\n keyword = None\n \n if keyword is None:\n pass\n elif keyword == \"TITLE\":\n pass\n elif keyword == \"LUT_1D_SIZE\":\n lut_1d_size = int(splitline[1])\n if lut_1d_size<2 or lut_1d_size>65536:\n raise ValueError(f\"Invalid value {lut_1d_size} for LUT_1D_SIZE, must be in range [2,65536].\")\n elif keyword == \"LUT_3D_SIZE\":\n lut_3d_size = int(splitline[1])\n if lut_3d_size<2 or lut_3d_size>256:\n raise ValueError(f\"Invalid value {lut_3d_size} for LUT_3D_SIZE, must be in range [2,256].\")\n elif keyword == \"DOMAIN_MIN\":\n domain_min = 
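The Pipe helper from scs_core chains these command-line utilities like a shell pipeline. Its internals are not shown here, but a rough standard-library equivalent of that pattern looks like this (the commands are illustrative):

```python
import subprocess

def run_pipeline(*commands):
    """Chain commands stdout -> stdin; return the last command's exit code."""
    prev_stdout = None
    procs = []
    for cmd in commands:
        p = subprocess.Popen(cmd, stdin=prev_stdout, stdout=subprocess.PIPE)
        if prev_stdout is not None:
            prev_stdout.close()     # drop our copy so upstream sees SIGPIPE
        prev_stdout = p.stdout
        procs.append(p)
    procs[-1].communicate()         # drain the final stage
    for p in procs[:-1]:
        p.wait()
    return procs[-1].returncode

# e.g. run_pipeline(["cat", "data.csv"], ["sort"], ["uniq", "-c"])
```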
np.genfromtxt([line], usecols=(1,2,3), dtype=np.float64)\n if domain_min.shape != (3,):\n raise ValueError(\"DOMAIN_MIN must provide exactly 3 values.\")\n if np.amin(domain_min) < -1e37 or np.amax(domain_min) > 1e37:\n raise ValueError(\"Invalid value in DOMAIN_MIN, must be in range [-1e37,1e37].\")\n elif keyword == \"DOMAIN_MAX\":\n domain_max = np.genfromtxt([line], usecols=(1,2,3), dtype=np.float64)\n if domain_max.shape != (3,):\n raise ValueError(\"DOMAIN_MAX must provide exactly 3 values.\")\n if np.amin(domain_max) < -1e37 or np.amax(domain_max) > 1e37:\n raise ValueError(\"Invalid value in DOMAIN_MAX, must be in range [-1e37,1e37].\")\n else:\n break\n \n nheader += 1\n \n if lut_1d_size and lut_3d_size:\n raise ValueError(\"Cannot specify both LUT_1D_SIZE and LUT_3D_SIZE.\")\n \n if not lut_1d_size and not lut_3d_size:\n raise ValueError(\"Must specify one of LUT_1D_SIZE or LUT_3D_SIZE.\")\n \n if domain_min is None:\n domain_min = np.zeros((3,), dtype=np.float64)\n \n if domain_max is None:\n domain_max = np.ones((3,), dtype=np.float64)\n \n lut = np.genfromtxt(filename, skip_header=nheader, comments=\"#\", dtype=np.float64)\n if np.amin(lut) < -1e37 or np.amax(lut) > 1e37:\n raise ValueError(\"Invalid value in LUT data, must be in range [-1e37,1e37].\")\n \n domain_min = np.reshape(domain_min, (1,3))\n domain_max = np.reshape(domain_max, (1,3))\n \n #shift and scale lut to range [0.,1.]\n lut = (lut-domain_min)/(domain_max-domain_min)\n \n if lut_1d_size:\n if lut.shape != (lut_1d_size,3):\n raise ValueError(f\"Expected shape {(lut_1d_size,3)} for 1D LUT, but got {lut.shape}.\")\n #convert to integer with appropriate range\n lut = np.rint(lut*32767.).astype(np.uint16)\n #transpose to get the correct element order\n lut = np.transpose(lut)\n elif lut_3d_size:\n if lut.shape != (lut_3d_size**3, 3):\n raise ValueError(f\"Expected shape {(lut_3d_size**3, 3)} for 3D LUT, but got {lut.shape}.\")\n lut = np.reshape(lut, (lut_3d_size, lut_3d_size, lut_3d_size, 3))\n lut = np.rint(lut*4095.).astype(np.uint16)\n \n return lut\n\ndef read_cal_file(filename):\n nheader = 0\n with open(filename, \"r\") as f:\n caldata = f.readlines()\n \n dataidx = caldata.index(\"BEGIN_DATA\\n\")\n lut_1d_size_in = int(caldata[dataidx-1].split()[1])\n \n lut = np.genfromtxt(caldata[dataidx+1:dataidx+1+lut_1d_size_in], dtype=np.float64)\n \n if lut.shape != (lut_1d_size_in,4):\n raise ValueError(f\"Expected shape {(lut_1d_size_in,4)} for 1D LUT, but got {lut.shape}.\")\n\n lut_1d_size = 1024\n \n #interpolate if necessary\n if lut_1d_size_in != lut_1d_size:\n x = np.linspace(0., 1., lut_1d_size, dtype=np.float64)\n lutcomponents = []\n for i in range(1,4):\n lutcomponent = np.interp(x, lut[:,0], lut[:,i])\n lutcomponents.append(lutcomponent)\n lut = np.stack(lutcomponents, axis=-1)\n else:\n lut = lut[:,1:]\n \n #convert to integer with appropriate range\n lut = np.rint(32767.*lut).astype(np.uint16)\n #transpose to get the correct element order\n lut = np.transpose(lut)\n \n return lut\n","sub_path":"aiopylgtv/lut_tools.py","file_name":"lut_tools.py","file_ext":"py","file_size_in_byte":5044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"459593522","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 21 11:15:01 2018\n\n@author: sujania\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 05s00-13s02\ncc_inv = np.array([-1.024446e+01, -1.111985e-01, -4.172265e+00, 7.545845e+00, -2.760083e+00])\ncc_wh = 
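read_cal_file resamples the calibration curve onto a fixed 1024-entry grid with np.interp before quantizing to 16-bit, and read_cube_file rescales values by (lut - domain_min) / (domain_max - domain_min) first. The resample-and-quantize step in isolation, on a made-up four-point curve:

```python
import numpy as np

src_x = np.array([0.0, 0.25, 0.5, 1.0])        # calibration sample positions
src_y = np.array([0.0, 0.30, 0.6, 1.0])        # one colour channel

x = np.linspace(0.0, 1.0, 1024)
resampled = np.interp(x, src_x, src_y)         # piecewise-linear resample
lut = np.rint(32767.0 * resampled).astype(np.uint16)
print(lut[0], lut[-1])                         # 0 32767
```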
np.array([-23.249747946970412, 0., 0., 0., 0.])\ncc_trp = np.array([-7.8685474666240403, 0., 0., 0., 0.])\ncc_bgt = np.array([-23.243773926407343, 0., 0., 0., 0.])\ncc_rwz = np.array([-19.445862735380029, 0., 0., 0., 0.])\ncc_s20rts = np.array([-1.5207712954483830, -0.79245287657720009, \n -1.8023340224734921, 2.5102791743920361, -3.7194176036023707])\ncc_trmp_s20rts = cc_trp + cc_s20rts\ncc_bgt_s20rts = cc_bgt + cc_s20rts\n\nfig = plt.figure(figsize=(10,3.5))\nax = fig.add_subplot(1,1,1)\nax.plot(cc_inv, '-^', color='r', label='${}_5S_0-{}_{13}S_2$', linewidth=2.5)\nax.plot(cc_bgt_s20rts, '-^', color='b', label='IC Beghein-Trampert + S20RTS', linewidth=2)\n#ax.plot(cc_s20rts, '-^', color='b', label='S20RTS')\n#ax.plot(cc_wh, '-^', color='g', label='IC Woodhouse')\n#ax.plot(cc_bgt, '-^', color='c', label='IC Beghein-Trampert')\n#ax.plot(cc_trp, '-^', color='plum', label='IC Tromp')\n#ax.plot(cc_rwz, '-^', color='m', label='IC Romanowicz')\nax.set_xticks(range(len(cc_inv)))\nplt.axhline(y=0, color='darkgray', linestyle='-', linewidth=5, zorder=0)\nplt.text(2, 1.5, 'PREM', fontsize=15, backgroundcolor='w', va='bottom', ha='center', zorder=0)\nax.tick_params(axis = 'both', which = 'major', labelsize = 15, zorder=0)\ncst = ['$Re[c_{20}]$', '$Re[c_{21}]$', '$Im[c_{21}]$', '$Re[c_{22}]$', '$Im[c_{22}]$']\nax.set_xticks(range(len(cst)))\nax.set_xticklabels(cst, rotation='horizontal', fontsize=15)\n\n# Shrink current axis's height by 10% on the bottom\nbox = ax.get_position()\n# Put a legend below current axis\nax.legend(loc='lower right',\n fancybox=True, shadow=True, ncol=2, fontsize=16)\n#plt.title('Cross-coupling coefficients')\nfig.savefig('cc_coeffs_05s00_13s02', dpi=350) \n","sub_path":"frospy/tests/todo/Su/plot_cc_coeff.py","file_name":"plot_cc_coeff.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"28971834","text":"import sqlite3\n\ndef databaseentry(ir,hr):\n con = sqlite3.connect('static/data.db')\n params = dict()\n try:\n with con:\n cur = con.cursor()\n cur.execute('INSERT OR IGNORE INTO data (ir,hr) VALUES (?, ?)', (ir,hr))\n con.commit()\n params['status'] = \"success\" \n except Exception:\n params['status'] = \"fail\"\n return params\n\ndef databaseread():\n con = sqlite3.connect('static/data.db')\n params = dict()\n try:\n with con:\n cur = con.cursor()\n cur.execute(\"Select * from data order by id desc limit 1\")\n rows = cur.fetchall()\n for row in rows:\n id = row[0]\n ir = row[1]\n hr = row[2]\n params['id'] = id\n params['ir'] = ir\n params['hr'] = hr\n params['status'] = \"success\"\n except Exception:\n params['status'] = \"fail\"\n return params\n","sub_path":"API/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"275268080","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n# Author: Dreamer\n# Time: 2018.6.22\n\n# 1. Create the window\n# 2. Load the background image\n# 3. Blit the background image onto the window\n# 4. Refresh the window\n\nimport pygame # two kinds of modules: static .py and dynamic (Windows .dll/.pyd, Linux .so)\nimport sys\nfrom pygame.locals import *\nimport time\nimport random\n\n# pseudo-constants: defined in uppercase, do not reassign them afterwards\nWINDOW_WIDTH = 512\nWINDOW_HEIGHT = 760\n\n\nclass BasePlane:\n\n def __init__(self, img_path, x, y, window):\n self.img = pygame.image.load(img_path) # hero image path\n # position where the hero plane first appears in the window\n self.x = x\n self.y = y\n # the window\n self.window = window\n\n def display(self):\n \"\"\"Display the hero plane image\"\"\"\n self.window.blit(self.img, (self.x, self.y))\n\n\nclass Bullet:\n \"\"\"Bullet\"\"\"\n\n def 
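The two database helpers above rely on sqlite3's "?" parameter binding and on using the connection as a context manager, which commits on success and rolls back on error (making the explicit commit redundant). The same pattern against a throwaway in-memory database:

```python
import sqlite3

con = sqlite3.connect(":memory:")
with con:  # commits on success, rolls back on exception
    con.execute("CREATE TABLE data (id INTEGER PRIMARY KEY, ir REAL, hr REAL)")
    con.execute("INSERT INTO data (ir, hr) VALUES (?, ?)", (512.0, 72.0))

row = con.execute("SELECT * FROM data ORDER BY id DESC LIMIT 1").fetchone()
print(row)  # (1, 512.0, 72.0)
```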
__init__(self, img_path, x, y, window):\n self.img = pygame.image.load(img_path) # bullet image path\n # position where the bullet appears in the window\n self.x = x\n self.y = y\n # the window the bullet belongs to\n self.window = window\n\n def display(self):\n \"\"\"Display the bullet image\"\"\"\n self.window.blit(self.img, (self.x, self.y))\n\n def bullet_move(self):\n \"\"\"Move the bullet\"\"\"\n self.y -= 10\n\n def __del__(self):\n \"\"\"Called automatically when the object is released\"\"\"\n print(\"Bullet destroyed!\")\n\n\nclass HeroPlane(BasePlane):\n \"\"\"Hero plane\"\"\"\n\n def __init__(self, img_path, x, y, window):\n super().__init__(img_path, x, y, window)\n\n self.bullets = [] # holds all bullets that have been fired\n\n def display(self):\n \"\"\"Display the hero plane image\"\"\"\n self.window.blit(self.img, (self.x, self.y))\n\n def move_left(self):\n \"\"\"Move the plane left\"\"\"\n if (self.x - 8) >= 0:\n self.x -= 8\n\n def move_right(self):\n \"\"\"Move the plane right\"\"\"\n if (self.x + 8) <= 395:\n self.x += 8\n\n def move_down(self):\n \"\"\"Move the plane down\"\"\"\n if (self.y + 8) <= 700:\n self.y += 8\n\n def move_up(self):\n \"\"\"Move the plane up\"\"\"\n if 0 < (self.y - 8):\n self.y -= 8\n\n def fire(self):\n \"\"\"Fire a bullet\"\"\"\n # create a bullet object\n # bullet x = plane x + plane width * 0.5 - bullet width * 0.5\n # bullet y = plane y - bullet height\n bullet = Bullet(\"res/bullet_9.png\", self.x + 50, self.y - 31, self.window)\n # bullet.display()\n # keep every fired bullet in the list\n self.bullets.append(bullet)\n\n def display_bullet(self):\n \"\"\"Handle the bullet sprites\"\"\"\n del_bullet = []\n for bullet in self.bullets: # take the bullets one by one\n if bullet.y >= 31:\n bullet.display() # redraw each bullet\n bullet.bullet_move() # let the bullet fly\n else:\n # self.bullets.remove(bullet) # destroy bullets that leave the screen\n del_bullet.append(bullet)\n for bullet1 in del_bullet:\n self.bullets.remove(bullet1)\n\n\nclass EnemyPlane(BasePlane):\n \"\"\"Enemy plane\"\"\"\n\n def enemy_move(self):\n \"\"\"Move the enemy plane\"\"\"\n self.y += 5\n if self.y >= WINDOW_HEIGHT:\n self.y = random.randint(-300, -68)\n self.x = random.randint(0, WINDOW_WIDTH - 100)\n self.img = pygame.image.load(\"res/img-plane_%d.png\" % random.randint(1, 7))\n\n\n# main entry point of the program\ndef main():\n # 1. Initialize the pygame library so the hardware is ready, e.g. for sound or text\n pygame.init()\n\n # 2. Create the window\n window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n\n # 3. Load image files, returning image objects\n # load the background image\n bg_img = pygame.image.load(\"res/img_bg_level_1.jpg\")\n\n # create the hero plane object\n hero_plane = HeroPlane(\"res/hero2.png\", 196, 500, window)\n\n # create the enemy planes\n enemy_list = []\n for _ in range(105):\n enemy_plane = EnemyPlane(\"res/img-plane_%d.png\" % random.randint(1, 7), random.randint(0, WINDOW_WIDTH - 100),\n random.randint(-300, -68), window)\n enemy_list.append(enemy_plane)\n while True:\n # 4. Blit (draw the image onto the window at the given coordinates)\n window.blit(bg_img, (0, 0))\n\n # draw the hero plane onto the window\n hero_plane.display()\n\n # draw the enemy planes onto the window\n for enemy_plane in enemy_list:\n enemy_plane.display()\n enemy_plane.enemy_move()\n # redraw the bullet objects\n # for bullet in hero_plane.bullets: # take the bullets one by one\n # bullet.display() # redraw each bullet\n # bullet.bullet_move() # let the bullet fly\n hero_plane.display_bullet()\n # 5. Refresh the display; nothing updates without this\n pygame.display.update()\n\n # handle events\n for event in pygame.event.get():\n # 1. window-close (mouse click) event\n if event.type == QUIT:\n print(\"Close button clicked\")\n sys.exit() # quit the program\n # 2. key-down events\n if event.type == KEYDOWN:\n # # check which key the user pressed\n # if event.key == K_LEFT or event.key == K_a:\n # x -= 5\n # print(\"left\")\n # if event.key == K_RIGHT or event.key == K_d:\n # x += 5\n # print(\"right\")\n # if event.key == K_DOWN or event.key == K_w:\n # y += 5\n # print(\"down\")\n # if event.key == K_UP or event.key == K_s:\n # y -= 5\n # print(\"up\")\n if event.key == K_SPACE:\n print(\"space\")\n hero_plane.fire()\n # 3. long-press handling via the keyboard state\n # get the state of every key (pressed or not) as a bool tuple, e.g. (0,0,0,1,0,0,0)\n pressed_keys = pygame.key.get_pressed()\n\n if 
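display_bullet above collects expired bullets in a side list instead of calling remove() inside the loop, since mutating a list while iterating over it skips elements. A list comprehension expresses the same sweep more compactly (toy dicts standing in for Bullet objects):

```python
bullets = [{"y": 500}, {"y": 10}, {"y": 250}]

for b in bullets:
    b["y"] -= 20                                  # move every bullet up

bullets = [b for b in bullets if b["y"] >= 31]    # drop the off-screen ones
print(len(bullets))  # 2
```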
pressed_keys[K_LEFT] or pressed_keys[K_a]:\n hero_plane.move_left()\n elif pressed_keys[K_RIGHT] or pressed_keys[K_d]:\n hero_plane.move_right()\n elif pressed_keys[K_DOWN] or pressed_keys[K_s]:\n hero_plane.move_down()\n elif pressed_keys[K_UP] or pressed_keys[K_w]:\n hero_plane.move_up()\n\n # pause each loop iteration for 0.01s to slow the loop down\n time.sleep(0.01) # reduce CPU usage\n print(len(hero_plane.bullets))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python12/PlaneWar/08-抽取基类.py","file_name":"08-抽取基类.py","file_ext":"py","file_size_in_byte":6604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"223113415","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport sys\nimport re\nimport os\nimport boto3\nfrom datetime import datetime as dt\n\n\ndef get_content(target_url):\n paths = \"{}/phantomjs\".format(os.getenv(\"LAMBDA_TASK_ROOT\"))\n\n service_args = ['--ignore-ssl-errors=yes']\n driver = webdriver.PhantomJS(\n executable_path=paths,\n desired_capabilities={\n 'phantomjs.page.settings.userAgent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\",\n },\n service_args=service_args,\n service_log_path=os.path.devnull \n )\n print(\"target_url: {}\".format(target_url))\n driver.get(target_url)\n\n data = driver.page_source.encode(\"utf-8\")\n soup = BeautifulSoup(data, \"lxml\")\n print(soup.title.string)\n return soup.prettify()\n\n\ndef create_html_file(content):\n tmp_dir = '/tmp'\n\n cur_ts = dt.now().strftime('%Y%m%d_%H%M%S')\n file_path = \"{}/{}.html\".format(tmp_dir, cur_ts)\n with open(file_path, 'w') as file:\n file.write(content)\n return file_path\n\n\ndef upload_text_s3bucket(upload_file_path, s3_bucket, key):\n print(\"upload text. 
file_path:{}, upload_s3_bucket:{}, key:{}\".format(upload_file_path, s3_bucket, key))\n bucket = boto3.resource('s3').Bucket(s3_bucket)\n bucket.upload_file(upload_file_path, key)\n\n\ndef lambda_handler(event, context):\n\n current_dt = dt.now().strftime('%Y%m%d%H%M%S')\n upload_s3_bucket = os.getenv(\"UPLOAD_S3_BUCKET\")\n target_url = os.getenv(\"CRAWLING_TARGET_URL\")\n target_url_without_slash = target_url.replace(\"/\", \"_\")\n\n # get content\n content = get_content(target_url)\n\n # save\n saved_file_path = create_html_file(content)\n\n # upload\n upload_text_s3bucket(saved_file_path, upload_s3_bucket, \"crawling_result/{}/{}.html\".format(target_url_without_slash, current_dt))","sub_path":"crawling/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"443886372","text":"\nclass Tag:\n\n def __init__(self, tagname1=None, tagname2=None, tagslug=None, tagdescription=None):\n # None is a helper construct that lets a class field default to an empty placeholder\n # when the field is not supplied at class/constructor initialization\n self.tagname1 = tagname1\n self.tagname2 = tagname2\n self.tagslug = tagslug\n self.tagdescription = tagdescription","sub_path":"model/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"255300759","text":"import maya.cmds as cmds\nimport Window\nreload(Window)\n\n\nclass Locator(Window.Window):\n def __init__(self, name='Locator Window'):\n Window.Window.__init__(self, name)\n\n def CreateUI(self):\n ''' Creates a UI for this tool '''\n self.DelUI()\n\n self.mWin = cmds.window(self.mWin, height=175, title=\"Create Locator at Center\", width=350)\n self.mLayout = cmds.columnLayout(adjustableColumn=True, height=100, rowSpacing=10, width=275)\n column = cmds.columnLayout(adjustableColumn=True, columnAttach=[\"both\", 25], parent=self.mLayout,\n rowSpacing=5)\n cmds.button(command=lambda *args: self.CreateLoc(2), height=25, label='Rotated Pivot', parent=column)\n cmds.button(command=lambda *args: self.CreateLoc(1), height=25, label='Center Pivot', parent=column)\n\n cmds.showWindow(self.mWin)\n\n def CreateLoc(self, option=1):\n ''' Creates a locator at the center of selection, or the pivot of each object, defaults to former '''\n sels = cmds.ls(sl=True)\n\n if option == 1:\n bbox = cmds.exactWorldBoundingBox(sels)\n pivot = [(bbox[0] + bbox[3]) / 2, (bbox[1] + bbox[4]) / 2, (bbox[2] + bbox[5]) / 2]\n\n loc = cmds.spaceLocator()[0]\n cmds.xform(loc, t=pivot, ws=True)\n\n elif option == 2:\n for sel in sels:\n pivot = cmds.xform(sel, q=True, t=True, ws=True)\n rot = cmds.xform(sel, q=True, ro=True, ws=True)\n loc = cmds.spaceLocator()[0]\n cmds.xform(loc, t=pivot, ws=True)\n cmds.xform(loc, ro=rot, ws=True)\n","sub_path":"MayaModels/scripts/Locator.py","file_name":"Locator.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"416392090","text":"import com.ihsan.foundation.pobjecthelper as phelper\nimport com.ihsan.timeutils as timeutils\nimport pyFlexcel\nimport sys\n\ndef formSetDataEx(uideflist, Parameter):\n if Parameter.DatasetCount == 0: return\n\n key=Parameter.FirstRecord.key\n config = uideflist.config\n helper = phelper.PObjectHelper(config)\n uploaded = 
helper.GetObject('UploadedStatement',key)\n\n uip = uideflist.uipart\n rec = uip.Dataset.AddRecord()\n rec.bulan = uploaded.StatementMonth\n rec.tahun = uploaded.StatementYear\n rec.picname = uploaded.LBranchBankPIC.PICName\n rec.cabang = uploaded.LBranchBankPIC.LBranch.BranchName\n\n\n","sub_path":"dialogs/Transaksi/fPeragaanUploadFileAngsuran_data.py","file_name":"fPeragaanUploadFileAngsuran_data.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"453730512","text":"#from datetime import datetime, timezone\nimport time\nimport pymysql\nimport sys\n\ndb_connection = {\n 'remote': {\n 'DBHost': 'mysqldb1.cmwln1uaaae0.ap-south-1.rds.amazonaws.com',\n 'DBUser': 'admin',\n 'DBPass': 'smartx123',\n 'DBName': 'waterfall'\n },\n 'local':{\n 'DBHost': 'localhost',\n 'DBUser': 'mounika',\n 'DBPass': 'Mounika@123',\n 'DBName': 'waterfall'\n }\n}\n\n\ndef get_cursor(db_conn_type):\n \"\"\"\n Create cursor for local and remote mysql server\n args: db_conn_type : (remote, local)\n :return: db, crsr\n \"\"\"\n db = pymysql.connect(host=db_connection[db_conn_type]['DBHost'],\n user=db_connection[db_conn_type]['DBUser'],\n passwd=db_connection[db_conn_type]['DBPass'],\n db=db_connection[db_conn_type]['DBName'])\n crsr = db.cursor()\n return db, crsr\n\n\n\ndef sync_local_to_remote_db():\n \"\"\"\n Sync records in the local database to remote database;\n :return:\n \"\"\"\n try:\n db_local, cur_local = get_cursor('local')\n db_remote, cur_remote = get_cursor('remote')\n\n # get last record id from remote database\n cmd_last_record = '''\n SELECT * FROM temp_reading\n ORDER BY id DESC \n LIMIT 1 ;\n '''\n cur_remote.execute(cmd_last_record)\n last_record_id = cur_remote.fetchall()[0][0]\n print(\"Remote DB last record id: \", last_record_id)\n #update new records from local db to remote db\n\n cmd_new_local_records = '''\n SELECT * FROM temp_reading\n WHERE id > {last_id} \n '''.format(last_id = last_record_id)\n\n cur_local.execute(cmd_new_local_records)\n local_records = cur_local.fetchall()\n\n if len(local_records) == 0:\n print('Local and Remote DBs are in sync..')\n for each_record in local_records:\n\n cmd_update = '''\n INSERT INTO temp_reading (id, timestamp, current_temp, high, low) \n VALUES {values}\n '''.format(values=(each_record[0],\n each_record[1].strftime(\"%Y-%m-%d, %H:%M:%S\"),\n float(each_record[2]),\n float(each_record[3]),\n float(each_record[4])))\n cur_remote.execute(cmd_update)\n db_remote.commit()\n print(\"Successfully inserted:\", each_record)\n\n except Exception as e:\n print(e)\n sys.exit(1)\n\nif __name__ == '__main__':\n sync_local_to_remote_db()","sub_path":"agri/update_cloud_db.py","file_name":"update_cloud_db.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"587321668","text":"import csv\nimport argparse\nimport sys\nsys.path.append(\"../\")\n\nfrom app_package import db\nfrom app_package.models import Player, Transaction, InternalState\nfrom scheduler.params_operations import *\nfrom scheduler.state_operations import *\n\nPARAMS_FILE = \"../scheduler/params.json\"\n\n\ndef print_table(my_dict, col_list=None):\n \"\"\"\n Pretty print a list of dicts (my_dict) as a table. 
You can specify the order of\n the columns in col_list.\n \"\"\"\n\n if not col_list:\n col_list = list(my_dict[0].keys() if my_dict else [])\n my_list = [col_list]\n for item in my_dict:\n my_list.append([str(item[col]) for col in col_list])\n col_size = [max(map(len, col)) for col in zip(*my_list)]\n format_str = ' | '.join([\"{{:<{}}}\".format(i) for i in col_size])\n my_list.insert(1, ['-' * i for i in col_size])\n for item in my_list:\n print(format_str.format(*item))\n\n\ndef clear_all_tables():\n print(\"--Clearing all tables...\")\n meta = db.metadata\n for table in reversed(meta.sorted_tables):\n db.session.execute(table.delete())\n db.session.commit()\n\n\ndef create_players(players_file=None):\n \"\"\"\n Generate db entries with initialization data from players_file.\n \"\"\"\n print(\"--Generating initial players database...\")\n params = get_params(PARAMS_FILE)\n with open(players_file) as f:\n reader = csv.DictReader(f)\n for i, row in enumerate(reader):\n p = Player(playername=row[\"playername\"],\n email=row[\"email\"],\n credit=params[\"starter_credit\"],\n about_me=row[\"about_me\"],\n avatar_path=\"avatars/{}.jpg\".format(str(i+1)))\n p.set_password(row[\"password\"])\n db.session.add(p)\n db.session.commit()\n\n\ndef view_players():\n all_players = Player.query.all()\n player_list = [p.__dict__ for p in all_players]\n print_table(player_list, [\"id\", \"playername\", \"credit\", \"score\", \"avatar_path\"])\n\n\ndef init_state():\n print(\"--Generating initial internal state...\")\n params = get_params(PARAMS_FILE)\n init_counter = params[\"refill\"][\"frequency_min\"]\n state = InternalState(counter=init_counter,\n is_challenge_open=False,\n is_final_vote_open=False,\n is_leaderboard_open=False)\n db.session.add(state)\n db.session.commit()\n\n\ndef view_state():\n state = get_state()\n print_table([state[1]])\n\n\ndef simple_vote(sender_name, recipient_name, amount):\n sender = Player.query.filter(Player.playername==sender_name).first()\n sender.credit -= amount\n recipient = Player.query.filter(Player.playername==recipient_name).first()\n recipient.score += amount\n print(\"{} gives {} to {}\".format(sender_name, amount, recipient_name))\n\n\n","sub_path":"scheduler/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"156446278","text":"import logging\nfrom os import getenv\n\nfrom squid_py import ConfigProvider\nfrom brizo.constants import ConfigSections\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_provider_account(ocean_instance):\n address = ConfigProvider.get_config().parity_address\n logger.info(f'address: {address}, {ocean_instance.accounts.accounts_addresses}')\n for acc in ocean_instance.accounts.list():\n if acc.address.lower() == address.lower():\n return acc\n\n\ndef get_env_property(env_variable, property_name):\n return getenv(\n env_variable,\n ConfigProvider.get_config().get(ConfigSections.OSMOSIS, property_name)\n )\n\n\ndef get_metadata(ddo):\n try:\n for service in ddo['service']:\n if service['type'] == 'Metadata':\n return service['metadata']\n except Exception as e:\n logger.error(\"Error getting the metatada: %s\" % e)\n\n\ndef check_required_attributes(required_attributes, data, method):\n assert isinstance(data, dict), 'invalid payload format.'\n logger.info('got %s request: %s' % (method, data))\n if not data:\n logger.error('%s request failed: data is empty.' 
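print_table derives each column width from the widest cell, then builds a "{:<w}" format string per column. The core trick, reduced to a few lines with made-up rows:

```python
rows = [["id", "playername", "credit"],
        ["1", "alice", "100"],
        ["2", "bob", "2500"]]

widths = [max(map(len, col)) for col in zip(*rows)]           # widest cell per column
fmt = " | ".join("{{:<{}}}".format(w) for w in widths)        # e.g. "{:<2} | {:<10} | {:<6}"
rows.insert(1, ["-" * w for w in widths])                     # separator under the header
for r in rows:
    print(fmt.format(*r))
```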
% method)\n return 'payload seems empty.', 400\n for attr in required_attributes:\n if attr not in data:\n logger.error('%s request failed: required attr %s missing.' % (method, attr))\n return '\"%s\" is required in the call to %s' % (attr, method), 400\n return None, None\n","sub_path":"brizo/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"194083289","text":"from random import choice\n\nclass HangmanGame(object):\n def __init__(self, word=None, hangmanbody=None):\n if hangmanbody is None:\n hangmanbody = [\n ' x x ',\n ' n ',\n ' \\\\_|_/ ',\n ' | ',\n ' / \\\\ ',\n ' d b '\n ]\n self.hangmanbody = hangmanbody\n\n if word is None:\n word = self.get_random_word()\n self.secret = word\n print(\"SECRET: '%s'\" % self.secret)\n self.guesslist = []\n self.failcount = 0\n self.donecount = 0\n self.blank = '*'*len(self.secret)\n\n @classmethod\n def get_random_word(cls):\n f=open('words_list.txt')\n words=f.readlines()\n f.close\n word = choice(words)\n if word[-1] == \"\\n\":\n word = word[:-1]\n return word\n\n def play_game(self):\n while True:\n self.guesslist.sort()\n self.show_hangman()\n print(self.blank)\n print('You have guessed: %s' % self.guesslist)\n print('You have %s strikes left.' % (6-self.failcount))\n\n guess=input(\"Guess a letter: \").lower()\n \n if not self.is_guess_valid(guess):\n continue\n \n self.guesslist += guess\n\n if guess in self.secret:\n for i in range(len(self.secret)):\n if guess == self.secret[i]:\n self.blank = self.blank[:i] + self.secret[i] + self.blank[i+1:]\n self.donecount+=1\n else:\n print('Nope! %s strikes left,' % (5-self.failcount))\n self.failcount += 1\n\n print('\\n')\n\n if self.failcount == 6:\n self.show_hangman()\n print(\"GAME OVER....the secret word was '%s'.\" % self.secret)\n break\n elif self.donecount == len(self.secret):\n print(\"YOU WIN!!! The word was '%s'.\" % self.secret)\n break\n\n def is_guess_valid(self, guess):\n if len(guess)!=1:\n print('One guess at a time.')\n return False\n \n if not guess.isalpha():\n print('This needs to be a letter dipwad.')\n return False\n \n if guess in self.guesslist and self.secret:\n print('You already guessed %s.'%guess)\n return False\n \n return True\n\n def show_hangman(self):\n if self.failcount > 0:\n print(\"\\n\".join(self.hangmanbody[0:self.failcount]))\n if self.failcount<6:\n print('\\n'*(5-self.failcount))\n\n\ndef hangman_intro():\n questions3=0\n while questions3<3:\n play=input('Wanna play hangman? y/n: ')\n questions3 +=1\n if play=='y':\n break\n else:\n print('Haha, I must have misheard you. I SAID:')\n if questions3==3:\n print(\"WRONG. We're playing now.\")\n break\n\n\n\n\n\n#THE BEGINNING OF IT ALL\nhangman_intro()\nwhile True:\n hangmaninstance = HangmanGame()\n hangmaninstance.play_game()\n print(\"YOU ARE PLAYING AGAIN! 
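check_required_attributes returns an (error_message, status_code) pair and (None, None) on success, so a handler can short-circuit on the first element. A hypothetical caller, assuming the function above is importable (the method name and required fields here are invented for illustration):

```python
def consume(data):
    msg, status = check_required_attributes(
        ["url", "signature"], data, "consume")
    if msg:
        return msg, status  # e.g. ('"signature" is required in the call to consume', 400)
    return "ok", 200

print(consume({"url": "http://example.com"}))  # error tuple: "signature" is missing
```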
DO IT BETTER THIS TIME!\")\n","sub_path":"hangman_class.py","file_name":"hangman_class.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"276799127","text":"from django.core.paginator import Paginator\nfrom django.shortcuts import render, get_object_or_404\nfrom .models import Product, Cart,OderTracking,Transactions\nfrom django.contrib.auth.decorators import login_required\n\n\n# Create your views here.\ndef index(request):\n product = Product.objects.filter(Trending=True)\n new_arrival = Product.objects.filter(new_Arraivel=True)\n return render(request, \"index.html\", {'products': product, 'newarrival': new_arrival})\n\n\ndef Cart_View(request):\n products = Product.objects.filter(category=request.GET.get('category'))\n new_arrival = Product.objects.filter(new_Arraivel=True)\n mycontext = {\n 'products': new_arrival\n }\n return render(request, \"pages/cart.html\", mycontext)\n\n\ndef Shop(request):\n product = Product.objects.all()\n paginator = Paginator(product, 10)\n page = request.GET.get('page')\n products = paginator.get_page(page)\n context = {\n 'products': products\n }\n return render(request, \"pages/shop.html\", context)\n\n\ndef About(request):\n return render(request, \"pages/about.html\")\n\n\n@login_required(login_url=\"/login/\")\ndef CheckOut(request):\n current_user = request.user\n if request.method == 'POST':\n transactions = Transactions.objects.get_or_create(user=current_user)\n fname = request.POST['first_name']\n lname =request.POST['last_name']\n country =request.POST['Country']\n streetaddress =request.POST['streetaddress']\n appartment =request.POST['appartment']\n town =request.POST['town']\n postcode =request.POST['postcode']\n phone =request.POST['phone']\n total =request.POST['money']\n email =request.POST['email']\n payment =request.POST['payment']\n \n transactions[0].first_name=fname\n transactions[0].last_name=lname\n transactions[0].state_country=country\n transactions[0].street_Address=streetaddress\n transactions[0].appertment=appartment\n transactions[0].town=town\n transactions[0].postcode=postcode\n transactions[0].phone=phone\n transactions[0].email=email\n transactions[0].payment_Method=payment\n transactions[0].Total=total\n print(transactions[0].appertment)\n transactions[0].save() \n products = Product.objects.all()\n context = {\n 'products': products,\n }\n return render(request, \"pages/checkout.html\",context)\n\n@login_required(login_url=\"/login/\")\ndef OrderStatus(request):\n Ordertracker = OderTracking.objects.filter()\n context ={\n 'OderTracking':Ordertracker\n }\n return render(request, \"accounts/ProductDeliveryStatus.html\",context)\n\ndef Product_Details(request, id, Product_slug):\n product = get_object_or_404(Product, id=id, Product_slug=Product_slug, available=True)\n return render(request, \"pages/product-single.html\", {'product': product})\n\n\ndef Contact(request):\n return render(request, \"pages/contact.html\")\n\n","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"538202623","text":"import agent\nimport ctp_api\nimport lts_api\nimport base\nimport time\nimport logging\nimport mysqlaccess\nimport datetime\nimport misc\n\ndef save_LTS(user, insts, run_date):\n app_name = 'SaveAgent'\n my_agent = agent.SaveAgent(name = app_name, trader = None, cuser = None, instruments=insts, 
daily_data_days=0, min_data_days=0, tday = run_date)\n lts_api.make_user(my_agent, user, insts)\n try:\n while 1: time.sleep(1)\n except KeyboardInterrupt:\n my_agent.mdapis = []; my_agent.trader = None\n\ndef filter_main_cont(sdate):\n insts, prods = mysqlaccess.load_alive_cont(sdate)\n main_cont = {}\n for pc in prods:\n main_cont[pc], exch = mysqlaccess.prod_main_cont_exch(pc)\n main_insts = []\n for inst in insts:\n pc = misc.inst2product(inst)\n mth = int(inst[-2:])\n if mth in main_cont[pc]:\n main_insts.append(inst)\n return main_insts\n \ndef save_all(tday, prod_md = misc.PROD_USER):\n logging.basicConfig(filename=\"save_all_agent.log\",level=logging.INFO,format='%(name)s:%(funcName)s:%(lineno)d:%(asctime)s %(levelname)s %(message)s')\n save_insts = filter_main_cont(tday)\n app_name = 'SaveAgent'\n config = {'daily_data_days': 0, 'min_data_days': 0}\n my_agent = agent.SaveAgent(name = app_name, trader = None, cuser = None, instruments=save_insts,tday = tday, config = config)\n ctp_api.make_user(my_agent, prod_md)\n try:\n while 1:\n time.sleep(1)\n \n except KeyboardInterrupt:\n my_agent.mdapis = []; my_agent.trader = None\n\ndef save_lts_test(tday):\n logging.basicConfig(filename=\"save_lts_test.log\",level=logging.INFO,format='%(name)s:%(funcName)s:%(lineno)d:%(asctime)s %(levelname)s %(message)s')\n save_insts = ['510050', '510050C1502M02500', '510050P1502M02500']\n save_LTS(misc.LTS_SO_USER,save_insts)\n pass\n\nif __name__ == '__main__':\n save_all()\n pass","sub_path":"test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"298128815","text":"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=1, bias=True)\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return\n\n\nclass ResBlock(nn.Module):\n \"\"\"UNet 两层卷积块\n 收缩模块经过了两次卷积操作,每一次卷积之后都进行一次 relu 操作\n 参数:\n in_channels: 输入的通道数。\n out_channels: 输出的通道数。\n kernel_size: 卷积核的大小。默认使用 3×3 的卷积核\n stride: 卷积核移动步长。默认为 1\n padding: 填充。默认无填充\n bias: 卷积后的偏置。默认添加偏置\n\n 示例:\n contracting_block_1 = ContractingBlock(3, 32)\n contracting_block_2 = ContractingBlock(3, 32, 3, 1, 1, True)\n \"\"\"\n\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(ResBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass UpSamplingBlock(nn.Module):\n \"\"\"UNet 上采样和拼接模块\n 收缩模块经过了两次卷积操作,每一次卷积之后都进行一次 relu 操作\n 参数:\n in_channels: 输入的通道数。\n out_channels: 输出的通道数。\n kernel_size: 卷积核的大小。默认使用 2×2 的卷积核\n stride: 卷积核移动步长。默认为 1\n padding: 填充。默认无填充\n bias: 卷积后的偏置。默认添加偏置\n\n 示例:\n contracting_block_1 = ContractingBlock(3, 32)\n 
contracting_block_2 = ContractingBlock(3, 32, 3, 1, 1, True)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size=2, stride=2, padding=0, output_padding=1, bias=True):\n super(UpSamplingBlock, self).__init__()\n self.tran_conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride)\n\n def forward(self, x, concat_feature):\n tran_conv_out = self.tran_conv(x)\n\n out = torch.cat((concat_feature, tran_conv_out), dim=1)\n\n return out\n\n\nclass ChannelAttention(nn.Module):\n def __init__(self, in_planes, ratio=16):\n super(ChannelAttention, self).__init__()\n\n out_channels = 1 if 0 == (in_planes // 16) else in_planes // 16\n\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n\n self.fc1 = nn.Conv2d(in_planes, out_channels, 1, bias=False)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Conv2d(out_channels, in_planes, 1, bias=False)\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))\n max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))\n out = avg_out + max_out\n return self.sigmoid(out)\n\n\nclass SpatialAttention(nn.Module):\n def __init__(self, kernel_size=7):\n super(SpatialAttention, self).__init__()\n\n assert kernel_size in (3, 7), 'kernel size must be 3 or 7'\n padding = 3 if kernel_size == 7 else 1\n\n self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = torch.mean(x, dim=1, keepdim=True)\n max_out, _ = torch.max(x, dim=1, keepdim=True)\n x = torch.cat([avg_out, max_out], dim=1)\n x = self.conv1(x)\n return self.sigmoid(x)\n\n\nclass ChannelAttentionBasicBlock(nn.Module):\n\n def __init__(self, channels):\n super(ChannelAttentionBasicBlock, self).__init__()\n\n self.ca = ChannelAttention(channels)\n\n def forward(self, x):\n out = x\n\n out = self.ca(out) * out\n\n return out\n\n\nclass FusionBasicBlock(nn.Module):\n\n def __init__(self, conv_num, tran_conv_num, depth, target_channel, in_channel_list=None, cbam_block=True):\n super(FusionBasicBlock, self).__init__()\n\n self.cbam_block = cbam_block\n\n if in_channel_list is None:\n in_channel_list = [1, 32, 64, 128, 256]\n\n self.layers = nn.ModuleList()\n\n for i in range(conv_num):\n sequential = nn.Sequential(\n nn.Conv2d(in_channel_list[i], target_channel, 3, padding=1),\n nn.MaxPool2d(2 ** (depth - i - 1), 2 ** (depth - i - 1)),\n\n )\n\n if cbam_block:\n sequential.add_module('attention', ChannelAttentionBasicBlock(target_channel))\n\n self.layers.append(sequential)\n\n for i in range(tran_conv_num):\n sequential = nn.Sequential(\n nn.ConvTranspose2d(in_channel_list[depth + i], target_channel, 2 ** (i + 1), stride=2 ** (i + 1)),\n )\n\n if cbam_block:\n sequential.add_module('attention', ChannelAttentionBasicBlock(target_channel))\n\n self.layers.append(sequential)\n\n if cbam_block:\n self.sa = SpatialAttention()\n\n def forward(self, node_list, current_node=None):\n out = current_node\n\n for index, item in enumerate(self.layers):\n if out is None:\n out = item(node_list[index])\n else:\n out = out + item(node_list[index])\n\n if self.cbam_block:\n out = self.sa(out) * out\n\n return out\n\n\nclass MultiResolutionBlock(nn.Module):\n def __init__(self):\n super(MultiResolutionBlock, self).__init__()\n\n # layer 1(node 1)\n self.fb_basic_block_1_1 = FusionBasicBlock(0, 0, 1, 1)\n\n # layer 2(生成节点2)\n self.fb_basic_block_2_1 = FusionBasicBlock(1, 0, 2, 32) # 0 down 1 up\n\n # layer 3(节点1更新,节点2更新,生成节点3)\n # 
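ChannelAttention and SpatialAttention follow the CBAM recipe: the first produces an (N, C, 1, 1) gate that reweights channels, the second an (N, 1, H, W) gate that reweights positions. Minimal usage on a dummy tensor, assuming the two classes defined above are in scope:

```python
import torch

x = torch.randn(1, 32, 64, 64)   # N, C, H, W
ca = ChannelAttention(32)
sa = SpatialAttention()

x = ca(x) * x                    # (1, 32, 1, 1) gate, broadcast over H and W
x = sa(x) * x                    # (1, 1, 64, 64) gate, broadcast over C
print(x.shape)                   # torch.Size([1, 32, 64, 64])
```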
节点 1 更新(节点2上采样+节点1)\n self.fb_basic_block_3_1 = FusionBasicBlock(0, 1, 1, 1)\n # 节点 2 更新(节点1下采样+节点2)\n self.fb_basic_block_3_2 = FusionBasicBlock(1, 0, 2, 32)\n # 生成节点 3(节点1下采样+节点2下采样)\n self.fb_basic_block_3_3 = FusionBasicBlock(2, 0, 3, 64)\n\n # layer 4(节点1更新,节点2更新,节点3更新,生成节点4)\n # 节点 1 更新(节点2上采样+节点3上采样 + 节点1)\n self.fb_basic_block_4_1 = FusionBasicBlock(0, 2, 1, 1)\n # 节点 2 更新(节点1下采样+节点2+节点3上采样)\n self.fb_basic_block_4_2 = FusionBasicBlock(1, 1, 2, 32)\n # 生成节点 3(节点1下采样+节点2下采样+节点3)\n self.fb_basic_block_4_3 = FusionBasicBlock(2, 0, 3, 64)\n # 生成节点 4(节点1下采样+节点2下采样+节点3下采样)\n self.fb_basic_block_4_4 = FusionBasicBlock(3, 0, 4, 128)\n\n # layer 5(节点1更新,节点2更新,节点3更新,节点4更新,生成节点5)\n # 节点 1 更新(节点2上采样+节点3上采样+节点4上采样 + 节点1)\n self.fb_basic_block_5_1 = FusionBasicBlock(0, 3, 1, 1)\n # 节点 2 更新(节点1下采样+节点2+节点3上采样+节点4上采样)\n self.fb_basic_block_5_2 = FusionBasicBlock(1, 2, 2, 32)\n # 节点 3 更新(节点1下采样+节点2下采样+节点3+节点4上采样)\n self.fb_basic_block_5_3 = FusionBasicBlock(2, 1, 3, 64)\n # 节点 4 更新(节点1下采样+节点2下采样+节点3下采样+节点4)\n self.fb_basic_block_5_4 = FusionBasicBlock(3, 0, 4, 128)\n # 生成节点 5(节点1下采样+节点2下采样+节点3下采样+节点4下采样)\n self.fb_basic_block_5_5 = FusionBasicBlock(4, 0, 5, 256)\n\n def forward(self, x):\n # layer 1\n input_rel_1 = self.fb_basic_block_1_1([x], current_node=x)\n\n # layer 2\n input_rel_2 = self.fb_basic_block_2_1([input_rel_1])\n\n # layer 3\n input_rel_1 = self.fb_basic_block_3_1([input_rel_2], input_rel_1)\n input_rel_2 = self.fb_basic_block_3_2([input_rel_1], input_rel_2)\n input_rel_3 = self.fb_basic_block_3_3([input_rel_1, input_rel_2])\n\n # layer 4\n input_rel_1 = self.fb_basic_block_4_1([input_rel_2, input_rel_3], input_rel_1)\n input_rel_2 = self.fb_basic_block_4_2([input_rel_1, input_rel_3], input_rel_2)\n input_rel_3 = self.fb_basic_block_4_3([input_rel_1, input_rel_2], input_rel_3)\n input_rel_4 = self.fb_basic_block_4_4([input_rel_1, input_rel_2, input_rel_3])\n\n # layer 5\n input_rel_1 = self.fb_basic_block_5_1([input_rel_2, input_rel_3, input_rel_4], input_rel_1)\n input_rel_2 = self.fb_basic_block_5_2([input_rel_1, input_rel_3, input_rel_4], input_rel_2)\n input_rel_3 = self.fb_basic_block_5_3([input_rel_1, input_rel_2, input_rel_4], input_rel_3)\n input_rel_4 = self.fb_basic_block_5_4([input_rel_1, input_rel_2, input_rel_3], input_rel_4)\n input_rel_5 = self.fb_basic_block_5_5([input_rel_1, input_rel_2, input_rel_3, input_rel_4])\n\n return [input_rel_1, input_rel_2, input_rel_3, input_rel_4, input_rel_5]\n\n\nclass FullyAggregationBlock(nn.Module):\n def __init__(self):\n super(FullyAggregationBlock, self).__init__()\n\n # stage 1\n # 节点1+节点2上采样+节点上采样+节点4上采样\n self.fb_basic_block_1 = FusionBasicBlock(0, 3, 1, 32, in_channel_list=[32, 64, 128, 256, 512], cbam_block=True)\n # stage 2\n # 节点1下采样+节点2+节点3上采样+节点4上采样\n self.fb_basic_block_2 = FusionBasicBlock(1, 2, 2, 64, in_channel_list=[32, 64, 128, 256, 512], cbam_block=True)\n # stage 3\n # 节点1下采样+节点2下采样+节点3+节点4上采样\n self.fb_basic_block_3 = FusionBasicBlock(2, 1, 3, 128, in_channel_list=[32, 64, 128, 256, 512], cbam_block=True)\n # stage 4\n # 节点1下采样+节点2下采样+节点3下采样+节点4\n self.fb_basic_block_4 = FusionBasicBlock(3, 0, 4, 256, in_channel_list=[32, 64, 128, 256, 512], cbam_block=True)\n\n def forward(self, x):\n [l1, l2, l3, l4] = x\n l1 = self.fb_basic_block_1([l2, l3, l4], l1)\n l2 = self.fb_basic_block_2([l1, l3, l4], l2)\n l3 = self.fb_basic_block_3([l1, l2, l4], l3)\n l4 = self.fb_basic_block_4([l1, l2, l3], l4)\n\n return [l1, l2, l3, l4]\n\n\nclass MultiInputBlock(nn.Module):\n def __init__(self):\n super(MultiInputBlock, 
self).__init__()\n\n # layer 1\n\n # layer 2\n self.conv_2 = nn.Conv2d(1, 32, 3, padding=1)\n self.down_sampling_2 = nn.MaxPool2d(2, 2)\n\n # layer 3\n self.conv_3 = nn.Conv2d(32, 64, 3, padding=1)\n self.down_sampling_3 = nn.MaxPool2d(2, 2)\n\n # layer 4\n self.conv_4 = nn.Conv2d(64, 128, 3, padding=1)\n self.down_sampling_4 = nn.MaxPool2d(2, 2)\n\n # layer 5\n self.conv_5 = nn.Conv2d(128, 256, 3, padding=1)\n self.down_sampling_5 = nn.MaxPool2d(2, 2)\n\n def forward(self, x):\n layer_1 = x\n layer_2 = self.down_sampling_2(self.conv_2(layer_1))\n layer_3 = self.down_sampling_3(self.conv_3(layer_2))\n layer_4 = self.down_sampling_4(self.conv_4(layer_3))\n layer_5 = self.down_sampling_5(self.conv_5(layer_4))\n\n return [layer_1, layer_2, layer_3, layer_4, layer_5]\n\n\nclass UNetMI(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.mi_block = MultiInputBlock()\n\n self.conv_block_1 = ResBlock(1, 32)\n self.down_sampling_1 = nn.MaxPool2d(2, 2)\n self.conv_block_2 = ResBlock(32, 64)\n self.down_sampling_2 = nn.MaxPool2d(2, 2)\n self.conv_block_3 = ResBlock(64, 128)\n self.down_sampling_3 = nn.MaxPool2d(2, 2)\n self.conv_block_4 = ResBlock(128, 256)\n self.down_sampling_4 = nn.MaxPool2d(2, 2)\n self.conv_block_5 = ResBlock(256, 512)\n\n self.up_sampling_1 = UpSamplingBlock(512, 256)\n self.conv_block_6 = ResBlock(512, 256)\n self.up_sampling_2 = UpSamplingBlock(256, 128)\n self.conv_block_7 = ResBlock(256, 128)\n self.up_sampling_3 = UpSamplingBlock(128, 64)\n self.conv_block_8 = ResBlock(128, 64)\n self.up_sampling_4 = UpSamplingBlock(64, 32)\n self.conv_block_9 = ResBlock(64, 32)\n\n self.out = nn.Conv2d(32, 2, 1)\n\n def forward(self, x):\n [input_l1, input_l2, input_l3, input_l4, input_l5] = self.mi_block(x)\n\n conv_block_out_1 = self.conv_block_1(input_l1)\n conv_block_out_2 = self.conv_block_2(input_l2)\n conv_block_out_3 = self.conv_block_3(input_l3)\n conv_block_out_4 = self.conv_block_4(input_l4)\n conv_block_out_5 = self.conv_block_5(input_l5)\n\n conv_block_out_6 = self.conv_block_6(self.up_sampling_1(conv_block_out_5, conv_block_out_4))\n conv_block_out_7 = self.conv_block_7(self.up_sampling_2(conv_block_out_6, conv_block_out_3))\n conv_block_out_8 = self.conv_block_8(self.up_sampling_3(conv_block_out_7, conv_block_out_2))\n conv_block_out_9 = self.conv_block_9(self.up_sampling_4(conv_block_out_8, conv_block_out_1))\n\n out = self.out(conv_block_out_9)\n\n return out\n","sub_path":"models/UNetMI.py","file_name":"UNetMI.py","file_ext":"py","file_size_in_byte":14049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"467132176","text":"from velociraptor.observations.objects import ObservationalData\n\nimport unyt\nimport numpy as np\nimport os\nimport sys\n\n# Exec the master cosmology file passed as first argument\nwith open(sys.argv[1], \"r\") as handle:\n exec(handle.read())\n\n# Cosmology\nh_sim = cosmology.h\nOmega_b = 0.0486 # Cosmology assumed by the M-TNG model\nOmega_m = 0.3089 # Cosmology assumed by the M-TNG model\n\ninput_filename = \"../raw/mtng_hmsm.txt\"\n\noutput_filename = \"MillenniumTNG_ratio.hdf5\"\noutput_directory = \"../\"\n\nif not os.path.exists(output_directory):\n os.mkdir(output_directory)\n\nprocessed = ObservationalData()\nraw = np.loadtxt(input_filename)\n\nM_200 = raw[:, 0] * unyt.Solar_Mass\nratio = raw[:, 1] * unyt.dimensionless\nratio *= Omega_b / Omega_m\n\n# Meta-data\ncomment = ()\ncitation = \"Pakmor et al. 
(2022) (MTNG)\"\nbibcode = \"2022arXiv221010060P\"\nname = \"Stellar mass - halos mass relation from Millennium-TNG (Fig. 2)\"\nplot_as = \"line\"\nredshift = 0.0\nh = h_sim\n\n# Write everything\nprocessed = ObservationalData()\nprocessed.associate_x(\n M_200,\n scatter=None,\n comoving=False,\n description=\"Halo Mass ($M_{200, {\\rm crit}}$)\",\n)\nprocessed.associate_y(\n ratio,\n scatter=None,\n comoving=True,\n description=\"Galaxy Stellar Mass / Halo Mass ($M_* / M_{200, {\\rm crit}}$)\",\n)\nprocessed.associate_citation(citation, bibcode)\nprocessed.associate_name(name)\nprocessed.associate_comment(comment)\nprocessed.associate_redshift(redshift)\nprocessed.associate_plot_as(plot_as)\nprocessed.associate_cosmology(cosmology)\n\noutput_path = f\"{output_directory}/{output_filename}\"\n\nif os.path.exists(output_path):\n os.remove(output_path)\n\nprocessed.write(filename=output_path)\n","sub_path":"data/GalaxyStellarMassHaloMass/conversion/convertMTNG.py","file_name":"convertMTNG.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"46010126","text":"\"\"\"\npicture.py\nAuthor: Katie Naughton\nCredit: I worked alone. \n\nAssignment:\n\nUse the ggame library to \"paint\" a graphical picture of something (e.g. a house, a face or landscape).\n\nUse at least:\n1. Three different Color objects.\n2. Ten different Sprite objects.\n3. One (or more) RectangleAsset objects.\n4. One (or more) CircleAsset objects.\n5. One (or more) EllipseAsset objects.\n6. One (or more) PolygonAsset objects.\n\nSee:\nhttps://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics\nfor general information on how to use ggame.\n\nSee:\nhttp://brythonserver.github.io/ggame/\nfor detailed information on ggame.\n\n\"\"\"\nfrom ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset\n\n# colors, no transparency\nred = Color(0xff0000, 1.0)\ngreen = Color(0x00ff00, 1.0)\nblue = Color(0x0000ff, 1.0)\nblack = Color(0x000000, 1.0)\nyellow = Color(0xffff00,1.0)\nlightblue = Color(0X00ffff,1.0)\ndarkgreen = Color(0x006400, 1.0)\ndarkblue = Color(0x483D8B, 1.0)\nwhite = Color(0xf8f8ff, 1.0)\n\n\n#line\nthinline=LineStyle(1, black)\n\n#line 2\nthinline2=LineStyle(1, darkblue)\n\n#sky day\nrectangle4=RectangleAsset (475, 1000, thinline, lightblue)\nSprite(rectangle4, (0, 0))\n\n#sky night\nrectangle6=RectangleAsset (460, 1000, thinline, darkblue)\nSprite(rectangle6, (475, 0))\n\n#house\nrectangle=RectangleAsset (250, 250, thinline, black)\nSprite(rectangle,(350, 150))\n\n#grass day\nrectangle2=RectangleAsset (475, 300, thinline, green)\nSprite(rectangle2, (0, 400))\n\n#grass night\nrectangle5=RectangleAsset (460, 350, thinline, darkgreen)\nSprite(rectangle5, (475, 400))\n\n#door\nrectangle3=RectangleAsset (100, 100, thinline, red)\nSprite(rectangle3, (400, 300))\n\n#doorknob\nellipse=EllipseAsset (5, 10, thinline, blue)\nSprite(ellipse, (425, 350))\n\n#roof\ntriangle=PolygonAsset([(1,100),(200,1),(400, 100)], thinline, black)\nSprite(triangle, (275,75))\n\n#sun\ncircle=CircleAsset(50, thinline, yellow)\nSprite(circle, (0,0))\n\n#moonwhite\ncircle2=CircleAsset(50, thinline, white)\nSprite(circle2, (800,0))\n\n#moondarkblue\ncircle3=CircleAsset(40, thinline2, darkblue)\nSprite(circle3, (800,0))\n\n\n\n\n\n\n\n\nmyapp = 
App()\nmyapp.run()\n","sub_path":"picture.py","file_name":"picture.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"302366132","text":"import pygame\nfrom Animations.secretAgent import SecretAgent\nfrom Maps.map import Map\nfrom Menu.mainMenu import MainMenu\n\nSOURCE_PICTURE = \"Properties/agent_sprites.png\"\nSIDEBAR_PICTURE = \"Properties/Sidebar.png\"\nTEST_MAP = \"Properties/map2.csv\"\nMENU_FILE = \"Properties/menu.csv\"\nBACKGROUND_COLOR = (86, 118, 255)\nDISPLAY_SIZE = (500, 320)\n\n\ndef main():\n while 1:\n screen = pygame.display.set_mode(DISPLAY_SIZE, pygame.DOUBLEBUF)\n clock = pygame.time.Clock()\n entire_map = Map(SOURCE_PICTURE, TEST_MAP)\n menu = MainMenu(SIDEBAR_PICTURE, MENU_FILE)\n agent = SecretAgent(SOURCE_PICTURE)\n while 1:\n if menu.key_check():\n break\n menu.draw_menu(screen)\n pygame.display.flip()\n\n while 1:\n clock.tick(30)\n agent.moveAgent()\n entire_map.move(agent.position)\n entire_map.collisionDetection(agent)\n screen.fill(BACKGROUND_COLOR)\n entire_map.draw(screen, DISPLAY_SIZE)\n agent.draw(screen, DISPLAY_SIZE)\n pygame.display.flip()\n if agent.return_to_menu and not agent.jump and not agent.isJumping:\n break\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"267758726","text":"import logging\nimport sys\n\nimport structlog\nfrom flask import Flask, redirect, url_for\nfrom flask_admin import Admin\nfrom flask_assets import Environment, Bundle\nfrom flask_qrcode import QRcode\n\nfrom website.constants import FLASK_SECRET_KEY\nfrom website.extensions import cache\nfrom website.views.home_view import HomeView\n\n\nclass App(Flask):\n def __init__(self):\n super().__init__(__name__)\n if __name__ != '__main__':\n gunicorn_logger = logging.getLogger('gunicorn.error')\n self.logger.handlers = gunicorn_logger.handlers\n self.logger.setLevel(gunicorn_logger.level)\n\n logging.basicConfig(\n format=\"%(message)s\", stream=sys.stdout, level=logging.INFO\n )\n structlog.configure(\n processors=[\n structlog.processors.KeyValueRenderer(\n key_order=[\"event\", \"request_id\"]\n )\n ],\n context_class=structlog.threadlocal.wrap_dict(dict),\n logger_factory=structlog.stdlib.LoggerFactory(),\n )\n assets = Environment(self)\n\n js = Bundle('js/app.js',\n filters='jsmin', output='gen/packed.js')\n assets.register('js_all', js)\n\n cache.init_app(self)\n QRcode(self)\n self.debug = False\n self.config['SECRET_KEY'] = FLASK_SECRET_KEY\n\n @self.route('/')\n def index():\n return redirect(url_for('home.index'))\n\n @self.errorhandler(404)\n def page_not_found(e):\n return redirect(url_for('home.index'))\n\n self.admin = Admin(app=self, url='/')\n\n home_view = HomeView(name='Home', endpoint='home')\n self.admin.add_view(home_view)\n\n\nif __name__ == '__main__':\n app = App()\n app.debug = True\n app.run(port=5001, use_reloader=True, use_debugger=True)\n","sub_path":"website/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"293664793","text":"#!/usr/bin/python3\n\nimport subprocess\nimport sys\n\n\ndef run_cmd(correction_file, args):\n args.insert(0, correction_file)\n\n if correction_file.endswith('.py'):\n args.insert(0, 'python3')\n elif 
correction_file.endswith('.pl'):\n args.insert(0, 'perl')\n elif correction_file.endswith('.go'):\n args.insert(0, 'run')\n args.insert(0, 'go')\n\n child = subprocess.Popen(args, stdout=subprocess.PIPE)\n out = child.communicate()[0]\n ret = child.returncode\n return out.decode(), ret\n\n\ndef check(correction_file, secret):\n \"\"\"\n Check if the corrected source code is still usable\n \"\"\"\n checks = [\n {\n 'params': ['odtokjupfpenmtyo'],\n 'response': 'here is the encrypted text',\n 'message': 'Encryption API is broken. Expecting to find: \"here is the encrypted text\" in the response\\n\\n'\n 'Your code output: \\n\\n{}',\n },\n {\n 'params': [''],\n 'response': 'here is the encrypted text',\n 'message': 'Encryption API is broken. Without user input, return example found in secret.\\n '\n 'Expecting to find: \"here is the encrypted text\" in the response\\n\\n'\n 'Your code output: \\n\\n{}',\n },\n {\n 'params': ['tooshort'],\n 'response': 'encryption problem!',\n 'message': 'Encryption API is broken. Expecting : \"Encryption problem!\"\\n\\n'\n 'Your code output: \\n\\n{}',\n },\n ]\n\n for _check in checks:\n\n out, return_code = run_cmd(correction_file, _check['params'])\n if return_code != 0:\n print(\"Invalid execution : {}\".format(out))\n sys.exit(1)\n if _check['response'] not in out.lower():\n print(_check['message'].format(out))\n return False\n\n return True\n\n\ndef main():\n secret = sys.argv[1]\n correction_file = sys.argv[2]\n return_code = 0 if check(correction_file, secret) else 2\n sys.exit(return_code)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"challs/data_exposure.dir/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"176409729","text":"ignore = '\":;,.-+=/\\|[]{}()*^&'\n\n\ndef word_count(s):\n # Your code here\n result = {}\n filtered_text = [\n word.strip(ignore).lower()\n for word in s.split()\n if word.strip(ignore)\n ]\n for text in filtered_text:\n result[text] = result.get(text, 0) + 1\n\n return result\n\n\nif __name__ == \"__main__\":\n print(word_count(\"\"))\n print(word_count(\"Hello\"))\n print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n print(word_count('This is a test of the emergency broadcast network. This is only a test.'))\n","sub_path":"applications/word_count/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"111463753","text":"# The edit distance between two strings refers to the minimum number of character\n# insertions, deletions, and substitutions required to change one string to the\n# other. 
For example, the edit distance between “kitten” and “sitting” is three:\n# substitute the “k” for “s”, substitute the “e” for “i”, and append a “g”.\n#\n# Given two strings, compute the edit distance between them.\n\n# Edit distance between a, b with m, n being the length of those strings\n# respectively\n\n# Levenshtein distance\n\ndef dist(a, m, b, n):\n    # base case: empty strings\n    if m == 0:\n        return n\n\n    if n == 0:\n        return m\n\n    # if the last characters of the strings match\n    cost = 0 if (a[m - 1] == b[n - 1]) else 1\n\n    return min(dist(a, m - 1, b, n) + 1,  # deletion\n               dist(a, m, b, n - 1) + 1,  # insertion\n               dist(a, m - 1, b, n - 1) + cost)  # substitution\n\nif __name__ == '__main__':\n    a = \"kitten\"\n    b = \"sitting\"\n\n    print(\"The edit distance is\", dist(a, len(a), b, len(b)))\n","sub_path":"editDistance.py","file_name":"editDistance.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87859233","text":"# -*- coding=utf-8 -*-\r\n#author:wang\r\n\r\nfrom docx import Document\r\nimport openpyxl\r\n\r\nclass WordOperation:\r\n    \"\"\"\r\n    Word operation class: builds a Word document from the given input; requires at least three kinds of content: a title, a subtitle and body text.\r\n    \"\"\"\r\n    def __init__(self, title, subtitle, text):\r\n        self.title = title\r\n        self.subtitle = subtitle\r\n        self.text = text\r\n    def write_word(self):\r\n        \"Write the Word document and save it\"\r\n        try:\r\n            doc = Document()\r\n            doc.add_heading(self.title)\r\n            doc.add_paragraph(self.subtitle,'Subtitle')\r\n            doc.add_paragraph(self.text)\r\n            name = self.title + '.docx'\r\n            doc.save(name)\r\n            print('Word file saved!')\r\n        except Exception as f:\r\n            print(f)\r\n    \r\nclass ExcelOperation:\r\n    '''\r\n    Split the data by year according to the first column and put each year into a new worksheet, e.g. the 2015 data goes into a sheet named 2015.\r\n    For each yearly sheet, append the average price after the last data row.\r\n    from_file_name: input data file name\r\n    sheet_name: data sheet name\r\n    go_file_name: name of the newly created file\r\n    '''\r\n    def __init__(self, from_file_name, sheet_name, go_file_name):\r\n        self.wb = openpyxl.load_workbook(from_file_name)\r\n        self.sh1 = self.wb[sheet_name]\r\n        self.go_name = go_file_name\r\n\r\n    def run(self):\r\n        \"Overall control logic\"\r\n        l = []\r\n        for rows in self.sh1.rows:\r\n            if rows[0].coordinate != 'A1':\r\n                l.append(rows[0].value[:4])\r\n        l = list(set(l))\r\n\r\n        for x in l:\r\n            self.write_data(x)\r\n\r\n        self.wb.save(self.go_name)\r\n        print('Excel file saved')\r\n\r\n\r\n    def write_data(self, sheet_name):\r\n        \"\"\"\r\n        Write data into a sheet.\r\n        sheet_name: name of the sheet the data is saved to\r\n        \"\"\"\r\n        index = 2\r\n        self.wb.create_sheet(sheet_name)\r\n        sh2 = self.wb[sheet_name]\r\n        sh2['A1'] = self.sh1['A1'].value\r\n        sh2['B1'] = self.sh1['B1'].value\r\n        for rows in self.sh1.rows:\r\n            if rows[0].coordinate != 'A1' and rows[0].value[:4] == sheet_name:\r\n                sh2['A' + str(index)] = rows[0].value\r\n                sh2['B' + str(index)] = rows[1].value\r\n                index += 1\r\n        num = sh2.max_row\r\n        # compute the average\r\n        l=[]\r\n        for row in sh2.rows:\r\n            if row[0].coordinate != 'A1':\r\n                l.append(int(row[1].value))\r\n        average_num = sum(l)/(num-1)\r\n        \r\n        sh2.cell(row=num+1, column=2).value = average_num\r\n        sh2.cell(row=num+1, column=1).value = 'average'\r\n\r\nclass OfficeOperation:\r\n    \"Selects and runs the requested operation class\"\r\n    def operation(self):\r\n        while True:\r\n            print('Please choose an operation')\r\n            menu = {\r\n                '1':'operate on Word',\r\n                '2':'operate on Excel',\r\n                '0':'quit'\r\n            }\r\n            for k,v in menu.items():\r\n                print(k, v)\r\n            try:\r\n                choose = input('Please enter the number of the operation: ')\r\n                if choose == '0':\r\n                    print('Exit!')\r\n                    
break\r\n                elif choose == '1':\r\n                    WO = WordOperation('wangshuai', 'python', 'text')\r\n                    WO.write_word()\r\n                elif choose == '2':\r\n                    EO = ExcelOperation('btc.xlsx', 'btc', 'btc-1.xlsx')\r\n                    EO.run()\r\n            except Exception as f:\r\n                print(\"Invalid input, please try again\")\r\n\r\ndef main():\r\n    while True:\r\n        print('Please choose an operation')\r\n        menu = {\r\n            '1':'operate on Word',\r\n            '2':'operate on Excel',\r\n            '0':'quit'\r\n        }\r\n        for k,v in menu.items():\r\n            print(k, v)\r\n        try:\r\n            choose = input('Please enter the number of the operation: ')\r\n            if choose == '0':\r\n                print('Exit!')\r\n                break\r\n            elif choose == '1':\r\n                WO = WordOperation('wangshuai', 'python', 'text')\r\n                WO.write_word()\r\n            elif choose == '2':\r\n                EO = ExcelOperation('btc.xlsx', 'btc', 'btc-1.xlsx')\r\n                EO.run()\r\n        except Exception as f:\r\n            print(\"Invalid input, please try again\")\r\n    \r\nif __name__ == \"__main__\":\r\n    main()","sub_path":"moude3/数据处理系统/office_process.py","file_name":"office_process.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"62657030","text":"def maxrem(num):\n    rem=0\n    nu = max(num)\n    num.remove(nu)\n\n    x=max(num)\n    if((x%nu)>rem):\n        rem=x%nu\n        return rem\n    num.remove(x)\n    return rem\n\nif __name__ == '__main__':\n    N = int(input())\n    numstr = input()\n    nums = set(map(int, numstr.split()))\n    r = maxrem(nums)\n    print(r)\n\n\n","sub_path":"code_Chef/maximum_remaining.py","file_name":"maximum_remaining.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"46742022","text":"import psycopg2\nimport argparse\nimport sys\nimport json\n\n\"\"\"\nparser = argparse.ArgumentParser(description='parse key pairs into a dictionary')\n\n\nclass StoreDictKeyPair(argparse.Action):\n    def __init__(self, option_strings, dest, nargs=None, **kwargs):\n        self._nargs = nargs\n        super(StoreDictKeyPair, self).__init__(option_strings, dest, nargs=nargs, **kwargs)\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        my_dict = {}\n        print(\"values: {}\".format(values))\n        for kv in values:\n            k, v = kv.split(\":\")\n            my_dict[k] = v\n        setattr(namespace, self.dest, my_dict)\n\n\nparser.add_argument(\"--kv\", dest=\"my_dict\", action=StoreDictKeyPair, nargs=\"+\", metavar=\"KEY:VAL\")\n\nargs = parser.parse_args(sys.argv[1:])\n\n\"\"\"\n\n#\n# parser = argparse.ArgumentParser()\n# parser.add_argument('-i', '--input', help=\"JSON file to be processed\")\n# arguments = parser.parse_args()\n# inp = \"\"\n# if arguments.input:\n#     # print(arguments.input, type(arguments.input))\n#     inp = arguments.input\n#     print(inp,type(inp))\n# else:\n#     print(\"usage: program -i \")\n#     sys.exit(-1)\n# data_info = {}\n# data = json.loads(inp)\n# print(data)\n# data = inp.replace(\"'{\", '')\n# data_0 = data.replace(\"}'\", '')\n# dict_data = data_0.split(\",\")\n# for kv in dict_data:\n#     k, v = kv.split(\":\")\n#     data_info[k] = v\n# print(data_info, type(data_info))\n# record_to_insert = (data_info.get(\"user\"), data_info.get(\"state\"), data_info.get(\"status\"))\n# print(record_to_insert, type(record_to_insert))\n\"\"\"\nconnection = None\n\ntry:\n    connection = psycopg2.connect(\"dbname='gtaproject' host='10.10.1.10' user='gtauser' password='password'\")\n    cur = connection.cursor()\n    postgres_insert_query = \"INSERT INTO authentication_executionstatus (user_id, key, value) VALUES (%s,%s,%s)\"\n    # record_to_insert = (2, \"status\", \"sucess\")\n    record_to_insert = (data_info.get(\"user_id\"), data_info.get(\"state\"), 
data_info.get(\"status\"))\n cur.execute(postgres_insert_query, record_to_insert)\n connection.commit()\n\nexcept psycopg2.DatabaseError as err:\n print(f\"Error in DB inserting: {err}\")\n sys.exit(1)\nfinally:\n if connection:\n connection.close()\n\"\"\"\n\n# python db_status.py -i '{\"user\": srikanththalla, \"state\":\"GIT\", \"status\":\"Success\"}'\n\n\nimport psycopg2\nimport argparse\nimport sys\n# import pdb\nimport json\n\n\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--input', help=\"JSON file to be processed\")\narguments = parser.parse_args()\ninp = \"\"\nif arguments.input:\n inp = arguments.input\n\nelse:\n print(\"usage: program -i \")\n sys.exit(-1)\n#pdb.set_trace()\n\ndata_info = {}\nd=json.loads(inp)\n#print(d)\ndata_info.update(d)\"\"\"\n\n\n# print(data_info)\n\ndef write_db(ds, **kwargs):\n connection = None\n\n try:\n connection = psycopg2.connect(\"dbname='gtaproject1' host='10.10.1.10' user='gtauser' password='password'\")\n cur = connection.cursor()\n user_id = kwargs['dag_run'].conf['user_id']\n state_git = kwargs['dag_run'].conf['state_git']\n status_git = kwargs['dag_run'].conf['status_git']\n # pdb.set_trace()\n insert_query = \"\"\" INSERT INTO authentication_executionstatus (user_id, key, value) VALUES (%s,%s,%s)\"\"\"\n record_to_insert = (user_id, state_git, status_git)\n cur.execute(insert_query, record_to_insert)\n connection.commit()\n except psycopg2.DatabaseError as err:\n print(\"Error in DB inserting:\", err)\n sys.exit(1)\n\n finally:\n if connection:\n connection.close()\n\n\ngit_status = PythonOperator(task_id='git_status', provide_context=True, python_callable=write_db, dag=dag)\n","sub_path":"Core_python/_24_DB_Connection_Python/GTA_db_status.py","file_name":"GTA_db_status.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"71923664","text":"# Import the libraries\nimport math\nimport pandas_datareader as web\nimport time\nfrom datetime import datetime, timedelta, date\nfrom dateutil.relativedelta import relativedelta\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, LSTM\nfrom db_connect import lstm_usd_res, lstm_usd_remove\nfrom helper_connect import DBConnect # 디비 연결\n\n\ndef getDay(year, month, date_v):\n day = ['Mon', 'Tue', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']\n aday = date(year, month, date_v)\n bday = aday.weekday()\n return day[bday]\n\n\ndef lstm_usd(conn, df):\n data = df\n dataset = data.values\n training_data_len = math.ceil(len(dataset) * .8)\n\n # Scale the data\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_data = scaler.fit_transform(dataset)\n\n train_data = scaled_data[0:training_data_len, :]\n x_train = []\n y_train = []\n for i in range(60, len(train_data)):\n x_train.append(train_data[i-60:i, 0])\n y_train.append(train_data[i, 0])\n x_train, y_train = np.array(x_train), np.array(y_train)\n\n x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n\n model = Sequential()\n model.add(LSTM(units=50, return_sequences=True,\n input_shape=(x_train.shape[1], 1)))\n model.add(LSTM(units=50, return_sequences=False))\n model.add(Dense(units=25))\n model.add(Dense(units=1))\n\n model.compile(optimizer='adam', loss='mean_squared_error')\n\n model.fit(x_train, y_train, batch_size=1, epochs=1)\n\n test_data = scaled_data[training_data_len - 60:, :]\n x_test = 
[]\n    y_test = dataset[training_data_len:, :]\n    for i in range(60, len(test_data)):\n        x_test.append(test_data[i-60:i, 0])\n    x_test = np.array(x_test)\n\n    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))\n\n    predictions = model.predict(x_test)\n    predictions = scaler.inverse_transform(predictions)\n\n    rmse = np.sqrt(np.mean((predictions - y_test)**2))\n    print(rmse)\n\n    train = data[:training_data_len]\n    valid = data[training_data_len:]\n    valid['Predictions'] = predictions\n\n    df = web.DataReader('KRW=X', data_source='yahoo', start='2003-01-01')\n    new_df = df.filter(['Close'])\n    last_60_days = new_df[-60:].values\n\n    last_60_days_scaled = scaler.transform(last_60_days)\n\n    X_test = []\n    X_test.append(last_60_days_scaled)\n    X_test = np.array(X_test)\n    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\n    print(X_test.shape)\n    pred_price = model.predict(X_test)\n    pred_price = scaler.inverse_transform(pred_price)\n    print(pred_price[0][0], \"function result\")\n    return pred_price[0][0]\n\n\nif __name__ == \"__main__\":\n    conn = DBConnect()\n    df = web.DataReader('KRW=X', data_source='yahoo', start='2003-01-01')\n    data = df.filter(['Close'])\n    for i in range(1, 31):\n        today = date.today() + relativedelta(days=+i)\n        if getDay(today.year, today.month, today.day) == 'Sat' or getDay(today.year, today.month, today.day) == 'Sun':\n            continue\n        else:\n            result = lstm_usd(conn, data)\n            data = data.reset_index()\n            data = data.append({\"Date\": pd.Timestamp(\n                today), \"Close\": float(result)}, ignore_index=True)\n            data = data.set_index(\"Date\")\n            lstm_usd_res(conn, today, result)\n    lstm_usd_remove(conn)\n","sub_path":"Crawling/lstm_USD.py","file_name":"lstm_USD.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"138646626","text":"import qnt.data as qndata\nimport qnt.stats as qnstats\nimport qnt.xr_talib as qnxrtalib\nimport qnt.forward_looking as qnfl\nimport time\n\ndata = qndata.load_data(min_date=\"2010-01-01\", max_date=None, forward_order=True, dims=(\"time\", \"field\", \"asset\"))\n\n\ndef strategy(data):\n    wma = qnxrtalib.WMA(data.sel(field='close'), 290)\n    sroc = qnxrtalib.ROCP(wma, 35)\n\n    is_liquid = data.sel(field=\"is_liquid\")\n    weights = is_liquid.where(sroc > 0.0125)\n\n    weights = weights / weights.sum(\"asset\", skipna=True)\n    return weights.fillna(0.0)\n\n\nt0 = time.time()\noutput = qnfl.calc_output_and_check_forward_looking(data, strategy)\nt1 = time.time()\nprint(t1 - t0)\nstat = qnstats.calc_stat(data, output, max_periods=252 * 3)\nt2 = time.time()\nprint(t2 - t1)\nstat2 = qnstats.calc_stat(data, output, max_periods=252 * 3, per_asset=True)\nt3 = time.time()\nprint(t3 - t2)\n\nprint(stat2.sel(field='sharpe_ratio').transpose().to_pandas())\n\nqndata.write_output(output)\n","sub_path":"example/roc_example.py","file_name":"roc_example.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"496922150","text":"from pymongo import InsertOne, MongoClient, UpdateOne\nfrom os import environ\nfrom boto3 import client\nfrom re import compile, escape, IGNORECASE, sub\nfrom flask import abort, request, session\nfrom uuid import uuid4\nfrom functools import wraps\nfrom game_night.game import Game\n\ntry:\n    _game_night = MongoClient('mongodb://{}:{}@{}/{}'.format(environ['MONGODB_USER'], environ['MONGODB_PASSWORD'], environ.get('MONGODB_HOST', 'localhost'), 
environ['MONGODB_DATABASE'])).game_night\nexcept:\n _game_night = MongoClient().game_night\n_api_keys = _game_night.api_keys\n_gamemasters = _game_night.gamemasters\n_games = _game_night.games\n\n_s3 = client('s3', aws_access_key_id = environ['S3_KEY'], aws_secret_access_key = environ['S3_SECRET'])\n\ndef _create_filters():\n filters = {}\n max_players = request.args.get('max_players')\n if max_players:\n try:\n filters['max_players'] = int(max_players)\n except:\n filters['max_players'] = -1\n min_players = request.args.get('min_players')\n if min_players:\n try:\n filters['min_players'] = int(min_players)\n except:\n filters['min_players'] = -1\n name = request.args.get('name')\n if name:\n try:\n filters['name'] = compile(name, IGNORECASE)\n except:\n filters['name'] = compile(escape(name), IGNORECASE)\n owner = request.args.get('owner')\n if owner:\n filters['owner'] = owner\n players = request.args.get('players')\n if players:\n try:\n players = int(players)\n filters['$and'] = [{'min_players': {'$lte': players}}, {'max_players': {'$gte': players}}]\n except:\n filters['$and'] = [{'min_players': {'$lte': -1}}, {'max_players': {'$gte': -1}}]\n submitter = request.args.get('submitter')\n if submitter:\n filters['submitter'] = submitter\n return filters\n\ndef delete_game(name):\n if _games.delete_one({'name': name}).deleted_count:\n try:\n id = list(_games.find().sort([('_id', -1)]).limit(10))[-1]['_id']\n _games.update_many({'_id': {'$gte': id}}, {'$set': {'new': True}})\n except:\n pass\n _s3.delete_object(Bucket = environ['S3_BUCKET'], Key = name + '.jpg')\n return True\n return False\n\ndef game_exists(name):\n return _games.count({'name': compile(f'^{escape(name)}$', IGNORECASE)})\n\ndef generate_api_key(write = False):\n uuid = str(uuid4())\n _api_keys.insert_one({'key': uuid, 'write': write})\n return uuid\n\ndef get_count():\n return _games.count(_create_filters())\n\ndef get_game(name):\n return _games.find_one({'name': name})\n\ndef get_games():\n return _games.find(_create_filters(), {'_id': False}).sort([('sort_name', 1)])\n\ndef get_newest_games():\n filters = _create_filters()\n filters['new'] = True\n return _games.find(filters, {'_id' : False}).sort([('_id', -1)])\n\ndef get_owners(all = False):\n owners = _games.distinct('owner') if all else _games.distinct('owner', _create_filters())\n owners.sort()\n return owners\n\ndef get_players():\n try:\n return _games.aggregate([{'$group': {'_id': False, 'max': {'$max': '$max_players'}, 'min': {'$min': '$min_players'}}}]).next()\n except:\n return None\n\ndef get_random_games(sample_size):\n return _games.aggregate([{'$match': _create_filters()}, {'$sample': {'size': sample_size}}, {'$project': {'_id': False}}])\n\ndef get_submissions():\n filters = _create_filters()\n filters['submitter'] = session['userinfo']['preferred_username']\n return _games.find(filters, {'_id': False}).sort([('sort_name', 1)])\n\ndef _insert_game(game):\n requests = [InsertOne(game)]\n games = list(_games.find().sort([('_id', -1)]).limit(10))\n if len(games) == 10:\n requests.append(UpdateOne({'_id': games[-1]['_id']}, {'$unset': {'new': 1}}))\n _games.bulk_write(requests)\n\ndef is_gamemaster():\n return _gamemasters.count({'username': session['userinfo']['preferred_username']})\n\ndef _prepare_game(game):\n del game['image']\n game['new'] = True\n game['sort_name'] = sub('(A|(An)|(The)) ', '', game['name'])\n game['submitter'] = session['userinfo']['preferred_username']\n\ndef require_gamemaster(function):\n @wraps(function)\n def wrapper(*args, 
**kwargs):\n if is_gamemaster():\n return function(*args, **kwargs)\n abort(403)\n return wrapper\n\ndef require_read_key(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n try:\n if not _api_keys.count({'key': request.headers['Authorization'][7:]}):\n abort(403)\n except:\n abort(403)\n return function(*args, **kwargs)\n return wrapper\n\ndef require_write_key(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n try:\n if not _api_keys.find_one({'key': request.headers['Authorization'][7:]})['write']:\n abort(403)\n except:\n abort(403)\n return function(*args, **kwargs)\n return wrapper\n\ndef submit_game():\n game = Game()\n if game.validate():\n game = game.data\n _s3.upload_fileobj(game['image'], environ['S3_BUCKET'], game['name'] + '.jpg', ExtraArgs = {'ContentType': game['image'].content_type})\n _prepare_game(game)\n _insert_game(game)\n return True\n return game, next(iter(game.errors.values()))[0]","sub_path":"game_night/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"23247324","text":"import copy\nimport json\nimport logging\nimport os\n\n\nfrom . import uri, utilities\n\nlogger = logging.getLogger(__name__)\n\n\n\n\ndefault_conf = {\n \"database\": {\n \"host\": \"localhost\",\n \"port\": 3004,\n \"db\": \"visinum\"\n },\n \"girder\": {\n \"apiUrl\": \"http://localhost:8080/api/v1\",\n \"apiKey\": \"EdKeaqELS40XIrepHcXZFuLQzrMOGUJOVIeeyR5Z\",\n },\n}\n\n\ndef initialize(conf=None):\n global app_config\n app_config = {}\n load_config(default_conf)\n if conf:\n load_config(conf)\n return app_config\n\n\ndef load_config(filepath):\n global app_config\n _config = {}\n if isinstance(filepath, str) and os.path.isfile(filepath):\n try:\n with open(filepath, 'r') as conf_fh:\n _config = json.load(conf_fh)\n except Exception as err: \n logger.error(\"load_config: cannot load config file «%s», failed with error «%s».\" % (filepath, err) )\n elif isinstance(filepath, dict):\n _config = filepath\n # elif isinstance(filepath, str):\n # try:\n # _config = json.loads(filepath)\n # except Exception as err: \n # logger.error(\"load_config: cannot load config string «%s», failed with error «%s».\" % (filepath, err) )\n else:\n logger.error(\"load_config: cannot find config file «%s» in directory «%s»\" % (filepath, os.getcwd()) )\n utilities.rupdate(app_config, _config)\n return app_config\n\n\n\n\ndef get_config(*keys):\n global app_config\n if not keys:\n return False\n _datadict = copy.deepcopy(app_config)\n for _key in keys:\n _val = _datadict.get(_key, None)\n if isinstance(_val, dict):\n _datadict = _val\n else:\n break\n return _val\n\n\ndef get_db_uri(db=None, collection=None, _id=None, vn_uri=None, port=None, **kwargs):\n _db_uri = get_config(\"database\")\n _db_uri.update(kwargs)\n if db:\n _db_uri[\"db\"] = db\n if collection:\n _db_uri[\"collection\"] = collection\n if port:\n _db_uri[\"port\"] = port\n if not _id and vn_uri:\n _id = uri.string2UUID(vn_uri)\n if _id:\n _db_uri[\"_id\"] = _id\n return _db_uri\n\n\n\ndef make_req_url(*paths):\n apiUrl = app_config[\"girder\"][\"apiUrl\"] \n if apiUrl.endswith(\"/\"):\n apiUrl = apiUrl[:-1]\n return apiUrl + \"/\" + \"/\".join(paths)\n","sub_path":"visinum/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"283626713","text":"# 
--------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom knack.arguments import enum_choice_list\nfrom .const import (SERVICE_ENDPOINT_AUTHORIZATION_PERSONAL_ACCESS_TOKEN,\n SERVICE_ENDPOINT_TYPE_GITHUB,\n SERVICE_ENDPOINT_AUTHORIZATION_SERVICE_PRINCIPAL,\n SERVICE_ENDPOINT_TYPE_AZURE_RM)\n\n\n# CUSTOM CHOICE LISTS\n_YES_NO_SWITCH_VALUES = ['yes', 'no']\n_SOURCE_CONTROL_VALUES = ['git', 'tfvc']\n_PROJECT_VISIBILITY_VALUES = ['private', 'public']\n_STATE_VALUES = ['invalid', 'unchanged', 'all', 'new', 'wellformed', 'deleting', 'createpending']\n_SERVICE_ENDPOINT_TYPE = [SERVICE_ENDPOINT_TYPE_GITHUB, SERVICE_ENDPOINT_TYPE_AZURE_RM]\n_SERVICE_ENDPOINT_AUTHORIZATION_SCHEME = [SERVICE_ENDPOINT_AUTHORIZATION_PERSONAL_ACCESS_TOKEN,\n SERVICE_ENDPOINT_AUTHORIZATION_SERVICE_PRINCIPAL]\n\n\ndef load_global_args(context):\n from azure.cli.core.commands.parameters import get_enum_type\n context.argument('organization', options_list=('--organization', '--org'),\n help='Azure Devops organization URL. Example: https://dev.azure.com/MyOrganizationName/')\n context.argument('detect', arg_type=get_enum_type(['on', 'off']),\n help='Automatically detect organization. Default is \"on\".')\n context.argument('project', options_list=('--project', '-p'), help='Name or ID of the project.')\n\n\ndef load_team_arguments(self, _):\n with self.argument_context('devops configure') as context:\n context.argument('defaults', options_list=('--defaults', '-d'), nargs='*')\n with self.argument_context('devops project') as context:\n context.argument('process', options_list=('--process', '-p'))\n context.argument('source_control', options_list=('--source-control', '-s'),\n **enum_choice_list(_SOURCE_CONTROL_VALUES))\n context.argument('description', options_list=('--description', '-d'))\n context.argument('state', **enum_choice_list(_STATE_VALUES))\n context.argument('visibility', **enum_choice_list(_PROJECT_VISIBILITY_VALUES))\n with self.argument_context('devops service-endpoint create') as context:\n context.argument('service_endpoint_type', **enum_choice_list(_SERVICE_ENDPOINT_TYPE))\n context.argument('authorization_scheme', **enum_choice_list(_SERVICE_ENDPOINT_AUTHORIZATION_SCHEME))\n with self.argument_context('devops project delete') as context:\n context.argument('yes', options_list=['--yes', '-y'], action='store_true',\n help='Do not prompt for confirmation.')\n with self.argument_context('devops configure') as context:\n context.argument('use_git_aliases', **enum_choice_list(_YES_NO_SWITCH_VALUES))\n context.argument('list_config', options_list=('--list', '-l'))\n\n with self.argument_context('devops') as context:\n load_global_args(context)\n\n with self.argument_context('repos') as context:\n load_global_args(context)\n\n with self.argument_context('artifacts') as context:\n load_global_args(context)\n\n with self.argument_context('boards') as context:\n load_global_args(context)\n\n with self.argument_context('pipelines') as context:\n load_global_args(context)\n","sub_path":"azure-devops/azext_devops/dev/team/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
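The arguments.py sample above funnels every user-facing flag through a fixed choice list (yes/no switches, source-control types, endpoint schemes) before registering it on a command context. A minimal, self-contained sketch of that pattern with the standard-library argparse follows; the parser, flag names and defaults here are illustrative only, not part of the azure-devops code:

import argparse

# Fixed choice lists, mirroring _YES_NO_SWITCH_VALUES / _SOURCE_CONTROL_VALUES above.
_YES_NO_SWITCH_VALUES = ['yes', 'no']
_SOURCE_CONTROL_VALUES = ['git', 'tfvc']

parser = argparse.ArgumentParser(description='illustrative choice-list wiring')
parser.add_argument('--source-control', choices=_SOURCE_CONTROL_VALUES, default='git',
                    help='Source control type of the project.')
parser.add_argument('--use-git-aliases', choices=_YES_NO_SWITCH_VALUES, default='no',
                    help='Whether git aliases should be registered.')

# argparse rejects any value outside the choice list with a usage error.
args = parser.parse_args(['--source-control', 'tfvc'])
print(args.source_control)  # -> tfvc

Constraining values at parse time keeps validation out of the command handlers, which is the same reason the sample wraps its lists in enum_choice_list/get_enum_type.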
+{"seq_id":"162445679","text":"\nimport logging\nimport queue\n\nfrom bluepy.btle import Peripheral\nfrom pybluepedal.common.base import BaseDelegate, BaseService\nfrom pybluepedal.common.byte_ops import byte_array_to_int\n\nlogger = logging.getLogger(\"CSCService\")\n\n\nclass CSCService(BaseService):\n UUID = \"00001816\"\n CHARACTERISTIC_MEASUREMENT = \"00002a5b\"\n CHARACTERISTIC_FEATURE = \"00002a5c\"\n CHARACTERISTIC_SENSOR_LOCATION = \"00002a5d\"\n\n ENABLE_NOTIFICATION_VALUE = (0x01, 0x00)\n\n FEATURE_CRANK_DATA = \"FEATURE_CRANK_DATA\"\n FEATURE_WHEEL_DATA = \"FEATURE_CRANK_DATA\"\n FEATURE_MULTIPLE_SENSOR_LOCATIONS = \"FEATURE_MULTIPLE_SENSOR_LOCATIONS\"\n\n CSC_FEATURES_MASK = {\n 0b00000001: FEATURE_CRANK_DATA,\n 0b00000010: FEATURE_WHEEL_DATA,\n 0b00000100: FEATURE_MULTIPLE_SENSOR_LOCATIONS,\n }\n\n CSC_FEATURES = {v: k for k, v in CSC_FEATURES_MASK.items()}\n\n def __init__(self, peripheral: Peripheral):\n super().__init__(peripheral, CSCService.UUID)\n\n def supports_feature(self, name: str) -> bool:\n \"\"\"Returns true if the feature is supported\"\"\"\n\n characteristics = self._service.getCharacteristics(\n forUUID=CSCService.CHARACTERISTIC_FEATURE)\n\n if len(characteristics) < 1:\n return False\n\n characteristic = characteristics[0]\n val = byte_array_to_int(characteristic.read())\n\n return CSCService.CSC_FEATURES[name] & val > 0\n\n def start_notifications(self, delegate: BaseDelegate):\n \"\"\"Starts the notifications for the characteristic measurement\"\"\"\n\n self._peripheral.setDelegate(delegate)\n\n characteristic = self._service.getCharacteristics(\n forUUID=CSCService.CHARACTERISTIC_MEASUREMENT)[0]\n\n resp = self._peripheral.writeCharacteristic(\n characteristic.getHandle() + 1, b\"\\x01\\x00\", True)\n\n logger.debug(f\"notification started: {resp}\")\n\n\nclass CSCDelegate(BaseDelegate):\n def __init__(self, producer_queue: queue.Queue):\n super().__init__(producer_queue)\n\n self._producer_queue = producer_queue\n\n def handleNotification(self, cHandle, data):\n logger.debug(f\"handing notification {cHandle} {data}\")\n\n values = bytearray(data)\n\n cumulative_wheel_revolutions = byte_array_to_int(bytes(values[1:5]))\n last_wheel_event_time = byte_array_to_int(bytes(values[5:7]))\n cumulative_crank_revolutions = byte_array_to_int(bytes(values[7:9]))\n last_crank_event_time = byte_array_to_int(bytes(values[9:]))\n\n data = {\n \"type\": \"CSC\",\n \"handle\": cHandle,\n \"cumulative_wheel_revolutions\": cumulative_wheel_revolutions,\n \"last_wheel_event_time\": last_wheel_event_time,\n \"cumulative_crank_revolutions\": cumulative_crank_revolutions,\n \"last_crank_event_time\": last_crank_event_time,\n }\n\n self._producer_queue.put(data)\n logger.debug(f\"added to queue {data}\")\n","sub_path":"pybluepedal/services/cycling_speed_cadence.py","file_name":"cycling_speed_cadence.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"181226239","text":"# -*- coding:utf-8 -*-\nimport pagerank\nimport OutputData\nfrom numpy import *\nimport InputData\n\n\nmatrixfile = 'gather_new.csv'\nOffsetRow = 1\nOffsetCol = 1\npi = pagerank.pagerankmain(matrixfile, OffsetRow, OffsetCol) #计算网站的pagerank值\n\nurlfile = 'id_url.csv'\nInputUrl = InputData.InputUrl()\nUrlList = InputUrl.ReadUrl(urlfile)\nIdList = InputUrl.ReadId(urlfile)\n\nn1 = len(UrlList)\nn2 = pi.shape[1]\nif n1 == n2:\n for i in range(n2):\n data = {\"_id\": int(IdList[i]), \"url\": UrlList[i], \"pr\": pi[0][i]}\n 
outputer = OutputData.Outputpi()\n outputer.OutputMongo(data)\n","sub_path":"NjuPagerank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"578274374","text":"# -*- coding: utf-8 -*-\n\nfrom typing import Union, AnyStr\nfrom decimal import Decimal, InvalidOperation\n\n\ndef num_to_words(n: Union[int, float, AnyStr, Decimal]) -> str:\n \"\"\"Convert a number to Vietnamese words.\n\n Convert a number to its Vietnamese formal spoken form. It supports\n long numbers (both integers and decimals).\n\n Parameters\n ----------\n n : int, float, Decimal or str\n The number to be converted. If `n` is a str, it will be converted\n to a Decimal object.\n\n Returns\n -------\n str\n The spoken form of the number\n \n Raises\n ------\n TypeError\n If the input's type is neither int, float, str nor Decimal\n\n ValueError\n If the input string does not represent a valid number\n \"\"\"\n \n digits = ('không', 'một', 'hai', 'ba', 'bốn', 'năm', 'sáu', 'bảy', 'tám', 'chín', 'mười')\n levels = ('đơn vị', 'nghìn', 'triệu')\n \n def per_digit(n):\n return [digits[int(s)] for s in str(n)]\n \n def per_thousand(n, linh=False):\n tarr = []\n if 100 <= n <= 999:\n n1, n2 = divmod(n, 100)\n tarr.append(digits[n1])\n tarr.append('trăm')\n if 1 <= n2 <= 9:\n tarr.append('linh')\n n = n2\n if 1 <= n <= 9:\n if linh:\n tarr.append('linh')\n tarr.append(digits[n])\n elif n <= 99 and n != 0:\n n1, n2 = divmod(n, 10)\n ele = digits[n2]\n if n1 == 1:\n tarr.append('mười')\n else:\n tarr.append(digits[n1])\n tarr.append('mươi')\n if n2 == 1:\n ele = 'mốt'\n elif n2 == 4:\n ele = 'tư'\n if n2 == 5:\n ele = 'lăm'\n if ele != 'không': tarr.append(ele)\n return tarr\n \n tarr = []\n if isinstance(n, str):\n try:\n n = Decimal(n)\n except InvalidOperation as e:\n raise ValueError(f\"'{n}' is not a valid number.\")\n elif not isinstance(n, (int, float, Decimal)):\n raise TypeError('The first parameter must be an integer or a float.')\n if int(n) == 0:\n tarr.append('không')\n elif int(n) < 0:\n tarr.append('âm')\n n = abs(n)\n ns = str(n)\n if '.' in ns:\n is_decimal = True\n intn, decn = ns.split('.')\n ns = intn\n else:\n is_decimal = False\n \n length = len(ns)\n splited = [ns[0:len(ns) % 3]] + [ns[i:i+3] for i in range(len(ns) % 3, len(ns), 3)]\n splited = list(filter(None, splited))\n \n for part in splited:\n pn = int(part)\n if pn != 0:\n if part[0] == '0' and 1 <= pn <= 99:\n tarr.append('không trăm')\n linh = True\n else:\n linh = False\n tarr.extend(per_thousand(pn, linh))\n bilis, thous = divmod((length - 1) // 3, 3)\n if thous > 0:\n tarr.append(levels[thous])\n tarr.extend(['tỉ'] * bilis)\n length -= 3\n \n if is_decimal:\n tarr.append('phẩy')\n if decn != '0':\n decn = decn.rstrip('0')\n if 2 <= len(decn) <= 3:\n dec_int = int(decn)\n if decn[0] == '0' and 1 <= dec_int <= 99:\n tarr.append('không trăm')\n tarr.extend(per_thousand(dec_int))\n else:\n tarr.extend(per_digit(int(decn)))\n tarr = list(filter(None, tarr))\n return ' '. 
join(tarr)","sub_path":"ctnx/number.py","file_name":"number.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"381460184","text":"import subprocess\nimport platform\nimport errno\nimport os\nfrom config import config\n\nimport win\n\nclass net():\n \n def __init__(self,card='wlan'):\n self.card = card\n self.pc_type = self._get_platform()\n self.current = self._settings_dir()\n self.config = config()\n\n arg = {'card': self.card,'pc_type':self.pc_type,\n 'direc': self.current,'config':self.config}\n \n systems = {'windows':win.windows(arg)}\n self.system = systems[self.pc_type]\n \n def _get_platform(self):\n try:\n plat = platform.system().lower()\n except Exception as e:\n plat = 0\n print(e)\n return plat\n \n def _settings_dir(self):\n direc = os.path.join(os.getcwd(),\"config\",\"profiles\")\n try:\n os.makedirs(direc) \n except OSError as e:\n if e.errno != errno.EEXIST:\n direc = \".\"\n \n return direc\n \n def sys(self):\n return self.system\n\n \ndef test_scan():\n work = net().sys()\n items = work.scan()\n \n for i in items:\n print('%s:%s------\\n'%(i.ssid,i.authentication))\n #if len(i.bssids) >=1:\n # print(i.bssids[0].id)\ndef test_disconnect():\n work = net().sys().disconnect()\n#test_scan()\n \n#test export\nsetting= {'name':'RGGCN','password':'123456'}\nprint(net().sys()._create_profile_xml(setting))\n\n#print([item.authentication for item in net().sys().scan()])\n\ninput(\"Press any key to exit.\")\n\n# connect(SSID,password)\n\n\n","sub_path":"wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"231844579","text":"\"\"\"\n==========================================================================\nJson implementation of :py:class:`~.base.mapper.MapperABC` abstract class\n==========================================================================\n\"\"\"\n\nfrom base.mapper import MapperABC\nimport json\n\n\nclass JsonMapper(MapperABC):\n \"\"\"\n Implementation of :py:class:`~.base.mapper.MapperABC` abstract class.\n For more info see MapperABC\n \"\"\"\n\n def load(self, data) -> 'JsonMapper':\n if data is None:\n self.data = dict()\n elif isinstance(data, str):\n self.data = json.loads(data)\n elif isinstance(data, dict):\n self.data = data\n else:\n raise TypeError(\n 'Supplied data must be json string or dictionary.'\n ' data you supplied was of %s type' % (\n str(type(data)),\n )\n )\n return self\n\n def get_item(self, key: str) -> any:\n return self.data.get(key)\n\n def dump(self):\n return json.dumps(self.dump_data, ensure_ascii=False)\n\n def set_item(self, key: str, value: any) -> None:\n self.dump_data[key] = value\n","sub_path":"jsonparser/jsonmapper.py","file_name":"jsonmapper.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"614127669","text":"# Libraries\nimport argparse\nimport sets\nimport numpy as np\nimport pandas as pd\nimport os, pickle\nimport copy\nimport sklearn.utils as sk\n\n# interact with code with `kill -SIGUSR2 `\nimport code\nimport signal\nsignal.signal(signal.SIGUSR2, lambda sig, frame: code.interact(local=dict(globals(), **locals())))\n\n# Declare the variables to be\n# read from command line\n\n# initial learning rate\nlr = 0\n# momentum\nmomentum = 0\n# number of hidden layers\nnum_hidden = 0\n# sizes of hidden 
layers\nsizes = [784]\n# activation function\nactivation = 'tanh'\n# loss function\nloss = 'ce'\n# optimization algorithm\nopt = 'gd'\n# batch size\nbatch_size = 20\n# annealing\nanneal = False\n# model save directory\nsave_dir = '/pa1'\n# save all thetas?\nsave_all_thetas = True\n# log save directory\nexpt_dir = '/pa1/exp'\n# path to train dataset\ntrain_path = 'train.csv'\n# path to test dataset\ntest_path = 'test.csv'\n# path to validation dataset\nval_path = ''\n# log frequency (steps)\nlog_frequency = 100\n# max epochs\nmax_epochs = 50\n# anneal type\nanneal_type = \"val\"\n# anneal lr threshold\nanneal_threshold = 4e-8\n# regularization parameter\nregularization = 1e-4\n\n# neural network parameters\nW_layer = [-1]\nB_layer = [-1]\n# neuron inputs and activations\nA_layer = [-1]\nH_layer = [-1]\n# Total number of layers\nL = 0\n# list containing all parameters\ntheta = []\n\ndef main():\n\tglobal theta\n\t# read training data into a pandas dataframe\n\tt_df = pd.read_csv(train_path)\n\tt_df.set_index('id', inplace=True)\n\tnum_features = len(t_df.columns)-1\n\t# import pdb\n\t# pdb.set_trace()\n\t# normalize between 0 and 1 - not required?\n\tt_df.iloc[:, 0:num_features] /= 255.0\n\tX = t_df.iloc[:, 0:num_features].as_matrix()\n\tX = X.reshape([X.shape[0],X.shape[1],1])\n\tY_temp = t_df.iloc[:, num_features].as_matrix()\n\tY = np.zeros([Y_temp.shape[0],10,1])\n\tfor i in range(Y_temp.shape[0]):\n\t\tY[i] = get_output_vector(Y_temp[i])\n\n\n\tglobal X_val, Y_val\n\t# read validation data into a pandas dataframe\n\tt_df_val = pd.read_csv(val_path)\n\tt_df_val.set_index('id', inplace=True)\n\t# normalize between 0 and 1 - not required?\n\tt_df_val.iloc[:, 0:num_features] /= 255.0\n\tX_val = t_df_val.iloc[:, 0:num_features].as_matrix()\n\tX_val = X_val.reshape([X_val.shape[0],X_val.shape[1],1])\n\tY_val_temp = t_df_val.iloc[:, num_features].as_matrix()\n\tY_val = np.zeros([Y_val_temp.shape[0],10,1])\n\tfor i in range(Y_val_temp.shape[0]):\n\t\tY_val[i] = get_output_vector(Y_val_temp[i])\n\n\n\t# # read test data into a pandas dataframe\n\t# t_df_test = pd.read_csv(test_path)\n\t# t_df_test.set_index('id', inplace=True)\n\t# # normalize between 0 and 1\n\t# t_df_test.iloc[:, 0:num_features] /= 255.0\n\t# X_test = t_df_test.iloc[:, 0:num_features].as_matrix()\n\t# X_test = X_test.reshape([X_test.shape[0],X_test.shape[1],1])\n\n\t\n\t# print(X.shape)\n\t# print(Y.shape)\n\n\t# neural network parameters\n\t# Use Xavier Glorot init for Weights - zero mean, suitable variance?\n\tfor i in xrange(1, L+1):\n\t\troot = np.sqrt(6/(sizes[i] + sizes[i-1]))\n\t\tif(opt == 'adam'):\n\t\t\tW_layer.append(np.random.uniform(-1*root, root,[sizes[i],sizes[i-1]]))\n\t\telse:\n\t\t\tW_layer.append(np.random.randn(sizes[i],sizes[i-1]))\n\t# Initialize bias as zeros\n\tfor i in xrange(1, L+1):\n\t\t# B_layer.append(np.zeros([sizes[i],1]))\n\t\t# B_layer.append(np.random.uniform(0,1,[sizes[i],1]))\n\t\tB_layer.append(np.random.randn(sizes[i],1))\n\t# neuron inputs and activations\n\tfor i in xrange(1, L+1):\n\t\tA_layer.append(np.zeros([sizes[i],1]))\n\tfor i in xrange(1, L):\n\t\tH_layer.append(np.zeros([sizes[i],1]))\n\t\n\ttheta = init_theta()\n\t\n\tglobal log_train_file, log_val_file, theta_pickle_file\n\tlog_train_file = open(os.path.join(expt_dir, \"log_train.txt\"), \"w\", 1)\n\tlog_val_file = open(os.path.join(expt_dir, \"log_val.txt\"), \"w\", 1)\n\ttheta_pickle_file = open(os.path.join(save_dir, \"theta.pickle\"), \"w\")\n\n\tif(opt == 'gd'):\n\t\tdo_mini_batch_gradient_descent(X, Y)\n\telif(opt == 
'momentum'):\n\t\tmomentum_gradient_descent(X, Y)\n\telif(opt == 'nag'):\n\t\tnag_gradient_descent(X, Y)\n\telse:\n\t\tadam_gradient_descent(X, Y)\n\n\t# pickle.dump(theta, theta_pickle_file)\n\tlog_train_file.close()\n\tlog_val_file.close()\n\ttheta_pickle_file.close()\n\n\ndef make_dir(dir_path):\n\tif not os.path.exists(dir_path):\n\t\ttry:\n\t\t\tos.makedirs(dir_path)\n\t\texcept OSError as exc: # Guard against race condition\n\t\t\tif exc.errno != errno.EEXIST:\n\t\t\t\traise\n\nif __name__ == \"__main__\":\n\t# Initialize the parser\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--lr\", help=\"learning rate\")\n\tparser.add_argument(\"--momentum\", help=\"momentum\")\n\tparser.add_argument(\"--num_hidden\", help=\"num_hidden\")\n\tparser.add_argument(\"--sizes\", help=\"sizes of hidden layers\")\n\tparser.add_argument(\"--activation\", help=\"activation function\")\n\tparser.add_argument(\"--loss\", help=\"loss function\")\n\tparser.add_argument(\"--opt\", help=\"optimization algorithm\")\n\tparser.add_argument(\"--batch_size\", help=\"batch_size\")\n\tparser.add_argument(\"--anneal\", help=\"anneal\")\n\tparser.add_argument(\"--save_dir\", help=\"save directory\")\n\tparser.add_argument(\"--expt_dir\", help=\"log directory\")\n\tparser.add_argument(\"--train\", help=\"train path\")\n\tparser.add_argument(\"--test\", help=\"test path\")\n\tparser.add_argument(\"--val\", help=\"validation path\")\n\tparser.add_argument('--save_all_thetas', dest='save_all_thetas', action='store_true')\n\tparser.add_argument('--no-save_all_thetas', dest='save_all_thetas', action='store_false')\n\tparser.set_defaults(save_all_thetas=True)\n\t\n\targs = parser.parse_args()\n\tsave_all_thetas = args.save_all_thetas\n\n\t# Process the command line arguments\n\tif(args.lr):\n\t\tlr = float(args.lr)\n\t\tif(args.momentum):\n\t\t\tmomentum = float(args.momentum)\n\tif(args.num_hidden):\n\t\tnum_hidden = int(args.num_hidden)\n\tif(args.sizes):\n\t\ttmp = args.sizes.split(',')\n\t\tif(len(tmp)!=num_hidden):\n\t\t\tprint('argument mismatch!')\n\t\t\texit()\n\t\tfor x in tmp:\n\t\t\tsizes.append(int(x))\n\t\tsizes.append(10)\n\tif(args.activation):\n\t\tactivation = str(args.activation)\n\t\toptions = sets.Set(['sigmoid','tanh','relu','elu'])\n\t\tif(activation not in options):\n\t\t\tprint('Invalid activation function')\n\t\t\texit()\n\tif(args.loss):\n\t\tloss = str(args.loss)\n\t\toptions = sets.Set(['sq','ce'])\n\t\tif(loss not in options):\n\t\t\tprint('Invalid loss function')\n\t\t\texit()\n\tif(args.opt):\n\t\topt = str(args.opt)\n\t\toptions = sets.Set(['gd', 'momentum', 'nag', 'adam'])\n\t\tif(opt not in options):\n\t\t\tprint('Invalid loss function')\n\t\t\texit()\n\tif(args.batch_size):\n\t\tbatch_size = int(args.batch_size)\n\t\tif(batch_size != 1 and batch_size%5 != 0):\n\t\t\tprint('Invalid batch size')\n\t\t\texit()\n\tif(args.anneal):\n\t\tif(args.anneal == 'true' or args.anneal == 'True'):\n\t\t\tanneal = True\n\tif(args.save_dir):\n\t\tsave_dir = str(args.save_dir)\n\t\tmake_dir(save_dir)\n\tif(args.expt_dir):\n\t\texpt_dir = str(args.expt_dir)\n\t\tmake_dir(expt_dir)\n\tif(args.train):\n\t\ttrain_path = str(args.train)\n\tif(args.test):\n\t\ttest_path = str(args.test)\n\tif (args.val):\n\t\tval_path = str(args.val)\n# Set the total number of layers\nL = num_hidden + 1\n\ndef print_params():\n\tprint(\"lr: {}\".format(lr))\n\tprint(\"momentum: {}\".format(momentum))\n\tprint(\"num_hidden: {}\".format(num_hidden))\n\tprint(\"sizes: {}\".format(sizes))\n\tprint(\"activation: 
{}\".format(activation))\n\tprint(\"loss: {}\".format(loss))\n\tprint(\"opt: {}\".format(opt))\n\tprint(\"batch_size: {}\".format(batch_size))\n\tprint(\"anneal: {}\".format(anneal))\n\tprint(\"save_dir: {}\".format(save_dir))\n\tprint(\"expt_dir: {}\".format(expt_dir))\n\tprint(\"train_path: {}\".format(train_path))\n\tprint(\"test_path: {}\".format(test_path))\n\tprint(\"val_path: {}\".format(val_path))\n\tprint(\"save_all_thetas: {}\".format(save_all_thetas))\n\ndef cross_entropy_loss(y_hat, y):\n\treturn -1 * np.log2(y_hat[np.argmax(y)])[0]\n\ndef square_loss(y_hat, y):\n\treturn np.sum(np.square(y - y_hat))\n\ndef get_loss(y_hat, y):\n\tif(loss == 'ce'):\n\t\treturn cross_entropy_loss(y_hat, y)\n\telse:\n\t\treturn square_loss(y_hat, y)\n\ndef get_output_vector(n):\n\tout = np.zeros([10,1])\n\tout[n][0] = 1\n\treturn out\n\ndef sigmoid(x): \n\t# return 1.0 / (1.0 + np.exp(-x))\n\treturn 0.5 * (1 + tanh(0.5*x))\n\ndef sigmoid_der(x): \n\treturn sigmoid(x) * (1 - sigmoid(x))\n\ndef tanh(x):\n\t# return ( np.exp(x) - np.exp(-x) ) / ( np.exp(x) + np.exp(-x) )\n\treturn np.tanh(x)\n\ndef tanh_der(x):\n\treturn (1 - tanh(x) ** 2 )\n\ndef relu(x):\n\treturn (x*(x>0))\n\n# check division by 0?\ndef relu_der(x):\n\treturn ( (x*(x>0)) / (x+(x==0)) )\n\ndef elu(x):\n\treturn x*(x >= 0) + (np.exp(x) - 1)*(x < 0)\n\ndef elu_der(x):\n\treturn (x >= 0) + (x < 0)*(np.exp(x))\n\ndef activation_func(x):\n\tif(activation == 'sigmoid'):\n\t\treturn sigmoid(x)\n\telif(activation == 'relu'):\n\t\treturn relu(x)\n\telif(activation == 'elu'):\n\t\treturn elu(x)\n\telse:\n\t\treturn tanh(x)\n\ndef activation_der(x):\n\tif(activation == 'sigmoid'):\n\t\treturn sigmoid_der(x)\n\telif(activation == 'relu'):\n\t\treturn relu_der(x)\n\telif(activation == 'elu'):\n\t\treturn elu_der(x)\n\telse:\n\t\treturn tanh_der(x)\n\ndef softmax(x):\n # e_x = np.exp(x)\n e_x = np.exp(x - np.max(x))\n out = e_x / e_x.sum()\n return out\n\ndef calc_error_loss(X, Y):\n\tnum_samples, num_correct, loss = X.shape[0], 0, 0\n\ttrue_positive_count = np.zeros(10)\n\tfalse_positive_count = np.zeros(10)\n\tfalse_negative_count = np.zeros(10)\n\tfor x, y in zip(X, Y):\n\t\ty_hat = forward_propagation(x)\n\t\tloss += get_loss(y_hat, y)\n\t\ttrue_class = np.argmax(y)\n\t\tpred_class = np.argmax(y_hat)\n\t\tif (true_class == pred_class):\n\t\t\tnum_correct += 1\n\t\t\ttrue_positive_count[true_class] += 1\n\t\telse:\n\t\t\tfalse_negative_count[true_class] += 1\n\t\t\tfalse_positive_count[pred_class] += 1\n\n\tsum_tp = true_positive_count.sum()\n\tsum_fp = false_positive_count.sum()\n\tsum_fn = false_negative_count.sum()\n\tprecision = sum_tp/(sum_tp + sum_fp)\n\trecall = sum_tp/(sum_tp + sum_fn)\n\tif(precision == 0 or recall == 0):\n\t\t# print(precision, recall)\n\t\tprecision = 1\n\t\trecall = 1\n\n\tmean_f_score = 2.0/((1.0/precision)+(1.0/recall))\n\n\treturn ((num_samples-num_correct) * 1.0/num_samples, loss * 1.0/num_samples, mean_f_score)\n\ndef init_theta():\n\ttheta = []\n\ttheta.append(W_layer)\n\ttheta.append(B_layer)\n\treturn theta\n\ndef init_d_theta():\n\td_theta = [[], []]\n\tfor i in range(2):\n\t\td_theta[i].append(-1)\n\tfor j in xrange(1, L+1):\n\t\td_theta[0].append(np.zeros(W_layer[j].shape))\n\t\td_theta[1].append(np.zeros(B_layer[j].shape))\n\treturn d_theta\n\ndef update_adam_factors(d_theta, d_theta_sq):\n\tfor i in range(2):\n\t\tfor j in xrange(len(d_theta[i])):\n\t\t\td_theta_sq[i][j] = np.square(d_theta[i][j])\n\ndef add_and_set_theta(theta1, theta2):\n\tfor i in range(2):\n\t\tfor j in 
xrange(len(theta1[i])):\n\t\t\ttheta1[i][j] += theta2[i][j]\n\ndef sub_and_set_theta(theta1, theta2):\n\tfor i in range(2):\n\t\tfor j in xrange(len(theta1[i])):\n\t\t\ttheta1[i][j] -= theta2[i][j]\n\ndef scalar_mul_theta(theta, a):\n\tfor i in range(2):\n\t\tfor j in xrange(len(theta[i])):\n\t\t\ttheta[i][j] *= a;\n\ndef copy_to_theta(theta, theta1):\n\tfor i in range(2):\n\t\tfor j in xrange(len(theta[i])):\n\t\t\ttheta[i][j] = theta1[i][j]\n\ndef adam_decay_scale(m_t, v_t, epsilon):\n\tfor i in range(2):\n\t\tfor j in xrange(1, len(m_t[i])):\n\t\t\tm_t[i][j] *= ( 1.0 / np.sqrt( epsilon + v_t[i][j] ) )\n\ndef do_mini_batch_gradient_descent(X, Y):\n\td_theta = init_d_theta()\n\tprev_epoch_val_loss = float('inf')\n\tprev_epoch_train_loss = float('inf')\n\tprev_epoch_theta = []\n\tval_loss = 1\n\ttrain_loss = 1\n\ti = 0\n\tprev_i = -1\n\tcounter = 1\n\n\tbest_theta = []\n\tbest_val_score = 0\n\n\tglobal lr\n\twhile i < max_epochs:\n\t\tX,Y = sk.shuffle(X,Y)\n\t\tnum_points_seen = 0\n\t\tsteps = 0\n\t\tfor x,y in zip(X,Y):\n\t\t\ty_hat = forward_propagation(x)\n\t\t\tadd_and_set_theta(d_theta, backward_propagation(y, y_hat))\n\t\t\tnum_points_seen += 1\n\t\t\tif(num_points_seen % batch_size == 0):\n\t\t\t\t# seen one mini batch\n\t\t\t\tscalar_mul_theta(d_theta, lr)\n\t\t\t\tsub_and_set_theta(theta, d_theta)\n\t\t\t\tsteps += 1\n\t\t\t\tscalar_mul_theta(d_theta, 0)\n\t\t\t\tif steps % log_frequency == 0:\n\t\t\t\t\tprev_i = i\n\t\t\t\t\ttrain_error, train_loss, train_score = calc_error_loss(X, Y)\n\t\t\t\t\tval_error, val_loss, val_score = calc_error_loss(X_val, Y_val)\n\t\t\t\t\tif val_score > best_val_score:\n\t\t\t\t\t\tbest_theta = theta\n\t\t\t\t\t\tbest_theta_pickle_file = open(os.path.join(save_dir, \"best_theta.pickle\"), \"w\")\n\t\t\t\t\t\tpickle.dump(best_theta, best_theta_pickle_file)\n\t\t\t\t\t\tbest_theta_pickle_file.close()\n\t\t\t\t\t\tbest_val_score = val_score\n\n\t\t\t\t\tlog_train_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, train_loss, train_error, lr, train_score))\n\t\t\t\t\tlog_val_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, val_loss, val_error, lr, val_score))\n\t\t\t\t\tcounter += 1\n\t\tif save_all_thetas:\n\t\t\tpickle.dump(theta, theta_pickle_file)\n\n\t\trepeat_epoch = False\n\t\tif anneal_type == \"val\":\n\t\t\trepeat_epoch = (val_loss > prev_epoch_val_loss)\n\t\telse:\n\t\t\trepeat_epoch = (train_loss > prev_epoch_train_loss)\n\t\t\n\t\tif (anneal == True) and repeat_epoch:\n\t\t\tcopy_to_theta(theta, prev_epoch_theta)\n\t\t\tif (lr < anneal_threshold):\n\t\t\t\treturn;\n\t\t\tlr /= 2\n\t\telse:\n\t\t\tif (anneal == True):\n\t\t\t\tprev_epoch_theta = copy.deepcopy(theta)\n\t\t\t\tprev_epoch_train_loss = train_loss\n\t\t\t\tprev_epoch_val_loss = val_loss\n\t\t\ti += 1\n\ndef momentum_gradient_descent(X, Y):\n\td_theta = init_d_theta()\n\tupdate = init_d_theta()\n\n\tprev_epoch_val_loss = float('inf')\n\tprev_epoch_train_loss = float('inf')\n\tprev_epoch_theta = []\n\tprev_update = []\n\tval_loss = 1\n\ttrain_loss = 1\n\ti = 0\n\tprev_i = -1\n\tcounter = 1\n\n\tbest_theta = []\n\tbest_val_score = 0\n\n\tglobal lr\n\twhile i < max_epochs:\n\t\tX,Y = sk.shuffle(X,Y)\n\t\tnum_points_seen = 0\n\t\tsteps = 0\n\t\tfor x,y in zip(X,Y):\n\t\t\ty_hat = forward_propagation(x)\n\t\t\tadd_and_set_theta(d_theta, backward_propagation(y, y_hat))\n\t\t\tnum_points_seen += 1\n\t\t\tif(num_points_seen % batch_size == 0):\n\t\t\t\t# seen one mini 
batch\n\t\t\t\tscalar_mul_theta(update, momentum)\n\t\t\t\tscalar_mul_theta(d_theta, lr)\n\t\t\t\tadd_and_set_theta(update, d_theta)\n\t\t\t\tsub_and_set_theta(theta, update)\n\t\t\t\tsteps += 1\n\t\t\t\tscalar_mul_theta(d_theta, 0)\n\t\t\t\tif steps % log_frequency == 0:\n\t\t\t\t\tprev_i = i\n\t\t\t\t\ttrain_error, train_loss, train_score = calc_error_loss(X, Y)\n\t\t\t\t\tval_error, val_loss, val_score = calc_error_loss(X_val, Y_val)\n\t\t\t\t\tif val_score > best_val_score:\n\t\t\t\t\t\tbest_theta = theta\n\t\t\t\t\t\tbest_theta_pickle_file = open(os.path.join(save_dir, \"best_theta.pickle\"), \"w\")\n\t\t\t\t\t\tpickle.dump(best_theta, best_theta_pickle_file)\n\t\t\t\t\t\tbest_theta_pickle_file.close()\n\t\t\t\t\t\tbest_val_score = val_score\n\n\t\t\t\t\tlog_train_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, train_loss, train_error, lr, train_score))\n\t\t\t\t\tlog_val_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, val_loss, val_error, lr, val_score))\n\t\t\t\t\tcounter += 1\n\t\tif save_all_thetas:\n\t\t\tpickle.dump(theta, theta_pickle_file)\n\n\t\trepeat_epoch = False\n\t\tif anneal_type == \"val\":\n\t\t\trepeat_epoch = (val_loss > prev_epoch_val_loss)\n\t\telse:\n\t\t\trepeat_epoch = (train_loss > prev_epoch_train_loss)\n\t\t\n\t\tif (anneal == True) and repeat_epoch:\n\t\t\tcopy_to_theta(theta, prev_epoch_theta)\n\t\t\tcopy_to_theta(update, prev_update)\n\t\t\tif (lr < anneal_threshold):\n\t\t\t\treturn;\n\t\t\tlr /= 2\n\t\telse:\n\t\t\tif (anneal == True):\n\t\t\t\tprev_epoch_theta = copy.deepcopy(theta)\n\t\t\t\tprev_update = copy.deepcopy(update)\n\t\t\t\tprev_epoch_train_loss = train_loss\n\t\t\t\tprev_epoch_val_loss = val_loss\n\t\t\ti += 1\n\ndef nag_gradient_descent(X, Y):\n\td_theta = init_d_theta()\n\tupdate = init_d_theta()\n\n\tprev_epoch_val_loss = float('inf')\n\tprev_epoch_train_loss = float('inf')\n\tprev_epoch_theta = []\n\tprev_update = []\n\tval_loss = 1\n\ttrain_loss = 1\n\tcounter = 1\n\n\tbest_theta = []\n\tbest_val_score = 0\n\n\ti = 0\n\tprev_i = -1\n\tglobal lr\n\twhile i < max_epochs:\n\t\tX,Y = sk.shuffle(X,Y)\n\t\tnum_points_seen = 0\n\t\tsteps = 0\n\t\tfor x,y in zip(X,Y):\n\t\t\tif(num_points_seen % batch_size == 0):\n\t\t\t\tscalar_mul_theta(update, momentum)\n\t\t\t\tsub_and_set_theta(theta, update)\t\t\t\t\n\t\t\ty_hat = forward_propagation(x)\n\t\t\tadd_and_set_theta(d_theta, backward_propagation(y, y_hat))\n\t\t\tnum_points_seen += 1\n\t\t\tif(num_points_seen % batch_size == 0):\n\t\t\t\t# seen one mini batch\n\t\t\t\tscalar_mul_theta(d_theta, lr)\n\t\t\t\tadd_and_set_theta(update, d_theta)\n\t\t\t\tsub_and_set_theta(theta, d_theta)\n\t\t\t\tsteps += 1\n\t\t\t\tscalar_mul_theta(d_theta, 0)\n\t\t\t\tif steps % log_frequency == 0:\n\t\t\t\t\tprev_i = i\n\t\t\t\t\ttrain_error, train_loss, train_score = calc_error_loss(X, Y)\n\t\t\t\t\tval_error, val_loss, val_score = calc_error_loss(X_val, Y_val)\n\t\t\t\t\tif val_score > best_val_score:\n\t\t\t\t\t\tbest_theta = theta\n\t\t\t\t\t\tbest_theta_pickle_file = open(os.path.join(save_dir, \"best_theta.pickle\"), \"w\")\n\t\t\t\t\t\tpickle.dump(best_theta, best_theta_pickle_file)\n\t\t\t\t\t\tbest_theta_pickle_file.close()\n\t\t\t\t\t\tbest_val_score = val_score\n\n\t\t\t\t\tlog_train_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, train_loss, train_error, lr, train_score))\n\t\t\t\t\tlog_val_file.write(\"{}: Epoch {}, Step {}, 
Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, val_loss, val_error, lr, val_score))\n\t\t\t\t\tcounter += 1\n\t\tif save_all_thetas:\n\t\t\tpickle.dump(theta, theta_pickle_file)\n\n\t\trepeat_epoch = False\n\t\tif anneal_type == \"val\":\n\t\t\trepeat_epoch = (val_loss > prev_epoch_val_loss)\n\t\telse:\n\t\t\trepeat_epoch = (train_loss > prev_epoch_train_loss)\n\t\t\n\t\tif (anneal == True) and repeat_epoch:\n\t\t\tcopy_to_theta(theta, prev_epoch_theta)\n\t\t\tcopy_to_theta(update, prev_update)\n\t\t\tif (lr < anneal_threshold):\n\t\t\t\treturn;\n\t\t\tlr /= 2\n\t\telse:\n\t\t\tif (anneal == True):\n\t\t\t\tprev_epoch_theta = copy.deepcopy(theta)\n\t\t\t\tprev_update = copy.deepcopy(update)\n\t\t\t\tprev_epoch_train_loss = train_loss\n\t\t\t\tprev_epoch_val_loss = val_loss\n\t\t\ti += 1\n\ndef adam_gradient_descent(X, Y):\n\tbeta_1, beta_2, epsilon = 0.9, 0.999, 1e-8\n\td_theta = init_d_theta()\n\tm_t = init_d_theta()\n\tv_t = init_d_theta()\n\td_theta_sq = init_d_theta()\n\n\tprev_epoch_val_loss = float('inf')\n\tprev_epoch_train_loss = float('inf')\n\tprev_epoch_theta = []\n\tprev_m_t = []\n\tprev_v_t = []\n\tval_loss = 1\n\ttrain_loss = 1\n\n\tbest_theta = []\n\tbest_val_score = 0\n\n\tcounter = 1\n\n\ti = 0\n\tprev_i = -1\n\tglobal lr\n\tsteps = 0\n\tprev_steps = 0\n\twhile i < max_epochs:\n\t\tsteps_print = 0\n\t\tX,Y = sk.shuffle(X,Y)\n\t\tnum_points_seen = 0\n\t\tfor x,y in zip(X,Y):\n\t\t\tmasks = [-1]\n\t\t\ty_hat = forward_propagation_with_dropouts(x, masks)\n\t\t\tadd_and_set_theta(d_theta, backward_propagation_with_dropouts(y, y_hat, masks))\n\t\t\tnum_points_seen += 1\n\t\t\tif(num_points_seen % batch_size == 0):\n\t\t\t\t# seen one mini batch\n\t\t\t\tscalar_mul_theta(v_t, beta_2)\n\t\t\t\tupdate_adam_factors(d_theta, d_theta_sq)\n\t\t\t\tscalar_mul_theta(d_theta_sq, 1 - beta_2)\n\t\t\t\tadd_and_set_theta(v_t, d_theta_sq)\n\t\t\t\t\n\t\t\t\tscalar_mul_theta(m_t, beta_1)\n\t\t\t\tscalar_mul_theta(d_theta, 1 - beta_1)\n\t\t\t\tadd_and_set_theta(m_t, d_theta)\n\n\t\t\t\tsteps += 1\n\t\t\t\tsteps_print += 1\n\t\t\t\ttemp_m_t = copy.deepcopy(m_t)\n\t\t\t\ttemp_v_t = copy.deepcopy(v_t)\n\n\t\t\t\tscalar_mul_theta(temp_m_t, (1.0 / (1.0 - np.power(beta_1, steps))))\n\t\t\t\tscalar_mul_theta(temp_v_t, (1.0 / (1.0 - np.power(beta_2, steps))))\n\n\t\t\t\tadam_decay_scale(temp_m_t, temp_v_t, epsilon)\n\t\t\t\tscalar_mul_theta(temp_m_t, lr)\n\t\t\t\tsub_and_set_theta(theta, temp_m_t)\n\t\t\t\t\n\t\t\t\tscalar_mul_theta(d_theta, 0)\n\t\t\t\tif steps % log_frequency == 0:\n\t\t\t\t\tprev_i = i\n\t\t\t\t\ttrain_error, train_loss, train_score = calc_error_loss(X, Y)\n\t\t\t\t\tval_error, val_loss, val_score = calc_error_loss(X_val, Y_val)\n\n\t\t\t\t\tif val_score > best_val_score:\n\t\t\t\t\t\tbest_theta = theta\n\t\t\t\t\t\tbest_theta_pickle_file = open(os.path.join(save_dir, \"best_theta.pickle\"), \"w\")\n\t\t\t\t\t\tpickle.dump(best_theta, best_theta_pickle_file)\n\t\t\t\t\t\tbest_theta_pickle_file.close()\n\t\t\t\t\t\tbest_val_score = val_score\n\n\t\t\t\t\tlog_train_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps_print, train_loss, train_error, lr, train_score))\n\t\t\t\t\tlog_val_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps_print, val_loss, val_error, lr, val_score))\n\t\t\t\t\tcounter += 1\n\t\t\n\t\tif save_all_thetas:\n\t\t\tpickle.dump(theta, theta_pickle_file)\n\n\t\trepeat_epoch = False\n\t\tif anneal_type == \"val\":\n\t\t\trepeat_epoch = 
(val_loss > prev_epoch_val_loss)\n\t\telse:\n\t\t\trepeat_epoch = (train_loss > prev_epoch_train_loss)\n\t\t\n\t\tif (anneal == True) and repeat_epoch:\n\t\t\tcopy_to_theta(theta, prev_epoch_theta)\n\t\t\tcopy_to_theta(m_t, prev_m_t)\n\t\t\tcopy_to_theta(v_t, prev_v_t)\n\t\t\tsteps = prev_steps\n\t\t\tif (lr < anneal_threshold):\n\t\t\t\treturn;\n\t\t\tlr /= 2\n\t\telse:\n\t\t\tif (anneal == True):\n\t\t\t\tprev_epoch_theta = copy.deepcopy(theta)\n\t\t\t\tprev_m_t = copy.deepcopy(m_t)\n\t\t\t\tprev_v_t = copy.deepcopy(v_t)\n\t\t\t\tprev_epoch_train_loss = train_loss\n\t\t\t\tprev_epoch_val_loss = val_loss\n\t\t\t\tprev_steps = steps\n\t\t\ti += 1\n\n\ndef forward_propagation(x):\n\tH_layer[0] = x\n\tfor i in xrange(1, L):\n\t\tA_layer[i] = B_layer[i] + np.matmul(W_layer[i], H_layer[i-1])\n\t\tH_layer[i] = activation_func(A_layer[i])\n\tA_layer[L] = B_layer[L] + np.matmul(W_layer[L], H_layer[L-1])\n\ty_hat = softmax(A_layer[L]) \n\treturn y_hat\n\ndef forward_propagation_with_dropouts(x, masks):\n\tp = 0.92\n\tH_layer[0] = x\n\tfor i in xrange(1, L):\n\t\tA_layer[i] = B_layer[i] + np.matmul(W_layer[i], H_layer[i-1])\n\t\tH_layer[i] = activation_func(A_layer[i])\n\t\tU = (np.random.rand(*H_layer[i].shape) < p) / p\n\t\tH_layer[i] *= U\n\t\tmasks.append(U)\n\tA_layer[L] = B_layer[L] + np.matmul(W_layer[L], H_layer[L-1])\n\ty_hat = softmax(A_layer[L]) \n\treturn y_hat\n\n# TODO: find gradients for squared error - Done. Verify.\ndef backward_propagation(y, y_hat):\n\td_H = [-1]\n\td_A = [-1]\n\td_B = [-1]\n\td_W = [-1]\n\tfor i in xrange(1, L):\n\t\td_H.append(np.zeros(H_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_A.append(np.zeros(A_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_B.append(np.zeros(B_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_W.append(np.zeros(W_layer[i].shape))\n\t\n\t# Output gradient computation\n\tif(loss == 'ce'):\n\t\td_A[L] = -(y - y_hat)\n\telse:\n\t\ttemp_1 = 2 * y_hat\n\t\ttemp_2 = y_hat - y + ( ( y_hat[np.argmax(y)] - np.square(y_hat).sum() ) * np.ones([10,1]) )\n\t\td_A[L] = temp_1 * temp_2\n\n\tfor k in xrange(L, 0, -1):\n\t\t# Parameter gradient computation\n\t\td_W[k] = np.matmul(d_A[k], H_layer[k-1].transpose())\n\t\td_B[k] = d_A[k]\n\n\t\t# Means we have already computed till W[1] and B[1]\n\t\tif(k == 1):\n\t\t\tbreak\n\n\t\t# Compute gradient wrt layer below\n\t\td_H[k-1] = np.matmul(W_layer[k].transpose(),d_A[k])\n\n\t\t# Compute gradient wrt layer below (pre-activation)\n\t\ttemp = activation_der(A_layer[k-1])\n\t\td_A[k-1] = d_H[k-1] * temp\n\n\td_theta = []\n\td_theta.append(d_W)\n\td_theta.append(d_B)\n\tscalar_mul_theta(d_theta, 1.0/batch_size)\n\treturn d_theta\n\ndef backward_propagation_with_dropouts(y, y_hat, masks):\n\td_H = [-1]\n\td_A = [-1]\n\td_B = [-1]\n\td_W = [-1]\n\tfor i in xrange(1, L):\n\t\td_H.append(np.zeros(H_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_A.append(np.zeros(A_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_B.append(np.zeros(B_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_W.append(np.zeros(W_layer[i].shape))\n\t\n\t# Output gradient computation\n\tif(loss == 'ce'):\n\t\td_A[L] = -(y - y_hat)\n\telse:\n\t\ttemp_1 = 2 * y_hat\n\t\ttemp_2 = y_hat - y + ( ( y_hat[np.argmax(y)] - np.square(y_hat).sum() ) * np.ones([10,1]) )\n\t\td_A[L] = temp_1 * temp_2\n\n\tfor k in xrange(L, 0, -1):\n\t\t# Parameter gradient computation\n\t\td_W[k] = np.matmul(d_A[k], H_layer[k-1].transpose()) + 2*regularization*W_layer[k]\n\n\t\t# no need to regularize bias parameters: 
http://cs231n.github.io/neural-networks-2/#reg\n\t\td_B[k] = d_A[k]\n\n\t\t# Means we have already computed till W[1] and B[1]\n\t\tif(k == 1):\n\t\t\tbreak\n\n\t\t# Compute gradient wrt layer below\n\t\td_H[k-1] = np.matmul(W_layer[k].transpose(),d_A[k]) * masks[k-1] \n\n\t\t# Compute gradient wrt layer below (pre-activation)\n\t\ttemp = activation_der(A_layer[k-1])\n\t\td_A[k-1] = d_H[k-1] * temp\n\n\td_theta = []\n\td_theta.append(d_W)\n\td_theta.append(d_B)\n\tscalar_mul_theta(d_theta, 1.0/batch_size)\n\treturn d_theta\n\nif __name__ == '__main__':\n\tnp.random.seed(1234)\n\tprint_params()\n\tmain()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":23561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"99954146","text":"import json, os, sys, time, pickle, h5py, cv2, numpy as np\nsys.path.append('/usr/local/lib/')\nimport pyrealsense2 as rs\n\ndef start_pipes(pipelines,configs,serial_numbers):\n\tprofiles = {}\n\tfor n in serial_numbers[::-1]:\n\t\tprofile = pipelines[n].start(configs[n])\n\t\tprofiles[n] = profile\n\treturn profiles\n\ndef stop_pipes(pipelines):\n\tfor n in pipelines:\n\t\tpipelines[n].stop() \n \ndef get_metadata(serial_numbers,start_time,stop_time,num_frames,intrinsics, timestamps, PARAMS):\n\tmetadata = {'parameters':PARAMS,\n 'serial_numbers':serial_numbers,\n 'start_time':start_time,\n 'stop_time':stop_time,\n 'num_frames':num_frames,\n\t\t 'intrinsics':intrinsics,\n 'timestamps': timestamps}\n\treturn metadata\n\ndef get_pipelines(serial_numbers, PARAMS):\n\tpipelines = {}\n\tconfigs = {}\n\tfor n in serial_numbers:\n\t\ttry:\n\t\t\tpipeline = rs.pipeline()\n\t\t\tconfig = rs.config()\n\t\t\tconfig.enable_device(n)\n\t\t\tconfig.enable_stream(rs.stream.depth, PARAMS['frame_width'], PARAMS['frame_height'], rs.format.z16, PARAMS['fps'])\n\t\t\tconfig.enable_stream(rs.stream.color, PARAMS['frame_width'], PARAMS['frame_height'], rs.format.bgr8, PARAMS['fps'])\n\t\t\tconfig.enable_record_to_file(PARAMS['working_directory'] + '/data/' + PARAMS['session_name']+'_'+n+'.bag')\n\t\t\tpipelines[n] = pipeline\n\t\t\tconfigs[n] = config\n\t\texcept: \n\t\t\tprint('Error connecting to camera '+n)\n\treturn pipelines, configs\n\ndef get_connected_devices():\n\tctx = rs.context()\n\tds5_dev = rs.device()\n\tdevices = ctx.query_devices();\n\tserial_numbers = []\n\tfor d in devices:\n\t\tprint('Found device ',d)\n\t\tserial_numbers.append(str(d).split('S/N: ')[1].split(')')[0])\n\tif len(devices)==0:\n\t\tprint('No devices found')\n\treturn serial_numbers\n\ndef get_intrinsics(pipelines):\n\tintrinsics = {k:{} for k in pipelines.keys()}\n\tfor n in pipelines:\n\t\tins = pipelines[n].wait_for_frames().get_depth_frame().profile.as_video_stream_profile().intrinsics\n\t\tintrinsics[n]['ppx'] = ins.ppx\n\t\tintrinsics[n]['ppy'] = ins.ppy\n\t\tintrinsics[n]['fx'] = ins.fx\n\t\tintrinsics[n]['fy'] = ins.fy\n\treturn intrinsics\n\n","sub_path":"MoseqMulti_acquire.py","file_name":"MoseqMulti_acquire.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"26181468","text":"import dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\n\n\nstyles = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=styles)\n\napp.layout = html.Div([\n dcc.Input(id='n-multi', type='number', value=5),\n 
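# one table row per computed power; the single multi-output callback below fills all five output cells\n    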
html.Table([html.Tr([html.Td(['x', html.Sup(2)]), html.Td(id='square')]),\n html.Tr([html.Td(['x', html.Sup(3)]), html.Td(id='cube')]),\n html.Tr([html.Td([2, html.Sup('x')]), html.Td(id='twos')]),\n html.Tr([html.Td([3, html.Sup('x')]), html.Td(id='threes')]),\n html.Tr([html.Td(['x', html.Sup('x')]), html.Td(id='x^x')])])])\n\n\n@app.callback(Output('square', 'children'),\n Output('cube', 'children'),\n Output('twos', 'children'),\n Output('threes', 'children'),\n Output('x^x', 'children'),\n Input('n-multi', 'value'))\ndef do_math(x):\n return x ** 2, x ** 3, 2 ** x, 3 ** x, x ** x\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n \n \n","sub_path":"plotly/tutorial/docs/03_basic_callbacks/multi_output.py","file_name":"multi_output.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"654046195","text":"# COMPILADO DE FUNÇÕES SIMPLES E ÚTEIS DE FORMA GERAL \n\n# Funções matemáticas\n\ndef maximo_divisor_comum(num1, num2):\n resto = num1 % num2\n while resto:\n num1 = num2\n num2 = resto\n resto = num1 % num2\n return num2 \n\n\ndef fatorial(valor):\n fatorial = 1\n while valor > 0:\n fatorial *= valor\n valor -=1\n return fatorial\n\n\ndef valida_opc(opc, msg):\n while opc not in range(1, 3):\n print(msg)\n opc = int(input())\n return opc\n\n\ndef get_inteiro(mensagem):\n try:\n return int(input(mensagem))\n except:\n print('Valor inválido.')\n return get_inteiro(mensagem)\n\n\ndef get_number(mensagem):\n try:\n return float(input(mensagem))\n except:\n print('Valor inválido.')\n return get_number(mensagem)\n\n\ndef get_int_positivo(msg):\n valor = get_inteiro(msg)\n while valor < 0:\n print('Valor digitado menor que 0.')\n valor = get_inteiro(msg)\n return valor\n\n\ndef apaga_lista(lista):\n for c in range(0, len(lista)):\n lista.pop()\n return lista\n\ndef fibonacci(parada): # escreve fibonacci até a 'parada'.\n penultimo = atual = 0\n ultimo = 1\n contador = 3\n print(0, 1, end = ' ')\n while contador <= parada:\n atual = penultimo + ultimo\n print(atual, end = ' ')\n penultimo = ultimo\n ultimo = atual\n contador += 1\n print()\n\ndef lista_fibonacci(parada): # escreve fibonacci até a 'parada'.\n fibonacci = [0, 1]\n for c in range(0, parada):\n fibonacci.append(fibonacci[c]+fibonacci[c+1])\n for e in range(0, parada):\n if e != parada-1:\n print(fibonacci[e], end = ' ')\n else:\n print(fibonacci[e])\n\n\ndef maior_valor(num1, num2):\n return num1 if num1 >= num2 else num2\n\n\ndef primo(valor): # checa se um valor é primo ou não\n\ttotal = 0\n\tfor count in range(1, valor+1):\n\t\tif valor % count == 0:\n\t\t\ttotal += 1\n\treturn True if total == 2 else False","sub_path":"Fabio03_For/utilidades.py","file_name":"utilidades.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"111095382","text":"#!/usr/bin/python3\nlist = ['172.30.8.0/23/172.30.5.3', '172.30.12.0/22/172.30.5.3', '192.168.128.0/18/172.30.5.3', '172.30.80.20/32/172.30.5.3']\n\ndef removezeros(ipaddr):\n\ti = 3 # 4 bytes in IPv4 address\n\tstrippedip = ''\n\twhile i >= 0 :\n\t\tif ipaddr.split(sep=\".\")[i] != '0':\n\t\t\tbreak\n\t\ti -= 1\n\tj = 0\n\twhile j <= i :\n\t\tstrippedip += ipaddr.split(sep=\".\")[j] #Rewriting the shorter IP address\n\t\tif i != j :\n\t\t\tstrippedip += '.' #Do not add separator . 
if it's the last byte\n\t\tj += 1\n\treturn strippedip\n\ndef dec2hex(number):\n\t\tfullhex=hex(int(number))\n\t\tif int(number) < 16 :\n\t\t\treturn ('0' + str(fullhex).split(sep=\"x\")[1])\n\t\treturn (fullhex.split(sep=\"x\")[1])\n\t\ndef ipblock(netip, netmask, gw):\n\tconcatblock = netmask + '.' + removezeros(netip) + '.' + gw\n\tfinalblock = ''\n\tfor byte in concatblock.split(sep=\".\"):\n\t\tfinalblock += dec2hex(byte)\n\t\tfinalblock += ':'\n\treturn (finalblock)\n\ndef option121(list):\n\tstring = ''\n\tfor route in list:\n\t\tnetip = route.split(sep=\"/\")[0]\n\t\tnetmask = route.split(sep=\"/\")[1]\n\t\tgw = route.split(sep=\"/\")[2]\n\t\tstring += ipblock(netip, netmask, gw)\n\treturn (string.strip(':'))\n\t\nprint(option121(list))\n","sub_path":"option121/option121.py","file_name":"option121.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"480000339","text":"# Request get and post\nimport requests\nr=requests.get('https://xkcd.com/1906/')\nprint(\"Status code is {0}\",r.status_code)\nprint(\"Header is {0}\",r.headers)\n\n\n#Download an image using Request Response\nimport requests\nreceive=requests.get('https://imgs.xkcd.com/comics/making_progress.png')\nwith open(r'C:\\Prosenjit\\Nupur\\image.png','wb') as f :\n f.write(receive.content)\n\n","sub_path":"HttpRequest.py","file_name":"HttpRequest.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"306113515","text":"# Author: eunsunlee\n# partial source: https://github.com/chanlhock/IMU/blob/master/imu.py\n# integration of rk4: https://www.dis.uniroma1.it/~pretto/papers/tpm_icra2014.pdf\n# integration of rk4: https://github.com/scomup/imu_tools/blob/master/src/imu_tracker/common.h\n\nimport socket, traceback\nimport csv\nimport struct\nimport sys, time, string, pygame\nfrom pygame.locals import *\nfrom ponycube import *\nfrom madgwickahrs import *\nimport quaternion\nfrom quaternion import QuaternionClass\nfrom a3 import IntegrationRK4,computeOmegaskew, QuatToRotMat, RotMatToQuat\n\n\naccel = [[0.0190, -0.0522, -0.9780],\n [0.0269, -0.0327, 0.9897],\n [0,-0.0093,1.0205],\n [0.0112, -0.0327, 1.0015],\n [0.0151, -0.0327, 1.0015]]\n\ngyro = [[-0.9375, -1.25, 0.875],\n [-1.3125, -2, 0.125],\n [-1.5625, -2.3125, -0.1875],\n [-1, -1.9375, -0.0625],\n [-0.6250, -1.3125, 0]]\nmag = [[0.20996090, 0.03125, -0.4487305],\n [0.2148438, 0.04101563, -0.4536133],\n [0.2148438, 0.04101563, -0.4536133],\n [0.2148438, 0.04101563, -0.4536133],\n [0.2148438, 0.04101563, -0.4536133]]\n\n\n\n\n\npygame.init()\nscreen = Screen(480,400,scale=1.5)\ncube = Cube(40,30,60)\nq = Quaternion(1,0,0,0)\nincr = Quaternion(0.96,0.01,0.01,0).normalized()\ncube.erase(screen)\ncube.draw(screen,q)\n\nprevious_timestamp = 0\n\nquat = QuaternionClass(1, 0, 0, 0)\nomega0 = [0,0,0]\n\n# a3quat = QuaternionClass(1, 0, 0, 0)\n\ndt = 1/256\n\nwhile 1:\n for i in range(len(mag)):\n ax = accel[i][0]\n ay = accel[i][1]\n az = accel[i][2]\n gx = gyro[i][0]\n gy = gyro[i][1]\n gz = gyro[i][2]\n mx = mag[i][0]\n my = mag[i][1]\n mz = mag[i][2]\n\n\n # A3 Algorithm gyroscope calibration\n omega1 = [gx, gy, gz]\n quat = IntegrationRK4(omega0, omega1, quat, dt)\n omega0 = omega1\n\n\n # Madgwick Algorithm\n # Imupredict = MadgwickAHRS();\n # Imupredict.quaternion = quat\n # Imupredict.sampleperiod = dt\n # Imupredict.update(gyro,accel,mag)\n # quat = Imupredict.quaternion\n\n qw = 
quat[0]\n qx = quat[1]\n qy = quat[3]\n qz = -quat[2]\n\n q.w = qw\n q.x = qx\n q.y = qy\n q.z = qz\n q = q.normalized()\n \n print(\"quat\")\n print(q.w, q.x, q.y, q.z)\n\n\n cube.erase(screen)\n cube.draw(screen,q)\n pygame.display.flip()\n pygame.time.delay(0)\n event = pygame.event.poll()\n if event.type == pygame.QUIT \\\n or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\n break\n if i == len(mag):\n i = 0\n\n\n\n\n\n\n\n\n","sub_path":"IMU_algorithms/sensorapp.py","file_name":"sensorapp.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"366208395","text":"# -*-coding:utf-8-*-\nimport getopt\nimport os\nimport sys\nimport re\ncurrent_path = os.path.abspath(os.path.dirname(__file__))\nproject_path = os.path.abspath(\"{}/../..\".format(current_path))\nsys.path.append(project_path)\nfrom tools.usage import usage_help\n__author__ = 'Allen Woo'\n\nclass Transations():\n\n def main(self):\n\n self.cfg_path = \"{}/babel.cfg\".format(current_path)\n self.extract_path = \"{}/apps\".format(project_path)\n s_ops = \"hq\"\n l_ops = [\"init\", \"update\", \"compile\", \"cfg=\", \"extract=\", \"output=\", \"lan=\", \"all-lan\",\n \"get-msgid=\", \"re-msgstr=\"]\n s_opexplain = [\"help\",\"quiet:A small amount of output\"]\n l_opexplain = [\"init translation\",\n \"update: extract and update\",\n \"compile\",\n \", The default:{}.\\n\\t\\tOptional: {}/babel_py.cfg\".format(self.cfg_path, current_path),\n \",The default: {}\".format(self.extract_path),\n \", Output directory.\\n\\t\\tSuce as:{}/translations/template\".format(self.extract_path),\n \", Such as: en_US, zh_Hans_CN\",\n \"View all languages\",\n \" Get 'msgid' in po file\",\n \" Fill the 'msgstr' in the po file with the translated text\"\n \"\\n (此参数指定的文件的格式和行号必须和--get-msgid导出的文件格式和行号一模一样)\"]\n\n action = [\"init, [--init --extract --output --lan en_US]\",\n \"update, [--update --extract --output ]\",\n \"compile, [--compile --output ]\",\n \"get-msgid [--get-msgid --lan ]\",\n \"re-msgstr [--re-msgstr --output --lan ]\"]\n\n opts, args = getopt.getopt(sys.argv[1:], s_ops, l_ops)\n func = None\n self.save_path = None\n self.quiet = \"\"\n self.lan = \"zh_Hans_CN\"\n self.msgid_tred_file_path = \"\"\n if not opts:\n usage_help(s_ops, s_opexplain, l_ops, l_opexplain, action=action)\n for op, value in opts:\n if op == \"-q\":\n self.quiet = \"-q\"\n elif op == \"--lan\":\n self.lan = value.strip()\n elif op == \"--all-lan\":\n os.system(\"pybabel --list-locales\")\n sys.exit()\n\n elif op == \"--cfg\":\n self.cfg_path = value.strip()\n\n elif op == \"--extract\":\n self.extract_path = value.rstrip(\"/\")\n\n elif op == \"--output\":\n self.save_path = value.rstrip(\"/\")\n\n elif op == \"--init\":\n func = self.init_tr\n\n elif op == \"--update\":\n func = self.update_tr\n\n elif op == \"--compile\":\n func = self.compile_tr\n\n elif op == \"--get-msgid\":\n self.save_path = value.rstrip(\"/\")\n func = self.get_msgid\n\n elif op == \"--re-msgstr\":\n self.msgid_tred_file_path = value.rstrip(\"/\")\n func = self.replace_msgstr\n\n elif op == \"-h\" or op == \"--help\":\n usage_help(s_ops, s_opexplain, l_ops, l_opexplain, action = action)\n\n if not os.path.exists(self.save_path):\n os.makedirs(self.save_path)\n\n func()\n\n def init_tr(self):\n\n '''\n compile transations\n '''\n self.cfg_sack()\n\n if not self.quiet:\n self.redirect = \"\"\n if self.lan:\n print(\"Extract...\")\n print(self.extract_path)\n os.system('pybabel 
{} extract -F {} -o {}/messages.pot {}'.format(self.quiet,\n self.cfg_path,\n self.save_path,\n self.extract_path))\n print(\"Init...\")\n os.system('pybabel {} init -i {}/messages.pot -d {} -l {}'.format(self.quiet,\n self.save_path,\n self.save_path,\n self.lan))\n self.print_cfg()\n print(\"Success\")\n else:\n print(\"You need to specify the language:--lan \\n\")\n\n\n def update_tr(self):\n\n '''\n update transations\n '''\n\n self.cfg_sack()\n\n lc_msg_path = \"{}/{}/LC_MESSAGES\".format(self.save_path, self.lan)\n po_filepath = os.path.join(lc_msg_path, \"messages.po\")\n\n if not os.path.exists(po_filepath):\n print(po_filepath)\n raise Exception(\"Missing messages.po file, may also be wrong language(--lan). please reinitialize translation. -h\")\n if not self.quiet:\n self.redirect = \"\"\n\n os.system('pybabel {} extract -F {} -k lazy_gettext -o {}/messages.pot {}'.format(self.quiet,\n self.cfg_path,\n self.save_path,\n self.extract_path))\n os.system('pybabel {} update -i {}/messages.pot -d {}'.format(self.quiet, self.save_path,\n self.save_path))\n\n self.update_process()\n self.print_cfg()\n print(\"Success\")\n\n def compile_tr(self):\n\n '''\n compile transations\n '''\n\n if not self.quiet:\n self.redirect = \"\"\n os.system('pybabel compile -d {} {}'.format(self.save_path, self.redirect))\n\n def update_process(self):\n\n lc_msg_path = \"{}/{}/LC_MESSAGES\".format(self.save_path, self.lan)\n po_filepath = os.path.join(lc_msg_path, \"messages.po\")\n if os.path.exists(po_filepath):\n with open(po_filepath) as rf:\n lines = rf.readlines()\n wf = open(\"{}_last.back\".format(po_filepath), \"w\")\n wf.writelines(lines)\n wf.close()\n\n abandoned_datas = {}\n datas = {}\n l = len(lines)\n for i in range(0, l):\n if re.search(r\"^#~ msgid.*\", lines[i]) and lines[i + 1].strip(\"#~ msgid\").strip().strip('\"\"') and lines[\n i + 1].strip(\"#~ msgstr\").strip().strip('\"\"'):\n abandoned_datas[lines[i].strip(\"#~ \").strip()] = lines[i + 1].strip(\"#~ \").strip()\n elif re.search(r\"^msgid.*\", lines[i]) and lines[i + 1].strip(\"msgid\").strip().strip('\"\"') and lines[\n i + 1].strip(\"msgstr\").strip().strip('\"\"'):\n datas[lines[i].strip()] = lines[i + 1].strip()\n\n for i in range(0, l):\n msgid = re.search(r\"^msgid.*\", lines[i])\n if msgid and lines[i].strip(\"msgid\").strip().strip('\"\"') and not lines[i + 1].strip(\"msgstr\").strip().strip(\n '\"\"'):\n l = lines[i].strip(\"\\n\")\n if l in abandoned_datas.keys():\n lines[i + 1] = abandoned_datas[l] + \"\\n\"\n if l in datas.keys():\n lines[i + 1] = datas[l] + \"\\n\"\n\n temp_lines = lines[:]\n l = len(temp_lines)\n for i in range(0, l):\n r = re.search(r\"^#~.*\", temp_lines[i])\n if r:\n lines.remove(temp_lines[i])\n\n wf = open(po_filepath, \"w\")\n wf.writelines(lines)\n wf.close()\n\n def cfg_sack(self):\n\n print(\"\\n* [Dangerous operation] Please check if the update option is wrong\\n\")\n print(\"Extraction path: {}\".format(self.extract_path))\n print(\"Output path: {}\".format(self.save_path))\n print(\"Cfg file: \" + self.cfg_path)\n self.print_cfg()\n print(\"\\n\")\n ch = input(\"Are you sure you want to use this cfg file?(Y/N): \")\n if ch.lower() not in [\"yes\", \"y\"]:\n sys.exit(0)\n\n def print_cfg(self):\n\n with open(self.cfg_path) as rf:\n print(\"* Extract content type[{}]:\".format(os.path.split(self.cfg_path)[-1]))\n for line in rf.readlines():\n print(\" \"+line.strip(\"\\n\"))\n\n\n def get_msgid(self):\n\n lc_msg_path = \"{}/{}/LC_MESSAGES\".format(self.save_path, self.lan)\n po_filepath = 
os.path.join(lc_msg_path, \"messages.po\")\n\n result_path = \"{}/result_msgid_text.txt\".format(current_path)\n wf = open(result_path, \"w\")\n with open(po_filepath) as rf:\n last_l = \"\"\n lines = rf.readlines()\n lines_num = len(lines)\n for i in range(0, lines_num):\n l = lines[i]\n isc = False\n tr_exists = False\n wl = None\n\n if i+1 < lines_num and re.search(r'^msgstr\\s\".+\"', lines[i+1]):\n # 已翻译的不需要提取\n continue\n\n if re.search(r\"^#:\\s.+:[0-9]+\", last_l):\n\n s = re.search(\"^msgid\\s(.+).+\", l)\n if l and s:\n wl = s.groups()[0].strip('\"')\n last_l = l\n\n elif last_l.strip() == 'msgid \"\"' and not re.search(\"^msgstr\\s.+\", l) and l:\n wl = l\n isc = True\n for j in range(1,4):\n if i + j < lines_num and re.search(r'^msgstr\\s.+', lines[i + j]) and re.search(r'^msgstr\\s\".+\"', lines[i + j]):\n\n # 已翻译的不需要提取\n tr_exists = True\n break\n if tr_exists:\n continue\n\n if wl:\n if wl.endswith(\"\\\\\"):\n wl = wl + '\"'\n wf.write(\"{}::: \".format(i+1) + wl + \"\\n\")\n if isc:\n continue\n\n last_l = l\n wf.close()\n print(\"Result path: {}\".format(result_path))\n\n def replace_msgstr(self):\n\n lc_msg_path = \"{}/{}/LC_MESSAGES\".format(self.save_path, self.lan)\n po_filepath = os.path.join(lc_msg_path, \"messages.po\")\n rfp = open(po_filepath, \"r\")\n polines = rfp.readlines()\n\n translated_text = self.msgid_tred_file_path\n with open(translated_text) as rf:\n last_text = \"\"\n for l in rf:\n l = l.split(\":::\")\n if l and len(l) > 1:\n if not l[0]:\n continue\n num = int(l[0])-1\n text = \"{}{}\".format(last_text, l[1].strip())\n if re.search(\"^msgstr\\s(.+).+\", polines[num + 1]):\n text = text.replace('\"\"', '')\n if re.search(r'.+[^\\\\]\".+', text):\n print(\"The number of lines that need to be manually confirmed (.po file): \\n{}: {}\\n\".format(num+1,text))\n\n text = 'msgstr \"{}\"'.format(text)\n\n polines[num + 1] = text.replace('\"\"', '\"') + \"\\n\"\n\n last_text = \"\"\n else:\n last_text = text\n\n\n rfp.close()\n with open(po_filepath, \"w\") as wf:\n wf.writelines(polines)\n\nif __name__ == '__main__':\n\n trs = Transations()\n trs.main()","sub_path":"tools/transations/transations_tool.py","file_name":"transations_tool.py","file_ext":"py","file_size_in_byte":12030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"90498484","text":"#!/bin/python3\n\nimport sys\n\ndef morganAndString(a, b):\n # Complete this function\n newList = []\n a = list(a)\n b = list(b)\n\n while len(a) != 0 and len(b) != 0:\n if a[0] <= b[0]:\n newList.append(a.pop(0))\n else:\n newList.append(b.pop(0))\n\n if len(a) == 0:\n newList = newList + b\n else:\n newList = newList + a\n\n newStr = \"\".join(newList)\n return newStr\n\nif __name__ == \"__main__\":\n t = int(input().strip())\n for a0 in range(t):\n a = input().strip()\n b = input().strip()\n result = morganAndString(a, b)\n print(result)\n","sub_path":"Algorithms/morgan and string.py","file_name":"morgan and string.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"262513228","text":"# -*- coding: -utf-8 -*-\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Journal Views\n\n\t\ndef journal(request):\n\t\n\t\n\tstudents = (\n\t{'id': 1, 'name': u'Подоба Віталій'},\n\t{'id': 2, 'name': u'Корост Андрій'},\n {'id': 3, 'name': u'Притула Тарас'},\n\t)\n\tdays = ['Ср 1', 'Чт 2', 'Пт 3', 'Сб 4', 'Нд 5', 'Пн 6', 'Вт 7', 'Ср 8', 'Чт 9', 
'Пт 10', 'Сб 11', 'Нд 12', 'Пн 13', 'Вт 14', 'Ср 15', 'Чт 16', \n\t'Пт 17', 'Сб 18', 'Нд 19', 'Пн 20', 'Вт 21', 'Ср 22', 'Чт 23', 'Пт 24', 'Сб 25', 'Нд 26', 'Пн 27', 'Вт 28', 'Ср 29', 'Чт 30', 'Пт 31']\n\treturn render(request, 'students/journal.html', {'students': students, 'days': days})\n\t\n\t\n\t\n\ndef student_journal(request, sid):\n\treturn HttpResponse('Student %s Journal
' % sid)\n\t\n\t\n\t\n\n\n","sub_path":"students/views/journal.py","file_name":"journal.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"505435437","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'upload'\n\nurlpatterns = [\n path('', views.upload_file, name='upload_file'),\n path('all_order', views.AllOrder.as_view(), name='all_order'),\n path('personnel', views.personnel, name='personnel'),\n ]\n","sub_path":"upload/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"473133224","text":"import random\nimport string\nimport json\nimport logging\nimport copy\n\nfrom ruamel.yaml import YAML, representer\n\nlogger = logging.getLogger(\"submitter.\" + __name__)\n\n\nclass NonAliasingRTRepresenter(representer.RoundTripRepresenter):\n \"\"\" Turn off auto-aliases in ruamel.yaml \"\"\"\n\n def ignore_aliases(self, data):\n return True\n\n\nyaml = YAML()\nyaml.default_flow_style = False\nyaml.preserve_quotes = True\nyaml.Representer = NonAliasingRTRepresenter\n\n\ndef load_json(path):\n \"\"\" Load the dictionary from a json file \"\"\"\n\n with open(path, \"r\") as file:\n data = json.load(file)\n\n return data\n\n\ndef dump_json(data, path):\n \"\"\" Dump the dictionary to a json file \"\"\"\n\n with open(path, \"w\") as file:\n json.dump(data, file, indent=4)\n\n\ndef dump_order_yaml(data, path):\n \"\"\" Dump the dictionary to a yaml file \"\"\"\n\n with open(path, \"w\") as file:\n yaml.dump(data, file)\n\n\ndef dump_list_yaml(data, path):\n \"\"\" Dump a list of dictionaries to a single yaml file \"\"\"\n\n with open(path, \"w\") as file:\n yaml.dump_all(data, file)\n\n\ndef get_yaml_data(path, stream=False):\n \"\"\" Retrieve the yaml dictionary form a yaml file and return it \"\"\"\n\n if stream:\n return yaml.load(path)\n\n with open(path, \"r\") as file:\n data = yaml.load(file)\n\n return data\n\n\ndef id_generator(size=8, chars=string.ascii_uppercase + string.digits):\n \"\"\" Generate an ID \"\"\"\n return \"\".join(random.choice(chars) for _ in range(size))\n\n\ndef check_lifecycle(node, interface_type):\n \"\"\"Check that an interface type is present \"\"\"\n if [x for x in node.interfaces if interface_type in x.type]:\n return True\n else:\n try:\n return \"create\" in node.type_definition.interfaces[interface_type]\n except (AttributeError, KeyError, TypeError):\n return False\n\n\ndef get_lifecycle(node, interface_type):\n \"\"\"Get inputs from TOSCA interfaces\n\n First, gets the interface from the direct parent, then updates it with the\n TOSCA interface inputs from the current node\n\n Returns:\n dict: a set of inputs for different lifecycle stages\n \"\"\"\n # Get the interfaces from the first parent\n lifecycle = _get_parent_interfaces(node, interface_type)\n properties = {k: v.value for k, v in node.get_properties().items()}\n\n # Update these interfaces with any inputs from the current node\n interfaces = [x for x in node.interfaces if interface_type in x.type]\n for stage in interfaces:\n _update_parent_spec(lifecycle, stage)\n\n resolve_get_functions(\n lifecycle,\n \"get_property\",\n lambda x: isinstance(x, list),\n lambda x, y: y.get(x[1]),\n properties,\n )\n\n return lifecycle\n\n\ndef _get_parent_interfaces(node, interface_type):\n interfaces = {}\n try:\n parent_interfaces = 
node.type_definition.interfaces[interface_type]\n parent_interfaces = copy.deepcopy(parent_interfaces)\n except (AttributeError, KeyError, TypeError):\n parent_interfaces = {}\n\n for stage, value in parent_interfaces.items():\n if stage == \"type\":\n continue\n try:\n interfaces[stage] = value.get(\"inputs\") or {}\n except AttributeError:\n interfaces[stage] = {}\n\n return interfaces\n\n\ndef _update_parent_spec(lifecycle, stage):\n lifecycle.setdefault(stage.name, {})\n if not stage.inputs:\n return\n\n try:\n lifecycle[stage.name][\"spec\"].update(stage.inputs[\"spec\"])\n stage.inputs[\"spec\"] = lifecycle[stage.name][\"spec\"]\n except KeyError:\n pass\n lifecycle[stage.name].update(stage.inputs)\n\n\ndef get_cloud_type(node, supported_clouds):\n \"\"\"Get parent types of a node\n\n Returns the cloud type from node type or parent types\n\n Returns:\n string: lowercase node type\n \"\"\"\n\n def generate_parents(node):\n while True:\n if not hasattr(node, \"type\"):\n break\n yield node.type.lower()\n node = node.parent_type\n\n for cloud in supported_clouds:\n if any(cloud in x for x in generate_parents(node)):\n return cloud\n\n\ndef get_cloud_config(\n insert_mode, runcmd_placeholder, default_cloud_config, tosca_cloud_config\n):\n\n if insert_mode == \"overwrite\":\n return tosca_cloud_config\n\n elif insert_mode == \"insert\":\n for x, y in tosca_cloud_config.items():\n try:\n idx = default_cloud_config[x].index(runcmd_placeholder)\n default_cloud_config[x][idx:idx] = y\n except (AttributeError, KeyError):\n default_cloud_config[x] = y\n except (ValueError, TypeError):\n default_cloud_config[x] = y + default_cloud_config[x]\n\n else:\n for x, y in tosca_cloud_config.items():\n try:\n if isinstance(default_cloud_config[x], bool):\n default_cloud_config[x] = y\n else:\n default_cloud_config[x] += y\n except KeyError:\n default_cloud_config[x] = y\n\n return default_cloud_config\n\n\ndef resolve_get_functions(\n dict_to_search, key_to_find, test_result_fn, resolve_result_fn, *args\n):\n \"\"\"Recursively update a dict with TOSCA 'get' functions\n\n Args:\n dict_to_search (dict): Dictionary to iterate through\n key_to_find (str): 'get' function to search for (eg 'get_input')\n test_result_fn (func): Function to test the result\n resolve_result_fn (func): Function to resolve the result\n args (*): Extra args to pass to resolve_result_fn\n\n Returns:\n None: Modifies the dictionary in place\n \"\"\"\n for key, value in dict_to_search.items():\n if key == key_to_find:\n return value\n\n elif isinstance(value, dict):\n result = resolve_get_functions(\n value, key_to_find, test_result_fn, resolve_result_fn, *args\n )\n if test_result_fn(result):\n dict_to_search[key] = resolve_result_fn(result, *args)\n\n elif isinstance(value, list):\n for index, item in enumerate(value):\n if not isinstance(item, dict):\n continue\n result = resolve_get_functions(\n item, key_to_find, test_result_fn, resolve_result_fn, *args\n )\n if test_result_fn(result):\n dict_to_search[key][index] = resolve_result_fn(\n result, *args\n )\n","sub_path":"submitter/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"81063766","text":"\nfrom collections import OrderedDict\n\nfrom wtforms import BooleanField, StringField, IntegerField, DecimalField, SelectField\nfrom wtforms.validators import InputRequired, Length, AnyOf, Optional\n\nfrom flask_admin.model.helpers import prettify_name\n\n\ndef 
empty_to_none(value):\n if value == '':\n return None\n return value\n\n\nclass FormGenerator(object):\n\n SWAGGER_FIELDS = {\n 'string': StringField,\n 'integer': IntegerField,\n 'boolean': BooleanField,\n 'float': DecimalField\n }\n\n def __init__(self, view, spec):\n self.view = view\n self.spec = spec\n\n def generate(self):\n if self.view.form:\n return self.view.form\n return self._generate_form()\n\n def _generate_form(self):\n columns = self._column_list()\n fields = OrderedDict((c, self._generate_field(c)) for c in columns)\n if self.view.form_extra_fields:\n fields.update(self.view.form_extra_fields)\n form_name = self.spec['title'] + 'Form'\n return type(str(form_name), (self.view.form_base_class, ), fields)\n\n def _column_list(self):\n columns = self.spec['properties'].keys()\n if self.view.form_columns:\n columns = [c for c in columns if c in self.view.form_columns]\n if self.view.form_excluded_columns:\n columns = [c for c in columns if c not in self.view.form_excluded_columns]\n if 'id' in columns:\n columns.remove('id')\n\n return self._sort_columns(columns)\n\n def _sort_columns(self, columns):\n if self.view.form_column_order:\n ordered = [c for c in self.view.form_column_order if c in columns]\n unordered = [c for c in columns if c not in ordered]\n return ordered + unordered\n else:\n required = [c for c in columns if c in self.spec.get('required', [])]\n others = [c for c in columns if c not in required]\n return required + others\n\n def _generate_field(self, column):\n field_class = self._field_class(column)\n field_name = self._field_name(column)\n kwargs = self._field_kwargs(column)\n return field_class(field_name, **kwargs)\n\n def _field_class(self, name):\n if self.view.form_overrides and name in self.view.form_overrides:\n return self.view.form_overrides[name]\n\n properties = self.spec['properties'][name]\n if 'enum' in properties:\n return SelectField\n return self.SWAGGER_FIELDS.get(\n properties.get('type', 'string'),\n StringField\n )\n\n def _field_name(self, name):\n default_name = prettify_name(name)\n if self.view.column_labels:\n return self.view.column_labels.get(name, default_name)\n return default_name\n\n def _field_kwargs(self, column):\n properties = self.spec['properties'][column]\n kwargs = {'filters': (empty_to_none,),\n 'validators': self._generate_validators(column),\n 'description': self._get_description(column, properties)}\n\n if 'enum' in properties:\n kwargs['choices'] = self._build_choices(properties['enum'])\n\n if self.view.form_args:\n kwargs.update(self.view.form_args.get(column, {}))\n\n return kwargs\n\n def _get_description(self, column, properties):\n description = properties.get('description')\n if self.view.column_descriptions:\n description = self.view.column_descriptions.get(column, description)\n return description\n\n def _build_choices(self, choices):\n return [(i, i) for i in choices]\n\n def _generate_validators(self, column):\n validators = []\n if column in self.spec.get('required', []):\n validators.append(InputRequired())\n else:\n validators.append(Optional())\n\n properties = self.spec['properties'][column]\n if 'minLength' in properties or 'maxLength' in properties:\n validators.append(Length(min=properties.get('minLength', -1),\n max=properties.get('maxLength', -1)))\n if 'enum' in properties:\n validators.append(AnyOf(properties['enum']))\n\n return 
validators\n\n\n","sub_path":"xadmin/backend/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"342075764","text":"import os,sys,math,copy\r\nf = open('../input.txt',\"r\")\r\noutput = open('../output.txt',\"w\")\r\ndef out(t,sol):\r\n\ts = \"Case #\" + str(t+1) + \": \" + str(sol)\r\n\tprint(s)\r\n\toutput.write(s + \"\\n\")\r\nT = int(f.readline())\r\n\r\ndef IsValid(P):\r\n\tif len(P)==0:\r\n\t\treturn True\r\n\tif len(P)==1:\r\n\t\treturn False\r\n\tif any(p[0]<0 for p in P):\r\n\t\treturn False\r\n\tk=0\r\n\tfor i in range(len(P)):\r\n\t\tif P[i][0]>P[k][0]:\r\n\t\t\tk=i\r\n\treturn sum(P[i][0] for i in range(len(P)) if i!=k) >= P[k][0]\r\n\r\ndef IsDone(P,s):\r\n\tglobal solution\r\n\tif solution != \"\":\r\n\t\treturn True\r\n\tif all(p[0]==0 for p in P):\r\n\t\tsolution=s\r\n\t\treturn True\r\n\treturn False\r\n\r\nalpha = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\r\nsolution = \"\"\r\n\r\ndef step(P,s):\r\n\tif IsDone(P,s):\r\n\t\treturn\r\n\tfor i in range(len(P)):\r\n\t\tL=copy.deepcopy(P)\r\n\t\tL[i][0]-=1\r\n\t\tsol=s+alpha[L[i][1]]+\" \"\r\n\t\tif IsDone(L,sol):\r\n\t\t\treturn\r\n\t\tif IsValid(L):\r\n\t\t\tstep(L,sol)\r\n\tfor i in range(len(P)):\r\n\t\tfor j in range(len(L)):\r\n\t\t\tL=copy.deepcopy(P)\r\n\t\t\tL[i][0]-=1\r\n\t\t\tL[j][0]-=1\r\n\t\t\tsol=s+alpha[L[i][1]]+alpha[L[j][1]]+\" \"\r\n\t\t\tif IsDone(L,sol):\r\n\t\t\t\treturn\r\n\t\t\tif IsValid(L):\r\n\t\t\t\tstep(L,sol)\r\n\r\nfor t in range(0,T):\r\n\tN = int(f.readline())\r\n\tL = [int(w) for w in f.readline().split()]\r\n\tsolution=\"\"\r\n\tP = [[L[i],i] for i in range(len(L))]\r\n\tP.sort(key=lambda x:x[0],reverse=True)\r\n\tstep(P,\"\")\r\n\tout(t,solution)","sub_path":"codes/CodeJamCrawler/16_3_1/Kotin/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"253457972","text":"import requests \nimport json\nimport threading\n# defining the api-endpoint \nREST = \"https://blockchain-restful-api.herokuapp.com/api/queries?blockchainID=1\"\nBLOCKCHAIN = \"http://localhost:3000/api/Order\"\n\ndef check():\n threading.Timer(10.0,check).start()\n # sending post request and saving response as response object \n apiResponse = requests.get(url = REST) \n \n # extracting response text \n responseText = apiResponse.text \n resp = json.loads(responseText)\n queryList = resp[\"queries\"]\n #print(queryList)\n # check if logs are empty\n if not queryList:\n print(\"No new logs...\")\n else:\n for query in queryList:\n query = query[\"query\"]\n query.pop(\"databaseID\")\n query.update({\"$class\":\"cpp.Order\"})\n r = requests.post(url = BLOCKCHAIN, data = query) \n \n # extracting response text \n order = r.text \n print(\"New Order:\",order) \n \ncheck()","sub_path":"get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"243409734","text":"#!/usr/bin/env python3\nimport shelve\nimport numpy as np\n\ndef get_par(j):\n res = {}\n l0 = j['H0']['maxLnL']\n l1 = j['H1']['maxLnL']\n res['lrt'] = 2 * l1 - 2 * l0\n res.update(j['H1']['maxLParameters'])\n return res\n\nif __name__ == '__main__':\n print('OG', 'tree', 'LRT', 'p', 'q', 'omega', 'kappa', 'p0')\n 
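# shelve keys are expected to look like '<dataset>.<bootstrap>.<model>.json'; entries without a bootstrap tag fall back to bs='ml'\n    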
db = shelve.open('bur.db')\n for fn in db:\n if fn.endswith('.json'):\n try:\n dataset, bs, model, _ = fn.split('.')\n except ValueError:\n dataset, model, _ = fn.split('.')\n bs = 'ml'\n if model == 'M8':\n par = get_par(db[fn])\n print(dataset, bs, par['lrt'],\n par['p'], \n par['q'], \n par['omega'], \n par['kappa'], \n par['p0']\n )\n","sub_path":"scripts/m8_exp_all.py","file_name":"m8_exp_all.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"421852378","text":"#!/usr/bin/env python\n\n#########################\n#### Changson Wan ###\n#### 06-11-2018 ###\n#### version 2.0.0 ###\n#########################\n\n\"\"\"Script Description:\n\nBETA-Binding and Expression Targets Analysis: Use the ChIP-x data and Microarray/RNAseq data to predict a factor's targets\n\nPart1:\nuse binding data to calc the score of each gene to be regulated by factor.\n1. For each refseq gene in genome, input a distance (for example 100kb), then I get the peak center within 100kb from gene TSS.\n2. filter the peaks by p-value < 1e-5 from MACS, and only get top 10,000 peaks if it's more than 10,000\n3. Then calculate a sum of 'Score' for each gene use this formula:\n Sg = lambda ldx: sum([math.exp(-0.5-4*t) for t in ldx])\n4. output is in bed format. the 5th column is score.\n\ninput bed file should 'chr1' instead of 'chrI'\n\nPart2:\nuse differential expression data rank each genes by t-score from limma result\n1. input expression file need to be the standard limma result format\n2. Rank each genes by the t-score calculated by limma, the positive one represent up regualate,and negative one represent downregulate genes.\n3. Mutiply the rank of gene score and the rank of gene expression t score, get the the rank product\n4. Do the permutation to get the fdr to dicide the confidence of the target genes we found\n5. use the regulate potential score to do up and down regulate gene set association analysis compared to the genes with no differential expression as a background\n\nPart3:\nMotif analysis on target regions\n\nUpdate02/27/2015: BETA can do the CR/TF function prediction by both regulatory potential and distance\nThis code is free software; you can redistribute it and/or modify it.\n\n@version: $2.0.0$\n@author: Su Wang, Changson Wan\n@contact: wchangson@gmail.com\n\"\"\"\n\nimport sys\nimport argparse as ap\n\n\ndef prepare_argparser():\n \"\"\"Prepare optparser object. 
New options will be added in this\n    function first.\n\n    \"\"\"\n    description = \"BETA --- Binding Expression Target Analysis\"\n    epilog = \"For command line options of each command, type: %(prog)s COMMAND -h\"\n\n    argparser = ap.ArgumentParser(description=description, epilog=epilog) # , usage = usage\n    BETA_VERSION = '2.0.0'\n    argparser.add_argument(\"--version\", action=\"version\", version=\"%(prog)s \" + BETA_VERSION)\n    subparsers = argparser.add_subparsers(help=\"sub-command help\", dest='subcommand_name') # help=\"sub-command help\"\n\n    # command for 'basic beta'\n    add_basic_parser(subparsers)\n\n    # command for 'super beta'\n    # add_plus_parser(subparsers)\n    # command for 'noexpre beta'\n    add_minus_parser(subparsers)\n\n    return argparser\n\n\ndef add_basic_parser(subparsers):\n    \"\"\"Add main function 'target prediction' argument parsers.\n    \"\"\"\n    basicparser = subparsers.add_parser(\"basic\",\n                                        help=\"Find Target Genes with only binding data: regulatory potential score\",\n                                        description=\"BETA-basic: Predict Direct targets of TF and the active/repressive function prediction.\\n\\\n                                        EXAMPLE: BETA basic -p 2723_peaks.bed -e gene_exp.diff -k R -g hg38 -n test -o basic\")\n    # Main Arguments\n    basicparser.add_argument(\"-p\", \"--peakfile\", dest=\"peakfile\", type=str, required=True,\n                             help=\"The bed format of peaks binding sites. (BETA support 3 or 5 columns bed format, CHROM, START, END (NAME, SCORE))\")\n    basicparser.add_argument(\"-g\", \"--genome\", dest=\"genome\", choices=(\"hg38\", \"mm10\", \"hg19\", \"mm9\"),\n                             help=\"Specify your species, hg38,mm10,hg19,mm9. For other genome assembly versions of human and mouse or other species, ignore this parameter\")\n    basicparser.add_argument(\"-e\", \"--exprf\", dest=\"exprefile\", type=str, required=True,\n                             help=\"The differential expression file obtained from limma for MicroArray data and cuffdiff for RNAseq data\")\n    basicparser.add_argument(\"-k\",\"--kind\", dest = \"kind\", choices = (\"LIM\", \"CUF\", \"BSF\", \"DES\"),\n                             help=\"the kind of your expression file, this is required, it can be LIM, CUF, BSF, DES. LIM for LIMMA standard format. CUF for CUFDIFF standard format, BSF for BETA specific format and DES for DESeq2 standard formats. Default is LIM\",\n                             default=\"LIM\")\n    basicparser.add_argument(\"-n\", \"--name\", dest=\"name\", type=str,\n                             help=\"this argument is used as the prefix of the result file. If not set, 'Basic' will be used instead\")\n    basicparser.add_argument(\"-o\", \"--output\", dest=\"output\", type=str,\n                             help=\"the directory to store all the output files, if you don't set this, files will be output into the current directory\")\n    # basicparser.add_argument(\"-d\", \"--distance\", dest=\"distance\", type=int,\n    #                          help=\"Set a number which unit is 'base'. It will get peaks within this distance from gene TSS. 
default:100000 (100kb)\",\n # default=100000)\n basicparser.add_argument(\"-pn\", \"--peaknumber\", dest=\"peaknumber\", type=int,\n help=\"the number of peaks you want to consider, DEFAULT=100000\", default=100000)\n basicparser.add_argument(\"-d\", \"--decayrate\", dest=\"decayrate\", type=int,\n help=\"the decay rate in bp -- the distance at which RP drops to 0.5, DEFAULT=10000\", default=10000)\n basicparser.add_argument(\"-df\", \"--fdr\", dest=\"diff_fdr\", type=float, help=\"Input a number 0~1 as a threshold to pick out the most significantly differentially expressed genes by FDR,\\\n DEFAULT = 0.05\", default=0.05)\n basicparser.add_argument(\"-da\", \"--amount\", dest=\"diff_amount\", type=float, help=\"Get the most significantly differentially expressed genes by percentage (a number between 0 and 1, ranked by FDR) or by count (a number larger than 1);\\\n for example, 2000 makes the script consider only the top 2000 genes as the differentially expressed genes, \\\n and 0.5 selects the top 50 percent of up- and down-regulated genes separately. NOTE: if you want to use diff_fdr alone, please set this parameter to 1, otherwise the intersection of these two parameters will be used\",\n default=1)\n # basicparser.add_argument(\"--symbol\", dest=\"symbol\", action='store_true',\n # help='whether to use symbol as output index')\n\n return\n\n\ndef add_minus_parser(subparsers):\n \"\"\"Add main function 'target prediction' argument parsers.\n \"\"\"\n minusparser = subparsers.add_parser(\"minus\",\n help=\"Find Target Genes with only binding data: regulatory potential score\",\n description=\"BETA-minus: Predict Direct targets of TF with binding data only.\\n\\\n EXAMPLE: BETA minus -p 2723_peaks.bed -g hg38 -n test -o minus\")\n # Main Arguments\n minusparser.add_argument(\"-p\", \"--peakfile\", dest=\"peakfile\", type=str, required=True,\n help=\"The bed format of peaks binding sites. (BETA supports 3- or 5-column bed format: CHROM, START, END (NAME, SCORE))\")\n minusparser.add_argument(\"-g\", \"--genome\", dest=\"genome\", choices=(\"hg38\", \"mm10\", \"hg19\", \"mm9\"), required=True,\n help=\"Specify your species, hg38,hg19,mm10,mm9.\")\n minusparser.add_argument(\"-n\", \"--name\", dest=\"name\", type=str,\n help=\"this argument is used as the prefix of the result file. If not set, 'Minus' will be used instead\")\n minusparser.add_argument(\"-o\", \"--output\", dest=\"output\", type=str,\n help=\"the directory to store all the output files, if you don't set this, files will be output into the current directory\")\n # minusparser.add_argument(\"-d\", \"--distance\", dest=\"distance\", type=int,\n # help=\"Set a number whose unit is 'base'. It will get peaks within this distance from gene TSS. 
default:100000 (100kb)\",\n # default=100000)\n minusparser.add_argument(\"-pn\", \"--peaknumber\", dest=\"peaknumber\", type=int,\n help=\"the number of peaks you want to consider, DEFAULT=100000\", default=100000)\n minusparser.add_argument(\"-d\", \"--decayrate\", dest=\"decayrate\", type=int,\n help=\"the decay rate in bp -- the distance at which RP drops to 0.5, DEFAULT=10000\", default=10000)\n minusparser.add_argument(\"--symbol\", dest=\"symbol\",action='store_true', help='whether to use symbol as output index')\n return\n\n\ndef main():\n \"\"\"The Main function/pipeline for BETA\"\"\"\n # parse options...\n argparser = prepare_argparser()\n args = argparser.parse_args()\n\n subcommand = args.subcommand_name\n if subcommand == \"basic\" or not subcommand:\n # pass\n from BETA.runbeta import basicrun\n basicrun(argparser)\n if subcommand == \"plus\":\n pass\n # from BETA.runbeta import plusrun\n # plusrun(argparser)\n if subcommand == \"minus\":\n from BETA.runbeta import minusrun\n minusrun(argparser)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n sys.stderr.write(\"User interrupted me! ;-) Bye!\\n\")\n sys.exit(0)","sub_path":"bin/BETA2.py","file_name":"BETA2.py","file_ext":"py","file_size_in_byte":9511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"393373896","text":"from django.contrib.contenttypes.models import ContentType\nfrom django.core import urlresolvers\nfrom django.db import models\nfrom django.conf import settings\nfrom backend_interface.utils import convert_camelcase_to_lowercase\n\n\nclass InheritedModelQuerySet(models.query.QuerySet):\n \"\"\"\n Class that adds methods to queryset.\n \"\"\"\n def prefetch_objects(self):\n \"\"\"\n Retrieves the associated \"real\" objects for the elements in the current\n queryset. 
Remember that we don't have direct polymorphism in the DB.\n\n Returns them as a list, so this method must be called last in chain\n operations.\n \"\"\"\n\n # Eagerly get the list of instances because we use it twice\n instance_list = list(self.all())\n\n # Stores a dict of str => list\n # the key is a class name as a string\n # the value are the instances of InheritedModel that have that\n # class name\n object_id_dic = {}\n\n # Fill the dictionary just defined\n for instance in instance_list:\n if instance.class_name not in object_id_dic:\n object_id_dic[instance.class_name] = []\n\n object_id_dic[instance.class_name].append(instance.id)\n\n # Stores a dict of int => object\n # The keys are IDs of the instances given to this queryset\n # The values are the \"real\" objects of the given ID\n real_instance_resolutions = {}\n\n # Iterate over the dictionary defined at the beginning of the method\n for class_name, id_list in object_id_dic.items():\n # Get the \"real\" class of the objects\n object_class = models.get_model(\n settings.MODEL_APP_DICT[\n convert_camelcase_to_lowercase(class_name)\n ],\n class_name)\n\n if not object_class:\n object_class = models.get_model('backend_interface',\n class_name)\n\n # For optimization: get the list of foreign keys in the model so\n # that we may pre-emptively select the related objects all at once.\n related_fields = []\n for field in object_class._meta.fields:\n if field.__class__.__name__ == 'ForeignKey':\n related_fields.append(field.name)\n\n # Get the real objects and add them to the dictionary\n for object in object_class.objects.select_related(\n *related_fields).filter(pk__in=id_list):\n real_instance_resolutions[object.id] = object\n\n real_instance_list = []\n\n # Iterate once again over the list of instances, but this time a new\n # list is generated from the dictionary just created.\n # We can't just return the values of the dictionary just created\n # because the ordering would be most likely wrong.\n for instance in instance_list:\n object = real_instance_resolutions[instance.id]\n # Set the cached instance of the \"real\" object just in case the\n # user mistakes the object for an InheritedModel and tries\n # to use us via the \"o\" property, which would re-execute the query\n # we just finished.\n object._cached_instance = object\n real_instance_list.append(object)\n\n return real_instance_list\n\n\nclass InheritedModelManager(models.Manager):\n def get_query_set(self):\n # Define our own queryset to be able to add methods to it\n return InheritedModelQuerySet(self.model)\n\n\nclass InheritedModel(models.Model):\n class_name = models.CharField(max_length=255, blank=True)\n objects = InheritedModelManager()\n\n def __init__(self, *args, **kwargs):\n super(InheritedModel, self).__init__(*args, **kwargs)\n self._cached_instance = None\n\n def _get_cached_instance(self):\n d = settings.MODEL_APP_DICT\n\n # Returns the \"real\" instance of the object\n if not self._cached_instance:\n object_class = models.get_model('backend_interface',\n self.class_name)\n if not object_class:\n object_class = models.get_model(\n d[\n convert_camelcase_to_lowercase(self.class_name)\n ],\n self.class_name)\n self._cached_instance = object_class.objects.get(pk=self.pk)\n return self._cached_instance\n\n # Accessor for the fields and methods of the classes that extend\n # InheritedModel.\n o = property(_get_cached_instance)\n\n def get_admin_url(self):\n content_type = ContentType.objects.get_for_model(self.o.__class__)\n return 
urlresolvers.reverse(\"admin:%s_%s_change\" %\n (content_type.app_label,\n content_type.model), args=(self.pk,))\n\n def save(self, force_insert=False, force_update=False, using=None):\n # If this is the first save, set the class name!\n # InheritedModel should never be instantiated and saved directly,\n # always do so through a subclass\n if not self.pk:\n self.class_name = self.__class__.__name__\n super(InheritedModel, self).save(force_insert, force_update, using)\n\n class Meta:\n app_label = 'backend_interface'\n abstract = True\n","sub_path":"backend_interface/models/inherited_model.py","file_name":"inherited_model.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"50767696","text":"import copy\n\nfrom django.conf.urls import url\nfrom django.shortcuts import render, reverse, redirect\nfrom django.utils.safestring import mark_safe\nfrom django.db.models import Q, DateTimeField, ManyToManyField\nfrom django.core.exceptions import ValidationError\n\nfrom .views import StarkIndex\nfrom .paginator import Pagination\n\n\nclass ShowList(object):\n \"\"\"Display class for the list page\"\"\"\n\n def __init__(self, config_class, request, queryset):\n # Attach the config-class object to this display class\n self.config_class = config_class\n self.request = request\n current_page = self.request.GET.get('page', 1)\n # Set up the pagination\n self.pagination = Pagination(current_page=current_page,\n all_count=queryset.count(),\n per_page_num=self.config_class.per_page_num,\n param=request)\n\n start = self.pagination.start\n end = self.pagination.end\n self.queryset = queryset[start:end]\n\n def get_datetime_field(self, field, model):\n \"\"\"Get the display value of a datetime field\"\"\"\n value = getattr(model, field).strftime('%Y-%m-%d %X')\n return self.get_common_field(field, model, value)\n\n def get_foreignkey_field(self, field, model, current_field):\n \"\"\"Display a related (many-to-many) field\"\"\"\n temp = [item.__str__() for item in getattr(model, field).all()]\n return self.get_common_field(field, model, ','.join(temp))\n\n def get_common_field(self, field, model, value):\n \"\"\"Get the value of an ordinary field\"\"\"\n if field in self.config_class.list_display_link:\n # If the field is in list_display_link, render it as an <a> tag\n val = mark_safe('<a href=\"%s\">%s</a>' % (\n reverse('%s_%s_update' % (self.config_class.app_label, self.config_class.model_name),\n args=(model.pk,)),\n value))\n else:\n val = value\n return val\n\n def get_body(self):\n \"\"\"Build the table body\"\"\"\n body_list = []\n for model in self.queryset:\n temp = []\n for field in self.config_class.get_list_display():\n if field == '__str__':\n val = str(model)\n else:\n if isinstance(field, str):\n current_field = self.config_class.model._meta.get_field(field)\n # print(type(current_field))\n if current_field.choices:\n # Check whether the current field is a choices field\n val = self.get_choice_field(field, model, current_field)\n elif isinstance(current_field, DateTimeField):\n # Check whether the current field is a datetime field\n val = self.get_datetime_field(field, model)\n elif isinstance(current_field, ManyToManyField):\n # Display a many-to-many field\n val = self.get_foreignkey_field(field, model, current_field)\n else:\n val = self.get_common_field(field, model, getattr(model, field))\n else:\n val = field(self.config_class, is_header=False, obj=model)\n temp.append(val)\n body_list.append(temp)\n return body_list\n\n def get_choice_field(self, field, model, current_field):\n \"\"\"Get the display value of a choices field\"\"\"\n value = getattr(model, field)\n for i in current_field.choices:\n # choices is a tuple of tuples, so loop through it to find the label for the current value\n if i[0] == value:\n val = i[1]\n return val\n\n def get_title(self):\n \"\"\"Build the table header\"\"\"\n title_list = []\n for 
field in self.config_class.get_list_display():\n if isinstance(field, str):\n if field == '__str__':\n val = self.config_class.model._meta.verbose_name\n else:\n val = self.config_class.model._meta.get_field(field).verbose_name\n else:\n # If the entry in list_display is a function and we are building the header, just call it\n val = field(self.config_class, obj=self.config_class.model)\n title_list.append(val)\n continue\n if field != '__str__':\n # If the header is __str__, skip this sorting step entirely\n # Add a sort link to the header\n path = copy.deepcopy(self.request.GET)\n if path.get('order_by') and path.get('order_by').startswith('-'):\n # If the '-' prefix is present, remove it; otherwise add it\n path['order_by'] = field\n if self.request.GET.get('order_by') == '-' + field:\n val = mark_safe('<a href=\"?%s\">%s</a>' % (\n path.urlencode(), val))\n else:\n val = mark_safe('<a href=\"?%s\">%s</a>' % (\n path.urlencode(), val))\n else:\n path['order_by'] = '-' + field\n if self.request.GET.get('order_by') == field:\n val = mark_safe('<a href=\"?%s\">%s</a>' % (\n path.urlencode(), val))\n else:\n val = mark_safe('<a href=\"?%s\">%s</a>' % (\n path.urlencode(), val))\n\n title_list.append(val)\n return title_list\n\n def get_action(self):\n \"\"\"Render the action options\"\"\"\n action_list = []\n for action in self.config_class.actions:\n tag = mark_safe('<option value=\"%s\">%s</option>' % (action.__name__, action.short_description))\n action_list.append(tag)\n return action_list\n\n def get_foreignkey_union_search(self, current_field, field, temp):\n \"\"\"Handle a combined-filter field that is a foreign key\"\"\"\n obj = current_field.rel.to\n # Loop over the filter field's objects and build the <a> tags\n for item in obj.objects.all():\n # Rebuild the url from the incoming query string so filters can be combined\n path = copy.deepcopy(self.request.GET)\n path[field] = item.pk\n url = path.urlencode()\n # Figure out which one is currently selected\n if self.request.GET.get(field) == str(item.pk):\n temp.append(mark_safe('<a class=\"active\" href=\"?%s\">%s</a>' % (url, item)))\n else:\n temp.append(mark_safe('<a href=\"?%s\">%s</a>' % (url, item)))\n return temp\n\n def get_choice_union_search(self, current_field, field, temp):\n \"\"\"Called when the combined-filter field is a choices field\"\"\"\n choice_list = current_field.get_choices()\n path = copy.deepcopy(self.request.GET)\n\n for choice in choice_list:\n # Skip the first entry if it is blank\n if choice[0] != '':\n path[field] = choice[0]\n url = path.urlencode()\n # Figure out which choice is currently selected, then add the active class\n if self.request.GET.get(field) == str(choice[0]):\n temp.append(mark_safe('<a class=\"active\" href=\"?%s\">%s</a>' % (url, choice[1])))\n else:\n temp.append(mark_safe('<a href=\"?%s\">%s</a>' % (url, choice[1])))\n return temp\n\n def get_union_search(self):\n \"\"\"Render the combined-filter section\"\"\"\n search_list = []\n for field in self.config_class.list_filter:\n # Add an 'All' <a> tag for the field\n path_global = copy.deepcopy(self.request.GET)\n temp = []\n if path_global.get(field):\n path_global.pop(field)\n url = path_global.urlencode()\n temp.append(mark_safe('<a href=\"?%s\">All</a>' % (url)))\n # Get the user-defined combined-filter field\n current_field = self.config_class.model._meta.get_field(field)\n if current_field.choices:\n temp = self.get_choice_union_search(current_field, field, temp)\n else:\n temp = self.get_foreignkey_union_search(current_field, field, temp)\n # search_list.append({'%s' % obj._meta.verbose_name: temp})\n search_list.append({'table': self.config_class.model._meta.get_field(field).verbose_name, 'obj': temp})\n return search_list\n\n\nclass BaseAdmin(object):\n \"\"\"Default config class\"\"\"\n list_display = ['__str__']\n list_display_link = []\n search_fields = []\n list_filter = []\n model_form = None\n per_page_num = 2 # Items shown per page\n readonly_fields = []\n readonly_table = False # Make the whole table read-only\n\n def delete_union_action(self, request, queryset):\n \"\"\"Batch delete action\"\"\"\n # If the table is read-only, do nothing in this function\n if self.readonly_table:\n return\n queryset.delete()\n\n delete_union_action.short_description = 'Batch delete'\n actions = [delete_union_action]\n\n def __init__(self, 
model):\n \"\"\"Attach the model class to this config-class object\"\"\"\n self.model = model\n self.app_label = model._meta.app_label\n self.model_name = model._meta.model_name\n\n def get_model_form(self):\n \"\"\"Get the ModelForm component\"\"\"\n config_class = self # Keep a reference to the current config class for use inside the closure\n if not self.model_form:\n from django.forms import ModelForm\n class BaseModelForm(ModelForm):\n \"\"\"Base ModelForm\"\"\"\n\n class Meta():\n model = self.model\n fields = '__all__'\n\n def readonly_validationError(self, instance):\n \"\"\"Server-side form validation for read-only fields\"\"\"\n for read_field in config_class.readonly_fields:\n temp = getattr(instance, read_field)\n # print(getattr(instance, read_field), self.cleaned_data.get(read_field))\n if isinstance(temp, str):\n if getattr(instance, read_field) != self.cleaned_data.get(read_field):\n # Any mismatch means the request is invalid, so raise an error right away\n raise ValidationError('%s is a read-only field, please do not modify it' % read_field)\n else:\n if hasattr(temp, 'all'):\n # If it has an all() method, the filtered field is many-to-many\n queryset = temp.all()\n # print(self.cleaned_data.get(read_field), queryset,str(queryset) != str(self.cleaned_data.get(read_field)))\n if str(queryset) != str(self.cleaned_data.get(read_field)):\n raise ValidationError('%s is a read-only field, please do not modify it' % read_field)\n\n def clean(self):\n \"\"\"Global clean hook that performs extra validation on the submitted form\"\"\"\n # Check whether any read-only field was modified\n instance = self.instance\n if hasattr(self, 'request'):\n request = self.request # request\n if request.path.endswith('add'):\n return self.cleaned_data\n # Reaching here means this is an update form\n self.readonly_validationError(instance)\n return self.cleaned_data\n\n self.model_form = BaseModelForm\n return self.model_form\n\n def edit_action(self, is_header=True, obj=None):\n \"\"\"Edit action\"\"\"\n if is_header:\n return 'Edit'\n return mark_safe(\n '<a href=\"%s\">Edit</a>' % reverse('%s_%s_update' % (self.app_label, self.model_name), args=(obj.pk,)))\n\n def delete_action(self, is_header=True, obj=None):\n \"\"\"Delete tag\"\"\"\n if is_header:\n return 'Delete'\n return mark_safe(\n '<a href=\"%s\">Delete</a>' % reverse('%s_%s_delete' % (self.app_label, self.model_name), args=(obj.pk,)))\n\n def check_box_action(self, is_header=True, obj=None):\n \"\"\"Checkbox tag\"\"\"\n if is_header:\n return 'Select'\n return mark_safe('<input type=\"checkbox\" name=\"select\" value=\"%s\">' % obj.pk)\n\n def get_list_display(self):\n \"\"\"Get the value of list_display\"\"\"\n temp = []\n temp.append(BaseAdmin.check_box_action)\n temp.extend(self.list_display)\n if not self.list_display_link:\n temp.append(BaseAdmin.edit_action)\n temp.append(BaseAdmin.delete_action)\n return temp\n\n def show_item(self, request):\n \"\"\"List view function\"\"\"\n # Run any requested action\n self.get_action(request)\n self.add_url = '%s_%s_add' % (self.app_label, self.model_name) # Used by the template to build the add URL\n queryset = self.model.objects.all()\n # Keyword search\n queryset = self.get_search(request, queryset)\n # Combined filtering\n queryset = self.get_list_filter(request, queryset)\n\n # Sort the queryset once the filtering is done\n queryset = self.get_order_by(request, queryset)\n show_list = ShowList(self, request, queryset) # Build a ShowList display object\n return render(request, 'stark_show_item.html', {'show_list': show_list})\n\n def get_order_by(self, request, queryset):\n \"\"\"Sort the given queryset\"\"\"\n if request.GET.get('order_by'):\n queryset = queryset.order_by(request.GET.get('order_by'))\n return queryset\n\n def get_list_filter(self, request, queryset):\n \"\"\"Combined-filter operation\"\"\"\n # The key_word search has already been applied before the combined filtering\n # params = copy.deepcopy(request.GET)\n # if 'key_word' in params.keys():\n # params.pop('key_word')\n # if 'page' in params.keys():\n # params.pop('page')\n # queryset = queryset.filter(**params)\n q = Q()\n # Loop over list_filter rather than over request.path\n for field in self.list_filter:\n if request.GET.get(field):\n 
q.children.append((field, request.GET.get(field)))\n queryset = queryset.filter(q)\n return queryset\n\n def get_search(self, request, queryset):\n \"\"\"Perform the keyword search\"\"\"\n\n self.key_word = ''\n if request.method == 'GET':\n key_word = request.GET.get('key_word') # Get the key_word value\n if key_word:\n self.key_word = key_word\n q = Q()\n q.connector = 'or'\n for field in self.search_fields:\n q.children.append(('%s__icontains' % field, key_word))\n queryset = queryset.filter(q)\n return queryset\n\n def get_action(self, request):\n \"\"\"Run the requested action\"\"\"\n if request.method == 'POST':\n # A POST request means an action was triggered\n action = request.POST.get('action')\n checkboxs = request.POST.getlist('select')\n if action and checkboxs:\n queryset = self.model.objects.filter(pk__in=checkboxs)\n action = getattr(self, action)\n if action:\n action(request, queryset)\n\n def add_item(self, request):\n \"\"\"Add view function\"\"\"\n model_form = self.get_model_form()\n if request.method == 'GET':\n # Render the page with the ModelForm component\n model_form = model_form()\n return render(request, 'stark_add_item.html',\n {'model_form': model_form, 'readonly_table': self.readonly_table})\n # If the table is read-only, reject any POST request and render an error\n if self.readonly_table:\n # Read-only table: report the error instead of saving\n model_form.error = 'This table is read-only and cannot be modified'\n return render(request, 'stark_add_item.html',\n {'model_form': model_form, 'readonly_table': self.readonly_table})\n\n # For a POST request, just save the data through the ModelForm\n model_form = model_form(data=request.POST)\n model_form.request = request # Attach the request object so the form can inspect the current route\n if model_form.is_valid():\n # On success redirect to the list page; save() returns the new object\n qureset = model_form.save()\n # After saving, check whether this is a pop-up child page\n auto_id = request.GET.get('auto_id')\n if auto_id:\n # It is a child page: render a throwaway page that performs the redirect\n return render(request, 'temp.html', {\n 'auto_id': auto_id,\n 'pk': qureset.pk,\n 'name': str(qureset)\n })\n return redirect(reverse('%s_%s_list' % (self.app_label, self.model_name)))\n error = model_form.errors.popitem()\n if error:\n error = error[1]\n else:\n error = ''\n model_form.error = error\n # On failure, re-render the page\n return render(request, 'stark_add_item.html', {'model_form': model_form, 'readonly_table': self.readonly_table})\n\n def delete_item(self, request, pk):\n \"\"\"Delete view function\"\"\"\n if request.method == 'GET':\n return render(request, 'stark_delete_ack.html')\n\n # If the table is read-only, reject any POST request and render an error\n if self.readonly_table:\n # Read-only table: report the error instead of deleting\n return render(request, 'stark_delete_ack.html', {'error': 'This table is read-only and cannot be modified'})\n\n # Delete the object with the given pk and redirect to the list page\n self.model.objects.filter(pk=pk).delete()\n return redirect(reverse('%s_%s_list' % (self.app_label, self.model_name)))\n\n def update_item(self, request, pk):\n \"\"\"Update view function\"\"\"\n queryset = self.model.objects.filter(pk=pk).first()\n model_form = self.get_model_form()\n if request.method == 'GET':\n # For a GET request, just render the form\n model_form = model_form(instance=queryset)\n return render(request, 'stark_update_item.html', {'model_form': model_form, 'config_class': self})\n\n # If the table is read-only, reject any POST request and render an error\n if self.readonly_table:\n # Read-only table: report the error instead of saving\n model_form.error = 'This table is read-only and cannot be modified'\n return render(request, 'stark_add_item.html', {'model_form': model_form, 'config_class': self})\n model_form = model_form(instance=queryset, data=request.POST)\n if model_form.is_valid():\n # If validation passes, save and redirect to the list page\n model_form.save()\n return redirect(reverse('%s_%s_list' % (self.app_label, self.model_name)))\n # If the update failed, surface the error message\n error = model_form.errors.popitem()\n if error:\n error = error[1]\n else:\n 
error = ''\n model_form.error = error\n # On failure, re-render the page\n return render(request, 'stark_add_item.html', {'model_form': model_form, 'config_class': self})\n\n @property\n def urls(self):\n \"\"\"Build the second-level CRUD routes\"\"\"\n temp = [\n url(r'^list', self.show_item, name='%s_%s_list' % (self.app_label, self.model_name)),\n url(r'^add', self.add_item, name='%s_%s_add' % (self.app_label, self.model_name)),\n url(r'^(\d+)/delete', self.delete_item, name='%s_%s_delete' % (self.app_label, self.model_name)),\n url(r'^(\d+)/update', self.update_item, name='%s_%s_update' % (self.app_label, self.model_name)),\n ]\n return temp, None, None\n\n\nclass StarkAdmin(object):\n \"\"\"The stark admin site: a registry of models and their config classes\"\"\"\n\n def __init__(self):\n self._register = {}\n\n def register(self, model, config_class=None):\n if not config_class:\n config_class = BaseAdmin\n app_label = model._meta.app_label\n # Create an entry for the app if it is not yet in self._register\n if app_label not in self._register:\n self._register[app_label] = {}\n self._register[app_label][model] = config_class(model)\n\n @property\n def urls(self):\n \"\"\"Build the first-level routes\"\"\"\n temp = []\n temp.append(url('^stark/', StarkIndex.as_view(), name='stark')) # First add the index route\n # Then generate the four CRUD routes for every registered model\n for app_label, register in self._register.items():\n for model, config_class in register.items():\n temp.append(url('^%s/%s/' % (model._meta.app_label, model._meta.model_name), config_class.urls))\n return temp, None, None\n\n\nsite = StarkAdmin()\n","sub_path":"sites.py","file_name":"sites.py","file_ext":"py","file_size_in_byte":21662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"652190664","text":"def readinput():\n k,s = list(map(int,input().split()))\n return k,s\n\ndef main(k,s):\n count=0\n for x in range(k+1):\n for y in range(k+1):\n z=s-x-y\n #print(x,y,z)\n if 0<=z and z<=k:\n count+=1\n return count\n\nif __name__=='__main__':\n k,s=readinput()\n ans=main(k,s)\n print(ans)\n\n","sub_path":"Python_codes/p03835/s039966760.py","file_name":"s039966760.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"39748654","text":"#!/usr/bin/env python\n# python script to compare data analysis results\n\nimport argparse\nimport yaml\nimport IPython\nimport ROOT\nimport subprocess\nimport RawYieldSpectrumLoader\n\nglobalList = []\n\ndef main(input_files, doNorm):\n\n ROOT.TH1.AddDirectory(False)\n ROOT.gStyle.SetOptTitle(False)\n ROOT.gStyle.SetOptStat(0)\n\n subprocess.call(\"make\")\n ROOT.gSystem.Load(\"MassFitter.so\")\n\n baseline = None\n baselineName = None\n\n files = dict()\n norms = dict()\n baseline_norm = 1\n configs = []\n for input_file in input_files:\n norm = 1\n if \".yaml\" in input_file:\n f = open(input_file, 'r')\n config = yaml.load(f)\n f.close()\n configs.append(config)\n fname = \"{input_path}/{train}/{analysis}.root\".format(input_path=config[\"input_path\"], train=config[\"train\"], analysis=config[\"name\"])\n anaName = config[\"name\"]\n if doNorm:\n loader = RawYieldSpectrumLoader.RawYieldSpectrumLoader(config[\"input_path\"], config[\"train\"], config[\"name\"])\n loader.fDMeson = \"D0\"\n events = loader.LoadNumberOfEvents()\n print(\"{0} -> {1} events\".format(config[\"name\"], events))\n norm = 1.0 / events\n elif \".root\" in input_file:\n fname = input_file\n anaName = fname[fname.rfind(\"/\") + 1:fname.rfind(\".\")]\n else:\n print(\"Skipping file {0} because its type was not recognized\".format(input_file))\n continue\n file = 
ROOT.TFile(fname)\n if not file or file.IsZombie():\n print(\"Skipping file {0}, since it was not opened successfully\".format(fname))\n continue\n if not baseline:\n baseline = file\n baselineName = anaName\n baseline_norm = norm\n print(\"Baseline is {0}\".format(baselineName))\n inputPath = fname[0:fname.rfind(\"/\")]\n print(\"Input path is {0}\".format(inputPath))\n else:\n print(\"Adding file {0}\".format(anaName))\n files[anaName] = file\n norms[anaName] = norm\n\n if len(files) < 1:\n print(files)\n print(\"Fewer than 2 analyses to compare, quitting...\")\n exit(0)\n\n mylist = CompareObjects(baseline, files, baseline_norm, norms)\n outputFileName = \"_\".join([baselineName] + files.keys())\n outputFileName = \"{0}/Comparison_{1}.root\".format(inputPath, outputFileName)\n outputFile = ROOT.TFile(outputFileName, \"recreate\")\n outputFile.cd()\n rlist = GenerateRootList(mylist)\n for robj in rlist:\n robj.Write(robj.GetName(), ROOT.TObject.kSingleKey)\n outputFile.Close()\n print(\"Comparison results stored in {0}\".format(outputFileName))\n\ndef GenerateRootList(mylist):\n if len(mylist) < 1:\n return None\n rlist = ROOT.TList()\n for name, obj in mylist.iteritems():\n robj = None\n if isinstance(obj, dict):\n robj = GenerateRootList(obj)\n if robj:\n robj.SetName(name)\n elif isinstance(obj, ROOT.TObject):\n robj = obj\n else:\n print(\"Unexpected object type\")\n print(obj)\n if robj:\n rlist.Add(robj)\n return rlist\n\ndef CompareObjects(baseline, inputObjects, baseline_norm, norms):\n print(\"Comparing object {0}\".format(baseline.GetName()))\n mylist = dict()\n if len(inputObjects) < 1:\n print(\"Nothing to compare with, returning\")\n return\n if isinstance(baseline, ROOT.TDirectory):\n root_keys = baseline.GetListOfKeys()\n for root_key in root_keys:\n baselineObj = baseline.Get(root_key.GetName())\n objects = dict()\n for name, file in inputObjects.iteritems():\n obj = file.Get(root_key.GetName())\n if not obj:\n print(\"{0} not found for analysis {1}\".format(baselineObj.GetName(), name))\n continue\n objects[name] = obj\n mylist[root_key.GetName()] = CompareObjects(baselineObj, objects, baseline_norm, norms)\n\n elif isinstance(baseline, ROOT.TList):\n for baselineObj in baseline:\n objects = dict()\n for name, rlist in inputObjects.iteritems():\n obj = rlist.FindObject(baselineObj.GetName())\n if not obj:\n print(\"{0} not found for analysis {1}\".format(baselineObj.GetName(), name))\n continue\n objects[name] = obj\n mylist[baselineObj.GetName()] = CompareObjects(baselineObj, objects, baseline_norm, norms)\n\n elif isinstance(baseline, ROOT.TH1):\n baseline.Scale(baseline_norm)\n for (name, hist), norm in zip(inputObjects.iteritems(), norms.itervalues()):\n ratio = hist.Clone(\"{0}_ratio\".format(name))\n ratio.Scale(norm)\n ratio.Divide(baseline)\n mylist[ratio.GetName()] = ratio\n\n return mylist\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='D meson jet analysis for 2010 pp data.')\n parser.add_argument('files', metavar='file.yaml', nargs='+',\n help='Files to compare (can be a YAML file or directly a root file)')\n parser.add_argument(\"--norm\", action='store_const',\n default=False, const=True,\n help='Normalize by number of events.')\n\n args = parser.parse_args()\n\n main(args.files, args.norm)\n\n IPython.embed()\n","sub_path":"DMesonJetAnalysis/CompareDataAnalysis.py","file_name":"CompareDataAnalysis.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"423498427","text":"from django.shortcuts import render_to_response, redirect,render\nfrom books.models import Book, Author, Requestsinfo\nfrom django.contrib import auth\nfrom django.template.context_processors import csrf\nfrom books.forms import BookForm\n\n\ndef books(request):\n args = {}\n args['books'] = Book.objects.all()\n args['username'] = auth.get_user(request).username\n return render(request, 'books.html', args)\n\ndef book(request,book_id=0):\n args = {}\n args.update(csrf(request))\n\n if book_id == '0':\n book_form = BookForm()\n else:\n book = Book.objects.get(id=book_id)\n args['book'] = book\n book_form = BookForm(instance=book)\n\n args['username'] = auth.get_user(request).username\n args['model'] = Book\n args['form'] = book_form\n return render(request, 'book.html', args)\n\n\ndef book_save(request,book_id=0):\n if request.POST:\n if book_id == '0':\n form = BookForm(request.POST)\n else:\n book = Book.objects.get(id=book_id)\n form = BookForm(request.POST, instance=book)\n if form.is_valid():\n form.save()\n return redirect('/')\n\n\ndef requests(request):\n args = {}\n args['requests'] = Requestsinfo.objects.all().order_by('-requestsinfo_date')[:10]\n args['username'] = auth.get_user(request).username\n return render(request, 'requests.html', args)\n","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"19759202","text":"# -*- coding: utf-8 -*-\n\nimport pdb\nimport os\nimport json\nimport requests\nimport time\nimport traceback\n\n\nfrom TestCases.SafeCircle import config as CONFIG\nfrom lib.Common import *\nfrom lib.MyTools import *\n\n\nclass SafeCircle_API_10001:\n\t'''\n\tSummary\n\t\tCreate malicious url link\n\tURL\n\t\t/api/v4/widgets/mytools/available\n\n\t'''\n\n\tdef __init__(self):\n\n\t\tself.api_worker = MyTools(CONFIG.TESTER_01['account'],CONFIG.TESTER_01['password'])\n\t\tself.common_worker = Common()\n\n\t\tself.link_name = 'Youtube'\n\t\tself.link_url = 'https://www.youtube.com'\n\t\tself.result_dict = {u'status': -1, u'message': u'MALICIOUS_URL', u'code': 400}\n\n\tdef parse_response(self, r):\n\t\t\n\t\tif r.json and (r.result):\n\t\t\ttry:\n\t\t\t\treturn self.common_worker.check_dict_key_value(r.json,self.result_dict)\n\t\t\texcept KeyError as e:\n\t\t\t\treturn False, 'KeyError , msg:{0}'.format(e)\n\t\t\texcept Exception as e:\n\t\t\t\treturn False, 'Exception , msg:{0}'.format(e)\n\t\telse:\n\t\t\treturn (False, (r.status_code, r.text))\n\n\tdef run(self):\n\t\t\n\t\tself.common_worker.print_testcase(self.__class__.__name__,'Create a new my tools link')\n\n\t\tr = self.api_worker.Create_mytools_link(self.link_name,self.link_url)\n\n\t\tresult = self.parse_response(r)\n\t\t\n\t\treturn result\n\n\nclass SafeCircle_API_10002:\n\n\t'''\n\tSummary\n\t\tCreate a new predefine my tools link\n\tURL\n\t\t/api/v4/widgets/mytools/links/predefine\n\t'''\n\n\tdef __init__(self):\n\n\t\tself.api_worker = MyTools(CONFIG.TESTER_01['account'],CONFIG.TESTER_01['password'])\n\t\tself.common_worker = Common()\n\t\tself.MyToolsCategory = CONFIG.My_Tools_Categories\n\n\t\tself.categories_id = None\n\t\tself.categories_name = 'Test Category'\n\t\tself.link_categories = [0]\n\n\t\tself.link_name = 'Youtube'\n\t\tself.link_url = 'https://www.youtube.com'\n\t\tself.link_description = 'This Is Youtube'\n\n\t\tself.result_dict = {u'status': -1, u'message': u'MALICIOUS_URL', u'code': 400}\n\t\tself.link = 
{u'description': u'This is description', u'default': False, u'is_added': False, u'url': u'https://google.com', u'wrs_url': u'', u'icon': None, u'id': 2322, u'categories': self.link_categories, u'name': u'Test google links'}\n\t\t\n\n\tdef setup(self):\n\n\t\tself.MyToolsCategory[\"categories\"].append({'id':'','name':self.categories_name,'delete':False})\n\n\t\tr = self.api_worker.Update_categories(self.MyToolsCategory)\n\n\t\tself.categories_id = r.json[u'categories'][-1]['id']\n\n\t\tassert r.json['code'] == 200 ,'Setup -> Update category fail, msg:{0}'.format(r.json)\n\n\t\tself.link_categories.append(self.categories_id)\n\n\tdef teardown(self):\n\n\t\tself.MyToolsCategory[\"categories\"].pop()\n\t\tself.MyToolsCategory[\"categories\"].append({'id':self.categories_id,'name':'Test Category','delete':True})\n\n\t\tr = self.api_worker.Update_categories(self.MyToolsCategory)\n\t\tassert r.json['code'] == 200 ,'Teardown -> Update category fail, msg:{0}'.format(r.json)\n\n\tdef parse_response(self, r):\n\t\t\n\t\tif r.json and (r.result):\n\t\t\ttry:\n\t\t\t\treturn self.common_worker.check_dict_key_value(r.json,self.result_dict)\t\t\t\t\n\t\t\texcept KeyError as e:\n\t\t\t\treturn False, 'KeyError , msg:{0}'.format(e)\n\t\t\texcept Exception as e:\n\t\t\t\treturn False, 'Exception , msg:{0}'.format(e)\n\t\telse:\n\t\t\treturn (False, (r.status_code, r.text))\n\n\tdef run(self):\n\t\t\n\t\tself.common_worker.print_testcase(self.__class__.__name__,'Create a new predefine my tools link')\n\t\t\n\t\tself.setup()\n\n\t\tr = self.api_worker.Create_predefine_link(self.link_name,self.link_url,self.link_categories,'',self.link_description,False)\n\n\t\tresult = self.parse_response(r)\n\n\t\tself.teardown()\n\n\t\treturn result\n\n\n","sub_path":"TestCases/SafeCircle/MyTools_Non_Criteria.py","file_name":"MyTools_Non_Criteria.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"173565397","text":"import netifaces\nimport ipaddress\nimport sys\nfrom scapy.all import *\n\n\ndef get_networkIP(routerIP, binmask):\n\n e1, e2, e3, e4 = routerIP.split(\".\")\n binRouterIP = (\n bin(int(e1))[2:].zfill(8)\n + \".\"\n + bin(int(e2))[2:].zfill(8)\n + \".\"\n + bin(int(e3))[2:].zfill(8)\n + \".\"\n + bin(int(e4))[2:].zfill(8)\n )\n binNetworkIP = \"\"\n i = 0\n for x, y in zip(binRouterIP.replace(\".\", \"\"), binmask):\n i += 1\n if x == \"1\" and y == \"1\":\n binNetworkIP += \"1\"\n else:\n binNetworkIP += \"0\"\n if i == 8:\n binNetworkIP += \".\"\n i = 0\n e1, e2, e3, e4 = binNetworkIP[:-1].split(\".\")\n networkIP = (\n str(int(e1, 2))\n + \".\"\n + str(int(e2, 2))\n + \".\"\n + str(int(e3, 2))\n + \".\"\n + str(int(e4, 2))\n )\n mask = 0\n for l in str(binmask):\n if l == \"1\":\n mask += 1\n return networkIP + \"/\" + str(mask)\n\n\ntry:\n\treseau, netmask = sys.argv[1].split(\"/\")\n\ttry:\n\t\tint(netmask)\n\t\tmask = True\n\texcept:\n\t\tprint(\"The subnet mask is not valid\")\n\t\tmask = False\nexcept ValueError:\n\tprint(\"Don't forget to enter the subnet mask\")\n\tmask = False\n\n\nif mask:\n\tconnected = 0\n\n\tfor inter in netifaces.interfaces():\n\t\taddre = netifaces.ifaddresses(inter)\n\t\tif (addre[netifaces.AF_INET])[0][\"addr\"] == reseau:\n\t\t\tconnected += 1\n\n\tif connected == 0:\n\t\tprint(f\"You are not connected to the network {reseau}\")\n\telse:\n\t\tmaskbin = \"1\" * int(netmask) + \"0\" * (32-int(netmask))\n\t\tfor host in 
ipaddress.IPv4Network(get_networkIP(reseau, maskbin)):\n\t\t\tanswer = sr1(IP(dst=host, ttl=20)/ICMP(), timeout=2, verbose = False)\n\t\t\tif answer:\n\t\t\t\tprint(f\"{host} connected\")\n\t\t\telse:\n\t\t\t\tprint(f\"{host} not connected\")\n\n","sub_path":"reseau_python/nmap.py","file_name":"nmap.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"465072031","text":"# TITLE: User input in Python\r\n# DESCRIPTION: Stores user input into variables and outputs the variables\r\n# to the user.\r\n\r\ndef main():\r\n name = input('What is your name? ')\r\n age = input('How old are you? ')\r\n # output\r\n print('Hello', name, ' you are', age)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"python/UserInput.py","file_name":"UserInput.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"102582989","text":"import voluptuous as vol\n\nfrom esphome import pins\nimport esphome.config_validation as cv\nfrom esphome.const import CONF_ENABLE_TIME, CONF_ID, CONF_KEEP_ON_TIME, CONF_PIN\nfrom esphome.cpp_generator import Pvariable, add\nfrom esphome.cpp_helpers import gpio_output_pin_expression, setup_component\nfrom esphome.cpp_types import App, Component, esphome_ns\n\nPowerSupplyComponent = esphome_ns.class_('PowerSupplyComponent', Component)\n\nMULTI_CONF = True\n\nCONFIG_SCHEMA = cv.Schema({\n vol.Required(CONF_ID): cv.declare_variable_id(PowerSupplyComponent),\n vol.Required(CONF_PIN): pins.gpio_output_pin_schema,\n vol.Optional(CONF_ENABLE_TIME): cv.positive_time_period_milliseconds,\n vol.Optional(CONF_KEEP_ON_TIME): cv.positive_time_period_milliseconds,\n}).extend(cv.COMPONENT_SCHEMA.schema)\n\n\ndef to_code(config):\n pin = yield gpio_output_pin_expression(config[CONF_PIN])\n\n rhs = App.make_power_supply(pin)\n psu = Pvariable(config[CONF_ID], rhs)\n if CONF_ENABLE_TIME in config:\n add(psu.set_enable_time(config[CONF_ENABLE_TIME]))\n if CONF_KEEP_ON_TIME in config:\n add(psu.set_keep_on_time(config[CONF_KEEP_ON_TIME]))\n\n setup_component(psu, config)\n\n\nBUILD_FLAGS = '-DUSE_OUTPUT'\n","sub_path":"esphome/components/power_supply.py","file_name":"power_supply.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"49173682","text":"import tempfile\nimport unittest\nfrom unittest import mock\n\nimport pytest\n\nimport cupy\nfrom cupy import testing\n\n\ntry:\n import warnings\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=DeprecationWarning)\n import cupyx.optimizing\n import cupyx.optimizing._optimize\n import cupy.core._optimize_config\nexcept ImportError:\n pass\n\n\n@testing.gpu\n@testing.with_requires('optuna')\nclass TestOptimize(unittest.TestCase):\n\n def setUp(self):\n cupy.core._optimize_config._clear_all_contexts_cache()\n\n def test_optimize_reduction_kernel(self):\n my_sum = cupy.ReductionKernel(\n 'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum')\n x = testing.shaped_arange((3, 4), cupy)\n y1 = my_sum(x, axis=1)\n with cupyx.optimizing.optimize():\n y2 = my_sum(x, axis=1)\n testing.assert_array_equal(y1, y2)\n\n def test_optimize_cache(self):\n target = cupyx.optimizing._optimize._optimize\n target_full_name = '{}.{}'.format(target.__module__, target.__name__)\n\n with mock.patch(target_full_name) as optimize_impl:\n my_sum = 
cupy.ReductionKernel(\n 'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum')\n my_sum_ = cupy.ReductionKernel(\n 'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum_')\n x = testing.shaped_arange((3, 4), cupy)\n x_ = testing.shaped_arange((3, 4), cupy)\n y = testing.shaped_arange((4, 4), cupy)\n z = testing.shaped_arange((3, 4), cupy)[::-1]\n assert x.strides == y.strides\n assert x.shape == z.shape\n\n with cupyx.optimizing.optimize():\n my_sum(x, axis=1)\n assert optimize_impl.call_count == 1\n my_sum(x, axis=1)\n assert optimize_impl.call_count == 1\n my_sum(x, axis=0)\n assert optimize_impl.call_count == 2\n my_sum(x_, axis=1)\n assert optimize_impl.call_count == 2\n my_sum(y, axis=1)\n assert optimize_impl.call_count == 3\n my_sum(z, axis=1)\n assert optimize_impl.call_count == 4\n my_sum_(x, axis=1)\n assert optimize_impl.call_count == 5\n\n with cupyx.optimizing.optimize(key='new_key'):\n my_sum(x, axis=1)\n assert optimize_impl.call_count == 6\n\n with cupyx.optimizing.optimize(key=None):\n my_sum(x, axis=1)\n assert optimize_impl.call_count == 6\n my_sum(x)\n assert optimize_impl.call_count == 7\n\n @testing.multi_gpu(2)\n def test_optimize_cache_multi_gpus(self):\n target = cupyx.optimizing._optimize._optimize\n target_full_name = '{}.{}'.format(target.__module__, target.__name__)\n\n with mock.patch(target_full_name) as optimize_impl:\n my_sum = cupy.ReductionKernel(\n 'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum')\n\n with cupyx.optimizing.optimize():\n with cupy.cuda.Device(0):\n x = testing.shaped_arange((3, 4), cupy)\n my_sum(x, axis=1)\n assert optimize_impl.call_count == 1\n\n with cupy.cuda.Device(1):\n x = testing.shaped_arange((3, 4), cupy)\n my_sum(x, axis=1)\n assert optimize_impl.call_count == 2\n\n def test_optimize_pickle(self):\n my_sum = cupy.ReductionKernel(\n 'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum')\n x = testing.shaped_arange((3, 4), cupy)\n\n with tempfile.TemporaryDirectory() as directory:\n filepath = directory + '/optimize_params'\n\n with cupyx.optimizing.optimize() as context:\n my_sum(x, axis=1)\n params_map = context._params_map\n context.save(filepath)\n\n cupy.core._optimize_config._clear_all_contexts_cache()\n\n with cupyx.optimizing.optimize() as context:\n assert params_map.keys() != context._params_map.keys()\n context.load(filepath)\n assert params_map.keys() == context._params_map.keys()\n\n with cupyx.optimizing.optimize(key='other_key') as context:\n with pytest.raises(ValueError):\n context.load(filepath)\n","sub_path":"tests/cupyx_tests/test_optimize.py","file_name":"test_optimize.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"373031478","text":"#Python Exercise 68: Write a program that plays even-or-odd against the computer. The game is only interrupted when the player loses, showing the total of consecutive wins earned at the end of the game.\nimport random\npi = ['p','i']\nc = 0\nwhile True:\n esc = input('Choose P for even and I for odd ').strip().upper()[0]\n while esc not in 'PI':\n esc = input('Choose P for even and I for odd ').strip().upper()[0]\n num = int(input('choose a number '))\n numcom = random.randint(0,5)\n res = num + numcom\n \n if esc in 'Pp':\n if res % 2 == 0:\n print('You won!!! ')\n c += 1\n else:\n print('You lost!!! ')\n break\n if esc in 'Ii':\n if res % 2 != 0:\n print('You won!!! 
')\n c +=1\n else:\n print('You lost!!!')\n break\nprint(f'You won {c} times in a row')\n \n \n ","sub_path":"jogo par ou impar.py","file_name":"jogo par ou impar.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"546095069","text":"def isPalindromic(s,start,end):\r\n while start<=end:\r\n if s[start]!=s[end]:\r\n return False\r\n start+=1\r\n end-=1\r\n return True\r\n\r\nn=input()\r\ncnt=0\r\nflag=True\r\nwhile not isPalindromic(n,0,len(n)-1):\r\n cnt+=1\r\n n=list(n)\r\n n_reverse=reversed(n)\r\n num1=int(''.join(n))\r\n num2=int(''.join(n_reverse))\r\n n=str(num1+num2)\r\n print(num1,'+',num2,'=',n)\r\n if cnt==10:\r\n flag=False\r\n print(\"Not found in 10 iterations.\")\r\n break\r\nif flag:\r\n print(n,\"is a palindromic number.\")","sub_path":"1136.py","file_name":"1136.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"81225218","text":"import numpy as np\nimport sklearn.datasets as datasets\nfrom sklearn.model_selection import train_test_split\nimport cv2\nimport torch\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader, Dataset, TensorDataset\n\n\nfrom .args import args\n\n\n\n# ----- Data to Image Transformer -----\n\ndef data2img(arr, font_size=50, resolution=(256, 256), font=cv2.FONT_HERSHEY_SIMPLEX):\n \"\"\" Structured Tabular Data to Image with cv2\n\n NOTE currently supports only iris and wine dataset\n \"\"\"\n x, y = resolution\n n_colums, n_features = 2, len(arr)\n n_lines = n_features % n_colums + int(n_features / n_colums)\n frame = np.ones((*resolution, 3), np.uint8)*0\n\n k = 0\n\n for i in range(n_colums):\n for j in range(n_lines):\n try:\n cv2.putText(\n frame, str(arr[k]), (30 + i * (x // n_colums), 5 + (j + 1) * (y // (n_lines + 1))),\n fontFace=font, fontScale=1, color=(255, 255, 255), thickness=2)\n k += 1\n except IndexError:\n break\n return np.array(frame, np.uint8)\n\n\n# ----- Dataset -----\n\nclass CustomTensorDataset(Dataset):\n def __init__(self, data, transform=None):\n self.data = data\n self.transform = transform\n\n def __len__(self):\n return len(self.data[0])\n\n def __getitem__(self, index):\n x = self.data[0][index]\n img = data2img(x)\n if self.transform:\n x = self.transform(img)\n\n if self.data[1] is not None:\n y = self.data[1][index]\n return x, y\n else:\n return x\n\nif __name__ == \"__main__\":\n pass\n\n","sub_path":"src/utils/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"393043334","text":"\"\"\"\nFind the pair of integers whose sum is 0\nGiven nn integers, none of which is 0, write a program that finds the pair of numbers whose sum is closest to 0.\n\nInput\nThe first line contains nn integers in ascending order. The value of nn is not given separately. (1 ≤ n ≤ 100,000,000)\n\nOutput\nPrint the pair of numbers whose sum is closest to 0, separated by a space. 
Print the pair in ascending order; if there are multiple answers, printing any one of them is fine.\n\nInput example\n-193 30 94 100 194\nOutput example\n-193 194\n\"\"\"\n\ndef sum_0(data):\n # Implement here\n \"\"\"\n :param data:\n :return:\n \"\"\"\n if len(data) <= 2:\n return data\n start, end = 0, len(data) - 1\n return_value = 99999\n return_pair = [0, 0]\n\n while (start != end):\n tmp_value = add_two_index(data, start, end)\n if return_value > tmp_value:\n return_value = tmp_value\n return_pair = [data[start], data[end]]\n\n if add_two_index(data, start, end - 1) < tmp_value:\n end += -1\n else:\n start += 1\n\n return return_pair\n\n\ndef add_two_index(arr, one_idx, two_idx):\n return abs(arr[one_idx] + arr[two_idx])\n\n\ndef main():\n '''\n Do not modify this part.\n '''\n\n given_data = input()\n\n data = [int(v.strip()) for v in given_data.split()]\n\n print(*sum_0(data))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"01. algorithm/datastructure/python/timecomplex/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"607520625","text":"import matplotlib as mpl\nmpl.use('pdf')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport scipy.stats as stats\nfrom scipy.stats import chi2, norm, exponweib\nfrom icecube.umdtools import cache,misc\nfrom icecube import icetray, dataclasses, histlite\n\nfrom skylab import statistics\nfitfun = statistics.delta_chi2\nweibfun=statistics.weib\n\n##let's define weibfun here:\n\n\n##Make plots prettier\nmisc.tex_mpl_rc()\nw=4\npropsmall = mpl.font_manager.FontProperties (size='small')\npropxsmall = mpl.font_manager.FontProperties (size='x-small')\n\nalldata = '/data/user/brelethford/Output/stacking_sensitivity/4yr_Starburst/bstacking_3yr/flux/'\n\nbckg_sirin = alldata + 'background_trials/'\nbckg_schatto = alldata + 'schatto79_background_trials/'\nsens_sirin = alldata + 'flux_inj/sensitivity/'\nsens_schatto = alldata + 'schatto79_flux_inj/sensitivity/'\ndisc_sirin = alldata + 'flux_inj/discovery/'\ndisc_schatto = alldata + 'schatto79_flux_inj/discovery/'\n\nbins = 40\nrange = (0.0,40.0)\n\ndef makehist(datafolder):\n files = [cache.load(datafolder+file) for file in os.listdir(datafolder) if file.endswith('.array')]\n TS=[]\n for file in files:\n for entry in file:\n if entry[0]==0:\n if entry[1]<0:\n TS.append(0)\n else:\n TS.append(entry[1])\n TSs=TS\n chi2fit_flux = fitfun(TSs, df=1., floc=0., fscale=1.)\n print('background only: median TS = {}'.format(str(np.asscalar(chi2fit_flux.isf(0.5)))))\n print ('max TS = {}'.format(str(max(TSs))))\n print ('Percentage of TS=0 is: {}'.format(str(1.0-np.float(np.count_nonzero(TSs))/np.float(len(TSs)))))\n ##Now we make hists of the test statistics ##\n flux_hist =histlite.hist(TSs,bins=bins,range=range)\n return flux_hist,chi2fit_flux\n\nhist_sirin,chi_sirin = makehist(bckg_sirin)\nhist_schatto,chi_schatto = makehist(bckg_schatto)\ndef makehist_inj(datafolder):\n files = cache.load(datafolder+'gamma2.0.array')\n\n TSs=files['trials']['TS']\n\n chi2fit_flux = fitfun(TSs, df=1., floc=0., fscale=1.)\n weib_flux = weibfun(TSs,df=2., floc=0., fscale=1.)\n print('background plus injected: median TS = {}'.format(str(np.asscalar(chi2fit_flux.isf(0.5)))))\n print ('max TS = {}'.format(str(max(TSs))))\n print ('Percentage of TS=0 is: {}'.format(str(1.0-np.float(np.count_nonzero(TSs))/np.float(len(TSs)))))\n \n ##Now we make hists of the test statistics ##\n flux_hist =histlite.hist(TSs,bins=bins,range=range)\n return 
flux_hist,chi2fit_flux\n\ndef printflux(datafolder):\n files = cache.load(datafolder+'gamma2.0.array')\n flux= files['flux']\n mu = files['mu']\n TSval = files['TSval']\n return (flux, mu, TSval)\n\n#extract hists for injected trials\nhist_sirin_sens,chi_sirin_sens = makehist_inj(sens_sirin)\nhist_schatto_sens,chi_schatto_sens = makehist_inj(sens_schatto)\nhist_sirin_disc,chi_sirin_disc = makehist_inj(disc_sirin)\nhist_schatto_disc,chi_schatto_disc = makehist_inj(disc_schatto)\n\n## Now to plot. ##\nfig_bckg = plt.figure (figsize=(w, .75*w))\nax=plt.gca()\n\nlabels = ['sirin', 'schatto']\nflux_hists = [hist_sirin,hist_schatto]\nchi2_fits = [chi_sirin,chi_schatto]\n\nsens_hists = [hist_sirin_sens,hist_schatto_sens]\nsens_fits = [chi_sirin_sens,chi_schatto_sens]\ndisc_hists = [hist_sirin_disc,hist_schatto_disc]\ndisc_fits = [chi_sirin_disc,chi_schatto_disc]\n\ncolors=['green', 'blue']\n#First for bckg\nfor flux_hist, chi2_fit, color,label in zip(flux_hists,chi2_fits, colors,labels):\n x = np.linspace(0,40,100)\n histlite.plot1d(ax,flux_hist.normalize(integrate=True),histtype='step',color=color,label='{} - '.format(label) + r'$\tilde{\chi}^2$' + ': df={}'.format(str(round(chi2_fit.par[0],2))))\n ax.plot(x, chi2_fit.pdf(x), linestyle=':',color=color)#, label='{} - '.format(label) + r'$\tilde{\chi}^2$' + ': df={}'.format(str(round(chi2_fit.par[0],2))))\n#now for sens, then disc \nfor flux_hist, chi2_fit, color in zip(sens_hists,sens_fits, colors):\n histlite.plot1d(ax,flux_hist.normalize(integrate=True),histtype='step',color=color)\n #plt.axvline(chi2_fit.isf(0.1), color = color, linestyle = ':')\n\nfor flux_hist, chi2_fit, color in zip(disc_hists,disc_fits, colors):\n histlite.plot1d(ax,flux_hist.normalize(integrate=True),histtype='step',color=color)\n #plt.axvline(chi2_fit.isf(0.5), color = color, linestyle = ':')\n\n###NOTE - don't use the following two lines, they don't give a good chi2 fit for some reason. I think we'd specifically need to use the deltachi2 function. 
\n#y = np.linspace(1e-5,20,100) #nonzero start to prevent infinity\n#ax.plot(y, chi2.pdf(y,1), linestyle=':',color='k', label=r'$\tilde{\chi}^2$: df=1')\n#for chi2_fit, weib_fit, color in zip(chi2_fits, weib_fits, colors):\n# x = np.linspace(0,20,100)\n# ax.plot(x, weib_fit.pdf(x), linestyle='--', color=color, label = 'weibull')\n# plt.axvline(chi2_fit.isf(norm.sf(5)), color = color, linestyle = ':')\n# plt.axvline(weib_fit.isf(norm.sf(5)), color = color, linestyle = '--')\n\n#Now let's get the signal\n\n#inj_flux = cache.load('/data/user/brelethford/Output/stacking_sensitivity/4yr_Starburst/4yr/flux_mhuber_git/flux_inj/sensitivity/gamma2.0.array')\n\n#def getTSinj(data):\n# TS = []\n# for trial in data['trials']:\n# TS.append(trial[1])\n# return TS\n\n#flux_inj_hist =histlite.hist(getTSinj(inj_flux),bins=bins,range=range)\n\n#histlite.plot1d(ax,flux_inj_hist.normalize(integrate=True),histtype='step', alpha = 0.5, color='green')\n\ndef ic_prelim(fig, x = 0.75, y = 0.8, **kw):\n \"\"\"Marks maps and plots as preliminary\"\"\"\n if 'color' not in kw:\n kw['color'] = 'red'\n if 'weight' not in kw:\n kw['weight'] = 'bold'\n fig.text(x, y, \"IceCube Preliminary\", **kw)\n\n\n#ax.set_title(r'4yr Starburst TS - updated')\nax.set_xlabel(r'TS')\nplt.subplots_adjust (left=.2, bottom=.2)\nic_prelim(fig_bckg, x = 0.5, y=0.3)\nax.set_ylabel(r'Probability Density') \nax.set_ylim(8e-5,1.2)\nax.set_xlim(0,40)\nax.semilogy() \nplt.legend(loc='upper right', prop=propxsmall, ncol=1)\nfig_bckg.savefig('/data/user/brelethford/AGN_Core/Plots/starburst/bstacking_starburst3yr_bckgTS.pdf')\n\nprint ( 'sens - sirin: flux, mu, TSval - ' + str(printflux(sens_sirin)))\nprint ( 'sens - schatto: flux, mu, TSval - ' + str(printflux(sens_schatto)))\nprint( 'disc - sirin: flux, mu, TSval - ' +str(printflux(disc_sirin)))\nprint( 'disc - schatto: flux, mu, TSval - ' +str(printflux(disc_schatto)))\n\n","sub_path":"skylab/sensitivity/stacking_sensitivity/plotting/obsolete/starburst_background.py","file_name":"starburst_background.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"231112745","text":"\n\"\"\"\nThis is the main module.\n\nExample:\n $ python dispatch.py --key <key> --host <host>\n --port <port> --user <user> <source> <dest>\n\nWhen the script starts, 
it matches the contents of two folders.\nIf destination has files/folders not present in source,\nthen it deletes them.\n\nThen it matches the current files/folders in the source directory to the destination directory.\n\nThe utility also watches the contents of the source directory\nand takes actions based on changes to the directory.\n\"\"\"\nimport argparse\nimport os\nimport sys\nimport signal\nimport time\nimport connector\n\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\nfrom utils import DSException\n\nclass DroidSync(object):\n \"\"\" Main DroidSync class\n\n Attributes:\n args (:obj:`string`) arguments specified during runtime\n source (str) Source folder to sync\n dest (str) Destination remote folder\n connector_client (:obj: connector.Connector) this is used to create a connection to the remote host\n observer (:obj: Observer) The observer object of watchdog, which observes changes in the directory\n \"\"\"\n def parse_args(self, args):\n parser = argparse.ArgumentParser()\n parser.add_argument('source', help='Enter the source folder location')\n parser.add_argument('dest', help='Enter the destination folder location')\n parser.add_argument('--key', dest='key', help='Key to be used to ssh')\n parser.add_argument('--host', dest='host', help='Host to ssh into')\n parser.add_argument('--port', dest='port', help='Port for the host to ssh', type=int)\n parser.add_argument('--pass', dest='password', help='Password to use')\n parser.add_argument('--key-pass', dest='key_password', help='Password for the private key file')\n parser.add_argument('--user', dest='user', help='user to login as')\n self.args = parser.parse_args(args)\n\n def __init__(self, args):\n self.parse_args(args)\n self.source = self.args.source\n self.dest = self.args.dest\n self.connector_client = connector.Connector(self.args.key,\n self.args.host,\n self.args.port,\n self.args.user,\n self.args.password,\n self.args.key_password)\n self.observer = Observer()\n\n def remove_files(self, source, dest):\n \"\"\"Remove the files not present in source\n\n Args:\n source (str): source dir\n dest (str): destination dir\n \"\"\"\n source_files = os.listdir(source)\n for name in self.connector_client.get_files_list(dest):\n if name not in source_files:\n self.connector_client.remove(dest, name)\n elif os.path.isdir(os.path.join(source, name)):\n self.remove_files(os.path.join(source, name), os.path.join(dest, name))\n\n def start(self):\n \"\"\"Start method starts executing the main logic\"\"\"\n if not os.path.isdir(self.source):\n raise DSException('%s is not a directory' % self.source)\n\n self.connector_client.check_dest_dir(self.dest)\n self.remove_files(self.source, self.dest)\n source_files = os.listdir(self.source)\n for source_file in source_files:\n name = os.path.join(self.source, source_file)\n target_file = os.path.join(self.dest, source_file)\n self.connector_client.check_target(name, target_file)\n\n event_handler = Handler(droidsync=self)\n self.observer.schedule(event_handler, self.source, recursive=True)\n self.observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n self.observer.stop()\n self.observer.join()\n\n\nclass Handler(FileSystemEventHandler):\n \"\"\"Handler class that handles the events emitted\n\n The class is inherited from FileSystemEventHandler.\n on_any_event is invoked as soon as any file system related event\n is emitted\n\n Attributes:\n droidsync: (:obj: DroidSync) - the droidsync object\n\n \"\"\"\n def __init__(self, droidsync, *args):\n 
super(Handler, self).__init__(*args)\n self.droidsync = droidsync\n\n def on_any_event(self, event):\n \"\"\" Method is called when any event occurs\n\n Args:\n event (:obj:) event object that initiated the handler\n \"\"\"\n src_path = event.src_path[event.src_path.find(self.droidsync.source) + len(self.droidsync.source):]\n event_type = event.event_type\n if not os.path.normpath(self.droidsync.source) == event.src_path:\n if event_type == 'moved':\n self.droidsync.connector_client.remove(self.droidsync.dest, src_path)\n dest_path = event.dest_path[event.dest_path.find(self.droidsync.source) + len(self.droidsync.source):]\n self.droidsync.connector_client.check_target(event.dest_path, os.path.join(self.droidsync.dest, dest_path))\n\n if event_type == 'modified' or event_type == 'created':\n self.droidsync.connector_client.check_target(event.src_path, os.path.join(self.droidsync.dest, src_path))\n\n if event_type == 'deleted':\n self.droidsync.connector_client.remove(self.droidsync.dest, src_path)\n\ndef handle_signal(signal, frame):\n os._exit(0)\n\ndef main(args=None):\n \"\"\"Main method\n\n Args:\n args (:obj:) arguments specified at runtime\n \"\"\"\n signal.signal(signal.SIGINT, handle_signal)\n client = DroidSync(args)\n client.start()\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))","sub_path":"dispatch.py","file_name":"dispatch.py","file_ext":"py","file_size_in_byte":5845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"18966524","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThere is a risk of loss when trading stocks, futures, forex, options and other\nfinancial instruments. Please trade with capital you can afford to\nlose. Past performance is not necessarily indicative of future results.\nNothing in this computer program/code is intended to be a recommendation, explicitly or implicitly, and/or\nsolicitation to buy or sell any stocks or futures or options or any securities/financial instruments.\nAll information and computer programs provided here is for education and\nentertainment purpose only; accuracy and thoroughness cannot be guaranteed.\nReaders/users are solely responsible for how to use these information and\nare solely responsible any consequences of using these information.\n\nIf you have any questions, please send email to IBridgePy@gmail.com\nAll rights reserved.\n\"\"\"\nfrom configuration import run_me\n\nfileName = 'example_show_positions_multi.py'\n# fileName = 'example_place_order_multi.py'\n\n# !!!!!! 
IMPORTANT !!!!!!!!!!!!!!!!!\naccountCode = ['DU16156', 'DU16157', 'DU16158'] # You need to change it to your own IB account numbers\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n'''\nIn the default mode, handle_data will be called every second.\nTo run Quantopian algorithms, handle_data will be called every minute\nPlease use the following runMode\n'''\n#runMode = 'run_like_quantopian'\n\nrun_me(fileName, globals())","sub_path":"RUN_ME_multi_account.py","file_name":"RUN_ME_multi_account.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"491566554","text":"from django.db import models\n\nfrom festivals.models import Festival\n\nFANS_IN_RATINGS_TABLE = ['Maarten', 'Adrienne']\n\n\nclass Film(models.Model):\n \"\"\"\n Film table.\n \"\"\"\n\n # Define the fields.\n festival = models.ForeignKey(Festival, on_delete=models.CASCADE)\n film_id = models.IntegerField()\n seq_nr = models.IntegerField()\n sort_title = models.CharField(max_length=128)\n title = models.CharField(max_length=128)\n title_language = models.CharField(max_length=2)\n subsection = models.CharField(max_length=32, null=True)\n duration = models.DurationField(null=False)\n medium_category = models.CharField(max_length=32)\n url = models.URLField(max_length=200)\n\n # Define a manager.\n films = models.Manager()\n\n class Meta:\n db_table = 'film'\n unique_together = ('festival', 'film_id')\n\n def __str__(self):\n return f\"{self.title} ({self.duration.total_seconds() / 60:.0f}')\"\n\n def duration_str(self):\n return ':'.join(f'{self.duration}'.split(':')[:2])\n\n\ndef me():\n fans = FilmFan.film_fans.all()\n return fans[0] if len(fans) > 0 else None\n\n\ndef set_current_fan(request):\n user_fan = get_user_fan(request.user)\n if user_fan is not None:\n request.session['fan_name'] = user_fan.name\n\n\ndef current_fan(session):\n fan_name = session.get('fan_name')\n fan = FilmFan.film_fans.get(name=fan_name) if fan_name is not None else None\n return fan\n\n\ndef unset_current_fan(session):\n if session.get('fan_name', False):\n del session['fan_name']\n\n\ndef user_name_to_fan_name(user_name):\n return f'{user_name[0].upper()}{user_name[1:]}'\n\n\ndef get_user_fan(user):\n if not user.is_authenticated:\n return None\n user_fan_name = user_name_to_fan_name(user.username)\n user_fan = FilmFan.film_fans.get(name=user_fan_name) if user_fan_name is not None else None\n return user_fan\n\n\ndef get_present_fans():\n return FilmFan.film_fans.filter(name__in=FANS_IN_RATINGS_TABLE)\n\n\nclass FilmFan(models.Model):\n\n # Define the fields.\n name = models.CharField(max_length=16, unique=True)\n seq_nr = models.IntegerField(unique=True)\n is_admin = models.BooleanField(default=False)\n\n # Define a manager.\n film_fans = models.Manager()\n\n class Meta:\n db_table = \"film_fan\"\n\n def __str__(self):\n return f'{self.name}'\n\n def initial(self):\n return self.name[:1] if self != me() else \"\"\n\n def switch_current(self, session):\n session['fan_name'] = self.name\n\n def fan_rating(self, film):\n try:\n fan_rating = FilmFanFilmRating.film_ratings.get(film=film, film_fan=self)\n except (KeyError, FilmFanFilmRating.DoesNotExist):\n fan_rating = None\n return fan_rating\n\n def fan_rating_str(self, film):\n fan_rating = self.fan_rating(film)\n return f'{fan_rating.rating}' if fan_rating is not None else '-'\n\n def fan_rating_name(self, film):\n fan_rating = self.fan_rating(film)\n if fan_rating is None:\n fan_rating = 
FilmFanFilmRating(film=film, film_fan=self, rating=0)\n name_by_rating = dict(FilmFanFilmRating.Rating.choices)\n return name_by_rating[fan_rating.rating]\n\n\ndef get_rating_name(rating_value):\n choices = FilmFanFilmRating.Rating.choices\n try:\n name = [name for value, name in choices if value == int(rating_value)][0]\n except IndexError:\n name = None\n return name\n\n\nclass FilmFanFilmRating(models.Model):\n \"\"\"\n Film Fan Film Rating table.\n \"\"\"\n\n class Rating(models.IntegerChoices):\n UNRATED = 0\n ALREADY_SEEN = 1\n WILL_SEE = 2\n VERY_BAD = 3\n BAD = 4\n BELOW_MEDIOCRE = 5\n MEDIOCRE = 6\n INDECISIVE = 7\n GOOD = 8\n VERY_GOOD = 9\n EXCELLENT = 10\n\n # Define the fields.\n film = models.ForeignKey(Film, on_delete=models.CASCADE)\n film_fan = models.ForeignKey(FilmFan, on_delete=models.CASCADE)\n rating = models.IntegerField(choices=Rating.choices)\n\n # Define a manager.\n film_ratings = models.Manager()\n\n class Meta:\n db_table = 'film_rating'\n unique_together = ('film', 'film_fan')\n\n def __str__(self):\n return f\"{self.film} - {self.film_fan.initial()}{self.rating}\"\n","sub_path":"FestivalPlanner/films/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"335116812","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n'''\nAuthor: Sphantix Hang\nDate: 2020-10-08 09:21:48\nLastEditors: Sphantix Hang\nLastEditTime: 2020-10-08 11:56:54\n'''\n\nimport threading\n\n\nclass SingletonType(type):\n _instance_lock = threading.Lock()\n\n def __call__(cls, *args, **kwargs):\n if not hasattr(cls, \"_instance\"):\n with SingletonType._instance_lock:\n if not hasattr(cls, \"_instance\"):\n cls._instance = super(\n SingletonType, cls).__call__(*args, **kwargs)\n return cls._instance\n","sub_path":"src/wisbec/design_patterns/singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"344190651","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass AlipayEbppInvoiceEcorderOrderQueryModel(object):\n\n def __init__(self):\n self._account_id = None\n self._agreement_no = None\n self._enterprise_id = None\n self._order_id = None\n self._order_type = None\n\n @property\n def account_id(self):\n return self._account_id\n\n @account_id.setter\n def account_id(self, value):\n self._account_id = value\n @property\n def agreement_no(self):\n return self._agreement_no\n\n @agreement_no.setter\n def agreement_no(self, value):\n self._agreement_no = value\n @property\n def enterprise_id(self):\n return self._enterprise_id\n\n @enterprise_id.setter\n def enterprise_id(self, value):\n self._enterprise_id = value\n @property\n def order_id(self):\n return self._order_id\n\n @order_id.setter\n def order_id(self, value):\n self._order_id = value\n @property\n def order_type(self):\n return self._order_type\n\n @order_type.setter\n def order_type(self, value):\n self._order_type = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.account_id:\n if hasattr(self.account_id, 'to_alipay_dict'):\n params['account_id'] = self.account_id.to_alipay_dict()\n else:\n params['account_id'] = self.account_id\n if self.agreement_no:\n if hasattr(self.agreement_no, 'to_alipay_dict'):\n params['agreement_no'] = self.agreement_no.to_alipay_dict()\n else:\n 
params['agreement_no'] = self.agreement_no\n if self.enterprise_id:\n if hasattr(self.enterprise_id, 'to_alipay_dict'):\n params['enterprise_id'] = self.enterprise_id.to_alipay_dict()\n else:\n params['enterprise_id'] = self.enterprise_id\n if self.order_id:\n if hasattr(self.order_id, 'to_alipay_dict'):\n params['order_id'] = self.order_id.to_alipay_dict()\n else:\n params['order_id'] = self.order_id\n if self.order_type:\n if hasattr(self.order_type, 'to_alipay_dict'):\n params['order_type'] = self.order_type.to_alipay_dict()\n else:\n params['order_type'] = self.order_type\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = AlipayEbppInvoiceEcorderOrderQueryModel()\n if 'account_id' in d:\n o.account_id = d['account_id']\n if 'agreement_no' in d:\n o.agreement_no = d['agreement_no']\n if 'enterprise_id' in d:\n o.enterprise_id = d['enterprise_id']\n if 'order_id' in d:\n o.order_id = d['order_id']\n if 'order_type' in d:\n o.order_type = d['order_type']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/AlipayEbppInvoiceEcorderOrderQueryModel.py","file_name":"AlipayEbppInvoiceEcorderOrderQueryModel.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"524447257","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2019 Diego Valverde\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nfrom . import funk_types\nfrom . import funk_constants\nfrom . 
import funk_ast\n\n\nclass Emitter:\n def __init__(self):\n self.index = 1\n self.code = ''\n self.scope_arg_map = {}\n self.scope_result_idx = None\n self.p_fn_args = None\n\n def emit(self):\n return self.code\n\n def get_node_data(self, node):\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n self.code += \"\"\"\n ;; extract data from Node\n %{0} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, i32 0, i32 1\n \"\"\".format(p[0], node=node)\n\n return '%{}'.format(p[-1])\n\n def set_node_type(self, node, funk_type, index=0):\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n self.code += \"\"\"\n ;;;store node type: {str_type}\n %{0} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, i32 0, i32 0\n store i8 {funk_type}, i8* %{0}, align 8\n \"\"\".format(p[0], str_type=funk_types.to_str[funk_type], node=node, funk_type=funk_type)\n\n def get_node_data_value(self, node, as_type=funk_types.int):\n p = [x for x in range(self.index, self.index + 4)]\n self.index = p[-1] + 1\n\n if as_type == funk_types.double:\n self.code += \"\"\"\n ;; Get node.data.value\n %{0} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, i32 0, i32 1\n %{1} = getelementptr inbounds %struct.tdata, %struct.tdata* %{0}, i32 0, i32 1\n %{2} = bitcast %union.data_type* %{1} to double*\n %{3} = load double, double* %{2}, align 8\n \"\"\".format(p[0], p[1], p[2], p[3], node=node)\n else:\n self.code += \"\"\"\n ;; Get node.data.value\n %{0} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, i32 0, i32 1\n %{1} = getelementptr inbounds %struct.tdata, %struct.tdata* %{0}, i32 0, i32 1\n %{2} = bitcast %union.data_type* %{1} to i32*\n %{3} = load i32, i32* %{2}, align 8\n \"\"\".format(p[0], p[1], p[2], p[3], node=node)\n\n return '%{}'.format(p[-1])\n\n def set_node_data_value(self, name, node, value, as_type):\n p = [x for x in range(self.index, self.index + 3)]\n self.index = p[-1] + 1\n\n if as_type == funk_types.double:\n self.code += \"\"\"\n ;; {name}.data.value = {value} -- double\n %{0} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, i32 0, i32 1\n %{1} = getelementptr inbounds %struct.tdata, %struct.tdata* %{0}, i32 0, i32 1\n %{2} = bitcast %union.data_type* %{1} to double*\n store double {value}, double* %{2}, align 8\n \"\"\".format(p[0], p[1], p[2], node=node, value=self.enconde_double_to_ieee754_32(value), name=name)\n else:\n self.code += \"\"\"\n ;; {name}.data.value = {value} -- int\n %{0} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, i32 0, i32 1\n %{1} = getelementptr inbounds %struct.tdata, %struct.tdata* %{0}, i32 0, i32 1\n %{2} = bitcast %union.data_type* %{1} to i32*\n store i32 {value}, i32* %{2}, align 8\n \"\"\".format(p[0], p[1], p[2], node=node, value=value, name=name)\n\n #raise Exception('Unsupported type {}'.format(type))\n\n\n\n def get_node_data_type(self, node, ret_i8=False):\n\n if ret_i8:\n p = [x for x in range(self.index, self.index + 3)]\n self.index = p[-1] + 1\n self.code += \"\"\"\n ;;Get node.data.type\n %{0} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, i32 0, i32 1\n %{1} = getelementptr inbounds %struct.tdata, %struct.tdata* %{0}, i32 0, i32 0\n %{2} = load i8, i8* %{1}, align 8\n \"\"\".format(p[0], p[1], p[2], node=node)\n\n else:\n p = [x for x in range(self.index, self.index + 4)]\n self.index = p[-1] + 1\n self.code += \"\"\"\n ;;Get node.data.type\n %{0} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, 
i32 0, i32 1\n %{1} = getelementptr inbounds %struct.tdata, %struct.tdata* %{0}, i32 0, i32 0\n %{2} = load i8, i8* %{1}, align 8\n %{3} = zext i8 %{2} to i32\n \"\"\".format(p[0], p[1], p[2], p[3], node=node)\n\n return '%{}'.format(p[-1])\n\n def set_node_data_type(self, name, node, type):\n p = [x for x in range(self.index, self.index + 2)]\n self.index = p[-1] + 1\n self.code += \"\"\"\n ;; {name}.data.type = '{type_string}\\'\n %{0} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, i32 0, i32 1\n %{1} = getelementptr inbounds %struct.tdata, %struct.tdata* %{0}, i32 0, i32 0\n store i8 {type}, i8* %{1}, align 8\n \"\"\".format(p[0], p[1], node=node, type=type, name=name, type_string=funk_types.to_str[type])\n\n def get_node_pointer(self, node):\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n ;; get node pointer\n %{0} = load %struct.tnode*, %struct.tnode** {node}, align 8\n \"\"\".format(p[0], node=node)\n\n return '%{}'.format(p[-1])\n\n def get_node_type(self, node):\n\n p = [x for x in range(self.index, self.index + 3)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n %{0} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, i32 0, i32 0\n %{1} = load i8, i8* %{0}, align 8\n %{2} = zext i8 %{1} to i32\n \"\"\".format(p[0], p[1], p[2], node=node)\n\n return '%{}'.format(p[-1])\n\n def get_next_node(self, node):\n\n p = [x for x in range(self.index, self.index + 5)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n ;;return a copy of the next node from list\n %{0} = alloca %struct.tnode, align 8\n %{1} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, i32 0, i32 2\n %{2} = load %struct.tnode*, %struct.tnode** %{1}, align 8\n %{3} = bitcast %struct.tnode* %{0} to i8*\n %{4} = bitcast %struct.tnode* %{2} to i8*\n call void @memcpy(i8* align 8 %{3}, i8* align 8 %{4}, i64 {tnode_size}, i1 false)\n \"\"\".format(p[0], p[1], p[2], p[3], p[4], node=node, tnode_size=funk_constants.tnode_size_bytes)\n\n return '%{}'.format(p[0])\n\n def fcmp_signed(self, operation, a, b, result=None):\n return self.arith_helper(a, b, operation, result)\n\n def icmp_signed(self, operation, a, b, result=None):\n return self.arith_helper(a, b, operation, result)\n\n def boolean_op(self, operation, a, b, result=None):\n return self.arith_helper(a, b, operation, result)\n\n def external_function(self, name):\n self.code += \"\"\"\n declare void {name}(%struct.tnode*, %struct.tnode*, i32)\n \"\"\".format(name=name)\n\n def noop(self):\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n %{0} = add i1 0, 0\n \"\"\".format(p[0])\n\n def br(self, label_true):\n self.code += \"\"\"\n br label %{label_true}\n \"\"\".format(label_true=label_true)\n\n def br_cond_reg(self, reg, label_true, label_false):\n p = [x for x in range(self.index, self.index + 2)]\n self.index = p[-1]\n self.code += \"\"\"\n br i1 {reg}, label %{label_true}, label %{label_false}\n \"\"\".format(reg=reg, label_true=label_true, label_false=label_false)\n\n def br_cond(self, cond, a, b, label_true, label_false):\n p = [x for x in range(self.index, self.index + 2)]\n self.index = p[-1]\n self.code += \"\"\"\n %{0} = icmp {cond} i32 {a}, {b}\n br i1 %{0}, label %{label_true}, label %{label_false}\n \"\"\".format(p[0], p[1], cond=cond, a=a, b=b, label_true=label_true, label_false=label_false)\n\n def add_comment(self, comment):\n self.code += \"\"\"\n ;;; {}\"\"\".format(comment)\n\n def ret(self):\n self.code 
+= \"\"\"\n ret void\n \"\"\"\n self.index += 1\n\n def add_label(self, label):\n \"\"\"\n Labels can only be placed at the start of a basic block.\n In other words, they must go directly after a terminator instruction\n \"\"\"\n self.code += \"\"\"\n {label}:\n \"\"\".format(label=label)\n\n def add(self, a, b, result=None):\n return self.arith_helper(a, b, 'add', result)\n\n def sub(self, a, b, result=None):\n return self.arith_helper(a, b, 'sub', result)\n\n def srem(self, a, b, result=None):\n return self.arith_helper(a, b, 'mod', result)\n\n def sdiv(self, a, b, result=None):\n return self.arith_helper(a, b, 'div', result)\n\n def mul(self, a, b, result=None):\n return self.arith_helper(a, b, 'mul', result)\n\n def arith_helper(self, a, b, operation, result):\n self.add_comment('{} {} {}'.format(a, operation, b))\n\n if result is None:\n result = self.alloc_tnode('{} result'.format(operation))\n\n if isinstance(a, int):\n self.code += \"\"\"\n call void @funk_{operartion}_ri(%struct.tnode* {p_result}, %struct.tnode* {pA}, i32 {pB} )\n \"\"\".format(p_result=result, pA=b, pB=a, operartion=operation)\n elif isinstance(b, int):\n self.code += \"\"\"\n call void @funk_{operartion}_ri(%struct.tnode* {p_result}, %struct.tnode* {pA}, i32 {pB} )\n \"\"\".format(p_result=result, pA=a, pB=b, operartion=operation)\n elif isinstance(a, float):\n self.code += \"\"\"\n call void @funk_{operartion}_rf(%struct.tnode* {p_result}, %struct.tnode* {pA}, double {pB} )\n \"\"\".format(p_result=result, pA=b, pB=a, operartion=operation)\n elif isinstance(b, float):\n self.code += \"\"\"\n call void @funk_{operartion}_rf(%struct.tnode* {p_result}, %struct.tnode* {pA}, double {pB} )\n \"\"\".format(p_result=result, pA=a, pB=b, operartion=operation)\n else:\n self.code += \"\"\"\n call void @funk_{operartion}_rr(%struct.tnode* {p_result}, %struct.tnode* {pA}, %struct.tnode* {pB} )\n \"\"\".format(p_result=result, pA=a, pB=b, operartion=operation)\n\n return result\n\n def store_val(self, p_data, val):\n self.code += \"\"\"\n store i32 {val}, i32* %{p_data}, align 4\n \"\"\".format(val=val, p_data=p_data)\n\n def copy_node(self, node_src, node_dst):\n p = [x for x in range(self.index, self.index + 2)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n ;; copy node\n %{0} = bitcast %struct.tnode* {node_dst} to i8*\n %{1} = bitcast %struct.tnode* {node_src} to i8*\n call void @memcpy(i8* align 8 %{0}, i8* align 8 %{1}, i64 {tnode_size}, i1 false)\n \"\"\".format(p[0], p[1], node_dst=node_dst, node_src=node_src, tnode_size=funk_constants.tnode_size_bytes)\n\n def call_fn_ptr(self, fn_node, arguments, result=None):\n n = len(arguments)\n\n if result is None:\n result = self.allocate_fn_return_node()\n\n self.add_comment('Create the argument array')\n array = self.alloc_array_on_stack(n)\n\n prev = None\n for i in range(n):\n p_element = self.get_array_element(array, i, n)\n self.copy_node(arguments[i], p_element)\n\n head = self.get_array_element(array, 0, n)\n\n fn_data = self.get_node_data(fn_node)\n\n p = [x for x in range(self.index, self.index + 3)]\n self.index = p[-1] + 1\n self.code += \"\"\"\n ;; call Function Pointer\n\n %{0} = getelementptr inbounds %struct.tdata, %struct.tdata* {fn_data}, i32 0, i32 1\n %{1} = bitcast %union.data_type* %{0} to void (%struct.tnode*, i32, %struct.tnode*)**\n %{2} = load void (%struct.tnode*, i32, %struct.tnode*)*, void (%struct.tnode*, i32, %struct.tnode*)** %{1}, align 8\n call void %{2}(%struct.tnode* {result}, i32 {n}, %struct.tnode* {args})\n \"\"\".format(p[0], p[1], 
p[2], fn_data=fn_data, result=result, n=n, args=head)\n\n return result\n\n def allocate_fn_return_node(self):\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n ;; allocate the function result node\n %{0} = alloca %struct.tnode, align 8\n \"\"\".format(p[0])\n\n return '%{}'.format(p[0])\n\n def call_function(self, fn, arguments, result=None):\n self.add_comment('====== call function {} {}'.format(fn, arguments))\n\n n = len(arguments)\n self.add_comment('Create the argument array')\n array = self.alloc_array_on_stack(n)\n\n for i in range(n):\n p_element = self.get_array_element(array, i, n)\n self.copy_node(arguments[i], p_element)\n\n head = self.get_array_element(array, 0, n)\n\n if result is None:\n result = self.allocate_fn_return_node()\n self.set_node_data_type('result', result, funk_types.int)\n\n self.code += \"\"\"\n ;;call the function\n call void {fn}(%struct.tnode* {result}, i32 {n}, %struct.tnode* {arguments})\n \"\"\".format(result=result, fn=fn, arguments=head, n=n)\n\n return result\n\n def set_null_result(self):\n self.set_node_type('%0', funk_types.empty_array)\n\n def get_result_data_pointer(self):\n\n p = [x for x in range(self.index, self.index + 2)]\n\n self.code += \"\"\"\n ;;; Get a pointer to the pointer to the result\n %{0} = alloca %struct.tnode*, align 8\n store %struct.tnode* %0, %struct.tnode** %{0}, align 8\n ;;Now get the actual data\n %{1} = load %struct.tnode*, %struct.tnode** %{0}, align 8\n \"\"\".format(p[0], p[1])\n\n self.index = p[-1] + 1\n return '%{}'.format(p[1])\n\n def get_function_argument_tnode(self, idx):\n p = [x for x in range(self.index, self.index + 2)]\n\n self.code += \"\"\"\n ;; get Node from pointer\n %{0} = load %struct.tnode*, %struct.tnode** {p_fn_args}, align 8\n %{1} = getelementptr inbounds %struct.tnode, %struct.tnode* %{0}, i64 {idx}\n \"\"\".format(p[0], p[1], idx=idx, p_fn_args=self.p_fn_args)\n\n self.index = p[-1] + 1\n return '%{}'.format(p[-1])\n\n def alloc_i32(self):\n p = [x for x in range(self.index, self.index + 1)]\n\n self.code += \"\"\"\n %{0} = alloca i32, align 4 \"\"\".format(p[0])\n self.index = p[-1] + 1\n return '%{}'.format(p[-1])\n\n def open_function(self, name, arg_count, ret_type='void'):\n self.index = 0\n self.scope_arg_map = {}\n self.scope_result_idx = None\n p = [None]\n\n if name == 'main':\n self.code += \"\"\"\n@.str_TRACE = private unnamed_addr constant [5 x i8] c\"--->\\00\", align 1\n\n\n;; ==========================\n;; ===\n;; ======= M A I N ==========\n;; ===\n;; ==========================\ndefine i32 @main() #0 {\n call void @init_random_seed()\n\n ;; Init the garbage collector\n call void @initGarbageCollector()\n \"\"\"\n self.index = 1\n elif name == 's2d_render':\n self.index = 1\n self.code += \"\"\"\ndefine void @s2d_render() #0 {\n \"\"\"\n\n\n else:\n\n self.index = 4 # number of arguments + result + the first label\n\n self.code += \"\"\"\n;; ======== {fn_name} Function implementation =========\n;; The first input argument is a pointer to the result\n;; The second argument contains the arity of the function\n;; The third argument is a list of nodes containing zero or more arguments\n\n\ndefine {ret_type} {fn_name}(%struct.tnode*, i32, %struct.tnode*) #0 {{\n \"\"\".format(fn_name=name, ret_type=ret_type)\n\n self.add_comment('pointer to result')\n p_result = self.alloc_tnode_pointer()\n\n # TODO: Arity is already a copy, why do I\n # TODO: need yet another copy on the stack?\n self.add_comment('function arity')\n arity = 
self.alloc_i32()\n\n self.code += \"\"\"\n store i32 %1, i32* {arg_count}, align 4\n\n \"\"\".format(arg_count=arity)\n\n self.add_comment('pointer to argument list')\n p_arglist = self.alloc_tnode_pointer()\n self.p_fn_args = p_arglist\n\n self.code += \"\"\"\n store %struct.tnode* %2, %struct.tnode** {p_arglist}, align 8\n \"\"\".format(p_arglist=p_arglist)\n\n # Note that all of the input arguments to the function\n # are copied into a new vector (sequential in memory) under\n # the function stack frame. The reason for this is:\n # 1 - In case of tail recursion we want to update this same vector\n # 2 - We don't wan't the callee to modify the caller's stack thus the copy\n # Note that this does not incur in additional stack space since this\n # is done only once in case of tail recursion (which is the most used\n # pattern in Funk\n\n self.add_comment('Create the argument array')\n array = self.alloc_array_on_stack(arg_count)\n\n array_ptr = self.get_array_element(array, 0, arg_count)\n self.code += \"\"\"\n call void @funk_memcp_arr(%struct.tnode* {dst}, %struct.tnode* {src}, i32 {n}, i8 1)\n \"\"\".format(dst=array_ptr, src='%2', n=arg_count)\n\n self.code += \"\"\"\n store %struct.tnode* {src}, %struct.tnode** {dst}, align 8\n \"\"\".format(src=array_ptr, dst=self.p_fn_args)\n start_label = 'start_{}'.format(name[1:])\n\n self.br(start_label)\n self.add_label(start_label)\n\n return arity\n\n def close_function(self, name):\n if name == 'main':\n self.code += \"\"\"\n ret i32 0\n }\n \"\"\"\n else:\n self.code += \"\"\"\n ;; Remember we return void because a pointer to the result\n ;; is passed as argument\n br label %l_{name}_end\n l_{name}_end:\n ret void\n }}\"\"\".format(result=self.scope_result_idx, name=name[1:])\n\n def enconde_double_to_ieee754_32(self, value):\n # For obscure historical reasons, llvm double literals are represented as\n # if they were doubles, but with the precision of a double.\n # return hex(struct.unpack('Q', struct.pack('d', double(value)))[0])[:-8] + '00000000'\n return value\n\n def load_global_function_to_data(self, data, global_symbol):\n p = [x for x in range(self.index, self.index + 2)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n ;; Store pointer to global function: \\'{global_symbol}\\'\n %{0} = getelementptr inbounds %struct.tdata, %struct.tdata* {data}, i32 0, i32 1\n %{1} = bitcast %union.data_type* %{0} to void (%struct.tnode*, i32, %struct.tnode*)**\n store void (%struct.tnode*, i32, %struct.tnode*)* {global_symbol}, void (%struct.tnode*, i32, %struct.tnode*)** %{1}, align 8\n \"\"\".format(p[0], p[1], data=data, global_symbol=global_symbol)\n\n def mark_node_for_garbage_collection(self, reg):\n self.code += \"\"\"\n call void @markNodeForGarbageCollection(%struct.tnode* {reg})\n \"\"\".format(reg=reg)\n\n def garbage_collector_register_allocation(self, ptr):\n self.code += \"\"\"\n\n call void @registerHeapAllocation(%struct.tnode* {ptr})\n \"\"\".format(ptr=ptr)\n\n def concat_list(self,left,right):\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n %{0} = call %struct.tnode* @funk_concatenate_lists(%struct.tnode* {left}, %struct.tnode* {right})\n \"\"\".format(p[0], left=left, right=right)\n\n return '%{}'.format(p[-1])\n\n\n def malloc_right_node(self, ptr_left):\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n %{0} = call %struct.tnode* @funk_mallocNodeRight(%struct.tnode* {ptr_left})\n \"\"\".format(p[0], 
ptr_left=ptr_left)\n\n return '%{}'.format(p[-1]) \n\n def print_collector_status(self):\n self.code += \"\"\"\n ;;call void @printCollectorStatus()\n \"\"\"\n\n def collect_garbage(self):\n self.print_collector_status()\n self.code += \"\"\"\n call void @collectGarbage()\n \"\"\"\n\n def allocate_in_heap(self):\n\n p = [x for x in range(self.index, self.index + 2)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n %{0} = call i8* @malloc(i64 {tnode_size}) #3\n %{1} = bitcast i8* %{0} to %struct.tnode*\n\n \"\"\".format(p[0], p[1], tnode_size=funk_constants.tnode_size_bytes)\n\n return '%{}'.format(p[1])\n\n def alloc_tnode_pointer(self):\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n %{0} = alloca %struct.tnode*, align 8\"\"\".format(p[0])\n\n return '%{}'.format(p[0])\n\n def set_config_parameter(self, args):\n\n if len(args) != 2:\n raise Exception('=== set_config_parameter takes 2 parameters')\n\n id = args[0]\n value = args[1]\n\n self.code += \"\"\"\n call void @funk_set_config_param(i32 {}, i32 {})\n \"\"\".format(id.eval(), value.eval())\n\n def alloc_tnode(self, name, value=None, data_type=None, node_type=None):\n p = [x for x in range(self.index, self.index + 1)]\n\n self.code += \"\"\"\n ;; variable \\'{name}\\': allocate tnode\n %{0} = alloca %struct.tnode, align 8\n \"\"\".format(p[0], name=name)\n\n node = '%{}'.format(p[0])\n\n self.index = p[-1] + 1\n if value is not None:\n self.set_node_data_value(name, node, value, data_type)\n\n if data_type is not None:\n self.set_node_data_type(name, node, data_type)\n\n if node_type is not None:\n self.set_node_type(node, node_type)\n else:\n self.set_node_type(node, funk_types.scalar)\n\n return node\n\n def get_array_element(self, array, idx, lenght):\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n %{0} = getelementptr inbounds [{lenght} x %struct.tnode], [{lenght} x %struct.tnode]* {array}, i64 0, i64 {idx}\n \"\"\".format(p[0], lenght=lenght, idx=idx, array=array)\n\n return '%{}'.format(p[-1])\n\n def alloc_array_on_stack(self, length):\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n %{0} = alloca [{lenght} x %struct.tnode], align 16\n \"\"\".format(p[0], lenght=length)\n\n return '%{}'.format(p[-1])\n\n def set_array_element(self, array_address, index, data, tail=False):\n self.add_comment(';; >>>>> set_array_element ')\n\n if tail:\n type_array = funk_types.empty_array\n else:\n type_array = funk_types.array\n\n p_node = self.alloc_tnode_pointer()\n\n self.code += \"\"\"\n ;; Store the array address into the pointer\n store %struct.tnode* %{array_address}, %struct.tnode** %{0}, align 8\n \"\"\".format(p_node, array_address=array_address)\n\n self.set_node_type(p_node, funk_types.array, index)\n\n self.set_node_data(p_node, data, index)\n\n if not tail:\n self.set_next_node(p_node, index + 1)\n\n def set_next_node(self, node, next_node):\n\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n ;; Set linked list next element\n %{0} = getelementptr inbounds %struct.tnode, %struct.tnode* {node}, i32 0, i32 2\n store %struct.tnode* {next_node}, %struct.tnode** %{0}, align 8\n \"\"\".format(p[0], next_node=next_node, node=node)\n\n def print_trace(self):\n p = [x for x in range(self.index, self.index + 1)]\n\n self.code += \"\"\"\n ;;Print a string\n %{0} = call i32 (i8*, ...) 
@printf(i8* getelementptr inbounds ([{format_len} x i8], [{format_len} x i8]* @.str_TRACE , i32 0, i32 0))\"\"\".format(\n p[0], format_len=5)\n\n self.index = p[-1] + 1\n\n\n def print_funk(self, funk, args):\n\n for arg_expr in args:\n arg = arg_expr.eval()\n\n if arg[:1] != '%':\n format_string = arg\n format_len = len(format_string) + 1\n p = [x for x in range(self.index, self.index + 1)]\n funk.preamble += \\\n \"\"\"\n@.str_{cnt} = private unnamed_addr constant [{format_len} x i8] c\"{format_string}\\00\", align 1\n \"\"\".format(cnt=funk.strings_count, format_len=format_len, format_string=format_string)\n\n self.code += \"\"\"\n ;;Print a string\n %{0} = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([{format_len} x i8], [{format_len} x i8]* @.str_{cnt} , i32 0, i32 0))\"\"\".format(\n p[0], format_len=format_len, cnt=funk.strings_count)\n\n funk.strings_count += 1\n\n self.index = p[-1] + 1\n else:\n self.code += \"\"\"\n call void @print_scalar(%struct.tnode* {node})\n \"\"\".format(node=arg)\n\n p = [x for x in range(self.index, self.index + 1)]\n self.code += \"\"\"\n ;; EOL\n %{0} = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str_DISP_EOL, i32 0, i32 0))\n ;;============ [END] Print ====\n \"\"\".format(p[0])\n self.index = p[-1] + 1\n\n def init_stack_variable(self, node):\n\n self.code += \"\"\"\n call void @createLhsStackVar(%struct.tnode* {node})\n \"\"\".format(node=node)\n\n def alloc_variable_linked_list(self, start, end, expr):\n start_val = self.get_node_data_value(start)\n end_val = self.get_node_data_value(end)\n\n if isinstance(expr, funk_ast.IntegerConstant):\n p = [x for x in range(self.index, self.index + 1)]\n self.code += \"\"\"\n ;;\n %{0} = call %struct.tnode* @funk_CreateLinkedListConstInt(i32 {start}, i32 {end}, i32 {val})\n\n \"\"\".format(p[0], start=start_val, end=end_val,\n val=expr.eval())\n self.index = p[-1] + 1\n return '%{}'.format(p[0])\n else:\n start_type = self.get_node_data_type(start, ret_i8=True)\n\n p = [x for x in range(self.index, self.index + 1)]\n self.code += \"\"\"\n ;;\n %{0} = call %struct.tnode* @createLinkedList(i32 {start}, i32 {end}, i8 zeroext {type})\n\n \"\"\".format(p[0], start=start_val, end=end_val,\n type=start_type)\n self.index = p[-1] + 1\n return '%{}'.format(p[0])\n\n def rand_int(self, funk, args):\n if len(args) != 2:\n raise Exception('=== rand_int takes 2 parameters')\n\n lower, upper = args\n\n p = [x for x in range(self.index, self.index + 1)]\n self.code += \"\"\"\n ;;\n %{0} = call i32 @rand_int(i32 {lower}, i32 {upper})\n\n \"\"\".format(p[0], lower=lower.eval(), upper=upper.eval())\n self.index = p[-1] + 1\n\n return '%{}'.format(p[0])\n\n def get_s2d_user_global_state(self, dst):\n\n self.code += \"\"\"\n ;;\n call void @get_s2d_user_global_state(%struct.tnode* noalias sret {})\n \"\"\".format(dst)\n\n def rand_double(self, funk, args, result=None):\n if len(args) != 2:\n raise Exception('=== rand_double takes 2 parameters')\n\n lower, upper = args\n\n p = [x for x in range(self.index, self.index + 1)]\n self.code += \"\"\"\n ;;\n %{0} = call double @rand_double(double {lower}, double {upper})\n \"\"\".format(p[0], lower=lower.eval(), upper=upper.eval())\n self.index = p[-1] + 1\n\n node_val = '%{}'.format(p[-1])\n\n if result is None:\n result = self.alloc_tnode(name='rand_double_result', value=node_val, data_type=funk_types.double)\n else:\n self.set_node_data_value('rand_double_result', result, node_val, as_type=funk_types.double)\n\n return result\n\n def 
s2d_create_window(self, funk, args):\n\n window_name = args[0].eval()\n width = args[1].eval()\n height = args[2].eval()\n\n format_len = len(window_name) + 1\n\n funk.preamble += \\\n \"\"\"\n@.str_{cnt} = private unnamed_addr constant [{format_len} x i8] c\"{format_string}\\00\", align 1\n \"\"\".format(cnt=funk.strings_count, format_len=format_len, format_string=window_name)\n p = [x for x in range(self.index, self.index + 4)]\n self.code += \"\"\"\n ;;Call simple2D create window\n %{0} = alloca i32, align 4\n %{1} = alloca %struct.S2D_Window*, align 8\n store i32 0, i32* %{0}, align 4\n %{2} = call %struct.S2D_Window* @S2D_CreateWindow(i8* getelementptr inbounds ([{format_len} x i8], [{format_len} x i8]* @.str_{cnt}, i32 0, i32 0), i32 {width}, i32 {height}, void (...)* null, void (...)* bitcast (void ()* {callback_fn} to void (...)*), i32 0)\n store %struct.S2D_Window* %{2}, %struct.S2D_Window** %{1}, align 8\n %{3} = load %struct.S2D_Window*, %struct.S2D_Window** %{1}, align 8\n \n \"\"\".format(\n p[0], p[1], p[2], p[3], format_len=format_len, cnt=funk.strings_count, height=height, width=width,\n callback_fn='@s2d_render')\n\n funk.strings_count += 1\n self.index = p[-1] + 1\n return p[3]\n\n def s2d_draw_line(self, funk, args):\n if len(args) != 9:\n raise Exception('=== s2d_draw_line takes 9 parameters')\n\n x1, y1, x2, y2, r, g, b, alpha, width = args\n\n x1 = self.get_node_data_value(x1.eval(), as_type=funk_types.double)\n x2 = self.get_node_data_value(x2.eval(), as_type=funk_types.double)\n y1 = self.get_node_data_value(y1.eval(), as_type=funk_types.double)\n y2 = self.get_node_data_value(y2.eval(), as_type=funk_types.double)\n\n p = [x for x in range(self.index, self.index + 4)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n %{0} = fptrunc double {x1} to float\n %{1} = fptrunc double {y1} to float\n %{2} = fptrunc double {x2} to float\n %{3} = fptrunc double {y2} to float\n call void @S2D_DrawLine(float %{0}, float %{1}, float %{2}, float %{3}, float {width}, float {r}, float {g},float {b}, float {alpha}, float {r}, float {g}, float {b}, float {alpha}, float {r}, float {g}, float {b}, float {alpha}, float {r}, float {g}, float {b}, float {alpha})\n \"\"\".format(p[0], p[1], p[2], p[3], x1=x1, x2=x2, y1=y1, y2=y2,\n r=float(r.eval()), g=float(g.eval()), b=float(b.eval()), alpha=float(alpha.eval()),\n width=float(width.eval()))\n\n def s2d_draw_point(self, funk, args):\n if len(args) != 6:\n raise Exception('=== s2d_draw_point takes 6 parameters')\n\n x1, y1, r, g, b, alpha = args\n p = [x for x in range(self.index, self.index + 2)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n %{0} = fptrunc double {x1} to float\n %{1} = fptrunc double {y1} to float\n call void @S2D_DrawCircle(float %{0}, float %{1}, float 1.0, i32 4, float {r}, float {g}, float {b}, float {alpha})\n \"\"\".format(p[0], p[1], x1=x1, y1=y1, r=float(r), g=g, b=b, alpha=alpha)\n\n def s2d_render_callback(self, funk, args):\n if len(args) > 1:\n raise Exception('=== s2d_render_callback takes 0 or 1 parameters')\n\n if len(args) > 0:\n global_state = args[0].eval()\n self.code += \"\"\"\n ;; s2d_render_callback\n call void @set_s2d_user_global_state(%struct.tnode* {global_state})\n\n \"\"\".format(global_state=global_state)\n\n if funk.function_scope.name == 'main':\n self.add_comment('DUMMY CALL @s2d_render')\n p = [x for x in range(self.index, self.index + 1)]\n self.index = p[-1] + 1\n\n self.code += \"\"\"\n %{0} = call i32 @S2D_Show( %struct.S2D_Window * %{s2d_window})\n \"\"\".format(p[0], 
s2d_window=funk.window)\n\n def s2d_quad(self, funk, args):\n if len(args) != 12:\n raise Exception('=== s2d_quad takes 12 parameters')\n\n v = []\n for arg in args:\n if isinstance(arg, funk_ast.DoubleConstant):\n x = arg.eval()\n elif isinstance(arg, funk_ast.IntegerConstant):\n x = float(arg.eval())\n else:\n\n x = arg.eval()\n p = [i for i in range(self.index, self.index + 1)]\n self.code += \"\"\"\n %{0} = call float @funk_ToFloat(%struct.tnode* {x})\n \"\"\".format(p[0], x=x)\n x = '%{}'.format(p[-1])\n self.index = p[-1] + 1\n\n\n v.append(x)\n\n self.code += \"\"\"\n ;; s2d_quad\n call void @S2D_DrawQuad( float {x1}, float {y1},float {r}, float {g}, float {b}, float {alpha},float {x2}, \\\n float {y2},float {r}, float {g}, float {b}, float {alpha},float {x3}, float {y3},float {r}, float {g}, \\\n float {b}, float {alpha},float {x4}, float {y4},float {r}, float {g}, float {b}, float {alpha})\n \"\"\".format(\n x1=v[0], y1=v[1], x2=v[2], y2=v[3], x3=v[4], y3=v[5], x4=v[6],\n y4=v[7], r=v[8], g=v[9], b=v[10], alpha=v[11])\n\n def fread_list(self, funk, args, result):\n if len(args) != 1:\n raise Exception('=== fread_list takes 1 parameter')\n\n path = args[0].eval()\n format_len = len(path) + 1\n\n funk.preamble += \\\n \"\"\"\n@.str_{cnt} = private unnamed_addr constant [{format_len} x i8] c\"{format_string}\\00\", align 1\n \"\"\".format(cnt=funk.strings_count, format_len=format_len, format_string=path)\n\n p = [i for i in range(self.index, self.index + 1)]\n self.code += \"\"\"\n \n %{0} = call %struct.tnode* @funk_read_list_from_file(i8* getelementptr inbounds ([{format_len} x i8], [{format_len} x i8]* @.str_{cnt} , i32 0, i32 0))\"\"\".format(\n p[0], format_len=format_len, cnt=funk.strings_count)\n\n self.index = p[-1] + 1\n funk.strings_count += 1\n\n if result is not None:\n q = [i for i in range(self.index, self.index + 2)]\n self.code += \"\"\"\n %{0} = bitcast %struct.tnode* {src} to i8*\n %{1} = bitcast %struct.tnode* %{dst} to i8*\n call void @memcpy(i8* align 8 %{0}, i8* align 8 %{1}, i64 40, i1 false)\n \"\"\".format(q[0], q[1], src=result, dst=p[0])\n\n self.index = q[-1] + 1\n return '%{}'.format(p[0])\n\n def exit(self, funk, args):\n if len(args) != 0:\n raise Exception('=== exit takes 0 parameter')\n\n self.code += \"\"\"\n call void @funk_exit()\n \"\"\"\n\n def sleep(self, funk, args):\n\n if len(args) != 1:\n raise Exception('=== sleep takes 1 parameter')\n\n useconds = args[0]\n\n self.code += \"\"\"\n call void @funk_sleep(i32 {useconds})\n \"\"\".format(useconds=useconds.eval())\n\n\n","sub_path":"funk/funk_llvm_emitter.py","file_name":"funk_llvm_emitter.py","file_ext":"py","file_size_in_byte":35934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"347302227","text":"from builtins import all\r\n\r\n\r\nclass Process:\r\n name = ''\r\n arrivalTime = 0\r\n burstTime = 0\r\n periority = 0\r\n def __init__(self, name, arrivalTime , burstTime, periority):\r\n self.name = name\r\n self.arrivalTime = arrivalTime\r\n self.burstTime = burstTime\r\n self.periority = periority\r\n\r\ndef main():\r\n allProcess = []\r\n\r\n names = ['p1','p2','p3','p4','p5']\r\n arrivalTimeList = [2,4,0,3,0]\r\n burstTimeList = [6,8,7,3,7]\r\n peroirityList = [4,1,3,2,5]\r\n\r\n for i in range(5):\r\n temp = Process(names[i],arrivalTimeList[i],burstTimeList[i], peroirityList[i])\r\n allProcess.append(temp)\r\n\r\n temp = allProcess.copy()\r\n\r\n que = []\r\n outPut = []\r\n curTime = 0\r\n isPreemptive = True\r\n min = 0\r\n index = 
0\r\n while len(temp) > 0 or len(que) > 0:\r\n\r\n if(len(temp) != 0):\r\n for i in range(len(allProcess)):\r\n if curTime == allProcess[i].arrivalTime:\r\n que.append(allProcess[i])\r\n del(temp[0])\r\n if len(outPut) == 0 and len(que) != 0 and isPreemptive:\r\n isPreemptive = False\r\n min = que[0].periority\r\n index = 0\r\n for j in range(1,len(que)):\r\n if min > que[j].periority:\r\n min = que[j].periority\r\n index = j\r\n print('Current Time ' + str(curTime) + ' index is ' + str(index) + ' Process is '+ que[index].name + ' Burst Time is '+ str(que[index].burstTime ) + ' Periority is '+ str(que[index].periority))\r\n if len(que) > 0:\r\n if que[index].burstTime != 0:\r\n que[index].burstTime -= 1\r\n if que[index].burstTime == 0:\r\n outPut.append(que[index])\r\n del(que[index])\r\n if(len(que) > 0):\r\n min = que[0].periority\r\n index = 0\r\n for i in range(1,len(que)):\r\n if min > que[i].periority:\r\n min = que[i].periority\r\n index = i\r\n\r\n curTime += 1\r\n\r\n\r\n print('Current Time Graph ' + str(curTime))\r\n\r\n for i in range(len(outPut)):\r\n print (outPut[i].name)\r\n\r\n print(\"End Of the Program.\\n\")\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"PerioritySchedulingNonPreemptive.py","file_name":"PerioritySchedulingNonPreemptive.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"214079242","text":"def inorderSuccessor(self, root, p):\n if root is None or p is None:\n return None\n\n if root.val <= p.val:\n return self.inorderSuccessor(root.right, p)\n else:\n left = self.inorderSuccessor(root.left, p)\n if left is None:\n return root\n else:\n return left","sub_path":"Tree/DFS/inorder_successor_in_BST.py","file_name":"inorder_successor_in_BST.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"517131810","text":"import re\nimport spacy\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom typing import List\n\nfrom . 
import FUN_FACT_TITLE_CSV, TIL_TITLE_CSV, YSK_TITLE_CSV, REQUIRED_COLUMNS, BANNED_SUBREDDITS, TOKENIZATION_REGEX\n\nEPS = 1e-6\n\nclass WeightedEmbeddingSearch:\n\n def __init__(self):\n print(\"Loading data csv\")\n fun_fact_title_data = pd.read_csv(FUN_FACT_TITLE_CSV, usecols=REQUIRED_COLUMNS).dropna()\n til_title_data = pd.read_csv(TIL_TITLE_CSV, usecols=REQUIRED_COLUMNS).dropna()\n ysk_title_data = pd.read_csv(YSK_TITLE_CSV, usecols=REQUIRED_COLUMNS).dropna()\n\n title_data = pd.concat([\n fun_fact_title_data,\n til_title_data,\n ysk_title_data,\n ], join='inner').reset_index(drop=True)\n\n print(\"Computing tf-idf matrix\")\n self.vectorizer = TfidfVectorizer(stop_words='english', dtype=np.float32)\n tfidf_matrix = self.vectorizer.fit_transform(title_data[\"title\"])\n\n print(\"Loading spacy\")\n self.nlp = spacy.load('en_core_web_lg')\n\n print(\"Computing weighted embeddings\")\n features = self.vectorizer.get_feature_names()\n self.f_vectors = np.array([self.nlp.vocab[f].vector for f in features])\n weighted_embeddings = tfidf_matrix.dot(self.f_vectors)\n assert weighted_embeddings.shape == (len(title_data.index), 300)\n self.n_weighted_embeddings = weighted_embeddings / (np.linalg.norm(weighted_embeddings, axis=1)[:, np.newaxis] + EPS)\n\n print(\"Compressing pandas dataframe into index\")\n self.index = list(title_data.itertuples())\n\n print(\"Done loading {} rows\".format(len(title_data.index)))\n\n def _compute_query_embedding(self, query):\n query_tfidf = self.vectorizer.transform([query])\n if query_tfidf.count_nonzero() > 0:\n query_weighted = query_tfidf.dot(self.f_vectors).flatten()\n # average word embeddings if query words don't exist in our corpus (tfidf matrix)\n else:\n # query was all stopwords, so we'll have to manually tokenize\n tokens = TOKENIZATION_REGEX.findall(query.lower())\n query_weighted = np.average([self.nlp.vocab[t].vector for t in tokens], axis=0).flatten()\n return query_weighted\n\n def search(self, query, top=10):\n query_weighted = self._compute_query_embedding(query)\n # if we have no embeddings for the given query, we're out of luck\n if np.count_nonzero(query_weighted) == 0:\n return []\n\n n_query_weighted = query_weighted / (np.linalg.norm(query_weighted) + EPS)\n rankings = self.n_weighted_embeddings.dot(n_query_weighted)\n rankings_idx = np.argsort(-rankings)[:top]\n return self._format_results(rankings_idx)\n\n def _format_results(self, doc_ids: List[int]):\n results = [\n {\n \"type\": \"submission\",\n \"title\": self.index[d].title,\n \"subreddit\": self.index[d].subreddit,\n \"permalink\": self.index[d].permalink,\n \"score\": self.index[d].score,\n }\n for d in doc_ids\n ]\n return results\n","sub_path":"app/irsystem/models/search/weighted_embedding.py","file_name":"weighted_embedding.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"223000007","text":"from __future__ import unicode_literals\n\nimport json\n\nfrom moto.core.responses import BaseResponse\nfrom .models import glue_backend\n\n\nclass GlueResponse(BaseResponse):\n\n @property\n def glue_backend(self):\n return glue_backend\n\n @property\n def parameters(self):\n return json.loads(self.body)\n\n def create_database(self):\n database_name = self.parameters['DatabaseInput']['Name']\n self.glue_backend.create_database(database_name)\n return \"\"\n\n def get_database(self):\n database_name = self.parameters.get('Name')\n database = 
self.glue_backend.get_database(database_name)\n return json.dumps({'Database': {'Name': database.name}})\n\n def create_table(self):\n database_name = self.parameters.get('DatabaseName')\n table_input = self.parameters.get('TableInput')\n table_name = table_input.get('Name')\n self.glue_backend.create_table(database_name, table_name, table_input)\n return \"\"\n\n def get_table(self):\n database_name = self.parameters.get('DatabaseName')\n table_name = self.parameters.get('Name')\n table = self.glue_backend.get_table(database_name, table_name)\n return json.dumps({\n 'Table': {\n 'DatabaseName': table.database_name,\n 'Name': table.name,\n 'PartitionKeys': table.partition_keys,\n 'StorageDescriptor': table.storage_descriptor\n }\n })\n\n def get_tables(self):\n database_name = self.parameters.get('DatabaseName')\n tables = self.glue_backend.get_tables(database_name)\n return json.dumps(\n {\n 'TableList': [\n {\n 'DatabaseName': table.database_name,\n 'Name': table.name,\n 'PartitionKeys': table.partition_keys,\n 'StorageDescriptor': table.storage_descriptor\n } for table in tables\n ]\n }\n )\n","sub_path":"moto/glue/responses.py","file_name":"responses.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"426852694","text":"import numpy as np\nimport librosa\n\n\nclass neuron:\n def __init__(self, countInputNeurons):\n coeffic = 1 / np.sqrt(countInputNeurons)\n self.weights = np.random.uniform(0.5 - coeffic, 0.5 + coeffic, countInputNeurons)\n\n def distance(self, x):\n return np.sqrt(np.sum((x - self.weights) ** 2))\n\n def update_weights(self, x, learning_rate):\n self.weights = [self.weights[i] + learning_rate * (x[i] - self.weights[i]) for i in range(len(self.weights))]\n\n\nclass NN:\n def __init__(self, countInputNeurons, countClusters, learning_rate):\n self.learning_rate = learning_rate\n self.neurons = [neuron(countInputNeurons) for i in range(countClusters)]\n\n def train(self, X, countEpochs):\n for epoch in range(countEpochs):\n print(\"epoch : \", epoch)\n indexes = list(np.arange(len(X)))\n while len(indexes) != 0:\n rand_index = np.random.choice(indexes)\n x = X[rand_index]\n indexes.remove(rand_index)\n distances = [neuron.distance(x) for neuron in self.neurons]\n winner_neuron = self.neurons[int(np.argmin(distances))]\n winner_neuron.update_weights(x, self.learning_rate)\n self.learning_rate = self.learning_rate - 0.05\n\n def test(self, x):\n distances = [neuron.distance(x) for neuron in self.neurons]\n return np.argmin(distances)\n\n\ndef SplitAudio(data, sr, window_ms, margin_ms):\n print(\"SplitAudio...\")\n partsAudio = []\n stepWindow = int((sr / 1000) * window_ms)\n stepMargin = int((sr / 1000) * margin_ms)\n # count_step = math.ceil(len(data) / stepMargin)\n for i in range(0, len(data), stepMargin):\n partAudio = np.array(data[i:i + stepWindow])\n if len(partAudio) == stepWindow:\n partsAudio.append(partAudio)\n return partsAudio\n\n\ndef toMel(f):\n return 1127 * np.log(1 + f / 700)\n\n\ndef toBark(f):\n return 8.96 * np.log(0.978 + 5 * np.log(0.994 + pow((f + 75.4) / 2173, 1.347)))\n\n\ndef getFeature(data):\n mfcc = np.array(librosa.feature.mfcc(data, sr, n_mfcc=12)).flatten()\n freqs = np.abs(np.fft.fft(data))**2\n mels = [toMel(f) for f in freqs]\n mel = (np.max(mels) + np.min(mels))/2\n res = np.append([], mfcc)\n return res\n\n\npath = \"C:/Users/Ibrag/Desktop/Diplom/Test5/Audio/\"\nname = \"FilteredAudiotest5.wav\"\ndata, sr = librosa.load(path + name)\nparts = 
SplitAudio(data, sr, 30, 30)\nfeatures = []\nfor part in parts:\n    # print(getFeature(part).shape)\n    features.append(getFeature(part))\n\n\nprint(np.array(features).shape)\nnn = NN(np.array(features).shape[1], 2, 0.6)\nnn.train(features, 1000)\n\nfor part in parts:\n    print(nn.test(getFeature(part)))\n\n","sub_path":"Code Python/gmm_ubm/attempt1000/NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"122691173","text":"import serial\n\nserialPort = serial.Serial(port = \"/dev/tty.usbmodem00000356041\", baudrate=115200, bytesize=8, timeout=2, stopbits=serial.STOPBITS_ONE)\n\nserialString = \"\"                           # Used to hold data coming over UART\nserialPort.write(b\"0x03\")  # write() takes bytes on Python 3\n# Wait until there is data waiting in the serial buffer\nif(serialPort.in_waiting > 0):\n    # Read data out of the buffer until a carriage return / new line is found\n    serialString = serialPort.readline()\n    # Print the contents of the serial data\n    print(serialString.decode('Ascii'))","sub_path":"Bread_Board_Z80_Project/comm_app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"347346562","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 20 14:50:44 2019\n\n@author: Lucia\n\"\"\"\n\nimport numpy as np\nimport time\n\nimport os\n\n#import Gen_Scope_Classes as Gen_Scope\nimport Pyxi.FileModule as FileMod\nimport Pyxi.DataAcquisition as DataAcq\n\nif __name__ == '__main__':\n    \n    # File to save\n    Dictname =\"F:\\\\Dropbox (ICN2 AEMD - GAB GBIO)\\\\PyFET\\\\LuciaScripts\\\\Lucia\\\\DCTests\\\\RTest_Normal\"\n    FileName = Dictname +'.h5'\n    \n    if os.path.isfile(FileName):\n        print('Remove File')\n        os.remove(FileName)\n\n    # Calculations for the scope\n    GenFs = 20e6 # the generation Fs is needed here to ensure that it is a multiple of FsScope\n    ScopeFs = 1e6\n    nFs = round(GenFs/ScopeFs)\n    ScopeFs = GenFs/nFs\n    tFetch = 0.3\n    NumFetch = 1\n    BufferSize = round(tFetch*ScopeFs)\n    tFetch = BufferSize/ScopeFs\n    ScopeOffset = int(ScopeFs*6)\n    # Example Rows below: do not delete\n#    Rows = [('Row1', 0), ('Row2', 1), ('Row3', 2), ('Row4', 3), ('Row5', 4), ('Row6', 5), ('Row7', 6), ('Row8', 7)]\n    Rows = [('Row1', 0),('Row2', 1), ('Row3', 2), ('Row4', 3), ('Row5', 4), ('Row6', 5), ('Row7', 6), ('Row8', 7)]\n    RowsArray = []\n    rangeScope = 1 #options 0.05, 0.2, 1, 6, 30\n    LSB = rangeScope/(2**16)\n    PCBGain = 10e3\n    MaxFileSize = 10000e6\n#    dtype = 'int16'\n    dtype = 'float'\n    \n    FileBuf = FileMod.FileBuffer(FileName=FileName,\n                                 MaxSize=MaxFileSize,\n                                 nChannels=len(Rows),\n                                 dtype=dtype) \n    \n    RowsConfig = {}\n    for row in Rows:\n        RowsConfig[row[0]] = {}\n        RowsArray.append(row[1])\n        RowsConfig[row[0]]['Enable'] = True\n        RowsConfig[row[0]]['Index'] = row[1]\n        RowsConfig[row[0]]['AcqVRange'] = rangeScope\n    \n    # Data to create ColsConfig and call \"Columns()\" \n    # Modify Cols according to the generators you want to use\n#    #Cols = (('Col1', 'PXI1Slot2', 0, 0), \n#            ('Col2', 'PXI1Slot2', 1, 1), \n#            ('Col3', 'PXI1Slot3', 0, 2), \n#            ('Col4', 'PXI1Slot3', 1, 3)\n##            )   -- e.g. standard form with all Cols\n    Cols = (('Col1', 'PXI1Slot2', 0, 0), \n            ('Col2', 'PXI1Slot2', 1, 1), \n            ('Col3', 'PXI1Slot3', 0, 2), \n            ('Col4', 'PXI1Slot3', 1, 3)\n            )    \n    numSweeps = 10\n    GenSize = 20e3\n    Ts = 1/GenFs\n    t = np.arange(0, Ts*GenSize, Ts)   \n    CMVoltage = -0.15\n    # Calculations for the generator\n    # define the Fc values to be used\n    Fc=np.array([70e3, 85e3, 100e3, 115e3])\n#    Ph = np.array([0, 0, 0, 0])\n    Ph = np.array([144.596, -45.1778, -125.836, -110.565])\n    for ind, f in enumerate(Fc):\n        nc = round((GenSize*f)/GenFs)\n        Fc[ind] = (nc*GenFs)/GenSize\n    \n    A=np.ndarray((numSweeps, 4))\n    A[:,0] = np.linspace(0.01,0.1, num=numSweeps) # sweep for Col1\n    A[:,1] = np.linspace(0.01,0.1, num=numSweeps) # sweep for Col2\n    A[:,2] = np.linspace(0.01,0.1, num=numSweeps) # sweep for Col3\n    A[:,3] = np.linspace(0.01,0.1, num=numSweeps) # sweep for Col4\n    \n    # A ColsConfig is created that will be configured with the correct frequency for each sweep\n#    ColsConfig={'Col1':{'Frequency': Fc0,\n#                        'amplitude': A[0],\n#                        'Gain': 0.5,\n#                        'Resource': 'PXI1Slot2'\n#                        'Index':0},\n#                'Col1':{'Frequency': Fc0,\n#                        'amplitude': A[0],\n#                        'Gain': 0.5,\n#                        'Resource': 'PXI1Slot2'\n#                        'Index':0},                \n#                }\n    ColsConfig={}\n    for Col in Cols:\n        ColsConfig[Col[0]]={'Frequency': Fc[Col[3]],\n                            'Phase': Ph[Col[3]],\n                            'Amplitude': 0,\n                            'Gain': 0, #2*Amplitude\n                            'Resource':Col[1],\n                            'Index': Col[2]}\n\n    # Fetching    \n    InFetch = np.ndarray((BufferSize, len(Rows)), dtype=dtype)\n#    InFetchInt = np.ndarray((BufferSize, len(Rows)), dtype=dtype)\n    \n    Procs = {}\n    demind = 0\n    \n    for SweepInd, Ac in enumerate(A):\n        dsetname = 'Sw{0:03d}'.format(SweepInd)\n        \n        for Col in Cols:\n            ColsConfig[Col[0]]['Amplitude']=Ac[Col[3]]\n            ColsConfig[Col[0]]['Gain']=2*Ac[Col[3]]\n        ACqSet = DataAcq.DataAcquisition(ColsConfig=ColsConfig, \n                                         FsGen=GenFs, \n                                         GenSize=GenSize,\n                                         RowsConfig=RowsConfig,\n                                         FsScope=ScopeFs,\n                                         GainBoard=PCBGain,\n                                         ResourceScope='PXI1Slot4')\n        \n        ACqSet.stopSessions() \n        ACqSet.setSignals(ColsConfig=ColsConfig,\n                          Vcm=CMVoltage) \n        ACqSet.initSessions()\n        \n        FileBuf.InitDset(dsetname)\n        InFetch, LSB = ACqSet.GetData(BufferSize=BufferSize,\n                                      channels=RowsArray,\n                                      OffsetRows=ScopeOffset,\n                                      dtype=dtype)\n\n        FileBuf.AddSample(InFetch)\n        for nr in range(len(Rows)):\n            for col in Cols:\n                ProcsArgs = {'dset': dsetname,\n                             'dInd': nr,  # Row index inside dataset\n                             'col': col[0],\n                             'cInd': col[3],\n                             'Fc': Fc[col[3]],\n                             'Ac': Ac[col[3]],\n                             'Fs': ScopeFs,\n                             'BuffSize': BufferSize,\n                             'GenFs': GenFs,\n                             'GenSize': GenSize, \n                             'Samps': GenSize/(GenFs/ScopeFs),  # DemOscSize\n                             'Vgs' : CMVoltage,\n                             'Gain': PCBGain,\n                             'LSB': LSB,\n                             'PhaseFc': Ph\n                             }\n                \n                Demkey = 'Dem{0:03d}'.format(demind)\n                demind += 1 \n                Procs[Demkey] = ProcsArgs\n        \n        \n    FileBuf.close()\n    ACqSet.stopSessions() \n    \n    FileMod.GenArchivo(name=Dictname, dic2Save=Procs)\n    \n    \n","sub_path":"PyFreqMux/Sweeps/AcSweepInt16.py","file_name":"AcSweepInt16.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"64846777","text":"\"\"\"\n    Fetch the table-of-contents information of theses\n\"\"\"\nimport pickle\nimport time\nfrom argparse import ArgumentParser\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nap = ArgumentParser()\nap.add_argument(\"-i\", \"--input\", required=True, help=\"input file name\")\nap.add_argument(\"-o\", \"--output\", required=True, help=\"output file name\")
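\n# The input pickle is produced by get_items.py; records that yield a table of contents gain a \"toc\" key before being dumped to the output pickle.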
\nargs = vars(ap.parse_args())\n\nchrome_options = Options()\nchrome_options.add_argument(\"--incognito\")\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument('blink-settings=imagesEnabled=false')\ndriver = webdriver.Chrome(options=chrome_options)\n\n# Load the search result records produced by get_items.py\nwith open(args[\"input\"], \"rb\") as f:\n    results = pickle.load(f)\ny = 0\ntry:\n    url = \"https://kns.cnki.net/kns/brief/result.aspx?dbprefix=CDMD\"\n    driver.get(url)\n\n    # Switch to the \"professional search\" tab\n    WebDriverWait(driver, 10).until(\n        EC.element_to_be_clickable((By.CSS_SELECTOR, \"#\\\\31 _4\"))).click()\n    time.sleep(2)\n\n    for i, r in enumerate(results):\n        # Search simply by title and author\n        try:\n            cmd = f\"TI={r['title']} AND AU={r['author']}\"\n\n            # Make at most 3 retrieval attempts\n            for try_times in range(3):\n                try:\n                    # Fill in the search box and run the search\n                    driver.execute_script(\n                        f\"document.querySelector('#expertvalue').value='{cmd}'\"\n                    )\n                    driver.execute_script(\n                        \"document.querySelector('#btnSearch').click();\")\n\n                    time.sleep(2)\n\n                    # Switch to the search-result frame\n                    driver.switch_to.frame(\"iframeResult\")\n\n                    # Click the result link to open the detail page\n                    driver.execute_script(\n                        \"document.querySelector('#ctl00 > table > tbody > tr:nth-child(2) > td > table > tbody > tr:nth-child(2) > td:nth-child(2) > a').click();\"\n                    )\n\n                    # Switch to the detail page\n                    driver.switch_to.window(driver.window_handles[-1])\n\n                    # Click through to the table-of-contents page\n                    driver.execute_script(\n                        \"document.querySelector('#DownLoadParts > a:nth-child(3)').click();\"\n                    )\n\n                    # Switch to the table-of-contents page\n                    driver.switch_to.window(driver.window_handles[-1])\n\n                    # Grab the table of contents\n                    results[i][\"toc\"] = driver.execute_script(\n                        \"return document.querySelector('body > div.wrapper.section1 > div.trends > div > table').innerText\"\n                    )\n\n                    # Close this search's detail and TOC pages\n                    driver.close()\n                    driver.switch_to.window(driver.window_handles[-1])\n                    driver.close()\n\n                    # Return to the search page\n                    driver.switch_to.window(driver.window_handles[-1])\n                    break\n                except:\n                    # On failure, reopen the browser and run the search again\n                    print(f\"Retry: {try_times}\")\n                    driver.quit()\n                    driver = webdriver.Chrome(options=chrome_options)\n                    url = \"https://kns.cnki.net/kns/brief/result.aspx?dbprefix=CDMD\"\n                    driver.get(url)\n                    WebDriverWait(driver, 10).until(\n                        EC.element_to_be_clickable(\n                            (By.CSS_SELECTOR, \"#\\\\31 _4\"))).click()\n                    time.sleep(2)\n\n            info = f\"{r['year']} - {r['school']} - {r['title']} - {r['author']} - {r['degree']}\"\n            if \"toc\" in r:\n                y += 1\n                print(f\"[No.{i:04d}] [Y] - [{info}]\")\n            else:\n                print(f\"[No.{i:04d}] [N] - [{info}]\")\n        except:\n            pass\n    driver.quit()\nexcept:\n    print(f\"[Done!] Succeed: {y} items\")
\n    with open(args[\"output\"], \"wb\") as f:\n        pickle.dump(results, f)","sub_path":"CNKI_spider/v1/get_toc.py","file_name":"get_toc.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"198797184","text":"# -*- coding: utf-8 -*-\n# Author: haoyu\n\nfrom sanic import Sanic\nfrom sanic.response import json\n\n\napp = Sanic()\n\n@app.route('/')\nasync def test(request):\n    return json({'msg': 'hello, i am listening on port 8080'})\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", port=8080, debug=True)\n","sub_path":"docker/sanic_web/sanic_8080.py","file_name":"sanic_8080.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"419460016","text":"from sharadar.live.brokers.ib_broker import TWSConnection\nimport os\n\ntws = TWSConnection('localhost:4002:123')\ntws.bind()\n\nif tws.isConnected():\n    tws.disconnect()\n    os._exit(os.EX_OK)\nelse:\n    os._exit(os.EX_IOERR)\n\n\n\n\n\n\n\n\n\n","sub_path":"sharadar/util/ib_checkhealth.py","file_name":"ib_checkhealth.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"239956351","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 30 14:56:38 2021\n\n@author: HaoLI\n\"\"\"\n# evaluate gradient boosting algorithm for classification\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom numpy import mean\nfrom numpy import std\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.metrics import roc_curve, auc, roc_auc_score ### compute ROC and AUC\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler, LabelEncoder\nimport time\nimport datetime\nfrom imblearn.over_sampling import RandomOverSampler\n\n# check and set the working directory\nos.getcwd()\n#os.chdir('/Users/HaoLI/Dropbox/FinTech/raw_data')\nos.chdir('/Users/HaoLI/Stata/credit/data')\ndf = pd.read_csv('data1210rename_use.csv')\ncol_names = list(df.columns.values[3:30]) \ncol_names.remove('default_geq_1') # X must not contain the target variable y\ncol_names.remove('default_geq_2')\ncol_names.remove('default_geq_3')\nbase_col_names = col_names[0:13] # for the baseline model: only bank data + time-of-day, without consumption data\ndf_fillna = df.fillna(0) # fill NA with 0: no recorded consumption counts as 0
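\n# Editorial sanity check (assumption: 'default_geq_1' is the binary target, as used below);\n# this shows the class imbalance that motivates RandomOverSampler inside the training loop.\nprint(df_fillna['default_geq_1'].value_counts(normalize=True))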
\nX = df_fillna[col_names]\ny = df_fillna.default_geq_1 # Target variable\nX_base = df_fillna[base_col_names]\ny_base = df_fillna.default_geq_1 # Target variable\n\n\n#numerical_columns=['id', 'loanAmnt', 'term', 'interestRate', 'installment', 'employmentTitle', 'homeOwnership']\n\n#Specifying the parameter\nn_estimators=100\nlearning_rate=0.1\nmax_depth=6\nnum_leaves=16\nfeature_fraction=1\nbagging_fraction=1\nverbosity=20\nnum_boost_round=20000\nverbose_eval=1000\nearly_stopping_rounds=200\nreg_alpha=2\nreg_lambda=15 \n\nlist_rec = [] # record the parameters and AUC of every run\nlist_feature_importance = []\nfor random_state in range(0,20):\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30, random_state = random_state)\n    # If random_state = None (the default), a random seed is chosen, so every run gives a different split. Setting the same random_state value lets anyone who reruns the code get exactly the same split and reproduce the same results.\n    X_base_train, X_base_test, y_base_train, y_base_test = train_test_split(X_base, y_base, test_size = 0.30)\n    ros = RandomOverSampler(random_state=0)\n    X_train, y_train = ros.fit_resample(X_train, y_train)\n    X_base_train, y_base_train = ros.fit_resample(X_base_train, y_base_train)\n    #min_max_scaler = MinMaxScaler()\n    #X_train = min_max_scaler.fit_transform(X_train)\n    #X_test = min_max_scaler.fit_transform(X_test)\n    #sc = StandardScaler()\n    #X_train = sc.fit_transform(X_train)\n    #X_test = sc.fit_transform(X_test)  \n    \n    #converting the dataset into proper LGB format \n    train_matrix=lgb.Dataset(X_train, label=y_train)\n    valid_matrix= lgb.Dataset(X_test, label=y_test)\n\n    params = {\n        'boosting_type': 'gbdt',\n        'objective': 'binary',\n        'metric': 'auc',\n        \"n_estimators\":n_estimators,\n        'learning_rate': learning_rate,# smaller learning rate, larger number of trees\n        'max_depth': max_depth,# maximum tree depth, to prevent overfitting\n        'num_leaves': num_leaves,\n        'feature_fraction': feature_fraction, # use all features for every tree\n        'bagging_fraction': bagging_fraction,\n    }\n    classifier=lgb.train(params, train_set=train_matrix, valid_sets=valid_matrix, num_boost_round=num_boost_round, verbose_eval=verbose_eval, early_stopping_rounds=early_stopping_rounds)\n    \n    ax=lgb.plot_importance(classifier, figsize=(15,15))\n    plt.show() \n    importance = classifier.feature_importance(importance_type='split')\n    feature_name = col_names\n    importance = importance/sum(importance)\n    list_feature_importance.append(importance)\n\n    # use trained model and testing data to predict\n    y_train_pred = classifier.predict(X_train)\n    y_test_pred=classifier.predict(X_test)\n    #### ROC curve and Area-Under-Curve (AUC)\n    train_fpr, train_tpr, tr_thresholds = roc_curve(y_train, y_train_pred)\n    test_fpr, test_tpr, te_thresholds = roc_curve(y_test, y_test_pred)\n    print(auc(train_fpr, train_tpr))\n    print(auc(test_fpr, test_tpr))\n    \n    plt.figure(figsize=(12,6))\n    lgb.plot_importance(classifier)\n    plt.title(\"Feature Importances\")\n    plt.show()\n    \n    plt.grid()\n    plt.plot(train_fpr, train_tpr, label=\" AUC TRAIN =\"+str(auc(train_fpr, train_tpr)))\n    plt.plot(test_fpr, test_tpr, label=\" AUC TEST =\"+str(auc(test_fpr, test_tpr)))\n    plt.plot([0,1],[0,1],'g--')\n    plt.legend()\n    plt.xlabel(\"False Positive Rate\")\n    plt.ylabel(\"True Positive Rate\")
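\n    # Editorial sketch (uses test_fpr/test_tpr/te_thresholds computed above): one common way to\n    # pick an operating threshold from this ROC curve is to maximize Youden's J = TPR - FPR.\n    youden_j = test_tpr - test_fpr\n    print(\"Youden-J optimal threshold:\", te_thresholds[np.argmax(youden_j)])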
\n    t = ''' \n    n_estimators = %s, learning_rate = %s, max_depth = %s\n    num_leaves = %s, feature_fraction = %s, bagging_fraction = %s\n    verbosity = %s, num_boost_round = %s\n    verbose_eval = %s, early_stopping_rounds = %s, random_state = %s\n    '''%(n_estimators,learning_rate,max_depth, num_leaves, feature_fraction, \n         bagging_fraction, verbosity, num_boost_round, \n         verbose_eval, early_stopping_rounds, random_state) \n    plt.title(\"AUC(LightGBM ROC curve)\"+t)\n    plt.grid(color='black', linestyle='-', linewidth=0.5)\n    time1 = datetime.datetime.now()\n    # format the current time to use as the file name\n    time2 = time1.strftime('%Y-%m-%d-%H%M%S')\n    plt.savefig(\"/Users/HaoLI/Stata/credit/out/ROC figure/Figure_\"+time2+\".png\", bbox_inches = 'tight') \n    plt.show()\n    list_rec.append([auc(train_fpr, train_tpr), auc(test_fpr, test_tpr),\n                     n_estimators,\n                     learning_rate,\n                     max_depth,\n                     num_leaves,\n                     feature_fraction,\n                     bagging_fraction,\n                     verbosity,\n                     num_boost_round,\n                     verbose_eval,\n                     early_stopping_rounds,\n                     random_state\n                     ])\n\nlist_rec_1 = list_rec\ndf = pd.DataFrame(list_rec, columns = ['IS_AUC','OOS_AUC', 'n_estimators',\n                     'learning_rate',\n                     'max_depth',\n                     'num_leaves',\n                     'feature_fraction',\n                     'bagging_fraction',\n                     'verbosity',\n                     'num_boost_round',\n                     'verbose_eval',\n                     'early_stopping_rounds',\n                     'random_state'])\ndf.to_csv('lightGBM_AUC.csv')\n\nlist_feature_importance_1 = list_feature_importance\nfeature_importance = pd.DataFrame(list_feature_importance, columns = col_names )\nfeature_importance.to_csv('lightGBM_feature_importance.csv',index=False)\n","sub_path":"lgb_run.py","file_name":"lgb_run.py","file_ext":"py","file_size_in_byte":7051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"60036307","text":"import random\nimport logging\n# KlibsTesting Param overrides\n#\n# Any param that is commented out by default is either deprecated or else not yet implemented--don't uncomment or use\n#\n#########################################\n# Logging Defaults\n#########################################\nlog_to_file = True\nlevel = logging.INFO\n\n#########################################\n# Display Settings\n#########################################\nadditional_displays = []\nscreen_origin = (0,0) # always (0,0) unless multiple displays in use\n#\n#########################################\n# Available Hardware\n#########################################\neye_tracker_available = True\neye_tracking = True\nlabjack_available = False\nlabjacking = False\n#\n#########################################\n# Environment Aesthetic Defaults\n#########################################\ndefault_fill_color = (0, 0, 0, 255)\ndefault_color = (255, 255, 255, 255)\ndefault_response_color = default_color\ndefault_input_color = default_color\ndefault_font_size = 28\ndefault_font_name = 'Frutiger'\ndefault_timeout_message = \"Too slow!\"\n#\n#########################################\n# EyeLink Sensitivities\n#########################################\nview_distance = 57 # in centimeters; at 57 cm, 1 cm on screen subtends about 1 degree of visual angle\nsaccadic_velocity_threshold = 20\nsaccadic_acceleration_threshold = 5000\nsaccadic_motion_threshold = 0.15\n#\nfixation_size = 1 # deg of visual angle\nbox_size = 1 # deg of visual angle\ncue_size = 1 # deg of visual angle\ncue_back_size = 1 # deg of visual angle\n#\n#########################################\n# Experiment Structure\n#########################################\nmulti_session_project = False\ncollect_demographics = True\nmanual_demographics_collection = False\npracticing = False\ntrials_per_block = 24\nblocks_per_experiment = 1\ntrials_per_participant = 0\ntable_defaults = {}\n#\n#########################################\n# Development Mode Settings\n#########################################\ndm_suppress_debug_pane = False\ndm_auto_threshold = True\ndm_trial_show_mouse = True\n\n#\n#########################################\n# Data Export 
Settings\n#########################################\ndata_columns = None\ndefault_participant_fields = [[\"userhash\", \"participant\"], \"sex\", \"age\", \"handedness\"]\ndefault_participant_fields_sf = [[\"userhash\", \"participant\"], \"random_seed\", \"sex\", \"age\", \"handedness\"]\n\n\n#\n#########################################\n# PROJECT-SPECIFIC VARS\n#########################################\nsaccade_response_cond = False\nkeypress_response_cond = True\noffset_size = 3","sub_path":"ExpAssets/Config/MixedMotionCueingEffects_params.py","file_name":"MixedMotionCueingEffects_params.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"154503920","text":"#!/usr/bin/python\n#coding:utf-8\n\n# フレームにラベルを配置するサンプル\n\nimport Tkinter as Tk\n\nl = Tk.Label(None,text='yahoo',bg='yellow')\nl.pack()\nl.configure(anchor=Tk.W) #テキストを配置する位置\nl.config(bd = 10.0) # 縁の幅\nl.config(font=('Times', '18')) # フォントの種類とサイズ\nl.config(fg='green') # フォントの色\nl.config(height=2,width=10) # ラベルの高さ(行数)と幅(文字数)\nl.config(padx=2,pady=2) # デフォルトは1pixel. 縁とテキストとの間の余白\nl.config(relief=Tk.RAISED) # ヘリの形\nl.config(relief=Tk.SUNKEN)\nl.config(relief=Tk.GROOVE)\nl.config(relief=Tk.RIDGE)\nl.config(text='google') # 文字\nl.config(underline=2) # アンダーラインを引く 引数にリストは使えない\nl.config(wraplength=3) # 改行幅\nl.mainloop()\n\n\n# Tk.Label(master, **options)\n# 最初の引数で親 widget を指定し、キーワード引数を続けます。\n# Label の option Tkinter reference: 10. The Label widget や、 The Tkinter Label Widget も参照\n# anchor\tテキストを配置する位置を指定。デフォルトは Tk.CENTER N, NE, E, SE, S, SW, W, NW, or CENTER\n# bg or background\t背景色を指定\n# bitmap\t表示する bitmap を指定\n# bd or borderwidth\t縁の幅を指定。デフォルトは 2.\n# cursor\tカーソルの形を指定。\n# font\tテキストを表示する際のフォントを指定。\n# fg or foreground\tフォントの色、または bitmap の色を指定\n# height\tラベルの高さを行数で指定。pixel 単位でないことに注意\n# image\tラベルに貼る image を指定します。\n# justify\tテキストが複数行にわたる場合、左寄せ、右寄せ、中央寄せを指定\n# padx\t縁とテキストとの間の横の余白。\n# pady\t縁とテキストとの間の縦の余白。\n# relief\tヘリの形 Tk.FLAT(default), Tk.RAISED, Tk.SUNKEN, Tk.GROOVE, Tk.RIDGE\n# takefocus\t普通は Label には focus はない。もし、focus を持ってきたければ、takefocus を 1 にする。\n# text\t表示する文字列\n# textvariable\t StringVar を指定。変化する文字列を表示するとき用いる。\n# underline\t (0 から数え始めて) n 番目の文字にアンダーラインをつける。デフォルトは -1.\n# width\tLabel の幅、文字数単位。pixel 単位ではない。\n# wraplength\t 改行幅\n\n# configure()による属性の変更\n# configure(属性名=属性値)\n","sub_path":"library/stdlib/tkinter/Label.py","file_name":"Label.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"194207098","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mooda import WaterFrame\n\ndates = pd.date_range(start='20180101', end='20180131', freq='T')\n\n# data = Sinus creation with noise and trend\nfs = dates.size\nprint(fs)\nx = np.arange(fs)\n# Signal\nf = 30\nvalues = [5 + np.sin(2*np.pi*f * (i/fs)) for i in x]\n# noise\nnoise = np.random.randn(dates.size)/12\n# Trend\nf_trend = 3\nvalues_trend = [2 * np.sin(2*np.pi*f_trend * (i/fs)) for i in x]\n\ndata = values + noise + values_trend\n\n# Spikes\nspike_values = []\nfor i in x:\n spike = np.random.randn()\n if spike > 3.8 or spike < -3.8:\n spike_values.append(3*spike)\n else:\n spike_values.append(0)\n\ndata = data + spike_values\n\n# Flat zones\nflat_values = []\nflat_repeat = 0\ni_flat = 0\nfor i in x:\n if i_flat < flat_repeat:\n data[i] = data[i-1]\n i_flat += 1\n else:\n i_flat = 0\n flat_repeat = 0\n flat_bool = np.random.randn()\n if flat_bool > 
3.8:\n flat_repeat = flat_bool*1000\n print(flat_repeat)\n\n\ndf = pd.DataFrame(data, index=dates, columns=[\"TEMP\"])\ndf.index.name = 'TIME'\ndf['TEMP_QC'] = 0\n\n# Creation of WaterFrame\nwf = WaterFrame()\nwf.data = df.copy()\nwf.metadata[\"name\"] = \"Test data with errors\"\nwf.meaning[\"TEMP\"] = \"Seawater temperature\"\nunits = {'units': 'degree Celsius'}\nwf.meaning['TEMP'] = units\n\nwf.to_pickle('test_errors.pkl')\n\ndf.plot()\n\nplt.show()\n","sub_path":"other_codes/creation_bad_data.py","file_name":"creation_bad_data.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"241751172","text":"CONF = {\n \"dim_axis\": {\n \"sample\": 0,\n \"trait\": 1,\n \"candidate\": 1,\n \"covariate\": 1,\n \"sample_0\": 0,\n \"sample_1\": 1,\n },\n \"dim_names\": {\"sample\", \"candidate\", \"covariate\", \"trait\"},\n \"data_names\": {\"trait\", \"genotype\", \"covariates\", \"covariance\"},\n \"short_data_names\": {\"y\", \"G\", \"M\", \"K\"},\n \"data_synonym\": {\n \"y\": \"trait\",\n \"trait\": \"y\",\n \"G\": \"genotype\",\n \"genotype\": \"G\",\n \"M\": \"covariates\",\n \"covariates\": \"M\",\n \"K\": \"covariance\",\n \"covariance\": \"K\",\n },\n \"data_dims\": {\"trait\": [\"sample\", \"trait\"], \"genotype\": [\"sample\", \"candidate\"]},\n}\n","sub_path":"limix/_data/_conf.py","file_name":"_conf.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"479742725","text":"\"\"\"\nBasic topoplot\n==============\n\nBasic topographic plot based on channel names.\n\n.. image:: ../../picture/pictopo/ex_basic_topoplot.png\n\"\"\"\nfrom visbrain import Topo\n\n# Create a topoplot instance :\nt = Topo()\n\n# Create a list of channels, data, title and colorbar label :\nname = 'Topo_1'\nchannels = ['C3', 'C4', 'Cz', 'Fz', 'Pz']\ndata = [10, 20, 30, 10, 10]\ntitle = 'Basic topoplot illustration'\ncblabel = 'Colorbar label'\n\n# Add a central topoplot :\nt.add_topoplot(name, data, channels=channels, title=title, cblabel=cblabel)\n\n# Show the window :\nt.show()\n","sub_path":"examples/topo/00_basic_topoplot.py","file_name":"00_basic_topoplot.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"330236636","text":"import logging\r\nimport os\r\nfrom typing import List\r\n\r\nimport click\r\nimport coloredlogs\r\nfrom dask.distributed import Client, as_completed\r\nfrom tqdm import tqdm\r\nfrom utoolbox.io.dataset import open_dataset\r\n\r\nfrom ..tasks import downsample_naive, write_tiff\r\nfrom .utils import create_dir\r\n\r\n__all__ = [\"main\"]\r\n\r\nlogger = logging.getLogger(\"segmentation.pipeline.flows\")\r\n\r\n\r\n@click.command()\r\n@click.argument(\"src_dir\", type=click.Path(file_okay=False, dir_okay=True, exists=True))\r\ndef main(src_dir):\r\n logging.getLogger(\"tifffile\").setLevel(logging.ERROR)\r\n coloredlogs.install(\r\n level=\"DEBUG\", fmt=\"%(asctime)s %(levelname)s %(message)s\", datefmt=\"%H:%M:%S\"\r\n )\r\n\r\n # assume we have tunnel the scheduler to local\r\n scheduler = \"localhost:8786\"\r\n logger.info(f'connecting to scheduler at \"{scheduler}\"')\r\n client = Client(scheduler, timeout=\"300s\") # 5 min\r\n print(client)\r\n\r\n src_dir = os.path.abspath(src_dir)\r\n\r\n # load dataset\r\n src_ds = open_dataset(src_dir)\r\n desc = tuple(\r\n f\"{k}={v}\" for k, v in zip((\"x\", \"y\", 
\"z\"), reversed(src_ds.tile_shape))\r\n )\r\n logger.info(f\"tiling dimension ({', '.join(desc)})\")\r\n\r\n # generate tile index list (TODO deal with multi-color/view here)\r\n def groupby_tiles(inventory, index: List[str]):\r\n \"\"\"\r\n Aggregation function that generates the proper internal list layout for all the tiles in their natural N-D layout.\r\n\r\n Args:\r\n inventory (pd.DataFrame): the listing inventory\r\n index (list of str): the column header\r\n \"\"\"\r\n tiles = []\r\n for _, tile in inventory.groupby(index[0]):\r\n if len(index) > 1:\r\n # we are not at the fastest dimension yet, decrease 1 level\r\n tiles.extend(groupby_tiles(tile, index[1:]))\r\n else:\r\n # fastest dimension, call retrieval function\r\n tiles.append(src_ds[tile])\r\n return tiles\r\n\r\n index = [\"tile_y\", \"tile_x\"]\r\n if \"tile_z\" in src_ds.index.names:\r\n index = [\"tile_z\"] + index\r\n logger.info(f\"a {len(index)}-D tiled dataset\")\r\n\r\n tiles = groupby_tiles(src_ds, index)\r\n logger.info(f\"{len(tiles)} to process\")\r\n\r\n tiles_bin4 = [downsample_naive(tile, 4) for tile in tiles]\r\n\r\n dname = os.path.basename(src_dir)\r\n dname = f\"{dname}_bin4\"\r\n dst_dir = os.path.join(os.path.dirname(src_dir), dname)\r\n create_dir(dst_dir)\r\n\r\n # write back\r\n write_back_tasks = []\r\n for i, tile in enumerate(tiles_bin4):\r\n fname = f\"tile_{i:04d}.tif\"\r\n path = os.path.join(dst_dir, fname)\r\n future = write_tiff(path, tile)\r\n write_back_tasks.append(future)\r\n\r\n # submit task\r\n futures = client.compute(write_back_tasks, scheduler=\"processes\")\r\n with tqdm(total=len(futures)) as pbar:\r\n for future in as_completed(futures, with_results=False):\r\n try:\r\n uri = future.result()\r\n uri = os.path.basename(uri)\r\n\r\n pbar.set_description(uri)\r\n pbar.update(1)\r\n except Exception as error:\r\n logger.exception(error)\r\n future.release()\r\n\r\n logger.info(\"closing scheduler connection\")\r\n client.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"segmentation/pipeline/flows/bin4.py","file_name":"bin4.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"111270817","text":"from exceptions import SyntaxError\n\nfrom sly import Lexer\n\n\nclass JFTTLexer(Lexer):\n tokens = {\n PLUS, MINUS, MULTIPLY, DIVIDE, MODULO, LPARENTHESIS, RPARENTHESIS, # arithmetic signs\n EQUAL, NOT_EQUAL, LESS_OR_EQUAL, GREATER_OR_EQUAL, LESS_THAN, GREATER_THAN, # relations\n IF, THEN, ELSE, ENDIF, # conditions\n WHILE, DO, ENDWHILE, ENDDO, FOR, FROM, TO, DOWNTO, ENDFOR, # loops\n READ, WRITE, # io\n NUMBER, ASSIGN, # declarations and constants\n SEMICOLON, COLON, COMMA, # separators\n DECLARE, BEGIN, END, PIDENTIFIER # program section\n }\n\n ignore = ' \\t'\n ignore_comment = r'\\[(.|\\s)*?\\]'\n\n PLUS = r'PLUS'\n MINUS = r'MINUS'\n MULTIPLY = r'TIMES'\n DIVIDE = r'DIV'\n MODULO = r'MOD'\n LPARENTHESIS = r'\\('\n RPARENTHESIS = r'\\)'\n\n EQUAL = r'EQ'\n NOT_EQUAL = r'NEQ'\n LESS_OR_EQUAL = r'LEQ'\n GREATER_OR_EQUAL = r'GEQ'\n LESS_THAN = r'LE'\n GREATER_THAN = r'GE'\n\n IF = r'IF'\n THEN = r'THEN'\n ELSE = r'ELSE'\n ENDIF = r'ENDIF'\n DOWNTO = r'DOWNTO'\n\n WHILE = r'WHILE'\n DO = r'DO'\n ENDWHILE = r'ENDWHILE'\n ENDDO = r'ENDDO'\n\n FOR = r'FOR'\n FROM = r'FROM'\n TO = r'TO'\n ENDFOR = r'ENDFOR'\n\n READ = r'READ'\n WRITE = r'WRITE'\n\n ASSIGN = r'ASSIGN'\n\n SEMICOLON = r'\\;'\n COLON = r'\\:'\n COMMA = r'\\,'\n\n DECLARE = r'DECLARE'\n BEGIN = r'BEGIN'\n END 
= r'END'\n\n    PIDENTIFIER = r'[_a-z]+'\n\n    @_(r'\\-?[0-9]+')\n    def NUMBER(self, t):\n        t.value = int(t.value)\n        return t\n\n    @_(r'\\n')\n    def ignore_newline(self, t):\n        self.lineno += len(t.value)\n\n    # Error handling rule\n    def error(self, t):\n        raise SyntaxError(self.lineno)\n","sub_path":"compiler/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"315741963","text":"from django.conf.urls import url, include\nfrom ordersys import views\nfrom rest_framework.routers import DefaultRouter\nfrom rest_framework.authtoken.views import obtain_auth_token\nfrom rest_framework_jwt.views import obtain_jwt_token\n\nrouter = DefaultRouter()\nrouter.register(r'users', views.UserList)\nrouter.register(r'goods', views.GoodList)\nrouter.register(r'orders', views.OrderList)\nurlpatterns = [\n    # jwt token\n    url(r'^jwt_auth/', obtain_jwt_token),\n    # token\n    url(r'^tk_auth/', obtain_auth_token),\n    url(r'^orders/create/$', views.CrOrder.as_view()),\n    url(r'^', include(router.urls)),\n    url(r'^$', views.api_root),\n    url(r'^users/(?P<pk>[0-9]+)/$', views.UserDetail.as_view(), name='user-detail'),\n    url(r'^goods/(?P<pk>[0-9]+)/$', views.GoodDetail.as_view(), name='goods-detail'),\n    url(r'^orders/(?P<pk>[0-9]+)/$', views.OrderDetail.as_view(), name='orders-detail'),\n    url(r'^orders/(?P<pk>[0-9]+)/cancel/$', views.OrdercancelDetail.as_view(), name='orderscancel-detail')\n]\n","sub_path":"week09/order/ordersys/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"366911238","text":"import datetime\nimport os\nimport random\nimport sys\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import render_template, request, Flask, flash, redirect, url_for, \\\n    abort, jsonify, Response, make_response\n\n#sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))\n\nimport pygments.lexers\nfrom pygments import highlight\nfrom pygments.formatters import HtmlFormatter\n\napp = Flask(__name__)\napp.config.from_object('settings')\ndb = SQLAlchemy(app)\n\nclass Sprunge(db.Model):\n    name = db.Column(db.String(), primary_key=True)\n    content = db.Column(db.Text())\n    date = db.Column(db.DateTime(), default=datetime.datetime.utcnow)\n\n    def render_content(self, syntax=None):\n        if not syntax:\n            # No lexer requested: return the paste as plain text.\n            return Response(self.content + '\\n', content_type='text/plain; charset=UTF-8')\n        try:\n            lexer = pygments.lexers.get_lexer_by_name(syntax)\n        except Exception:\n            lexer = pygments.lexers.TextLexer()\n        return highlight(self.content,\n                         lexer,\n                         HtmlFormatter(full=True,\n                                       style='borland',\n                                       lineanchors='n',\n                                       linenos='table',\n                                       encoding='utf-8'))\n\n
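\n# Usage sketch (editorial): Sprunge(name='VZiY', content='print(1)').render_content('py') returns a\n# full HTML page with line numbers and #n-7 style anchors; render_content(None) returns plain text.\n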
\ndef help():\n    u = 'http://sprunge.us'\n    r = 'sprunge'\n    return \"\"\"\n\nsprunge(1)                          SPRUNGE                          sprunge(1)\n\nNAME\n    sprunge: command line pastebin:\n\nSYNOPSIS\n    <command> | curl -F '%s=<-' %s\n\nDESCRIPTION\n    add ?<lang> to resulting url for line numbers and syntax highlighting\n\nEXAMPLES\n    ~$ cat bin/ching | curl -F '%s=<-' %s\n       %s/VZiY\n    ~$ firefox %s/VZiY?py#n-7\n\nSEE ALSO\n    http://github.com/rupa/sprunge\n\n\"\"\" % (r, u, r, u, u, u)
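\n\n# Editorial note: with the 62-character alphabet used in new_id() below, a 4-character id\n# allows 62**4 = 14,776,336 distinct names, so random collisions stay rare at small scale.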
\n\ndef new_id():\n    nid = ''\n    symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'\n    while len(nid) < 4:\n        # draw uniformly from the whole alphabet (randint is inclusive on both ends)\n        nid = nid + symbols[random.randint(0, len(symbols) - 1)]\n    return nid\n\n@app.route(\"/\", methods=[\"GET\"])\ndef index():\n    return Response(help(), content_type='text/plain; charset=UTF-8')\n\n@app.route(\"/<sprunge_id>\", methods=[\"GET\"])\ndef get_sprunge(sprunge_id):\n\n    # deleting\n    #if got.endswith('/secretpassword'):\n    #    got = got.split('/')\n    #    c = Sprunge.gql('WHERE name = :1', got[0]).get()\n    #    self.response.headers['Content-Type'] = 'text/plain; charset=UTF-8'\n    #    if c:\n    #        self.response.out.write('''\n    #            deleting %s\n    #            --------\n    #            c.content\n    #        ''' % (got[0], c.content))\n    #        c.delete()\n    #    else:\n    #        self.response.out.write(got[0] + ' not here')\n    #    return\n    sprunge = db.session.query(Sprunge)\\\n        .filter_by(name=sprunge_id)\\\n        .first()\n    #c = Sprunge.gql('WHERE name = :1', got).get()\n    if not sprunge:\n        return str(sprunge_id) + ' not found', 404\n\n    # a bare query string selects the highlighting lexer, e.g. /VZiY?py\n    syntax = request.query_string.decode() or None\n    return sprunge.render_content(syntax)\n\n\n@app.route(\"/\", methods=[\"POST\"])\ndef post():\n    content = request.form.get('sprunge')\n    if not content:\n        abort(400)\n    nid = new_id()\n    while db.session.query(Sprunge).filter_by(name=nid).first():\n        nid = new_id()\n    s = Sprunge(name=nid, content=content)\n\n    # delete the oldest sprunge\n    old = db.session.query(Sprunge).order_by(Sprunge.date.asc()).first()\n    if old:\n        db.session.delete(old)\n\n    db.session.add(s)\n    db.session.commit()\n    return Response(' http://sprunge.us/' + nid + '\\n', content_type='text/plain')\n\ndef main():\n    #application = webapp.WSGIApplication([(r'/(.*)', Index)],debug=False)\n    #wsgiref.handlers.CGIHandler().run(application)\n    app.run()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"sprunge.py","file_name":"sprunge.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"232754758","text":"\"\"\"Implementation of treadmill admin ldap CLI app_group plugin.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\n\nimport click\nfrom ldap3.core import exceptions as 
ldap_exceptions\n\nfrom treadmill import admin\nfrom treadmill import cli\nfrom treadmill import context\nfrom treadmill import yamlwrapper as yaml\n\n\ndef init():\n \"\"\"Configures app CLI group\"\"\"\n # Disable too many branches.\n #\n # pylint: disable=R0912\n formatter = cli.make_formatter('app')\n\n @click.group()\n def app():\n \"\"\"Manage applications\"\"\"\n pass\n\n @app.command()\n @click.option('-m', '--manifest', help='Application manifest.',\n type=click.Path(exists=True, readable=True))\n @click.argument('app')\n @cli.admin.ON_EXCEPTIONS\n def configure(app, manifest):\n \"\"\"Create, get or modify an app configuration\"\"\"\n admin_app = admin.Application(context.GLOBAL.ldap.conn)\n if manifest:\n with io.open(manifest, 'rb') as fd:\n data = yaml.load(stream=fd)\n try:\n admin_app.create(app, data)\n except ldap_exceptions.LDAPEntryAlreadyExistsResult:\n admin_app.replace(app, data)\n\n try:\n cli.out(formatter(admin_app.get(app)))\n except ldap_exceptions.LDAPNoSuchObjectResult:\n click.echo('App does not exist: %s' % app, err=True)\n\n @app.command(name='list')\n @cli.admin.ON_EXCEPTIONS\n def _list():\n \"\"\"List configured applicaitons\"\"\"\n admin_app = admin.Application(context.GLOBAL.ldap.conn)\n cli.out(formatter(admin_app.list({})))\n\n @app.command()\n @click.argument('app')\n @cli.admin.ON_EXCEPTIONS\n def delete(app):\n \"\"\"Delete applicaiton\"\"\"\n admin_app = admin.Application(context.GLOBAL.ldap.conn)\n try:\n admin_app.delete(app)\n except ldap_exceptions.LDAPNoSuchObjectResult:\n click.echo('App does not exist: %s' % app, err=True)\n\n del delete\n del _list\n del configure\n\n return app\n","sub_path":"lib/python/treadmill/cli/admin/ldap/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87736667","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import TarkariItem\n\nclass TarkariSpider(scrapy.Spider):\n name = 'tarkari'\n start_urls = ['http://kalimatimarket.gov.np/home/wpricelist?fbclid=IwAR3xnZcl3UR5JAoUev-no6ElvTQmJIfyOeCa90EhgaXTgH0TQzUVexWT0SY']\n\n def parse(self, response):\n items = TarkariItem()\n name = response.css('td:nth-child(1)').css('::text').extract()\n unit = response.css('td:nth-child(2)').css('::text').extract()\n minimum = response.css('td:nth-child(3)').css('::text').extract()\n maximum = response.css('td:nth-child(4)').css('::text').extract()\n average = response.css('td:nth-child(5)').css('::text').extract()\n # date = response.css('h4 span').css('::text').extract()\n # sk = date[0]\n # br = sk.split(' ')\n # print(br)\n # datee = br[6] + br[5] + ',' + br[7]\n\n\n items['name'] = name\n items['unit'] = unit\n items['minimum'] = minimum\n items['maximum'] = maximum\n items['average'] = average\n # items['datee'] = datee\n yield items\n\n\n","sub_path":"tarkari/tarkari/spiders/tarkari.py","file_name":"tarkari.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"587002690","text":"from pamqp import ContentHeader\nfrom pamqp import specification\nfrom pamqp.body import ContentBody\n\nfrom amqpstorm import Channel\nfrom amqpstorm.basic import Basic\nfrom amqpstorm.exception import AMQPChannelError\nfrom amqpstorm.exchange import Exchange\nfrom amqpstorm.queue import Queue\nfrom amqpstorm.tests.utility import FakeConnection\nfrom amqpstorm.tests.utility import TestFramework\nfrom amqpstorm.tx import 
Tx\n\n\nclass ChannelTests(TestFramework):\n def test_channel_with_statement_when_closed(self):\n with Channel(0, None, 360) as channel:\n self.assertIsInstance(channel, Channel)\n\n def test_channel_with_statement_when_open(self):\n connection = FakeConnection(FakeConnection.CLOSED)\n with Channel(0, connection, 360) as channel:\n channel.set_state(channel.OPEN)\n self.assertIsInstance(channel, Channel)\n\n def test_channel_with_statement_when_failing(self):\n connection = FakeConnection()\n try:\n with Channel(0, connection, 360) as channel:\n channel.exceptions.append(AMQPChannelError('error'))\n channel.check_for_errors()\n except AMQPChannelError as why:\n self.assertIsInstance(why, AMQPChannelError)\n\n self.assertEqual(self.get_last_log(),\n 'Closing channel due to an unhandled exception: '\n 'error')\n\n def test_channel_id(self):\n channel = Channel(0, None, 360)\n\n self.assertEqual(int(channel), 0)\n\n channel = Channel(1557, None, 360)\n\n self.assertEqual(int(channel), 1557)\n\n def test_channel_build_inbound_messages(self):\n channel = Channel(0, FakeConnection(), 360)\n channel.set_state(channel.OPEN)\n\n message = self.message.encode('utf-8')\n message_len = len(message)\n\n deliver = specification.Basic.Deliver()\n header = ContentHeader(body_size=message_len)\n body = ContentBody(value=message)\n\n channel._inbound = [deliver, header, body]\n\n for msg in channel.build_inbound_messages(break_on_empty=True):\n self.assertIsInstance(msg.body, str)\n self.assertEqual(msg.body.encode('utf-8'), message)\n\n def test_channel_build_inbound_messages_without_break_on_empty(self):\n channel = Channel(0, FakeConnection(), 360)\n channel.set_state(channel.OPEN)\n\n message = self.message.encode('utf-8')\n message_len = len(message)\n\n deliver = specification.Basic.Deliver()\n header = ContentHeader(body_size=message_len)\n body = ContentBody(value=message)\n\n for _ in range(25):\n channel._inbound.append(deliver)\n channel._inbound.append(header)\n channel._inbound.append(body)\n\n messages_consumed = 0\n for msg in channel.build_inbound_messages(break_on_empty=False):\n messages_consumed += 1\n self.assertIsInstance(msg.body, str)\n self.assertEqual(msg.body.encode('utf-8'), message)\n if messages_consumed >= 10:\n channel.set_state(channel.CLOSED)\n self.assertEqual(messages_consumed, 10)\n\n def test_channel_build_inbound_messages_as_tuple(self):\n channel = Channel(0, FakeConnection(), 360)\n channel.set_state(channel.OPEN)\n\n message = self.message.encode('utf-8')\n message_len = len(message)\n\n deliver = specification.Basic.Deliver()\n header = ContentHeader(body_size=message_len)\n body = ContentBody(value=message)\n\n channel._inbound = [deliver, header, body]\n\n for msg in channel.build_inbound_messages(break_on_empty=True,\n to_tuple=True):\n self.assertIsInstance(msg, tuple)\n self.assertEqual(msg[0], message)\n\n def test_channel_process_data_events(self):\n channel = Channel(0, FakeConnection(), 360)\n channel.set_state(channel.OPEN)\n\n message = self.message.encode('utf-8')\n message_len = len(message)\n\n deliver = specification.Basic.Deliver()\n header = ContentHeader(body_size=message_len)\n body = ContentBody(value=message)\n\n channel._inbound = [deliver, header, body]\n\n def callback(msg):\n self.assertIsInstance(msg.body, str)\n self.assertEqual(msg.body.encode('utf-8'), message)\n\n channel.consumer_callback = callback\n channel.process_data_events()\n\n def test_channel_process_data_events_as_tuple(self):\n channel = Channel(0, FakeConnection(), 360)\n 
channel.set_state(channel.OPEN)\n\n message = self.message.encode('utf-8')\n message_len = len(message)\n\n deliver = specification.Basic.Deliver()\n header = ContentHeader(body_size=message_len)\n body = ContentBody(value=message)\n\n channel._inbound = [deliver, header, body]\n\n def callback(body, channel, method, properties):\n self.assertIsInstance(body, bytes)\n self.assertIsInstance(channel, Channel)\n self.assertIsInstance(method, dict)\n self.assertIsInstance(properties, dict)\n self.assertEqual(body, message)\n\n channel.consumer_callback = callback\n channel.process_data_events(to_tuple=True)\n\n def test_channel_start_consuming(self):\n channel = Channel(0, FakeConnection(), 360)\n channel.set_state(channel.OPEN)\n\n message = self.message.encode('utf-8')\n message_len = len(message)\n\n deliver = specification.Basic.Deliver()\n header = ContentHeader(body_size=message_len)\n body = ContentBody(value=message)\n\n channel._inbound = [deliver, header, body]\n\n def callback(msg):\n self.assertIsInstance(msg.body, str)\n self.assertEqual(msg.body.encode('utf-8'), message)\n channel.set_state(channel.CLOSED)\n\n channel.consumer_callback = callback\n channel.add_consumer_tag('travis-ci')\n channel.start_consuming()\n\n def test_channel_start_consuming_no_consumer_tag(self):\n channel = Channel(0, FakeConnection(), 360)\n channel.set_state(channel.OPEN)\n\n message = self.message.encode('utf-8')\n message_len = len(message)\n\n deliver = specification.Basic.Deliver()\n header = ContentHeader(body_size=message_len)\n body = ContentBody(value=message)\n\n channel._inbound = [deliver, header, body]\n\n def callback(msg):\n self.assertIsInstance(msg.body, str)\n self.assertEqual(msg.body.encode('utf-8'), message)\n\n channel.consumer_callback = callback\n channel.start_consuming()\n\n def test_channel_open(self):\n def on_open_ok(_, frame_out):\n self.assertIsInstance(frame_out, specification.Channel.Open)\n channel.rpc.on_frame(specification.Channel.OpenOk())\n\n channel = Channel(0, FakeConnection(on_write=on_open_ok), 360)\n\n # Close Channel.\n channel.open()\n\n self.assertEqual(channel._state, channel.OPEN)\n\n def test_channel_close(self):\n def on_close_ok(_, frame_out):\n if isinstance(frame_out, specification.Basic.Cancel):\n channel.rpc.on_frame(specification.Basic.CancelOk())\n return\n channel.rpc.on_frame(specification.Channel.CloseOk())\n\n channel = Channel(0, FakeConnection(on_write=on_close_ok), 360)\n\n # Set up Fake Channel.\n channel._inbound = [1, 2, 3]\n channel.set_state(channel.OPEN)\n channel._consumer_tags = ['1', '2', '3']\n\n # Close Channel.\n channel.close()\n\n self.assertEqual(channel._inbound, [])\n self.assertEqual(channel._consumer_tags, [])\n self.assertEqual(channel._state, channel.CLOSED)\n self.assertFalse(channel.exceptions)\n\n def test_channel_close_gracefully_with_queued_error(self):\n def on_close_ok(_, frame_out):\n if isinstance(frame_out, specification.Basic.Cancel):\n raise AMQPChannelError('travis-ci')\n channel.rpc.on_frame(specification.Channel.CloseOk())\n\n channel = Channel(0, FakeConnection(on_write=on_close_ok), 360)\n\n # Set up Fake Channel.\n channel._inbound = [1, 2, 3]\n channel.set_state(channel.OPEN)\n channel._consumer_tags = ['1', '2', '3']\n channel.exceptions.append(AMQPChannelError('travis-ci'))\n\n # Close Channel.\n channel.close()\n\n self.assertEqual(channel._inbound, [])\n self.assertEqual(channel._consumer_tags, [])\n self.assertEqual(channel._state, channel.CLOSED)\n self.assertTrue(channel.exceptions)\n\n def 
test_channel_close_when_already_closed(self):\n fake_connection = FakeConnection()\n channel = Channel(0, fake_connection, 360)\n\n # Set up Fake Channel.\n channel._inbound = [1, 2, 3]\n channel.set_state(channel.CLOSED)\n channel._consumer_tags = ['1', '2', '3']\n\n def state_set(state):\n self.assertEqual(state, channel.CLOSED)\n\n channel.set_state = state_set\n\n # Close Channel.\n channel.close()\n\n self.assertFalse(fake_connection.frames_out)\n\n self.assertEqual(channel._inbound, [])\n self.assertEqual(channel._consumer_tags, [])\n self.assertEqual(channel._state, channel.CLOSED)\n self.assertFalse(channel.exceptions)\n\n def test_channel_confirm_deliveries(self):\n def on_select_ok(*_):\n channel.rpc.on_frame(specification.Confirm.SelectOk())\n\n connection = FakeConnection(on_write=on_select_ok)\n channel = Channel(0, connection, 0.01)\n channel.set_state(Channel.OPEN)\n\n self.assertFalse(channel.confirming_deliveries)\n self.assertEqual(channel.confirm_deliveries(), {})\n self.assertTrue(channel.confirming_deliveries)\n\n def test_channel_close_channel(self):\n channel = Channel(0, FakeConnection(), 360)\n\n # Set up Fake Channel.\n channel._inbound = [1, 2, 3]\n channel.set_state(channel.OPEN)\n channel._consumer_tags = [1, 2, 3]\n\n close_frame = specification.Channel.Close(reply_code=200,\n reply_text='travis-ci')\n # Close Channel.\n channel._close_channel(close_frame)\n\n self.assertEqual(channel._inbound, [])\n self.assertEqual(channel._consumer_tags, [])\n self.assertEqual(channel._state, channel.CLOSED)\n\n def test_channel_basic_handler_is_defined(self):\n channel = Channel(0, None, 360)\n\n self.assertIsInstance(channel.basic, Basic)\n\n def test_channel_exchange_handler_is_defined(self):\n channel = Channel(0, None, 360)\n\n self.assertIsInstance(channel.exchange, Exchange)\n\n def test_channel_queue_handler_is_defined(self):\n channel = Channel(0, None, 360)\n\n self.assertIsInstance(channel.queue, Queue)\n\n def test_channel_tx_handler_is_defined(self):\n channel = Channel(0, None, 360)\n\n self.assertIsInstance(channel.tx, Tx)\n","sub_path":"amqpstorm/tests/unit/channel/channel_tests.py","file_name":"channel_tests.py","file_ext":"py","file_size_in_byte":11088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"625512758","text":"# BerkeleyInterface.berkeleyinterface\n# Main functionality of the interface\n# author: XuL\n\n\n\"\"\"\nPython interface to the Berkeley Parser\n\nThis has the advantage over other implementations which essentially automate a\ncall to the jar file: this actually duplicates the main() method, allowing\nmultiple parse calls and ability to modify options without the overhead of\nloading the grammar file each time (and without having to use Java!)\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport sys\nimport jpype\n\n##########################################################################\n## Main Functionality\n##########################################################################\n\n\n\ndef startup(classpath):\n \n '''Start the JVM. 
This MUST be called before any other jpype functions'''\n jpype.startJVM(jpype.getDefaultJVMPath(), \"-Djava.class.path=%s\" % classpath, \"-Xmx500m\")\n\n\n\n\ndef dictToArgs(d):\n '''Convert a dict of options to a list of command-line-style args'''\n boolDefaults = [ \"tokenize\", \"binarize\", \"scores\", \"keepFunctionLabels\",\n \"substates\", \"accurate\", \"modelScore\", \"confidence\", \"sentence_likelihood\",\n \"tree_likelihood\", \"variational\", \"render\", \"chinese\", \"useGoldPOS\",\n \"dumpPosteriors\", \"ec_format\",\n ] # these all default to False and only require the switch if True\n\n # get a list of \"-key\", \"value\" or just \"-key\" if key is in boolDefaults\n args = [j for i in [(\"-\"+k, '%s'%v) if k not in boolDefaults else (\"-\"+k,) for k,v in iter(d.items())] for j in i]\n return args\n\n\n\n\n\ndef getOpts(args):\n '''\n Converts given command-line-style args to opts for parser functions.\n\n Note that changing options for:\n accurate, chinese, grFileName, kbest, nGrammars, nThreads, scores,\n substates, viterbi, variational\n after calling loadGrammar will NOT update the parser.\n\n Specifically, options for:\n grFileName, kbest, nThreads\n are used in both parser setup (loadGrammar) and actual parsing (parseInput)\n\n Options for:\n binarize, confidence, dumpPosteriors, ec_format, goldPOS, inputFile,\n keepFunctionLabels, maxLength, modelScore, outputFile, render,\n sentence_likelihood, tokenize, tree_likelihood\n do not affect the grammar loading and may be changed between those steps.\n\n The JVM must be started before calling this function.\n '''\n Options = jpype.JClass(\"edu.berkeley.nlp.PCFGLA.BerkeleyParser$Options\")\n OptionParser = jpype.JClass(\"edu.berkeley.nlp.PCFGLA.OptionParser\")\n optParser = OptionParser(Options)\n opts = optParser.parse(args, True)\n return opts\n\n\n\n\n\ndef loadGrammar(opts):\n '''\n Loads the grammar and lexicon for the parser, given options.\n Returns the initialized parser.\n '''\n threshold = 1.0\n\n if opts.chinese: #todo WARNING: THIS IS UNTESTED\n Corpus = jpype.JClass(\"edu.berkeley.nlp.PCFGLA.Corpus\")\n Corpus.myTreebank = Corpus.TreeBankType.CHINESE\n\n parser = None\n\n\n if opts.nGrammars != 1: #todo\n print (\"Multiple grammars not implemented!\")\n sys.exit(1)\n else:\n inFileName = opts.grFileName\n ParserData = jpype.JClass(\"edu.berkeley.nlp.PCFGLA.ParserData\")\n pData = ParserData.Load(inFileName)\n if pData is None:\n print (\"Failed to load grammar from file '%s'.\"%inFileName)\n sys.exit(1)\n grammar = pData.getGrammar()\n lexicon = pData.getLexicon()\n Numberer = jpype.JClass(\"edu.berkeley.nlp.util.Numberer\")\n Numberer.setNumberers(pData.getNumbs())\n if opts.kbest == 1:\n CoarseToFineMaxRuleParser = jpype.JClass(\"edu.berkeley.nlp.PCFGLA.CoarseToFineMaxRuleParser\")\n parser = CoarseToFineMaxRuleParser(grammar, lexicon, threshold, -1,\n opts.viterbi, opts.substates, opts.scores, opts.accurate, opts.variational,\n True, True)\n else:\n CoarseToFineNBestParser = jpype.JClass(\"edu.berkeley.nlp.PCFGLA.CoarseToFineNBestParser\")\n parser = CoarseToFineNBestParser(grammar, lexicon, opts.kbest, threshold,\n -1, opts.viterbi, opts.substates, opts.scores, opts.accurate,\n opts.variational, False, True)\n\n parser.binarization = pData.getBinarization()\n\n\n return parser\n\n\n\n\n\n\n \ndef parse(parser, opts, sent):\n '''\n Uses parser with opts to parse the input string to output.\n\n '''\n\n sent = sent.strip()\n\n sentence = None\n\n PTBLineLexer = 
jpype.JClass(\"edu.berkeley.nlp.io.PTBLineLexer\")\n TreeAnnotations = jpype.JClass(\"edu.berkeley.nlp.PCFGLA.TreeAnnotations\")\n tokenizer = PTBLineLexer()\n sentence = tokenizer.tokenizeLine(sent)\n\n pt = None\n st = jpype.java.util.ArrayList()\n for s in sentence:\n st.add(s)\n\n parsedTree = parser.getBestConstrainedParse(st, pt, None)\n parsedTree = TreeAnnotations.unAnnotateTree(parsedTree,opts.keepFunctionLabels)\n\n\n return parsedTree\n\n\n\n\ndef shutdown():\n '''Shut down the JVM'''\n jpype.shutdownJVM()\n\n\n\n","sub_path":"berkeleyinterface.py","file_name":"berkeleyinterface.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"646760754","text":"# Copyright 2021, Yahoo\n# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms\nfrom abc import ABC, abstractmethod\n\nfrom ...testplan.schema import TestPlan\nfrom ...utils.hooks import EventHook\nfrom ..exceptions.executor_errors import YChaosTargetConfigConditionFailedError\n\n\nclass BaseExecutor(EventHook, ABC):\n \"\"\"\n TargetExecutor defines the target where the agents are executed to\n test the resiliency of the system. A simple example of Target Executor is the\n [MachineTargetExecutor][ychaos.core.executor.MachineTargetExecutor.MachineTargetExecutor]\n which holds the program logic to execute the agents (with Coordinator) in Virtual Machines/BareMetals\n\n Each new Target Executor overrides the `execute()` method which defines \"what\" is to be done\n to execute the agents in a particular target environment.\n\n This class extends the [EventHook][ychaos.utils.hooks.EventHook] class, which\n implies each of the target executor can define its own events and the hooks that will\n be called during the trigger of an event.\n \"\"\"\n\n __target_type__: str\n\n def __init__(self, testplan: TestPlan, *args, **kwargs):\n super(BaseExecutor, self).__init__()\n self.testplan = testplan\n self._validate_target_config()\n\n def _get_target_type(self):\n # To avoid circular import\n from ...testplan.attack import TargetType\n\n return TargetType(self.__target_type__)\n\n def _validate_target_config(self):\n target_config = self.testplan.attack.get_target_config()\n if self.testplan.attack.target_type != self._get_target_type():\n raise YChaosTargetConfigConditionFailedError(\"Target type mismatch\")\n\n # Even though ideally this branch is never entered by the code (unless there is some issue with pydantic)\n if not isinstance(\n target_config, self._get_target_type().metadata.schema\n ): # pragma: no cover\n raise YChaosTargetConfigConditionFailedError(\n \"Target configuration is not processable for this executor\"\n )\n\n @abstractmethod\n def execute(self) -> None:\n \"\"\"\n Define \"what\" is to be done when the testplan consists\n the instruction to execute the agents in a particular\n target environment.\n\n Returns:\n None\n \"\"\"\n pass # Implement in Executors\n","sub_path":"src/ychaos/core/executor/BaseExecutor.py","file_name":"BaseExecutor.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"377771024","text":"from django.shortcuts import render\nfrom .models import Perro,Adoptante\nfrom django.shortcuts import render, get_object_or_404\nfrom .forms import PerroForm\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.contrib.auth.models import 
User\nfrom django.contrib.auth.forms import UserCreationForm,AuthenticationForm\nfrom django.contrib.auth import login,logout\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .serializers import perroSerializer\n\n# Create your views here.\ndef inicio(request):\n lista_ultimas_mascotas_adoptado = Perro.objects.filter(estado_mascota=\"adoptado\")[:3]\n lista_ultimas_mascotas_rescatado = Perro.objects.filter(estado_mascota=\"rescatado\").order_by('-id_mascota')[:3]\n lista_ultimas_mascotas_disponible = Perro.objects.filter(estado_mascota=\"disponible\").order_by('-id_mascota')[:3]\n context = {'lista_ultimas_mascotas_disponible': lista_ultimas_mascotas_disponible,\n 'lista_ultimas_mascotas_adoptado': lista_ultimas_mascotas_adoptado,\n 'lista_ultimas_mascotas_rescatado': lista_ultimas_mascotas_rescatado}\n return render(request, 'misperris/inicio.html',context)\n\ndef listar_perro(request):\n listar_todas_mascotas = Perro.objects.all()\n lista_ultimas_mascotas_disponible = Perro.objects.filter(estado_mascota=\"disponible\")\n context = {'listar_todas_mascotas': listar_todas_mascotas,\n 'lista_ultimas_mascotas_disponible':lista_ultimas_mascotas_disponible}\n return render(request, 'misperris/listar_perro.html',context)\n\ndef detalle_perro(request,pk):\n perro = get_object_or_404(Perro, pk=pk)\n context = {'perro':perro}\n return render (request, 'misperris/detalle_perro.html',context)\n\ndef nuevo_perro(request):\n if request.method == 'POST':\n form = PerroForm( request.POST,request.FILES)\n if form.is_valid():\n perron = form.save(commit=False)\n perron.save()\n return redirect('detalle_perro',pk=perron.pk)\n else:\n form = PerroForm()\n context = {'form':form}\n return render(request, 'misperris/editar_perro.html',context)\n\ndef editar_perro(request,pk):\n perro = get_object_or_404(Perro,pk=pk)\n if request.method == 'POST':\n form = PerroForm(request.POST,request.FILES, instance=perro)\n if form.is_valid():\n perro = form.save(commit=False)\n perro.save()\n return redirect('detalle_perro',pk=perro.pk)\n else:\n form = PerroForm(instance = perro)\n context = {'form':form}\n return render(request,'misperris/editar_perro.html',context)\n\ndef eliminar_perro(request,pk):\n perro = get_object_or_404(Perro,pk=pk)\n perro.delete()\n\n return redirect('listar_perro')\n\ndef adoptar_perro(request,pk):\n perro = get_object_or_404(Perro,pk=pk)\n adoptar_perro = Perro.objects.filter(pk=pk).update(estado_mascota='adoptado')\n return redirect('listar_perro')\n\ndef formulario(request):\n if request.method == 'POST':\n correoAdoptante = request.POST.get('txtemail',True)\n rutAdoptante = request.POST.get('txtrut',True)\n nombreAdoptante = request.POST.get('txtname',True)\n fechaAdoptante = request.POST.get('txtdate',True)\n telefonoAdoptante = request.POST.get('txtphone',True)\n regionAdoptante = request.POST.get('txtRegion',True)\n ciudadAdoptante = request.POST.get('txtCity',True)\n tipoviviendaAdoptante = request.POST.get('sel-vivienda',True)\n objetos = Adoptante(correo_adoptante = correoAdoptante,\n rut_adoptante = rutAdoptante,\n nombre_adoptante = nombreAdoptante,\n fecha_adoptante = fechaAdoptante,\n telefono_adoptante = telefonoAdoptante,\n region_adoptante = regionAdoptante,\n ciudad_adoptante = ciudadAdoptante,\n tipo_vivienda_adoptante = tipoviviendaAdoptante)\n \n \n objetos.save()\n return redirect('crear_usuario')\n return render(request, 'misperris/formulario.html',{})\n\n\n\ndef crear_usuario(request):\n if 
request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n # log the user in\n login(request, user)\n return redirect('inicio')\n else:\n form = UserCreationForm() \n \n return render(request, 'account/crear_usuario.html', { 'form': form })\n\ndef login_view(request):\n if request.method == 'POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n # log the user in\n user = form.get_user()\n login(request, user)\n return redirect('inicio')\n else:\n form = AuthenticationForm()\n return render(request, 'account/login.html', { 'form': form })\n\ndef logout_view(request):\n if request.method == 'POST':\n logout(request)\n return redirect('inicio') \n \nclass PerroLista(APIView):\n\n def get(self,request):\n perros = Perro.objects.all()\n serializer = perroSerializer(perros,context={\"request\":request},many=True)\n return Response(serializer.data)\n def post(self):\n pass","sub_path":"misperris/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"623334606","text":"import datetime\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\nfrom reports.models import Report\nfrom userprofiles.mixins import LoginRequiredMixin\n\n\nclass DownloadReports(LoginRequiredMixin, TemplateView):\n template_name = \"reports/download_reports.html\"\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n airport = request.user.profile.airport_default\n context['reports'] = Report.objects.filter(airport=airport, download_report=True)\n return self.render_to_response(context)\n\n def post(self, request, *args, **kwargs):\n airport = self.request.user.profile.airport_default\n date_start = datetime.datetime.strptime(request.POST['DateFrom'], '%Y-%m-%d').date()\n date_end = datetime.datetime.strptime(request.POST['DateTo'], '%Y-%m-%d').date()\n report = request.POST['type_report']\n data = {'airport': airport, 'date_start': date_start, 'date_end': date_end, 'report': report}\n query = Report.objects.download_report(data)\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=\"%s.xls\"' % query[1]\n query[0].save(response)\n return response\n","sub_path":"reports/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"208767700","text":"import numpy as np, pandas as pd\nimport random\n\nagents_count = 1000\n\nagents = pd.DataFrame([], index=range(agents_count))\n\nagents['quantity'] = np.where(np.random.choice([True, False], agents_count),\n np.random.randint(100, 1000, agents_count),\n 0)\nagents['buy_price'] = np.random.triangular(100, 105, 125)\n\n\nfor i in range(1000):\n if random.randint(0, 1) == 1:\n quantity = np.random.randint(100, 1000)\n else:\n quantity = 0\n buy = np.random.normal(100, 10)\n buy_count = random.randint(0, (1000 - quantity) // 10)\n sell = buy + np.random.triangular(0.1, 1, 5)\n sell_count = random.randint(0, quantity // 10)\n agent = {\n 'quantity': quantity,\n 'buy': buy,\n 'buy_count': buy_count,\n 'sell': sell,\n 'sell_count': sell_count,\n }\n print(agent)\n agents.append(agent)\n\n\nfor i in range(1000):\n 
pass\n","sub_path":"trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"19541278","text":"from .image import BossImageSource\nfrom .metadata import BossMetadata\n\nfrom ...frontends.precomputed import CloudVolumePrecomputed\n\nfrom .. import get_cache_path\nfrom ...cacheservice import CacheService\nfrom ...cloudvolume import SharedConfiguration, register_plugin\nfrom ...paths import strict_extract\n\ndef create_boss(\n cloudpath, mip=0, bounded=True, autocrop=False,\n fill_missing=False, cache=False, compress_cache=None,\n cdn_cache=True, progress=False, info=None, provenance=None,\n compress=None, non_aligned_writes=False, parallel=1,\n delete_black_uploads=False, green_threads=False\n ):\n path = strict_extract(cloudpath)\n config = SharedConfiguration(\n cdn_cache=cdn_cache,\n compress=compress,\n compress_level=None,\n green=green_threads,\n mip=mip,\n parallel=parallel,\n progress=progress,\n )\n cache = CacheService(\n cloudpath=get_cache_path(cache, cloudpath),\n enabled=bool(cache),\n config=config,\n compress=compress_cache,\n )\n\n meta = BossMetadata(cloudpath, cache=cache, info=info)\n image = BossImageSource(\n config, meta, cache, \n autocrop=bool(autocrop),\n bounded=bool(bounded),\n non_aligned_writes=bool(non_aligned_writes), \n )\n\n return CloudVolumePrecomputed(\n meta, cache, config, \n imagesrc, mesh=None, skeleton=None,\n mip=mip\n )\n\ndef register():\n register_plugin('boss', create_boss)","sub_path":"cloudvolume/datasource/boss/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"44590947","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 30 21:56:17 2018\r\n\r\n@author: MCA\r\n\r\nfunction prints pascalls triangle to the n-th row\r\n\"\"\"\r\n\r\ndef pascal(n):\r\n \r\n def one(m):\r\n return m.append(1)\r\n \r\n lines = []\r\n for j in range(n):\r\n line = []\r\n if j == 0:\r\n one(line)\r\n elif j == 1:\r\n one(line)\r\n one(line)\r\n \r\n else:\r\n one(line)\r\n for i in range(j-1):\r\n line.append(lines[j-1][i]+lines[j-1][i+1])\r\n one(line)\r\n \r\n lines.append(line)\r\n \r\n return lines\r\n \r\nprint (pascal(5))","sub_path":"pascalTria.py","file_name":"pascalTria.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"80451475","text":"class Solution:\n def rotate(self, matrix: [[int]]):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: void Do not return anything, modify matrix in-place instead.\n \"\"\"\n n = len(matrix)\n m = int(n / 2) if n % 2 == 0 else int((n - 1) / 2)\n\n for i in range(n):\n for j in range(i):\n self.swap(matrix, i, j, j, i)\n \n for i in range(n):\n for j in range(m):\n self.swap(matrix, i, j, i, n - 1 - j)\n \n\n def swap(self, matrix: [[int]], x1: int, y1: int, x2: int, y2: int):\n tmp = matrix[x1][y1]\n matrix[x1][y1] = matrix[x2][y2]\n matrix[x2][y2] = tmp\n\nm = [[1,2,3],[4,5,6],[7,8,9]]\nSolution().rotate(m)\nprint(m)","sub_path":"Solutions/48RotateImage.py","file_name":"48RotateImage.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"537302110","text":"# This file is part of Tryton. 
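# --- editor's note: two hedged fixes for samples that end above ---
# 1) trade.py: `agents.append(agent)` discards its return value
#    (DataFrame.append is not in-place, and was removed in pandas 2.0), so the
#    loop builds nothing. Collecting plain dicts and constructing the frame
#    once is the idiomatic fix; this sketch keeps the sample's distributions.
import numpy as np
import pandas as pd

rows = []
for _ in range(1000):
    quantity = int(np.random.randint(100, 1000)) if np.random.rand() < 0.5 else 0
    buy = float(np.random.normal(100, 10))
    rows.append({
        "quantity": quantity,
        "buy": buy,
        "buy_count": int(np.random.randint(0, (1000 - quantity) // 10 + 1)),
        "sell": buy + float(np.random.triangular(0.1, 1, 5)),
        "sell_count": int(np.random.randint(0, quantity // 10 + 1)),
    })
agents = pd.DataFrame(rows)  # one construction instead of 1000 no-op appends

# 2) cloudvolume boss __init__.py: create_boss() binds the source to `image`
#    but passes the undefined name `imagesrc` to CloudVolumePrecomputed — a
#    NameError the first time the factory runs. Corrected tail (sketch):
#      return CloudVolumePrecomputed(
#          meta, cache, config,
#          image,            # was: imagesrc
#          mesh=None, skeleton=None, mip=mip,
#      )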
The COPYRIGHT file at the top level of\n# this repository contains the full copyright notices and license terms.\nfrom trytond.model import ModelSQL, fields\nfrom trytond.transaction import Transaction\nfrom trytond.pool import Pool, PoolMeta\n\n\nclass Purchase(metaclass=PoolMeta):\n __name__ = 'purchase.purchase'\n invoice_lines = fields.Function(fields.Many2Many(\n 'account.invoice.line', None, None, \"Invoice Lines\"),\n 'get_invoice_lines', searcher='search_invoice_lines')\n invoice_lines_ignored = fields.Many2Many(\n 'purchase.purchase-ignored-account.invoice.line',\n 'purchase', 'invoice', 'Invoice Lines Ignored', readonly=True)\n\n def get_invoice_lines(self, name):\n return list({il.id for l in self.lines for il in l.invoice_lines})\n\n @classmethod\n def search_invoice_lines(cls, name, clause):\n return [('lines.' + clause[0],) + tuple(clause[1:])]\n\n def create_invoice(self):\n pool = Pool()\n Invoice = pool.get('account.invoice')\n InvoiceLine = pool.get('account.invoice.line')\n\n invoice = super(Purchase, self).create_invoice()\n\n if invoice:\n lines_to_delete = [l for l in invoice.lines if l.type != 'line']\n lines = [l for l in invoice.lines if l.type == 'line']\n InvoiceLine.write(lines, {\n 'invoice': None,\n 'invoice_type': invoice.type,\n 'party': invoice.party.id,\n })\n InvoiceLine.delete(lines_to_delete)\n Invoice.cancel([invoice])\n Invoice.delete([invoice])\n return None\n return invoice\n\n def get_invoice_state(self):\n state = super(Purchase, self).get_invoice_state()\n skips = set(x.id for x in self.invoice_lines_ignored)\n invoice_lines = [l for l in self.invoice_lines if l.id not in skips]\n if invoice_lines:\n if any(l.invoice and l.invoice.state == 'cancelled'\n for l in invoice_lines):\n return 'exception'\n elif (state == 'paid'\n and all(l.invoice for l in invoice_lines)\n and all(l.invoice.state == 'paid' for l in invoice_lines)):\n return 'paid'\n else:\n return 'waiting'\n return state\n\n @classmethod\n def copy(cls, purchases, default=None):\n if default is None:\n default = {}\n else:\n default = default.copy()\n default.setdefault('invoice_lines_ignored', None)\n return super(Purchase, cls).copy(purchases, default=default)\n\n\nclass PurchaseIgnoredInvoiceLine(ModelSQL):\n 'Purchase - Ignored Invoice Line'\n __name__ = 'purchase.purchase-ignored-account.invoice.line'\n _table = 'purchase_invoice_line_ignored_rel'\n purchase = fields.Many2One('purchase.purchase', 'Purchase',\n ondelete='CASCADE', select=True, required=True)\n invoice = fields.Many2One('account.invoice.line', 'Invoice Line',\n ondelete='RESTRICT', select=True, required=True)\n\n\nclass HandleInvoiceException(metaclass=PoolMeta):\n __name__ = 'purchase.handle.invoice.exception'\n\n def transition_handle(self):\n state = super(HandleInvoiceException, self).transition_handle()\n\n invoice_lines = []\n for invoice_line in self.record.invoice_lines:\n if (invoice_line.invoice\n and invoice_line.invoice.state == 'cancelled'):\n invoice_lines.append(invoice_line.id)\n if invoice_lines:\n self.model.write([self.record], {\n 'invoice_lines_ignored': [('add', invoice_lines)],\n })\n self.model.__queue__.process([self.record])\n return state\n","sub_path":"purchase_invoice_line_standalone/purchase.py","file_name":"purchase.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"134332039","text":"from sklearn.datasets import load_digits\nimport numpy as np\nfrom sklearn.ensemble import 
RandomForestClassifier\n\ndigits, labels = load_digits(return_X_y=True)\nprint(digits.shape,'###', labels.shape) #(1797, 64) ### (1797,)\n\npermutate = np.random.permutation(digits.shape[0])\n#print(permutate.shape)\nperm_digits, perm_labels = digits[permutate,:], labels[permutate]\nN = 200\ntest_data, test_labels= perm_digits[:N], perm_labels[:N]\nvali_data, vali_labels = perm_digits[N:2*N], perm_labels[N:2*N]\ntrain_data, train_labels = perm_digits[2*N:], perm_labels[2*N:]\nprint(vali_data.shape, vali_labels.shape)\nfor Nr in [5,10,20,100]:\n for Depth in [2,5,10,'pure']:\n d = None if Depth == \"pure\" else Depth\n clf = RandomForestClassifier(n_estimators=Nr, max_depth = d)\n clf.fit(train_data, train_labels)\n scores =clf.score(vali_data, vali_labels)\n print('Nr=', Nr, 'Depth=', Depth, scores)\n\nNr= 20\nDepth= 10\n#0.975\n\nNr= 100\nDepth= 10\nclf = RandomForestClassifier(n_estimators=Nr, max_depth=Depth)\nclf.fit(np.concatenate([train_data, vali_data]), np.concatenate([train_labels, vali_labels]))\nscores = clf.score(test_data, test_labels)\nprint(scores)\n","sub_path":"RandomForest/RF.py","file_name":"RF.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"555823350","text":"#encoding=utf-8\nimport sys\nimport io\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\n\nif __name__=='__main__':\n print(123)\n if len(sys.argv) > 1:\n menus = sys.argv[1]\n print(menus)\n","sub_path":"源代码/FoodStreet/build/classes/PythonFile/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"626465854","text":"import json\nimport sys\n\nfrom scripts.remediations import common\n\n\nclass PortErrors:\n\n junos_cmd = 'show interfaces {intf} statistics'\n awx_dc_drain_job_template = 47\n\n def __init__(self, logger, opts):\n self.logger = logger\n self.opts = opts\n\n def run(self, inp, args):\n # get interface stats frm device\n self.logger.info('Running remediation Port Errors for: {}, Id: {}'.format(\n inp['name'], inp['id']))\n device = inp['data']['device']\n out = {\n 'Port Errors': f\"{device}:{inp['data']['entity']}\",\n 'Description': f\"{inp['data'].get('description', '')}\",\n 'Start Time': f\"{inp['start_time']}\",\n }\n cmd = self.junos_cmd.format(intf=inp['data']['entity'])\n try:\n ip = common.nb_device_ip(self.opts.get('netbox_url'), device)\n output = common.run_junos_command(ip, cmd, self.opts)\n except common.CommonException as ex:\n self.logger.error(\n 'failed to run command on device: {} / {}: {}'.format(device, ip, ex))\n out[\"error\"] = f'Failed to run junos command: {ex}'\n common.exit(out, False)\n try:\n task_id = inp['data'].get('task_id')\n if task_id:\n common.add_issue_comment(self.opts, task_id, output)\n except common.CommonException as ex:\n self.logger.error('Failed to add task comment: {}'.format(ex))\n\n # implement auto drain for dc links only for now\n result = True\n if inp['data']['labels'].get('role', '') == 'dc' and args.auto_drain.lower() == 'true':\n if device in ['ps01-c1-chi1', 'ps02-c1-chi1']:\n peerDevice = inp['data']['labels'].get('peerDevice')\n if not peerDevice or not peerDevice.startswith('rs'):\n # exception for pod0 - dont drain any uplinks\n self.logger.info('Not draining any uplinks on pod0 PS')\n out['auto-drain'] = False\n common.exit(out, result)\n self.logger.info(\n f\"Attempting auto-drain of 
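# --- editor's note: hedged sketch for the RF.py grid search above ---
# The sample prints every (n_estimators, max_depth) score and then hard-codes
# the winning pair by hand (the stray `Nr= 20` / `Depth= 10` lines). Tracking
# the best pair inside the loop removes that manual step; variable names
# follow the sample's own.
best_score, best_params = -1.0, None
for n_estimators in [5, 10, 20, 100]:
    for max_depth in [2, 5, 10, None]:  # None == grow trees until pure
        clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth)
        clf.fit(train_data, train_labels)
        score = clf.score(vali_data, vali_labels)
        if score > best_score:
            best_score, best_params = score, (n_estimators, max_depth)
print("best:", best_params, best_score)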
{device}:{inp['data']['entity']}\")\n dry_run = 'true'\n if hasattr(args, 'no_dry_run'):\n dry_run = 'false'\n out, result = self.auto_drain_dc(\n out, device, inp['data']['entity'], dry_run)\n if result:\n self.logger.info('Auto drain successful')\n\n common.exit(out, result)\n\n def auto_drain_dc(self, out, device, interface, dry_run):\n result = False\n token = self.opts.get('awx_token')\n url = self.opts.get('awx_url')\n if not url or not token:\n self.logger.error('Missing AWX url or token, unable to auto-drain')\n out['auto-drain'] = False\n return out, result\n e = {'interface': interface, 'dry_run': dry_run, 'undrain': 'false'}\n if dry_run == 'true':\n self.logger.info('Performing dry-run drain/undrain')\n try:\n job_id, result = common.run_awx_job(\n url, token, self.awx_dc_drain_job_template, e, limit=device, timeout=120)\n out['auto-drained'] = result\n out['awx_job_id'] = job_id\n if result:\n out['message'] = (\n 'This interface has been auto-drained. Use https://awx.simulprod.com/#/templates/job_template/{} to undrain'.format(\n self.awx_dc_drain_job_template)\n )\n except Exception as ex:\n self.logger.error('Failed to run awx job: {}'.format(ex))\n out['error'] = f'Failed to auto-drain: {ex}'\n return out, result\n","sub_path":"scripts/remediations/port_errors.py","file_name":"port_errors.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"385574455","text":"# Create a data pipe.\npipe.create('subset', 'N-state')\n\n# Load the sequence from one of the PCS files.\nsequence.read('pcs_dy.txt', mol_name_col=1, res_num_col=2, res_name_col=3, spin_num_col=4, spin_name_col=5)\n\n# Deselect all spins.\ndeselect.all()\n\n# Select the five spins for the subset.\nselect.spin(':99@H')\nselect.spin(':108@H')\nselect.spin(':114@H')\nselect.spin(':119@H')\nselect.spin(':126@H')\n\n# Load the PCS data and write out the subset.\nlns = ['dy', 'er', 'tb', 'tm']\nfor ln in lns:\n # Read.\n pcs.read(align_id=ln, file='pcs_%s.txt'%ln, mol_name_col=1, res_num_col=2, res_name_col=3, spin_num_col=4, spin_name_col=5, data_col=6, error_col=7)\n\n # Write.\n pcs.write(align_id=ln, file='pcs_%s_subset.txt'%ln, force=True)\n","sub_path":"test_suite/shared_data/frame_order/cam/subset_pcs.py","file_name":"subset_pcs.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"174435021","text":"'''\nAdapted from https://github.com/rosinality/stylegan2-pytorch/blob/master/train.py\n'''\n\nimport os\ntry:\n import wandb\n\nexcept ImportError:\n wandb = None\n\nimport torch\nfrom torch import nn\nfrom torch.utils import data\nfrom torchvision import transforms, utils\nfrom tqdm import tqdm\nfrom GANs.styleganv2 import Generator, Discriminator\nfrom datas.dataset_utils import MultiResolutionDataset, data_sampler, sample_data\nfrom non_leaking import augment\nfrom losses import d_logistic_loss, d_r1_loss, g_nonsaturating_loss, g_path_regularize\n\nfrom train_utils import requires_grad, accumulate, mixing_noise\nfrom utils import stylegan_parser\nfrom optims import ACGD, BCGD\n\n\ndef train(args, loader, generator, discriminator, optimizer, g_ema, device):\n ckpt_dir = 'checkpoints/stylegan-acgd'\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n fig_dir = 'figs/stylegan-acgd'\n if not os.path.exists(fig_dir):\n os.makedirs(fig_dir)\n loader = sample_data(loader)\n pbar = range(args.iter)\n pbar = tqdm(pbar, 
initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)\n mean_path_length = 0\n\n r1_loss = torch.tensor(0.0, device=device)\n path_loss = torch.tensor(0.0, device=device)\n path_lengths = torch.tensor(0.0, device=device)\n mean_path_length_avg = 0\n loss_dict = {}\n if args.gpu_num > 1:\n g_module = generator.module\n d_module = discriminator.module\n else:\n g_module = generator\n d_module = discriminator\n accum = 0.5 ** (32 / (10 * 1000))\n ada_augment = torch.tensor([0.0, 0.0], device=device)\n ada_aug_p = args.augment_p if args.augment_p > 0 else 0.0\n ada_aug_step = args.ada_target / args.ada_length\n r_t_stat = 0\n\n sample_z = torch.randn(args.n_sample, args.latent, device=device)\n\n for idx in pbar:\n i = idx + args.start_iter\n\n if i > args.iter:\n print(\"Done!\")\n break\n\n real_img = next(loader)\n real_img = real_img.to(device)\n noise = mixing_noise(args.batch, args.latent, args.mixing, device)\n fake_img, _ = generator(noise)\n\n if args.augment:\n real_img_aug, _ = augment(real_img, ada_aug_p)\n fake_img, _ = augment(fake_img, ada_aug_p)\n else:\n real_img_aug = real_img\n\n fake_pred = discriminator(fake_img)\n real_pred = discriminator(real_img_aug)\n\n d_loss = d_logistic_loss(real_pred, fake_pred)\n # d_loss = fake_pred.mean() - real_pred.mean()\n loss_dict[\"loss\"] = d_loss.item()\n loss_dict[\"real_score\"] = real_pred.mean().item()\n loss_dict[\"fake_score\"] = fake_pred.mean().item()\n\n # d_regularize = i % args.d_reg_every == 0\n d_regularize = False\n if d_regularize:\n real_img_cp = real_img.clone().detach()\n real_img_cp.requires_grad = True\n real_pred_cp = discriminator(real_img_cp)\n r1_loss = d_r1_loss(real_pred_cp, real_img_cp)\n d_loss += args.r1 / 2 * r1_loss * args.d_reg_every\n loss_dict[\"r1\"] = r1_loss.item()\n\n # g_regularize = i % args.g_reg_every == 0\n g_regularize = False\n if g_regularize: # TODO adapt code for nn.DataParallel\n path_batch_size = max(1, args.batch // args.path_batch_shrink)\n noise = mixing_noise(path_batch_size, args.latent, args.mixing, device)\n fake_img, latents = generator(noise, return_latents=True)\n\n path_loss, mean_path_length, path_lengths = g_path_regularize(\n fake_img, latents, mean_path_length\n )\n generator.zero_grad()\n weighted_path_loss = args.path_regularize * args.g_reg_every * path_loss\n if args.path_batch_shrink:\n weighted_path_loss += 0 * fake_img[0, 0, 0, 0]\n\n d_loss += weighted_path_loss\n mean_path_length_avg = mean_path_length.item()\n\n loss_dict[\"path\"] = path_loss.mean().item()\n loss_dict[\"path_length\"] = path_lengths.mean().item()\n\n optimizer.step(d_loss)\n # update ada_aug_p\n if args.augment and args.augment_p == 0:\n ada_augment_data = torch.tensor(\n (torch.sign(real_pred).sum().item(), real_pred.shape[0]), device=device\n )\n ada_augment += ada_augment_data\n if ada_augment[1] > 255:\n pred_signs, n_pred = ada_augment.tolist()\n r_t_stat = pred_signs / n_pred\n if r_t_stat > args.ada_target:\n sign = 1\n else:\n sign = -1\n ada_aug_p += sign * ada_aug_step * n_pred\n ada_aug_p = min(1, max(0, ada_aug_p))\n ada_augment.mul_(0)\n\n accumulate(g_ema, g_module, accum)\n\n d_loss_val = loss_dict[\"loss\"]\n r1_val = loss_dict['r1']\n path_loss_val = loss_dict[\"path\"]\n real_score_val = loss_dict[\"real_score\"]\n fake_score_val = loss_dict[\"fake_score\"]\n path_length_val = loss_dict[\"path_length\"]\n\n pbar.set_description(\n (\n f\"d: {d_loss_val:.4f}; g: {d_loss_val:.4f}; r1: {r1_val:.4f}; \"\n f\"path: {path_loss_val:.4f}; mean path: {mean_path_length_avg:.4f}; 
\"\n f\"augment: {ada_aug_p:.4f}\"\n )\n )\n if wandb and args.wandb:\n wandb.log(\n {\n \"Generator\": d_loss_val,\n \"Discriminator\": d_loss_val,\n \"Augment\": ada_aug_p,\n \"Rt\": r_t_stat,\n \"R1\": r1_val,\n \"Path Length Regularization\": path_loss_val,\n \"Mean Path Length\": mean_path_length,\n \"Real Score\": real_score_val,\n \"Fake Score\": fake_score_val,\n \"Path Length\": path_length_val,\n }\n )\n if i % 100 == 0:\n with torch.no_grad():\n g_ema.eval()\n sample, _ = g_ema([sample_z])\n utils.save_image(\n sample,\n f\"figs/stylegan-acgd/{str(i).zfill(6)}.png\",\n nrow=int(args.n_sample ** 0.5),\n normalize=True,\n range=(-1, 1),\n )\n if i % 1000 == 0:\n torch.save(\n {\n \"g\": g_module.state_dict(),\n \"d\": d_module.state_dict(),\n \"g_ema\": g_ema.state_dict(),\n \"d_optim\": optimizer.state_dict(),\n \"args\": args,\n \"ada_aug_p\": ada_aug_p,\n },\n f\"checkpoints/stylegan-acgd/{str(i).zfill(6)}.pt\",\n )\n\n\nif __name__ == '__main__':\n torch.backends.cudnn.benchmark = True\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(device)\n parser = stylegan_parser()\n parser.add_argument('--optimizer', type=str, default='ACGD')\n parser.add_argument('--lr_d', type=float, default=1e-4)\n parser.add_argument('--lr_g', type=float, default=1e-4)\n parser.add_argument('--gpu_num', type=int, default=1)\n parser.add_argument('--tol', type=float, default=1e-10)\n parser.add_argument('--atol', type=float, default=1e-16)\n args = parser.parse_args()\n args.latent = 512\n args.n_mlp = 8\n args.start_iter = 0\n args.distributed =False\n\n generator = Generator(\n args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier\n ).to(device)\n discriminator = Discriminator(\n args.size, channel_multiplier=args.channel_multiplier\n ).to(device)\n g_ema = Generator(\n args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier\n ).to(device)\n g_ema.eval()\n accumulate(g_ema, generator, 0)\n g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)\n d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)\n optimizer = ACGD(max_params=generator.parameters(),\n min_params=discriminator.parameters(),\n lr_max=args.lr_g, lr_min=args.lr_d,\n tol=args.tol, atol=args.atol,\n device=device,\n beta=0.99 ** g_reg_ratio)\n if args.ckpt is not None:\n print(\"load model:\", args.ckpt)\n ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)\n try:\n ckpt_name = os.path.basename(args.ckpt)\n args.start_iter = int(os.path.splitext(ckpt_name)[0])\n except ValueError:\n pass\n generator.load_state_dict(ckpt[\"g\"])\n discriminator.load_state_dict(ckpt[\"d\"])\n g_ema.load_state_dict(ckpt[\"g_ema\"])\n optimizer.load_state_dict(ckpt[\"d_optim\"])\n # TODO: check the following two lines\n del ckpt\n torch.cuda.empty_cache()\n\n optimizer.set_lr(lr_max=args.lr_g, lr_min=args.lr_d)\n if args.gpu_num > 1:\n generator = nn.DataParallel(generator, list(range(args.gpu_num)))\n discriminator = nn.DataParallel(discriminator, list(range(args.gpu_num)))\n\n transform = transforms.Compose(\n [\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\n ]\n )\n\n dataset = MultiResolutionDataset(args.path, transform, args.size)\n loader = data.DataLoader(\n dataset,\n batch_size=args.batch,\n sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),\n drop_last=True,\n )\n\n if wandb is not None and args.wandb:\n 
wandb.init(project=\"styleganv2-acgd\",\n config={'lr_d': args.lr_d,\n 'lr_g': args.lr_g,\n 'Image size': args.size,\n 'Batchsize': args.batch,\n 'CG tolerance': args.tol}\n )\n train(args, loader, generator, discriminator, optimizer, g_ema, device)","sub_path":"train_stylegan_cgd.py","file_name":"train_stylegan_cgd.py","file_ext":"py","file_size_in_byte":10085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123382277","text":"'''\nimport requests\n\nfrom bs4 import BeautifulSoup\n\n\ndef main():\n resp = requests.get('https://v.taobao.com/v/content/live?catetype=704&from=taonvlang')\n soup = BeautifulSoup(resp.text, 'lxml')\n for img_tag in soup.select('img[src]'):\n print(img_tag.attrs['src'])\n\n\nif __name__ == '__main__':\n main()\n'''\n\n#运行上面的程序会发现没有任何的输出,因为页面的HTML代码上根本找不到标\n# 签。接下来我们使用Selenium来获取到页面上的动态内容,再提取主播图片。\n\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n\ndef main():\n driver = webdriver.Chrome()\n driver.get('https://v.taobao.com/v/content/live?catetype=704&from=taonvlang')\n soup = BeautifulSoup(driver.page_source, 'lxml')\n for img_tag in soup.body.select('img[src]'):\n print(img_tag.attrs['src'])\n\n\nif __name__ == '__main__':\n main()","sub_path":"day70 使用selenium.py","file_name":"day70 使用selenium.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"613301837","text":"#!/usr/bin/env python3\n\nimport pprint\nfrom pprint import pprint\n\n# Could get paths of a file\nimport pathlib\nfrom pathlib import Path\n\nimport json\n\nimport numpy as np\n\n# create path object for input dir\nfeatures_dir = Path('.')\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use('TkAgg')\n\n# PCA library\nfrom sklearn.decomposition import PCA\n\n# our little library :)\n\n#Function that gets the vector & cleans it.\ndef get_vectors(dir_path):\n token_values = {}\n # find all .jsonl files in features_dir\n for path in features_dir.glob(dir_path):\n # convert path obj to str\n str_path = str(path)\n\n # debug print out\n print('\\n########################')\n print('Starting: %s' % str_path)\n print('########################\\n')\n\n # open the current file & load the json files in a dictionary\n with open(str_path, 'r') as f: \n # decode json into data variable\n data = json.load(f)\n\n # iterate over the json data & throw out CLS & SEP\n for feature in data['features']:\n if feature['token'] in ['[CLS]', '[SEP]']:\n continue\n print('Token: %s' % feature['token'])\n\n # initialize new dict\n token_values[feature['token']] = {}\n\n # alias new dictionary\n token_dict = token_values[feature['token']]\n\n # loop over the word vector stuff\n for vector in feature['layers']:\n print(' Index: %d' % vector['index'])\n print(' Values: (%d)' % len(vector['values']))\n \n #stores the token values inside the token_dict, which contains (index & values)\n token_dict[vector['index']] = np.array(vector['values'])\n\n # loop over each value in hidden layer values\n for n in vector['values']:\n print(' %f' % n)\n return token_values\n\ndebug_vectors = get_vectors('debug/*.jsonl')\n\nfor token, indices in debug_vectors.items():\n for index, values in indices.items():\n print(index, values)\n break\n 
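# --- editor's note: hedged fix for the train() loop above ---
# With d_regularize and g_regularize hard-coded to False, loss_dict never
# receives "r1", "path" or "path_length", so the logging block raises KeyError
# on the very first step; note also that the progress bar and wandb both log
# d_loss under the generator's name. Reading with defaults keeps the loop alive:
r1_val = loss_dict.get("r1", 0.0)            # was: loss_dict['r1']
path_loss_val = loss_dict.get("path", 0.0)
path_length_val = loss_dict.get("path_length", 0.0)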
break\n","sub_path":"old_data/get_vec.py","file_name":"get_vec.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"631164881","text":"from piece import Men\nfrom piece import King\nimport time\n\n\nclass Board:\n def __init__(self, cols, rows):\n self.board = [[0 for _ in range(cols)] for _ in range(rows)]\n self.turn = 'white'\n self.extra_atack = None\n self.p1Name = \"Player 1\"\n self.p2Name = \"Player 2\"\n self.p1Time = 300\n self.p2Time = 300\n self.p1PrevTime = self.p1Time\n self.p2PrevTime = self.p2Time\n self.winner = None\n self.must_atack = False\n self.ready = False\n self.start_time = 0\n self.moves_list = []\n\n self.board[1][0] = Men('black')\n self.board[3][0] = Men('black')\n self.board[5][0] = Men('black')\n self.board[7][0] = Men('black')\n\n self.board[0][1] = Men('black')\n self.board[2][1] = Men('black')\n self.board[4][1] = Men('black')\n self.board[6][1] = Men('black')\n\n self.board[1][2] = Men('black')\n self.board[3][2] = Men('black')\n self.board[5][2] = Men('black')\n self.board[7][2] = Men('black')\n\n self.board[0][5] = Men('white')\n self.board[2][5] = Men('white')\n self.board[4][5] = Men('white')\n self.board[6][5] = Men('white')\n\n self.board[3][6] = Men('white')\n self.board[1][6] = Men('white')\n self.board[5][6] = Men('white')\n self.board[7][6] = Men('white')\n\n self.board[0][7] = Men('white')\n self.board[2][7] = Men('white')\n self.board[4][7] = Men('white')\n self.board[6][7] = Men('white')\n\n self.update_moves()\n\n def make_move(self, start_c, start_r, c, r):\n start_cell = self.board[start_c][start_r]\n if not self.must_atack and start_cell != 0 and \\\n (start_c, start_r, c, r) in start_cell.moves and \\\n self.turn == start_cell.get_color():\n self.board[c][r] = self.board[start_c][start_r]\n self.board[start_c][start_r] = 0\n self.change_turn()\n self.check_king()\n self.update_moves()\n self.moves_list.append(f'{start_c}{start_r}{c}{r}')\n\n def make_atack(self, start_c, start_r, c, r):\n start_cell = self.board[start_c][start_r]\n if self.must_atack and start_cell != 0 and \\\n (start_c, start_r, c, r) in start_cell.atacks and \\\n (self.extra_atack is None or\n self.extra_atack == (start_c, start_r)) and \\\n self.turn == start_cell.get_color():\n self.board[c][r] = self.board[start_c][start_r]\n self.board[start_c][start_r] = 0\n self.clear_diag(start_c, start_r, c, r)\n self.check_winner()\n self.check_king()\n self.board[c][r].valid_atacks(c, r, self.board)\n if self.board[c][r].atacks == set():\n self.change_turn()\n self.extra_atack = None\n else:\n self.extra_atack = (c, r)\n self.update_moves()\n self.moves_list.append(f'{start_c}{start_r}{c}{r}')\n\n def change_turn(self):\n self.start_time = time.time()\n if self.turn == 'white':\n self.turn = 'black'\n self.p1PrevTime = self.p1Time\n else:\n self.turn = 'white'\n self.p2PrevTime = self.p2Time\n\n def clear_diag(self, start_c, start_r, finish_c, finish_r):\n # DOWN_RIGHT\n if finish_c > start_c and finish_r > start_r:\n while start_c < (finish_c - 1):\n start_c += 1\n start_r += 1\n self.board[start_c][start_r] = 0\n # DOWN_LEFT\n elif finish_c < start_c and finish_r > start_r:\n while start_c > (finish_c + 1):\n start_c -= 1\n start_r += 1\n self.board[start_c][start_r] = 0\n # UP_LEFT\n elif finish_c < start_c and finish_r < start_r:\n while start_c > (finish_c + 1):\n start_c -= 1\n start_r -= 1\n self.board[start_c][start_r] = 0\n # UP_RIGHT\n elif finish_c > start_c and 
finish_r < start_r:\n while start_c < (finish_c - 1):\n start_c += 1\n start_r -= 1\n self.board[start_c][start_r] = 0\n else:\n print('[ERROR] Wrong data in clear_diag()')\n\n def must_to_atack(self):\n for col in self.board:\n for cell in col:\n if cell != 0 and cell.get_color() == self.turn and \\\n cell.atacks != set():\n self.must_atack = True\n return None\n self.must_atack = False\n\n def check_king(self):\n for c, col in enumerate(self.board):\n if col[7] != 0 and col[7].get_color() == 'black' and \\\n col[7].get_type() == 'men':\n self.board[c][7] = King('black')\n if self.extra_atack is not None:\n self.extra_atack = (c, 7)\n for c, col in enumerate(self.board):\n if col[0] != 0 and col[0].get_color() == 'white' and \\\n col[0].get_type() == 'men':\n self.board[c][0] = King('white')\n if self.extra_atack is not None:\n self.extra_atack = (c, 0)\n\n def check_winner(self):\n count_white, count_black, winner = 0, 0, 0\n for col in self.board:\n for cell in col:\n if cell != 0:\n winner = cell\n if cell.get_color() == 'white':\n count_white += 1\n else:\n count_black += 1\n if min(count_white, count_black) > 0:\n return None\n self.winner = winner.get_color()\n\n def update_moves(self):\n for c, col in enumerate(self.board):\n for r, cell in enumerate(col):\n if cell != 0:\n cell.valid_atacks(c, r, self.board)\n cell.valid_moves(c, r, self.board)\n\n self.must_to_atack()\n","sub_path":"src/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"189650275","text":"\"\"\"QuartzNet, acoustic model for speech recognition \nbased on paper: https://arxiv.org/abs/1910.10261\n\"\"\"\n\nfrom typing import List, Union\nfrom dataclasses import dataclass\n\nimport torch\nfrom torch.nn.modules import dropout, padding\nfrom torch.utils import data\n\nfrom src.nn import (\n TorchModule,\n DepthwiseSeperableConv1D\n)\n\n\n@dataclass\nclass PreConfig:\n input_channels: int\n kernel_size: int\n filter_size: int\n dropout: float\n\n\n@dataclass\nclass BlockConfig:\n input_channels: List[int]\n filters: List[int]\n kernels: List[int]\n drop_rates: List[int]\n repeat: List[int] = 0\n\n def __str__(self) -> str:\n return f\"neural config with {len(self.input_channels)} stack of convolution\"\n\n\n@dataclass\nclass PostConfig:\n input_channels: List[int]\n filters: List[int]\n kernels: List[int]\n drop_rates: List[int]\n\n\nclass PreBlock(TorchModule):\n def __init__(self, config: PreConfig) -> None:\n super().__init__()\n\n self.conv = torch.nn.Conv1d(\n in_channels=config.input_channels,\n out_channels=config.filter_size,\n kernel_size=config.kernel_size,\n )\n\n self.drop = torch.nn.Dropout(config.dropout)\n self.norm = torch.nn.BatchNorm1d(config.filter_size)\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n return self.drop(self.relu(self.norm(self.conv(x))))\n\n\ndef factory_gen(config: Union[BlockConfig, PostConfig]):\n\n if isinstance(config, PostConfig):\n return zip(\n config.input_channels,\n config.filters,\n config.kernels,\n config.drop_rates,\n )\n\n return zip(\n config.input_channels,\n config.filters,\n config.kernels,\n config.drop_rates,\n config.repeat\n )\n\n\nclass PostBlock(TorchModule):\n def __init__(self, config: PostConfig) -> None:\n super().__init__()\n\n self.model = torch.nn.Sequential()\n\n for idx, cell_config in enumerate(factory_gen(config), start=1):\n # extract parameters for each single cell\n in_channels, out_channels, kernel, drop = 
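# --- editor's note: hedged guard for Board.check_winner() above ---
# If both piece counts were ever zero, `winner` would still be the integer 0
# and winner.get_color() would raise AttributeError. The degenerate case is
# unlikely mid-game but costs only one condition to rule out:
#     if min(count_white, count_black) > 0 or winner == 0:
#         return None
#     self.winner = winner.get_color()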
cell_config\n\n self.model.add_module(\n f\"Conv1d::{idx}\",\n torch.nn.Conv1d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel,\n )\n )\n\n self.model.add_module(f\"Drop::{idx}\", torch.nn.Dropout(drop))\n self.model.add_module(\n f\"BatchNorm::{idx}\",\n torch.nn.BatchNorm1d(out_channels)\n )\n self.model.add_module(\n f\"RELU::{idx}\",\n torch.nn.ReLU()\n )\n\n self.pointwise = torch.nn.Conv1d(\n in_channels=config.filters[-1],\n out_channels=config.filters[-1],\n kernel_size=1\n )\n\n def forward(self, x):\n return self.pointwise(self.model(x))\n\n\nclass QuartzSubBlock(TorchModule):\n def __init__(self, in_channels, out_channels, kernel_size, drop) -> None:\n super().__init__()\n\n self.conv = DepthwiseSeperableConv1D(\n input_channels=in_channels,\n output_channels=out_channels,\n kernel_size=kernel_size,\n dropout=drop\n )\n\n self.norm = torch.nn.BatchNorm1d(out_channels)\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n return self.relu(self.norm(self.conv(x)))\n\n\nclass TimeChannel(TorchModule):\n def __init__(self, in_channels, out_channels, kernel_size, drop) -> None:\n super().__init__()\n\n self.conv = DepthwiseSeperableConv1D(\n input_channels=in_channels,\n output_channels=out_channels,\n kernel_size=kernel_size,\n stride=1,\n padding=kernel_size//2,\n dropout=drop\n )\n\n self.point_wise = torch.nn.Conv1d(\n in_channels=out_channels,\n out_channels=out_channels,\n kernel_size=1\n )\n\n self.norm = torch.nn.BatchNorm1d(out_channels)\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n x = self.point_wise(self.conv(x))\n return self.relu(self.norm(x))\n\n\nclass QuartzSubBlock(TorchModule):\n def __init__(self, in_channels, out_channels, kernel_size, drop, repeat) -> None:\n super().__init__()\n\n self.pipe = torch.nn.Sequential()\n\n self.pipe.add_module(\n \"TimeChannel::first\",\n TimeChannel(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n drop=drop\n )\n )\n\n for idx in range(repeat - 1):\n self.pipe.add_module(\n \"TimeChannel::{}\".format(idx),\n TimeChannel(\n in_channels=out_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n drop=drop\n )\n )\n\n self.last_sub_block = torch.nn.Sequential(\n DepthwiseSeperableConv1D(\n input_channels=out_channels,\n output_channels=out_channels,\n kernel_size=kernel_size,\n padding=kernel_size//2\n ),\n torch.nn.Conv1d(out_channels, out_channels, kernel_size=1),\n torch.nn.BatchNorm1d(out_channels)\n )\n\n self.residual = torch.nn.Sequential(\n torch.nn.Conv1d(in_channels, out_channels, kernel_size=1),\n torch.nn.BatchNorm1d(out_channels)\n )\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n output = self.pipe(x)\n residual = self.residual(x)\n output = self.last_sub_block(output) + residual\n return self.relu(output)\n\n\nclass QuartzBlock(TorchModule):\n def __init__(self, config: BlockConfig) -> None:\n super().__init__()\n\n self.model = torch.nn.Sequential()\n\n for idx, cell_config in enumerate(factory_gen(config), start=1):\n in_channels, out_channels, kernel, drop, repeat = cell_config\n\n self.model.add_module(\n \"Sub-Block::{}\".format(idx),\n QuartzSubBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel,\n drop=drop,\n repeat=repeat\n )\n )\n\n def forward(self, x):\n return self.model(x)\n\n\nclass QuartzNet(TorchModule):\n def __init__(\n self,\n pre_config: PreConfig,\n block_config: BlockConfig,\n post_config: PostConfig,\n ):\n\n super().__init__()\n\n self.pre = 
PreBlock(pre_config)\n self.blocks = QuartzBlock(block_config)\n self.post = PostBlock(post_config)\n\n def forward(self, x):\n return self.post(self.blocks(self.pre(x)))\n","sub_path":"src/nn/quartz.py","file_name":"quartz.py","file_ext":"py","file_size_in_byte":6953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"334395937","text":"import urllib.request, urllib.parse\nimport requests\nimport json\nimport math\nimport ssl\nimport pymysql.cursors\nimport os\nimport time\nimport get_proxy_list\nimport sys\nimport time\nimport hashlib\nimport requests\nimport random\nimport threading\n\n\ndef mysql_configure(): # mysql配置\n conn = pymysql.connect(host='122.112.248.56', user='dwts', passwd='12121992', port=3306, charset='utf8mb4')\n conn.select_db('dwts')\n # conn = pymysql.connect(host='115.182.4.116', user='assess', passwd='Abcd1234', port=3306, charset='utf8')\n # conn.select_db('assess')\n return conn.cursor(), conn\n\n\ndef configureRoot():\n target_root = os.path.expanduser('~') + '\\\\bilibili_v2'\n if not os.path.exists(target_root):\n os.makedirs(target_root)\n return target_root\n\n\n# 使用x代理\ndef generateRequestProxyV2():\n _version = sys.version_info\n\n is_python3 = (_version[0] == 3)\n\n orderno = \"ZF20184206305g2nCdV\"\n secret = \"7a3eb1b3b91b4b44ae774e6f96152da1\"\n ip = \"forward.xdaili.cn\"\n port = \"80\"\n ip_port = ip + \":\" + port\n timestamp = str(int(time.time())) # 计算时间戳\n string = \"orderno=\" + orderno + \",\" + \"secret=\" + secret + \",\" + \"timestamp=\" + timestamp\n\n if is_python3:\n string = string.encode()\n md5_string = hashlib.md5(string).hexdigest() # 计算sign\n sign = md5_string.upper() # 转换成大写\n auth = \"sign=\" + sign + \"&\" + \"orderno=\" + orderno + \"&\" + \"timestamp=\" + timestamp\n proxy = {\"http\": \"http://\" + ip_port, \"https\": \"https://\" + ip_port}\n headers = {\"Proxy-Authorization\": auth}\n\n return headers, proxy\n # r = requests.get(url, headers=headers, proxies=proxy, verify=False, allow_redirects=False)\n\n\n# 切换代理\ndef generateRequestProxy():\n global global_proxy_list\n global global_proxy_flag\n global global_proxy_addr\n\n if global_proxy_flag:\n choose_proxy()\n global_proxy_addr = global_proxy_list.pop()\n # 创建ProxyHandler\n proxy_support = urllib.request.ProxyHandler(global_proxy_addr)\n # 创建Opener\n opener = urllib.request.build_opener(proxy_support)\n # 安装OPener\n urllib.request.install_opener(opener)\n print('proxy started use' + str(global_proxy_addr))\n\n\n# 模拟request请求\ndef requestConf(avid, pn):\n global global_proxy_flag\n # time.sleep(random.randint(1, 3))\n\n time.sleep(random.randint(300, 600)/1000)\n if global_proxy_flag:\n headers, proxy = generateRequestProxyV2()\n else:\n headers = {}\n proxy = {}\n params = urllib.parse.urlencode({'oid': int(avid), 'pn': int(pn), 'type': 1, 'sort': 0, 'psize': 20})\n url = 'https://api.bilibili.com/x/v2/reply?%s' % params\n print(url)\n\n try:\n jscontent = requests.get(url, headers=headers, proxies=proxy, verify=False, allow_redirects=False)\n # print(jscontent.content.decode('utf-8','ignore'))\n return jscontent.content.decode('utf-8', 'ignore')\n except Exception:\n raise\n\n\n# 保存信息 savetype 1:mysql 2:txt\ndef saveInfo(uid, uname, message, rtime, avid, pn, rpid, up_id, user_sex, user_level, user_vip, savetype=1):\n if savetype == 1:\n try:\n cur.execute(\n 'INSERT INTO bilibili_comment '\n '(id,av,up_id,comment,user_id,user_name,user_level,user_vip,user_sex,part_date,reply_id,page_num) '\n 'VALUES 
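# --- editor's note: hedged fixes for the quartz.py sample above ---
# `class QuartzSubBlock` is defined twice; the second definition silently
# replaces the first, so the 4-argument version is dead code and should be
# deleted. BlockConfig also gives the List field `repeat` a scalar default;
# a default_factory keeps the type honest and avoids shared mutable state:
from dataclasses import dataclass, field
from typing import List

@dataclass
class BlockConfig:
    input_channels: List[int]
    filters: List[int]
    kernels: List[int]
    drop_rates: List[int]
    repeat: List[int] = field(default_factory=list)  # was: repeat: List[int] = 0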
(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',\n [0, avid, up_id, message, uid, uname, user_level, user_vip, user_sex,\n time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(rtime)), rpid, pn])\n except Exception as e:\n print('message is ' + message)\n raise\n elif savetype == 2:\n target_root = configureRoot()\n target_file = '%s\\\\%s.txt' % (target_root, avid)\n with open(target_file, 'a', encoding='utf-8') as f: # uid replyid pn\n f.write(\n str(uid) + '\\t' + str(rpid) + '\\t' + str(pn) + '\\t' + str(uname) + '\\t' + time.strftime(\n \"%Y-%m-%d %H:%M:%S\",\n time.localtime(\n rtime)) + '\\t' + str(\n message).replace('\\n', ' ') + '\\n')\n\n else:\n print('invalid type to save')\n\n\n# 处理json数据\ndef analyzeJson(jsData):\n mid = jsData['member']['mid']\n name = jsData['member']['uname']\n sign = jsData['content']['message']\n rtime = jsData['ctime']\n rpid = jsData['rpid']\n u_level = jsData['member']['level_info']['current_level']\n u_vip = jsData['member']['vip']['vipType']\n gender = jsData['member']['sex']\n\n return {'uid': mid, 'uname': name, 'message': sign, 'rtime': rtime, 'rpid': rpid, 'user_sex': gender,\n 'user_level': u_level, 'user_vip': u_vip}\n\n\ndef jsonDecodeHtml(page_info):\n jsDict = json.loads(page_info)\n return jsDict\n\n\n# 处理单个av号\ndef procAvid(avid):\n global global_proxy_addr\n msg = ''\n if not global_proxy_addr == {}:\n msg = '[!代理使用:%s]' % str(global_proxy_addr)\n\n print(' %s处理av%s 第一页' % (msg, avid))\n first_page_info = requestConf(avid, 1)\n jsDict = jsonDecodeHtml(first_page_info)\n if jsDict['code'] == 0:\n jsData = jsDict['data']\n jsPages = jsData['page']\n pageMax = math.ceil(jsPages['count'] / jsPages['size'])\n jsReplys = jsData['replies']\n uper_id = jsData['upper']['mid'] # up主人id\n\n for jsReply in jsReplys: # 处理第一页数据\n saveInfo(**analyzeJson(jsReply), avid=avid, pn=1, up_id=uper_id)\n for pn in range(2, pageMax + 1): # 处理后续页面数据\n print('%s处理av%s 第%d页' % (msg, avid, pn))\n poc(avid, pn)\n\n\ndef getNextAvid(): # 获取第一个未处理的avid\n cur.execute(\"SELECT avid FROM bilibili_av_list WHERE pro_flag = 0 ORDER BY id Asc Limit 1\")\n result = cur.fetchall()\n\n if result:\n return result[0][0]\n else:\n return None\n\n\ndef switchFlag(avid):#改变标识\n cur.execute(\"UPDATE bilibili_av_list set pro_flag = 1,proc_date_time = NOW() WHERE avid = %s \", [avid])\n conn.commit()\n\n\n# 获取av号列表\ndef proc_avids():\n global global_proxy_addr\n global global_proxy_list\n global global_proxy_flag\n\n av_id = getNextAvid()\n print(av_id)\n while av_id: # 存在待处理avid\n try:\n procAvid(int(av_id))\n print('commit av%s 数据到mysql' % av_id)\n switchFlag(av_id)\n print('保存成功')\n proc_avids()\n except Exception as e:\n print('Exception occurs ' + e.__str__())\n print('change urllib')\n print('rollback av%s 数据到mysql' % av_id)\n conn.rollback()\n if global_proxy_flag:\n generateRequestProxyV2() # 生成代理\n print('用新代理处理av%s' % av_id)\n procAvid(int(av_id))\n else:\n print('运行到av%s被限制,无法继续' % av_id)\n exit()\n\n\n# generateRequestProxyV2() # 生成代理\n# target_root = os.path.expanduser('~') + '/哔哩哔哩AV号.txt'\n# with open(target_root, 'r') as f:\n# for line in f.readlines(): # 逐行获取每一行av号\n# try:\n# procAvid(int(line))\n# print('commit av%s 数据到mysql' % line)\n# conn.commit()\n# print('保存成功')\n# except Exception as e:\n# print('Exception occurs ' + e.__str__())\n# print('change urllib')\n# print('rollback av%s 数据到mysql' % line)\n# conn.rollback()\n# if global_proxy_flag:\n# generateRequestProxyV2() # 生成代理\n# print('用新代理处理av%s' % line)\n# procAvid(int(line))\n# else:\n# print('运行到av%s被限制,无法继续' % 
line)\n# exit()\n\n\n# 处理数据并保存\ndef poc(avid, pn):\n jscontent = requestConf(avid, pn)\n jsDict = jsonDecodeHtml(jscontent)\n if jsDict['code'] == 0:\n jsData = jsDict['data']\n jsReplys = jsData['replies']\n uper_id = jsData['upper']['mid'] # up主人id\n for jsReply in jsReplys:\n saveInfo(**analyzeJson(jsReply), avid=avid, pn=pn, up_id=uper_id)\n\n\n# 获取代理ip列表\ndef choose_proxy():\n global global_proxy_list\n if len(global_proxy_list) <= 0:\n print('获取代理IP列表')\n for proxy in get_proxy_list.get_result_proxy():\n global_proxy_list.append(proxy)\n global_proxy_list.append({})\n\n print('获取成功' + global_proxy_list.__str__())\n return global_proxy_list\n\n\nglobal_proxy_switch = time.time() # 切换时间记录,>0当前正在使用代理,<0当前未使用代理\nglobal_proxy_flag = False # 全局切换代理标识\nglobal_proxy_list = [] # 全局代理存储list\nglobal_proxy_addr = {} # 代理实体\nglobal_open = None\nglobal_request = None\n\nssl._create_default_https_context = ssl._create_unverified_context # 关闭ssl证书验证\nrequests.packages.urllib3.disable_warnings()\ncur, conn = mysql_configure() # 初始化数据库\n\nproc_avids() # 开始处理\n# procAvid(10150031)\nprint('Success')\n","sub_path":"scrapy/scrapyBilibiliV2UseXdaili/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":9239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"205892950","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\nCode from QgisCloudPluginDialog\n A QGIS plugin\n Publish maps on qgiscloud.com\n -------------------\n begin : 2011-04-04\n copyright : (C) 2011 by Sourcepole\n email : pka@sourcepole.ch\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. 
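# --- editor's note: hedged rework of proc_avids() in test3.py above ---
# The sample re-enters proc_avids() recursively from inside its own while-loop
# after every AV id, so a long queue exhausts Python's recursion limit. Since
# getNextAvid() always returns the first unprocessed id, a plain loop gives the
# same retry behaviour (names as in the sample; proxy rotation elided):
def proc_avids():
    av_id = getNextAvid()
    while av_id:
        try:
            procAvid(int(av_id))
            switchFlag(av_id)       # commit and mark the row processed
        except Exception as e:
            print('Exception occurs ' + str(e))
            conn.rollback()
            if not global_proxy_flag:
                break               # no proxy to rotate to; stop instead of recursing
        av_id = getNextAvid()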
*\n * *\n ***************************************************************************/\n\"\"\"\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtXml import *\nfrom qgis.core import *\n\nfrom pyarchinit_US_mainapp import pyarchinit_US\nfrom pyarchinit_Site_mainapp import pyarchinit_Site\nfrom pyarchinit_Periodizzazione_mainapp import pyarchinit_Periodizzazione\nfrom pyarchinit_Struttura_mainapp import pyarchinit_Struttura\nfrom pyarchinit_Inv_Materiali_mainapp import pyarchinit_Inventario_reperti\nfrom pyarchinit_Upd_mainapp import pyarchinit_Upd_Values\nfrom pyarchinitConfigDialog import pyArchInitDialog_Config\nfrom pyarchinitInfoDialog import pyArchInitDialog_Info\nfrom pyarchinit_Gis_Time_controller import pyarchinit_Gis_Time_Controller\nfrom pyarchinit_image_viewer_main import Main\nfrom pyarchinit_Schedaind_mainapp import pyarchinit_Schedaind\nfrom pyarchinit_Detsesso_mainapp import pyarchinit_Detsesso\nfrom pyarchinit_Deteta_mainapp import pyarchinit_Deteta\nfrom pyarchinit_Tafonomia_mainapp import pyarchinit_Tafonomia\nfrom pyarchinit_Archeozoology_mainapp import pyarchinit_Archeozoology\nfrom pyarchinit_UT_mainapp import pyarchinit_UT\nfrom pyarchinit_images_directory_export_mainapp import pyarchinit_Images_directory_export\n#from pyarchinit_PDF_administrator_mainapp import pyarchinit_PDFAdministrator\nfrom pyarchinit_pdf_export_mainapp import pyarchinit_pdf_export\nfrom ui_pyarchinitplugin import Ui_PyarchinitPlugin\n\n\n#from ui_login import Ui_LoginDialog\n##from qgiscloudapi.qgiscloudapi import *\n##from db_connections import DbConnections\n##from local_data_sources import LocalDataSources\n##from data_upload import DataUpload\n##import os.path\n##import sys\n##import urllib\n##import traceback\n##import re\n##import time\n##import platform\n\nclass PyarchinitPluginDialog(QDockWidget):\n\t\n\n\tdef __init__(self, iface):\n\t\tQDockWidget.__init__(self, None)\n\t\tself.iface = iface\n## self.version = version\n\t\t# Set up the user interface from Designer.\n\t\tself.ui = Ui_PyarchinitPlugin()\n\t\tself.ui.setupUi(self)\n\t\tQObject.connect(self.ui.btnUStable, SIGNAL(\"clicked()\"), self.runUS)\n\t\tQObject.connect(self.ui.btnUStable_2, SIGNAL(\"clicked()\"), self.runUS)\n\n\t\tQObject.connect(self.ui.btnStrutturatable, SIGNAL(\"clicked()\"), self.runStruttura)\n\t\tQObject.connect(self.ui.btnPeriodotable, SIGNAL(\"clicked()\"), self.runPer)\n\n\t\tQObject.connect(self.ui.btnSitotable, SIGNAL(\"clicked()\"), self.runSite)\n\t\tQObject.connect(self.ui.btnSitotable_2, SIGNAL(\"clicked()\"), self.runSite)\n\n\t\tQObject.connect(self.ui.btnReptable, SIGNAL(\"clicked()\"), self.runInr)\n\t\tQObject.connect(self.ui.btnReptable_2, SIGNAL(\"clicked()\"), self.runInr)\n\t\tQObject.connect(self.ui.btnReptable_3, SIGNAL(\"clicked()\"), self.runInr)\n\t\n\t\tQObject.connect(self.ui.btnMedtable, SIGNAL(\"clicked()\"), self.runImageViewer)\n\t\tQObject.connect(self.ui.btnExptable, SIGNAL(\"clicked()\"), self.runImages_directory_export)\n\t\t\n\t\tQObject.connect(self.ui.btnPDFmen, SIGNAL(\"clicked()\"), self.runPDFadministrator)\n\t\tQObject.connect(self.ui.btnUTtable, SIGNAL(\"clicked()\"), self.runUT)\n\n\tdef runSite(self):\n\t\tpluginGui = pyarchinit_Site(self.iface)\n\t\tpluginGui.show()\n\t\tself.pluginGui = pluginGui # save\n\t\t\n\tdef runPer(self):\n\t\tpluginGui = pyarchinit_Periodizzazione(self.iface)\n\t\tpluginGui.show()\n\t\tself.pluginGui = pluginGui # save\n\n\tdef runStruttura(self):\n\t\tpluginGui = 
pyarchinit_Struttura(self.iface)\n\t\tpluginGui.show()\n\t\tself.pluginGui = pluginGui # save\n\n\tdef runUS(self):\n\t\tpluginGui = pyarchinit_US(self.iface)\n\t\tpluginGui.show()\n\t\tself.pluginGui = pluginGui # save\n\n\tdef runInr(self):\n\t\tpluginGui = pyarchinit_Inventario_reperti(self.iface)\n\t\tpluginGui.show()\n\t\tself.pluginGui = pluginGui # save\n\n\tdef runGisTimeController(self):\n\t\tpluginGui = pyarchinit_Gis_Time_Controller(self.iface)\n\t\tpluginGui.show()\n\t\tself.pluginGui = pluginGui # save\n\n\tdef runUpd(self):\n\t\tpluginGui = pyarchinit_Upd_Values(self.iface)\n\t\tpluginGui.show()\n\t\tself.pluginGui = pluginGui # save\n\n\tdef runConf(self):\n\t\tpluginConfGui = pyArchInitDialog_Config()\n\t\tpluginConfGui.show()\n\t\tself.pluginGui = pluginConfGui # save\n\n\tdef runInfo(self):\n\t\tpluginInfoGui = pyArchInitDialog_Info()\n\t\tpluginInfoGui.show()\n\t\tself.pluginGui = pluginInfoGui # save\n\n\tdef runImageViewer(self):\n\t\tpluginImageView = Main()\n\t\tpluginImageView.show()\n\t\tself.pluginGui = pluginImageView # save\n\t\t\n\tdef runImages_directory_export(self):\n\t\tpluginImage_directory_export = pyarchinit_Images_directory_export()\n\t\tpluginImage_directory_export.show()\n\t\tself.pluginGui = pluginImage_directory_export # save\n\n\tdef runTafonomia(self):\n\t\tpluginTafonomia = pyarchinit_Tafonomia(self.iface)\n\t\tpluginTafonomia.show()\n\t\tself.pluginGui = pluginTafonomia # save\n\n\tdef runSchedaind(self):\n\t\tpluginIndividui = pyarchinit_Schedaind(self.iface)\n\t\tpluginIndividui.show()\n\t\tself.pluginGui = pluginIndividui # save\n\n\tdef runDetsesso(self):\n\t\tpluginSesso = pyarchinit_Detsesso(self.iface)\n\t\tpluginSesso.show()\n\t\tself.pluginGui = pluginSesso # save\n\n\tdef runDeteta(self):\n\t\tpluginEta = pyarchinit_Deteta(self.iface)\n\t\tpluginEta.show()\n\t\tself.pluginGui = pluginEta # save\n\n\tdef runArcheozoology(self):\n\t\tpluginArchezoology = pyarchinit_Archeozoology(self.iface)\n\t\tpluginArchezoology.show()\n\t\tself.pluginGui = pluginArchezoology # save\n\n\tdef runUT(self):\n\t\tpluginUT = pyarchinit_UT(self.iface)\n\t\tpluginUT.show()\n\t\tself.pluginGui = pluginUT # save\n\n\tdef runPDFadministrator(self):\n\t\tpluginPDFadmin = pyarchinit_pdf_export()\n\t\tpluginPDFadmin.show()\n\t\tself.pluginGui = pluginPDFadmin # save\n","sub_path":"pyarchinit/pyarchinitplugindialog.py","file_name":"pyarchinitplugindialog.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"171906204","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\n\nimport logging\n\nfrom openerp import api, fields, models, _\nfrom openerp.exceptions import UserError, ValidationError\nfrom openerp.tools.safe_eval import safe_eval as eval\n\n_logger = logging.getLogger(__name__)\n\n\n\nclass AccountInvoiceLine(models.Model):\n _inherit = 'account.invoice.line'\n\n ''' New field Commision Bonus\n\n Commision is calculated from the product\n '''\n\n @api.one\n @api.depends('price_unit', 'product_id', 'price_subtotal')\n def _get_commision_bonus(self):\n if self.price_unit and self.product_id:\n if self.product_id.commision_type == 'percentage':\n self.commision_bonus = self.price_subtotal * self.product_id.commision_rate / 100\n elif self.product_id.commision_type == 'fixed':\n self.commision_bonus = self.product_id.commision_rate * self.quantity\n else:\n self.commision_bonus = 0\n\n commision_bonus = fields.Monetary(\"Commision\", compute=\"_get_commision_bonus\", store=True)","sub_path":"fal_sale_commision/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"606733352","text":"#!/usr/bin/env python3\n\"\"\"\nCreated on Tue Feb 4 13:33:27 2020\n\n@author: plusuncold\n\"\"\"\n\nfrom typing import Dict\nimport enchant\nimport string\n\n\ndef get_user_inputted_word():\n print(f'Input word for correction: (or \\'!\\' to cancel, or \\'@\\' to quit) ', end='')\n user_input = input()\n \n if user_input == '!':\n return (None, False)\n elif user_input == '@':\n print('Quitting program')\n quit()\n \n return (user_input, False)\n \n\ndef get_correction_for_word(word : str, count : int, count_capitalized : int, dictionary, index : int, total : int):\n if count == count_capitalized:\n word = string.capwords(word)\n print(f'Correct \\'{word}\\'? (all {count} occurances are capitalized, correction {index} of {total})')\n else:\n print(f'Correct \\'{word}\\'? ({count} occurances, {count_capitalized} are capitalized, correction {index} of {total})')\n \n print(f'0 - No', end='')\n suggestions = dictionary.suggest(word)\n for key_number, suggestion in enumerate(suggestions):\n if key_number > 9:\n break\n print(f', {key_number+1} - \\'{suggestion}\\'', end='')\n print('')\n user_input = input()\n if user_input == 'd':\n return (None, True)\n elif user_input == 'e':\n user_input, _ = get_user_inputted_word()\n if user_input:\n return (user_input, False)\n elif user_input == 'q':\n print('Quitting program')\n quit()\n try:\n key_number = int(user_input)\n if key_number == 0:\n return (None, False)\n elif key_number >= 1 and key_number <= 9:\n return (suggestions[key_number-1], False)\n except:\n print(f'Input {user_input} is not valid. 
Please input \\'d\\', \\'e\\' or a number between 0 and 9')\n return get_correction_for_word(word, count, dictionary, index, total)\n \n\n\ndef corrections_for_words(unique_words, dict_lang):\n count_unique_words = len(unique_words)\n d = enchant.Dict(dict_lang)\n misspelled_words = { k: (c, cc) for k, (c, cc) in unique_words.items() if not d.check(k)}\n misspelled_words = sorted(misspelled_words.items(),\n key=lambda kv: kv[1],\n reverse=True)\n count_misspelled_words = len(misspelled_words)\n print(f'Of {count_unique_words} unique words in the ePub, {count_misspelled_words} were not found in dictionary \\'{dict_lang}\\'')\n print('\\nCorrect any misspelled words')\n print('Enter:\\n\\ta number 1-9 to select a correction')\n print('\\t\\'0\\' to leave the word as is')\n print('\\t\\'e\\' to enter the correct word')\n print('\\t\\'d\\' to finish correcting words or')\n print('\\t\\'q\\' to quit\\n')\n \n print('Unique misspelled words')\n for word, (_, _) in misspelled_words:\n print(word, end=' ')\n print('')\n \n corrections = {}\n index = 1\n for word, (count, count_capitalized) in misspelled_words:\n correction, finished = get_correction_for_word(word, count, count_capitalized, d, index, count_misspelled_words)\n if finished:\n break\n if correction:\n corrections[word] = correction\n print('')\n index += 1\n return corrections","sub_path":"correct_spellings.py","file_name":"correct_spellings.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"220274747","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# noinspection PyUnresolvedReferences\nimport vtkmodules.vtkInteractionStyle\n# noinspection PyUnresolvedReferences\nimport vtkmodules.vtkRenderingOpenGL2\nfrom vtkmodules.vtkCommonColor import vtkNamedColors\nfrom vtkmodules.vtkCommonCore import (\n vtkMinimalStandardRandomSequence,\n vtkPoints\n)\nfrom vtkmodules.vtkCommonDataModel import (\n vtkQuadraticTetra,\n vtkUnstructuredGrid\n)\nfrom vtkmodules.vtkFiltersCore import vtkGlyph3D\nfrom vtkmodules.vtkFiltersGeneral import vtkTessellatorFilter\nfrom vtkmodules.vtkFiltersSources import vtkSphereSource\nfrom vtkmodules.vtkRenderingCore import (\n vtkActor,\n vtkDataSetMapper,\n vtkRenderWindow,\n vtkRenderWindowInteractor,\n vtkRenderer\n)\n\n\ndef main():\n namedColors = vtkNamedColors()\n\n uGrid = MakeQuadraticTetra()\n\n tessellate = vtkTessellatorFilter()\n tessellate.SetInputData(uGrid)\n\n mapper = vtkDataSetMapper()\n mapper.SetInputConnection(tessellate.GetOutputPort())\n mapper.ScalarVisibilityOff()\n\n # Create an actor for the grid\n actor = vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetDiffuseColor(\n namedColors.GetColor3d('Tomato'))\n actor.GetProperty().SetEdgeColor(\n namedColors.GetColor3d('IvoryBlack'))\n actor.GetProperty().EdgeVisibilityOn()\n\n sphereSource = vtkSphereSource()\n sphereSource.SetRadius(0.02)\n\n glyph3D = vtkGlyph3D()\n glyph3D.SetInputData(uGrid)\n glyph3D.SetSourceConnection(sphereSource.GetOutputPort())\n glyph3D.ScalingOff()\n glyph3D.Update()\n\n glyph3DMapper = vtkDataSetMapper()\n glyph3DMapper.SetInputConnection(glyph3D.GetOutputPort())\n glyph3DMapper.ScalarVisibilityOff()\n\n glyph3DActor = vtkActor()\n glyph3DActor.SetMapper(glyph3DMapper)\n glyph3DActor.GetProperty().SetColor(\n namedColors.GetColor3d('Banana'))\n\n # Visualize\n renderer = vtkRenderer()\n renderWindow = vtkRenderWindow()\n renderWindow.SetWindowName('QuadraticTetra')\n 
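# --- editor's note: hedged fix for get_correction_for_word() above ---
# The invalid-input retry calls get_correction_for_word(word, count,
# dictionary, index, total), dropping count_capitalized, so any bad keystroke
# raises TypeError instead of re-prompting. The retry should forward every
# parameter, and the bare `except:` is better narrowed to the conversion error:
#     except ValueError:
#         print(f'Input {user_input} is not valid. ...')
#         return get_correction_for_word(word, count, count_capitalized,
#                                        dictionary, index, total)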
renderWindow.AddRenderer(renderer)\n interactor = vtkRenderWindowInteractor()\n interactor.SetRenderWindow(renderWindow)\n\n renderer.AddActor(actor)\n renderer.AddActor(glyph3DActor)\n renderer.SetBackground(namedColors.GetColor3d('SlateGray'))\n\n renderWindow.Render()\n interactor.Start()\n\n\ndef MakeQuadraticTetra():\n aTetra = vtkQuadraticTetra()\n points = vtkPoints()\n\n pcoords = aTetra.GetParametricCoords()\n rng = vtkMinimalStandardRandomSequence()\n points.SetNumberOfPoints(aTetra.GetNumberOfPoints())\n rng.SetSeed(5070) # for testing\n for i in range(0, aTetra.GetNumberOfPoints()):\n perturbation = [0.0] * 3\n for j in range(0, 3):\n rng.Next()\n perturbation[j] = rng.GetRangeValue(-0.1, 0.1)\n aTetra.GetPointIds().SetId(i, i)\n points.SetPoint(i, pcoords[3 * i] + perturbation[0],\n pcoords[3 * i + 1] + perturbation[1],\n pcoords[3 * i + 2] + perturbation[2])\n\n # Add the points and tetra to an unstructured grid\n uGrid = vtkUnstructuredGrid()\n uGrid.SetPoints(points)\n uGrid.InsertNextCell(aTetra.GetCellType(), aTetra.GetPointIds())\n\n return uGrid\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/Python/GeometricObjects/QuadraticTetra.py","file_name":"QuadraticTetra.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"47590212","text":"x = 10\ny = 5\n\narreglo_string = \"Hola Mundo\"\n#print(arreglo_string[0:11])\ntexto1 = 'BIENVENIDO'\ntexto2 = 'tupu'\ntexto3 = 'JAJAJAJA'\n\nfinal = '{} a {} => {}'.format(texto1 , texto2 , texto3)\nfinal = final.lower()\n\npos = final.find('tupu')\nprint(pos)\n#print('conlssole.log(\"hola\")')\n#print(final)\n\n#\"\"\" jeje \"\"\"\ndef hola():\n print('funciom llamada');\n","sub_path":"scripts/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"221399118","text":"from typing import List\n\n\nclass Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n if len(intervals) == 0:\n return []\n\n intervals.sort(key=lambda x: x[0])\n start = intervals[0][0]\n end = intervals[0][1]\n result = []\n for interval in intervals[1:]:\n # 当前区间存在重叠,当前区间起始位置小于上一区间的结束位置\n if interval[0] <= end:\n end = max(end, interval[1])\n # 当前不存在重叠\n else:\n result.append([start, end]) # 加入解集\n start = interval[0] # 更新start end\n end = interval[1]\n\n result.append([start, end])\n\n return result\n\n\nif __name__ == '__main__':\n intervals = [[1, 3], [2, 6], [8, 10], [15, 18]]\n print(Solution().merge(intervals))\n","sub_path":"51_100/56_merge_intervals.py","file_name":"56_merge_intervals.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"635699380","text":"\"\"\"\n @Author:DarknessFor9\n @DateTime:7/8/19 5:46 PM\n\"\"\"\nfrom re import match\n\"\"\"\n1-11.匹配所有能够表示有效电子邮件地址的集合(从一个宽松的正则表达式开始,然后尝试使它尽可能严谨,不过要保持正确的的功能)\n\"\"\"\npattern = r'\\w+@\\w+\\.com'\n\ncontent = {\n '2668210489@qq.com',\n '17386225884@163.com',\n '1134609220',\n '2668210489'\n}\n\nfor i in content:\n result = match(pattern, i)\n if result is not None:\n print(result.group())\n","sub_path":"code/ReModule/practice/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390287148","text":"from pydub import AudioSegment\nimport os\nimport 
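# --- editor's note: hedged aside on the re-matching exercise (11.py) above ---
# re.match() only anchors at the start of the string, so it also accepts an
# address followed by junk; re.fullmatch() enforces the whole string. Using the
# sample's deliberately loose pattern:
import re

pattern = r'\w+@\w+\.com'
assert re.fullmatch(pattern, '2668210489@qq.com')
assert re.match(pattern, '2668210489@qq.com.evil')              # prefix match slips through
assert re.fullmatch(pattern, '2668210489@qq.com.evil') is None  # fullmatch rejects it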
numpy as np\nfrom tqdm import tqdm\nfrom joblib import Parallel, delayed\nimport scipy.io.wavfile as wav\nfrom python_speech_features import logfbank\n\nimport argparse\nimport csv\nfrom os import listdir\nfrom os.path import isfile, join\n\n\ndev_path = '/home/neda.ahmadi/GermanSpeechRecognition/dutchdataset/dev.tsv'\ntrain_path = '/home/neda.ahmadi/GermanSpeechRecognition/dutchdataset/train.tsv'\ntest_path = '/home/neda.ahmadi/GermanSpeechRecognition/dutchdataset/test.tsv'\nroot = '/home/neda.ahmadi/GermanSpeechRecognition/clips_flac/'\n\n#dev_path = '/data/s3559734/DutchDS/dev.tsv'\n#train_path = '/data/s3559734/DutchDS/train.tsv'\n#test_path = '/data/s3559734/DutchDS/test.tsv'\n#root = '/data/s3559734/DutchSpeechRecognition/clips_flac/'\n\nn_jobs = -2\nn_filters = 40\nwin_size = 0.025/3\nnorm_x = False\n\ndef traverse(root,path,search_fix='.wav',return_label=False):\n files = sorted(os.listdir(root))\n numfiles = len(files)\n if path == \"train\":\n set = files[:int(0.7*numfiles)]\n elif path == \"dev\":\n set = files[int(0.7*numfiles):int(0.9 *numfiles)]\n else:\n set = files[int(0.9*numfiles):]\n f_list = []\n #with open('/data/s3559734/DutchDS/validated.tsv') as txt_file:\n with open('/home/neda.ahmadi/GermanSpeechRecognition/dutchdataset/validated.tsv') as txt_file:\n reader = csv.reader(txt_file, delimiter='\\t')\n # print('reader',reader)\n for line in reader:\n if (line[1][:-4] + \".wav\") in set:\n if return_label:\n f_list.append(line[2])\n else:\n f_list.append(root + line[1][:-4]+\".wav\")\n return f_list\n\ndef wav2logfbank(f_path):\n (rate,sig) = wav.read(f_path)\n fbank_feat = logfbank(sig,rate,winlen=win_size,nfilt=n_filters)\n np.save(f_path[:-3]+'fb'+str(n_filters),fbank_feat)\n\n\ndef norm(f_path,mean,std):\n np.save(f_path,(np.load(f_path)-mean)/std)\n\n\nprint('----------Processing Datasets----------')\nprint('Training sets :',train_path)\nprint('Validation sets :',dev_path)\nprint('Testing sets :',test_path)\n\ntr_file_list = traverse(root,\"train\")\ndev_file_list = traverse(root,\"dev\")\ntt_file_list = traverse(root,\"test\")\n\n\n# # wav 2 log-mel fbank\nprint('---------------------------------------')\nprint('Processing wav2logfbank...',flush=True)\n\nprint('Training',flush=True)\nresults = Parallel(n_jobs=n_jobs,backend=\"threading\")(delayed(wav2logfbank)(i[:-4]+'.wav') for i in tqdm(tr_file_list))\n\nprint('Validation',flush=True)\nresults = Parallel(n_jobs=n_jobs,backend=\"threading\")(delayed(wav2logfbank)(i[:-4]+'.wav') for i in tqdm(dev_file_list))\n\nprint('Testing',flush=True)\nresults = Parallel(n_jobs=n_jobs,backend=\"threading\")(delayed(wav2logfbank)(i[:-4]+'.wav') for i in tqdm(tt_file_list))\n \n# # log-mel fbank 2 feature\nprint('---------------------------------------') \nprint('Preparing Training Dataset...',flush=True)\n\ntr_file_list = traverse(root,\"train\",search_fix='.fb'+str(n_filters))\ntr_text = traverse(root,\"train\",return_label=True)\n\nX = []\nfor f in tr_file_list:\n # print(np.load(f[:-3]+\"fb40.npy\"))\n # X.append(np.load(f))\n X.append(np.load(f[:-3] +\"fb40.npy\")) \n\n# Normalize X\nif norm_x:\n mean_x = np.mean(np.concatenate(X,axis=0),axis=0)\n std_x = np.std(np.concatenate(X,axis=0),axis=0)\n\n results = Parallel(n_jobs=n_jobs,backend=\"threading\")(delayed(norm)(i,mean_x,std_x) for i in tqdm(tr_file_list))\n\n# Sort data by signal length (long to short)\naudio_len = [len(x) for x in X]\n\ntr_file_list = [tr_file_list[idx] for idx in reversed(np.argsort(audio_len))]\ntr_text = [tr_text[idx] for idx in 
reversed(np.argsort(audio_len))]\n\n# Create char mapping\nchar_map = {}\nchar_map['<sos>'] = 0\nchar_map['<eos>'] = 1\nchar_idx = 2\n\n# map char to index\nfor text in tr_text:\n for char in text:\n if char not in char_map:\n char_map[char] = char_idx\n char_idx +=1\n\n# Reverse mapping\nrev_char_map = {v:k for k,v in char_map.items()}\n\n# Save mapping\nwith open(root+'idx2chap.csv','w') as f:\n f.write('idx,char\\n')\n for i in range(len(rev_char_map)):\n f.write(str(i)+','+rev_char_map[i]+'\\n')\n\n# text to index sequence\ntmp_list = []\nfor text in tr_text:\n tmp = []\n for char in text:\n tmp.append(char_map[char])\n tmp_list.append(tmp)\ntr_text = tmp_list\ndel tmp_list\n\n# write dataset\nfile_name = 'train.csv'\n\nprint('Writing dataset to '+root+file_name+'...',flush=True)\n\nwith open(root+file_name,'w') as f:\n f.write('idx,input,label\\n')\n for i in range(len(tr_file_list)):\n f.write(str(i)+',')\n f.write(tr_file_list[i]+',')\n for char in tr_text[i]:\n f.write(' '+str(char))\n f.write('\\n')\n\nprint()\nprint('Preparing Validation Dataset...',flush=True)\n\ndev_file_list = traverse(root,\"dev\",search_fix='.fb'+str(n_filters))\ndev_text = traverse(root,\"dev\",return_label=True)\n\nX = []\nfor f in dev_file_list:\n X.append(np.load(f[:-3] +\"fb40.npy\"))\n# Normalize X\nif norm_x:\n results = Parallel(n_jobs=n_jobs,backend=\"threading\")(delayed(norm)(i,mean_x,std_x) for i in tqdm(dev_file_list))\n# Sort data by signal length (long to short)\naudio_len = [len(x) for x in X]\n\ndev_file_list = [dev_file_list[idx] for idx in reversed(np.argsort(audio_len))]\ndev_text = [dev_text[idx] for idx in reversed(np.argsort(audio_len))]\n\n# text to index sequence\ntmp_list = []\nfor text in dev_text:\n tmp = []\n for char in text:\n tmp.append(char_map[char])\n tmp_list.append(tmp)\ndev_text = tmp_list\ndel tmp_list\n\n# write dataset\nfile_name = 'dev.csv'\n\nprint('Writing dataset to '+root+file_name+'...',flush=True)\nwith open(root+file_name,'w') as f:\n f.write('idx,input,label\\n')\n for i in range(len(dev_file_list)):\n f.write(str(i)+',')\n f.write(dev_file_list[i]+',')\n for char in dev_text[i]:\n f.write(' '+str(char))\n f.write('\\n')\n\nprint()\nprint('Preparing Testing Dataset...',flush=True)\n\ntest_file_list = traverse(root,\"test\",search_fix='.fb'+str(n_filters))\ntt_text = traverse(root,\"test\",return_label=True)\n\nX = []\nfor f in test_file_list:\n X.append(np.load(f[:-3] +\"fb40.npy\"))\n\n# Normalize X\nif norm_x:\n results = Parallel(n_jobs=n_jobs,backend=\"threading\")(delayed(norm)(i,mean_x,std_x) for i in tqdm(test_file_list))\n\n# Sort data by signal length (long to short)\naudio_len = [len(x) for x in X]\n\ntest_file_list = [test_file_list[idx] for idx in reversed(np.argsort(audio_len))]\ntt_text = [tt_text[idx] for idx in reversed(np.argsort(audio_len))]\n\n# text to index sequence\ntmp_list = []\nfor text in tt_text:\n tmp = []\n for char in text:\n try:\n tmp.append(char_map[char])\n except:\n print(char)\n tmp_list.append(tmp)\ntt_text = tmp_list\ndel tmp_list\n\n# write dataset\nfile_name = 'test.csv'\n\nprint('Writing dataset to '+root+file_name+'...',flush=True)\n\nwith open(root+file_name,'w') as f:\n f.write('idx,input,label\\n')\n for i in range(len(test_file_list)):\n f.write(str(i)+',')\n f.write(test_file_list[i]+',')\n for char in tt_text[i]:\n f.write(' '+str(char))\n 
f.write('\\n')\n","sub_path":"util/dutch_preprocess.py","file_name":"dutch_preprocess.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"294953822","text":"import os\nimport time\nimport tempfile\n\n# Print current dir\ncurDIr = os.getcwd()\nprint(curDIr)\n\n# Create a directory\n\nos.mkdir('Directory_Test') # check in windows explorer\n\n# After 5 sec rename the directory\ntime.sleep(5)\nos.rename('Directory_Test', 'New_Directory_Test')\n\ntime.sleep(5)\nos.rmdir('New_Directory_Test')\n\n\n\n# Temporary file\n\nprint(\"Current temp directory: \", tempfile.gettempdir())\nfp = tempfile.TemporaryFile()\nfp.write(b'Hello World!')\nfp.seek(0)\nprint(fp.read())\nfp.close()\n","sub_path":"file_dir.py","file_name":"file_dir.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"88655638","text":"#coding:utf8\nimport pymysql\n\nglobal conn,cur,a,b,c,d\nconn=pymysql.connect(host='127.0.0.1', port=3306, user='alca', passwd='111111', db='autoline', charset='utf8')\ncur=conn.cursor(pymysql.cursors.DictCursor)\n\nclass sql():\n def invest_id(ziduan,phone):\n # sql=\"SELECT * FROM fiz_red_packet WHERE fk_user_id='\"+str(fk_user_id)+\"' AND fiz_red_packet.dc_category='10' AND dt_issue_time like '2017-07%'GROUP BY dt_create_time DESC ;\"\n sql=\"select b.vc_name as '投资人',b.vc_cellphone as '电话',b.pk_id,a.nb_freeze_amount,a.nb_total_interest,a.nb_amount,a.nb_loan_amount,a.dt_create_time from fiz_dmd_balance a,fiz_user b where a.fk_user_id=b.pk_id and b.pk_id=(select pk_id from fiz_user r where vc_cellphone='\"+str(phone)+\"') order by dt_create_time DESC \"\n cur.execute(sql)\n result=cur.fetchall()\n # print ((len(result)))\n list=[]\n r=[result[x][''+str(ziduan)+''] for x in range(len(result)) if result[x][''+str(ziduan)+'']]\n for a in range(0,len(r)):\n # print (r[a])\n list.append(r[a])\n list1 = 0\n for x in range(0, len(list)):\n # print(list[x])\n list1 += list[x]\n print(list1)\n return list1\n # print (result[0]['fk_invest_id'])\n\n def dongjie(ziduan,sqll):\n # sql = \"select * from fiz_dmd_redeem_freeze_record i where i.dc_status in ('00','20')\"\n sql=sqll\n cur.execute(sql)\n result = cur.fetchall()\n # print((len(result)))\n list = []\n r = [result[x][''+str(ziduan)+''] for x in range(len(result)) if result[x][''+str(ziduan)+'']]\n for a in range(0, len(r)):\n # print (r[a])\n list.append(r[a])\n # print (list)\n list1=0\n for x in range(0,len(list)):\n # print (list[x])\n list1 +=list[x]\n print (list1)\n return list1\n\n\n def redpacket(investid):\n # global a,b,c,d\n sql='SELECT l.nb_amount,s.nb_amount,l.vc_remark,r.* FROM fiz_red_packet l,fiz_plan r,fiz_plan_invest s WHERE l.fk_plan_invest_id=s.pk_id and s.fk_plan_id=r.pk_id and l.fk_plan_invest_id=\"'+str(investid)+'\";'\n cur.execute(sql)\n result=cur.fetchall()\n # print (result[0])\n # print (\"收益:%s\"\n # \"投资金额:%s\"\n # \"投资期限:%s\"\n # \"投资类型:%s\"%(result[0]['nb_amount'],result[0]['s.nb_amount'],result[0]['nb_period'],result[0]['dc_period_type']))\n a=result[0]['nb_amount']\n b=result[0]['s.nb_amount']\n c=result[0]['nb_period']\n d=result[0]['dc_period_type']\n e=result[0]['vc_remark']\n return a,b,c,d,e\n # print (\"收益:%s 投资金额:%s 投资期限:%s 投资类型:%s\"%(int(result['nb_amount']),int(result['s.nb_amopunt']),int(result['dc_period']),int(result['dc_period_type'])))\n\n def shouyi(a,b,c,d,e):\n # print (a)\n # print (b)\n # print (c)\n # print (d)\n # 
print (e)\n yue=float(b)*float(c)/365*0.005\n ji=float(b)*float(c)/365*0.01\n bannian=float(b)*float(c)/365*0.015\n nian=float(b)*float(c)/365*0.02\n Yue=float(b)*float(c)/12*0.005\n Ji=float(b)*float(c)/12*0.01\n Bannian=float(b)*float(c)/12*0.015\n Nian=float(b)*float(c)/12*0.02\n if d==\"00\":\n if '首投' in e:\n print(a, b, c, d, e)\n print(\"首投\",10)\n print('----------------------------------')\n elif 0<=c<28:\n print(a, b, c, d, e)\n print (\"新手标\",yue)\n print ('----------------------------------')\n elif 28<=c<89:\n print(a, b, c, d, e)\n print (\"月标\",yue)\n print ('----------------------------------')\n # print (yue)\n elif 89<=c<180:\n print(a, b, c, d, e)\n print (\"季度标\",ji)\n print ('----------------------------------')\n elif 180<=c<360:\n print(a, b, c, d, e)\n print (\"半年标\",bannian)\n print ('----------------------------------')\n elif 360<=c:\n print(a, b, c, d, e)\n print (\"年标\",nian)\n print ('----------------------------------')\n else:\n if '首投' in e:\n print(a, b, c, d, e)\n print(\"首投\",10)\n print('----------------------------------')\n elif 1<=c<3:\n print(a, b, c, d, e)\n print (\"月标\",Yue)\n print ('----------------------------------')\n elif 3<=c<6:\n print(a, b, c, d, e)\n print (\"季度标\",Ji)\n print ('----------------------------------')\n elif 6<=c<12:\n print(a, b, c, d, e)\n print (\"半年标\",Bannian)\n print ('----------------------------------')\n elif 12<=c:\n print(a, b, c, d, e)\n print (\"年标\",Nian)\n print ('----------------------------------')\n\n\n def daishoubenxi(self):\n # sql = \"SELECT * FROM fiz_plan_invest WHERE fk_user_id='6e706599-5abd-4298-842a-cd766122b998'GROUP BY dt_create_time DESC;\"\n sql = \"SELECT * FROM fiz_plan_invest WHERE fk_user_id='a4eb8023-2b72-42ec-a4a1-a5ab320e6c2b'GROUP BY dt_create_time DESC;\"\n cur.execute(sql)\n result = cur.fetchall()\n print (len(result))\n num = len(result)\n c = 0\n d = 0\n for a in range(0,num):\n nb_amount=result[a]['nb_amount']\n nb_profit=result[a]['nb_profit']\n print (nb_amount)\n print (nb_profit)\n c +=nb_amount\n d +=nb_profit\n print (c,d,c+d)\n\n\n def qianyueyonghu(self):\n '''查找签约用户进行登录参数化'''\n sql = \"SELECT r.fk_rcmd_id,rcmded_user_id,l.vc_name,l.vc_account, l.vc_cellphone,r.first_date,r.dt_reward_end_date,r.reg_date from rpt_rcmd_rcmdedor r,fiz_user l WHERE r.rcmded_user_id=l.pk_id AND r.fk_rcmd_id=(select pk_id from fiz_user r where vc_cellphone='13761917640') and r.first_date is not null order BY r.dt_create_time DESC \"\n cur.execute(sql)\n result = cur.fetchall()\n print ((len(result)))\n list = []\n r = [result[x]['vc_cellphone'] for x in range(len(result)) if result[x]['vc_cellphone']]\n for a in range(0, len(r)):\n # print (r[a])\n list.append(r[a])\n list1 = 0\n return list\n # for x in range(0, len(list)):\n # print(list[x])\n # list1 += list[x]\n\n\n\nif __name__ == '__main__':\n a = sql.dongjie('nb_amount',\"select b.vc_name as '投资人',b.vc_cellphone as '电话',b.pk_id,a.nb_freeze_amount,a.nb_total_interest,a.nb_amount,a.nb_loan_amount,a.dt_create_time from fiz_dmd_balance a,fiz_user b where a.fk_user_id=b.pk_id order by dt_create_time DESC \")\n b=sql.dongjie('nb_loan_amount',\"select b.vc_name as '投资人',b.vc_cellphone as '电话',b.pk_id,a.nb_freeze_amount,a.nb_total_interest,a.nb_amount,a.nb_loan_amount,a.dt_create_time from fiz_dmd_balance a,fiz_user b where a.fk_user_id=b.pk_id order by dt_create_time DESC \")\n c=sql.dongjie('nb_freeze_amount',\"select b.vc_name as '投资人',b.vc_cellphone as 
'电话',b.pk_id,a.nb_freeze_amount,a.nb_total_interest,a.nb_amount,a.nb_loan_amount,a.dt_create_time from fiz_dmd_balance a,fiz_user b where a.fk_user_id=b.pk_id order by dt_create_time DESC \")\n d=sql.dongjie('nb_freeze_cash_amount',\"select * from fiz_dmd_redeem_freeze_record i where i.dc_status in ('00','20');\")\n e=sql.dongjie ('nb_freeeze_loan_amount',\"select * from fiz_dmd_redeem_freeze_record i where i.dc_status in ('00','20');\")\n f = sql.dongjie('nb_freeze_cash_amount',\n \"select * from fiz_dmd_redeem_freeze_record i where i.dc_status in ('90');\")\n print (a+b)\n print (\"活期理财总额:%s\"%(a+b-c+d+e+f))\n\n phoneNO='13761917640'\n A=sql.invest_id('nb_amount',phoneNO)\n B=sql.invest_id('nb_loan_amount',phoneNO)\n C=sql.invest_id('nb_freeze_amount',phoneNO)\n D=sql.dongjie('nb_freeze_cash_amount',\"select * from fiz_dmd_redeem_freeze_record i where i.dc_status in ('00','20') and i.fk_user_id=(select pk_id from fiz_user r where vc_cellphone='\"+str(phoneNO)+\"');\")\n E=sql.dongjie('nb_freeeze_loan_amount',\"select * from fiz_dmd_redeem_freeze_record i where i.dc_status in ('00','20') and i.fk_user_id=(select pk_id from fiz_user r where vc_cellphone='\"+str(phoneNO)+\"');\")\n F=sql.dongjie('nb_freeze_cash_amount',\"select * from fiz_dmd_redeem_freeze_record i where i.dc_status in ('90') and i.fk_user_id=(select pk_id from fiz_user r where vc_cellphone='\"+str(phoneNO)+\"');\")\n print (\"个人活期理财总额:%s\"%(A+B-C+D+E+F))\n","sub_path":"interface/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":8919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"286788620","text":"def is_isogram(string):\n x = 0\n y = 1\n string = string.lower()\n while x < len(string):\n while y < (len(string)):\n if string[x] == string[y]:\n return False\n y += 1\n x += 1\n y = x + 1\n return True\n\nprint(is_isogram(\"hgwlrw\"))","sub_path":"codewars/isograms.py","file_name":"isograms.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"5460689","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 20 23:06:38 2020\r\n\r\n@author: abdelhamid\r\n\"\"\"\r\nimport torch\r\nfrom torch.autograd import Function\r\n\r\nclass loss_function(Function):\r\n def __init__(self, Con):\r\n self.Con = Con\r\n \r\n def forward(self, output, labels, mean_dir,device):\r\n \r\n mat1 = torch.exp(torch.mm(output,mean_dir)*self.Con)\r\n mat2 = torch.zeros(mat1.shape[0]).to(device)\r\n for i in range(mat1.shape[0]):\r\n mat2[i] = mat1[i,labels.type(torch.long)[i]]\r\n \r\n mat3 = -torch.log(mat2/torch.sum(mat1,1))\r\n \r\n loss = torch.sum(mat3)\r\n \r\n return loss\r\n ","sub_path":"VMF/loss_function.py","file_name":"loss_function.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"219326665","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 31 11:32:15 2016\n\n@author: nathan\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nwith open(\"mylog.log\") as f:\n data = f.readlines()\n data = data[2:]\n\nx = [row.split()[0] for row in data]\ny1 = [row.split()[1] for row in data]\ny2 =[row.split()[2] for row in data]\ny3 = [float(row.split()[3])*(2/90030.) 
for row in data]\ny4 =[row.split()[4] for row in data]\ny5 =[row.split()[5] for row in data]\n\nfig1 = plt.figure()\n#fig2 = plt.figure()\n#fig3 = plt.figure()\n#fig4 = plt.figure()\n#fig5 = plt.figure()\n\nax1 = fig1.add_subplot(111)\n#ax2 = fig2.add_subplot(111)\n#ax3 = fig3.add_subplot(111)\n#ax4 = fig4.add_subplot(111)\n#ax5 = fig5.add_subplot(111)\n\nvar1='Temp and KE'\n#var2='Potential Energy'\n#var3='Kinetic Energy'\n#var4='Volume'\n#var5='Pressure'\n\nax1.set_title(var1) \nax1.set_xlabel('Timestep')\nax1.set_ylabel(var1)\n#ax2.set_title(var2) \n#ax2.set_xlabel('Timestep')\n#ax2.set_ylabel(var2)\n#ax3.set_title(var3) \n#ax3.set_xlabel('Timestep')\n#ax3.set_ylabel(var3)\n\"\"\"ax4.set_title(var4) \nax4.set_xlabel('Timestep')\nax4.set_ylabel(var4)\nax5.set_title(var5) \nax5.set_xlabel('Timestep')\nax5.set_ylabel(var5)\n\"\"\"\nax1.plot(x,y1, c='r', label='T')\n#ax2.plot(x,y2, c='blue', label='PE')\nax1.plot(x,y3, c='g', label='KE')\n#ax4.plot(x,y4, c='b', label='V')\n#ax5.plot(x,y5, c='orange', label='P')\n\nleg1 = ax1.legend()\n#leg2 = ax2.legend()\n#leg3 = ax3.legend()\n#leg4 = ax4.legend()\n#leg5 = ax5.legend()\n\nplt.show()\n","sub_path":"analysis/graphlog.py","file_name":"graphlog.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"370837424","text":"\"\"\"\nAPI:\n1. Create short URL: HTTP GET ?targetUrl=\n2. Read short URL: HTTP GET /\n\nAPI Gateway, Lambda, DynamoDB (+ monitoring and traffic generator)\n\"\"\"\nfrom aws_cdk.core import Construct, Duration, Stack, CfnOutput, Fn\nfrom aws_cdk import aws_apigateway, aws_dynamodb, aws_ec2, aws_lambda\nfrom cdk_watchful import Watchful\nfrom os import environ\n\nfrom .traffico import Traffico\n\n\nVPC_ID = environ.get(\"VPC_ID\")\nALERT_EMAIL = environ.get(\"ALERT_EMAIL\")\n\n\n# Main application stack\nclass UrlShortenerStack(Stack):\n def __init__(self, scope: Construct, id: str, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n # The code that defines your stack goes here\n table = aws_dynamodb.Table(\n self, \"ShortCodeMappingTable\",\n partition_key=aws_dynamodb.Attribute(name=\"id\", type=aws_dynamodb.AttributeType.STRING))\n \n function = aws_lambda.Function(\n self, \"UrlShortenerFunction\",\n code=aws_lambda.Code.asset(\"./lambda\"),\n handler=\"handler.main\",\n timeout=Duration.minutes(5),\n runtime=aws_lambda.Runtime.PYTHON_3_7)\n \n table.grant_read_write_data(function)\n \n function.add_environment(\"TABLE_NAME\", table.table_name)\n \n api = aws_apigateway.LambdaRestApi(self, \"UrlShortenerApi\", handler=function)\n \n wf = Watchful(self, \"watchful\", alarm_email=ALERT_EMAIL)\n wf.watch_scope(self)\n \n CfnOutput(self, \"UrlShortenerApiUrl\", value=api.url, export_name=\"UrlShortenerApiUrl\")\n\n\n# Separate stack that includes the traffic generator\nclass TrafficStack(Stack):\n def __init__(self, scope: Construct, id: str, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n Traffico(\n self, \"TrafficGenerator\",\n vpc=aws_ec2.Vpc.from_lookup(self, \"vpc\", vpc_id=VPC_ID),\n url=Fn.import_value(\"UrlShortenerApiUrl\"),\n tps=10)\n","sub_path":"CDK/url-shortener/url_shortener/url_shortener_stack.py","file_name":"url_shortener_stack.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"554622248","text":"#! 
/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n#plot histogram\ndef hist(in_col):\n plt.clf()\n col_name=in_col.dtype.names[0]\n plt.hist(in_col[col_name])\n plt.xlabel(col_name)\n plt.ylabel(\"Count\")\n #plt.show()\n plt.savefig(\"./\"+col_name+'.pdf')\n\n\n#plot 2 panel histogram \ndef hist2pan(in_col_1, in_col_2):\n plt.clf()\n col_name_1 = in_col_1.dtype.names[0]\n col_name_2 = in_col_2.dtype.names[0]\n fig, axes = plt.subplots(2, 1)#2 rows\n plt.subplots_adjust(hspace=0.25)\n axes[0].hist(in_col_1[col_name_1])\n axes[0].set_xlabel(col_name_1)\n axes[0].set_ylabel(\"Count\")\n axes[1].hist(in_col_2[col_name_2])\n axes[1].set_xlabel(col_name_2)\n axes[1].set_ylabel(\"Count\")\n #plt.show()\n plt.savefig('./2panel_'+col_name_1+'_and_'+col_name_2+'.pdf')\n\n\n#plot 2 sample histogram \ndef hist2sam(in_col_1, in_col_2):\n plt.clf()\n col_name_1 = in_col_1.dtype.names[0]\n col_name_2= in_col_2.dtype.names[0]\n p1=plt.hist(in_col_1[col_name_1])\n p2=plt.hist(in_col_2[col_name_2])\n plt.xlabel(col_name_1)\n plt.ylabel(\"Count\")\n plt.legend([\"subsample 1\", \"subsample 2\"])\n #plt.show()\n plt.savefig('./2_sample_'+col_name_1+'_and_'+col_name_2+'.pdf')\n\n\n\"\"\"\nd1=np.array(np.random.normal(10, 5, 100))\nd2=np.array(np.random.normal(10, 5, 70))\nd1.reshape(100, 1) \nd2.reshape(70, 1)\nstruct_d1=np.array(d1, dtype=[(\"id_1\", int)])\nstruct_d2=np.array(d2, dtype=[(\"id_2\", int)])\nhist2sam(struct_d1, struct_d2)\n\"\"\"\n\n","sub_path":"segue_plot/hist.py","file_name":"hist.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"629790135","text":"from django.shortcuts import render,redirect\nfrom .forms import ImageUploadForm\nfrom django.contrib import messages\nfrom PIL import Image\nimport copy\nfrom .models import Task\nfrom accounts.forms import Loginform\nimport os\nimport imghdr\nfrom django.core.mail import EmailMessage\nfrom commonauth.settings import EMAIL_FROM_ADDRESS, BASE_DIR\nfrom django.contrib import sessions\ndef doaction(*args,**kwargs):\n \n pass\n\n\ndef home(request):\n\n if request.method == 'POST':\n image = request.FILES['image']\n desired_size = int(request.POST.get(\"requiredSize\", 100_000))\n size = image.size\n if request.user.is_authenticated:\n print(request.user,\"is authenticated\")\n task = Task(image=image, requiredSize=desired_size, user=request.user)\n task.save()\n ori_size = copy.deepcopy(size)\n else:\n\n print(request.user,\"is not authenticated\")\n messages.error(request,\"redirect to login \")\n return redirect(\"/\")\n\n if size <= desired_size:\n messages.error(request, \"image size is already less than required\")\n imageuploadform = ImageUploadForm()\n context = {'imageuploadform': imageuploadform}\n else:\n i = Image.open(image)\n ori_pix = i.size\n newpath = os.path.join(BASE_DIR, f'media/tasks/reducedSize_{image.name}')\n quality = int(desired_size / size * 100)\n i.save(newpath, quality=quality)\n if imghdr.what(newpath) not in (\"jpeg\", \"png\", \"jpg\"):\n messages.error(request, imghdr.what(newpath))\n while size > desired_size:\n i.save(newpath, quality=quality)\n print(desired_size, size, quality)\n size = os.path.getsize(newpath)\n quality -= 1\n if quality <= 0:\n messages.info(request, \"reduced to least possible size\")\n break\n messages.info(request, f\"Image Size Reduced to {size} Bytes from {ori_size} {ori_pix} \")\n \n try:\n \n task = 
Task.objects.filter(user=request.user).filter(image__icontains=image.name)[0]\n except Exception as e:\n \n messages.error(request,f\"error : {e} user :{request.user} \")\n\n\n\n context = {'task': task}\n context['user'] = request.user\n mail = EmailMessage(\n 'Subject here',\n f'Hi {request.user.username} \\n,PFA the File with Reduced size ',\n EMAIL_FROM_ADDRESS,\n [request.user.email]\n )\n mail.attach_file(newpath)\n try:\n mail.send()\n except Exception as error:\n messages.error(request,\n f\"Mail could not sent due to Error {error}some issue at server end Please try later\")\n else:\n messages.info(request, \"new file has been sent to your registered mail id\")\n else:\n imageuploadform = ImageUploadForm()\n loginform = Loginform()\n request.session['user'] = request.user.username\n context = {'imageuploadform': imageuploadform}\n if request.user == \"AnonymousUser\":\n dir(request.session)\n context['user'] = request.user\n context['loginform'] = loginform\n print(\"before render\")\n return render(request, \"pixelhome.html\", context=context)\n","sub_path":"pixel/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"449628469","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\nclass FrysElectronisSpider(scrapy.Spider):\n name = 'frys-electronics'\n item_attributes = { 'brand': \"Fry's Electronics\" }\n allowed_domains = ['www.frys.com']\n start_urls = (\n 'https://www.frys.com/ac/storeinfo/storelocator',\n )\n\n def parse_store(self, response):\n address_lines = response.xpath('//div[@id=\"rightside\"]/div[@id=\"text3\"]/div[@id=\"address\"]//b/text()').extract()\n address = ', '.join([ a.strip() for a in address_lines ])\n phone = [ t for t in response.xpath('//div[@id=\"rightside\"]/div[@id=\"text3\"]/div[@id=\"address\"]//text()').extract() if 'Phone' in t ]\n coordinates = [ c for c in response.xpath('//div[@id=\"rightside\"]/div[@id=\"text3\"]/div[@id=\"maps\"]/text()').extract() if '°' in c ]\n\n properties = {\n 'addr_full': address,\n 'website': response.url,\n 'ref': response.url.split('/')[-1],\n }\n\n if len(phone) == 1:\n properties['phone'] = phone[0].replace('Phone', '').strip().replace('(', '').replace(') ', '-')\n \n # Try to parse the degree, minutes, seconds coordinate pair\n if coordinates and len(coordinates) == 1:\n # Add a comma to separate lat and lon\n if '\" -' in coordinates[0]:\n coordinates[0] = coordinates[0].replace('\" -', '\", -')\n latlon = coordinates[0].split(',')\n properties['lat'] = self.dms2dd(latlon[0])\n properties['lon'] = self.dms2dd(latlon[1])\n else:\n # Fall back to the ll URL param in the google maps URL\n mapsLink = response.xpath('//div[@id=\"rightside\"]/div[@id=\"text3\"]/div[@id=\"maps\"]/a/@href').extract_first()\n if 'll=' in mapsLink:\n latlon = mapsLink.split('ll=')[1].split('&')[0].split(',')\n properties['lat'] = float(latlon[0])\n properties['lon'] = float(latlon[1])\n\n yield GeojsonPointItem(**properties)\n \n def dms2dd(self, dms):\n sign = 1\n if '-' in dms or 'W' in dms:\n sign = -1\n degrees = [ d.strip() for d in dms.split('°') ]\n d = int(degrees[0].replace('+', '').replace('-', '').replace('N', '').replace('W', ''))\n minutes = degrees[1].split(\"'\")\n if '.' 
in minutes[0]:\n m = float(minutes[0]) / 60\n s = 0\n else:\n m = float(minutes[0]) / 60\n s = float(minutes[1].replace('\"', '').strip()) / 3600\n dd = (d + m + s) * sign\n return dd\n\n def parse(self, response):\n urls = response.xpath('//div[@id=\"main-stores\"]//table//a/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_store)\n","sub_path":"locations/spiders/frys_electronics.py","file_name":"frys_electronics.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"111125666","text":"import random\n\nword_bank = [\n \"kendricklamar\", \"landoncollins\", \"jalenramsey\", \"jcole\", \"aboogie\", \"ehs\", \"ajbouye\", \"boondocks\"\n ]\n\nguess_word = []\nhiddenWord = random.choice(word_bank)\nlength_word = len(hiddenWord)\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\nword_bank = []\n\n\ndef beginning():\n print(\"Hello Player\")\n\n while True:\n name = input(\"Enter Name\")\n\n if name == '':\n print(\"You can't put nothing.\")\n else:\n break\n\n\nbeginning()\n\n\ndef newfunc():\n print(\"Well, that's perfect moment to play some Hangman!\")\n\n while True:\n choice = input(\"Would You like to play?\").upper()\n\n if choice == \"YES\" or choice == \"Y\":\n break\n elif choice == \"NO\" or choice == \"N\":\n exit(\"That's a shame! Have a nice day\")\n else:\n print(\"Answer with only Yes or No\")\n continue\n\n\ndef change():\n for characters in hiddenWord:\n guess_word.append(\"-\")\n print(\"Ok, so the word You need to guess has\", length_word, \"characters\")\n\n\nprint(\"Be aware that You can enter only 1 letter from a-z\")\nprint(guess_word)\n\n\ndef guessing():\n guess_taken = 1\n while guess_taken < 10:\n guess = input(\"Pick a letter\").lower()\n if not guess in alphabet:\n print(\"Enter a letter from a-z alphabet\")\n elif guess in word_bank:\n print(\"You have already guessed that letter Try again!\")\n else:\n\n word_bank.append(guess)\n if guess in hiddenWord:\n print(\"You Got it!\")\n for x in range(0, length_word):\n if hiddenWord[x] == guess:\n guess_word[x] = guess\n print(guess_word)\n\n if not '-' in guess_word:\n print(\"You won now leave before we take that away!\")\n break\n else:\n print(\"Try Again!\")\n guess_taken += 1\n if guess_taken == 10:\n print(\" Sorry Man, You lost -_-! The hidden word was\", hiddenWord)\n\n\nchange()\nguessing()\n\nprint(\"Game Over!\")\n","sub_path":"notes/Caleb B. - Hangman.py","file_name":"Caleb B. - Hangman.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"272089193","text":"#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport math\nimport os.path as op\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description=\"Estimate Earth's size from two shadow readings at different latitudes.\"\n )\n parser.add_argument(\n 'results',\n nargs='?',\n help=\"Path to a JSON file containing reading results\",\n )\n return parser\n\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n results_path = args.results\n if not results_path:\n print(\"No results file path specified. 
Reading simulation.json\")\n results_path = op.join(op.dirname(__file__), 'simulation.json')\n results = json.load(open(results_path, 'rt'))\n stick_length = results['stick_length_m']\n reading1 = results['reading1_m']\n reading2 = results['reading2_m']\n distance = results['distance_km']\n # tan(a) = opposite / adjacent\n angle1 = math.atan(reading1 / stick_length)\n angle2 = math.atan(reading2 / stick_length)\n diff = abs(angle2 - angle1)\n WHOLE_CIRCLE = math.pi * 2\n proportion = diff / WHOLE_CIRCLE\n estimate = distance / proportion\n output_lines = [\n \"Stick length: {:1.6f} m\".format(stick_length),\n \"Distance between readings: {:1.2f} km\".format(distance),\n \"Reading 1: {:1.6f} m\".format(reading1),\n \"Reading 2: {:1.6f} m\".format(reading2),\n \"Angle 1: {:1.6f} rad\".format(angle1),\n \"Angle 2: {:1.6f} rad\".format(angle2),\n \"Earth's estimated circumference: {:1.2f} km\".format(estimate),\n ]\n print('\\n'.join(output_lines))\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"mission1/calc-earth-size.py","file_name":"calc-earth-size.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"378914815","text":"# -*- coding: utf-8 -*-\n\nimport uuid\nimport json\n\nfrom django.core.urlresolvers import reverse\nfrom django.conf.urls import patterns, include, url\nfrom django import test\nfrom django.db.models import get_model\n\nfrom rest_framework.views import APIView\nfrom rest_framework import viewsets\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom taiga import urls\nfrom taiga.base import auth\nfrom taiga.base.users.tests import create_user, create_domain\nfrom taiga.projects.tests import create_project\n\nfrom taiga.domains.models import Domain, DomainMember\nfrom taiga.projects.models import Membership\n\n\nclass TestAuthView(viewsets.ViewSet):\n authentication_classes = (auth.Token,)\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, *args, **kwargs):\n return Response(\"ok\")\n\n\nurls.urlpatterns += patterns(\"\",\n url(r'^test-api/v1/auth/', TestAuthView.as_view({\"get\": \"get\"}), name=\"test-token-auth\"),\n)\n\n\nclass TokenAuthTests(test.TestCase):\n fixtures = [\"initial_domains.json\",]\n def setUp(self):\n self.user1 = create_user(1)\n\n def test_token_auth_01(self):\n response = self.client.get(reverse(\"test-token-auth\"))\n self.assertEqual(response.status_code, 401)\n\n def test_token_auth_02(self):\n token = auth.get_token_for_user(self.user1)\n response = self.client.get(reverse(\"test-token-auth\"),\n HTTP_AUTHORIZATION=\"Bearer {}\".format(token))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.content, b'\"ok\"')\n\n\nclass RegisterTests(test.TestCase):\n fixtures = [\"initial_domains.json\",]\n\n def setUp(self):\n self.user1 = create_user(1)\n self.domain1 = create_domain(\"localhost1\", True)\n self.domain2 = create_domain(\"localhost2\", False)\n self.role = self._create_role()\n self.project = create_project(1, self.user1)\n\n def test_public_register_01(self):\n data = {\n \"username\": \"pepe\",\n \"password\": \"pepepepe\",\n \"first_name\": \"pepe\",\n \"last_name\": \"pepe\",\n \"email\": \"pepe@pepe.com\",\n \"type\": \"public\",\n }\n\n url = reverse(\"auth-register\")\n response = self.client.post(url, data, HTTP_X_HOST=self.domain1.name)\n self.assertEqual(response.status_code, 201)\n\n 
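# a successful public registration should yield exactly one DomainMember and no project membership\n 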
self.assertEqual(DomainMember.objects.filter(domain=self.domain1).count(), 1)\n self.assertEqual(self.project.memberships.count(), 0)\n\n\n def test_public_register_02(self):\n data = {\n \"username\": \"pepe\",\n \"password\": \"pepepepe\",\n \"first_name\": \"pepe\",\n \"last_name\": \"pepe\",\n \"email\": \"pepe@pepe.com\",\n \"type\": \"public\",\n }\n\n url = reverse(\"auth-register\")\n response = self.client.post(url, data, HTTP_X_HOST=self.domain2.name)\n self.assertEqual(response.status_code, 400)\n\n def test_private_register_01(self):\n data = {\n \"username\": \"pepe\",\n \"password\": \"pepepepe\",\n \"first_name\": \"pepe\",\n \"last_name\": \"pepe\",\n \"email\": \"pepe@pepe.com\",\n \"type\": \"private\",\n }\n\n url = reverse(\"auth-register\")\n response = self.client.post(url, data, HTTP_X_HOST=self.domain2.name)\n self.assertEqual(response.status_code, 400)\n\n def test_private_register_02(self):\n membership = self._create_invitation(\"pepe@pepe.com\")\n\n data = {\n \"username\": \"pepe\",\n \"password\": \"pepepepe\",\n \"first_name\": \"pepe\",\n \"last_name\": \"pepe\",\n \"email\": \"pepe@pepe.com\",\n \"type\": \"private\",\n \"existing\": False,\n \"token\": membership.token,\n }\n\n self.assertEqual(self.project.memberships.exclude(user__isnull=True).count(), 0)\n\n url = reverse(\"auth-register\")\n response = self.client.post(url, data=json.dumps(data),\n content_type=\"application/json\",\n HTTP_X_HOST=self.domain2.name)\n\n self.assertEqual(response.status_code, 201)\n self.assertEqual(self.project.memberships.exclude(user__isnull=True).count(), 1)\n self.assertEqual(self.project.memberships.get().role, self.role)\n self.assertEqual(DomainMember.objects.filter(domain=self.domain1).count(), 0)\n self.assertEqual(DomainMember.objects.filter(domain=self.domain2).count(), 1)\n\n def test_private_register_03(self):\n membership = self._create_invitation(\"pepe@pepe.com\")\n\n data = {\n \"username\": self.user1.username,\n \"password\": self.user1.username,\n \"type\": \"private\",\n \"existing\": True,\n \"token\": membership.token,\n }\n\n self.assertEqual(self.project.memberships.exclude(user__isnull=True).count(), 0)\n\n url = reverse(\"auth-register\")\n response = self.client.post(url, data=json.dumps(data),\n content_type=\"application/json\",\n HTTP_X_HOST=self.domain2.name)\n\n self.assertEqual(response.status_code, 201)\n self.assertEqual(self.project.memberships.exclude(user__isnull=True).count(), 1)\n self.assertEqual(self.project.memberships.get().role, self.role)\n self.assertEqual(DomainMember.objects.filter(domain=self.domain1).count(), 0)\n self.assertEqual(DomainMember.objects.filter(domain=self.domain2).count(), 1)\n\n\n def _create_invitation(self, email):\n token = str(uuid.uuid1())\n membership_model = get_model(\"projects\", \"Membership\")\n\n instance = membership_model(project=self.project,\n email=email,\n role=self.role,\n user=None,\n token=token)\n instance.save()\n return instance\n\n def _create_role(self):\n role_model = get_model(\"users\", \"Role\")\n instance = role_model(name=\"foo\", slug=\"foo\",\n order=1, computable=True, project_id=1)\n\n instance.save()\n return instance\n","sub_path":"taiga/base/auth/tests/tests_auth.py","file_name":"tests_auth.py","file_ext":"py","file_size_in_byte":6278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"580480387","text":"import os\nimport logging\n\nfrom django.views.decorators.cache import never_cache\nfrom 
django.contrib.auth.decorators import login_required\nfrom django.shortcuts import *\nfrom django.http import HttpResponse, JsonResponse\nfrom django.utils import timezone\n\nfrom .models import *\nfrom .statistics import *\nfrom . import gpx\n\n\ndef index(request):\n if request.user.is_authenticated():\n return redirect('dashboard')\n else:\n return render(request, 'training/index.html', {'users_count': User.objects.all().count()})\n\n\n@login_required\ndef dashboard(request):\n return render(request, 'training/dashboard.html', {'statistics': Statistics(request.user)})\n\n\n@login_required\ndef statistics(request):\n return render(request, 'training/statistics.html', {'statistics': Statistics(request.user)})\n\n\n@login_required\ndef start_workout(request):\n workout = Workout.objects.create(user=request.user)\n return redirect('workout', workout.id)\n\n\n@login_required\ndef finish_workout(request, training_session_id):\n workout = Workout.objects.get(pk=training_session_id)\n workout.finish()\n workout.save()\n\n try:\n current_excercise = workout.excercise_set.order_by('-pk')[0]\n current_excercise.time_finished = timezone.now()\n current_excercise.save()\n except:\n pass\n\n return redirect('workout', workout.id)\n\n\n@login_required\ndef workout(request, training_session_id):\n workout = get_object_or_404(Workout, pk=training_session_id, user=request.user)\n\n gpx = None\n try:\n gpx = workout.gpx_set.get()\n except:\n pass\n\n return render(request, 'training/workout.html', {'workout': workout,\n 'most_common_reps': Reps.most_common(),\n 'statistics': Statistics(request.user),\n 'gpx': gpx})\n\n\n@login_required\ndef add_excercise(request, training_session_id):\n workout = Workout.objects.get(pk=training_session_id, user=request.user)\n\n try:\n current_excercise = workout.excercise_set.order_by('-pk')[0]\n current_excercise.time_finished = timezone.now()\n current_excercise.save()\n except:\n pass\n\n excercise = workout.excercise_set.create(name=request.POST['name'])\n try:\n workout.start()\n except:\n pass\n workout.save()\n\n excercise.time_started = timezone.now()\n excercise.save()\n\n return redirect('workout', training_session_id)\n\n\n@login_required\ndef add_reps(request, excercise_id):\n s = Excercise.objects.get(pk=excercise_id, workout__user=request.user)\n s.reps_set.create(reps=request.POST['reps'])\n\n s.time_updated = timezone.now()\n s.save()\n return redirect('workout', s.workout.id)\n\n\n@login_required\ndef delete_workout(request, workout_id):\n workout = Workout.objects.get(pk=workout_id, user=request.user)\n workout.delete()\n return redirect('dashboard')\n\n\nfrom django import forms\n\nclass UploadGpxForm(forms.Form):\n gpxfile = forms.FileField(label='Select a file')\n\n\n@login_required\ndef upload_gpx(request):\n if request.method == \"POST\":\n form = UploadGpxForm(request.POST, request.FILES)\n if form.is_valid():\n gpx.upload_gpx(request)\n return redirect('dashboard')\n else:\n return render(request, 'training/upload_gpx.html', {'form': form})\n else:\n form = UploadGpxForm()\n return render(request, 'training/upload_gpx.html', {'form': form})\n\n\nclass ConnectWithEndomondoForm(forms.Form):\n email = forms.CharField(label='e-mail')\n password = forms.CharField(label='password', widget=forms.PasswordInput())\n\n\n@login_required\ndef endomondo(request):\n if request.method == \"POST\":\n gpx.connect_to_endomondo(request.user, request.POST[\"email\"], request.POST[\"password\"])\n return redirect('endomondo')\n else:\n key = 
gpx.endomondo_key(request.user)\n form = ConnectWithEndomondoForm()\n return render(request, 'training/endomondo.html', {'form': form, 'key': key})\n\n\n@login_required\ndef synchronize_endomondo(request):\n gpx.synchronize_endomondo(request.user)\n return redirect('endomondo')\n\n\n@login_required\n@never_cache\ndef synchronize_endomondo_ajax(request):\n count = gpx.synchronize_endomondo(request.user, 10)\n return JsonResponse({\"imported_count\": count})\n\n\n@login_required\ndef disconnect_endomondo(request):\n gpx.disconnect_endomondo(request.user)\n return redirect('endomondo')\n\n\n@login_required\ndef purge_endomondo(request):\n gpx.purge_endomondo_workouts(request.user)\n return redirect('dashboard')\n","sub_path":"training/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"299818974","text":"# list/set comprehension syntax\n# [ expr(item) for item in iterable ]\n\n\nwords = \"Why sometimes I have believed as many as six impossible things before breakfast\".split()\n\nprint([len(word) for word in words])\n\nfrom math import factorial\n\nf = [len(str(factorial(x))) for x in range(20)]\nprint(f)\n\nf_set = {len(str(factorial(x))) for x in range(20)}\nprint(f_set)\n\n\n# dict comprehension syntax\n# { expr(item): value_expr(item) for item in iterable }\ncountry_to_capital = {\n \"United Kingdom\": \"London\",\n \"Brazil\": \"Brasília\",\n \"Morocco\": \"Rabat\",\n \"Sweden\": \"Stockholm\",\n}\ncapital_to_country = {\n capital: country for country, capital in country_to_capital.items()\n}\nfrom pprint import pprint as pp\n\npp(capital_to_country)\n\nwords = [\"hi\", \"hello\", \"foxtrot\", \"hotel\"]\nw_dic = {x[0]: x for x in words}\nprint(w_dic)\n\n\nimport os\nimport glob\n\nfile_sizes = {\n os.path.realpath(file): os.stat(file).st_size for file in glob.glob(\"*.py\")\n}\npp(file_sizes)\n\n\nfrom math import sqrt\n\n\ndef is_prime(x):\n if x < 2:\n return False\n for i in range(2, int(sqrt(x) + 1)):\n if x % i == 0:\n return False\n return True\n\n\nprint([x for x in range(101) if is_prime(x)])\nprime_square_divisors = {x * x: (1, x, x * x) for x in range(100) if is_prime(x)}\npp(prime_square_divisors)\n\n\ndef first(iterable):\n iterator = iter(iterable)\n try:\n return next(iterator)\n except StopIteration:\n raise ValueError(\"Iterable is empty.\")\n\n\nprint(first([\"1st\", \"2nd\", \"3rd\"]))\nprint(first({\"1st\", \"2nd\", \"3rd\"}))\nprint(first(set()))  # raises ValueError: Iterable is empty.\n","sub_path":"hello_world/getting_started/iterables/comprehension.py","file_name":"comprehension.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"94741726","text":"\n\nfrom xai.brain.wordbase.verbs._curate import _CURATE\n\n#class header\nclass _CURATES(_CURATE, ):\n\tdef __init__(self,): \n\t\t_CURATE.__init__(self)\n\t\tself.name = \"CURATES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"curate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_curates.py","file_name":"_curates.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"464453394","text":"\ndef task():\n\tfname = \"[Joshua]\"\n\tlname = \"[Hassan]\"\n\tHNG_ID = \"[HNG-00533]\"\n\temail = \"cutejosh2@gmail.com\"\n\tlanguage = \"[Python]\"\n\t# using str.format for string interpolation\n\tprint(\"Hello World, this is {} {} with HNGi7 ID {} 
using {} for stage 2 task. {}\".format(fname, lname, HNG_ID, language, email))\n\ntask()","sub_path":"testScripts/__cutejosh__.py","file_name":"__cutejosh__.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"604247254","text":"# vim:ts=4:sw=4:expandtab\n'''Example of event firing.\n'''\nimport time\nimport random\nfrom diesel import Application, Loop, sleep, fire, wait, log\n\ndef gunner():\n x = 1\n while True:\n fire('bam', x)\n x += 1\n sleep()\n\ndef sieged():\n t = time.time()\n while True:\n n = wait('bam')\n if n % 10000 == 0:\n log.info(n)\n if n == 50000:\n delt = time.time() - t\n log.info(\"50,000 messages in %.3fs (%.1f/s)\" % (delt, 50000 / delt))\n a.halt()\n\na = Application()\nlog = log.sublog('fire-system', log.info)\na.add_loop(Loop(gunner))\na.add_loop(Loop(sieged))\na.run()\n","sub_path":"examples/fire.py","file_name":"fire.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"267081550","text":"'''\nApplication to calculate - Herotek.ir\n==========================================\n\n***INPUTS***\n--> 1 | ertefa\n--> 2 | arz\n--> 3 | gheymat\n\n------------------\n***OUTPUTS***\n--> 1 | ertefa_frame ==> ertefa - 6\n--> 2 | arz_frame ==> arz - 6\n--> 3 | motaharek ==> ertefa - 7.5\n\n--> 4 | ertefa_tor ==> ertefa - 3.5\n--> 5 | gam_tor ==> arz / 2.5\n\n--> 6 | metr_moraba ==> ertefa X arz\n--> 7 | gheymat_koll ==> metr_moraba X gheymat\n'''\n\nimport os\n\nfrom kivy.app import App\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.textinput import TextInput\n\n\nimport arabic_reshaper\nfrom bidi.algorithm import get_display\n\n# ---------------------------------------------------\n# Reshape Texts\n# *** Inputs Text\nertefa_text = arabic_reshaper.reshape(u'ارتفاع اصلی')\nreshape_ertefa_text = get_display(ertefa_text)\n\narz_text = arabic_reshaper.reshape(u'عرض اصلی')\nreshape_arz_text = get_display(arz_text)\n\ngheymat_text = arabic_reshaper.reshape(u'قیمت واحد')\nreshape_gheymat_text = get_display(gheymat_text)\n\n# =================\n# *** Outputs text\nertefa_frame_text= 'ارتفاع فریم'\narz_frame_text = 'عرض فریم'\nmotaharek = 'متحرک'\n# -- section 2\nertefa_tor_text = 'ارتفاع تور'\ngam_tor_text = 'گام تور'\n# -- section 3\nmetr_moraba_text = 'متر مربع'\ngheymat_koll_text = 'قیمت کل'\n# ---------------------------------------------------\n\n\nclass Hero(App):\n\n def build(self):\n self.icon = 'icon.png'\n self.title = 'ملزومات توری های پلیسه تورسان'\n # Main Layout\n w = BoxLayout(orientation='vertical')\n label1 = Label(text = reshape_ertefa_text , font_name='yekan.ttf' , font_size = 18)\n label2 = Label(text = reshape_arz_text , font_name='yekan.ttf' , font_size = 18)\n w.add_widget(label1)\n w.add_widget(label2)\n\n # return the BoxLayout as the root widget\n return w\n\n\n\n\nif __name__ == '__main__':\n Hero().run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"238863740","text":"# This script is used to cluster TRMM precipitation grid cells over mainland Southeast Asia\n# Several steps are implemented:\n# S1-read precip data from TRMM\n# S2-clean data and PCA\n# S3-cluster the data\n#\n# Written by Harry Li\n\n# import libraries\nimport numpy as np\nimport pathmagic # 
noqa: F401\nfrom modules.stats.mod_stats_clustering import kmeans_cluster\nfrom modules.datareader.mod_dataread_obs_TRMM import readobs_pre_TRMM_day\nfrom modules.stats.mod_stats_clim import mon2clim\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import normalize, scale\nfrom sklearn.cluster import KMeans\nimport pickle\nplt.switch_backend('agg')\n\n\n############################################################################\n# setup directory\n############################################################################\noutdir = '/scratch/d/dylan/harryli/cesm1/vrcesm/Analysis/vrseasia_AMIP_1979_to_2005/pre/obs/TRMM/clustering/overland/'\n\n############################################################################\n# set parameters\n############################################################################\n# variable info\nvarname = 'Total Precip'\nvarstr = 'precipitation'\nvar_unit = 'mm/day'\n\n# time bounds\niniyear = 1998\nendyear = 2005\nnyears = endyear - iniyear + 1\nyearts = np.arange(iniyear, endyear+1)\n# yearts = np.delete(yearts,9,None)\nprint(yearts)\n\n# define regions\nlatbounds = [5, 30]\nlonbounds = [90, 115]\n\n# mainland Southeast Asia\nreg_lats = [5, 24]\nreg_lons = [97, 110]\n\n# set data frequency\nfrequency = 'day'\n\n# create months for plot\nmonths = np.arange(1, 13, 1)\nmonnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n\n# color for clusters\ncolors = ['red', 'blue', 'green', 'magenta', 'darkslategrey', 'orange']\n\n# days in each month\nmdays = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\nmindx = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365]\n\n############################################################################\n# read data\n############################################################################\n\nprint('Reading TRMM data...')\n\n# read TRMM\nvar, time, lats, lons = readobs_pre_TRMM_day('precipitation', iniyear, endyear, latbounds, lonbounds, oceanmask=1)\n\n# print(time)\nprint(var.shape)\n# print(var.mask)\nprint(time[time.month == 2])\n\n############################################################################\n# clustering\n############################################################################\nlat_1 = np.argmin(np.abs(lats - reg_lats[0]))\nlat_2 = np.argmin(np.abs(lats - reg_lats[1]))\nlon_1 = np.argmin(np.abs(lons - reg_lons[0]))\nlon_2 = np.argmin(np.abs(lons - reg_lons[1]))\nprint(lat_1, lat_2)\nprint(lon_1, lon_2)\n\nreg_lats = lats[lat_1: lat_2+1]\nreg_lons = lons[lon_1: lon_2+1]\n\ndataset_var = var[:, lat_1:lat_2+1, lon_1:lon_2+1]\nsamplelons, samplelats = np.meshgrid(lons[lon_1:lon_2+1], lats[lat_1:lat_2+1])\n# print(dataset_var[0, :, :].mask)\n# print(np.argwhere(np.logical_xor(var[5, :, :].mask, var[0, :, :].mask)))\n\nsamplelons[dataset_var[0, :, :].mask] = np.nan\nsamplelats[dataset_var[0, :, :].mask] = np.nan\ndataset_var[dataset_var[:, :, :].mask] = np.nan\n\nprint(dataset_var[0, :, :])\nprint(dataset_var.shape)\nprint(samplelats.shape)\nprint(samplelats)\nprint(samplelons)\n\ntemp_var = dataset_var.reshape((dataset_var.shape[0], -1))\nprint(temp_var.shape)\ntemp_var = np.transpose(temp_var, (1, 0))\n# print(np.argwhere(np.isnan(temp_var)))\ntemp_var = np.delete(temp_var, np.argwhere(np.isnan(temp_var[:, 0])), 0)\nprint(temp_var.shape)\n# print(np.argwhere(np.isnan(temp_var)))\n\n# create the lat/lon list\ntemp_lat = samplelats.flatten()\ntemp_lon = 
samplelons.flatten()\nprint(temp_lat.shape)\n\n# get rid of nan\ntemp_lat = temp_lat[~np.isnan(temp_lat)]\ntemp_lon = temp_lon[~np.isnan(temp_lon)]\nprint(temp_lat.shape)\n\nfname = 'TRMM_precip_kmeans_nclusters'\nncluster = 6\noutdir2 = '/scratch/d/dylan/harryli/cesm1/vrcesm/Analysis/vrseasia_AMIP_1979_to_2005/pre/obs/TRMM/clustering/overland/' + \\\n str(ncluster)+' clusters/'\n# res_spectral = spectral_cluster(dataset_var, ncluster, stnlats, stnlons, stnnames, outdir+fname)\n# res = kmeans_cluster(dataset_var, ncluster, stnlats, stnlons, stnnames, outdir+fname, map_plot=False)\n\nnclusters_list = np.arange(2, 10)\nJcost = []\nres = []\n\n# use pca to compress data\n# temp_var = scale(temp_var)\n# pca = PCA().fit(temp_var)\n# print(pca.explained_variance_ratio_)\n# print(np.sum(pca.explained_variance_ratio_))\n# sum = 0\n# k = 0\n# while sum < 0.99:\n# sum += pca.explained_variance_ratio_[k]\n# k += 1\n# print(k)\n\nfor icluster in nclusters_list:\n print('run kMeans algorithm for '+str(icluster)+' clusters')\n\n # use pca as first guess\n pca = PCA(n_components=icluster).fit(temp_var)\n kmeans = KMeans(n_clusters=icluster, init=pca.components_, n_init=1, max_iter=300).fit(temp_var)\n\n # use pca to compress data\n # pca_data = PCA(n_components=k).fit_transform(temp_var)\n # print(pca_data.shape)\n # kmeans = KMeans(n_clusters=icluster, init='k-means++', n_init=100, max_iter=300).fit(pca_data)\n\n print(kmeans.labels_)\n Jcost.append(kmeans.inertia_)\n\n res.append(kmeans)\n\n # plot the map\n temp_label = kmeans.labels_\n\n # project the result onto the map using the temp_label and temp_lat/lon\n maps_res = np.empty((len(lats), len(lons)))\n maps_res[:] = np.nan\n\n for idx in range(len(temp_label)):\n lat_res = np.argmin(np.abs(lats - temp_lat[idx]))\n lon_res = np.argmin(np.abs(lons - temp_lon[idx]))\n maps_res[lat_res, lon_res] = temp_label[idx]\n\n # create figure\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # create basemap\n map = Basemap(projection='cyl', llcrnrlat=latbounds[0], urcrnrlat=latbounds[1],\n llcrnrlon=lonbounds[0], urcrnrlon=lonbounds[1], resolution='l')\n map.drawcoastlines(linewidth=0.3)\n map.drawcountries()\n\n # draw lat/lon lines\n parallels = np.arange(latbounds[0], latbounds[1], 5)\n meridians = np.arange(lonbounds[0], lonbounds[1], 5)\n map.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=4, linewidth=0.1)\n map.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=4, linewidth=0.1)\n\n mlons, mlats = np.meshgrid(lons, lats)\n x, y = map(mlons, mlats)\n\n # plot the contour\n cs = map.contourf(x, y, maps_res, levels=np.arange(-0.5, icluster),\n cmap=plt.cm.get_cmap('viridis', icluster), alpha=0.9)\n\n # add colorbar\n fig.subplots_adjust(bottom=0.22, wspace=0.2, hspace=0.2)\n cbar_ax = fig.add_axes([0.25, 0.17, 0.5, 0.02])\n cbar = fig.colorbar(cs, cax=cbar_ax, orientation='horizontal', ticks=range(icluster))\n cbar.ax.tick_params(labelsize=4)\n cbar.set_label('Cluster labels')\n plt.clim(-0.5, icluster-0.5)\n\n title = 'K-means clustering: '+str(icluster)+' groups'\n plt.suptitle(title, fontsize=9, y=0.95)\n plt.savefig(outdir+fname+'_'+str(icluster)+'.pdf', bbox_inches='tight', dpi=3000)\n if icluster == ncluster:\n plt.savefig(outdir2+fname+'_'+str(icluster)+'.pdf', bbox_inches='tight', dpi=3000)\n plt.close(fig)\n\n# plot to choose best cluster numbers\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(nclusters_list, Jcost, c='k', linewidth=1.5)\nplt.xticks(fontsize=6)\nplt.yticks(fontsize=6)\nplt.xlabel('Number of 
clusters')\nplt.ylabel('Sum of squared distances')\n\ntitle = 'K-means clustering'\nplt.suptitle(title, fontsize=9, y=0.95)\nplt.savefig(outdir+fname+'.pdf', bbox_inches='tight', dpi=3000)\nplt.close(fig)\n\nfor idx in range(len(res)):\n temp = res[idx]\n print('number of clusterings: '+str(idx+2))\n print(temp.labels_)\n\nkmeans_res = {}\nfor icluster in nclusters_list:\n kmeans_res[str(icluster)+' clusters'] = res[icluster-nclusters_list[0]].labels_\nkmeans_res['kmeans_res_lats'] = temp_lat\nkmeans_res['kmeans_res_lons'] = temp_lon\nkmeans_res['reg_lats'] = reg_lats\nkmeans_res['reg_lons'] = reg_lons\n\n############################################################################\n# plot for the different groups\n############################################################################\n\nindex = np.arange(9)\nbar_width = 0.8\nopacity = 0.8\nshape_type = ['', '', '', '', '..', '..', '..', '..', '//']\n\ncluster_labels = res[ncluster-2].labels_\n# cluster_labels = cluster_labels.reshape((lat_2-lat_1+1, lon_2-lon_1+1))\n# cluster_labels = np.array([0,0,1,2,0,0,0,0,0,0,1,0,2,0,2,1,0,0,2,1,1,0])\nprint('Current number of clusterings is: '+str(ncluster))\n\n# project the result onto the map using the temp_label and temp_lat/lon\nmaps_res = np.empty((len(lats), len(lons)))\nmaps_res[:] = np.nan\n\nfor idx in range(len(cluster_labels)):\n lat_res = np.argmin(np.abs(lats - temp_lat[idx]))\n lon_res = np.argmin(np.abs(lons - temp_lon[idx]))\n maps_res[lat_res, lon_res] = cluster_labels[idx]\n\nmaps_res = maps_res[lat_1: lat_2 + 1, lon_1: lon_2+1]\n\n# # test the method to select cluster\n# maps_res_3d = np.broadcast_to(maps_res == 0, dataset_var.shape)\n# temp = np.ma.masked_where(maps_res_3d, dataset_var)\n#\n# print(np.argwhere(np.logical_xor(maps_res_3d[0, :, :], maps_res == 0)))\n# print(maps_res_3d)\n# print(dataset_var[maps_res_3d].shape)\n# print(temp.shape)\n\ndataset_monmean = np.ma.zeros((nyears*12, lat_2-lat_1+1, lon_2-lon_1+1))\nfor iyear in np.arange(nyears):\n for imon in range(12):\n dataset_monmean[iyear*12+imon, :, :] = np.ma.mean(\n dataset_var[((time.year == iniyear+iyear) & (time.month == (imon+1))), :, :], axis=0)\n\n# print(dataset_monmean)\n\ndataset_gpmonmean = np.ma.zeros((nyears*12, ncluster))\nfor idx in range(dataset_monmean.shape[0]):\n for icluster in range(ncluster):\n temp = dataset_monmean[idx, :, :]\n # print(np.argwhere(np.logical_xor(temp.mask, dataset_monmean.mask)))\n # print(temp)\n dataset_gpmonmean[idx, icluster] = np.ma.mean(temp[maps_res == icluster])\n # print(temp[maps_res == icluster].shape)\n # print(np.count_nonzero(~np.isnan(maps_res)))\n\nprint(dataset_gpmonmean)\nkmeans_res['dataset_gpmonmean'] = dataset_gpmonmean\n\n# ############################################################################\n# # plot for monthly mean TS\n# ############################################################################\n\nmonthts = np.arange((endyear-iniyear+1)*12) + 1\nxlabel = 'Month'\nylabel = 'Precip (mm/day)'\nxticks = np.arange(6, (endyear-iniyear+1)*12, 12)\nxticknames = [str(iyear) for iyear in yearts]\n\n# plot monthly mean ts for each group\nfor icluster in range(ncluster):\n groupname = 'group'+str(icluster)\n\n print('plot mothly mean ts for ' + groupname)\n\n title = str(iniyear)+' to '+str(endyear)+' TRMM Monthly mean precip TS in the group: '+str(icluster)\n fname = 'TRMM_prect_monthly_mean_ts_group_'+str(icluster)+'.png'\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for idx in range(len(cluster_labels)):\n if cluster_labels[idx] == 
icluster:\n            lat_res = np.argmin(np.abs(reg_lats - temp_lat[idx]))\n            lon_res = np.argmin(np.abs(reg_lons - temp_lon[idx]))\n            plt.plot(monthts, dataset_monmean[:, lat_res, lon_res], color='grey', marker='o',\n                     markersize=1, linestyle='-', linewidth=1.5, alpha=0.4)\n\n    plt.plot(monthts, dataset_gpmonmean[:, icluster], color=colors[icluster], marker='o',\n             markersize=1, linestyle='-', linewidth=1.5, label=groupname)\n\n    plt.legend(handlelength=4, fontsize=5)\n    plt.xticks(xticks, xticknames, fontsize=6)\n    plt.yticks(fontsize=6)\n    plt.xlabel(xlabel, fontsize=8)\n    plt.ylabel(ylabel, fontsize=8)\n\n    plt.suptitle(title, fontsize=9, y=0.95)\n    plt.savefig(outdir2+fname, bbox_inches='tight', dpi=1000)\n    plt.close(fig)\n\n# plot monthly mean ts for all groups\nprint('plot monthly mean ts for all groups')\nfig = plt.figure()\nax = fig.add_subplot(111)\n\ntitle = str(iniyear)+' to '+str(endyear)+' TRMM Monthly mean precip TS'\nfname = 'TRMM_prect_monthly_mean_ts_groupinone.png'\n\nfor idx in range(len(cluster_labels)):\n    lat_res = np.argmin(np.abs(reg_lats - temp_lat[idx]))\n    lon_res = np.argmin(np.abs(reg_lons - temp_lon[idx]))\n    plt.plot(monthts, dataset_monmean[:, lat_res, lon_res], color=colors[cluster_labels[idx]], marker='o',\n             markersize=1, linestyle='-', linewidth=1.5, alpha=0.4)\nlines = []\nlegendnames = []\nfor icluster in range(ncluster):\n    iname = 'group'+str(icluster)\n    legendnames.append(iname)\n    lines += plt.plot(monthts, dataset_gpmonmean[:, icluster], color=colors[icluster], marker='o',\n                      markersize=1, linestyle='-', linewidth=1.5, label=iname)\n\nplt.legend(lines, legendnames, handlelength=4, fontsize=5)\nplt.xticks(xticks, xticknames, fontsize=6)\nplt.yticks(fontsize=6)\nplt.xlabel(xlabel, fontsize=8)\nplt.ylabel(ylabel, fontsize=8)\n\nplt.suptitle(title, fontsize=9, y=0.95)\nplt.savefig(outdir2+fname, bbox_inches='tight', dpi=1000)\nplt.close(fig)\n\n\n# ############################################################################\n# # plot for annual mean TS\n# ############################################################################\nxlabel = 'Year'\nylabel = 'Precip (mm/day)'\n\ndataset_annmean = np.ma.zeros((nyears, lat_2-lat_1+1, lon_2-lon_1+1))\nfor iyear in np.arange(nyears):\n    dataset_annmean[iyear, :, :] = np.ma.mean(dataset_var[(time.year == iniyear+iyear), :, :], axis=0)\n\n# print(dataset_monmean)\n\ndataset_gpannmean = np.ma.zeros((nyears, ncluster))\nfor idx in range(dataset_annmean.shape[0]):\n    for icluster in range(ncluster):\n        temp = dataset_annmean[idx, :, :]\n        dataset_gpannmean[idx, icluster] = np.ma.mean(temp[maps_res == icluster])\n\nprint(dataset_gpannmean)\nkmeans_res['dataset_gpannmean'] = dataset_gpannmean\n\n# plot annual mean ts for each group\nfor icluster in range(ncluster):\n    groupname = 'group'+str(icluster)\n\n    print('plot annual mean ts for ' + groupname)\n\n    title = str(iniyear)+' to '+str(endyear)+' TRMM Annual mean precip TS in the group: '+str(icluster)\n    fname = 'TRMM_prect_annual_mean_ts_group_'+str(icluster)+'.png'\n\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    for idx in range(len(cluster_labels)):\n        if cluster_labels[idx] == icluster:\n            lat_res = np.argmin(np.abs(reg_lats - temp_lat[idx]))\n            lon_res = np.argmin(np.abs(reg_lons - temp_lon[idx]))\n            plt.plot(yearts, dataset_annmean[:, lat_res, lon_res], color='grey', marker='o',\n                     markersize=1, linestyle='-', linewidth=1.5, alpha=0.4)\n\n    plt.plot(yearts, dataset_gpannmean[:, icluster], color=colors[icluster], marker='o',\n             markersize=1, linestyle='-', linewidth=1.5, label=groupname)\n\n    
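# overlay the cluster-mean annual series on top of the individual grid-point curves\n    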
plt.legend(handlelength=4, fontsize=5)\n plt.xticks(fontsize=6)\n plt.yticks(fontsize=6)\n plt.xlabel(xlabel, fontsize=8)\n plt.ylabel(ylabel, fontsize=8)\n\n plt.suptitle(title, fontsize=9, y=0.95)\n plt.savefig(outdir2+fname, bbox_inches='tight', dpi=1000)\n plt.close(fig)\n\n# plot annual mean ts for all groups\nprint('plot annual mean ts for all groups')\nfig = plt.figure()\nax = fig.add_subplot(111)\n\ntitle = str(iniyear)+' to '+str(endyear)+' TRMM Annual mean precip TS'\nfname = 'TRMM_prect_annual_mean_ts_groupinone.png'\n\nfor idx in range(len(cluster_labels)):\n lat_res = np.argmin(np.abs(reg_lats - temp_lat[idx]))\n lon_res = np.argmin(np.abs(reg_lons - temp_lon[idx]))\n plt.plot(yearts, dataset_annmean[:, lat_res, lon_res], color=colors[cluster_labels[idx]], marker='o',\n markersize=1, linestyle='-', linewidth=1.5, alpha=0.4)\nlines = []\nlegendnames = []\nfor icluster in range(ncluster):\n iname = 'group'+str(icluster)\n legendnames.append(iname)\n lines += plt.plot(yearts, dataset_gpannmean[:, icluster], color=colors[icluster], marker='o',\n markersize=1, linestyle='-', linewidth=1.5, label=iname)\n\nplt.legend(lines, legendnames, handlelength=4, fontsize=5)\nplt.xticks(fontsize=6)\nplt.yticks(fontsize=6)\nplt.xlabel(xlabel, fontsize=8)\nplt.ylabel(ylabel, fontsize=8)\n\nplt.suptitle(title, fontsize=9, y=0.95)\nplt.savefig(outdir2+fname, bbox_inches='tight', dpi=1000)\nplt.close(fig)\n\n# ############################################################################\n# # plot for histogram\n# ############################################################################\nxlabel = 'Precip (mm/day)'\nylabel = 'Frequency'\n\n# plot annual mean ts for each group\nbinarrays = []\nfor icluster in range(ncluster):\n groupname = 'group'+str(icluster)\n\n print('plot histogram for ' + groupname)\n\n maps_res_3d = np.broadcast_to(maps_res == icluster, dataset_var.shape)\n tempdata = dataset_var[maps_res_3d]\n binmax = np.amax(tempdata[~np.isnan(tempdata)])*2./3.\n binarray = np.arange(0, binmax, binmax/20)\n binarrays.append(binarray)\n\n title = str(iniyear)+' to '+str(endyear)+' TRMM Total precip distribution in the group: '+str(icluster)\n fname = 'TRMM_prect_hist_group_'+str(icluster)+'.png'\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for idx in range(len(cluster_labels)):\n if cluster_labels[idx] == icluster:\n lat_res = np.argmin(np.abs(reg_lats - temp_lat[idx]))\n lon_res = np.argmin(np.abs(reg_lons - temp_lon[idx]))\n tempdata = dataset_var[:, lat_res, lon_res]\n y, binEdges = np.histogram(tempdata[~np.isnan(tempdata)], bins=binarray, density=True)\n bincenters = 0.5*(binEdges[1:]+binEdges[:-1])\n plt.plot(bincenters, y, c='grey', linestyle='-', linewidth=1.5, alpha=0.4)\n\n tempdata = dataset_var[maps_res_3d]\n y, binEdges = np.histogram(tempdata[~np.isnan(tempdata)], bins=binarray, density=True)\n bincenters = 0.5*(binEdges[1:]+binEdges[:-1])\n plt.plot(bincenters, y, c=colors[icluster], linestyle='-', linewidth=1.5, label=groupname)\n\n plt.yscale('log')\n plt.legend(handlelength=4, fontsize=5)\n plt.xticks(fontsize=6)\n plt.yticks(fontsize=6)\n plt.xlabel(xlabel, fontsize=8)\n plt.ylabel(ylabel, fontsize=8)\n\n plt.suptitle(title, fontsize=9, y=0.95)\n plt.savefig(outdir2+fname, bbox_inches='tight', dpi=1000)\n plt.close(fig)\n\n# plot annual mean ts for all groups\nprint('plot histogram for all groups')\nfig = plt.figure()\nax = fig.add_subplot(111)\n\ntitle = str(iniyear)+' to '+str(endyear)+' TRMM Total precip distribution'\nfname = 
'TRMM_prect_hist_groupinone.png'\n\nfor idx in range(len(cluster_labels)):\n lat_res = np.argmin(np.abs(reg_lats - temp_lat[idx]))\n lon_res = np.argmin(np.abs(reg_lons - temp_lon[idx]))\n tempdata = dataset_var[:, lat_res, lon_res]\n y, binEdges = np.histogram(tempdata[~np.isnan(tempdata)], bins=binarrays[cluster_labels[idx]], density=True)\n bincenters = 0.5*(binEdges[1:]+binEdges[:-1])\n plt.plot(bincenters, y, c=colors[cluster_labels[idx]], linestyle='-', linewidth=1.5, alpha=0.4)\n\nlines = []\nlegendnames = []\nfor icluster in range(ncluster):\n iname = 'group'+str(icluster)\n legendnames.append(iname)\n maps_res_3d = np.broadcast_to(maps_res == icluster, dataset_var.shape)\n tempdata = dataset_var[maps_res_3d]\n y, binEdges = np.histogram(tempdata[~np.isnan(tempdata)], bins=binarrays[icluster], density=True)\n bincenters = 0.5*(binEdges[1:]+binEdges[:-1])\n lines += plt.plot(bincenters, y, c=colors[icluster], linestyle='-', linewidth=1.5, label=iname)\n\nplt.yscale('log')\nplt.legend(lines, legendnames, handlelength=4, fontsize=5)\nplt.xticks(fontsize=6)\nplt.yticks(fontsize=6)\nplt.xlabel(xlabel, fontsize=8)\nplt.ylabel(ylabel, fontsize=8)\n\nplt.suptitle(title, fontsize=9, y=0.95)\nplt.savefig(outdir2+fname, bbox_inches='tight', dpi=300)\nplt.close(fig)\n\n# ############################################################################\n# # plot for long term mean climatology\n# ############################################################################\nxlabel = 'Month'\nylabel = 'Precip (mm/day)'\n\ndataset_climmean = np.ma.zeros((12, lat_2-lat_1+1, lon_2-lon_1+1))\nfor imon in range(12):\n dataset_climmean[imon, :, :] = np.ma.mean(dataset_var[(time.month == (imon+1)), :, :], axis=0)\n\n# print(dataset_monmean)\n\ndataset_gpclimmean = np.ma.zeros((12, ncluster))\nfor idx in range(dataset_climmean.shape[0]):\n for icluster in range(ncluster):\n temp = dataset_climmean[idx, :, :]\n dataset_gpclimmean[idx, icluster] = np.ma.mean(temp[maps_res == icluster])\n\nprint(dataset_gpclimmean)\nkmeans_res['dataset_gpclimmean'] = dataset_gpclimmean\n\n# plot climatology for each group\nfor icluster in range(ncluster):\n groupname = 'group'+str(icluster)\n\n print('plot climatology for ' + groupname)\n\n title = str(iniyear)+' to '+str(endyear)+' TRMM Seasonal cycle of precip in the group: '+str(icluster)\n fname = 'TRMM_prect_clim_mean_line_group_'+str(icluster)+'.png'\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for idx in range(len(cluster_labels)):\n if cluster_labels[idx] == icluster:\n lat_res = np.argmin(np.abs(reg_lats - temp_lat[idx]))\n lon_res = np.argmin(np.abs(reg_lons - temp_lon[idx]))\n plt.plot(months, dataset_climmean[:, lat_res, lon_res], color='grey', marker='o',\n markersize=1, linestyle='-', linewidth=1.5, alpha=0.4)\n\n plt.plot(months, dataset_gpclimmean[:, icluster], color=colors[icluster], marker='o',\n markersize=1, linestyle='-', linewidth=1.5, label=groupname)\n\n plt.legend(handlelength=4, fontsize=5)\n plt.xticks(fontsize=6)\n plt.yticks(fontsize=6)\n plt.xlabel(xlabel, fontsize=8)\n plt.ylabel(ylabel, fontsize=8)\n\n plt.suptitle(title, fontsize=9, y=0.95)\n plt.savefig(outdir2+fname, bbox_inches='tight', dpi=1000)\n plt.close(fig)\n\n# plot annual mean ts for all groups\nprint('plot annual mean ts for all groups')\nfig = plt.figure()\nax = fig.add_subplot(111)\n\ntitle = str(iniyear)+' to '+str(endyear)+' TRMM Seasonal cycle of precip'\nfname = 'TRMM_prect_clim_mean_line_groupinone.png'\n\nfor idx in range(len(cluster_labels)):\n lat_res = 
np.argmin(np.abs(reg_lats - temp_lat[idx]))\n lon_res = np.argmin(np.abs(reg_lons - temp_lon[idx]))\n plt.plot(months, dataset_climmean[:, lat_res, lon_res], color=colors[cluster_labels[idx]], marker='o',\n markersize=1, linestyle='-', linewidth=1.5, alpha=0.4)\nlines = []\nlegendnames = []\nfor icluster in range(ncluster):\n iname = 'group'+str(icluster)\n legendnames.append(iname)\n lines += plt.plot(months, dataset_gpclimmean[:, icluster], color=colors[icluster], marker='o',\n markersize=1, linestyle='-', linewidth=1.5, label=iname)\n\nplt.legend(lines, legendnames, handlelength=4, fontsize=5)\nplt.xticks(fontsize=6)\nplt.yticks(fontsize=6)\nplt.xlabel(xlabel, fontsize=8)\nplt.ylabel(ylabel, fontsize=8)\n\nplt.suptitle(title, fontsize=9, y=0.95)\nplt.savefig(outdir2+fname, bbox_inches='tight', dpi=1000)\nplt.close(fig)\n\n\n# test pickle data\npickle.dump(kmeans_res, open(outdir+'TRMM_kmeans_result_overland_'+str(ncluster)+'cluster.p', \"wb\"))\nres_load = pickle.load(open(outdir+'TRMM_kmeans_result_overland_'+str(ncluster)+'cluster.p', \"rb\"))\nprint(res_load['4 clusters'])\nprint(res_load['kmeans_res_lats'].shape)\nprint(res_load['kmeans_res_lats'])\n","sub_path":"SEA_vrcesm/clustering/TRMM_clustering_overland.py","file_name":"TRMM_clustering_overland.py","file_ext":"py","file_size_in_byte":23105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"262174099","text":"\nimport numpy as np\nimport torch\nfrom torch import nn\nimport os\nimport functools\nfrom . nnmodels import *\n##############################################################################\n# Main Model\n##############################################################################\nclass Pix2PixHDModel(nn.Module):\n def name(self):\n return 'Pix2PixHDModel'\n \n def init_loss_filter(self,G_GAN_Face=False,G_VGG_face=False,D_face_real=False, D_face_fake=False):\n flags = (G_GAN_Face,G_VGG_face,D_face_real, D_face_fake)\n def loss_filter(g_gan_face,g_vgg_face,d_face_real, d_face_fake):\n return [l for (l,f) in zip((g_gan_face,g_vgg_face,d_face_real, d_face_fake),flags) if f]\n return loss_filter\n \n def __init__(self, head_label_nc, output_nc):\n super(Pix2PixHDModel, self).__init__() \n self.isTrain=True\n self.Tensor= torch.cuda.FloatTensor\n \n self.gpu_ids=[0,1,2,3]\n self.resize_or_crop='scale_width'\n if self.resize_or_crop != 'none' or not self.isTrain: # when training at full res this causes OOM\n torch.backends.cudnn.benchmark = True\n self.no_instance=True\n \n self.data_type=32\n self.head_label_nc=head_label_nc\n self.feat_num=3+head_label_nc\n #self.feat_num=head_label_nc\n self.output_nc=output_nc\n self.HEAD_SIZE=64\n self.HALF_HEAD=32\n self.BODY_SIZE=512\n ##### define networks \n \n # Face Enhancer net\n #norm_layer1 = functools.partial(nn.InstanceNorm2d, affine=False)\n norm_layer1=nn.BatchNorm2d\n self.facenetG = GlobalGenerator(self.feat_num, self.output_nc, norm_layer=norm_layer1).cuda()\n self.facenetG.apply(weights_init)\n self.facenetG = nn.DataParallel(self.facenetG)\n \n # Discriminator network\n self.n_layers_D=3\n self.num_D=2\n if self.isTrain:\n use_sigmoid = False\n \n \n self.facenetD = FaceNLayerDiscriminator(self.output_nc, 64, self.n_layers_D,use_sigmoid=use_sigmoid).cuda()\n self.facenetD.apply(weights_init)\n self.facenetD = nn.DataParallel(self.facenetD)\n \n self.verbose=False\n \n # set loss functions and optimizers\n self.pool_size=0\n self.lr=2e-4\n self.beta1=0.5\n self.no_lsgan=False\n if self.isTrain:\n if self.pool_size > 
0 and (len(self.gpu_ids)) > 1:\n raise NotImplementedError(\"Fake Pool Not Implemented for MultiGPU\")\n self.fake_pool = ImagePool(self.pool_size)\n self.old_lr = self.lr\n\n # define loss functions\n self.loss_filter = self.init_loss_filter(True,True,True,True)\n \n # GAN loss\n #self.criterionGAN = GANLoss(use_lsgan=not self.no_lsgan, tensor=self.Tensor).cuda()\n self.Gloss=nn.MSELoss()\n \n # Discriminator loss\n self.Dloss=nn.MSELoss()\n \n # VGG loss\n self.criterionVGG = VGGLoss().cuda()\n self.criterionVGG=nn.DataParallel(self.criterionVGG)\n \n \n # Names so we can breakout loss\n self.loss_names = self.loss_filter('G_GAN_Face',\"G_VGG_face\",\\\n 'D_face_real', 'D_face_fake')\n\n # initialize optimizers\n # optimizer G\n self.niter_fix_global=0\n self.n_local_enhancers=1\n if self.niter_fix_global > 0: \n import sys\n if sys.version_info >= (3,0):\n finetune_list = set()\n else:\n from sets import Set\n finetune_list = Set()\n\n params_dict = dict(self.facenetG.named_parameters())\n params = []\n for key, value in params_dict.items(): \n if key.startswith('model' + str(self.n_local_enhancers)): \n params += [value]\n finetune_list.add(key.split('.')[0]) \n print('------------- Only training the local enhancer network (for %d epochs) ------------' % self.niter_fix_global)\n print('The layers that are finetuned are ', sorted(finetune_list)) \n else:\n params = list(self.facenetG.parameters()) \n self.optimizer_G_face = torch.optim.Adam(params, lr=self.lr, betas=(self.beta1, 0.999))\n \n \n \n \n # optimizer D \n params = list(self.facenetD.parameters()) \n self.optimizer_D_face = torch.optim.Adam(params, lr=self.lr, betas=(self.beta1, 0.999))\n \n def discriminate_face(self, input_label, test_image, use_pool=False):\n input_concat = torch.cat((input_label, test_image.detach()), dim=1)\n if use_pool: \n fake_query = self.fake_pool.query(input_concat)\n return self.facenetD(fake_query)\n else:\n return self.facenetD(input_concat)\n\n def forward(self, head_lbl,fake_head, real_head, infer=False):\n #---------------------------------Head Enhancement----------------------------------------\n \n #---------------------------------Train Generator----------------------------------------\n head_input=torch.cat((fake_head, head_lbl), dim=1)\n head_buff=self.facenetG(head_input)\n enhanced_head=head_buff\n #enhanced_head=head_buff+fake_head\n \n # GAN loss (Fake Passability Loss) \n fake_features = self.facenetD.module.extract_features(enhanced_head)\n with torch.no_grad():\n real_features = self.facenetD.module.extract_features(real_head)\n loss_G_GAN_face = self.Gloss(fake_features, real_features)\n \n self.lambda_feat=2.0\n \n loss_G_VGG_face =self.criterionVGG(enhanced_head, real_head) * self.lambda_feat \n \n loss_G=loss_G_GAN_face+loss_G_VGG_face\n loss_Gavg = torch.mean(loss_G)\n ############### Backward Pass ####################\n # update generator weights\n self.optimizer_G_face.zero_grad()\n loss_Gavg.backward()\n self.optimizer_G_face.step()\n #---------------------------------Train Discriminator----------------------------------------\n \n \n reapeat_time=4\n for _ in range(reapeat_time):\n with torch.no_grad():\n head_input=torch.cat((fake_head, head_lbl), dim=1)\n head_buff=self.facenetG(head_input)\n #enhanced_head=head_buff+fake_head\n enhanced_head=head_buff\n # Real Detection and Loss \n real_labels = self.facenetD(real_head)\n with torch.no_grad():\n ones = torch.ones_like(real_labels)\n zeros = torch.zeros_like(real_labels)\n\n # one sided label smoothing for vanilla gan\n 
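# NOTE: sampling the real targets from U(0.9, 1.1) and the fake targets\n                # from U(-0.1, 0.1), instead of using hard 1/0 labels, keeps the MSE\n                # discriminator loss from saturating; a common GAN stabilization trick\n                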
ones.uniform_(.9, 1.1)\n zeros.uniform_(-.1, .1)\n\n loss_D_head_real = self.Dloss(real_labels, ones)\n\n fake_labels = self.facenetD(enhanced_head)\n loss_D_head_fake = self.Dloss(fake_labels, zeros) \n\n loss_D=loss_D_head_real+loss_D_head_fake\n loss_Davg = torch.mean(loss_D)\n ############### Backward Pass ####################\n # update discriminator weights\n self.optimizer_D_face.zero_grad()\n\n loss_Davg.backward()\n self.optimizer_D_face.step()\n \n # Only return the fake_B image if necessary to save BW\n return [ self.loss_filter( loss_G_GAN_face,loss_G_VGG_face,loss_D_head_real, loss_D_head_fake), None if not infer else enhanced_head ]\n#---------------------------------syh----2020.08.06---------------------------\n def inference(self, head_lbl,fake_head):\n head_input=torch.cat((fake_head, head_lbl), dim=1)\n with torch.no_grad():\n enhanced_head=self.facenetG(head_input)\n enhanced_head+=fake_head\n return enhanced_head\n\n def sample_features(self, inst): \n # read precomputed feature clusters \n self.checkpoints_dir=\"./checkpoints\"\n self.name=\"target\"\n self.cluster_path=\"cluster\"\n cluster_path = os.path.join(self.checkpoints_dir, self.name, self.cluster_path) \n features_clustered = np.load(cluster_path).item()\n\n # randomly sample from the feature clusters\n inst_np = inst.cpu().numpy().astype(int) \n feat_map = self.Tensor(inst.size()[0], self.feat_num, inst.size()[2], inst.size()[3])\n for i in np.unique(inst_np): \n label = i if i < 1000 else i//1000\n if label in features_clustered:\n feat = features_clustered[label]\n cluster_idx = np.random.randint(0, feat.shape[0]) \n \n idx = (inst == int(i)).nonzero()\n for k in range(self.feat_num): \n feat_map[idx[:,0], idx[:,1] + k, idx[:,2], idx[:,3]] = feat[cluster_idx, k]\n if self.data_type==16:\n feat_map = feat_map.half()\n return feat_map\n\n def update_fixed_params(self):\n # after fixing the global generator for a number of iterations, also start finetuning it\n params = list(self.netG.parameters()) \n self.optimizer_G_face = torch.optim.Adam(params, lr=self.lr, betas=(self.beta1, 0.999))\n if self.verbose:\n print('------------ Now also finetuning global generator -----------')\n\n def update_learning_rate(self):\n self.niter_decay=20\n lrd = self.lr / self.niter_decay\n lr = self.old_lr - lrd \n for param_group in self.optimizer_D_face.param_groups:\n param_group['lr'] = lr\n for param_group in self.optimizer_G_face.param_groups:\n param_group['lr'] = lr\n if self.verbose:\n print('update learning rate: %f -> %f' % (self.old_lr, lr))\n self.old_lr = lr","sub_path":"utils/model_face.py","file_name":"model_face.py","file_ext":"py","file_size_in_byte":10314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"290810841","text":"#!/usr/bin/env python3\nimport sys\nimport cv2\nimport numpy as np\nfrom scipy.ndimage.filters import maximum_filter\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\n# Hyper parameter setting\nHYP_W = 10 # gaussian window size\nHYP_SIGMA = 1 # var of gaussian\nHYP_K = 0.05 # Harris corner -> k\nHYP_THRESH = 0.03 # Threshold for R\nHYP_PATCH_SIZE = 5 # patch size for corner match\nHYP_INLIER_THRESH = 10 # Inlier distance threshold\nHYP_ROUND = 50 # RANSAC round\n\ndef detect_corners(image):\n \"\"\"Harris corner detector.\n\n Args:\n - image (2D float64 array): A grayscale image.\n\n Returns:\n - corners (list of 2-tuples): A list of 2-tuples representing the locations\n of detected corners. 
Each tuple contains the (x, y) coordinates of a\n pixel, where y is row index and x is the column index, i.e. `image[y, x]`\n gives the corresponding pixel intensity.\n \"\"\"\n\n hyper_w = HYP_W # gaussian window size\n hyper_sigma = HYP_SIGMA\n hyper_k = HYP_K \n hyper_thresh = HYP_THRESH # threshold of score (% of max value of the score)\n\n size_y, size_x = np.shape(image)\n\n I_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize = 3)\n I_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize = 3)\n\n I_x_sq = np.multiply(I_x, I_x)\n I_y_sq = np.multiply(I_y, I_y)\n I_xy = np.multiply(I_x, I_y)\n\n # create a gaussian window\n y, x = np.ogrid[-0.5*hyper_w:0.5*hyper_w, -0.5*hyper_w:0.5*hyper_w]\n gaussian_window = np.exp(-(x*x + y*y)/(2.0*hyper_sigma**2))\n summ = np.sum(gaussian_window)\n gaussian_window /= summ\n\n # convolution with gaussian_window\n I_xx = signal.convolve2d(I_x_sq, gaussian_window, mode = 'same')\n I_yy = signal.convolve2d(I_y_sq, gaussian_window, mode = 'same')\n I_xy = signal.convolve2d(I_xy, gaussian_window, mode = 'same')\n\n # calculate R\n R = np.multiply(I_xx, I_yy) - np.multiply(I_xy, I_xy) - hyper_k * np.square(I_xx + I_yy) \n R_thresh = np.copy(R)\n R_thresh[R < (np.max(R)*hyper_thresh)] = 0\n R_nms = nonmaxsup(R_thresh, 3)\n\n corners_y, corners_x = np.where(R_nms > 0)\n corners = list(zip(corners_y, corners_x))\n\n return corners\n\n\ndef nonmaxsup(scores, ksize):\n \"\"\"Apply non-maximum suppression on a score map.\n\n Args:\n - scores (2D float64 array): A score map.\n - ksize (int): Kernel size of the maximum filter being used.\n\n Returns:\n - suppressed (2D float64 array): Suppressed version of `scores` where all\n elements except the local maxima are set to 0.\n \"\"\"\n suppressed = np.copy(scores)\n filtered = maximum_filter(suppressed, (ksize, ksize))\n maxima = (suppressed == filtered)\n suppressed[np.logical_not(maxima)] = 0\n return suppressed\n\n\ndef match_corners(image1, image2, corners1, corners2):\n \"\"\"Match corners using mutual marriages.\n\n Args:\n - image1 (2D float64 array): A grayscale image.\n - image2 (2D float64 array): A grayscale image.\n - corners1 (list of 2-tuples): Corners in image1.\n - corners2 (list of 2-tuples): Corners in image2.\n\n Returns:\n - matches (list of 2-tuples): A list of 2-tuples representing the matching\n indices. Each tuple contains two integer indices. 
For example, tuple\n    (0, 42) indicates that corners1[0] is matched to corners2[42].\n    \"\"\"\n    \n    matches = []\n    # hyper parameter setting\n    hyper_patch_size = HYP_PATCH_SIZE\n\n    size1_y, size1_x = np.shape(image1)\n    size2_y, size2_x = np.shape(image2)\n    no_corner1 = len(corners1)\n    no_corner2 = len(corners2)\n\n    mPatch_1 = np.empty([no_corner1,(2*hyper_patch_size)**2], dtype = float)\n    mPatch_2 = np.empty([no_corner2,(2*hyper_patch_size)**2], dtype = float)\n\n    # for image 1, get each patch with a center at each corner\n    for cIdx_1 in range(no_corner1):\n        y_tmp, x_tmp = corners1[cIdx_1]\n\n        y_from = max(y_tmp - hyper_patch_size, 0)\n        y_to = min(y_tmp + hyper_patch_size, size1_y)\n        x_from = max(x_tmp - hyper_patch_size, 0)\n        x_to = min(x_tmp + hyper_patch_size, size1_x)\n\n        patch_img1 = image1[y_from:y_to, x_from:x_to].flatten().astype(float)\n        patch_img1_diff = patch_img1 - np.mean(patch_img1)\n        variance = np.sqrt(np.sum(np.square(patch_img1_diff))/patch_img1.size)\n        patch_img1_norm = patch_img1_diff / variance\n\n        if(len(patch_img1_norm) != (2*hyper_patch_size)**2):\n            patch_img1_norm = np.zeros((2*hyper_patch_size)**2)\n        mPatch_1[cIdx_1] = patch_img1_norm\n\n    # for image 2, get each patch with a center at each corner\n    for cIdx_2 in range(no_corner2):\n        y_tmp, x_tmp = corners2[cIdx_2]\n\n        y_from = max(y_tmp - hyper_patch_size, 0)\n        y_to = min(y_tmp + hyper_patch_size, size2_y)\n        x_from = max(x_tmp - hyper_patch_size, 0)\n        x_to = min(x_tmp + hyper_patch_size, size2_x)\n\n        patch_img2 = image2[y_from:y_to, x_from:x_to].flatten().astype(float)\n        patch_img2_diff = patch_img2 - np.mean(patch_img2)\n        variance = np.sqrt(np.sum(np.square(patch_img2_diff))/patch_img2.size)\n        patch_img2_norm = patch_img2_diff / variance\n\n        if(len(patch_img2_norm) != (2*hyper_patch_size)**2):\n            patch_img2_norm = np.zeros((2*hyper_patch_size)**2)\n        mPatch_2[cIdx_2] = patch_img2_norm\n\n    # Cross correlation\n    mNCC = np.dot(mPatch_1, np.transpose(mPatch_2))\n\n    # best match from 1 to 2\n    best_f1t2 = np.argmax(mNCC, axis = 1)\n    # best match from 2 to 1\n    best_f2t1 = np.argmax(mNCC, axis = 0)\n\n    \n    for cIdx in range(len(best_f1t2)):\n        if (cIdx == best_f2t1[best_f1t2[cIdx]]):\n            pair = (cIdx, best_f1t2[cIdx])\n            matches.append(pair)\n\n    print('# of matches: ', len(matches))\n    return matches\n\n\ndef draw_matches(image1, image2, corners1, corners2, matches,\n                 outlier_labels=None):\n    \"\"\"Draw matched corners between images.\n\n    Args:\n    - matches (list of 2-tuples)\n    - image1 (3D uint8 array): A color image having shape (H1, W1, 3).\n    - image2 (3D uint8 array): A color image having shape (H2, W2, 3).\n    - corners1 (list of 2-tuples)\n    - corners2 (list of 2-tuples)\n    - outlier_labels (list of bool)\n\n    Returns:\n    - match_image (3D uint8 array): A color image having shape\n      (max(H1, H2), W1 + W2, 3).\n    \"\"\"\n    H1 = image1.shape[0]\n    H2 = image2.shape[0]\n    # if size is different\n    if H1 != H2:\n        if H1 < H2:\n            image1 = np.pad(image1, ((0, (H2-H1)), (0, 0), (0, 0)), mode=\"constant\")\n        else:\n            image2 = np.pad(image2, ((0, (H1-H2)), (0, 0), (0, 0)), mode=\"constant\")\n\n    concat_img = np.concatenate((image1, image2), axis = 1)\n    \n    color_dot = (0, 255, 0)\n    radius = 4\n    thickness = 2\n    if outlier_labels is None:\n        for mIdx in range(len(matches)):\n            center1 = corners1[matches[mIdx][0]]\n            center2 = corners2[matches[mIdx][1]]\n            center2 = (center2[0], center2[1]+np.shape(image1)[1])\n\n            match_image = cv2.line(concat_img, (center1[1], center1[0]), (center2[1], center2[0]), [255, 0, 0], thickness)\n            match_image = 
cv2.circle(concat_img, (center1[1], center1[0]), radius, color_dot, -1)\n            match_image = cv2.circle(concat_img, (center2[1], center2[0]), radius, color_dot, -1) \n    else:\n        for mIdx in range(len(matches)):\n            center1 = corners1[matches[mIdx][0]]\n            center2 = corners2[matches[mIdx][1]]\n            center2 = (center2[0], center2[1]+np.shape(image1)[1])\n\n            if outlier_labels[mIdx] == True:\n                # if this match is outlier -> red\n                match_image = cv2.line(concat_img, (center1[1], center1[0]), (center2[1], center2[0]), [0, 0, 255], thickness)\n            else: # if this match is inlier -> blue\n                match_image = cv2.line(concat_img, (center1[1], center1[0]), (center2[1], center2[0]), [255, 0, 0], thickness)\n            match_image = cv2.circle(concat_img, (center1[1], center1[0]), radius, color_dot, -1)\n            match_image = cv2.circle(concat_img, (center2[1], center2[0]), radius, color_dot, -1) \n\n    return match_image\n\n\ndef compute_affine_xform(corners1, corners2, matches):\n    \"\"\"Compute affine transformation given matched feature locations.\n\n    Args:\n    - corners1 (list of 2-tuples)\n    - corners2 (list of 2-tuples)\n    - matches (list of 2-tuples)\n\n    Returns:\n    - xform (2D float64 array): A 3x3 matrix representing the affine\n      transformation that maps coordinates in image1 to the corresponding\n      coordinates in image2.\n    - outlier_labels (list of bool): A list of Boolean values indicating whether\n      the corresponding match in `matches` is an outlier or not. For example,\n      if `matches[42]` is determined as an outlier match after RANSAC, then\n      `outlier_labels[42]` should have value `True`.\n    \"\"\"\n    # Hyper parameter\n    hyper_inlier_thresh = HYP_INLIER_THRESH\n    hyper_round = HYP_ROUND\n\n    inliers_tmp = []\n    inliers = []\n    for rIdx in range(hyper_round+1):\n        outlier_labels_tmp = np.zeros(len(matches), dtype=bool)\n        match = matches[np.random.randint(len(matches))]\n        # get corner coordinate\n        corner1 = corners1[match[0]]\n        corner2 = corners2[match[1]]\n        # compute the translation vector\n        trans_y = corner2[0] - corner1[0]\n        trans_x = corner2[1] - corner1[1]\n        trans = [trans_y, trans_x]\n\n        for mIdx, matchTmp in enumerate(matches):\n            cornerTmp1 = np.array([corners1[matchTmp[0]][0], corners1[matchTmp[0]][1]]) + trans\n            cornerTmp2 = [corners2[matchTmp[1]][0], corners2[matchTmp[1]][1]]\n            \n            diff = np.sqrt(np.sum((cornerTmp2 - cornerTmp1)**2))\n\n            if diff <= hyper_inlier_thresh:\n                inliers_tmp.append(matchTmp)\n            else:\n                outlier_labels_tmp[mIdx] = True\n\n        # inlier update\n        if len(inliers_tmp) > len(inliers):\n            inliers = inliers_tmp\n            outlier_labels = outlier_labels_tmp\n        # reset the candidate set before the next RANSAC round\n        inliers_tmp = []\n\n    print(\"# of inliers: \" ,len(inliers))\n    print()\n\n    if len(inliers) >= 3:\n        # Compute affine transformation\n        A = []\n        b = []\n        xform = np.zeros([3,3], dtype=np.float64)\n        for mIdx, inlier in enumerate(inliers):\n            from_ = corners1[inlier[0]]\n            to_ = corners2[inlier[1]]\n            A_tmp = [from_[1], from_[0], 1, 0, 0, 0]\n            A.append(A_tmp)\n            A_tmp = [0, 0, 0, from_[1], from_[0], 1]\n            A.append(A_tmp)\n\n            b.append(to_[1])\n            b.append(to_[0])\n\n        A_trans = np.transpose(A)\n\n        xform = (((np.linalg.inv(A_trans.dot(A))).dot(A_trans)).dot(b)).reshape(2,3)\n        xform = np.vstack((xform,[0,0,1]))\n\n        print(\"Affine transformation matrix: \")\n        print(xform)\n    else:\n        print(\"Fewer than 3 inliers, cannot compute affine transformation matrix\")\n        sys.exit()\n\n    return xform, list(outlier_labels)\n    \n\n# Extra Credit\ndef compute_proj_xform(corners1, corners2, matches):\n    \"\"\"Compute projective transformation given matched feature locations.\n\n    Args:\n    - corners1 (list of 2-tuples)\n    - 
corners2 (list of 2-tuples)\n    - matches (list of 2-tuples)\n\n    Returns:\n    - xform (2D float64 array): A 3x3 matrix representing the projective\n      transformation that maps coordinates in image1 to the corresponding\n      coordinates in image2.\n    - outlier_labels (list of bool)\n    \"\"\"\n    raise NotImplementedError\n\n\ndef stitch_images(image1, image2, xform):\n    \"\"\"Stitch two matched images given the transformation between them.\n\n    Args:\n    - image1 (3D uint8 array): A color image.\n    - image2 (3D uint8 array): A color image.\n    - xform (2D float64 array): A 3x3 matrix representing the transformation\n      between image1 and image2. This transformation should map coordinates\n      in image1 to the corresponding coordinates in image2.\n\n    Returns:\n    - image_stitched (3D uint8 array)\n    \"\"\"\n    rows, cols, ch = image2.shape\n\n    blend_weight = 0.5\n\n    image_warped = cv2.warpAffine(image1, xform[0:2, :], (cols, rows))\n\n    image_stitched = cv2.addWeighted(image_warped, blend_weight, image2, 1-blend_weight, 0)\n\n    return image_stitched\n\n\n\n# Extra Credit\ndef detect_blobs(image):\n    \"\"\"Laplacian blob detector.\n\n    Args:\n    - image (2D float64 array): A grayscale image.\n\n    Returns:\n    - corners (list of 2-tuples): A list of 2-tuples representing the locations\n      of detected blobs. Each tuple contains the (x, y) coordinates of a\n      pixel, which can be indexed by image[y, x].\n    - scales (list of floats): A list of floats representing the scales of\n      detected blobs. Has the same length as `corners`.\n    - orientations (list of floats): A list of floats representing the dominant\n      orientation of the blobs.\n    \"\"\"\n    raise NotImplementedError\n\n\n# Extra Credit\ndef compute_descriptors(image, corners, scales, orientations):\n    \"\"\"Compute descriptors for corners at specified scales.\n\n    Args:\n    - image (2d float64 array): A grayscale image.\n    - corners (list of 2-tuples): A list of (x, y) coordinates.\n    - scales (list of floats): A list of scales corresponding to the corners.\n      Must have the same length as `corners`.\n    - orientations (list of floats): A list of floats representing the dominant\n      orientation of the blobs.\n\n    Returns:\n    - descriptors (list of 1d array): A list of descriptors for each corner.\n      Each element is a 1d array of length 128.\n    \"\"\"\n    if len(corners) != len(scales) or len(corners) != len(orientations):\n        raise ValueError(\n            '`corners`, `scales` and `orientations` must all have the same length.')\n\n    raise NotImplementedError\n\n\n# Extra Credit\ndef match_descriptors(descriptors1, descriptors2):\n    \"\"\"Match descriptors based on their L2-distance and the \"ratio test\".\n\n    Args:\n    - descriptors1 (list of 1d arrays):\n    - descriptors2 (list of 1d arrays):\n\n    Returns:\n    - matches (list of 2-tuples): A list of 2-tuples representing the matching\n      indices. Each tuple contains two integer indices. 
For example, tuple\n (0, 42) indicates that corners1[0] is matched to corners2[42].\n \"\"\"\n raise NotImplementedError\n\n\ndef baseline_main(argv):\n\n img_1_name = argv[0]\n img_2_name = argv[1]\n\n img1 = cv2.imread('data/' + img_1_name + '.png', cv2.IMREAD_COLOR)\n img2 = cv2.imread('data/' + img_2_name + '.png', cv2.IMREAD_COLOR)\n\n print(70* \"=\")\n print('image 1: ' + img_1_name + '.png')\n print('image 2: ' + img_2_name + '.png')\n print()\n\n gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) / 255.0\n gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) / 255.0\n\n corners_gray1 = detect_corners(gray1)\n corners_gray2 = detect_corners(gray2)\n\n matches = match_corners(gray1, gray2, corners_gray1, corners_gray2)\n xform, outlier_labels = compute_affine_xform(corners_gray1, corners_gray2, matches)\n\n rows, cols, ch = img1.shape\n image_warped = cv2.warpAffine(img1, xform[0:2, :], (cols, rows))\n\n match_image = draw_matches(img1, img2, corners_gray1, corners_gray2, matches, outlier_labels)\n\n image_stitched = stitch_images(img1, img2, xform)\n\n cv2.imwrite('output/_' + img_1_name + '_' + img_2_name + '_feature_match.png', match_image) \n cv2.imwrite('output/_' + img_1_name + '_' + img_2_name + '_stitch.png', image_stitched) \n cv2.imwrite('output/_' + img_1_name + '_' + img_2_name + '_warped.png', image_warped)\n\n # parameter save\n param_file = open(\"output/_\" + img_1_name + \"_\" + img_2_name + \"_parameters.txt\", \"w+\")\n param_file.write(\"w : %f\\n\" %HYP_W)\n param_file.write(\"sigma: %f\\n\" %HYP_SIGMA)\n param_file.write(\"k : %f\\n\" %HYP_K)\n param_file.write(\"R threshold : %f\\n\" %HYP_THRESH)\n param_file.write(\"Corner match patch size : %d\\n\" %HYP_PATCH_SIZE)\n param_file.write(\"Inlier distance threshold : %d\\n\" %HYP_INLIER_THRESH)\n param_file.write(\"# of RANSAC round : %d\\n\" %HYP_ROUND)\n\n\n\n print(30*\"=\", \"complete\", 30*\"=\")\n\n\n# Extra Credit\ndef extra_main():\n pass\n\n\nif __name__ == '__main__':\n baseline_main(sys.argv[1:])\n # extra_main()\n\n","sub_path":"image_alignment.py","file_name":"image_alignment.py","file_ext":"py","file_size_in_byte":16176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"142961123","text":"# @Author: wangyaominde\n# @Date: 2019-01-18 16:13:45\n# @Last modified by: wangyaominde\n# @Last modified time: 2019-09-03T11:05:47+08:00\n\nimport json\nimport requests\n#关闭https证书验证警告\nrequests.packages.urllib3.disable_warnings()\nfor i in range(5):\n r = requests.get(url=\"https://v1.hitokoto.cn/?c=h\")\n a=r.json()\n print(a['hitokoto']+\"---\"+a['from'])\n","sub_path":"pycrawler/一言api.py","file_name":"一言api.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"427186152","text":"\"\"\"\nbyceps.application.blueprints.blueprints\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n:Copyright: 2014-2023 Jochen Kupperschmidt\n:License: Revised BSD (see `LICENSE` file for details)\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom collections.abc import Iterator\nfrom typing import Optional\n\nfrom flask import Flask\nimport structlog\n\nfrom byceps.util.framework.blueprint import get_blueprint\n\n\nlog = structlog.get_logger()\n\n\nBlueprintReg = tuple[str, Optional[str]]\n\n\ndef register_blueprints(app: Flask) -> None:\n \"\"\"Register blueprints depending on the configuration.\"\"\"\n for name, url_prefix in _get_blueprints(app):\n blueprint = get_blueprint(name)\n 
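# a url_prefix of None mounts the blueprint at the routes it declares\n        # itself (e.g. ('site.news', '/news') serves the news blueprint under /news)\n        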
app.register_blueprint(blueprint, url_prefix=url_prefix)\n\n if app.config['API_ENABLED']:\n register_api_blueprints(app)\n log.info('API: enabled')\n else:\n log.info('API: disabled')\n\n\ndef _get_blueprints(app: Flask) -> Iterator[BlueprintReg]:\n \"\"\"Yield blueprints to register on the application.\"\"\"\n app_mode = app.byceps_app_mode\n\n if app_mode.is_admin() or app_mode.is_site():\n yield from _get_blueprints_common()\n\n if app_mode.is_admin():\n yield from _get_blueprints_admin()\n log.info('Admin blueprints: enabled')\n else:\n log.info('Admin blueprints: disabled')\n\n if app_mode.is_site():\n yield from _get_blueprints_site()\n log.info('Site blueprints: enabled')\n else:\n log.info('Site blueprints: disabled')\n\n yield ('monitoring.healthcheck', '/health')\n\n if app.config['METRICS_ENABLED']:\n yield ('monitoring.metrics', '/metrics')\n log.info('Metrics: enabled')\n else:\n log.info('Metrics: disabled')\n\n if (app_mode.is_admin() or app_mode.is_site()) and app.config.get(\n 'STYLE_GUIDE_ENABLED', False\n ):\n yield ('common.style_guide', '/style_guide')\n log.info('Style guide: enabled')\n else:\n log.info('Style guide: disabled')\n\n\ndef _get_blueprints_common() -> Iterator[BlueprintReg]:\n yield from [\n ('common.authentication.password', '/authentication/password' ),\n ('common.core', None ),\n ('common.guest_server', None ),\n ('common.locale', '/locale' ),\n ]\n\n\ndef _get_blueprints_site() -> Iterator[BlueprintReg]:\n yield from [\n ('site.attendance', '/attendance' ),\n ('site.authentication.login', '/authentication' ),\n ('site.board', '/board' ),\n ('site.connected_external_accounts.discord', '/connected_external_accounts/discord' ),\n ('site.consent', '/consent' ),\n ('site.core', None ),\n ('site.dashboard', '/dashboard' ),\n ('site.guest_server', '/guest_servers' ),\n ('site.homepage', '/' ),\n ('site.news', '/news' ),\n ('site.newsletter', '/newsletter' ),\n ('site.orga_team', '/orgas' ),\n ('site.page', None ),\n ('site.party_history', '/party_history' ),\n ('site.seating', '/seating' ),\n ('site.shop.order', '/shop' ),\n ('site.shop.orders', '/shop/orders' ),\n ('site.site', None ),\n ('site.snippet', None ),\n ('site.ticketing', '/tickets' ),\n ('site.tourney', '/tourneys' ),\n ('site.user.avatar', '/users' ),\n ('site.user.creation', '/users' ),\n ('site.user.current', '/users' ),\n ('site.user.settings', '/users/me/settings' ),\n ('site.user.email_address', '/users/email_address' ),\n ('site.user_profile', '/users' ),\n ('site.user_badge', '/user_badges' ),\n ('site.user_group', '/user_groups' ),\n ('site.user_message', '/user_messages' ),\n ]\n\n\ndef _get_blueprints_admin() -> Iterator[BlueprintReg]:\n yield from [\n ('admin.api', '/admin/api' ),\n ('admin.attendance', '/admin/attendance' ),\n ('admin.authentication.login', '/authentication' ),\n ('admin.authorization', '/admin/authorization' ),\n ('admin.board', '/admin/boards' ),\n ('admin.brand', '/admin/brands' ),\n ('admin.consent', '/admin/consent' ),\n ('admin.core', '/' ),\n ('admin.dashboard', '/admin/dashboard' ),\n ('admin.guest_server', '/admin/guest_servers' ),\n ('admin.jobs', '/admin/jobs' ),\n ('admin.language', '/admin/languages' ),\n ('admin.maintenance', '/admin/maintenance' ),\n ('admin.more', '/admin/more' ),\n ('admin.news', '/admin/news' ),\n ('admin.newsletter', '/admin/newsletter' ),\n ('admin.orga', '/admin/orgas' ),\n ('admin.orga_presence', '/admin/presence' ),\n ('admin.orga_team', '/admin/orga_teams' ),\n ('admin.page', '/admin/pages' ),\n ('admin.party', 
'/admin/parties' ),\n ('admin.seating', '/admin/seating' ),\n ('admin.shop', None ),\n ('admin.shop.article', '/admin/shop/articles' ),\n ('admin.shop.catalog', '/admin/shop/catalogs' ),\n ('admin.shop.email', '/admin/shop/email' ),\n ('admin.shop.order', '/admin/shop/orders' ),\n ('admin.shop.cancelation_request', '/admin/shop/cancelation_requests' ),\n ('admin.shop.shipping', '/admin/shop/shipping' ),\n ('admin.shop.shop', '/admin/shop/shop' ),\n ('admin.shop.storefront', '/admin/shop/storefronts' ),\n ('admin.site', '/admin/sites' ),\n ('admin.site.navigation', '/admin/sites/navigation' ),\n ('admin.snippet', '/admin/snippets' ),\n ('admin.ticketing', '/admin/ticketing' ),\n ('admin.ticketing.category', '/admin/ticketing/categories' ),\n ('admin.ticketing.checkin', '/admin/ticketing/checkin' ),\n ('admin.tourney', None ),\n ('admin.tourney.category', '/admin/tourney/categories' ),\n ('admin.tourney.tourney', '/admin/tourney/tourneys' ),\n ('admin.user', '/admin/users' ),\n ('admin.user_badge', '/admin/user_badges' ),\n ('admin.webhook', '/admin/webhooks' ),\n ]\n\n\ndef register_api_blueprints(app: Flask) -> None:\n api = get_blueprint('api')\n api_v1 = get_blueprint('api.v1')\n\n for name, url_prefix in [\n ('attendance', '/attendances' ),\n ('snippet', '/snippets' ),\n ('tourney.avatar', '/tourney/avatars' ),\n ('tourney.match.comments', '/tourney' ),\n ('ticketing', '/ticketing' ),\n ('user', '/users' ),\n ('user_avatar', '/user_avatars' ),\n ('user_badge', '/user_badges' ),\n ]:\n package = f'api.v1.{name}'\n blueprint = get_blueprint(package)\n api_v1.register_blueprint(blueprint, url_prefix=url_prefix)\n\n api.register_blueprint(api_v1, url_prefix='/v1')\n app.register_blueprint(api, url_prefix='/api')\n","sub_path":"byceps/blueprints/blueprints.py","file_name":"blueprints.py","file_ext":"py","file_size_in_byte":9223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"364219486","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport logging\nimport os\nimport shutil\n\nimport numpy as np\nimport torch\nfrom pyspark import SparkConf\nfrom sparktestingbase.sqltestcase import SQLTestCase\n\n\nSEED = 42\n\n\nclass ReagentSQLTestBase(SQLTestCase):\n def getConf(self):\n conf = SparkConf()\n # set shuffle partitions to a low number, e.g. 
<= cores * 2 to speed\n # things up, otherwise the tests will use the default 200 partitions\n # and it will take a lot more time to complete\n conf.set(\"spark.sql.shuffle.partitions\", \"12\")\n conf.set(\"spark.port.maxRetries\", \"30\")\n return conf\n\n def setUp(self):\n super().setUp()\n assert not os.path.isdir(\"metastore_db\"), \"metastore_db already exists\"\n\n torch.manual_seed(SEED)\n np.random.seed(SEED)\n logging.basicConfig()\n\n def tearDown(self):\n super().tearDown()\n\n # removes Derby from last runs\n if os.path.isdir(\"metastore_db\"):\n shutil.rmtree(\"metastore_db\")\n","sub_path":"reagent/test/workflow/reagent_sql_test_base.py","file_name":"reagent_sql_test_base.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"407581221","text":"\"\"\"Bounds a composition to have a specified set of components.\"\"\"\nfrom gemd.entity.bounds.base_bounds import BaseBounds\nfrom gemd.entity.util import array_like\n\n\nclass CompositionBounds(BaseBounds):\n \"\"\"\n Composition bounds, parameterized by a set of string-valued category labels.\n\n Parameters\n ----------\n components: list, tuple, or set or strings\n A collection of the components that must be present in the composition.\n\n \"\"\"\n\n typ = \"composition_bounds\"\n\n def __init__(self, components=None):\n self._components = None\n self.components = components\n\n @property\n def components(self):\n \"\"\"Get the allowed components.\"\"\"\n return self._components\n\n @components.setter\n def components(self, value):\n if value is None:\n self._components = set()\n elif isinstance(value, array_like()):\n self._components = set(value)\n elif isinstance(value, set):\n self._components = value\n else:\n raise ValueError(\"Components must be a list, tuple, or set: {}\".format(value))\n\n if not all(isinstance(x, str) for x in self.components):\n raise ValueError(\"All the components must be strings\")\n\n def contains(self, bounds: BaseBounds) -> bool:\n \"\"\"\n Check if another bounds is contained by this bounds.\n\n The other bounds must also be a CompositionBounds and its components must be a subset of\n this bounds's set of required components.\n\n Parameters\n ----------\n bounds: BaseBounds\n Other bounds object to check.\n\n Returns\n -------\n bool\n True if the other bounds is contained by this bounds.\n\n \"\"\"\n if not super().contains(bounds):\n return False\n if not isinstance(bounds, CompositionBounds):\n return False\n\n return bounds.components.issubset(self.components)\n\n def as_dict(self):\n \"\"\"\n Convert bounds to a dictionary.\n\n Returns\n -------\n dict\n A dictionary with \"type\" and \"components\" (a sorted list of the components).\n\n \"\"\"\n return {\"type\": self.typ, \"components\": sorted(list(self.components))}\n","sub_path":"gemd/entity/bounds/composition_bounds.py","file_name":"composition_bounds.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"79713637","text":"cnt = 0\nt = int(input())\nn = input().split()\nfor i in range(len(n)):\n n[i] = int(n[i])\nmin_r = abs(n[0] - n[1])\nfor i in range(len(n) - 1):\n for j in range(i + 1, len(n)):\n if abs(n[i] - n[j]) < min_r:\n cnt = 0\n min_r = abs(n[i] - n[j])\n if abs(n[i] - n[j]) == min_r:\n cnt += 1\nprint(min_r, 
cnt)\n\"\"\"\nhttp://codeforces.com/contest/792/problem/A\n\"\"\"\n","sub_path":"codeforces/792A.py","file_name":"792A.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"226214713","text":"from django.contrib.auth.models import User\nimport rest_framework_filters as filters\n\nfrom talentmap_api.bidding.models import Bid, UserBidStatistics\nfrom talentmap_api.bidding.filters import UserBidStatisticsFilter\n\nfrom talentmap_api.position.models import Grade, Skill\nfrom talentmap_api.position.filters import GradeFilter, SkillFilter\n\nfrom talentmap_api.language.filters import QualificationFilter\nfrom talentmap_api.language.models import Qualification\n\nfrom talentmap_api.user_profile.models import UserProfile\n\nfrom talentmap_api.organization.filters import CountryFilter\nfrom talentmap_api.organization.models import Country\n\nfrom talentmap_api.common.filters import full_text_search, ALL_TEXT_LOOKUPS, DATE_LOOKUPS, FOREIGN_KEY_LOOKUPS\n\n\nclass UserFilter(filters.FilterSet):\n\n    class Meta:\n        model = User\n        fields = {\n            \"first_name\": ALL_TEXT_LOOKUPS,\n            \"last_name\": ALL_TEXT_LOOKUPS,\n            \"username\": ALL_TEXT_LOOKUPS\n        }\n\n\nclass UserProfileFilter(filters.FilterSet):\n    user = filters.RelatedFilter(UserFilter, name='user', queryset=User.objects.all())\n    cdo = filters.RelatedFilter('talentmap_api.user_profile.filters.UserProfileFilter', name='cdo', queryset=UserProfile.objects.all())\n    grade = filters.RelatedFilter(GradeFilter, name='grade', queryset=Grade.objects.all())\n    skills = filters.RelatedFilter(SkillFilter, name='skills', queryset=Skill.objects.all())\n    language_qualifications = filters.RelatedFilter(QualificationFilter, name='language_qualifications', queryset=Qualification.objects.all())\n    primary_nationality = filters.RelatedFilter(CountryFilter, name=\"primary_nationality\", queryset=Country.objects.all())\n    secondary_nationality = filters.RelatedFilter(CountryFilter, name=\"secondary_nationality\", queryset=Country.objects.all())\n\n    class Meta:\n        model = UserProfile\n        fields = {\n            \"date_of_birth\": DATE_LOOKUPS,\n            \"phone_number\": ALL_TEXT_LOOKUPS,\n            \"user\": FOREIGN_KEY_LOOKUPS,\n            \"cdo\": FOREIGN_KEY_LOOKUPS,\n            \"grade\": FOREIGN_KEY_LOOKUPS,\n            \"skills\": FOREIGN_KEY_LOOKUPS,\n            \"language_qualifications\": FOREIGN_KEY_LOOKUPS,\n            \"primary_nationality\": FOREIGN_KEY_LOOKUPS,\n            \"secondary_nationality\": FOREIGN_KEY_LOOKUPS\n        }\n\n\nclass ClientFilter(UserProfileFilter):\n    bid_statistics = filters.RelatedFilter(UserBidStatisticsFilter, name=\"bid_statistics\", queryset=UserBidStatistics.objects.all())\n\n    is_bidding = filters.BooleanFilter(name=\"bidlist\", method=\"filter_is_bidding\")\n    is_in_panel = filters.BooleanFilter(name=\"bidlist\", method=\"filter_is_in_panel\")\n    is_on_post = filters.BooleanFilter(name=\"bidlist\", method=\"filter_is_on_post\")\n    is_bidding_no_handshake = filters.BooleanFilter(name=\"bidlist\", method=\"filter_is_bidding_no_handshake\")\n\n    def filter_is_bidding(self, queryset, name, value):\n        value = bool(value)\n        if value:\n            return queryset.exclude(bidlist=None)\n        else:\n            return queryset.filter(bidlist=None)\n\n    def filter_is_in_panel(self, queryset, name, value):\n        value = bool(value)\n        if value:\n            return queryset.filter(bidlist__status=Bid.Status.in_panel)\n        else:\n            return queryset.exclude(bidlist__status=Bid.Status.in_panel)\n\n    def filter_is_on_post(self, queryset, name, value):\n        value = bool(value)\n        if value:\n            return 
queryset.exclude(assignments__current_for_position=None)\n else:\n return queryset.filter(assignments__current_for_position=None)\n\n def filter_is_bidding_no_handshake(self, queryset, name, value):\n value = bool(value)\n if value:\n return queryset.exclude(bidlist=None).filter(bidlist__handshake_offered_date=None).distinct()\n else:\n return queryset.exclude(bidlist=None).exclude(bidlist__handshake_offered_date=None).distinct()\n\n # Full text search across multiple fields\n q = filters.CharFilter(name=\"user\", method=full_text_search(\n fields=[\n \"user__first_name\",\n \"user__last_name\",\n \"user__username\",\n \"skills__code\",\n \"skills__description\",\n \"skills__cone__name\",\n \"language_qualifications__language__short_description\",\n \"primary_nationality__name\",\n \"secondary_nationality__name\"\n ]\n ))\n","sub_path":"talentmap_api/user_profile/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"398486658","text":"import tkinter as tk\nfrom tkinter import ttk\n\nw = tk.Tk()\nw.title('Messy Sergio')\n\ndef click_me():\n action.configure(text = 'hello, ' + name.get() + ' ' + number_chosen.get())\n\n# adding a textbox entry widget\nname = tk.StringVar()\nname_entered = ttk.Entry(w, width = 12, textvariable = name)\nname_entered.pack()\n\n# adding a button\naction = ttk.Button(w, text = 'click me!', command = click_me)\naction.pack()\n\nttk.Label(w, text = 'choose a number:').pack()\n\nnumber = tk.StringVar()\nnumber_chosen = ttk.Combobox(w, width = 12,\n textvariable = number,\n state = 'readonly')\n\nnumber_chosen['values'] = (1,2,3,4,5,100)\n\nnumber_chosen.current(0)\nnumber_chosen.pack()\n\nw.mainloop()\n\n","sub_path":"tkinter/stolen_code/before_classes.py","file_name":"before_classes.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"564066068","text":"import requests\nimport json\nfrom lxml import etree\nimport time, datetime\n\nbase_url = 'https://www.instagram.com'\n#query hash\nquery_hash = ''\n#http header\nhttp_header = {\n 'Connection':'keep-alive',\n 'Host':'www.instagram.com',\n 'Referer':'https://www.instagram.com/instagram/',\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',\n 'X-Requested-With':'XMLHttpRequest'\n}\nstartTime = '2019-05-01 00:00:00'\nendTime = '2019-05-31 23:59:59'\n\n\nclass Crawler():\n def __init__(self, account, startTime=None, endTime=None):\n self.account = account\n self.account_url = base_url + '/' + account\n self.pics_list = []\n self.startTime = int(\n time.mktime(time.strptime(\n startTime, \"%Y-%m-%d %H:%M:%S\"))) if startTime else None\n self.endTime = int(\n time.mktime(time.strptime(\n endTime, \"%Y-%m-%d %H:%M:%S\"))) if endTime else None\n self.id = None\n self.has_next_page = False\n self.end_cursor = None\n self.var_value = {\"id\": \"\", \"first\": 20, \"after\": \"\"}\n self._getBaseInfo_()\n\n def _getBaseInfo_(self):\n r = requests.get(self.account_url, headers=http_header)\n r.encoding = r.apparent_encoding\n print(r.status_code)\n html = etree.HTML(r.content.decode())\n h = html.xpath('''//script[@type=\"text/javascript\"]''')[\n 3].text.replace('window._sharedData = ', '').strip()[:-1]\n pic_data = json.loads(h, encoding='utf-8')\n base_data = 
pic_data[\"entry_data\"][\"ProfilePage\"][0][\"graphql\"][\"user\"]\n self.var_value[\"id\"] = self.id = base_data['id']\n self.end_cursor = base_data[\"edge_owner_to_timeline_media\"][\n \"page_info\"][\"end_cursor\"]\n self.has_next_page = base_data[\"edge_owner_to_timeline_media\"][\n \"page_info\"][\"has_next_page\"]\n pics = base_data['edge_owner_to_timeline_media']['edges']\n for pic in pics:\n if pic['node'] is not None:\n post_time = pic['node']['taken_at_timestamp']\n if self.endTime and self.endTime < post_time:\n continue\n elif (not self.startTime) or self.startTime <= post_time:\n print(\n time.strftime(\"%Y-%m-%d %H:%M:%S\",\n time.localtime(post_time)))\n self.pics_list.append(self._parse_(pic['node']))\n else:\n return\n\n def _parse_(self, node):\n pic_data_dic = {}\n shortCode = node['shortcode']\n thumbnail = node['thumbnail_src']\n post_time = node['taken_at_timestamp']\n like_count = node['edge_media_preview_like']['count']\n comment_count = node['edge_media_to_comment']['count']\n text_list = node['edge_media_to_caption']['edges']\n text = text_list[0]['node']['text'] if text_list else ''\n pictures = {}\n for picture in node['thumbnail_resources']:\n size = str(picture['config_width']) + 'x' + str(\n picture['config_height'])\n pictures[size] = picture['src']\n pic_data_dic['URL'] = base_url + '/p/' + shortCode\n pic_data_dic['Like_Count'] = like_count\n pic_data_dic['Text'] = text\n pic_data_dic['Commen_Count'] = comment_count\n pic_data_dic['Time'] = post_time\n pic_data_dic['Thumbnail'] = thumbnail\n pic_data_dic['Pictures'] = pictures\n return pic_data_dic\n\n def run(self):\n while self.has_next_page:\n self.var_value['after'] = self.end_cursor\n next_url = ('%s/graphql/query/?query_hash=%s&variables={0}' %\n (base_url, query_hash)).format(\n json.dumps(self.var_value))\n r = requests.get(next_url, headers=http_header)\n r.encoding = r.apparent_encoding\n pic_data = json.loads(r.text)\n base_data = pic_data[\"data\"][\"user\"][\n \"edge_owner_to_timeline_media\"]\n self.end_cursor = base_data[\"page_info\"][\"end_cursor\"]\n self.has_next_page = base_data[\"page_info\"][\"has_next_page\"]\n for pic in base_data['edges']:\n if pic['node'] is not None:\n post_time = pic['node']['taken_at_timestamp']\n if self.endTime and self.endTime < post_time:\n continue\n elif (not self.startTime) or self.startTime <= post_time:\n print(\n time.strftime(\"%Y-%m-%d %H:%M:%S\",\n time.localtime(post_time)))\n self.pics_list.append(self._parse_(pic['node']))\n else:\n return\n\n def print(self):\n # Writes the collected posts out as a simple HTML table.\n fout = open(self.account + '.html', 'w', encoding='utf-8')\n fout.write('<html>')\n fout.write('<head>')\n fout.write('<meta charset=\"utf-8\">')\n fout.write('</head>')\n fout.write('<body>')\n fout.write('<table border=\"1\">')\n fout.write('<tr>')\n fout.write('<th>Order</th>')\n fout.write('<th>Time</th>')\n fout.write('<th>Default Picture</th>')\n fout.write('<th>Sized Pictures</th>')\n fout.write('<th>Text</th>')\n fout.write('<th>Like</th>')\n fout.write('<th>Comment</th>')\n fout.write('<th>URL</th>')\n fout.write('</tr>')\n for i, data in enumerate(self.pics_list):\n fout.write('<tr>')\n # order\n fout.write('<td>%d</td>' % (i + 1))\n # time\n fout.write('<td>%s</td>' % time.strftime(\n \"%Y-%m-%d %H:%M:%S\", time.localtime(data['Time'])))\n # default\n fout.write('<td><img src=\"%s\"></td>' % data['Thumbnail'])\n # sized\n fout.write('<td>')\n for k, v in data['Pictures'].items():\n fout.write('<a href=\"%s\">%s</a> ' % (v, k))\n fout.write('</td>')\n # Text\n fout.write('<td>%s</td>' % data['Text'])\n # like\n fout.write(\"<td>%s</td>\" % data['Like_Count'])\n # comment\n fout.write(\"<td>%s</td>\" % data['Commen_Count'])\n # url\n fout.write('<td><a href=\"%s\">%s</a></td>' %\n (data['URL'], data['URL']))\n fout.write('</tr>')\n fout.write('</table>')\n fout.write('</body>')\n fout.write('</html>')\n fout.close()\n\n\nif __name__ == \"__main__\":\n # crawler = Crawler('arcteryx')\n # crawler = Crawler('patagonia')\n crawler = Crawler('mountainhardwear', startTime=startTime, endTime=endTime)\n crawler.run()\n crawler.print()\n","sub_path":"ins.py","file_name":"ins.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"64546883","text":"import sinatraMainClass as sMC;\nclass sinatraFrontEnd(sMC.sinatraMainClass):\n\tdef __init__(self):\n\t\timport numpy as np;\n\t\tself._sinatraMainClass__className = 'sinatraAudio';\n\t\tself.__trainingRows = np.zeros((1, 35));\n\t\tself.__trainingClass = np.zeros((1,1));\n\tdef gatherTrainData(self, aD):\n\t\timport numpy as np;\n\t\ttrainMatrix, thing1, thing2 = self.segmentate(aD);\n\t\trowNumber = len(trainMatrix[:,0]);\n\t\tclassArray = np.ones(rowNumber) * aD.getlClass();\n\t\ttemp = self.__trainingRows;\n\t\tnewMatrix = np.zeros((rowNumber + len(temp[:,0]), len(temp[0,:]) ));\n\t\tnewMatrix[0:(len(temp[:,0])),:] = temp;\n\t\tnewMatrix[(len(temp[:,0])):, :] = trainMatrix;\n\t\tself.__trainingRows = newMatrix;\n\t\ttemp = self.__trainingClass;\n\t\tnewArray = np.zeros(len(temp) + len(classArray));\n\t\tnewArray[0:len(temp)] = temp;\n\t\tnewArray[len(temp):] = classArray;\n\t\tself.__trainingClass = newArray;\n\t\treturn 1;\n\tdef getTrainData(self):\n\t\treturn self.__trainingRows, self.__trainingClass;\n\tdef trainModel(self):\n\t\tfrom sklearn.neural_network import MLPClassifier;\n\t\tprint(\"creating nn\");\n\t\tnn = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(10,), random_state=1);\n\t\tprint(\"starting training\");\n\t\tnn.fit(self.__trainingRows, self.__trainingClass);\n\t\tprint(\"finishing training\");\n\t\tself.__nn = nn;\n\t\treturn 1;\n\tdef predict(self, aD):\n\t\timport numpy as np;\n\t\tpredictMatrix, thing1, thing2 = self.segmentate(aD);\n\t\tres1 = np.zeros(len(predictMatrix[:,0]));\n\t\tfor iter in range(len(predictMatrix[:,0])):\n\t\t\trow2predict = predictMatrix[iter,:];\n\t\t\trow2predict = row2predict.reshape(1,-1);\n\t\t\tres1[iter] = self.__nn.predict(row2predict);\n\t\treturn res1;\n\tdef normalize(self, aD):\n\t\t#\n\t\t#\tNormalizes the audio to zero mean and unit variance in place.\n\t\t#\n\t\timport sinatraIO;\n\t\timport numpy as np;\n\t\taFAD = aD.getAudio();\n\t\tmeanAFAD = np.mean(aFAD);\n\t\tstdAFAD = np.std(aFAD);\n\t\taFAD = (aFAD - meanAFAD)/stdAFAD;\n\t\taD.modAudio(aFAD);\n\t\treturn 1;\n\tdef segmentate(self, aD):\n\t\t#\n\t\t#\tsegmentate allows the Front End to find which are the pieces of the\n\t\t#\tspeech.\n\t\t#\t\t1 - Clean the sound. We guess that high frequency\n\t\t#\t\t\tsounds are not important for the accent. So,\n\t\t#\t\t\twe need to perform a Fourier transform, remove\n\t\t#\t\t\tthe high frequency terms, and come back to the time domain through\n\t\t#\t\t\tan inverse Fourier transform.\n\t\t#\t\t2 - Perform the derivatives of the amplitude. This is not a\n\t\t#\t\t\ttrivial problem, since the spectrum is not regular, and\n\t\t#\t\t\tdifferent minima and maxima can be found. 
An approach\n\t\t#\t\t\tcould be to split the sound in intervals and perform the\n\t\t#\t\t\tderivatives only with the maximum values of each interval.\n\t\t#\n\t\t#\n\t\t#\tcoeffCleaning : treshold under which fourier transformed frequencies\n\t\t#\t\t\t\t\tare removed\n\t\t#\twS\t: window-size\n\t\timport sinatraIO;\n\t\timport sinatraFilter;\n\t\timport numpy as np;\n\t\tfilterBox = sinatraFilter.sinatraFiltersBox();\n\t\tcoeffCleaning = 1.5;\n\t\twS = 800;\n\t\trowL = 10000;\n\t\tprint(\"reading {0}\".format(aD.getName()));\n\t\tself.normalize(aD);\n\t\taD = aD.getAudio();\n\t\tprint(\"\\tremoving high frequencies\");\n\t\tsplitNumber = int(len(aD)/wS);\n\t\taDTransformed = np.fft.fft(aD);\n\t\tmeanADTransformed = np.mean(np.absolute(aDTransformed));\n\t\taDTransformed[aDTransformed < coeffCleaning*meanADTransformed] = 0;\n\t\taDTransformed[20000:] = 0;\n\t\taDClean = np.fft.ifft(aDTransformed);\n\t\taDClean = np.real(aDClean);\n\t\t#aDClean = aD;\n\t\tprint(\"\\tfiltering\");\n\t\t#y,z,n = filterBox.entropyInWindow(aDClean, 750);\n\t\t#aDClean = aDClean*(n>=1);\n\t\taCY = np.zeros(splitNumber); aCX = np.zeros(splitNumber);\n\t\tfD = np.zeros(splitNumber-2); sD = np.zeros(splitNumber-2);\n\t\taCX, aCY = filterBox.filterMaxInWindow(aDClean, wS);\n\t\taCY = aCY ** 2;\n\t\tprint(\"\\tprocessing first and second order derivatives\");\n\t\tfor derIter in range(1, splitNumber-2):\n\t\t\tfD[derIter] = (aCY[derIter+1]-aCY[derIter-1])/2;\n\t\t\tsD[derIter] = (1/4)*(aCY[derIter + 1] + aCY[derIter - 1] - 2*aCY[derIter]);\n\t\tprint(\"\\tsegmentating\");\n\t\tstatusMin = 0;\n\t\tstatusMax = 0;\n\t\tcutPoints = [0, 0];\n\t\trowControl = 1;\n\t\tmatrixX = np.zeros(35);\n\t\ttestArray = np.zeros(2);\n\t\tfor sIter in range(2, splitNumber-3):\n\t\t\tif (fD[sIter]*fD[sIter+1]) <= 0:\n\t\t\t\tif (sD[sIter]+sD[sIter + 1]) > 0:\n\t\t\t\t\tcutPoints[statusMin]=aCX[sIter];\n\t\t\t\t\tstatusMin = statusMin + 1;\n\t\t\t\telse:\n\t\t\t\t\tstatusMax = 1;\n\t\t\tif (statusMin == 2) and (statusMax == 1):\n\t\t\t\tstatusMin = 1;\n\t\t\t\tstatusMax = 0;\n\t\t\t\trowX = np.zeros(rowL);\n\t\t\t\ttokkenL = int(cutPoints[1]-cutPoints[0] + 1);\n\t\t\t\tif (tokkenL > 500) and (tokkenL < rowL):\n\t\t\t\t\trowX[1:tokkenL] = aDClean[int(cutPoints[0]):int(cutPoints[1])];\n\t\t\t\t\trowX = self.extractFeatures(rowX);\n\t\t\t\t\ttempTestArray = testArray;\n\t\t\t\t\trowControl = rowControl + 1;\n\t\t\t\t\ttestArray = np.zeros((rowControl, 2));\n\t\t\t\t\ttestArray[0:(rowControl-1),:] = tempTestArray;\n\t\t\t\t\ttestArray[rowControl-1, 0] = cutPoints[0];\n\t\t\t\t\ttestArray[rowControl-1, 1] = cutPoints[1];\n\t\t\t\t\ttempMatrixX = matrixX;\n\t\t\t\t\tmatrixX = np.zeros((rowControl, 35))\n\t\t\t\t\tmatrixX[0:(rowControl - 1), :] = tempMatrixX;\n\t\t\t\t\tmatrixX[(rowControl - 1), :] = rowX;\n\t\t\t\tcutPoints[0] = cutPoints[1];\n\t\tprint(\"task completed\");\n\t\tmatrixX = matrixX[1:,:];\n\t\ttestArray = testArray[1:,:];\n\t\treturn matrixX, testArray, (rowControl - 1);\n\n\tdef extractFeatures(self, tokken):\n\t\timport sinatraFilter as sF;\n\t\timport numpy as np;\n\t\tfilterBox = sF.sinatraFiltersBox();\n\t\twS = int(len(tokken) / 10);\n\t\tfeatureVector = list();\n\t\tfeatureBox = {};\n\t\tfeatureBox['mean'] = np.mean(tokken);\n\t\tfeatureBox['std'] = np.std(tokken);\n\t\tfeatureBox['len'] = len(tokken)\n\t\tfeatureBox['meanPositive'] = np.mean(tokken[tokken > 0]);\n\t\tfeatureBox['meanNegative'] = np.mean(tokken[tokken < 0]);\n\t\tx, featureBox['XmaxFilter'] = filterBox.filterMaxInWindow(tokken, wS);\n\t\tx, 
featureBox['XsoftMaxFilter'] = filterBox.softenedMaxWindow(tokken,wS);\n\t\tx, featureBox['XmeanLogAvWindow'] = filterBox.sumAbsWindow(tokken,wS);\n\t\tfor fkeys, feature in sorted(featureBox.items()):\n\t\t\tif fkeys[0]=='X':\n\t\t\t\tfor intIter in feature:\n\t\t\t\t\tfeatureVector.append(intIter);\n\t\t\telse:\n\t\t\t\tfeatureVector.append(feature);\n\t\treturn featureVector;\n","sub_path":"sinatraFrontEnd.py","file_name":"sinatraFrontEnd.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"368325089","text":"#!/usr/bin/python\n#encoding:utf8\n\nimport sys\nimport os\n\n# 设置默认编码为utf8\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nSOURCE_NAME = \"lua\"\nSOURCE_PATH = \"e:/torchlight/master/arpg_prg/\"\nTARGET_NAME = \"lua\"\nTARGET_PATH = \"e:/works/Work/Lua/Game/\"\n\ncmd = \"python CSharp/CloneCSproj.py %s %s %s %s\" % (SOURCE_NAME, SOURCE_PATH, TARGET_NAME, TARGET_PATH)\nos.system(cmd)\n","sub_path":"Pythons/CloneCSproj/CSharp/CloneLuaCSproj.py","file_name":"CloneLuaCSproj.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"635623796","text":"from pyowm import OWM\nimport pifacecad as p\n\nAPI_key = '5b1b288295312574acc75b06e2cc00e8'\n\nowm = OWM(API_key)\ncad = p.PiFaceCAD()\n\n#5 cities\ncities = [2643741, 2988507, 1835848, 5128581, 2147714]\n\n#weather\ncloudy = [0x7, 0xf, 0x7, 0x0, 0x1c, 0x1e, 0x1c, 0x0]\nsunny = [0x17, 0x7, 0xb, 0x18, 0x12, 0x6, 0x4, 0x0]\nrainy = [0xe, 0x1f, 0xe, 0x11, 0x15, 0x15, 0x4, 0x0]\nsnowy = [0xe, 0x1f, 0xe, 0x11, 0xa, 0x15, 0xa, 0x0]\nfoggy = [0x0, 0x0, 0x15, 0xa, 0x1f, 0xa, 0x15, 0x0]\nthunder = [0x1, 0xb, 0x1b, 0x1a, 0x10, 0x0, 0x0]\nwindy = [0x0, 0x1a, 0x1e, 0x0, 0xd, 0xf, 0x0, 0x0]\n\nweather = [cloudy, sunny, rainy, snowy, foggy, thunder, windy]\n\n#bitmap load\nfor i in range(7):\n cad.lcd.store_custom_bitmap(i, weather[i])\n\ndef weatherBitmap(w):\n wstatus = w.get_weather_code()\n if wstatus < 300:\n cad.lcd.write_custom_bitmap(5)\n elif wstatus < 600:\n cad.lcd.write_custom_bitmap(2)\n elif wstatus < 700:\n cad.lcd.write_custom_bitmap(3)\n elif wstatus == 741:\n cad.lcd.write_custom_bitmap(4)\n elif wstatus == 800:\n cad.lcd.write_custom_bitmap(1)\n elif 800 < wstatus < 900:\n cad.lcd.write_custom_bitmap(0)\n elif wstatus < 904:\n cad.lcd.write_custom_bitmap(5)\n elif wstatus == 904:\n cad.lcd.write_custom_bitmap(1)\n elif wstatus == 905:\n cad.lcd.write_custom_bitmap(6)\n elif wstatus == 906:\n cad.lcd.write_custom_bitmap(2) \n\ndef get_weather(index):\n obs = owm.weather_at_id(cities[index])\n w = obs.get_weather()\n loc = obs.get_location()\n temp = w.get_temperature(unit='celsius')['temp']\n\n #write\n cad.lcd.clear()\n cad.lcd.write(loc.get_name() + ' ')\n weatherBitmap(w)\n cad.lcd.write('\\n' + w.get_status()+ ' ' + str(temp) + ' C')\n","sub_path":"worldweather.py","file_name":"worldweather.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"42580142","text":"\"\"\"\nJeroen Langhorst\nS253457\n\nRuntime: O(n*log(n)) due to .sort()\nMemory: O(n) to store input and O(n) in auxiliary memory\n\"\"\"\n\nimport sys\n\n\nclass Appointment:\n start = 0\n end = 0\n\n def __init__(self, start, end):\n self.start = start\n self.end = end\n\n def __repr__(self):\n return repr((self.start, self.end))\n\n\ndef print_list(current_list):\n output = []\n\n for 
item in current_list:\n output.append(str(item)+\" \")\n\n print(output)\n\n\ndef main():\n\n first = True\n appointment_list = []\n\n for line in sys.stdin:\n\n # Skip the first element of the input\n if not first:\n\n # # Split data in starting and ending time\n separated = line.split(\" \")\n a = Appointment(int(separated[0]), int(separated[1]))\n\n appointment_list.append(a)\n\n first = False\n\n # Sort the stack based on ending dates\n appointment_list.sort(key=lambda x: x.start)\n # print_list(appointment_list)\n\n if len(appointment_list) > 0:\n stack = [appointment_list[0]]\n\n # Push first element on stack\n appointment_list.remove(appointment_list[0])\n\n for item in appointment_list:\n\n first = stack[-1]\n\n # If no overlap\n if first.end < item.start:\n stack.append(item)\n\n elif first.end < item.end:\n first.end = item.end\n stack.pop()\n stack.append(first)\n\n taken = 0\n for item in stack:\n taken += item.end - item.start + 1\n\n print(stack[-1].end - taken + 1)\n else:\n print(0)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Challenge1/cal.py","file_name":"cal.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"565776321","text":"import os\nimport sys\n\nfrom github import Github, GithubException\n\nSERVICE_LABEL_COLOR = \"e99695\"\n\n#\n# Help\n#\n\n\ndef print_help():\n print(\"\"\"\nUsage: label_tool.py [command] [options]\n\nCommands:\n audit [repo file] - prints out labels from each repo\n push [repo file] [label file] - pushes labels to all repos\n\nNotes:\n Expects environment variable GH_TOKEN to be filled with your \n access token to GitHub. This can be generated on GitHub under\n Account->Settings->Developer settings->Personal access tokens.\n \"\"\")\n sys.exit()\n\n#\n# Push\n#\n\n\ndef get_repo(repo_name):\n con = Github(os.environ[\"GH_TOKEN\"])\n repo = con.get_repo(repo_name)\n repo.name # Force checking if repo exists, otherwise \"get_repo\" does nothing\n return repo\n\n\ndef create_label(repo, label):\n print(f\"Adding label {label}\")\n try:\n repo.create_label(label, SERVICE_LABEL_COLOR)\n print(f\"+ Created label {label}\")\n except GithubException as err:\n err_code = err.data['errors'][0].get('code', '')\n if err.status == 422 and err_code == \"already_exists\":\n print(f\"* Label {label} already exists\")\n return\n raise\n\n\ndef push_labels(repo_name, label_list):\n print(f\"Getting repo {repo_name}\")\n repo = get_repo(repo_name)\n\n print(\"Adding labels to repo\")\n for label in label_list:\n create_label(repo, label)\n\n\ndef push(repolist_filepath, labellist_filepath):\n print(f\"Reading label list from file: {labellist_filepath}\")\n with open(labellist_filepath, \"r\") as lfile:\n label_list = lfile.read().splitlines()\n\n print(f\"Reading repo list from file: {repolist_filepath}\")\n with open(repolist_filepath, \"r\") as rfile:\n for repo_name in rfile.read().splitlines():\n if not repo_name.startswith(\"//\") and not repo_name.startswith(\"#\"):\n push_labels(repo_name, label_list)\n\n#\n# Audit\n#\n\n\ndef print_labels(repo_name):\n print(f\"Printing labels in repo {repo_name}\")\n repo = get_repo(repo_name)\n for label in repo.get_labels():\n print(f\" {label.name}\")\n\n\ndef audit(repolist_filepath):\n print(f\"Reading repo list from file: {repolist_filepath}\")\n with open(repolist_filepath, \"r\") as rfile:\n for line in rfile.read().splitlines():\n if not line.startswith(\"//\"):\n print_labels(line)\n\n\nif __name__ == 
\"__main__\":\n if len(sys.argv) < 2:\n print_help()\n\n if sys.argv[1] == \"push\" and len(sys.argv) == 4:\n push(sys.argv[2], sys.argv[3])\n elif sys.argv[1] == \"audit\" and len(sys.argv) == 3:\n audit(sys.argv[2])\n else:\n print_help()\n","sub_path":"scripts/python/repo_labels/label_tool.py","file_name":"label_tool.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"533733939","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 13 13:13:56 2018\n\n@author: michelle\n\"\"\"\n\n# ATOC 4500 Final Project\n# 13 December 2018\n\n# Plotting time series of katabatic winds \n\n# Reading in necessary modules\nimport numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap # For spatial plots (vector fields, streamlines, etc)\nfrom pyresample import kd_tree, geometry # regridding package\n\n\nif __name__ == '__main__':\n\n # Reading in daily u10m\n df_u = Dataset(\"/users/michelle/Desktop/Fall 2018/ATOC 4500-001/Project/KNMI/u10m.KNMI-2001.ASE055.ERAIN.DD.nc\",'r')\n print (df_u.variables.keys())\n print (df_u.variables['rotated_pole'])\n\n lon = df_u.variables['lon'][:] # Lon, degrees east, shape (361,456) = (lat,lon)\n lat = df_u.variables['lat'][:] # Lat, degrees north, shape (361,456) = (lat,lon)\n rlon = df_u.variables['rlon'][:] # Lon in rotated pole grid, degrees, X axis, shape (456)\n rlat = df_u.variables['rlat'][:] # Lat in rotated pole grid, degrees, Y axis, shape (361)\n direc = df_u.variables['dir'][:] # Angle of rotation, degrees, (361,456)\n # Fill Value: 9.969209968386869e+36\n height = df_u.variables['height'][:] # Height above the surface, m, postive up, (1)\n time = df_u.variables['time'][:] # Time, days since 1979-01-01, shape (3652)\n # Start date yyyymmddhh = 2001010100; 2001, January 1, 1:00\n u10m = df_u.variables['u10m'][:] # Zonal wind speed, m/s, shape (3652,1,361,456) = (time,height,lat,lon)\n # 24-hr average of 3-hr instantaneous values, grid mapping = rotated pole, m s^-1\n # Fill value: -9999.0\n rotated_pole = df_u.variables['rotated_pole']\n \n # Reading in daily v10m\n df_v = Dataset(\"/users/michelle/Desktop/Fall 2018/ATOC 4500-001/Project/KNMI/v10m.KNMI-2001.ASE055.ERAIN.DD.nc\",'r')\n print(df_v.variables.keys())\n print(df_v.variables['v10m'])\n v10m = df_v.variables['v10m'] # Meridional wind speed, m/s, shape (3652,1,361,456) = (time,height,lat,lon)\n # 24-hr average of 3-hr instantaneous values, grid mapping = rotated pole, m s^-1\n # Fill value: -9999.0\n\n\n # ----\n \n # Specifying u10m, v10m bounds\n u10m = u10m[:,0,:,:] # Gets rid of height dimension\n v10m = v10m[:,0,:,:]\n \n \n # ----\n \n # Isolating zonal wind speed at a point in PIB\n # Pyresample uses \"nearest neighbor\" search to find a point closest to \\\n # specified longitude and latitude\n \n # Start with t0 at 74S, 105W\n grid = geometry.GridDefinition(lons=lon,lats=lat)\n u10m_t0_old = u10m[0,:,:]\n points = geometry.SwathDefinition(lons=np.array([-105-.1,-105,-105+.1]), lats=np.array([-74-.1,-74,-74+.1]))\n u10m_t0_new = kd_tree.resample_nearest(grid,u10m_t0_old,points,radius_of_influence=5000)\n print(u10m_t0_new)\n # Note: MIDDLE VALUE is the wind speed at time t at that point\n \n \n # Looping through time at 74.5S, 103W\n time_range = np.linspace(0,3651,num=3652)\n #time_range_test = time_range[0:1000]\n \n u10m_pt = []\n f=0\n grid = geometry.GridDefinition(lons=lon,lats=lat)\n for t in 
time_range:\n u10m_old = u10m[f,:,:]\n points = geometry.SwathDefinition(lons=np.array([-103-.1,-103,-103+.1]), lats=np.array([-74.5-.1,-74.5,-74.5+.1]))\n u10m_new = kd_tree.resample_nearest(grid,u10m_old,points,radius_of_influence=5000)\n #print(u10m_new)\n u10m_pt.append(u10m_new[1])\n f=f+1\n\n # Writing data to text file\n a = np.array(u10m_pt)\n np.savetxt(\"/users/michelle/Desktop/Fall 2018/ATOC 4500-001/Project/KNMI/u10m_daily_745S103W.txt\", a, fmt=\"%f\")\n\n\n # ---\n\n # Reading data in (daily u10m at 74.5S, 103W for 2001-2010)\n c = open('/users/michelle/Desktop/Fall 2018/ATOC 4500-001/Project/KNMI/u10m_daily_745S103W.txt', 'r')\n u10m_745S103W_file = c.read().split('\\n')\n c.close()\n \n u10m_745S103W_file = np.array(u10m_745S103W_file)\n \n \n # Time series of Zonal Wind Speed at 74.5S, 103W for 2001-2010\n plt.close()\n plt.gcf().clear()\n fig = plt.figure(figsize=(8,6))\n time_range = np.linspace(0,3651,num=3652)\n plt.plot(time_range,u10m_745S103W_file)\n plt.xlabel(\"Days since Jan-01-2001\")\n #plt.ylabel(\"Wind Speed ($ms^{-1}$)\")\n plt.title(\"Katabatic Wind Speed in Pine Island Bay\")\n plt.savefig('/users/michelle/Desktop/Fall 2018/ATOC 4500-001/Project/KNMI/u10m_pt_2001-2010.png')\n plt.show()\n \n \n # Separate into seasons of interest: Oct-Nov-Dec\n u10m_0102 = u10m_745S103W_file[273:365].astype(np.float)\n u10m_0203 = u10m_745S103W_file[638:730].astype(np.float)\n u10m_0304 = u10m_745S103W_file[1003:1095].astype(np.float)\n u10m_0405 = u10m_745S103W_file[1369:1461].astype(np.float)\n u10m_0506 = u10m_745S103W_file[1734:1826].astype(np.float)\n u10m_0607 = u10m_745S103W_file[2099:2191].astype(np.float)\n u10m_0708 = u10m_745S103W_file[2464:2556].astype(np.float)\n u10m_0809 = u10m_745S103W_file[2830:2922].astype(np.float)\n u10m_0910 = u10m_745S103W_file[3195:3287].astype(np.float)\n u10m_1011 = u10m_745S103W_file[3560:3652].astype(np.float)\n \n # ---\n \n \n # CLIMATOLOGY\n \n # Develop climatology (average u10m on each day in Oct-Nov-Dec for 2001-2010)\n days_in_season = np.linspace(0,91,92)\n u10m_spring_climatology = []\n for i in range(0,92):\n u10m_mean = np.mean([u10m_0102[i],u10m_0203[i],u10m_0304[i],u10m_0405[i],\\\n u10m_0506[i],u10m_0607[i],u10m_0708[i],u10m_0809[i],\\\n u10m_0910[i],u10m_1011[i]])\n u10m_spring_climatology.append(u10m_mean)\n \n \n # Plot a time series of the mean and several years\n plt.close()\n plt.figure(figsize=(8,6))\n plt.plot(days_in_season,u10m_spring_climatology,label=\"Mean\")\n \n all_years = [u10m_0102,u10m_0203,u10m_0304,u10m_0405,u10m_0506,u10m_0607,\\\n u10m_0708,u10m_0809,u10m_0910,u10m_1011]\n for year in all_years:\n name = str(year)\n label = \"20\"+name[5]+name[6]\n plt.plot(days_in_season,year,label=label) # STILL NEED TO FIGURE OUT LABELS\n plt.legend()\n plt.xlabel(\"Days since October 1\")\n plt.ylabel(\"Wind Speed ($ms^{-1}$)\")\n plt.title(\"Zonal Wind Speed in Oct-Nov-Dec\")\n \n plt.savefig('/users/michelle/Desktop/Fall 2018/ATOC 4500-001/Project/KNMI/u10m_spring_climatology.png')\n\n \n # Calculate standard deviation of 2010-11 season from the mean\n difference = u10m_1011 - u10m_spring_climatology\n std_1011 = np.std(difference,0)\n \n difference = u10m_0102 - u10m_spring_climatology\n std_0102 = np.std(difference,0)\n \n \n\n \n \n \n \n \n \n# LOOPING THROUGH MONTHS IN DREW'S ISOLATED POINT\ngrid = geometry.GridDefinition(lons=mon_ave_u10m_lon, lats=mon_ave_u10m_lat)\ntime_range = np.linspace(0,443,num=444)\nf = 0\nu10m_windspeed = []\nu10m_windspeed_absval = []\nfor t in time_range:\n u10m_data_old 
= mon_ave_u10m_u10m[f,0,:,:]\n # Defining new \"grid\" to which we will map the data (point of latitude 70S, lon 115W)\n points_on_70S = geometry.SwathDefinition(lons=np.array([-115-.1,-115,-115+.1]), lats=np.array([-70-.1,-70,-70+.1]))\n u10m_data_new = kd_tree.resample_nearest(grid,u10m_data_old,points_on_70S,radius_of_influence=5000)\n #print(u10m_data_new)\n u10m_windspeed.append(u10m_data_new[1])\n u10m_windspeed_absval.append(abs(u10m_data_new[1]))\n f = f+1\n # Note: MIDDLE VALUE in output is the wind speed at month f at that point\n#u10_windspeed = u10m_data_new[:,1] \n \n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"12-13-18_wind_time_series.py","file_name":"12-13-18_wind_time_series.py","file_ext":"py","file_size_in_byte":7585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"153718698","text":"#!/usr/bin/env python3\nimport json\nimport sys\n\nif len(sys.argv) < 2:\n print('repository interface file is required')\n sys.exit(1)\n\nrepo_interface_file = sys.argv[1]\n\nif len(sys.argv) < 3:\n print('target path is required')\n sys.exit(1)\n\ntarget_path = sys.argv[2]\n\ntry:\n with open(repo_interface_file) as f:\n repo = json.loads(f.read())\n if repo is not None and repo[\"services\"] is not None:\n for service in repo[\"services\"]:\n if service[\"filepath\"] is not None:\n del service[\"filepath\"]\n if service[\"isAsync\"] is not None:\n del service[\"isAsync\"]\n with open(target_path, 'w') as o:\n o.write('export const Repository = ')\n o.write(json.dumps(repo, indent=2))\n o.close()\n f.close()\nexcept Exception as e:\n print('Error when publishing repository interface file:', e)\n\n","sub_path":"bin/lib/publish_repository_service_definitions_to_core_project.py","file_name":"publish_repository_service_definitions_to_core_project.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"221179742","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.rcParams['figure.dpi'] = 300\nimport random\nfrom json import dumps\nfrom tqdm import tqdm\nfrom tensorboardX import SummaryWriter\n\nimport torch\nimport torch.nn.functional as F\n\nfrom predictor.predictor import Predictor\nfrom predictor.vanilla_lstm_predictor import VanillaLSTMPredictor\n\nfrom data_utils.highd_dataset import build_highd_data_loader\nfrom data_utils.process_highd.track import *\n\nimport util\nfrom args import args\n\ndt = 1 / DEFAULT_FRAME_RATE\n\ndef main(args):\n # Set up logging and devices\n if args.name == 'train':\n args.name = args.model\n if args.model == 'cvae':\n args.name += '_zbest' if args.most_likely else '_full'\n args.save_dir = util.get_save_dir(args, training=False)\n log = util.get_logger(args.save_dir, args.name)\n device, args.gpu_ids = util.get_available_devices()\n # device, args.gpu_ids = 'cpu', None\n log.info(f'Using device {device}...')\n log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')\n\n # Set random seed\n log.info(f'Using random seed {args.seed}...')\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\n # Get model\n log.info('Building model...')\n if args.model == 'cvae':\n model = Predictor(state_dim=args.state_dim,\n rel_state_dim=args.state_dim,\n pred_dim=args.pred_dim,\n edge_type_dim=args.n_edge_types,\n nhe_hidden_size=args.nhe_hidden_size,\n ehe_hidden_size=args.ehe_hidden_size,\n 
nfe_hidden_size=args.nfe_hidden_size,\n decoder_hidden_size=args.decoder_hidden_size,\n gmm_components=args.gmm_components,\n log_sigma_min=args.log_sigma_min,\n log_sigma_max=args.log_sigma_max,\n log_p_yt_xz_max=args.log_p_yt_xz_max,\n kl_weight=args.kl_weight,\n device=device)\n elif args.model == 'vanilla':\n model = VanillaLSTMPredictor(state_dim=args.state_dim,\n pred_dim=args.pred_dim,\n hidden_size=32,\n device=device)\n \n log.info(f'Loading checkpoint from {args.load_path}...')\n model, step = util.load_model(model, args.load_path, args.gpu_ids)\n\n model = model.to(device)\n model.eval()\n\n log.info('Building dataset...')\n test_data_list = [args.dataset_id]\n test_loader = build_highd_data_loader(test_data_list, args.eval_batch_size)\n dataset_size = len(test_loader.dataset)\n log.info(f'Test dataset size = {dataset_size}')\n\n vis_idxs = np.random.randint(0, dataset_size, args.n_vis) # n_vis = bs\n input_seq, input_masks, input_edge_types, pred_seq = test_loader.dataset[vis_idxs]\n\n # Predict\n log.info('Predicting...')\n if args.model == 'cvae':\n sampled_future, z_p_samples = model.predict(\n input_seq, input_masks, input_edge_types, args.n_z_samples_pred, most_likely=args.most_likely)\n # sampled_future.shape = (n_z_samples, n_vis, pred_seq_len, 2)\n elif args.model == 'vanilla':\n sampled_future = model.predict(input_seq, args.n_pred_steps)\n sampled_future = sampled_future.unsqueeze(0)\n # (1, n_vis, n_pred_steps, 2), n_z_samples = 1\n\n mse = F.mse_loss(sampled_future.mean(dim=0), pred_seq[:, :, 1, 1, 2:4])\n log.info(f'MSE = {mse.item()}')\n # exit()\n\n sampled_future = sampled_future.detach().cpu().numpy()\n input_seq = input_seq.detach().cpu().numpy()\n pred_seq = pred_seq.detach().cpu().numpy() # (n_vis, pred_seq_len, 2)\n\n log.info('Visualizing...')\n for traj_id in tqdm(range(args.n_vis)):\n sampled_vels = sampled_future[:, traj_id] # (x_dot, y_dot), (n_samples, pred_seq_len, 2)\n input_traj = input_seq[traj_id, :, 1, 1, :2] # (x, y), (in_seq_len, 2)\n pred_traj = pred_seq[traj_id, :, 1, 1, :2] # (x, y), (pred_seq_len, 2)\n \n plt.plot(denormalize(input_traj[:, 0], MIN_X, MAX_X), input_traj[:, 1] * LANE_WIDTH, color='blue')\n plt.plot(denormalize(pred_traj[:, 0], MIN_X, MAX_X), pred_traj[:, 1] * LANE_WIDTH, color='green')\n \n ground_truth_vels = pred_seq[traj_id, :, 1, 1, 2:4]\n \n \n x_t, y_t = denormalize(input_traj[-1, 0], MIN_X, MAX_X), input_traj[-1, 1] * LANE_WIDTH\n traj_x_t, traj_y_t = [], []\n for t in range(args.n_pred_steps):\n x_dot_t, y_dot_t = ground_truth_vels[t]\n x_dot_t = denormalize(x_dot_t, MIN_ABS_VEL_X, MAX_ABS_VEL_X)\n y_dot_t = denormalize(y_dot_t, MIN_ABS_VEL_Y, MAX_ABS_VEL_Y)\n x_t = x_t + dt * x_dot_t\n y_t = y_t + dt * y_dot_t\n traj_x_t.append(x_t)\n traj_y_t.append(y_t)\n plt.plot(traj_x_t, traj_y_t, color='m')\n for i in range(len(sampled_vels)):\n x, y = denormalize(input_traj[-1, 0], MIN_X, MAX_X), input_traj[-1, 1] * LANE_WIDTH\n _sampled_vels = sampled_vels[i] # (pred_seq_len, 2)\n traj_x, traj_y = [], []\n for t in range(args.n_pred_steps):\n x_dot, y_dot = _sampled_vels[t]\n x_dot = denormalize(x_dot, MIN_ABS_VEL_X, MAX_ABS_VEL_X)\n y_dot = denormalize(y_dot, MIN_ABS_VEL_Y, MAX_ABS_VEL_Y)\n x = x + dt * x_dot\n y = y + dt * y_dot\n traj_x.append(x)\n traj_y.append(y)\n plt.plot(traj_x, traj_y, color='red', alpha=0.5)\n\n # plt.legend(['Input Traj.', 'True Future Traj.', 'True Vel-Inferred Future Traj.', 'Predictions'])\n plt.xlabel('x (m)')\n plt.ylabel('y (m)')\n # plt.gcf().set_figwidth(25) # inches\n # 
plt.gca().set_aspect('equal', 'box')\n plt.gcf().set_size_inches(6, 3) # 10 : 3\n # plt.gca().set_xlim(0, 420)\n plt.gca().set_ylim(-6, 6)\n plt.tight_layout()\n plt.savefig(args.save_dir + f'/{traj_id}.png')\n plt.gca().cla()\n\nif __name__ == '__main__':\n main(args)\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"37178697","text":"#!/usr/bin/env python3\n\nfrom flask import Flask, render_template, g,\\\n redirect, url_for, session, request, json, make_response\nfrom functools import wraps\nfrom pydbus import SessionBus\nimport os\nimport re\n\napp = Flask(__name__)\napp.user_home = os.path.expanduser(\"~\")\napp.secret_key = \"ASD\"\n\n@app.before_request\ndef get_player():\n bus = SessionBus()\n g.player = bus.get('org.mpris.MediaPlayer2.dplayer', '/org/mpris/MediaPlayer2')\n g.extensions = ('.avi', '.mp4', '.m4a', '.mov', '.mpg', '.mpeg',\n '.ogg', '.flac', '.mkv')\n\n\n@app.route('/', defaults={\"path\": \"patterns\"})\n@app.route('/explore/<path:path>')\ndef explore(path):\n directories = []\n files = []\n internal_path = os.path.join(app.user_home, path)\n for content in os.listdir(internal_path):\n fullpath = os.path.join(internal_path, content)\n if os.path.isdir(fullpath) and not content.startswith('.'):\n directories.append((os.path.join(path, content), content))\n else:\n ext = os.path.splitext(content)[-1].lower()\n if ext in g.extensions:\n filepath = (os.path.join(path, content))\n sub_exists = os.path.exists(os.path.join(\n internal_path, content.replace(ext, '.srt')))\n files.append((filepath, content, sub_exists))\n\n parent = list(os.path.split(path))\n parent.pop()\n parent = os.path.join(*parent)\n parent = (parent, os.path.basename(parent))\n directories.sort(key=natural_key)\n files.sort(key=natural_key)\n return render_template('listing.html', files=files,\n directories=directories, title=parent)\n\n\ndef natural_key(string_):\n \"\"\"See http://www.codinghorror.com/blog/archives/001018.html\"\"\"\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)',\n string_[0])]\n\n\n@app.route('/play/<path:path>')\ndef play(path):\n full_path = os.path.join(app.user_home, path)\n session['filename'] = full_path\n g.player.OpenUri(full_path)\n return redirect(url_for('controls'))\n\n\n@app.route('/controls')\ndef controls():\n return render_template('controls.html')\n\n\n@app.route('/command/', methods=['POST'])\ndef command():\n cmd = request.form['cmd']\n redirect_url = False\n if cmd == 'stop':\n g.player.Stop();\n redirect_url = url_for('explore')\n elif cmd == 'playpause':\n g.player.PlayPause();\n elif cmd == 'plus10':\n g.player.Seek(10*1000000);\n elif cmd == 'minus10':\n g.player.Seek(-10*1000000);\n return json.dumps({\"redirect\": redirect_url})\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='0.0.0.0')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"38569099","text":"class fSelectVolunteer :\r\n def __init__(self, formObj, parentForm) :\r\n self.app = formObj.ClientApplication\r\n self.MarketerId = None\r\n self.MarketerName = None\r\n\r\n def OnDoubleClick(self,sender):\r\n self.FormObject.Close(1)\r\n\r\n def DisplayListData(self):\r\n BranchId = self.uipFilter.GetFieldValue('LBranch.BranchId') or ''\r\n\r\n AddParam = \"\"\r\n \r\n FilterName = (self.uipFilter.FilterName or '').strip()\r\n 
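# Hedged sketch (not the original code): the OQL below splices FilterName into the\r\n # query text with %s; since :BranchId is already bound via SetParameter, a bound\r\n # parameter such as \"Full_Name like :FilterName\" would avoid quoting issues,\r\n # assuming the OQL engine also accepts parameters inside LIKE clauses.\r\n 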
if FilterName != '' :\r\n AddParam += \" and Full_Name like '%s' \" % FilterName\r\n \r\n\r\n self.qMarketer.OQLText = \"\"\"\r\n select from Marketer\r\n [Branch_Id=:BranchId\r\n %s\r\n ]\r\n (Full_Name,\r\n MarketerId,\r\n self)\r\n then order by Full_Name;\r\n \"\"\" % AddParam\r\n\r\n self.qMarketer.SetParameter('BranchId', BranchId)\r\n self.qMarketer.DisplayData()\r\n\r\n def bApplyFilterClick(self, sender):\r\n self.DisplayListData()\r\n\r\n def GetMarketer(self):\r\n self.DisplayListData()\r\n\r\n st = self.FormContainer.Show()\r\n if st == 1:\r\n self.MarketerId = self.qMarketer.GetFieldValue('Marketer.MarketerId')\r\n self.MarketerName = self.qMarketer.GetFieldValue('Marketer.Full_Name')\r\n\r\n return st\r\n\r\n\r\n\r\n","sub_path":"dialogs/Transaksi/fSelectMarketer_intr.py","file_name":"fSelectMarketer_intr.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"112690118","text":"import numpy as np\n'''27. A linear array holds the rainfall recorded in each of the 12 months of one\nyear. Write a program that finds the total rainfall for the year, the average\nmonthly rainfall, the number of dry months (months with less than 30 mm of rain),\nand the driest month of the year.\n(Кудрявцев Владислав)'''\nA=np.zeros(12,dtype=int)\nn=len(A)\nsumm=0 # total rainfall for the year\nnt=0 # number of dry months\nfor i in range(n):\n A[i]=int(input('Input drop times:'))\n summ+=A[i]\n if A[i]<30:\n nt+=1\nnd=int(np.argmin(A)) # driest month, found only after all 12 values are read\nprint(A)\nprint(f'All drops in year:{summ} \\nAverage per month:{summ/n:.1f} \\nLess drops:{nt} \\nLess drops month:{nd}')\n","sub_path":"27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"381633606","text":"\"\"\"A simple checker for types of functions in twitterverse_functions.py.\"\"\"\n\nfrom typing import Any, Dict, List\nfrom io import StringIO\nimport unittest\nimport checker_generic\nimport twitterverse_functions as tf\n\nFILENAME = 'twitterverse_functions.py'\nPYTA_CONFIG = 'pyta/a3_pyta.txt'\nTARGET_LEN = 79\nSEP = '='\n\nCONSTANTS = {\n 'USERNAME': 'username',\n 'NAME': 'name',\n 'LOCATION': 'location',\n 'WEB': 'web',\n 'BIO': 'bio',\n 'FOLLOWING': 'following',\n 'ENDBIO': 'ENDBIO',\n 'END': 'END',\n 'SEARCH': 'SEARCH',\n 'FILTER': 'FILTER',\n 'PRESENT': 'PRESENT',\n 'OPERATIONS': 'operations',\n 'FOLLOWER': 'follower',\n 'FOLLOWERS': 'followers',\n 'NAME_INCLUDES': 'name-includes',\n 'LOCATION_INCLUDES': 'location-includes',\n 'SORT_BY': 'sort-by',\n 'POPULARITY': 'popularity',\n 'FORMAT': 'format',\n 'LONG': 'long'\n}\n\nDATA_FILE = \"\"\"tomCruise\nTom Cruise\nLos Angeles, CA\nhttp://www.tomcruise.com\nOfficial TomCruise.com crew tweets. We love you guys! \nVisit us at Facebook!\nENDBIO\nkatieH\nEND\nkatieH\nKatie Holmes\n\nwww.tomkat.com\nENDBIO\nEND\n\"\"\"\n\nQUERY_FILE = \"\"\"SEARCH\ntomCruise\nfollowing\nfollowers\nFILTER\nfollowing katieH\nname-includes tom\nlocation-includes CA \nPRESENT\nsort-by username\nformat long\n\"\"\"\n\nTWITTER_DATA = {'tomCruise': {'name': 'Tom Cruise',\n 'location': 'Los Angeles, CA',\n 'web': 'http://www.tomcruise.com',\n 'bio': 'Official TomCruise.com crew tweets. 
' +\n 'We love you guys!\\nVisit us at Facebook!',\n 'following': ['katieH']},\n 'katieH': {'name': 'Katie Holmes', 'location': '',\n 'web': 'www.tomkat.com', 'bio': '', 'following': []}}\n\nQUERY = {'SEARCH': {'username': 'tomCruise',\n 'operations': ['following', 'followers']},\n 'FILTER': {'following': 'katieH',\n 'name-includes': 'tom', 'location-includes': 'CA'},\n 'PRESENT': {'sort-by': 'username', 'format': 'long'}}\n\nLONG_RESULT = \"\"\"----------\nkatieH\nname: Katie Holmes\nlocation: \nwebsite: www.tomkat.com\nbio:\n\nfollowing: []\n----------\ntomCruise\nname: Tom Cruise\nlocation: Los Angeles, CA\nwebsite: http://www.tomcruise.com\nbio:\nOfficial TomCruise.com crew tweets. We love you guys!\nVisit us at Facebook!\nfollowing: ['katieH']\n----------\n\"\"\"\n\n\nclass CheckTest(unittest.TestCase):\n \"\"\"Type checker for assignment functions.\"\"\"\n\n def test_process_data(self) -> None:\n \"\"\"Test function process_data.\"\"\"\n\n data_keys = ['name', 'location', 'web', 'bio', 'following']\n msg = 'process_data should return a TwitterverseDict'\n open_data_file = StringIO(DATA_FILE)\n result = tf.process_data(open_data_file)\n for user in result:\n self.assertTrue(isinstance(user, str), msg)\n self._has_these_keys(result[user], data_keys, msg)\n for key in result[user]:\n if key == 'following':\n self.assertTrue(isinstance(result[user][key], list), msg)\n for item in result[user][key]:\n self.assertTrue(isinstance(item, str), msg)\n else:\n self.assertTrue(isinstance(result[user][key], str), msg)\n\n def test_process_query(self) -> None:\n \"\"\"Test function process_query.\"\"\"\n\n query_keys = ['SEARCH', 'FILTER', 'PRESENT']\n msg = 'process_query should return a valid QueryDict'\n open_query_file = StringIO(QUERY_FILE)\n result = tf.process_query(open_query_file)\n self._has_these_keys(result, query_keys, msg)\n\n # Search spec\n self._has_these_keys(result['SEARCH'], ['username', 'operations'], msg)\n self.assertTrue(isinstance(result['SEARCH']['operations'], list), msg)\n for item in result['SEARCH']['operations']:\n self.assertTrue(isinstance(item, str), msg)\n self.assertTrue(isinstance(result['SEARCH']['username'], str), msg)\n\n # Filter spec\n filter_keys = ['following', 'follower', 'name-includes',\n 'location-includes', 'bio-includes']\n self._has_these_keys(result['FILTER'], filter_keys, msg)\n self._is_dict_of_Ks_and_Vs(result['FILTER'], str, str, msg)\n\n # Sorting spec\n self._has_these_keys(result['PRESENT'], ['sort-by', 'format'], msg)\n self._is_dict_of_Ks_and_Vs(result['PRESENT'], str, str, msg)\n\n def test_get_search_results(self) -> None:\n \"\"\"Test function get_search_results.\"\"\"\n\n self._test_returns_list_of(tf.get_search_results,\n [TWITTER_DATA, QUERY['SEARCH']], [str])\n\n def test_get_filter_results(self) -> None:\n \"\"\"Test function get_filter_results.\"\"\"\n\n self._test_returns_list_of(tf.get_filter_results,\n [TWITTER_DATA, ['tomCruise', 'katieH'],\n QUERY['FILTER']], [str])\n\n def test_get_present_string(self) -> None:\n \"\"\"Test function get_present_string.\"\"\"\n\n result = tf.get_present_string(TWITTER_DATA,\n ['tomCruise', 'katieH'],\n QUERY['PRESENT'])\n msg = '''get_present_string should return a str, but returned {}'''\n self.assertTrue(isinstance(result, str), msg.format(type(result)))\n\n msg = '''incorrect formatting of presentation string, expected {}\\n got {}\\n'''\n self.assertEqual(result, LONG_RESULT, msg.format(LONG_RESULT, result))\n\n def test_all_followers(self) -> None:\n \"\"\"Test function 
all_followers.\"\"\"\n\n self._test_returns_list_of(tf.all_followers,\n [TWITTER_DATA, 'katieH'], [str])\n\n def test_check_constants(self) -> None:\n \"\"\"Values of constants.\"\"\"\n\n print('\\nChecking that constants refer to their original values')\n self._check_constants(CONSTANTS, tf)\n print(' check complete')\n\n def _test_returns_list_of(self, func, args, types):\n \"\"\"Check that func when called with args returns a list of elements\n of typef from types.\n\n \"\"\"\n\n result = checker_generic.type_check_simple(func, args, list)\n self.assertTrue(result[0], result[1])\n\n msg = '{} should return a list of length {}'\n self.assertEqual(len(result[1]), len(types),\n msg.format(func.__name__, len(types)))\n\n msg = ('Element at index {} in the list returned by {} '\n 'should be of type {}. Got {}.')\n for i, typ in enumerate(types):\n self.assertTrue(isinstance(result[1][i], typ),\n msg.format(i, func.__name__, typ, result[1][i]))\n\n def _has_these_keys(self, result: object, valid_keys: List[str], msg: str):\n \"\"\"Check if result is a dict with keys from a set of valid keys.\n \"\"\"\n self.assertTrue(isinstance(result, dict), msg)\n\n for k in result:\n self.assertTrue(k in valid_keys,\n msg + ', but key ' + str(k) + ' is not in ' +\n str(valid_keys))\n\n def _is_dict_of_Ks_and_Vs(self, result: object, key_tp: type,\n val_tp: type, msg: str):\n \"\"\"Check if result is a dict with keys of type key_tp and values\n of type val_tp.\n \"\"\"\n\n self.assertTrue(isinstance(result, dict), msg)\n\n for (key, val) in result.items():\n self.assertTrue(isinstance(key, key_tp),\n (msg + ', but one or more keys is not of type '\n + str(key_tp)))\n self.assertTrue(isinstance(val, val_tp),\n (msg + ', but value ' + str(val) + ' is not of type '\n + str(val_tp)))\n\n def _check_simple_type(self, func: callable, args: list,\n expected: type) -> None:\n \"\"\"Check that func called with arguments args returns a value of type\n expected. Display the progress and the result of the check.\n\n \"\"\"\n\n print('\\nChecking {}...'.format(func.__name__))\n result = checker_generic.type_check_simple(func, args, expected)\n self.assertTrue(result[0], result[1])\n print(' check complete')\n\n def _test_returns_list_of(self, func, args, types):\n \"\"\"Check that func when called with args returns a list of elements\n of typef from types.\n\n \"\"\"\n\n print('\\nChecking {}...'.format(func.__name__))\n\n result = checker_generic.type_check_simple(func, args, list)\n self.assertTrue(result[0], result[1])\n\n msg = '{} should return a list of length {}'\n self.assertEqual(len(result[1]), len(types),\n msg.format(func.__name__, len(types)))\n\n msg = ('Element at index {} in the list returned by get_station '\n 'should be of type {}. 
Got {}.')\n for i, typ in enumerate(types):\n self.assertTrue(isinstance(result[1][i], typ),\n msg.format(i, typ, result[1][i]))\n\n print(' check complete')\n\n def _check_constants(self, name2value: Dict[str, object], mod: Any) -> None:\n \"\"\"Check that, for each (name, value) pair in name2value, the value of\n a variable named name in module mod is value.\n \"\"\"\n\n for name, expected in name2value.items():\n actual = getattr(mod, name)\n msg = 'The value of constant {} should be {} but is {}.'.format(\n name, expected, actual)\n self.assertEqual(expected, actual, msg)\n\n\nchecker_generic.ensure_no_io('twitterverse_functions')\n\nprint(''.center(TARGET_LEN, SEP))\nprint(' Start: checking coding style '.center(TARGET_LEN, SEP))\nchecker_generic.run_pyta(FILENAME, PYTA_CONFIG)\nprint(' End checking coding style '.center(TARGET_LEN, SEP))\n\nprint(' Start: checking type contracts '.center(TARGET_LEN, SEP))\nunittest.main(exit=False)\nprint(' End checking type contracts '.center(TARGET_LEN, SEP))\n\nprint('\\nScroll up to see ALL RESULTS:')\nprint(' - checking coding style')\nprint(' - checking type contract\\n')\n","sub_path":"a3/a3_checker.py","file_name":"a3_checker.py","file_ext":"py","file_size_in_byte":10221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"57997951","text":"import random\n\nclass State():\n\n\tpins = [1, 2, 3]\n\tmoves = [\"s1\", \"s2\", \"s3\", \"b1\", \"b2\", \"b3\"]\n\t# If the disks are in different pins, we name the state first with where the big one is\n\tstates = [\"b1s1\", \"b1s2\", \"b1s3\", \"s2b2\", \"s3b3\", \"b3s2\", \"b2s3\", \"b3s3\", \"b2s2\", \"b3s1\", \"b2s1\", \"s1b1\"]\n\n\t# If the disks are in different pins, the first disk is the big\n\tdef __init__(self, name, obeyProb):\n\t\tself.name = name\n\t\tif name[1] == name[3]:\t# The two disks are in the same pin \n\t\t\tif name[0] == \"b\":\t\t# The botton disk is the big one\n\t\t\t\tself.onTop = \"s\"\n\t\t\telse: self.onTop = \"b\"\t\n\t\telse: self.onTop = None\n\t\tself.b = name[1]\n\t\tself.s = name[3]\n\t\tself.obeyProb = obeyProb\n\n\tdef getReward(self):\n\t\treward = -1\n\t\tif self.onTop == \"b\":\n\t\t\treward = -10\n\t\telif self.s == self.b and int(self.s) == 3:\n\t\t\treward = 100\n\t\treturn reward\n\n\t# Action 1 to 3 is moving the small to the pin of the number\n\t# Action 3 to 6 is moving the big to the pin of the number - 3\n\tdef check_move(self, action):\n\t\tnewState = \"\"\n\t\tif action[0] == \"s\": # If we are moving the small disk\n\t\t\tif action[1] == self.b: # And the pin where it is going the big one is already there\n\t\t\t\tonTopAux = \"s\"\n\t\t\telse: \n\t\t\t\tonTopAux = None\n\t\t\tsAux = action[1]\n\t\t\tbAux = self.b\n\t\t\tnewState = \"b\" +bAux + \"s\" + sAux \n\t\telse: # We are moving the big disk\n\t\t\tif action[1] == self.s: # if the pin where it is going the small disk is already there\n\t\t\t\tonTopAux = \"b\"\n\t\t\t\tnewState = \"s\" +self.s+ \"b\" + action[1] \n\t\t\telse: \n\t\t\t\tonTopAux = None\n\t\t\t\tnewState = \"b\" +action[1] + \"s\" + self.s \n\t\t\tbAux = action[1]\n\t\t\tsAux = self.s\n\t\t# The state b3s3 is an end state and the only possibility is to stay there with a reward of 0\n\t\tif self.name == newState:\n\t\t\treturn 0, newState \n\t\treturn State(newState, self.obeyProb).getReward(), newState\n\n\tdef get_error_move(self, action):\n\t\t# Get the error move\n\t\tif action[0] == \"s\":\n\t\t\treturn \"s\" + str([pin for pin in self.pins if pin != int(self.s) and pin != 
int(action[1])][0])\n\n\tdef make_move(self, action):\n\t\tif self.name == \"b3s3\":\n\t\t\treturn self.check_move(action)\n\t\tif random.random() > self.obeyProb: # Mistake happens\n\t\t\treturn self.check_move(self.get_error_move(action))\n\t\telse: \n\t\t\treturn self.check_move(action)\n\n\t# returns a list of (probability, reward, s') transition tuples\n\tdef get_transition_probs(self, action):\n\n\t\tpossibleMoves = []\n\t\treward, newState = self.check_move(action)\n\t\tpossibleMoves.append((self.obeyProb, reward, newState))\n\t\tif self.obeyProb == 1:\n\t\t\treturn possibleMoves\n\n\t\t# The state b3s3 is an end state and the only possibility is to stay there with a reward of 0\n\t\tif self.name == \"b3s3\":\n\t\t\treturn possibleMoves\n\n\t\treward, newState = self.check_move(self.get_error_move(action))\n\t\tpossibleMoves.append((1 - self.obeyProb, reward, newState))\n\t\treturn possibleMoves\n","sub_path":"state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"356101254","text":"import os\nimport numpy as np\nfrom PIL import Image\n\n__author__ = \"Tianhong Gan\"\n__email__ = \"tianhonggan@outlook.com\"\n\ndef plot_channels(channels, num_classes, n_ch):\n ''' plot channels selected on 64 channel template\n\n Keyword arguments:\n channels -- array of selected channels\n num_classes -- number of classes\n n_ch -- number of channels selected\n '''\n os.makedirs(f'plot_channels/plots', exist_ok=True)\n background = Image.open(f'plot_channels/64_channel_sharbrough_bg.png')\n for i in channels:\n channel = i + 1\n img = Image.open(f'plot_channels/channels/{channel}.png')\n background.paste(img, (0, 0), img)\n background.save(f'plot_channels/plots/class{num_classes}_nch{n_ch}.png',\"PNG\")\n\ndef plot_heatmap_avg(num_classes,type):\n ''' plot heatmap of 64-channels on 10-10 international system - average across all 12 filters\n\n Keyword arguments:\n num_classes -- number of classes\n type -- 'channels' for coloured ring around channels or 'channels_fill' for opaque colour heatmap\n '''\n 
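# Note (hedged reading): the per-channel \"energy\" computed below sums\n # np.sqrt(filter**2) -- i.e. abs(filter) -- over the 12 CSP filters of each\n # channel, which appears equivalent to the one-liner np.abs(w_temp).sum(axis=1)\n # (illustrative, not used here); channels are then binned against mean +/- sd.\n 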
os.makedirs(f'plot_channels/plots/heatmap', exist_ok=True)\n\n w = np.loadtxt(open(f'results/w_{num_classes}class_csp.csv', \"rb\"), delimiter=\" \")\n\n mean = np.mean(w)\n sd = np.std(w)\n\n for filter in range(12):\n background = Image.open(f'plot_channels/64_channel_sharbrough_bg.png')\n\n channel = 1\n for i in w[:,filter]:\n if i > mean+sd:\n img = Image.open(f'plot_channels/{type}/{channel}.png')\n background.paste(img, (0, 0), img)\n elif i > mean:\n img = Image.open(f'plot_channels/{type}/{channel}_o.png')\n background.paste(img, (0, 0), img)\n elif i > mean-sd:\n img = Image.open(f'plot_channels/{type}/{channel}_y.png')\n background.paste(img, (0, 0), img)\n else:\n pass\n channel += 1\n background.save(f'plot_channels/plots/heatmap/class{num_classes}_filter{filter}.png',\"PNG\")\n\n# plot_heatmap_avg(4,'channels_fill')\n# plot_heatmap(4,'channels_fill')\n\n\n''' make white background transparent '''\n\n# for i in range(64):\n# channel = i + 1\n# img = Image.open(f'plot_channels/channels/{channel}_y.png')\n# img = img.convert(\"RGBA\")\n# datas = img.getdata()\n#\n# newData = []\n# for item in datas:\n# if item[0] == 255 and item[1] == 255 and item[2] == 255:\n# newData.append((255, 255, 255, 0))\n# else:\n# newData.append(item)\n#\n# img.putdata(newData)\n# img.save(f'plot_channels/channels/{channel}_y.png', \"PNG\")\n","sub_path":"plot_channels.py","file_name":"plot_channels.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"401040309","text":"#from estruturado.MeusModulos.Clientes import *\nfrom estruturado.MeusModulos import Clientes\nimport copy\n#adicione um import para acessar o módulo ContaCorrente\nfrom estruturado.MeusModulos import ContaCorrente\nimport unittest\n\nclass TestClientes(unittest.TestCase):\n def test_01_list(self):\n self.assertEqual(Clientes.nro_clientes(Clientes.clientes_inicial), 5)\n\n def test_02_list(self):\n clientes_copia = copy.deepcopy(Clientes.clientes_inicial)\n Clientes.excluir_cliente(clientes_copia, 3)\n self.assertEqual(Clientes.nro_clientes(clientes_copia), 4)\n\n def test_03_busca_acha(self):\n cliente = Clientes.pesquisar_cliente(Clientes.clientes_inicial, 1)\n if type(cliente) != dict:\n self.fail('nao achei o cliente 1')\n cliente_3 = Clientes.pesquisar_cliente(Clientes.clientes_inicial, 1)\n if type(cliente_3) != dict:\n self.fail('nao achei o cliente 3')\n\n def test_04_busca_excessao(self):\n try:\n Clientes.pesquisar_cliente(Clientes.clientes_inicial,10)\n except Clientes.ClienteNaoEncontradoException:\n print('ok, sua busca por cliente 10 retornou excessao, como devia')\n except:\n self.fail('sua busca por cliente 10 retornou excessao, mas nao a correta')\n else:\n self.fail('sua busca por cliente 10 nao retornou excessao')\n\n def test_05_credito(self):\n clientes_copia = copy.deepcopy(Clientes.clientes_inicial)\n cliente = Clientes.pesquisar_cliente(clientes_copia, 1)\n ContaCorrente.credito(10, cliente, 10, 10)\n self.assertEqual(cliente['saldo'], 20)\n\n def test_06_debito(self):\n clientes_copia = copy.deepcopy(Clientes.clientes_inicial)\n cliente = Clientes.pesquisar_cliente(clientes_copia, 1)\n ContaCorrente.debito(10, cliente, 10, 10)\n self.assertEqual(cliente['saldo'], 0)\n \n def test_07_debito_negativando(self):\n clientes_copia = copy.deepcopy(Clientes.clientes_inicial)\n cliente = Clientes.pesquisar_cliente(clientes_copia, 3)\n ContaCorrente.debito(30, cliente, 10, 30)\n self.assertEqual(cliente['saldo'], 0)\n 
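# Note (hedged reading of this test): the third argument to debito acts as the\n # credit limit, so a 10-unit debit from a zero balance is allowed to drive the\n # balance to -10, as asserted below.\n 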
ContaCorrente.debito(10, cliente, 10, 10) #saque da conta zerada, mas o terceiro parametro,\n #limite_credito, diz que pode\n self.assertEqual(cliente['saldo'], -10)\n \n def test_08_debito_alem_do_limite(self):\n clientes_copia = copy.deepcopy(Clientes.clientes_inicial)\n cliente = Clientes.pesquisar_cliente(clientes_copia, 2)\n ContaCorrente.debito(20, cliente, 10, 20)\n self.assertEqual(cliente['saldo'], 0)\n ContaCorrente.debito(10, cliente, 10, 10) #saque da conta zerada, mas o terceiro parametro,\n #limite_credito, diz que pode\n self.assertEqual(cliente['saldo'], -10)\n try:\n ContaCorrente.debito(10, cliente, 10, 10) #estou excedendo o limite\n except:\n print('ok, voce deu uma excessao quando o limite foi excedido')\n else: #se nao rolou excessao\n self.fail('voce nao deu uma excessao quando excedemos o limite')\n self.assertEqual(cliente['saldo'], -10) #verificar se vc nao debitou a\n #operacao invalida\n\n def test_09_transferencia_funciona(self):\n clientes_copia = copy.deepcopy(Clientes.clientes_inicial)\n doador,receptor = 2,5\n valor_doado = 20\n limite_credito, limite_transferencia = 0,100\n ContaCorrente.transferencia(clientes_copia,doador,receptor,valor_doado,limite_credito,limite_transferencia)\n self.assertEqual(Clientes.pesquisar_cliente(clientes_copia,2)['saldo'],0)\n self.assertEqual(Clientes.pesquisar_cliente(clientes_copia,5)['saldo'],70)\n \n def test_10_transferencia_falha_limite_credito(self):\n clientes_copia = copy.deepcopy(Clientes.clientes_inicial)\n doador,receptor = 2,5\n valor_doado = 30\n limite_credito, limite_transferencia = 0,100\n try:\n ContaCorrente.transferencia(clientes_copia,doador,receptor,valor_doado,limite_credito,limite_transferencia)\n except ContaCorrente.LimiteCreditoExcedidoException:\n print('ao usar credito demais, a transferencia falha com a excessao correta')\n except:\n self.fail('ao usar credito demais, a transferencia falhou com a excessao errada')\n else:\n self.fail('ao usar credito demais, a transferencia nao levantou excessao')\n\n self.assertEqual(Clientes.pesquisar_cliente(clientes_copia,2)['saldo'],20)\n self.assertEqual(Clientes.pesquisar_cliente(clientes_copia,5)['saldo'],50)\n \n def test_11_transferencia_falha_limite_transferencia(self):\n clientes_copia = copy.deepcopy(Clientes.clientes_inicial)\n doador,receptor = 2,5\n valor_doado = 20\n limite_credito, limite_transferencia = 10,10\n try:\n ContaCorrente.transferencia(clientes_copia,doador,receptor,valor_doado,limite_credito,limite_transferencia)\n except ContaCorrente.LimiteTransferenciaExcedidoException:\n print('ao ultrapassar o limite de transferencia, a transferencia falha com a excessao correta')\n except:\n self.fail('ao ultrapassar o limite de transferencia, a transferencia falha com a excessao errada')\n else:\n self.fail('ao ultrapassar o limite de transferencia, a transferencia nao falhou, mas devia')\n\n self.assertEqual(Clientes.pesquisar_cliente(clientes_copia,2)['saldo'],20)\n self.assertEqual(Clientes.pesquisar_cliente(clientes_copia,5)['saldo'],50)\n \n \n\n\ndef run_tests():\n suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestClientes)\n unittest.TextTestRunner(verbosity=2,failfast=True).run(suite)\n\nif __name__ == '__main__':\n run_tests()\n","sub_path":"funcoes_banco/test_clientes.py","file_name":"test_clientes.py","file_ext":"py","file_size_in_byte":5826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"194237487","text":"from ..models import (Scraper, ArticleSpider, 
ArticleThread,\n Article, CrawlerSet, CrawlerItem, ScraperAnalysis)\nfrom ..serializers import (ScraperSerializer, ArticleSpiderSerializer, ArticleThreadSerializer, ArticleSerializer,\n CrawlerSetSerializer, CrawlerItemSerializer, ScraperAnalysisSerializer)\nfrom rest_framework.permissions import AllowAny, IsAdminUser, IsAuthenticated\nfrom rest_framework import viewsets, status, filters\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView, ListCreateAPIView\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\nfrom ..pagination import CrawlerItemPagination, ScraperPagination, ArticleSpiderPagination, ArticleThreadPagination, ArticlePagination\n# from django_filters.rest_framework import DjangoFilterBackend\nimport datetime, time, json, math, statistics\nfrom .selectors import (get_scraper_analysis, scraper_obj_not_finish, get_article_spider_not_in_use, get_article_thread_not_in_use,\n get_articles_not_in_use, add_article, get_crawler_crawler_set\n)\nfrom django.conf import settings\nfrom django.core.cache.backends.base import DEFAULT_TIMEOUT\nfrom django.utils.decorators import method_decorator\n# from django.views.decorators.cache import cache_page\n# from django.views.decorators.vary import vary_on_cookie\nfrom django.db import transaction\nfrom .selectors import (get_scrapers, get_crawler_sets, get_articles)\n\nCACHE_TTL = getattr(settings, 'CACHE_TTL', DEFAULT_TIMEOUT)\n\nis_testing = settings.TESTING\n\n# @cache_page(CACHE_TTL)\n@permission_classes([IsAdminUser])\n@api_view(['GET'])\ndef dashboard(request):\n\n # one queryset per day of interest (2021-03-24 .. 2021-03-29)\n day_sets = [Scraper.objects.filter(timestamp__year='2021', timestamp__month='03', timestamp__day=day)\n for day in ('24', '25', '26', '27', '28', '29')]\n # samples = Scraper.objects.filter(timestamp__gte=datetime.date(2021, 3, 24),\n # sampledate__lte=datetime.date(2021, 3, 25)) \n dates = [len(day_set) for day_set in day_sets]\n data = {}\n results = []\n for day_set in day_sets:\n results.append({\n \"total_data\": sum(s.crawler_set.get_total_articles() for s in day_set),\n \"success\": sum(s.crawler_set.get_total_parsed_article() for s in day_set),\n \"error\": sum(s.crawler_set.get_total_error() for s in day_set),\n \"skip\": sum(s.crawler_set.get_total_skip_error() for s in day_set),\n \"date\": [s.timestamp for s in day_set][0],\n })\n data['results'] = results\n data['total_data'] = sum(dates)\n \n return Response(data)\n\n# MAIN LOGIC FUNCTION FOR SAVING AND ADDING OBJECTS IN SCRAPER\n@transaction.atomic\n@permission_classes([IsAdminUser])\n@api_view(['POST', ])\ndef scraper_logic_process(request):\n t1 = time.perf_counter()\n # TESTING AREA DATA\n if is_testing:\n f = open('test_data.json')\n data = json.load(f)\n # PRODUCTION DATA\n else:\n data = request.data\n\n #GET: get scraper analysis object\n scraper_analysis = get_scraper_analysis(request)\n\n # GET: get crawler set is_finished = False\n crawler_set = get_crawler_crawler_set(request)\n\n # GET: get scraper obj is_finished = False\n scraper = scraper_obj_not_finish(request)\n\n # 
INITIALIZE split data of fime finished => hour, minute, second\n hour, minute, second = data.get('time_finished').split(':')\n\n # INITIALIZE CHUNKED SPIDERS\n spiders = data.get('spiders')\n\n try:\n # LOOP: ADD & SAVE all ARTICLES to its respective THREAD, then add THREADS to its respective SPIDER\n # => when finish or succesfull update in_use and is_finished = True\n for spider in spiders:\n # GET or CREATE: get the current not in use spider obj\n spider_obj = get_article_spider_not_in_use(request)\n print(\"----------------- : SPIDER START\")\n # ADD & SAVE: save all article items and add to thread object || LOOP: for loop of all threads for each spider.\n for thread_obj in spider:\n thread = get_article_thread_not_in_use(request)\n # LOOP: ARTICLE(S) PER THREAD\n for obj in thread_obj:\n print(\"\")\n print(thread_obj[obj])\n # SAVE ARTICLE and add to its respective THREAD\n add_article(request, thread_obj[obj])\n print(\"\")\n\n # GET: Assign all articles not in use\n articles = get_articles_not_in_use(request)\n\n # LOOP: Add all recent saved articles to current thread.\n for article in articles:\n print(\"article added to its respective THREAD\")\n thread.articles.add(article)\n article.in_use = True\n article.save()\n\n # ADD: add current thread to existing not in_use spider object\n spider_obj.thread_crawlers.add(thread)\n thread.in_use = True\n thread.save()\n print(\"THREAD added to its respective SPIDER\")\n # END OF THREAD\n\n # ADD: add current spider to main parent => SCRAPER OBJ with field of is_finished=False\n print(\"SPIDER added to its respective SCRAPER\")\n scraper.spiders.add(spider_obj)\n # UPDATE: patch existing spider in_use to True => meaning its already added to main parent => SCRAPER\n spider_obj.in_use = True\n spider_obj.save()\n\n # End of LOOP for SPIDER\n print(\"END of loop for spiders\")\n except Exception as e:\n print(\"errors in spider\")\n print(\"Process terminated by scrapy\")\n print(e)\n scraper.terminated_process = True\n\n # END OF LOOP | SAVE: instansiate all other required data.\n try:\n scraper.data = data.get('data')\n scraper.workers = data.get('workers')\n scraper.crawler_set = crawler_set\n scraper.info_log = data.get('info_log')\n scraper.error_log = data.get('error_log')\n scraper.time_finished = datetime.time(\n int(hour), int(minute), int(second))\n scraper.is_finished = True\n scraper.save()\n \n # ADD: add scraper object to scraper analysis object\n scraper_analysis.scrapers.add(scraper)\n\n # UPDATE: update crawler set in_use into True\n crawler_set.is_finished = True\n crawler_set.save()\n print(\"ALL DONE!\")\n print(round(time.perf_counter() - t1, 2))\n except Exception as e:\n print(\"Error occur when saving scraper data\")\n print(e)\n\n # END OF LOGIC :)\n return Response({\"message\": \"Succesfully created Scraper Object\"})\n\n\n\nclass CrawlerSetViewset(viewsets.ModelViewSet):\n serializer_class = CrawlerSetSerializer\n lookup_field = 'id'\n\n def get_queryset(self):\n return CrawlerSet.objects.all().order_by('-timestamp')\n\n def list(self, request, *args, **kwargs):\n return super().list(request, *args, **kwargs)\n\n def get_permissions(self):\n # \"\"\"\n # Instantiates and returns the list of permissions that this view requires.\n # \"\"\"\n if self.action == 'list':\n permission_classes = [IsAuthenticated]\n elif self.action == 'retrieve':\n permission_classes = [IsAuthenticated] # AllowAny\n elif self.action == 'create':\n permission_classes = [IsAdminUser] # AllowAny\n else:\n permission_classes = [IsAdminUser]\n 
return [permission() for permission in permission_classes]\n\nclass CrawlerItemviewset(viewsets.ModelViewSet):\n serializer_class = CrawlerItemSerializer\n pagination_class = CrawlerItemPagination\n # filter_backends = [DjangoFilterBackend, filters.SearchFilter]\n filter_backends = [filters.SearchFilter]\n search_fields = ['article_id', 'article_error_status', 'article_url']\n lookup_field = 'id'\n\n def get_queryset(self):\n return CrawlerItem.objects.all().order_by('-timestamp')\n\n def create(self, request, *args, **kwargs):\n resp_data = {}\n if is_testing:\n f = open('test_article_items.json')\n data = json.load(f)\n else:\n data = request.data\n \n for data in data:\n print(\"--------------------- crawler set | ITEM\")\n item_serializer = self.serializer_class(data=data)\n if item_serializer.is_valid(): \n item_serializer.save()\n print(\"SAVED\")\n # # TODO: check if _id already exists in database\n # # If exists drop it. Otherwise, add it.\n # GET / CHECK crawler set with is_finished = False\n crawler_obj = get_crawler_crawler_set(request)\n crawler_items = save_crawler_item_to_crawler_set(request, crawler_obj, False)\n resp_data['message'] = \"{} article(s) successfully added to your spiders.\".format(\n len(crawler_items))\n return Response(resp_data, status=status.HTTP_201_CREATED)\n\n def list(self, request, *args, **kwargs):\n return super().list(request, *args, **kwargs)\n\n def get_permissions(self):\n # \"\"\"\n # Instantiates and returns the list of permissions that this view requires.\n # \"\"\"\n if self.action == 'list':\n permission_classes = [IsAuthenticated]\n elif self.action == 'retrieve':\n permission_classes = [IsAuthenticated] # AllowAny\n elif self.action == 'create':\n permission_classes = [IsAdminUser] # AllowAny\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\n@permission_classes([IsAdminUser])\n@api_view(['GET', ])\ndef optimize_log_file(request):\n scrapers = Scraper.objects.all()\n data = {}\n items = []\n \n for scraper in scrapers:\n # print(len(scraper.info_log))\n # print(scraper.info_log[:4])\n\n divisible_n = math.ceil(len(scraper.info_log) / 4)\n chunk_info = [scraper.info_log[i:i + divisible_n]\n for i in range(0, len(scraper.info_log), divisible_n)]\n chunked_join_str = ''.join(chunk_info[::3])\n # div_n = len(scraper.info_log) // 3\n items.append(\n {\"id\": scraper.id ,\"divisible_n\": divisible_n, \"total length before\": len(scraper.info_log), \"total length after\": len(chunked_join_str),\"before\": scraper.info_log, \"after\": chunked_join_str}\n )\n data['items'] = items\n return Response(data)\n\n'''\n DASHBOARD RETURN JSON OBJECTS\n'''\n@permission_classes(['IsAdminUser'])\n@api_view(['GET', ])\ndef scrapers_analysis(request):\n data = {}\n scrapers = get_scrapers(request)\n article_data = get_crawler_sets(request)\n articles = get_articles(request)\n\n # LOGIC for appending and computing to get the sum of all errors\n error_list = []\n [error_list.append(article.get_total_error()) for article in article_data]\n error_list = list(map(lambda article: article.get_total_error(), article_data))\n # Same as above => another logic is by using map\n\n # LOGIC for appending and computing to get the average of all download latency\n download_latency_list = list(map(lambda article: article.get_avg_dl_latency(), article_data))\n\n # Total data spawned by scrapy\n data_list = list(map(lambda scraper: scraper.data, scrapers))\n\n # Instantiate a list of total parsed articles\n 
parsed_article_list = list(map(lambda article: article.get_total_parsed_article(), article_data))\n\n # LOGIC for computing an absolute total number of missed artilces or skip articles\n missed_article_list = list(map(lambda scraper: scraper.get_total_missed_articles(), scrapers))\n \n\n data['total_data'] = sum(data_list)\n data['total_articles'] = len(articles)\n data['average_download_latency'] = round(statistics.mean(download_latency_list), 2)\n\n data['successful_parsed_articles'] = sum(parsed_article_list)\n data['unsuccessful_parse_articles'] = sum(error_list)\n data['missed_articles'] = sum(missed_article_list)\n \n data['total_scraping_rounds'] = len(scrapers)\n \n return Response(data, status=status.HTTP_200_OK)\n\n\n'''\n ALL function helpers for CRAWLER SET OBJECT\n CRAWLER SET => get or create CRAWLER SET object\n\n'''\n\n# FUNCTION for saving and adding crawler item.\n\n\ndef save_crawler_item_to_crawler_set(request, crawler_obj, is_exist):\n # GET: get all crawler items with in_use = False\n crawler_items = CrawlerItem.objects.filter(in_use=False)\n if is_exist:\n for item in crawler_items:\n print(item)\n try:\n crawler_obj.crawlers.add(item)\n item.in_use = True\n item.save()\n print(\"{} was successfully added as item crawler\".format(item))\n except Exception as e:\n print(\"Error\")\n print(e)\n else:\n for item in crawler_items:\n try:\n crawler_obj.crawlers.add(item)\n item.in_use = True\n item.save()\n print(\"{} was successfully added as item crawler\".format(item))\n except Exception as e:\n print(\"Error\")\n print(e)\n return crawler_items","sub_path":"scraper/views/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":17621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"312802452","text":"import turtle\n\nclass rectangle():\n \"\"\"Represents a rectangle.\n attributes: width, height.\n \"\"\"\n\n\nclass circle():\n \"\"\"Represents a circle.\n attributes: radius.\n \"\"\"\n radius=50\n\ndef draw_rect(r):\n \"\"\" Draws a rectangle with given width and height using turtle\"\"\"\n for i in range(2):\n turtle.fd(r.width)\n turtle.lt(90)\n turtle.fd(r.height)\n turtle.lt(90)\n\ndef draw_circle(c):\n \"\"\"Draws a circle with given radius using turtle\"\"\"\n turtle.circle(c.radius)\n\ndef main():\n r = rectangle()\n r.width=50\n r.height=200\n c = circle()\n c.radius=50\n print(draw_rect(r))\n turtle.reset()\n print(draw_circle(c))\n turtle.mainloop()\n\nif __name__ == '__main__':\n main()\n","sub_path":"lab5/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"43151134","text":"from random import randint\n\nimport pygame\n\n\nclass Board:\n def __init__(self, wid, heig):\n self.wid = wid\n self.heig = heig\n self.board = [[0] * wid for _ in range(heig)]\n self.left = 0\n self.top = 0\n self.cell_size = 50\n\n def set_view(self, left, top, cell_size):\n self.left = left\n self.top = top\n self.cell_size = cell_size\n\n def render(self):\n for y in range(self.wid):\n for x in range(self.heig):\n pygame.draw.rect(screen, (255, 255, 255),\n (self.left + x * self.cell_size, self.top + y * self.cell_size,\n self.cell_size, self.cell_size), 1)\n\n def get_cell(self, mouse_pos):\n block = (mouse_pos[0] - board.left) // board.cell_size, (mouse_pos[1] - board.top) // board.cell_size\n return block if not (block[0] < 0 or block[0] >= board.wid or block[1] < 0 or block[1] >= 
board.heig) else None\n\n def on_click(self, cell_coords):\n block = cell_coords\n if block is not None:\n board.board[block[1]][block[0]] = (board.board[block[1]][block[0]] + 1) % 3\n\n def get_click(self, mouse_pos):\n cell = self.get_cell(mouse_pos)\n self.on_click(cell)\n\n\nclass Minesweeper(Board):\n def __init__(self, width, height, screen, mines):\n super().__init__(width + 2, height + 2)\n self.mines = mines\n self.screen = screen\n counter = mines - 1\n while counter >= 0:\n w = randint(1, width - 2)\n h = randint(1, height - 2)\n if self.board[h][w] == 0:\n self.board[h][w] = 10\n counter -= 1\n\n def render(self):\n for i in range(1, len(self.board) - 1):\n for j in range(1, len(self.board[i]) - 1):\n pygame.draw.rect(self.screen, pygame.Color('white'), (\n self.left + j * self.cell_size, self.top + i * self.cell_size, self.cell_size, self.cell_size), 1)\n if self.board[i][j] == 10:\n pygame.draw.rect(self.screen, pygame.Color('red'), (\n self.left + 1 + j * self.cell_size, self.top + 1 + i * self.cell_size, self.cell_size - 2,\n self.cell_size - 2))\n\n def open_cell(self, pos):\n w, h = self.get_cell(pos)\n counter = 0\n if self.board[h][w + 1] == 10:\n counter += 1\n if self.board[h + 1][w] == 10:\n counter += 1\n if self.board[h + 1][w + 1] == 10:\n counter += 1\n if self.board[h + 1][w - 1] == 10:\n counter += 1\n if self.board[h][w - 1] == 10:\n counter += 1\n if self.board[h - 1][w] == 10:\n counter += 1\n if self.board[h - 1][w + 1] == 10:\n counter += 1\n if self.board[h - 1][w - 1] == 10:\n counter += 1\n show_text(str(counter), w * self.cell_size + self.cell_size // 2, h * self.cell_size + self.cell_size // 2,\n self.screen)\n\n\ndef show_text(text, x, y, surface, color='white', size=30, align='left'):\n font = pygame.font.Font(None, size)\n\n string_rendered = font.render(text, 1, pygame.Color(color))\n text_rect = string_rendered.get_rect()\n text_rect.x = x\n text_rect.y = y\n if align == 'center':\n text_rect.x -= text_rect.width // 2\n text_rect.y -= text_rect.height // 2\n pygame.draw.rect(surface, pygame.Color('black'), text_rect)\n surface.blit(string_rendered, text_rect)\n\n\npygame.init()\nsize = width, height = 800, 600\nscreen = pygame.display.set_mode(size)\n\nrunning = True\nfps = 60\nv = 100\nboard = Minesweeper(5, 7, screen, 7)\nclock = pygame.time.Clock()\n\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n board.open_cell(event.pos)\n if event.type == pygame.MOUSEBUTTONUP:\n pos = event.pos\n rising = True\n\n board.render()\n\n clock.tick(fps)\n pygame.display.flip()\n\npygame.quit()\n","sub_path":"Урок Проект PyGame 4/Дедушка сапёра.py","file_name":"Дедушка сапёра.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"352740647","text":"import numpy as np\nfrom scipy.stats import chi2\n\n\nclass MahalanobisDistance:\n\n \"\"\"\n Description:\n ------------\n\n Class that computes the mahalanobis distance in order to detect multidimensional outliers.\n For more info, check:\n\n https://en.wikipedia.org/wiki/Mahalanobis_distance\n https://blogs.sas.com/content/iml/2012/02/15/what-is-mahalanobis-distance.html\n\n Parameters:\n -----------\n\n p_value [optional float]: probability of obtaining test results at least as extreme as the results actually\n observed, under the assumption that the null hypothesis is correct\n\n H0: mu_i = mu_j / i != j\n H1: mu_i != 
mu_j\n\n * mu_i -> mean of i\n\n All the records that have a p-value lower than self.p_value will be detected as outlier\n\n Important:\n ----------\n\n This distance only works with 2 or more variables. In case of using 1 variable it will not work and it should be\n fine using the z_score.\n \"\"\"\n\n def __init__(self, p_value=0.05):\n\n self.p_value = p_value\n\n def compute_mahalanobis_individual(self, vector, means_vector, cov_matrix):\n\n x_mu = vector - means_vector\n inv_covmat = np.linalg.inv(cov_matrix)\n\n distance = np.dot(np.dot(x_mu, inv_covmat), x_mu.T)\n p_value = 1 - chi2.cdf(distance, len(vector) - 1)\n\n if p_value < self.p_value:\n print(f\"The record {vector} was detected as an outlier\")\n\n def multidimensional_outliers(self, data, means_vector, cov_matrix):\n\n for i in range(data.shape[1]):\n self.compute_mahalanobis_individual(data[:, i], means_vector, cov_matrix)\n","sub_path":"redata/statistics/mahalonobis_distance.py","file_name":"mahalonobis_distance.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"450934187","text":"from keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg16 import decode_predictions, preprocess_input\nfrom keras.preprocessing.image import load_img, img_to_array\nimport numpy as np\n\n\nmodel = VGG16()\n\ndog = load_img('imgs/dog.jpg', target_size=(224, 224))\ndog = img_to_array(dog)\ncat = load_img('imgs/cat.jpg', target_size=(224, 224))\ncat = img_to_array(cat)\ngoma = load_img('imgs/goma.jpeg', target_size=(224, 224))\ngoma = img_to_array(goma)\n\n# convert RGB2BGR and centerize\ndog = preprocess_input(dog)\ncat = preprocess_input(cat)\ngoma = preprocess_input(goma)\n\ninput_array = np.stack([dog, cat, goma])\n\nprobs = model.predict(input_array)\nresults = decode_predictions(probs)\n\nassume_dog = results[0]\nassume_cat = results[1]\nassume_goma = results[2]\n\nprint(assume_dog)\nprint(assume_cat)\nprint(assume_goma)\n","sub_path":"deeplearning/keras/book/vgg16Series/usePretrainedModel/pretrained.py","file_name":"pretrained.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"183952100","text":"import numpy as np\nimport itertools\nimport fwdpy11 as fp11 #fwdpy11 \nfrom fwdpy11.model_params import SlocusParams\nimport fwdpy11.wright_fisher as wf\nimport fwdpy11.sampling as fps\nimport libsequence.polytable as polyt\nfrom libsequence.summstats import PolySIM\n\ndef str2byte(tup,fmtstring):\n byte_tup = (tup[0],bytearray(tup[1],fmtstring))\n return(byte_tup)\n\nclass RecordStats:\n \"\"\"\n Record some basic stats about the population\n including average fitness\n \"\"\"\n def __init__(self,ngen,rng):\n self.ngen=ngen\n self.generation = [] \n self.N = []\n self.rng=rng\n self.relative_load = []\n self.segregating_load = []\n self.fixed_load = []\n self.total_load = []\n self.fixed_deleterious = []\n self.fixed_neutral = []\n self.mean_deleterious_per_diploid = []\n self.mean_neutral_per_diploid = []\n self.cumulative_deleterious_frequency = []\n self.cumulative_neutral_frequency = []\n self.neutral_tajd = []\n self.total_tajd = []\n self.neutral_pi = []\n self.total_pi = []\n self.neutral_hprime = []\n self.total_hprime = []\n self.deleterious_het = [] \n \n def __call__(self,pop):\n \"\"\"\n The call operator will be given with the whole population.\n One can then operate in a read-only way, no copies made.\n \"\"\"\n if ( 
pop.generation % self.ngen == 0 ):\n #first get necessary information from the population\n N = len(pop.diploids) # Population size\n w = [ ind.w for ind in pop.diploids ] # fitness\n fixed_s = [mut.s for mut in pop.fixations if not mut.neutral]\n #Frequency of deleterious mutations\n cumulative_deleterious_frequency = np.sum([ j/float(2*N) for i,j in \n zip(pop.mutations,pop.mcounts) if \n not i.neutral and j > 0 ])\n cumulative_neutral_frequency = np.sum([ j/float(2*N) for i,j in \n zip(pop.mutations,pop.mcounts) if \n i.neutral and j > 0 ])\n #How many of each type of mutation does each diploid have?\n mean_deleterious_per_diploid = np.mean([len(pop.gametes[ind.first].smutations) + \n len(pop.gametes[ind.second].smutations) for \n ind in pop.diploids ])\n mean_deleterious_het = np.mean([len(set(\n pop.gametes[ind.first].smutations).symmetric_difference(\n pop.gametes[ind.second].smutations)) for\n ind in pop.diploids ])\n\n mean_neutral_per_diploid = np.mean([len(pop.gametes[ind.first].mutations) + \n len(pop.gametes[ind.second].mutations) for \n ind in pop.diploids]) \n #How many fixed mutations are there?\n fixed_deleterious = len(fixed_s)\n fixed_neutral = [mut.neutral for mut in pop.fixations].count(True)\n relative_load = 1 - np.mean(w)/np.max(w)\n segregating_load = 1 - np.mean(w)\n fixed_w = np.prod([1+2*s for s in fixed_s])\n fixed_load = 1 - fixed_w\n total_load = 1 - np.mean([fixed_w*ind.w for ind in pop.diploids])\n\n #Statistics on population samples:\n samp = fps.sample_separate(self.rng,pop,100,False)\n neutral_sample = polyt.SimData([str2byte(mut,'utf-8') \n for mut in samp[0]])\n combined_sample = polyt.SimData([str2byte(mut,'utf-8') \n for mut in \n list(itertools.chain.from_iterable(samp))])\n ps_neutral = PolySIM(neutral_sample)\n ps_combined = PolySIM(combined_sample)\n neutral_tajd = ps_neutral.tajimasd()\n total_tajd = ps_combined.tajimasd()\n neutral_pi = ps_neutral.thetapi()\n total_pi = ps_combined.thetapi()\n neutral_hprime = ps_neutral.hprime()\n total_hprime = ps_combined.hprime() \n #Now append these to the recorder\n self.generation.append(pop.generation)\n self.N.append(N)\n self.relative_load.append(relative_load)\n self.segregating_load.append(segregating_load)\n self.fixed_load.append(fixed_load)\n self.total_load.append(total_load)\n self.fixed_deleterious.append(fixed_deleterious)\n self.fixed_neutral.append(fixed_neutral)\n self.mean_deleterious_per_diploid.append(mean_deleterious_per_diploid)\n self.mean_neutral_per_diploid.append(mean_neutral_per_diploid)\n self.cumulative_deleterious_frequency.append(cumulative_deleterious_frequency)\n self.cumulative_neutral_frequency.append(cumulative_neutral_frequency)\n self.neutral_tajd.append(neutral_tajd)\n self.total_tajd.append(total_tajd)\n self.neutral_pi.append(neutral_pi)\n self.total_pi.append(total_pi)\n self.neutral_hprime.append(neutral_hprime)\n self.total_hprime.append(total_hprime)\n self.deleterious_het.append(mean_deleterious_het)\n \n","sub_path":"project/RecordStats.py","file_name":"RecordStats.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"259969133","text":"'''\n@author: xiongfei\n@license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.\n@contact: 386344277@qq.com\n@file: Mytest.py\n@time: 2018/4/12 上午11:16\n@desc: shanghaijiaotong university\n'''\nimport tensorflow as tf\nimport re\nfrom collections import Counter\nimport numpy as np\nfrom sklearn import 
preprocessing\nfrom tqdm import tqdm\n\nstop_words_path = './chinese_stopword.txt'\n\ndef get_stop_word(file):\n with open(file, 'r') as f:\n words = f.readlines()\n words_dict = {word.strip(): word.strip() for word in words}\n return words_dict\n\ndef pad_sentence(sentence, padding_word=\"\", sequence_length=400):\n if sequence_length > len(sentence):\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n else:\n new_sentence = sentence[:sequence_length]\n return new_sentence\n\ndef clean_line(line, stop_words):\n line = re.sub(r'[0-9|\u3000]', '', line)\n words = line.split(' ')\n filtered_words = [word for word in words[:-1] if word not in stop_words]\n return filtered_words, words[-1]\n\n\ndef data_label_load(filename):\n data_list = []\n y_list = []\n with open(filename, 'r') as f:\n stop_words = get_stop_word(stop_words_path)\n for line in f:\n line, label = clean_line(line, stop_words)\n data_list.append(line)\n y_list.append(label)\n return data_list, y_list\n\n\ndef get_vocabuary(data_list):\n word_count = Counter()\n for line in data_list:\n word_count.update(line)\n sorted_words = sorted(word_count.items(), key=lambda x: x[1], reverse=True)\n WordToindex = {item[0]: i for i, item in enumerate(sorted_words, start=2)}\n indexToWord = {i: item[0] for i, item in enumerate(sorted_words, start=2)}\n WordToindex[\"\"] = 0\n WordToindex[\"\"] = 1\n indexToWord[0] = \"\"\n indexToWord[1] = \"\"\n print(\"vocabuary_size: {}\".format(len(WordToindex)))\n return WordToindex, indexToWord\n\n\ndef build_example(datas, labels):\n # build example-format dicts\n examples = []\n for data, label in zip(datas, labels):\n example = {\"text\": data, \"label\": label}\n examples.append(example)\n return examples\n\n\ndef batch_iter(data, label, batch_size, num_steps, shuffle=True):\n data_size = len(label)\n num_batches_per_epoch = data_size // batch_size\n num_epoches = num_steps // num_batches_per_epoch\n for epoch in range(num_epoches):\n if shuffle:\n shuffle_index = np.random.permutation(np.arange(data_size))\n shuffle_data = data[shuffle_index]\n shuffle_label = label[shuffle_index]\n else:\n shuffle_data = data\n shuffle_label = label\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield shuffle_data[start_index: end_index], shuffle_label[start_index:end_index]\n\n\ndef build_features(config, examples, data_type, out_file, word2idx_dict):\n # set the maximum sequence length so elements can be padded and aligned\n para_limit = config.sequence_len\n # generate the tfrecord file\n print(\"Processing {} examples...\".format(data_type))\n writer = tf.python_io.TFRecordWriter(out_file)\n for example in tqdm(examples):\n # vectors\n # check that the data format is correct to avoid mistakes like this one\n raw_text = pad_sentence(example['text'], sequence_length=para_limit)\n #the dtype must be set here and must match before and after conversion, otherwise the data will be corrupted\n text = np.array([word2idx_dict[word] for word in raw_text],dtype=np.int32)\n label = np.array(example['label'], np.int32)\n #how to write serialized vectors and matrices: convert everything to BytesList\n record = tf.train.Example(features=tf.train.Features(feature={\n \"text\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[text.tostring()])),\n \"label\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[label.tostring()])),\n }))\n writer.write(record.SerializeToString())\n\ndef prepro(config):\n train_data, train_label = data_label_load(config.train_data_path)\n test_data, test_label = data_label_load(config.test_data_path)\n print(\"train data: {}\".format(len(train_data)))\n print(\"test data: 
{}\".format(len(test_data)))\n # label encode\n le = preprocessing.LabelEncoder()\n le.fit(train_label + test_label)\n train_label = le.transform(train_label)\n test_label = le.transform(test_label)\n train_examples = build_example(train_data, train_label)\n test_examples = build_example(test_data, test_label)\n WordToindex, indexToWord = get_vocabuary(train_data + test_data)\n build_features(config, train_examples, \"train\", config.train_record_file, WordToindex)\n build_features(config, test_examples, \"test\", config.test_record_file, WordToindex)\n","sub_path":"text_classification_multi/data_preprocess.py","file_name":"data_preprocess.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"405025410","text":"import os\nimport random\n\nfrom collections import namedtuple, deque\n\nimport numpy as np\nimport pandas as pd\n\nfrom quad_controller_rl import util\nfrom quad_controller_rl.agents.base_agent import BaseAgent\nfrom quad_controller_rl.agents.model import Actor, Critic\n\n\nclass OUNoise:\n '''Ornstein-Uhlenbeck process.'''\n\n def __init__(self, size, mu=None, theta=0.15, sigma=0.3):\n '''Initialize parameters and noise process.'''\n self.size = size\n self.mu = mu if mu is not None else np.zeros(self.size)\n self.theta = theta\n self.sigma = sigma\n self.state = np.ones(self.size) + self.mu\n self.reset()\n\n def reset(self):\n '''Reset the internal state (= noise) to mean (mu).'''\n self.state = self.mu\n\n def sample(self):\n '''Update internal state and return it as a noise sample.'''\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))\n self.state = x + dx\n return self.state\n\n\nclass ReplayBuffer:\n \"\"\"Fixed-size circular buffer to store experience tuples.\"\"\"\n\n def __init__(self, size=1000):\n \"\"\"Initialize a ReplayBuffer object.\"\"\"\n self.size = size # maximum size of buffer\n self.memory = [] # internal memory (list)\n self.idx = 0 # current index into circular buffer\n self.experience = namedtuple(\"Experience\",\n field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n\n def add(self, state, action, reward, next_state, done):\n \"\"\"Add a new experience to memory.\"\"\"\n # Create an Experience object, add it to memory\n # Note: If memory is full, start overwriting from the beginning\n if len(self.memory) < self.size:\n self.memory.append(None)\n self.memory[self.idx] = self.experience(state, action, reward, next_state, done)\n self.idx = (self.idx + 1) % self.size\n\n def sample(self, batch_size=64):\n \"\"\"Randomly sample a batch of experiences from memory.\"\"\"\n # Return a list or tuple of Experience objects sampled from memory\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory)\n\n\nclass DDPGAgentHover(BaseAgent):\n '''Reinforcement Learning agent that learns using DDPG.'''\n def __init__(self, task):\n # Task (environment) information\n self.task = task\n self.state_size = 7\n self.action_size = 3 # force only\n\n self.state_low = np.concatenate([\n self.task.observation_space.low[:3],\n np.array([0.0, 0.0, 0.0, 0.0])\n ])\n self.state_high = np.concatenate([\n self.task.observation_space.high[:3],\n self.task.observation_space.high[:3] - self.task.observation_space.low[:3],\n np.array([self.task.observation_space.high[2] - 10.0])\n ])\n self.state_range = self.state_high - self.state_low\n 
print(\"state low: {} state high: {} state range: {}\".format(\n self.state_low, self.state_high, self.state_range))\n\n # clip action\n self.action_low = self.task.action_space.low[:self.action_size]\n self.action_high = self.task.action_space.high[:self.action_size]\n\n # Actor (Policy) Model\n self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)\n self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)\n\n # Critic (Value) Model\n self.critic_local = Critic(self.state_size, self.action_size)\n self.critic_target = Critic(self.state_size, self.action_size)\n\n # Intialize target model parameters with local model parameters\n self.actor_target.model.set_weights(self.actor_local.model.get_weights())\n self.critic_target.model.set_weights(self.critic_local.model.get_weights())\n\n # Noise process\n self.noise = OUNoise(self.action_size)\n\n # Replay memory\n self.buffer_size = 100000\n self.batch_size = 64\n self.memory = ReplayBuffer(self.buffer_size)\n\n # Algorithm parameters\n self.gamma = 0.99 # discount factor\n self.tau = 0.001 # for soft update of target parameters\n\n # Score tracker\n self.best_score = -np.inf\n\n # Episode variables\n self.reset_episode_vars()\n\n # Save episode stats\n self.stats_filename = os.path.join(\n util.get_param('out'),\n 'hover/stats_{}.csv'.format(util.get_timestamp())) # path to CSV file\n self.stats_columns = ['episode', 'total_reward'] # specify columns to save\n print('Saving stats {} to {}'.format(self.stats_columns, self.stats_filename)) # [debug]\n\n # Save weights\n self.save_weights_every = 100\n self.actor_filename = os.path.join(\n util.get_param('out'),\n 'hover/actor_checkpoints_{}.h5'.format(util.get_timestamp())\n )\n self.critic_filename = os.path.join(\n util.get_param('out'),\n 'hover/critic_checkpoints_{}.h5'.format(util.get_timestamp())\n )\n print('Actor filename: ', self.actor_filename)\n print('Critic filename: ', self.critic_filename)\n \n self.episode_num = 1\n self.reset_episode_vars()\n\n def write_stats(self, stats):\n '''Write single episode stats to CSV file.'''\n df_stats = pd.DataFrame([stats], columns=self.stats_columns) # single-row dataframe\n df_stats.to_csv(self.stats_filename, mode='a', index=False,\n header=not os.path.isfile(self.stats_filename)) # write header first time only\n\n def preprocess_state(self, state):\n '''Reduce state vector to relevant dimensions.'''\n return state[:self.state_size]\n\n def postprocess_action(self, action):\n '''Return complete action vector.'''\n complete_action = np.zeros(self.task.action_space.shape) # shape: (6, )\n complete_action[0:self.action_size] = action\n return complete_action\n\n def reset_episode_vars(self):\n self.last_state = None\n self.last_action = None\n self.total_reward = 0.0\n self.count = 0\n self.noise.reset()\n\n def step(self, state, reward, done):\n # Transform state vector\n # state = state.reshape(1, -1) # convert to row vector\n state = self.preprocess_state(state) # reduce state vector\n state = (state - self.state_low) / self.state_range # scale to [0.0, 1.0]\n\n # Choose an action\n action = self.act(state)\n\n # Save experience / reward\n if self.last_state is not None and self.last_action is not None:\n self.memory.add(self.last_state, self.last_action, reward, state, done)\n self.total_reward += reward\n self.count += 1\n\n # Learn, if enough samples are available in memory\n if len(self.memory) > self.batch_size:\n experiences = 
self.memory.sample(self.batch_size)\n self.learn(experiences)\n\n if done:\n # Write episode stats\n self.write_stats([self.episode_num, self.total_reward])\n if self.episode_num % self.save_weights_every == 0:\n print(\"Saving model weights... (episode_num: {})\".format(self.episode_num))\n self.actor_local.model.save_weights(self.actor_filename)\n self.critic_local.model.save_weights(self.critic_filename)\n self.episode_num += 1\n\n score = self.total_reward / float(self.count) if self.count else 0.0\n if score > self.best_score:\n self.best_score = score\n print('DDPG: t = {:4d}, score = {:7.3f} (best = {:7.3f})'.format(self.count, score, self.best_score)) # [debug]\n self.reset_episode_vars()\n\n self.last_state = state\n self.last_action = action\n return self.postprocess_action(action)\n\n def act(self, states, add_noise=True):\n '''Return actions for given state(s) as per current policy.'''\n states = np.reshape(states, [-1, self.state_size])\n actions = self.actor_local.model.predict(states)\n if add_noise:\n actions += self.noise.sample() # add some noise for exploration\n return actions\n\n def learn(self, experiences):\n '''Update policy and value parameters using given batch of experience tuples.'''\n # Convert experience tuples to separate arrays for each element (states, actions, rewards, etc.)\n states = np.vstack([e.state for e in experiences if e is not None])\n actions = np.array([e.action for e in experiences if e is not None]).astype(np.float32).reshape(-1, self.action_size)\n rewards = np.array([e.reward for e in experiences if e is not None]).astype(np.float32).reshape(-1, 1)\n dones = np.array([e.done for e in experiences if e is not None]).astype(np.uint8).reshape(-1, 1)\n next_states = np.vstack([e.next_state for e in experiences if e is not None])\n\n # Get predicted next-state actions and Q values from target models\n actions_next = self.actor_target.model.predict_on_batch(next_states)\n Q_targets_next = self.critic_target.model.predict_on_batch([next_states, actions_next])\n\n # Compute Q targets for current states and train critic model (local)\n Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones)\n self.critic_local.model.train_on_batch(x=[states, actions], y=Q_targets)\n\n # Train actor model (local)\n action_gradients = np.reshape(self.critic_local.get_action_gradients([states, actions, 0]), (-1, self.action_size))\n self.actor_local.train_fn([states, action_gradients, 1]) # custom training function\n\n # Soft-update target models\n self.soft_update(self.critic_local.model, self.critic_target.model)\n self.soft_update(self.actor_local.model, self.actor_target.model)\n\n def soft_update(self, local_model, target_model):\n '''Soft update model parameters.'''\n local_weights = np.array(local_model.get_weights())\n target_weights = np.array(target_model.get_weights())\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n target_model.set_weights(new_weights)\n","sub_path":"quad_controller_rl/src/quad_controller_rl/agents/ddpg_agent_hover.py","file_name":"ddpg_agent_hover.py","file_ext":"py","file_size_in_byte":10354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"387072679","text":"import random\nn = random.randint(1,9)\ns = str(input('Какой сегодня день недели: '))\n\ndef str_n(s,n):\n if len(s) > n:\n return s.upper()\n else:\n return s\nprint 
(str_n(s,n))\n","sub_path":"HW-2/str_n.py","file_name":"str_n.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"257135388","text":"import math\nimport time\n\n#Problem 6: The sum of the squares of the first ten natural numbers is,\n\n# (1+2+\\cdots+10)^2=55^2=3025.\n# \n# Hence the difference between the sum of the squares of the first ten natural numbers and \n# the square of the sum is:\n\n# 3025-385=2640. \n \n# Find the difference between the sum of the squares of the first one hundred natural \n# numbers and the square of the sum.\n\n\nsumofsquares = 0\nsquareofsums = 0\n\n#Find the sum of all squares \ndef sumSquares(count):\n\n sos = 0\n for x in range(1,count+1):\n sos += x**2 \n return sos \n \n\n\n#Find the square of all sums method #1 ---- Slower ---- \ndef squareSums1(count):\n \n sos = 0\n for x in range(1,count+1):\n sos += x \n return sos**2 \n \n#Find the square of all sums method #2 ---- FASTER ----\ndef squareSums2(count):\n\n tot = ((count+1)*count)/2\n sos = tot**2\n return sos \n\n#Start run time clock for function 1 \nstart = time.time()\n\n# Set for loop for multiple interations (10000)\nfor x in range(10000): \n \n #Find difference using method 2 \n a = squareSums2(100) - sumSquares(100)\n \n#Return run time\nelapsed = (time.time() - start)\n\n\n#print answer\nprint('Sum of squares for 10: %s Found in: %s seconds') % (a,elapsed)\n \n\n\n\n\n","sub_path":"problem6.py","file_name":"problem6.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"439960733","text":"import json\nimport os, fnmatch\n\nLOG_DIRECTORY = \"./final_filtered_localized/\"\n\ndef main():\n midplane_list = []\n directory_list = os.scandir(LOG_DIRECTORY)\n for entry in directory_list:\n if(entry.is_file()):\n midplane_list.append(entry.name)\n\n print(\"Number of midplane files: {}\".format(len(midplane_list)))\n\n fatal_sequences = []\n for midplane in midplane_list:\n print(midplane)\n with open(\"{}{}\".format(LOG_DIRECTORY, midplane), \"r\") as log:\n data = json.load(log)\n\n sequence_list = []\n last_fatal_time = None\n prev_last_fatal = None\n for entry in data:\n # Hit Fatal event\n if(entry[\"SEVERITY\"] == \"FATAL\"):\n if(last_fatal_time == None\n or entry[\"EVENT_TIME\"] - last_fatal_time >= 7200):\n prev_last_fatal = last_fatal_time\n last_fatal_time = entry[\"EVENT_TIME\"]\n # Start the back up extract sequence\n new_sequence = []\n found_start = False\n sequence_list.reverse()\n for seq_item in sequence_list:\n # Find the start\n if(not found_start):\n # If the event is within 1 minute before the fatal event\n if(entry[\"EVENT_TIME\"] - seq_item[\"EVENT_TIME\"] >= 60):\n # If the event is within 45 minutes before the fatal event\n #if(entry[\"EVENT_TIME\"] - seq_item[\"EVENT_TIME\"] >= 2700):\n # If the event is within 15 minutes before the fatal event\n #if(entry[\"EVENT_TIME\"] - seq_item[\"EVENT_TIME\"] >= 900):\n # If the event is within 75 minutes before the fatal event\n #if(entry[\"EVENT_TIME\"] - seq_item[\"EVENT_TIME\"] >= 4500):\n # If the event is within 120 minutes before the fatal event\n #if(entry[\"EVENT_TIME\"] - seq_item[\"EVENT_TIME\"] >= 7200):\n new_sequence.append(seq_item)\n found_start = True\n else:\n new_sequence.append(seq_item)\n if(len(new_sequence) == 6):\n break\n\n print(\"New sequnce: {}\".format(len(new_sequence)))\n if(len(new_sequence) >= 2 and\n 
prev_last_fatal != None and\n new_sequence[len(new_sequence) - 1][\"EVENT_TIME\"] - prev_last_fatal >= 14400):\n print(len(new_sequence))\n new_sequence.reverse()\n fatal_sequences.append(new_sequence)\n else:\n print(\"Threw out sequence, because precceded by close FATAL event\")\n sequence_list = []\n else:\n prev_last_fatal = last_fatal_time\n last_fatal_time = entry[\"EVENT_TIME\"]\n sequence_list = []\n continue\n\n else:\n sequence_list.append(entry)\n\n print(len(fatal_sequences))\n\n fatal_seq_file = open(\"fatal_sequences\", \"w\")\n fatal_seq_file.write(json.dumps(fatal_sequences, indent=4))\n fatal_seq_file.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"fatal_sequence_extraction.py","file_name":"fatal_sequence_extraction.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"522504325","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import redirect, render, render_to_response, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import messages\nfrom Exam.models import Role\nfrom .models import Result\nfrom datetime import datetime\nfrom .control import get_info\n\n\n@login_required\ndef result_index(request):\n role_list = Role.objects.all().order_by('-id')\n if role_list.exists():\n role = role_list[0]\n result_list = Result.objects.all()\n this_year = datetime.today().year\n year_list = [str(this_year-1) + '-' + str(this_year), str(this_year-2) + '-' + str(this_year-1)]\n semester_list = Result.SEMESTER.values()\n semester_year_list = [x+y for x in year_list for y in semester_list]\n if 'name' in request.GET and request.GET['name']:\n result_list = result_list.filter(student__name__contains=request.GET['name'])\n if 'class_name' in request.GET and request.GET['class_name']:\n result_list = result_list.filter(class_name__name__contains=request.GET['class_name'])\n if 'account' in request.GET and request.GET['account']:\n result_list = result_list.filter(account__name__contains=request.GET['account'])\n if 'min_result' in request.GET and request.GET['min_result']:\n result_list = result_list.filter(result__gte=request.GET['min_result'])\n if 'max_result' in request.GET and request.GET['max_result']:\n result_list = result_list.filter(result__lte=request.GET['max_result'])\n if 'year' in request.GET and request.GET['year']:\n value = '0' + bin(int(request.GET['year']))[2:]\n year = year_list[int(value[-2])]\n semester = Result.SEMESTER.keys()[int(value[-1])]\n result_list = result_list.filter(year=year, semester=semester)\n count = len(result_list)\n return render(request, 'result/result_index.html', locals())\n\n\n@login_required\ndef result_edit(request):\n teacher = request.user.customer\n if request.method == 'POST':\n year, semester = get_info()\n\n account = request.POST['account']\n class_name = request.POST['class_name']\n result_list = request.POST.getlist('result')\n id_list = request.POST.getlist('id')\n\n for result_num, student_id in zip(result_list, id_list):\n result = Result()\n result.student_id = student_id\n result.account_id = account\n result.class_name_id = class_name\n result.year = year\n result.semester = semester\n result.result = result_num\n result.save()\n return HttpResponseRedirect(reverse('result_index'))\n else:\n course_list = teacher.teacher.all()\n account_list = 
list(set(course.account for course in course_list))\n return render(request, 'result/result_edit.html', locals())\n","sub_path":"Result/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"650543214","text":"import numpy as np\n\n\nclass DeadlockError(Exception):\n pass\n\n\ndef check_for_resource_deadlock(available, allocated, requested):\n '''\n Check the current process/resource state for a deadlock.\n\n :param list(list) available: resource availability matrix\n :param list(list) allocated: resource allocation matrix\n :param requested: unfulfilled process resource requests\n\n Example allocation matrix\n\n r0 r1 r2 rn\n ---------------\n p0 | 0 | 1 | 2 | . |\n p1 | 1 | 0 | 0 | . |\n pn | . | . | . | . |\n ---------------\n\n :raises DeadlockError:\n '''\n # Numpy arrays are more functional here\n available = np.array(available)\n allocated = np.array(allocated)\n requested = np.array(requested)\n\n process_count = allocated.shape[0]\n finished = np.zeros(process_count, bool)\n\n # Make a list of finished processes.\n # I.e. they have no resources.\n for i, alloc in enumerate(allocated):\n finished[i] = not np.any(alloc)\n\n try_to_finish(finished, requested, available, allocated)\n\n if not np.all(finished):\n raise DeadlockError('Deadlock has occured!')\n\ndef try_to_finish(finished, requested, available, allocated):\n '''\n Free resources by 'granting' processes their resources if\n they are available. Do this until it is no longer possible.\n\n :param np.ndarray finished:\n :param np.ndarray requested:\n :param np.ndarray available:\n :param np.ndarray allocated:\n\n :return None:\n '''\n for i, _ in enumerate(finished):\n if not finished[i] and np.all(requested[i] <= available):\n # \"Finish him!\"\n available += allocated[i]\n finished[i] = True\n\n # No matter where we in the list, restart\n # at the beginning because we may have just\n # unlocked an earlier process.\n try_to_finish(finished, requested, available, allocated)\n break\n\ndef parse_from_website(params):\n '''\n Parse json response from UI, which was written in\n great haste! 
and doesn't match what I originally wrote!!!\n\n !!!\n\n :param dict params:\n\n :return dict:\n '''\n resources = params.get('resources')\n if resources is None or not isinstance(resources, dict):\n raise ValueError\n\n # Available is a 1D array\n available = [int(v) for v in resources.values()]\n # Keep a list of names so we can cross reference\n # indexes later on.\n resource_names = list(resources.keys())\n\n processes = params.get('processes')\n if processes is None or not isinstance(processes, dict):\n raise ValueError\n\n # Fill in the allocated/request matrices\n allocated = [[0 for x in resources] for y in processes]\n requested = [[0 for x in resources] for y in processes]\n i = 0\n try:\n for process_name, info in processes.items():\n alloc_info = info['allocated']\n for alloc in alloc_info:\n # Seems legit\n resource_name = alloc.split('cube_')[1]\n resource_name, number_allocated = resource_name.split('_')\n resource_index = resource_names.index(resource_name)\n number_allocated = int(number_allocated)\n allocated[i][resource_index] = number_allocated\n request_info = info['requested']\n for req_info in request_info:\n # Also seems legit.\n resource_name = req_info.split('cube_')[1]\n resource_name, number_allocated = resource_name.split('_')\n resource_index = resource_names.index(resource_name)\n number_allocated = int(number_allocated)\n requested[i][resource_index] = number_allocated\n i += 1\n except Exception:\n # We did our best\n raise ValueError('Cannot parse from website')\n\n # Forgive me for my sins\n # Gotta update available!\n available = np.array(available)\n for alloc in allocated:\n available -= np.array(alloc)\n available = available.tolist()\n\n return {\n 'allocated': allocated,\n 'requested': requested,\n 'available': available,\n }\n\nif __name__ == '__main__':\n\n allocation = [\n [1, 0, 1],\n [0, 1, 0],\n ]\n available = [0, 0, 0]\n requests2 = [\n [0, 1, 0],\n [0, 0, 1]\n ]\n\n try:\n check_for_resource_deadlock(available, allocation, requests2)\n\n except DeadlockError:\n print('Deadlock')\n else:\n print('No Deadlock')\n","sub_path":"deadlock_detection.py","file_name":"deadlock_detection.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"512023156","text":"import argparse\nimport os\nos.environ['ANSI_COLORS_DISABLED']=\"1\"\nimport wk\n\ndef run_default():\n words = 'Hello, I am wpkit2 ,use me !'\n length = 50\n print('*' * length)\n print(words.center(length, '*'))\n print('*' * length)\ndef main_bold():\n parser = argparse.ArgumentParser()\n parser.add_argument('-command', type=str)\n args = parser.parse_args()\n if args.command is None:\n run_default()\n elif args.command=='deploy':\n pass\ndef main():\n import fire\n class Cli:\n @staticmethod\n def hi():\n run_default()\n\n @staticmethod\n def downtee( key, path=None, overwrite=False):\n from wk.extra.gitspace import Store\n store = Store()\n store.get(key, path=path, overwrite=overwrite)\n\n @staticmethod\n def uptee( key, path, recursive=False):\n from wk.extra.gitspace import Store\n store = Store()\n store.set(key, path, recursive)\n\n @staticmethod\n def deploy(service,*args,**kwargs):\n if service=='fsapp':\n from wk.applications import fsapp\n fsapp.setup_default(*args,**kwargs)\n elif service=='zspt':\n from wk.applications import zspt\n zspt.setup_default(*args,**kwargs)\n else:\n print(\"Service %s is not valid.\"%(service))\n fire.Fire(Cli)\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"build/lib/wk/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"603393431","text":"#!/usr/bin/python\n#coding:utf8\n\n# 自定义公用变量\n\n# 运行模式, sequential 连续, timer 定时\nrun_mode = 'sequential'\n\n# 是否测试环境\nis_test = True\n\n# 应用名称\napp = \"train\"\n\n# 默认运行脚本目录\n#base_path=/home/ap/dip/appjob/shelljob/TS150/violate\nbase_path = \"/home/ap/dip_ts150/ts150_script/ccb_risk_scoring\"\nbase_path = \"..\"\n\n# 运行目录\nrun_path = \"%s/train\" % base_path\n\n# 默认Hive数据库名\ndefault_hive_db = \"train\"\n\n# 默认GP数据库名\ndefault_gp_schema = \"app_siam\"\n\n# 日志级别\nlog_level = 'debug'\nlog_level = 'info'\n# log_level = 'error'\n\n# 作业起始日期,该日期前一天的数据不检查是否存在\napp_start_date = '20151106'\n\n# 通知消息接收人,可以是手机号或邮箱\nnotice_receiver = ['18159283921', 'wuzhaohui@tienon.com']\n\n","sub_path":"non_self/7_scheduler/var.py","file_name":"var.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"100116693","text":"from django.test import TestCase\nfrom django.views.generic import View\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom access.views import ManagedAccessViewMixin\n\nfrom .utils import (\n SuccessfulRequirement,\n UnSuccessfulRequirement,\n successful_response,\n unsuccessful_response,\n)\n\n\nclass TestView(View):\n dispatch_called = False\n\n def dispatch(self, *args, **kwargs):\n self.dispatch_called = True\n return successful_response\n\n\nclass FakeView(ManagedAccessViewMixin, TestView):\n pass\n\n\nclass TestManagedAccessViewMixin(TestCase):\n def setUp(self):\n self.view = FakeView()\n self.request = {}\n\n def test_successful(self):\n first = SuccessfulRequirement()\n second = SuccessfulRequirement()\n self.view.access_requirements = [first, second]\n result = self.view.dispatch(self.request)\n self.assertTrue(self.view.dispatch_called)\n self.assertEqual(result, successful_response)\n\n def test_first_unfulfilled(self):\n first = UnSuccessfulRequirement()\n second = SuccessfulRequirement()\n self.view.access_requirements = [first, second]\n result = self.view.dispatch(self.request)\n self.assertFalse(self.view.dispatch_called)\n self.assertEqual(result, unsuccessful_response)\n\n def test_second_unfulfilled(self):\n first = SuccessfulRequirement()\n second = UnSuccessfulRequirement()\n self.view.access_requirements = [first, second]\n result = self.view.dispatch(self.request)\n self.assertFalse(self.view.dispatch_called)\n self.assertEqual(result, unsuccessful_response)\n\n def test_get_access_requirements(self):\n with self.assertRaises(ImproperlyConfigured):\n self.view.get_access_requirements()\n self.view.access_requirements = []\n","sub_path":"access/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"413605098","text":"import maya.cmds as cmds\nimport maya.utils\n\n#Part1: Clean up PxrSurface names to always end in _Pxr\nshadingGroups = cmds.ls( type=\"shadingEngine\" )\npxrSurfs = cmds.ls( type=\"PxrSurface\" ) + cmds.ls( type=\"PxrMarschnerHair\" )\nlamberts = cmds.ls( type=\"lambert\" )\n\nproject_dir = mel.eval('workspace -q -rd')\n\ngrp_map_pxrSurfs = dict()\ngrp_map_lamberts = dict()\n\nfor surf in pxrSurfs:\n if ':' in surf:\n continue\n if not cmds.listConnections(surf):\n continue\n for con in 
cmds.listConnections(surf):\n if cmds.nodeType(con) == \"shadingEngine\":\n grp_map_pxrSurfs[con] = surf\n\nfor lamb in lamberts:\n if ':' in lamb:\n continue\n if not cmds.listConnections(lamb):\n continue\n for con in cmds.listConnections(lamb):\n print(cmds.nodeType(con))\n if cmds.nodeType(con) == \"shadingEngine\":\n grp_map_lamberts[con] = lamb\n\n\nfor grp in grp_map_pxrSurfs.keys():\n surf = grp_map_pxrSurfs[grp]\n\n #Rename the PxrSurface if it doesn't match convention\n if (len(surf) > 4):\n if (surf[-4:] != '_Pxr'):\n cmds.rename ( surf, surf + '_Pxr' )\n surf = surf + '_Pxr'\n\n #If the lambert doesn't already exist, create it\n if (not cmds.objExists(surf[:-4] + \"_lambert\")):\n lamb = cmds.createNode( 'lambert' )\n cmds.connectAttr(lamb + '.outColor', grp + '.surfaceShader', force=True )\n #rename the lambert viewport shader to match convention\n cmds.rename ( lamb, surf[:-4] + '_lambert' )\n lamb = surf[:-4] + '_lambert'\n\n #get the diffuse channel of each connected PxrSurface\n diffuse_textures = cmds.listConnections ( surf + \".diffuseColor\" )\n #if this PxrSurface doesn't have any diffuse textures\n if (not diffuse_textures):\n diffuse_color = cmds.getAttr (surf + \".diffuseColor\")[0]\n print( \"Old diffuse: \", diffuse_color, \"\\n\" )\n cmds.setAttr ( lamb + \".color\", diffuse_color[0], diffuse_color[1], diffuse_color[2] )\n else:\n tex_orig_name = \"\"\n if ( cmds.nodeType ( diffuse_textures[0]) == \"PxrTexture\" ):\n tex_orig_name = cmds.getAttr ( diffuse_textures[0] + \".filename\" )\n else:\n tex_orig_name = cmds.getAttr ( diffuse_textures[0] + \".fileTextureName\" )\n\n viewport_tex = \"\"\n if (not cmds.objExists(surf[:-4] + \"_viewport_tex\" )):\n viewport_tex = cmds.shadingNode('file', asTexture=True )\n else:\n viewport_tex = surf[:-4] + \"_viewport_tex\"\n cmds.connectAttr ( viewport_tex + \".outColor\", lamb + \".color\", force=True )\n\n tex_filepath = tex_orig_name\n tex_post = tex_orig_name[-4:]\n if (tex_post == \".tex\"):\n tex_filepath = tex_orig_name[:-4] \n\n cmds.setAttr ( viewport_tex + \".fileTextureName\", tex_filepath, type=\"string\" )\n \n #rename the new texture to match naming convention\n new_tex_name = surf[:-4] + \"_viewport_tex\"\n cmds.rename ( viewport_tex, new_tex_name )\n \n #rename the PxrSurface texture to match naming convention\n old_tex_rename = surf[:-4] + \"_render_tex\"\n cmds.rename ( diffuse_textures[0], old_tex_rename )\n\n\n #-----Create a _GLSL shader------------\n #--------------------------------------\n if (not cmds.objExists(surf[:-4] + \"_GLSL\")):\n new_GLSL = cmds.createNode( 'GLSLShader' );\n cmds.setAttr( new_GLSL + \".shader\", \"assets\cellShader_plugin\\flyers_signs_shader\\flyer_shader.ogsfx\", type=\"string\" )\n #Plug the old diffuse into the GLSL shader\n diffuse_textures = cmds.listConnections ( lamb + \".color\" )\n if (diffuse_textures):\n cmds.connectAttr ( diffuse_textures[0] + \".outColor\", new_GLSL + \".fdiffuse_color_tex\", force=True )\n cmds.setAttr ( new_GLSL + \".fuse_tex\", 1)\n else:\n diffuse_color_full = cmds.getAttr (surf + \".diffuseColor\")[0]\n trip = (diffuse_color_full[0], diffuse_color_full[1], diffuse_color_full[2])\n cmds.setAttr (new_GLSL + \".diffuseColorRGB\", trip[0], trip[1], trip[2], type=\"double3\")\n\n \"\"\"\n #Create a normal map and plug that in\n normalmap_textures = cmds.listConnections ( surf + \".bumpNormal\" )\n if (normalmap_textures):\n norm_orig_name = \"\"\n if ( cmds.nodeType ( normalmap_textures[0]) == \"PxrNormalMap\" ):\n norm_orig_name = 
cmds.getAttr ( normalmap_textures[0] + \".filename\" )\n            else:\n                norm_orig_name = cmds.getAttr ( normalmap_textures[0] + \".fileTextureName\" )\n            \n            viewport_norm = \"\"\n            if (not cmds.objExists(surf[:-4] + \"_view_norm\" )):\n                viewport_norm = cmds.shadingNode('file', asTexture=True )\n            else:\n                viewport_norm = surf[:-4] + \"_view_norm\"\n            cmds.connectAttr ( viewport_norm + \".outColor\", new_GLSL + \".normalMap\", force=True )\n\n            norm_filepath = norm_orig_name\n            norm_post = norm_orig_name[-4:]\n            if (norm_post == \".tex\"):\n                norm_filepath = norm_orig_name[:-4]\n\n            cmds.setAttr ( viewport_norm + \".fileTextureName\", norm_filepath, type=\"string\" )\n            \n            #rename the new texture to match naming convention\n            new_norm_name = surf[:-4] + \"_view_norm\"\n            cmds.rename ( viewport_norm, new_norm_name )\n            \n            #rename the PxrSurface texture to match naming convention\n            old_norm_rename = surf[:-4] + \"_render_norm\"\n            cmds.rename ( normalmap_textures[0], old_norm_rename )\n            cmds.connectAttr ( new_norm_name + \".outColor\", new_GLSL + \".normalMap\", force=True )\n            cmds.setAttr ( new_GLSL + \".useNormal\", 1)\n        else:\n            cmds.setAttr ( new_GLSL + \".useNormal\", 0)\n        \"\"\"\n        #Create a presence map and plug that in\n        presence_textures = cmds.listConnections ( surf + \".presence\" )\n        if (presence_textures):\n            presence_orig_name = cmds.getAttr ( presence_textures[0] + \".fileTextureName\" )\n            \n            viewport_presence = \"\"\n            if (not cmds.objExists(surf[:-4] + \"_view_presence\" )):\n                viewport_presence = cmds.shadingNode('file', asTexture=True )\n            else:\n                viewport_presence = surf[:-4] + \"_view_presence\"\n            cmds.connectAttr ( viewport_presence + \".outColor\", new_GLSL + \".presence_map\", force=True )\n\n            presence_filepath = presence_orig_name\n            presence_post = presence_orig_name[-4:]\n            if (presence_post == \".tex\"):\n                presence_filepath = presence_orig_name[:-4]\n\n            cmds.setAttr ( viewport_presence + \".fileTextureName\", presence_filepath, type=\"string\" )\n            \n            #rename the new texture to match naming convention\n            new_presence_name = surf[:-4] + \"_view_presence\"\n            cmds.rename ( viewport_presence, new_presence_name )\n            \n            #rename the PxrSurface texture to match naming convention\n            old_presence_rename = surf[:-4] + \"_render_presence\"\n            cmds.rename ( presence_textures[0], old_presence_rename )\n            \n            cmds.setAttr ( new_GLSL + \".use_presence\", 1)\n\n        new_GLSL_name = surf[:-4] + \"_GLSL\"\n        cmds.rename(new_GLSL, new_GLSL_name)\n\n    surfLocName = surf[:-4] + \"_Loc\"\n    loc = surfLocName\n    if (not cmds.objExists( surfLocName ) ):\n        loc = cmds.spaceLocator ( name=surfLocName )[0]\n        cmds.hyperShade( objects=grp )\n        if (len(cmds.ls(sl=True)) > 0):\n            mesh = cmds.ls(sl=True)[0]\n            mesh = mesh[:mesh.find('Shape')] + mesh[mesh.find('Shape')+5:]\n            pos = cmds.xform(mesh, q=True, ws=True, rp=True)\n            cmds.move ( pos[0], pos[1], pos[2], surfLocName, absolute=True )\n            cmds.parent( surfLocName, mesh )\n    cmds.hide (loc)\n\n    cmds.connectAttr ( loc + \"Shape.worldPosition.worldPositionX\", new_GLSL_name + \".objWorldOffsetX\" )\n    cmds.connectAttr ( loc + \"Shape.worldPosition.worldPositionY\", new_GLSL_name + \".objWorldOffsetY\" )\n    cmds.connectAttr ( loc + \"Shape.worldPosition.worldPositionZ\", new_GLSL_name + \".objWorldOffsetZ\" )\n\n\n","sub_path":"scripts/setupShadersFlyers.py","file_name":"setupShadersFlyers.py","file_ext":"py","file_size_in_byte":8270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"7476630","text":"import random\nimport pymysql\nimport 
json\nimport requests\n\njson_ = {\n    \"touser\":\"orUqn07wEfmVPRlVe-NrUaMU1snE\",\n    \"template_id\":\"Rt1l46Z84J4_01IsEjX4VgjgxyTyuSX3muZD6bilmRY\",\n    \"url\":\"http://weixin.qq.com/download\",\n\n    \"data\":{\n\n        \"hello\":{\n            \"value\":\"巧克力\",\n            \"color\":\"#173177\"\n        },\n\n    }\n    }\naccess_token = '14_OgN346pI-svU0mnfuBZDwLd8G_5xAp4dEgouaCRdxXjh6xe-iKm3i7z1fon6OnzNV40oRN2bz2do3aTvtlPhrZNtMMv1o0qHDlFAO2YcZqnhGoBWO4zumUIFHUMy8_bNaHYpD_tzSEV665nkBSZcAJAKBI'\nurl = 'https://api.weixin.qq.com/cgi-bin/message/template/send?access_token=' + access_token\njson_ = json.dumps(json_, ensure_ascii=False).encode('utf-8')\nres = requests.post(url, data=json_)\nprint(res)\nprint(res.text)","sub_path":"pogdesign_grab/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"288779001","text":"import numpy as np\r\nimport pandas as pd\r\ndef dataset(info_file):\r\n    if info_file is not None:\r\n        # 1. Read in patient and sample id from info file.\r\n        info = pd.read_csv(info_file)\r\n        info_selected = info[(info['Current criteria']=='TB') | (info['Current criteria']=='Not TB')] \r\n        \r\n        label_list = info_selected['Current criteria'].values\r\n        print(f\"Proportion of positive samples: {np.mean(label_list=='TB')}\")\r\n        \r\n        label_list = (label_list == 'TB').astype('uint8') # Convert label 'Confirmed TB' and 'Unlikely TB' to 1 and 0\r\n        \r\n        def format_sample_id(x):\r\n            return str(x).replace(' ', '').replace(')', '').replace('(', '_')\r\n        info_selected_sample_id = info_selected['Sample ID'].values\r\n        sample_id_list = [x for x in info_selected_sample_id]\r\n    else:\r\n        label_list = None\r\n    return label_list\r\n","sub_path":"dataset/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"619436722","text":"import os\nfrom teaman import settings\n\ndef get_image_paths(url):\n\tfilename=os.path.basename(url)\n\tbase,ext=os.path.splitext(filename)\n\tthumbname=base+'_thumb'+ext\n\tupload_path=\"%s%s%s\" % (settings.MEDIA_ROOT,'uploads/',filename)\n\tthumb_path=\"%s%s%s\" % (settings.MEDIA_ROOT,'thumbs/',thumbname)\n\treturn upload_path,thumb_path\n","sub_path":"teaman/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"146069221","text":"import asyncio\nimport base64\nimport hashlib\nimport json\nimport logging\nimport re\nfrom datetime import datetime, timedelta, timezone\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Dict, List, NamedTuple, Optional\n\nimport chevron\nimport msgpack\nfrom aiohttp import ClientConnectionError, ClientError, ClientSession\nfrom arq import Actor, BaseWorker, Drain, concurrent, cron\nfrom arq.utils import from_unix_ms, to_unix_ms, truncate\nfrom chevron import ChevronError\nfrom phonenumbers import parse as parse_number\nfrom phonenumbers import (NumberParseException, PhoneNumberFormat, PhoneNumberType, format_number, is_valid_number,\n                          number_type)\nfrom phonenumbers.geocoder import country_name_for_number, description_for_number\nfrom ua_parser.user_agent_parser import Parse as ParseUserAgent\n\nfrom .es import ElasticSearch\nfrom .models import THIS_DIR, BaseWebhook, ClickInfo, EmailSendMethod, MandrillWebhook, MessageStatus, SmsSendMethod\nfrom .render import EmailInfo, render_email\nfrom 
.render.main import MessageTooLong, SmsLength, apply_short_links, sms_length\nfrom .settings import Settings\nfrom .utils import ApiError, Mandrill, MessageBird\n\ntest_logger = logging.getLogger('morpheus.worker.test')\nmain_logger = logging.getLogger('morpheus.worker')\nMOBILE_NUMBER_TYPES = PhoneNumberType.MOBILE, PhoneNumberType.FIXED_LINE_OR_MOBILE\nONE_DAY = 86400\nONE_YEAR = ONE_DAY * 365\n\n\nclass EmailJob(NamedTuple):\n group_id: str\n send_method: str\n first_name: str\n last_name: str\n user_link: int\n address: str\n tags: List[str]\n pdf_attachments: List[dict]\n main_template: str\n mustache_partials: Dict[str, dict]\n macros: Dict[str, dict]\n subject_template: str\n company_code: str\n from_email: str\n from_name: str\n subaccount: str\n important: bool\n context: dict\n headers: dict\n\n\nclass SmsJob(NamedTuple):\n group_id: str\n send_method: str\n first_name: str\n last_name: str\n user_link: int\n number: str\n tags: List[str]\n main_template: str\n company_code: str\n country_code: str\n from_name: str\n context: dict\n\n\nclass Number(NamedTuple):\n number: str\n country_code: str\n number_formatted: str\n descr: str\n is_mobile: bool\n\n\nclass SmsData(NamedTuple):\n number: Number\n message: str\n shortened_link: dict\n length: SmsLength\n\n\nclass UpdateStatus(str, Enum):\n duplicate = 'duplicate'\n missing = 'missing'\n updated = 'updated'\n added = 'added'\n\n\nclass Sender(Actor):\n def __init__(self, settings: Settings=None, **kwargs):\n self.settings = settings or Settings()\n self.redis_settings = self.settings.redis_settings\n super().__init__(**kwargs)\n self.session = self.es = self.mandrill = self.messagebird = None\n self.mandrill_webhook_auth_key = None\n self.email_click_url = f'https://{self.settings.click_host_name}/l'\n self.sms_click_url = f'{self.settings.click_host_name}/l'\n\n async def startup(self):\n main_logger.info('Sender initialising session and elasticsearch and mandrill...')\n self.session = ClientSession(loop=self.loop)\n self.es = ElasticSearch(settings=self.settings, loop=self.loop)\n self.mandrill = Mandrill(settings=self.settings, loop=self.loop)\n self.messagebird = MessageBird(settings=self.settings, loop=self.loop)\n\n async def shutdown(self):\n self.session.close()\n self.es.close()\n self.mandrill.close()\n self.messagebird.close()\n\n @concurrent\n async def send_emails(self,\n recipients_key, *,\n uid,\n main_template,\n mustache_partials,\n macros,\n subject_template,\n company_code,\n from_email,\n from_name,\n method,\n subaccount,\n important,\n tags,\n context,\n headers):\n if method == EmailSendMethod.email_mandrill:\n coro = self._send_mandrill\n elif method == EmailSendMethod.email_test:\n coro = self._send_test_email\n else:\n raise NotImplementedError()\n tags.append(uid)\n main_logger.info('sending email group %s via %s', uid, method)\n base_kwargs = dict(\n group_id=uid,\n send_method=method,\n main_template=main_template,\n mustache_partials=mustache_partials,\n macros=macros,\n subject_template=subject_template,\n company_code=company_code,\n from_email=from_email,\n from_name=from_name,\n subaccount=subaccount,\n important=important,\n )\n if 'styles__sass' not in context and re.search('\\{\\{\\{ *styles *\\}\\}\\}', main_template):\n context['styles__sass'] = (THIS_DIR / 'extra' / 'default-styles.scss').read_text()\n\n drain = Drain(\n redis_pool=await self.get_redis_pool(),\n raise_task_exception=True,\n max_concurrent_tasks=10,\n shutdown_delay=60,\n )\n jobs = 0\n async with drain:\n async for 
raw_queue, raw_data in drain.iter(recipients_key):\n if not raw_queue:\n break\n\n msg_data = msgpack.unpackb(raw_data, encoding='utf8')\n data = dict(\n context=dict(context, **msg_data.pop('context')),\n headers=dict(headers, **msg_data.pop('headers')),\n tags=list(set(tags + msg_data.pop('tags'))),\n **base_kwargs,\n **msg_data,\n )\n drain.add(coro, EmailJob(**data))\n # TODO stop if worker is not running\n jobs += 1\n return jobs\n\n async def _send_mandrill(self, j: EmailJob):\n email_info = await self._render_email(j)\n if not email_info:\n return\n main_logger.info('%s: send to \"%s\" subject=\"%s\" body=%d attachments=[%s]',\n j.group_id, j.address, truncate(email_info.subject, 40), len(email_info.html_body),\n ', '.join(f'{a[\"name\"]}:{len(a[\"html\"])}' for a in j.pdf_attachments))\n data = {\n 'async': True,\n 'message': dict(\n html=email_info.html_body,\n subject=email_info.subject,\n from_email=j.from_email,\n from_name=j.from_name,\n to=[\n dict(\n email=j.address,\n name=email_info.full_name,\n type='to'\n )\n ],\n headers=email_info.headers,\n track_opens=True,\n track_clicks=False,\n auto_text=True,\n view_content_link=False,\n signing_domain=j.from_email[j.from_email.index('@') + 1:],\n subaccount=j.subaccount,\n tags=j.tags,\n inline_css=True,\n important=j.important,\n attachments=[a async for a in self._generate_base64_pdf(j.pdf_attachments)]\n ),\n }\n send_ts = datetime.utcnow()\n response, exc = None, None\n for i in range(3):\n try:\n response = await self.mandrill.post('messages/send.json', **data)\n except ClientConnectionError as e:\n exc = e\n main_logger.info('%s: client connection error, email: \"%s\", retrying...', j.group_id, j.address)\n await asyncio.sleep(0.5)\n except (ClientError, ApiError) as e:\n exc = e\n break\n else:\n exc = None\n break\n\n if exc or response is None:\n e_name = exc.__class__.__name__\n main_logger.exception('%s: error while posting to mandrill, email: \"%s\", %s: %s',\n j.group_id, j.address, e_name, exc)\n await self._store_email_failed(MessageStatus.send_request_failed, j, f'Error sending email: {e_name}')\n return\n\n data = await response.json()\n assert len(data) == 1, data\n data = data[0]\n assert data['email'] == j.address, data\n await self._store_email(data['_id'], send_ts, j, email_info)\n\n async def _send_test_email(self, j: EmailJob):\n email_info = await self._render_email(j)\n if not email_info:\n return\n\n data = dict(\n from_email=j.from_email,\n from_name=j.from_name,\n group_id=j.group_id,\n headers=email_info.headers,\n to_address=j.address,\n to_name=email_info.full_name,\n to_user_link=j.user_link,\n tags=j.tags,\n important=j.important,\n attachments=[f'{a[\"name\"]}:{base64.b64decode(a[\"content\"]).decode(errors=\"ignore\"):.40}'\n async for a in self._generate_base64_pdf(j.pdf_attachments)],\n )\n msg_id = re.sub(r'[^a-zA-Z0-9\\-]', '', f'{j.group_id}-{j.address}')\n send_ts = datetime.utcnow()\n output = (\n f'to: {j.address}\\n'\n f'msg id: {msg_id}\\n'\n f'ts: {send_ts}\\n'\n f'subject: {email_info.subject}\\n'\n f'data: {json.dumps(data, indent=2)}\\n'\n f'content:\\n'\n f'{email_info.html_body}\\n'\n )\n if self.settings.test_output: # pragma: no branch\n Path.mkdir(self.settings.test_output, parents=True, exist_ok=True)\n save_path = self.settings.test_output / f'{msg_id}.txt'\n test_logger.info('sending message: %s (saved to %s)', output, save_path)\n save_path.write_text(output)\n await self._store_email(msg_id, send_ts, j, email_info)\n\n async def _render_email(self, j: EmailJob):\n 
try:\n return render_email(j, self.email_click_url)\n except ChevronError as e:\n await self._store_email_failed(MessageStatus.render_failed, j, f'Error rendering email: {e}')\n\n async def _generate_base64_pdf(self, pdf_attachments):\n headers = dict(\n pdf_page_size='A4',\n pdf_zoom='1.25',\n pdf_margin_left='8mm',\n pdf_margin_right='8mm',\n )\n for a in pdf_attachments:\n async with self.session.get(self.settings.pdf_generation_url, data=a['html'], headers=headers) as r:\n if r.status == 200:\n pdf_content = await r.read()\n yield dict(\n type='application/pdf',\n name=a['name'],\n content=base64.b64encode(pdf_content).decode(),\n )\n else:\n data = await r.text()\n main_logger.warning('error generating pdf %s, data: %s', r.status, data)\n\n async def _store_email(self, uid, send_ts, j: EmailJob, email_info: EmailInfo):\n await self.es.post(\n f'messages/{j.send_method}/{uid}',\n company=j.company_code,\n send_ts=send_ts,\n update_ts=send_ts,\n status=MessageStatus.send,\n group_id=j.group_id,\n to_first_name=j.first_name,\n to_last_name=j.last_name,\n to_user_link=j.user_link,\n to_address=j.address,\n from_email=j.from_email,\n from_name=j.from_name,\n tags=j.tags,\n subject=email_info.subject,\n body=email_info.html_body,\n attachments=[f'{a[\"id\"] or \"\"}::{a[\"name\"]}' for a in j.pdf_attachments],\n )\n for url, token in email_info.shortened_link:\n await self.es.post(\n 'links/c/',\n token=token,\n url=url,\n company=j.company_code,\n send_method=j.send_method,\n send_message_id=uid,\n expires_ts=datetime.utcnow() + timedelta(days=365*50)\n )\n\n async def _store_email_failed(self, status: MessageStatus, j: EmailJob, error_msg):\n await self.es.post(\n f'messages/{j.send_method}',\n company=j.company_code,\n send_ts=datetime.utcnow(),\n update_ts=datetime.utcnow(),\n status=status,\n group_id=j.group_id,\n to_first_name=j.first_name,\n to_last_name=j.last_name,\n to_user_link=j.user_link,\n to_address=j.address,\n from_email=j.from_email,\n from_name=j.from_name,\n tags=j.tags,\n body=error_msg,\n attachments=[a['name'] for a in j.pdf_attachments]\n )\n\n @classmethod\n def validate_number(cls, number, country, include_description=True) -> Optional[Number]:\n try:\n p = parse_number(number, country)\n except NumberParseException:\n return\n\n if not is_valid_number(p):\n return\n\n is_mobile = number_type(p) in MOBILE_NUMBER_TYPES\n descr = None\n if include_description:\n country = country_name_for_number(p, 'en')\n region = description_for_number(p, 'en')\n descr = country if country == region else f'{region}, {country}'\n\n return Number(\n number=format_number(p, PhoneNumberFormat.E164),\n country_code=f'{p.country_code}',\n number_formatted=format_number(p, PhoneNumberFormat.INTERNATIONAL),\n descr=descr,\n is_mobile=is_mobile,\n )\n\n @concurrent\n async def send_smss(self,\n recipients_key, *,\n uid,\n main_template,\n company_code,\n cost_limit,\n country_code,\n from_name,\n method,\n context,\n tags):\n if method == SmsSendMethod.sms_test:\n coro = self._test_send_sms\n elif method == SmsSendMethod.sms_messagebird:\n coro = self._messagebird_send_sms\n else:\n raise NotImplementedError()\n tags.append(uid)\n main_logger.info('sending group %s via %s', uid, method)\n base_kwargs = dict(\n group_id=uid,\n send_method=method,\n main_template=main_template,\n company_code=company_code,\n country_code=country_code,\n from_name=from_name if country_code != 'US' else self.settings.us_send_number,\n )\n drain = Drain(\n redis_pool=await self.get_redis_pool(),\n 
raise_task_exception=True,\n max_concurrent_tasks=10,\n shutdown_delay=60,\n )\n jobs = 0\n async with drain:\n async for raw_queue, raw_data in drain.iter(recipients_key):\n if not raw_queue:\n break\n\n if cost_limit is not None:\n spend = await self.check_sms_limit(company_code)\n if spend >= cost_limit:\n main_logger.warning('cost limit exceeded %0.2f >= %0.2f, %s', spend, cost_limit, company_code)\n break\n msg_data = msgpack.unpackb(raw_data, encoding='utf8')\n data = dict(\n context=dict(context, **msg_data.pop('context')),\n tags=list(set(tags + msg_data.pop('tags'))),\n **base_kwargs,\n **msg_data,\n )\n drain.add(coro, SmsJob(**data))\n # TODO stop if worker is not running\n jobs += 1\n return jobs\n\n async def _sms_prep(self, j: SmsJob) -> Optional[SmsData]:\n number_info = self.validate_number(j.number, j.country_code, include_description=False)\n msg, error, shortened_link, msg_length = None, None, None, None\n if not number_info or not number_info.is_mobile:\n error = f'invalid mobile number \"{j.number}\"'\n main_logger.warning('invalid mobile number \"%s\" for \"%s\", not sending', j.number, j.company_code)\n else:\n shortened_link = apply_short_links(j.context, self.sms_click_url, 12)\n try:\n msg = chevron.render(j.main_template, data=j.context)\n except ChevronError as e:\n error = f'Error rendering SMS: {e}'\n else:\n try:\n msg_length = sms_length(msg)\n except MessageTooLong as e:\n error = str(e)\n\n if error:\n await self.es.post(\n f'messages/{j.send_method}',\n company=j.company_code,\n send_ts=datetime.utcnow(),\n update_ts=datetime.utcnow(),\n status=MessageStatus.render_failed,\n to_first_name=j.first_name,\n to_last_name=j.last_name,\n to_user_link=j.user_link,\n to_address=number_info.number_formatted if number_info else j.number,\n group_id=j.group_id,\n from_name=j.from_name,\n tags=j.tags,\n body=error,\n )\n else:\n return SmsData(number=number_info, message=msg, shortened_link=shortened_link, length=msg_length)\n\n async def _test_send_sms(self, j: SmsJob):\n sms_data = await self._sms_prep(j)\n if not sms_data:\n return\n\n # remove the + from the beginning of the number\n msg_id = f'{j.group_id}-{sms_data.number.number[1:]}'\n send_ts = datetime.utcnow()\n cost = 0.012 * sms_data.length.parts\n output = (\n f'to: {sms_data.number}\\n'\n f'msg id: {msg_id}\\n'\n f'ts: {send_ts}\\n'\n f'group_id: {j.group_id}\\n'\n f'tags: {j.tags}\\n'\n f'company_code: {j.company_code}\\n'\n f'from_name: {j.from_name}\\n'\n f'cost: {cost}\\n'\n f'length: {sms_data.length}\\n'\n f'message:\\n'\n f'{sms_data.message}\\n'\n )\n if self.settings.test_output: # pragma: no branch\n Path.mkdir(self.settings.test_output, parents=True, exist_ok=True)\n save_path = self.settings.test_output / f'{msg_id}.txt'\n test_logger.info('sending message: %s (saved to %s)', output, save_path)\n save_path.write_text(output)\n await self._store_sms(msg_id, send_ts, j, sms_data, cost)\n\n async def _messagebird_get_mcc_cost(self, redis, mcc):\n rates_key = 'messagebird-rates'\n if not await redis.exists(rates_key):\n # get fresh data on rates by mcc\n main_logger.info('getting fresh pricing data from messagebird...')\n url = (\n f'{self.settings.messagebird_pricing_api}'\n f'?username={self.settings.messagebird_pricing_username}'\n f'&password={self.settings.messagebird_pricing_password}'\n )\n async with self.session.get(url) as r:\n assert r.status == 200, (r.status, await r.text())\n data = await r.json()\n if not next((1 for g in data if g['mcc'] == '0'), None):\n main_logger.error('no 
default messagebird pricing with mcc \"0\"', extra={\n 'data': data,\n })\n data = {g['mcc']: f'{float(g[\"rate\"]):0.5f}' for g in data}\n await asyncio.gather(\n redis.hmset_dict(rates_key, data),\n redis.expire(rates_key, ONE_DAY),\n )\n rate = await redis.hget(rates_key, mcc, encoding='utf8')\n if not rate:\n main_logger.warning('no rate found for mcc: \"%s\", using default', mcc)\n rate = await redis.hget(rates_key, '0', encoding='utf8')\n assert rate, f'no rate found for mcc: {mcc}'\n return float(rate)\n\n async def _messagebird_get_number_cost(self, number: Number):\n cc_mcc_key = f'messagebird-cc:{number.country_code}'\n pool = await self.get_redis_pool()\n async with pool.get() as redis:\n mcc = await redis.get(cc_mcc_key)\n if mcc is None:\n main_logger.info('no mcc for %s, doing HLR lookup...', number.number)\n api_number = number.number.replace('+', '')\n await self.messagebird.post(f'lookup/{api_number}/hlr')\n data = None\n for i in range(30):\n r = await self.messagebird.get(f'lookup/{api_number}')\n data = await r.json()\n if data['hlr']['status'] == 'active':\n main_logger.info('found result for %s after %d attempts %s',\n number.number, i, json.dumps(data, indent=2))\n break\n await asyncio.sleep(1)\n mcc = str(data['hlr']['network'])[:3]\n await redis.setex(cc_mcc_key, ONE_YEAR, mcc)\n return await self._messagebird_get_mcc_cost(redis, mcc)\n\n async def _messagebird_send_sms(self, j: SmsJob):\n sms_data = await self._sms_prep(j)\n if sms_data is None:\n return\n msg_cost = await self._messagebird_get_number_cost(sms_data.number)\n\n cost = sms_data.length.parts * msg_cost\n send_ts = datetime.utcnow()\n main_logger.info('sending SMS to %s, parts: %d, cost: %0.2fp',\n sms_data.number.number, sms_data.length.parts, cost * 100)\n r = await self.messagebird.post(\n 'messages',\n originator=j.from_name,\n body=sms_data.message,\n recipients=[sms_data.number.number],\n allowed_statuses=201,\n reference='morpheus', # required to prompt status updates to occur\n )\n data = await r.json()\n if data['recipients']['totalCount'] != 1:\n main_logger.error('not one recipients in send response', extra={'data': data})\n await self._store_sms(data['id'], send_ts, j, sms_data, cost)\n\n async def _store_sms(self, uid, send_ts, j: SmsJob, sms_data: SmsData, cost: float):\n await self.es.post(\n f'messages/{j.send_method}/{uid}',\n company=j.company_code,\n send_ts=send_ts,\n update_ts=send_ts,\n status=MessageStatus.send,\n group_id=j.group_id,\n to_first_name=j.first_name,\n to_last_name=j.last_name,\n to_user_link=j.user_link,\n to_address=sms_data.number.number_formatted,\n from_name=j.from_name,\n tags=j.tags,\n body=sms_data.message,\n cost=cost,\n extra=sms_data.length._asdict(),\n )\n for url, token in sms_data.shortened_link:\n await self.es.post(\n 'links/c/',\n token=token,\n url=url,\n company=j.company_code,\n send_method=j.send_method,\n send_message_id=uid,\n expires_ts=datetime.utcnow() + timedelta(days=90)\n )\n\n async def check_sms_limit(self, company_code):\n r = await self.es.get(\n 'messages/_search?size=0',\n query={\n 'bool': {\n 'filter': [\n {\n 'term': {'company': company_code}\n },\n {\n 'range': {'send_ts': {'gte': 'now-28d/d'}}\n }\n ]\n }\n },\n aggs={\n 'total_spend': {'sum': {'field': 'cost'}}\n }\n )\n data = await r.json()\n return data['aggregations']['total_spend']['value']\n\n @concurrent(Actor.LOW_QUEUE)\n async def update_mandrill_webhooks(self, events):\n mandrill_webhook = MandrillWebhook(events=events)\n # do in a loop to avoid elastic search 
conflict\n statuses = {}\n for m in mandrill_webhook.events:\n status = await self.update_message_status('email-mandrill', m, log_each=False)\n if status in statuses:\n statuses[status] += 1\n else:\n statuses[status] = 1\n main_logger.info('updating %d messages: %s', len(mandrill_webhook.events),\n ' '.join(f'{k}={v}' for k, v in statuses.items()))\n\n @concurrent\n async def store_click(self, *, target, ip, ts, user_agent, send_method, send_message_id):\n extra = {\n 'target': target,\n 'ip': ip,\n 'user_agent': user_agent,\n }\n if user_agent:\n ua_dict = ParseUserAgent(user_agent)\n platform = ua_dict['device']['family']\n if platform in {'Other', None}:\n platform = ua_dict['os']['family']\n extra['user_agent_display'] = ('{user_agent[family]} {user_agent[major]} on '\n '{platform}').format(platform=platform, **ua_dict).strip(' ')\n\n # TODO process ip and add geo info\n m = ClickInfo(\n ts=ts,\n status='click',\n message_id=send_message_id,\n extra_=extra\n )\n return await self.update_message_status(send_method, m)\n\n async def update_message_status(self, es_type, m: BaseWebhook, log_each=True) -> UpdateStatus:\n h = hashlib.md5(f'{to_unix_ms(m.ts)}-{m.status}-{json.dumps(m.extra(), sort_keys=True)}'.encode())\n ref = f'event-{h.hexdigest()}'\n async with await self.get_redis_conn() as redis:\n v = await redis.incr(ref)\n if v > 1:\n log_each and main_logger.info('event already exists %s, ts: %s, '\n 'status: %s. skipped', m.message_id, m.ts, m.status)\n return UpdateStatus.duplicate\n await redis.expire(ref, 86400)\n\n r = await self.es.get(f'messages/{es_type}/{m.message_id}', allowed_statuses=(200, 404))\n if r.status == 404:\n return UpdateStatus.missing\n data = await r.json()\n\n old_update_ts = from_unix_ms(data['_source']['update_ts'])\n if m.ts.tzinfo:\n old_update_ts = old_update_ts.replace(tzinfo=timezone.utc)\n\n # give 1 second \"lee way\" for new event to have happened just before the old event\n status = UpdateStatus.updated if m.ts >= (old_update_ts - timedelta(seconds=1)) else UpdateStatus.added\n\n log_each and main_logger.info('adding event %s, ts: %s, status: %s, updating status: %r',\n m.message_id, m.ts, m.status, status == UpdateStatus.updated)\n await self.es.post(\n f'events/{es_type}/',\n message=m.message_id,\n ts=m.ts,\n status=m.status,\n extra=m.extra(),\n timeout_=20,\n )\n if status == UpdateStatus.updated:\n try:\n await self.es.post(f'messages/{es_type}/{m.message_id}/_update?retry_on_conflict=5',\n doc={'update_ts': m.ts, 'status': m.status}, timeout_=30)\n except ApiError as e: # pragma: no cover\n # no error here if we know the problem\n if e.status == 409:\n main_logger.info('ElasticSearch conflict for %s, ts: %s, status: %s', m.message_id, m.ts, m.status)\n else:\n raise\n except asyncio.TimeoutError: # pragma: no cover\n main_logger.info('timeout updating message %s, ts: %s, status: %s', m.message_id, m.ts, m.status)\n return status\n\n\nclass AuxActor(Actor): # pragma: no cover\n def __init__(self, settings: Settings = None, **kwargs):\n self.settings = settings or Settings()\n self.redis_settings = self.settings.redis_settings\n super().__init__(**kwargs)\n self.es = None\n\n async def startup(self):\n main_logger.info('AuxActor initialising elasticsearch...')\n self.es = ElasticSearch(settings=self.settings, loop=self.loop)\n\n async def shutdown(self):\n self.es.close()\n\n @cron(hour=3, minute=0)\n async def snapshot_es(self):\n await self.es.create_snapshot()\n\n\nclass Worker(BaseWorker): # pragma: no cover\n max_concurrent_tasks = 4\n 
timeout_seconds = 1200\n shadows = [Sender, AuxActor]\n\n def __init__(self, **kwargs):\n self.settings = Settings(sender_cls='app.worker.Sender')\n kwargs['redis_settings'] = self.settings.redis_settings\n super().__init__(**kwargs)\n\n async def shadow_kwargs(self):\n d = await super().shadow_kwargs()\n d['settings'] = self.settings\n return d\n","sub_path":"morpheus/app/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":29127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"304544160","text":"import vk\r\nimport time\r\n\r\n\r\ndef intersection(f1, f2):\r\n s1 = set(f1)\r\n s2 = set(f2)\r\n return set.intersection(s1, s2)\r\n\r\n\r\ndef university_id(query):\r\n session = vk.Session()\r\n vkapi = vk.API(session)\r\n university = vkapi.database.getUniversities(q=query)\r\n print(university)\r\n\r\n\r\nclass Statistics():\r\n our_pids = []\r\n\r\n def __init__(self, vkapi):\r\n self.vkapi = vkapi\r\n with open(\"publics.txt\", 'r', encoding=\"utf8\") as data:\r\n for line in data:\r\n public_id = line.split(')')[0][1:]\r\n self.our_pids.append(int(public_id))\r\n\r\n def show_covering(self, user_id):\r\n user_publics = self.vkapi.groups.get(user_id=user_id, filter='publics', extended=1)[1:]\r\n time.sleep(0.3)\r\n total = len(user_publics)\r\n counter = 0\r\n for up in user_publics:\r\n pid = up['gid']\r\n if pid in self.our_pids:\r\n counter += 1\r\n\r\n print(\"{0:0.1f}% ({1} out of {2})\".format(counter * 100 / total, counter, total))\r\n\r\n\r\n def show_covering_stats(self, users):\r\n for u in users:\r\n self.show_covering(u['uid'])","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"72076278","text":"from apscheduler.schedulers.asyncio import AsyncIOScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nimport differ\nimport docker\nimport configparser\nimport etcd_client\nimport json\nimport base64\nimport glob\nimport os\nimport logging\nimport git\nimport subprocess\nfrom datetime import datetime\n\n\ndocker_client = docker.from_env()\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nscheduler = AsyncIOScheduler()\nscheduler.add_executor('processpool')\njobstore = False\nif config.has_section('POSTGRES') and config['POSTGRES']['DB'] != '':\n postgres_user = config['POSTGRES']['USER']\n postgres_pass = config['POSTGRES']['PASSWORD']\n postgres_db = config['POSTGRES']['DB']\n postgres_host = config['POSTGRES']['HOST']\n url = 'postgresql://{}:{}@{}/{}'.format(postgres_user, postgres_pass,\n postgres_host, postgres_db)\n scheduler.add_jobstore('sqlalchemy', url=url)\n jobstore = True\n#logging.basicConfig(level=logging.DEBUG)\n#logging.getLogger('apscheduler').setLevel(logging.DEBUG)\nscheduler.start()\n\n\ndef schedule_run(data):\n #data = request.get_json()\n response = {}\n env = {}\n command = []\n renku = False\n container = data['container']\n response['container'] = container\n print(container)\n tool = data['tool']\n response['tool'] = tool\n print(tool)\n dataset = data['dataset']\n response['dataset'] = dataset\n print(dataset)\n if 'env' in data:\n env = data['env']\n if 'command' in data:\n command = data['command']\n if data['renku']:\n renku = True\n if data['cron']:\n freq = data['freq']\n if freq == 'daily':\n job = scheduler.add_job(run_container, 'interval', days=1,\n args=[container, command, env, tool, dataset, renku], 
id=tool,\n replace_existing=True,\n misfire_grace_time=64800, coalesce=True)\n elif freq == 'weekly':\n job = scheduler.add_job(run_container, 'interval', weeks=1,\n args=[container, command, env, tool, dataset, renku], id=tool,\n replace_existing=True,\n misfire_grace_time=64800, coalesce=True)\n else:\n job = scheduler.add_job(run_container, CronTrigger.from_crontab(freq),\n args=[container, command, env, tool, dataset, renku], id=tool,\n replace_existing=True,\n misfire_grace_time=64800, coalesce=True)\n response['job'] = job.id\n return response\n else:\n response['exec_result'] = run_container(container, command, env, tool, dataset, renku)\n return response\n\n\ndef run_container(container, command, env, tool, dataset, renku):\n result = {}\n status = \"\"\n if renku:\n datadir = f'{dataset}/data/input'\n docker_client.containers.run(container, command=command, environment=env,\n volumes={datadir: {'bind': '/usr/src/app/data'},\n '/var/run/docker.sock':\n {'bind': '/var/run/docker.sock'},\n '/usr/bin/docker':\n {'bind': '/usr/bin/docker'}},\n network='host')\n status = renku_update(dataset)\n return status\n else:\n docker_client.containers.run(container, command=command, environment=env,\n volumes={dataset: {'bind': '/usr/src/app/data'},\n '/var/run/docker.sock':\n {'bind': '/var/run/docker.sock'},\n '/usr/bin/docker':\n {'bind': '/usr/bin/docker'}},\n network='host')\n result = differ.detect(dataset, tool)\n return result\n\n\ndef renku_update(path):\n # Get the Renku project repo\n repo = git.Repo(path)\n cwd = os.getcwd()\n os.chdir(path)\n # Attempt a pull\n try:\n #origin = repo.remotes.origin\n #origin.pull()\n subprocess.run('git pull', shell=True)\n except:\n logging.info(\"Pull not completed\")\n #os.chdir(cwd)\n # Commit and push new data\n try:\n #repo.git.add('.')\n #repo.git.commit(m=\"Auto: Data update\")\n #repo.git.push()\n subprocess.run('git add .', shell=True)\n subprocess.run('git commit -m \"Auto: Data update\"', shell=True)\n subprocess.run('git push', shell=True)\n except:\n logging.error(\"Data update failed\")\n #os.chdir(cwd)\n return \"Error pushing data to Renku\"\n # Run the renku workflow\n try:\n subprocess.run(\"renku -S update\", shell=True)\n except:\n logging.error(\"Renku update failed\")\n #os.chdir(cwd)\n return \"Error running Renku workflow\"\n # Push changes\n try:\n subprocess.run('git push', shell=True)\n except:\n logging.error(\"Final push failed\")\n #os.chdir(cwd)\n return \"Error pushing workflow result\"\n os.chdir(cwd)\n return \"Renku project successfully updated\"\n\n\n\ndef listen():\n try:\n print(etcd_client.list('notifications'))\n logging.info(etcd_client.list('notifications'))\n # Send notifications as email\n # Delete notifications\n except:\n print(\"No notifications\")\n logging.info(\"No notifications\")\n\ndef list_jobs():\n job_list = scheduler.get_jobs()\n response = {}\n for item in job_list:\n response[item.id] = str(item.next_run_time)\n return response\n\n\ndef delete_job(id):\n scheduler.remove_job(id)\n return \"Unscheduled \" + id\n\n\nif jobstore:\n scheduler.add_job(listen, 'interval', seconds=10, id = 'listen',\n replace_existing=True,\n misfire_grace_time=5, coalesce=True)\n","sub_path":"schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"184366932","text":"import random\r\nimport sys\r\nimport time\r\nimport tkinter as tk\r\nimport numpy as np\r\nimport math\r\n\r\nke = 
10000\r\nm = 1\r\n\r\ndef move_to(x, y):\r\n canvas.coords(object_simulated,x-10,y-10,x+10,y+10)\r\n canvas.create_oval(x-0.5,y-0.5,x+0.5,y+0.5, fill =\"black\")\r\n root.update()\r\n return\r\n\r\n\r\nroot = tk.Tk() # spawns the window (which will be called \"root\")\r\nroot.title(\"Simulation of Equation received\")\r\ncanvas = tk.Canvas(root, width=1300, height=740) # creates a canvas to draw on\r\ncanvas.pack() # spawns the squares and pack them in the canvas\r\n\r\nobject_simulated = canvas.create_oval(400, 400, 405, 405, fill='white')\r\nroot.update()\r\n\r\ndef find_acceleration(x1,my_xs,q1,my_qs,real_distance):\r\n global ke\r\n global m\r\n acceleration = 0\r\n direction = 1\r\n for i in range (len(my_xs)):\r\n try:\r\n distance = x1 - my_xs[i] \r\n print(distance)\r\n print(\"Is the distance \\n\")\r\n if (distance < 0):\r\n direction = -1\r\n acceleration += (distance*ke*q1*my_qs[i])/((real_distance[i]**3)*m)\r\n except ZeroDivisionError:\r\n acceleration += 0\r\n print(\"Calculating acceleration \" + str(acceleration))\r\n #print(ke,x1,q1,my_xs,my_qs,m)\r\n return acceleration\r\n\r\ndef find_velocity (vi,a):\r\n vf = vi + a*dt_prime\r\n print(\"Calculating velocity \" + str(vf))\r\n return vf\r\ndef find_position(xi, vi):\r\n xf = xi + vi*dt_prime\r\n print(\"Calculating position \" + str(xf))\r\n return xf\r\n\r\n\r\n\r\nrandom.seed()\r\n\r\n\r\nx_prime = [500, 500, 500, 500, 500, 500]\r\ny_prime = [500, 500, 500, 500, 500, 500]\r\nq_prime = 10\r\nvx_prime = [random.randint(-350, 350) for i in range(6)]\r\nvy_prime = [random.randint(-350, 350) for i in range(6)]\r\ndt_prime = 0.01\r\nenergy_loss_x_prime = 0.99\r\nenergy_loss_y_prime = 0.99\r\n\r\ncharges_value = [-10,-5, -10, random.randint(-20,-5),0,0,0]\r\ncharges_x = [600,500,200, random.randint(100,1000),0,0,0]\r\ncharges_y = [500,400,400,random.randint(100,700),0,0,0]\r\ntrue_distances = [0,0,0,0,0,0,0]\r\n\r\ndef spawn_charges():\r\n j = random.randint(3,7)\r\n for i in range(j):\r\n my_choice = [random.randint(-20,-5) for l in range(4)]\r\n charges_value[i] = random.choice(my_choice)\r\n charges_x[i] = random.randint(100,1300)\r\n charges_y[i] = random.randint(100,700)\r\n if ((len(charges_x) - j - 1) > 0):\r\n for i in range(j, len(charges_x)-1):\r\n charges_value[i] = 0\r\n charges_x[i] = -200\r\n charges_y[i] = -200\r\n for i in range(len(charges_x)):\r\n if (charges_value[i] < 0):\r\n s = canvas.create_oval(charges_x[i] - 5, charges_y[i] - 5, charges_x[i] + 5, charges_y[i] + 5, fill='red')\r\n charges_simulated.append(s)\r\n else:\r\n s = canvas.create_oval(charges_x[i] - 5, charges_y[i] - 5, charges_x[i] + 5, charges_y[i] + 5, fill='blue')\r\n charges_simulated.append(s)\r\n print(str(charges_x[i])+str(charges_y[i]) + \"those are the coordinates\" )\r\n root.update()\r\n return\r\n\r\n\r\ncharges_simulated = []\r\n\r\nspawn_charges()\r\nfor i in range(len(charges_x)):\r\n s = canvas.create_oval(charges_x[i] - charges_value[i]/2, charges_y[i] - charges_value[i]/2, charges_x[i] + charges_value[i]/2, charges_y[i] + charges_value[i]/2, fill='red')\r\n charges_simulated.append(s)\r\n print(str(charges_x[i])+str(charges_y[i]) + \"those are the coordinates\" )\r\nroot.update()\r\n\r\nskip = 0\r\nfor i in range (0,5000):\r\n for k in range(0, 6):\r\n\t move_to(x_prime[k],y_prime[k])\r\n\t print(\"this is x_prime \\n\")\r\n\r\n\t for j in range (len(charges_x)):\r\n\t true_distances[j] = math.sqrt((charges_x[j]-x_prime[k])**2 + (charges_y[j]-y_prime[k])**2)\r\n\t print(\"Euclidean distance to planet \" + str(j) + \" is \" 
+ str(true_distances[j]) + \" km\")\r\n\t if (true_distances[j] <= 15):\r\n\t skip = 1\r\n\r\n\t if (skip ==1):\r\n\t vx_prime[k] = -vx_prime[k]*energy_loss_x_prime\r\n\t vy_prime[k] = -vy_prime[k]*energy_loss_y_prime\r\n\t x_prime[k] = find_position(x_prime[k],vx_prime[k])\r\n\t y_prime[k] = find_position(y_prime[k],vy_prime[k])\r\n\t print(\"Reversing position\")\r\n\t time.sleep(0.001)\r\n\t skip = 0\r\n\t continue\r\n\r\n\t ax_prime = find_acceleration(x_prime[k],charges_x,q_prime,charges_value,true_distances)\r\n\t vx_prime[k] = find_velocity(vx_prime[k],ax_prime)\r\n\t x_prime[k] = find_position(x_prime[k],vx_prime[k])\r\n\t print(\"this is x_prime :\" + str(x_prime[k]))\r\n\t ay_prime = find_acceleration(y_prime[k],charges_y,q_prime,charges_value,true_distances)\r\n\t vy_prime[k] = find_velocity(vy_prime[k],ay_prime)\r\n\t y_prime[k] = find_position(y_prime[k],vy_prime[k])\r\n\t print(\"this is y_prime\" + str(y_prime[k]))\r\n\t time.sleep(0.001)\r\n\r\n","sub_path":"animationOfSpaceship.py","file_name":"animationOfSpaceship.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"591004058","text":"\"\"\"\n23\nmerge k sorted lists\nhard\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def mergeKLists(self, lists):\n\n from queue import PriorityQueue\n\n heap = PriorityQueue()\n head = ListNode(0)\n p = head\n counter = 0\n\n for l in lists:\n heap.put((l.val, counter, l))\n counter += 1\n\n while not heap.empty():\n val, _, curr = heap.get()\n p.next = ListNode(val)\n p = p.next\n if curr.next:\n heap.put((curr.next.val, counter, curr.next))\n counter += 1\n\n return head.next\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Q023-v2.py","file_name":"Q023-v2.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"211287280","text":"from ListNode import ListNode\n\nclass Solution(object):\n def reorderList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: void Do not return anything, modify head in-place instead.\n \"\"\"\n if not head or not head.next or not head.next.next:\n return\n slow=head\n fast=head\n while fast.next and fast.next.next:\n fast=fast.next.next\n slow=slow.next\n\n head2=slow.next\n slow.next=None\n\n pre=None\n cur=head2\n #print (cur.val,\"v\")\n while cur:\n next=cur.next\n cur.next=pre\n pre=cur\n cur=next\n #print (pre.val,\"vv\")\n head2=pre\n\n dummy=ListNode(0)\n cur=dummy\n node1=head\n node2=head2\n while node1 or node2:\n if node1:\n cur.next=node1\n cur=cur.next\n node1=node1.next\n if node2:\n cur.next=node2\n cur=cur.next\n node2=node2.next\n return dummy.next\n\nhead = ListNode(1)\nnode1 = ListNode(2)\nnode2 = ListNode(3)\nnode3 = ListNode(4)\nnode4 = ListNode(5)\nhead.next = node1\nnode1.next = node2\nnode2.next = node3\nnode3.next = node4\n\nhead2=Solution().reorderList(head)\nwhile head2:\n print (head2.val)\n head2=head2.next\n\n\n\n\n","sub_path":"ReorderList.py","file_name":"ReorderList.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"33307104","text":"import pytest\nimport six\nimport tensorflow as tf\nfrom mock import Mock\n\nfrom tfsnippet.stochastic import StochasticTensor, validate_n_samples\nfrom tfsnippet.utils import TensorWrapper, 
register_tensor_wrapper_class\n\nif six.PY2:\n    LONG_MAX = (long(1) << 63) - long(1)\nelse:\n    LONG_MAX = (1 << 63) - 1\n\n\nclass ValidateNSamplesTestCase(tf.test.TestCase):\n\n    def test_static_values(self):\n        # type checks\n        for o in [object(), 1.2, LONG_MAX]:\n            with pytest.raises(\n                    TypeError, match='xyz cannot be converted to int32'):\n                _ = validate_n_samples(o, 'xyz')\n\n        # value checks\n        self.assertIsNone(validate_n_samples(None, 'xyz'))\n        self.assertEqual(validate_n_samples(1, 'xyz'), 1)\n        with pytest.raises(ValueError, match='xyz must be positive'):\n            _ = validate_n_samples(0, 'xyz')\n        with pytest.raises(ValueError, match='xyz must be positive'):\n            _ = validate_n_samples(-1, 'xyz')\n\n    def test_dynamic_values(self):\n        # type checks\n        for o in [tf.constant(1.2, dtype=tf.float32),\n                  tf.constant(LONG_MAX, dtype=tf.int64)]:\n            with pytest.raises(\n                    TypeError, match='xyz cannot be converted to int32'):\n                _ = validate_n_samples(o, 'xyz')\n\n        # value checks\n        with self.test_session():\n            self.assertEqual(\n                validate_n_samples(\n                    tf.constant(1, dtype=tf.int32), 'xyz').eval(), 1)\n            with pytest.raises(Exception, match='xyz must be positive'):\n                _ = validate_n_samples(\n                    tf.constant(0, dtype=tf.int32), 'xyz').eval()\n            with pytest.raises(Exception, match='xyz must be positive'):\n                _ = validate_n_samples(\n                    tf.constant(-1, dtype=tf.int32), 'xyz').eval()\n\n\nclass _MyTensorWrapper(TensorWrapper):\n\n    def __init__(self, wrapped):\n        self._self_wrapped = wrapped\n\n    @property\n    def tensor(self):\n        return self._self_wrapped\n\n\nregister_tensor_wrapper_class(_MyTensorWrapper)\n\n\nclass StochasticTensorTestCase(tf.test.TestCase):\n\n    def test_equality(self):\n        distrib = Mock(is_reparameterized=False)\n        samples = tf.constant(0.)\n        t = StochasticTensor(distrib, samples)\n        self.assertEqual(t, t)\n        self.assertEqual(hash(t), hash(t))\n        self.assertNotEqual(StochasticTensor(distrib, samples), t)\n\n    def test_construction(self):\n        distrib = Mock(is_reparameterized=True, is_continuous=True)\n        samples = tf.constant(12345678., dtype=tf.float32)\n\n        # test basic construction\n        t = StochasticTensor(distrib, samples, n_samples=1, group_ndims=2)\n        self.assertIs(t.distribution, distrib)\n        self.assertTrue(t.is_reparameterized)\n        self.assertTrue(t.is_continuous)\n        self.assertEqual(t.n_samples, 1)\n        self.assertEqual(t.group_ndims, 2)\n        self.assertEqual(t.dtype, tf.float32)\n        self.assertIsInstance(t.tensor, tf.Tensor)\n        with self.test_session():\n            self.assertEqual(t.eval(), 12345678.)\n            self.assertEqual(t.tensor.eval(), 12345678)\n\n        # test initializing from TensorWrapper\n        samples = tf.constant(1.)\n        t = StochasticTensor(Mock(is_reparameterized=False),\n                             _MyTensorWrapper(samples))\n        self.assertIs(t.tensor, samples)\n\n        # test specifying is_reparameterized\n        t = StochasticTensor(Mock(is_reparameterized=True), tf.constant(0.),\n                             is_reparameterized=False)\n        self.assertFalse(t.is_reparameterized)\n\n        # test construction with dynamic group_ndims\n        t = StochasticTensor(distrib, samples,\n                             group_ndims=tf.constant(2, dtype=tf.int32))\n        with self.test_session():\n            self.assertEqual(t.group_ndims.eval(), 2)\n\n        # test construction with bad dynamic group_ndims\n        t = StochasticTensor(distrib, samples,\n                             group_ndims=tf.constant(-1, dtype=tf.int32))\n        with self.test_session():\n            with pytest.raises(Exception,\n                               match='group_ndims must be non-negative'):\n                _ = t.group_ndims.eval()\n\n        # test construction with dynamic n_samples\n        t = StochasticTensor(distrib, samples,\n                             n_samples=tf.constant(2, dtype=tf.int32))\n        with self.test_session():\n            
self.assertEqual(t.n_samples.eval(), 2)\n\n # test construction with bad dynamic n_samples\n t = StochasticTensor(distrib, samples,\n n_samples=tf.constant(0, dtype=tf.int32))\n with self.test_session():\n with pytest.raises(Exception,\n match='n_samples must be positive'):\n _ = t.n_samples.eval()\n\n def test_prob_and_log_prob(self):\n # test default group_ndims\n distrib = Mock(\n is_reparameterized=True,\n log_prob=Mock(return_value=tf.constant(1.)),\n prob=Mock(return_value=tf.constant(2.)),\n )\n t = StochasticTensor(distrib, tf.constant(0.))\n given = t.tensor\n with self.test_session():\n self.assertEqual(t.log_prob().eval(), 1.)\n self.assertEqual(t.log_prob().eval(), 1.)\n self.assertEqual(t.prob().eval(), 2.)\n self.assertEqual(t.prob().eval(), 2.)\n self.assertEqual(distrib.log_prob.call_args_list, [((given, 0),)])\n self.assertEqual(distrib.prob.call_args_list, [((given, 0),)])\n\n # test group_ndims equal to default\n distrib.log_prob.reset_mock()\n distrib.prob.reset_mock()\n with self.test_session():\n self.assertEqual(t.log_prob(group_ndims=0).eval(), 1.)\n self.assertEqual(t.prob(group_ndims=0).eval(), 2.)\n distrib.log_prob.assert_not_called()\n distrib.prob.assert_not_called()\n\n # test group_ndims different from default\n distrib.log_prob.reset_mock()\n distrib.prob.reset_mock()\n with self.test_session():\n self.assertEqual(t.log_prob(group_ndims=1).eval(), 1.)\n self.assertEqual(t.prob(group_ndims=2).eval(), 2.)\n self.assertEqual(distrib.log_prob.call_args_list, [((given, 1),)])\n self.assertEqual(distrib.prob.call_args_list, [((given, 2),)])\n\n # test use dynamic group_ndims\n t = StochasticTensor(distrib, tf.constant(0.),\n group_ndims=tf.constant(1, dtype=tf.int32))\n given = t.tensor\n distrib.log_prob.reset_mock()\n distrib.prob.reset_mock()\n with self.test_session():\n self.assertEqual(t.log_prob(group_ndims=t.group_ndims).eval(), 1.)\n self.assertEqual(t.log_prob(group_ndims=t.group_ndims).eval(), 1.)\n self.assertEqual(t.prob(group_ndims=t.group_ndims).eval(), 2.)\n self.assertEqual(t.prob(group_ndims=t.group_ndims).eval(), 2.)\n self.assertEqual(distrib.log_prob.call_args_list,\n [((given, t.group_ndims),)])\n self.assertEqual(distrib.prob.call_args_list,\n [((given, t.group_ndims),)])\n\n def test_repr(self):\n t = StochasticTensor(\n Mock(is_reparameterized=False),\n Mock(spec=tf.Tensor, __repr__=Mock(return_value='repr_output'))\n )\n self.assertEqual(repr(t), 'StochasticTensor(repr_output)')\n","sub_path":"tests/test_stochastic.py","file_name":"test_stochastic.py","file_ext":"py","file_size_in_byte":7483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"537684388","text":"#!/usr/bin/env python3\n\nimport logging\nimport os\nimport datetime\nimport traceback\nfrom timeit import default_timer as timer\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.metrics import confusion_matrix\nimport pandas as pd\nimport numpy as np\n\n# models\nimport models\n\n# data\nimport datasets\nimport cfg\n\n# local utilities\nimport stats\nimport graphs\n\n\n# -----------------------------------------------------------\n\ndef tostrlist(v):\n a = np.array([str(e) for e in v])\n return list(a)\n\n\n# -----------------------------------------------------------\n\n__logger__ = [None]\n\n\ndef log():\n if __logger__[0] is not None:\n return __logger__[0]\n\n log_name = 
'{:%Y-%m-%d_%H.%M.%S}'.format(datetime.datetime.now()) + '_' + os.path.basename(__file__) + '.log'\n\n log_fp = cfg.ensure_fp(cfg.data_root + \"logs\", log_name)\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(log_name)\n fh = logging.FileHandler(log_fp)\n fh.setLevel(logging.INFO)\n logger.addHandler(fh)\n __logger__[0] = logger\n return __logger__[0]\n\n\n# -----------------------------------------------------------\n\n\nclass Trainalyser: # because it trains and analyses...\n\n def __init__(self, working_folder, ds):\n self.working_folder = working_folder\n self.ds = ds\n\n self.X_train = None\n self.X_test = None\n self.y_train = None\n self.y_test = None\n self.y_predict = None\n self.classification_report = None\n self.accuracy_score = None\n\n self.errors = []\n self.num_errors = 0\n\n log().info(\"------------------------ NEW CLASSIFIER ---------------------------------\")\n\n def on_err(self, err, note=None):\n if note is not None:\n self.errors.append(note)\n log().error(note)\n self.errors.append(err)\n log().error(err)\n self.num_errors = self.num_errors + 1\n\n def split(self, splitter=train_test_split, test_size=0.33):\n log().info(\n \"splitting data into train/test %0.2f/%0.2f using splitter: '%s'\",\n 1.0 - test_size,\n test_size,\n splitter.__name__)\n\n self.X_train, self.X_test, self.y_train, self.y_test = splitter(self.ds.data, self.ds.target)\n\n def train(self, model):\n start = timer()\n model.fit(self.X_train, self.y_train)\n elapsed = timer() - start\n log().info(\"train: took %.2fs\", elapsed)\n\n def test(self, model):\n start = timer()\n self.y_predict = model.predict(self.X_test)\n elapsed = timer() - start\n log().info(\"test: took %.2fs\", elapsed)\n\n def eval(self, classifier, splitter):\n start = timer()\n scores = cross_val_score(classifier, self.ds.data, self.ds.target, cv=splitter)\n elapsed = timer() - start\n log().info(\"eval: took %.2fs\", elapsed)\n\n log().info(scores)\n log().info(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n log().info(\"\")\n\n def target_names(self):\n if hasattr(self.ds, 'target_names'):\n return np.array([str(e) for e in self.ds.target_names])\n\n return None\n\n def assess(self):\n tn = self.target_names()\n if tn is not None:\n self.classification_report = metrics.classification_report(\n self.y_test,\n self.y_predict,\n target_names=tn)\n self.accuracy_score = metrics.accuracy_score(self.y_test, self.y_predict)\n else:\n log().info('assess() not run, no target names in data store')\n\n def report(self):\n log().info(\"------------------------- START REPORT ----------------------------------\")\n\n df = self.classification_report_dataframe()\n df.to_csv(self.working_folder / 'classification_report.csv', index=False)\n\n log().info('classification report:\\n%s', self.classification_report)\n log().info('accuracy score: %f', self.accuracy_score)\n\n # check for encoders\n if hasattr(self.ds, 'encoders'):\n test = list(self.ds.encoders[self.ds.target_name].inverse_transform(self.y_test))\n predict = list(self.ds.encoders[self.ds.target_name].inverse_transform(self.y_predict))\n stats.report(test, predict, self.ds.target_names, log())\n else:\n tn = self.target_names()\n if tn is not None:\n stats.report(self.y_test, self.y_predict, tn, log())\n else:\n log().info('stats.report() not run, no target names in data store')\n\n log().info(\"-------------------------- END REPORT -----------------------------------\")\n\n def classification_report_dataframe(self):\n report_data = 
[]\n        lines = self.classification_report.split('\\n')\n        for line in lines[2:-3]:\n            row = {}\n            row_data = line.split()\n            row['class'] = row_data[0]\n            row['precision'] = row_data[1]\n            row['recall'] = row_data[2]\n            row['f1-score'] = row_data[3]\n            row['support'] = row_data[4]\n            report_data.append(row)\n        return pd.DataFrame.from_dict(report_data)\n\n    def graph(self):\n        tn = self.target_names()\n        if tn is not None:\n            cm = confusion_matrix(tostrlist(self.y_test), tostrlist(self.y_predict))\n            graphs.plot_cm(cm, tn, self.working_folder)\n        else:\n            log().info('graph() not run, no target names in data store')\n\n\n# -----------------------------------------------------------\n\n\ndef train_and_evaluate(mm, features_id, target_name):\n    feat_fp = cfg.ensure_fp(cfg.features_root + features_id, cfg.features)\n    model_path = cfg.ensure_path(cfg.models_root + \"/\" + features_id)\n\n    working_folder = cfg.ensure_path(model_path / mm.name)\n    ds = datasets.from_csv_with_target_names(feat_fp, cfg.onehot_targets, target_name)\n\n    analyser = Trainalyser(working_folder, ds)\n\n    log().info(\"target: {}\".format(target_name))\n    log().info(\"features id: {}\".format(features_id))\n    log().info(\"model name: {}\".format(mm.name))\n    log().info(\"model description: {}\".format(mm.description))\n\n    try:\n        analyser.split()\n    except:\n        log().error('analyser.split()')\n        log().error(traceback.format_exc())\n        return analyser\n\n    try:\n        analyser.train(mm.model)\n    except:\n        log().error('analyser.train(mm.model)')\n        log().error(traceback.format_exc())\n        return analyser\n\n    try:\n        analyser.test(mm.model)\n    except:\n        log().error('analyser.test(mm.model)')\n        log().error(traceback.format_exc())\n        return analyser\n\n    try:\n        analyser.assess()\n    except:\n        log().error('analyser.assess()')\n        log().error(traceback.format_exc())\n        return analyser\n\n    try:\n        analyser.report()\n    except:\n        log().error('analyser.report()')\n        log().error(traceback.format_exc())\n        return analyser\n\n    try:\n        analyser.graph()\n    except:\n        log().error('analyser.graph()')\n        log().error(traceback.format_exc())\n        return analyser\n\n    mm.save(working_folder)\n\n    return analyser\n\n\ndef make_features_fp(features_class):\n    fp = cfg.ensure_fp(cfg.features_root + features_class, cfg.features)\n    return fp\n\n\ndef train_and_evaluate_all():\n    for mm in models.generate():\n        for features_class in cfg.features_classes:\n            for features_filter in cfg.features_filters:\n                target_name = features_filter[1][-1]\n\n                features_id = features_class + '_' + features_filter[0]\n\n                tr = train_and_evaluate(mm, features_id, target_name)\n                if tr.num_errors > 0:\n                    log().info('=========================================================================')\n                    log().info('========================= ERROR SUMMARY =================================')\n                    for err in tr.errors:\n                        log().info(err)\n                    log().info('=========================================================================')\n                    log().info('')\n\n\ndef main():\n    train_and_evaluate_all()\n\n\nif __name__ == \"__main__\":\n    main()\n\n","sub_path":"app/api/analyser/trainalyser2.py","file_name":"trainalyser2.py","file_ext":"py","file_size_in_byte":8323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"230071976","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/bauble/plugins/plants/ask_tpl.py\n# Compiled at: 2016-10-03 09:39:22\nimport difflib, requests, csv, 
logging\nlogger = logging.getLogger(__name__)\nimport threading\n\nclass AskTPL(threading.Thread):\n running = None\n\n def __init__(self, binomial, callback, threshold=0.8, timeout=4, gui=False, group=None, verbose=None, **kwargs):\n super(AskTPL, self).__init__(group=group, target=None, name=None, verbose=verbose)\n logger.debug('new %s, already running %s.', self.name, self.running and self.running.name)\n if self.running is not None:\n if self.running.binomial == binomial:\n logger.debug('%s has same query as %s, do not start %s', self.name, self.running.name, self.name)\n binomial = None\n else:\n logger.debug('%s has other query than %s, stop %s', self.name, self.running.name, self.running.name)\n self.running.stop()\n if binomial:\n self.__class__.running = self\n self._stop = False\n self.binomial = binomial\n self.threshold = threshold\n self.callback = callback\n self.timeout = timeout\n self.gui = gui\n return\n\n def stop(self):\n self._stop = True\n\n def stopped(self):\n return self._stop\n\n def run(self):\n\n def ask_tpl(binomial):\n result = requests.get('http://www.theplantlist.org/tpl1.1/search?q=' + binomial + '&csv=true', timeout=self.timeout)\n logger.debug(result.text)\n l = result.text[1:].split('\\n')\n result = [ row for row in csv.reader(k.encode('utf-8') for k in l if k)\n ]\n header = result[0]\n result = result[1:]\n return [ dict(zip(header, k)) for k in result if k[7] == '' ]\n\n class ShouldStopNow(Exception):\n pass\n\n class NoResult(Exception):\n pass\n\n if self.binomial is None:\n return\n else:\n try:\n accepted = None\n logger.debug('%s before first query', self.name)\n candidates = ask_tpl(self.binomial)\n logger.debug('%s after first query', self.name)\n if self.stopped():\n raise ShouldStopNow('after first query')\n if len(candidates) > 1:\n for item in candidates:\n g, s = item['Genus'], item['Species']\n seq = difflib.SequenceMatcher(a=self.binomial, b='%s %s' % (g, s))\n item['_score_'] = seq.ratio()\n\n found = sorted(candidates, cmp=lambda a, b: cmp(a['_score_'], b['_score_']) or cmp(b['Taxonomic status in TPL'], a['Taxonomic status in TPL']))[(-1)]\n if found['_score_'] < self.threshold:\n found['_score_'] = 0\n elif candidates:\n found = candidates.pop()\n else:\n raise NoResult\n if found['Accepted ID']:\n logger.debug('found this: %s', str(found))\n accepted = ask_tpl(found['Accepted ID'])\n logger.debug('ask_tpl on the Accepted ID returns %s', accepted)\n if accepted:\n accepted = accepted[0]\n else:\n logger.debug('taxon %s %s (%s) is marked as synonym. 
accepted form (%s) is at infraspecific rank.', found['Genus'], found['Species'], found['ID'], found['Accepted ID'])\n                logger.debug('%s after second query', self.name)\n                if self.stopped():\n                    raise ShouldStopNow('after second query')\n            except ShouldStopNow:\n                logger.debug('%s interrupted : do not invoke callback', self.name)\n                return\n            except Exception as e:\n                logger.debug('%s (%s)%s : completed with trouble', self.name, type(e).__name__, e)\n                self.__class__.running = None\n                found = accepted = None\n\n            self.__class__.running = None\n            logger.debug('%s before invoking callback' % self.name)\n            if self.gui:\n                import gobject\n                gobject.idle_add(self.callback, found, accepted)\n            else:\n                self.callback(found, accepted)\n            return\n\n\ndef citation(d):\n    return ('%(Genus hybrid marker)s%(Genus)s %(Species hybrid marker)s%(Species)s %(Authorship)s (%(Family)s)' % d).replace('  ', ' ')\n\n\ndef what_to_do_with_it(found, accepted):\n    if found is None and accepted is None:\n        logger.info('nothing matches')\n        return\n    else:\n        logger.info('%s', citation(found))\n        if accepted == []:\n            logger.info('invalid reference in tpl.')\n        if accepted:\n            logger.info('%s - is its accepted form', citation(accepted))\n        return\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.DEBUG)\n    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n    while True:\n        binomial = raw_input()\n        if not binomial:\n            if AskTPL.running is not None:\n                AskTPL.running.stop()\n            break\n        AskTPL(binomial, what_to_do_with_it, timeout=2).start()","sub_path":"pycfiles/bauble-1.0.62-py2.7/ask_tpl.py","file_name":"ask_tpl.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"602719537","text":"from random import uniform, random\nimport math\n\nclass Perceptron:\n\n    def __init__(self, numberOfinps):\n        self.numberOfinps = numberOfinps\n\n        self.weights = []\n        self.value = random()\n        self.bias = random()\n        self.delta= random()\n\n        self._generate_weight(numberOfinps)\n\n    def _generate_weight(self, num_of_weights):\n        for i in range(num_of_weights):\n            self.weights.append(uniform(0.0, 1.0))\n\nclass Layer:\n    def __init__(self, thisLayer, lastLayer):\n        # how many perceptrons\n        self.thisLayer = thisLayer\n        self.lastLayer = lastLayer\n\n        self.neurons = []\n\n        for _ in range(self.thisLayer):\n            # create the neurons of a single layer\n            self.neurons.append(Perceptron(lastLayer))\n\n\nclass Network:\n    def __init__(self, nppl):\n        # layers\n        self.layers = []\n\n        # network shape\n        self.nppl = nppl\n\n        # learning rate\n        self.learningRate = 0.6\n\n        # build the network\n        for i in range(len(self.nppl)):\n            if i != 0:\n                self.layers.append(Layer(nppl[i], nppl[i-1]))\n            else:\n                self.layers.append(Layer(nppl[i], 0))\n\n\n    def _sigmoida(self, x):\n        return 1 / (1 + (math.e ** (-x)))\n        # return 1 / (1 + math.pow(math.e, (-x)))\n\n    def derivative(self, x):\n        return x - (x ** 2.0)\n\n    def forward(self, inp):\n        output = []\n\n        for i in range(len(self.layers[0].neurons)):\n            self.layers[0].neurons[i].value = inp[i]\n\n        for k in range(1, len(self.layers)):\n            for i in range(0, len(self.layers[k].neurons)):\n                sum = 0.0\n\n                for j in range(0, len(self.layers[k-1].neurons)):\n                    sum += self.layers[k].neurons[i].weights[j] * self.layers[k - 1].neurons[j].value\n\n                sum += self.layers[k].neurons[i].bias\n\n                self.layers[k].neurons[i].value = self._sigmoida(sum)\n\n        # for i in range(len(self.layers[-1].neurons)):\n        #     output.append(self.layers[-1].neurons[i].value)\n\n        for neuron in self.layers[-1].neurons:\n            output.append(neuron.value)\n\n        return output\n\n    # this is where the network learns\n    # inp -> the input data\n    # output -> the target answer for that data\n    def backward(self, inp, output):\n        newOutput = self.forward(inp)\n\n        for i in range(0, len(self.layers[-1].neurons)):\n            error = output[i] - newOutput[i]\n            self.layers[-1].neurons[i].delta = error * self.derivative(newOutput[i])\n\n        for k in range(len(self.layers)-2, 0, -1):\n\n            for i in range(len(self.layers[k].neurons)):\n                error = 0.0\n\n                for j in range(len(self.layers[k+1].neurons)):\n                    error += self.layers[k+1].neurons[j].delta * self.layers[k+1].neurons[j].weights[i]\n\n                self.layers[k].neurons[i].delta = error * self.derivative(self.layers[k].neurons[i].value)\n\n            for i in range(len(self.layers[k+1].neurons)):\n                for j in range(len(self.layers[k].neurons)):\n                    self.layers[k+1].neurons[i].weights[j] += self.learningRate * self.layers[k+1].neurons[i].delta * self.layers[k].neurons[j].value\n                self.layers[k+1].neurons[i].bias += self.learningRate * self.layers[k+1].neurons[i].delta\n\n        error = 0.0\n        for i in range(len(output)):\n            error += abs(newOutput[i] - output[i])\n\n        error /= len(output)\n\n        return error\n
","sub_path":"wsn/materiały/Wstep_do_sieci_neuronowych/zadanie_4_wstepna_propagacja/Logic/Prop.py","file_name":"Prop.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"3778474","text":"from django.conf.urls import url\n\nfrom . import views, statistics\n\nurlpatterns = [\n    url(r'^diagram', views.context_diagram, name='contex_diagram'),\n    url(r'^hangup/', views.hangup, name=\"hangup\"),\n    url(r\"^statistics\", statistics.statistics, name='statistics' ), \n    url(r\"^\", views.parse_input, name=\"parse_input\"),\n    ]","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"420476570","text":"import torch\nfrom torch_geometric.nn.models import GAE, InnerProductDecoder\nimport numpy as np\nimport scipy.sparse as sp\nfrom torch.nn import Parameter as Param\nfrom torch import Tensor\nfrom torch_geometric.nn.conv import RGCNConv, GCNConv, MessagePassing\nfrom sklearn import metrics\nfrom torch.utils.checkpoint import checkpoint\nimport torch.nn.functional as F\nfrom torch_geometric.data import Data\nfrom pytorch_memlab import profile\n\n\ntorch.manual_seed(1111)\nnp.random.seed(1111)\n\nEPS = 1e-13\n\ndef remove_bidirection(edge_index, edge_type):\n\n    mask = edge_index[0] > edge_index[1]\n    keep_set = mask.nonzero().view(-1)\n\n    if edge_type is None:\n        return edge_index[:, keep_set]\n    else:\n        return edge_index[:, keep_set], edge_type[keep_set]\n\n\ndef to_bidirection(edge_index, edge_type=None):\n    tmp = edge_index.clone()\n    tmp[0, :], tmp[1, :] = edge_index[1, :], edge_index[0, :]\n    if edge_type is None:\n        return torch.cat([edge_index, tmp], dim=1)\n    else:\n        return torch.cat([edge_index, tmp], dim=1), torch.cat([edge_type, edge_type])\n\n\ndef get_range_list(edge_list):\n    tmp = []\n    s = 0\n    for i in edge_list:\n        tmp.append((s, s + i.shape[1]))\n        s += i.shape[1]\n    return torch.tensor(tmp)\n\n\ndef process_edges(raw_edge_list, p=0.9):\n    train_list = []\n    test_list = []\n    train_label_list = []\n    test_label_list = []\n\n    for i, idx in enumerate(raw_edge_list):\n        train_mask = np.random.binomial(1, p, idx.shape[1])\n        test_mask = 1 - train_mask\n        train_set = train_mask.nonzero()[0]\n        test_set = test_mask.nonzero()[0]\n\n        
train_list.append(idx[:, train_set])\n test_list.append(idx[:, test_set])\n\n train_label_list.append(torch.ones(2 * train_set.size, dtype=torch.long) * i)\n test_label_list.append(torch.ones(2 * test_set.size, dtype=torch.long) * i)\n\n train_list = [to_bidirection(idx) for idx in train_list]\n test_list = [to_bidirection(idx) for idx in test_list]\n\n train_range = get_range_list(train_list)\n test_range = get_range_list(test_list)\n\n train_edge_idx = torch.cat(train_list, dim=1)\n test_edge_idx = torch.cat(test_list, dim=1)\n\n train_et = torch.cat(train_label_list)\n test_et = torch.cat(test_label_list)\n\n return train_edge_idx, train_et, train_range, test_edge_idx, test_et, test_range\n\n\ndef negative_sampling(pos_edge_index, num_nodes):\n idx = (pos_edge_index[0] * num_nodes + pos_edge_index[1])\n idx = idx.to(torch.device('cpu'))\n\n perm = torch.tensor(np.random.choice(num_nodes**2, idx.size(0)))\n mask = torch.from_numpy(np.isin(perm, idx).astype(np.uint8))\n rest = mask.nonzero().view(-1)\n while rest.numel() > 0: # pragma: no cover\n tmp = torch.tensor(np.random.choice(num_nodes**2, rest.size(0)))\n mask = torch.from_numpy(np.isin(tmp, idx).astype(np.uint8))\n perm[rest] = tmp\n rest = mask.nonzero().view(-1)\n\n row, col = perm / num_nodes, perm % num_nodes\n return torch.stack([row, col], dim=0).long().to(pos_edge_index.device)\n\n\ndef typed_negative_sampling(pos_edge_index, num_nodes, range_list):\n tmp = []\n for start, end in range_list:\n tmp.append(negative_sampling(pos_edge_index[:, start: end], num_nodes))\n return torch.cat(tmp, dim=1)\n\n\ndef sparse_id(n):\n idx = [[i for i in range(n)], [i for i in range(n)]]\n val = [1 for i in range(n)]\n i = torch.LongTensor(idx)\n v = torch.FloatTensor(val)\n shape = (n, n)\n\n return torch.sparse.FloatTensor(i, v, torch.Size(shape))\n\n\ndef dense_id(n):\n idx = [i for i in range(n)]\n val = [1 for i in range(n)]\n out = sp.coo_matrix((val, (idx, idx)), shape=(n, n), dtype=float)\n\n return torch.Tensor(out.todense())\n\n\ndef auprc_auroc_ap(target_tensor, score_tensor):\n y = target_tensor.detach().cpu().numpy()\n pred = score_tensor.detach().cpu().numpy()\n auroc, ap = metrics.roc_auc_score(y, pred), metrics.average_precision_score(y, pred)\n y, xx, _ = metrics.ranking.precision_recall_curve(y, pred)\n auprc = metrics.ranking.auc(xx, y)\n\n return auprc, auroc, ap\n\n\ndef uniform(size, tensor):\n bound = 1.0 / np.sqrt(size)\n if tensor is not None:\n tensor.data.uniform_(-bound, bound)\n\n\nclass MyRGCNConv(MessagePassing):\n r\"\"\"\n Args:\n in_channels (int): Size of each input sample.\n out_channels (int): Size of each output sample.\n num_relations (int): Number of relations.\n num_bases (int): Number of bases used for basis-decomposition.\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. 
(default: :obj:`True`)\n **kwargs (optional): Additional arguments of\n :class:`torch_geometric.nn.conv.MessagePassing`.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n num_relations,\n num_bases,\n after_relu,\n bias=False,\n **kwargs):\n super(MyRGCNConv, self).__init__(aggr='mean', **kwargs)\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.num_relations = num_relations\n self.num_bases = num_bases\n self.after_relu = after_relu\n\n self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels))\n self.att = Param(torch.Tensor(num_relations, num_bases))\n self.root = Param(torch.Tensor(in_channels, out_channels))\n\n if bias:\n self.bias = Param(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n\n self.att.data.normal_(std=1/np.sqrt(self.num_bases))\n\n if self.after_relu:\n self.root.data.normal_(std=2/self.in_channels)\n self.basis.data.normal_(std=2/self.in_channels)\n\n else:\n self.root.data.normal_(std=1/np.sqrt(self.in_channels))\n self.basis.data.normal_(std=1/np.sqrt(self.in_channels))\n\n if self.bias is not None:\n self.bias.data.zero_()\n\n def forward(self, x, edge_index, edge_type):\n \"\"\"\"\"\"\n return self.propagate(\n edge_index, x=x, edge_type=edge_type)\n\n def message(self, x_j, edge_index_j, edge_type):\n w = torch.matmul(self.att, self.basis.view(self.num_bases, -1))\n w = w.view(self.num_relations, self.in_channels, self.out_channels)\n w = w[edge_type, :, :]\n out = torch.bmm(x_j.unsqueeze(1), w).squeeze(-2)\n return out\n\n def update(self, aggr_out, x):\n\n out = aggr_out + torch.matmul(x, self.root)\n\n if self.bias is not None:\n out = out + self.bias\n return out\n\n def __repr__(self):\n return '{}({}, {}, num_relations={})'.format(\n self.__class__.__name__, self.in_channels, self.out_channels,\n self.num_relations)\n\n\n\nclass MyGAE(torch.nn.Module):\n\n def __init__(self, encoder, decoder=None):\n super(MyGAE, self).__init__()\n self.encoder = encoder\n self.decoder = InnerProductDecoder() if decoder is None else decoder\n\n\nclass MyRGCNConv2(MessagePassing):\n r\"\"\"\n Args:\n in_channels (int): Size of each input sample.\n out_channels (int): Size of each output sample.\n num_relations (int): Number of relations.\n num_bases (int): Number of bases used for basis-decomposition.\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. 
(default: :obj:`True`)\n        **kwargs (optional): Additional arguments of\n            :class:`torch_geometric.nn.conv.MessagePassing`.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_relations,\n                 num_bases,\n                 after_relu,\n                 bias=False,\n                 **kwargs):\n        super(MyRGCNConv2, self).__init__(aggr='mean', **kwargs)\n\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_relations = num_relations\n        self.num_bases = num_bases\n        self.after_relu = after_relu\n\n        self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels))\n        self.att = Param(torch.Tensor(num_relations, num_bases))\n        self.root = Param(torch.Tensor(in_channels, out_channels))\n\n        if bias:\n            self.bias = Param(torch.Tensor(out_channels))\n        else:\n            self.register_parameter('bias', None)\n\n        self.reset_parameters()\n\n    def reset_parameters(self):\n\n        self.att.data.normal_(std=1/np.sqrt(self.num_bases))\n\n        if self.after_relu:\n            self.root.data.normal_(std=2/self.in_channels)\n            self.basis.data.normal_(std=2/self.in_channels)\n\n        else:\n            self.root.data.normal_(std=1/np.sqrt(self.in_channels))\n            self.basis.data.normal_(std=1/np.sqrt(self.in_channels))\n\n        if self.bias is not None:\n            self.bias.data.zero_()\n\n    def forward(self, x, edge_index, edge_type, range_list):\n        \"\"\"\"\"\"\n        return self.propagate(\n            edge_index, x=x, edge_type=edge_type, range_list=range_list)\n\n    def message(self, x_j, edge_index, edge_type, range_list):\n        w = torch.matmul(self.att, self.basis.view(self.num_bases, -1))\n        w = w.view(self.num_relations, self.in_channels, self.out_channels)\n        # w = w[edge_type, :, :]\n        # out = torch.bmm(x_j.unsqueeze(1), w).squeeze(-2)\n\n        out_list = []\n        for et in range(range_list.shape[0]):\n            start, end = range_list[et]\n\n            tmp = torch.matmul(x_j[start: end, :], w[et])\n\n            # xxx = x_j[start: end, :]\n            # tmp = checkpoint(torch.matmul, xxx, w[et])\n\n            out_list.append(tmp)\n\n        # TODO: test this\n        return torch.cat(out_list)\n\n    def update(self, aggr_out, x):\n\n        out = aggr_out + torch.matmul(x, self.root)\n\n        if self.bias is not None:\n            out = out + self.bias\n        return out\n\n    def __repr__(self):\n        return '{}({}, {}, num_relations={})'.format(\n            self.__class__.__name__, self.in_channels, self.out_channels,\n            self.num_relations)\n","sub_path":"src/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":10234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"460934945","text":"#coding=utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport argparse\nimport functools\nimport numpy as np\nimport paddle.fluid as fluid\nimport deeplearning_backbone.paddlecv.model_provider as paddlecv\n# load the custom modules\nimport models\nfrom attack.attack_pp import FGSM, PGD, M_PGD, G_FGSM, L_PGD, T_PGD, T_FGSM\nfrom utils import init_prog, save_adv_image, process_img, tensor2img, calc_mse, add_arguments, print_arguments\nwith_gpu = os.getenv('WITH_GPU', '0') != '0'\n#######parse parameters\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n\nadd_arg('class_dim', int, 120, \"Class number.\")\nadd_arg('shape', str, \"3,224,224\", \"output image shape\")\nadd_arg('input', str, \"./input_image/\", \"Input directory with images\")\nadd_arg('output', str, \"./output_image/\", \"Output directory with images\")\nadd_arg('tt', int, 0, \"index of the model to use\")\n\n\nargs = parser.parse_args()\nprint_arguments(args)\n\n######Init 
args\nimage_shape = [int(m) for m in args.shape.split(\",\")]\nclass_dim=args.class_dim\ninput_dir = args.input\noutput_dir = args.output\ntt = args.tt\n\n# model_name =\"ResNeXt50_32x4d\"\n# pretrained_model = \"../paddle1.5/ResNeXt50_32x4dxxxxxxxxxxx\"\n# pretrained_model = [\"../paddle1.5/ResNeXt50_32x4dxx\",\n# \"../paddle1.5/ResNeXt50_32x4dxxx\",\n# \"../paddle1.5/ResNeXt50_32x4dxxxx\",\n# \"../paddle1.5/ResNeXt50_32x4dxxxxx\",\n# \"../paddle1.5/ResNeXt50_32x4dxxxxxx\",\n# \"../paddle1.5/ResNeXt50_32x4dxxxxxxxxx\",\n# \"../paddle1.5/ResNeXt50_32x4dxxxxxxxxxx\",\n# \"../paddle1.5/ResNeXt50_32x4dxxxxxxxxxxx\"]\n\n# model_name0 = \"MobileNetV2_x2_0\"\n# pretrained_model0 = \"../paddle1.5/MobileNetV2\"\n\n# model_name0 = \"InceptionV4\"\n# pretrained_model0 = \"../paddle1.5/InceptionV4\"\n#\n# model_name0 = \"VGG19\"\n# pretrained_model0 = \"../paddle1.5/VGG19\"\n\n# model_name0 = \"DistResNet\"\n# pretrained_model0 = \"../paddle1.5/DistResNet\"\n\n# model_name0 =\"SE_ResNeXt101_32x4d\"\n# pretrained_model0 =\"../paddle1.5/SE_ResNeXt101_32x4d\"\n\n\n# model_name = \"DarkNet53\"\n# pretrained_model = \"../paddle1.6/DarkNet53\"\n\n# model_name = \"DenseNet161\"\n# pretrained_model = \"../paddle1.6/DenseNet161\"\n\n# model_name = \"DPN131\"\n# pretrained_model = \"../paddle1.6/DPN131\"\n\nmodel_name0 = \"VGG16\"\npretrained_model0 = \"../paddle1.6/VGG16\"\n\n# model_name = \"ResNeXt101_32x8d_wsl\"\n# pretrained_model = \"../paddle1.6/ResNeXt101_32x8d_wsl\"\n\nmodel_name0 = \"ResNet50\"\npretrained_model0 = \"../paddle1.6/ResNet50\"\n\n# model_name = \"SE_ResNet50_vd\"\n# pretrained_model = \"../paddle1.6/SE_ResNet50_vd\"\n\nmodel_name0 = \"EfficientNetB0\"\npretrained_model0 = \"../paddle1.6/EfficientNetB0\"\n\n# model_name = \"ShuffleNetV2_swish\"\n# pretrained_model = \"../paddle1.6/ShuffleNetV2_swish\"\n\n# model_name = \"AlexNet\"\n# pretrained_model = \"../paddle1.6/AlexNet\"\n\n# model_name = \"SqueezeNet1_1\"\n# pretrained_model = \"../paddle1.6/SqueezeNet1_1\"\n\n#\n# model_name = \"ResNet50_vd\"\n# pretrained_model = \"../paddle1.6/ResNet50_vd\"\n\nmodel_name0 =\"DenseNet121\"\npretrained_model0 =\"../paddle1.6/DenseNet121\"\n#\n# model_name0 =\"Xception65\"\n# pretrained_model0 =\"../paddle1.6/Xception65\"\n#\n# model_name =\"EfficientNetB4\"\n# pretrained_model =\"../paddle1.6/EfficientNetB4\"\n#\n# model_name =\"Res2Net50_26w_4s\"\n# pretrained_model =\"../paddle1.6/Res2Net50_26w_4s\"\n#\n# model_name =\"HRNet_W32_C\"\n# pretrained_model =\"../paddle1.6/HRNet_W32_C\"\n#\n# model_name =\"ResNeXt101_vd_32x4d\"\n# pretrained_model =\"../paddle1.6/ResNeXt101_vd_32x4d\"\n#\nmodel_name0 = \"ResNeXt50_vd_32x4d\"\npretrained_model0 = \"../paddle1.6/ResNeXt50_vd_32x4d\"\n\n# model_name =\"ResNeXt50_vd_64x4d\"\n# pretrained_model =\"../paddle1.6/ResNeXt50_vd_64x4d\"\n#\n# model_name =\"ShuffleNetV2_x2_0\"\n# pretrained_model =\"../paddle1.6/ShuffleNetV2_x2_0\"\n#\n# model_name =\"SENet154_vd\"\n# pretrained_model =\"../paddle1.6/SENet154_vd\"\n\nmodel_name0 =\"ResNeXt152_64x4d\"\npretrained_model0 =\"../paddle1.6/ResNeXt152_64x4d\"\n\n# model_name0 =\"ResNeXt101_32x32d_wsl\"\n# pretrained_model0 =\"../paddle1.6/ResNeXt101_32x32d_wsl\"\n\nmodel_name0 =\"DenseNet264\"\npretrained_model0 =\"../paddle1.6/DenseNet264\"\n\nmodel_name0 =\"HRNet_W64_C\"\npretrained_model0 =\"../paddle1.6/HRNet_W64_C\"\n# model_name = [\"ResNeXt50_32x4d\", \"MobileNetV2_x2_0\", \"InceptionV4\", \"VGG19\", \"DistResNet\", \"SE_ResNeXt101_32x4d\"]\n# pretrained_model = [\"../paddle1.5/ResNeXt50_32x4d\", 
\"../paddle1.5/MobileNetV2\", \"../paddle1.5/InceptionV4\", \"../paddle1.5/VGG19\", \"../paddle1.5/DistResNet\", \"../paddle1.5/SE_ResNeXt101_32x4d\"]\n# #\n# model_name = [\"DarkNet53\", \"DenseNet161\", \"DPN131\", \"VGG16\", \"ResNeXt101_32x8d_wsl\", \"ResNet50\", \"SE_ResNet50_vd\", \"EfficientNetB0\", \"ShuffleNetV2_swish\", \"AlexNet\", \"SqueezeNet1_1\", \"ResNet50_vd\", \"DenseNet121\", \"Xception65\", \"EfficientNetB4\", \"Res2Net50_26w_4s\", \"HRNet_W32_C\", \"ResNeXt101_vd_32x4d\", \"ResNeXt50_vd_64x4d\", \"ShuffleNetV2_x2_0\", \"SENet154_vd\", \"InceptionV4\"]\n# pretrained_model = \"../paddle1.6/\" + model_name[tt]\n\nmodel_name = \"DARTS_4M\"\npretrained_model = \"../paddle1.6/DARTS_4M\"\n\nmodel_name0 = \"DARTS_6M\"\npretrained_model0 = \"../paddle1.6/DARTS_6M\"\n\nval_list = 'val_list.txt'\nuse_gpu=False\n\n######Attack graph\n\nadv_program=fluid.Program()\n# finish the initialization\nwith fluid.program_guard(adv_program):\n\n    input_layer = fluid.layers.data(name='image', shape=image_shape, dtype='float32')\n    # allow gradients to be computed for the input\n    input_layer.stop_gradient = False\n\n    #model definition\n    model = models.__dict__[model_name]()\n    #model = paddlecv.get_model(\"inceptionv4\")\n\n    out_logits = model.net(input=input_layer, class_dim=class_dim)\n    out = fluid.layers.softmax(out_logits)\n\n    # place = fluid.CUDAPlace(0) if with_gpu else fluid.CPUPlace()\n    place = fluid.CPUPlace()\n    exe = fluid.Executor(place)\n    exe.run(fluid.default_startup_program())\n\n    fluid.io.load_params(executor=exe, dirname=pretrained_model, main_program=adv_program)\n\n\n# set the state of adv_program's BN layers\ninit_prog(adv_program)\n\n# create an evaluation-mode clone for testing\neval_program = adv_program.clone(for_test=True)\n\n### define the gradients\nwith fluid.program_guard(adv_program):\n    label = fluid.layers.data(name=\"label\", shape=[1] ,dtype='int64')\n    loss = fluid.layers.cross_entropy(input=out, label=label)\n    gradients = fluid.backward.gradients(targets=loss, inputs=[input_layer])[0]\n\n\n\n\n######Inference\ndef inference(img):\n    fetch_list = [out.name]\n\n    result = exe.run(eval_program,\n                     fetch_list=fetch_list,\n                     feed={ 'image':img })\n    result = result[0][0]\n    pred_label = np.argmax(result)\n    pred_score = result[pred_label].copy()\n    return pred_label, pred_score\n\n######FGSM attack\n#untarget attack\ndef attack_nontarget_by_FGSM(img, src_label):\n    pred_label = src_label\n    #mom = 0.8\n    step = 8.0/256.0\n    eps = 128.0/256.0\n    while pred_label == src_label:\n\n        # generate the adversarial example\n        # adv=L_PGD(adv_program=adv_program,eval_program=eval_program,gradients=gradients,o=img,\n        #           input_layer=input_layer,output_layer=out, step_size=step,epsilon=eps, iteration=8, pix_num=224*224*3/30,\n        #           isTarget=False,target_label=0,use_gpu=use_gpu)\n\n        # adv = G_FGSM(adv_program=adv_program, eval_program=eval_program, gradients=gradients, o=img,\n        #              input_layer=input_layer, output_layer=out, step_size=step, epsilon=eps,\n        #              pix_num=224 * 224 * 3, isTarget=False, target_label=0, use_gpu=use_gpu)\n\n        # adv = T_PGD(adv_program=adv_program, eval_program=eval_program, gradients=gradients, o=img,\n        #             input_layer=input_layer, output_layer=out, step_size=step, epsilon=eps,iteration=8,t=0,\n        #             pix_num=3*224*224/30, isTarget=False, target_label=0, use_gpu=use_gpu)\n\n        adv = T_FGSM(adv_program=adv_program, eval_program=eval_program, gradients=gradients, o=img,\n                    input_layer=input_layer, output_layer=out, step_size=step, epsilon=eps, t=0.6,\n                    pix_num=3*224*224/30, isTarget=False, target_label=0, use_gpu=use_gpu)\n\n        pred_label, pred_score = inference(adv)\n        step *= 1.5\n        #mom *= 0.8\n        if step > eps:\n            break\n\n\n    print(\"Test-score: 
{0}, class {1}\".format(pred_score, pred_label))\n\n    if pred_label != src_label:\n        print(\"Attack succeeded: {0}->{1}\".format(src_label, pred_label))\n    else:\n        adv = img\n        print(\"Attack failed; dropping the perturbation and saving the original image\")\n\n    adv_img=tensor2img(adv)\n    return adv_img\n\n######PGD attack\n#untarget attack\ndef attack_nontarget_by_PGD(img, src_label):\n    pred_label = src_label\n\n    step = 8.0/256.0\n    eps = 16.0/256.0\n    while pred_label == src_label:\n        # generate the adversarial example\n        adv=PGD(adv_program=adv_program,eval_program=eval_program,gradients=gradients,o=img,\n                input_layer=input_layer,output_layer=out,step_size=step,epsilon=eps,iteration=10,\n                isTarget=False,target_label=0,use_gpu=use_gpu)\n\n        pred_label, pred_score = inference(adv)\n        step *= 2\n        if step > eps:\n            break\n\n    print(\"Test-score: {0}, class {1}\".format(pred_score, pred_label))\n\n\n\n    adv_img=tensor2img(adv)\n    return adv_img\n\n\n####### Main #######\ndef get_original_file(filepath):\n    with open(filepath, 'r') as cfile:\n        full_lines = [line.strip() for line in cfile]\n    cfile.close()\n    original_files = []\n    for line in full_lines:\n        label, file_name = line.split()\n        original_files.append([file_name, int(label)])\n    return original_files\n\n\n\n\ndef gen_adv():\n    mse = 0\n    original_files = get_original_file(input_dir + val_list)\n    num = 1\n    cout = 0\n\n    print(\"the model is {}\".format(model_name))\n    for filename, label in original_files:\n\n\n\n        img_path = input_dir + filename\n        print(\"Image: {0} \".format(img_path))\n        img=process_img(img_path)\n\n        #print(img.shape)\n\n        result = exe.run(eval_program,\n                         fetch_list=[out],\n                         feed={input_layer.name: img})\n        result = result[0][0]\n\n        o_label = np.argsort(result)[::-1][:1][0]\n\n        print(\"Original label: {0}\".format(o_label))\n\n        if o_label == int(label):\n            adv_img = attack_nontarget_by_FGSM(img, label)\n            #adv_img = attack_nontarget_by_PGD(img, label)\n        else:\n            print(\"Sample {0} is already adversarial, name: {1}\".format(num, filename))\n            img = tensor2img(img)\n            #print(img.shape)\n            image_name, image_ext = filename.split('.')\n            save_adv_image(img, output_dir + image_name + '.png')\n            num += 1\n            cout += 1\n            continue\n        image_name, image_ext = filename.split('.')\n        ##Save adversarial image(.png)\n        save_adv_image(adv_img, output_dir+image_name+'.png')\n\n        org_img = tensor2img(img)\n        score = calc_mse(org_img, adv_img)\n        mse += score\n        num += 1\n    print(\"Successful attacks: {}\".format(120-cout))\n    print(\"ADV {} files, AVG MSE: {} \".format(len(original_files), mse/len(original_files)))\n\n\ndef main():\n    #gen_adv(0)\n    gen_adv()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"attack_code/attack_second.py","file_name":"attack_second.py","file_ext":"py","file_size_in_byte":11342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"568369067","text":"import argparse\nimport os\nfrom glob import glob\nfrom math import floor, ceil\nimport cv2\nimport matplotlib.pyplot as plt\nimport xml.etree.ElementTree as ET\nimport pandas as pd\nimport numpy as np\nimport pydicom\n\n\nparser = argparse.ArgumentParser(description='Convert INbreast dataset annotations to PASCAL VOC 2007 structure')\nparser.add_argument('inbreast_dir', help='input INbreast dataset base dir')\nparser.add_argument('output_dir')\nparser.add_argument('--gen_mass', help='generate annotations for masses', action='store_true')\nparser.add_argument('--gen_calc', help='generate annotations for microcalcifications', action='store_true')\nparser.add_argument('--calc_padding', help='padding added to microcalcifications clusters', type=int, 
default=20)\nparser.add_argument('--pathology_filter', action='append', help='filter by pathology')\nparser.add_argument('--pathology_in_name', help='add pathology to annotation name', action='store_true')\n\nargs = parser.parse_args()\n\nxml_dir = os.path.join(args.inbreast_dir, 'AllXML')\ndcm_dir = os.path.join(args.inbreast_dir, 'AllDICOMs')\noutput_dir = os.path.join(args.output_dir, 'VOC2007')\ngen_mass = args.gen_mass\ngen_calc = args.gen_calc\n\n###\n# prepare data\n###\n\n# cases\n\nprint('reading dataset cases...')\n\ncases_fn = os.path.join(args.inbreast_dir, 'INbreast.csv')\ncases_df = pd.read_csv(cases_fn, sep=';')\ncases_df.drop(columns=['Patient ID', 'Patient age', 'Acquisition date'], inplace=True)\ncases_df.rename(columns={'Laterality': 'laterality', 'View': 'view', 'Acquisition date': 'acquisition_date', 'File Name': 'id', 'ACR': 'breast_density', 'Bi-Rads': 'birads_specific'}, inplace=True)\ncases_df['birads'] = cases_df['birads_specific'].apply(lambda x: x[0])\n\nprint('found %d cases' % len(cases_df))\n\n\ndef pathology(birads):\n    if birads >= 1 and birads <= 3:\n        return 'BENIGN'\n    elif birads > 3:\n        return 'MALIGNANT'\ncases_df['pathology'] = cases_df['birads'].apply(lambda x: pathology(int(x)))\n\npathology_filter = [x.upper() for x in (args.pathology_filter or [])]\nif len(pathology_filter) > 0:\n    cases_df = cases_df[cases_df.pathology.isin(pathology_filter)]\n\nprint('using %d cases' % len(cases_df))\nprint('')\n\n\n# rois\n\nprint('reading rois...')\n\nrois = []\nfor id in cases_df.id:\n    roi_fn = os.path.join(xml_dir, '%d.xml' % id)\n    if not os.path.exists(roi_fn):\n        print('File not found %s' % roi_fn)\n        continue\n    tree = ET.parse(roi_fn)\n    root = tree.getroot()\n    for elem_roi in root.findall('./dict/array/dict/array/dict'):\n        abnormality = elem_roi[15].text\n        if not ((abnormality == 'Mass' and gen_mass) or (abnormality == 'Calcification' and gen_calc)):\n            continue\n        index = elem_roi[7].text\n        points_x = []\n        points_y = []\n        for elem_point in elem_roi[21].findall('./'):\n            x, y = elem_point.text.strip('()').split(',')\n            points_x.append(int(float(x)))\n            points_y.append(int(float(y)))\n        rois.append([id, int(index), abnormality, min(points_x), min(points_y), max(points_x), max(points_y)])\nrois_df = pd.DataFrame(rois, columns=['id', 'index', 'abnormality', 'min_x', 'min_y', 'max_x', 'max_y'])\n\nprint('using %d rois' % len(rois_df))\nprint('')\n\n\n# metadata\n# TODO: generate the patient number\n\nprint('reading dicoms metadata...')\n\ndcm_data = []\nfor id in rois_df.id.unique():\n    print('\\r%s' % id, end=\"\")\n    dcm_fns = glob(os.path.join(dcm_dir, str(id) + '*.dcm'))\n    assert len(dcm_fns) == 1\n    dcm_fn = dcm_fns[0]\n    dcm = pydicom.dcmread(dcm_fn)\n    dcm_data.append([id, dcm.Rows, dcm.Columns, os.path.basename(dcm_fn)])\ndcm_df = pd.DataFrame(dcm_data, columns=['id', 'rows', 'columns', 'dcm_fn'])\n\nprint('')\nprint('found %s dicom files' % len(dcm_df))\nprint('')\n\n# join dataframes\n# TODO: use a left join in case there are no annotations\ndf = rois_df.set_index('id').join(cases_df.set_index('id')).join(dcm_df.set_index('id'))\n\n# expand calcifications bounding boxes\ndef expand_calcifications(r, padding):\n    if r['abnormality'] != 'Calcification':\n        return r\n    r['min_x'] = max(0, r['min_x'] - padding)\n    r['min_y'] = max(0, r['min_y'] - padding)\n    r['max_x'] = min(r['max_x'] + padding, r['columns'])\n    r['max_y'] = min(r['max_y'] + padding, r['rows'])\n    return r\ndf = df.apply(lambda r: expand_calcifications(r, args.calc_padding), axis=1)\n\n\n###\n# write annotations\n###\n\n# about pascal voc: what to do with pose and truncated?\n# about inbreast: what to do with acr, birads?\n\npathology_in_name = args.pathology_in_name\ndef to_xml(index, grouped):\n    folder = 'VOC2007'\n    filename = str(index) + '.jpg'\n    width = grouped['columns'].min()\n    height = grouped['rows'].min()\n    laterality = grouped['laterality'].min()\n    view = grouped['view'].min()\n    breast_density = grouped['breast_density'].min()\n    birads = grouped['birads'].min()\n    birads_specific = grouped['birads_specific'].min()\n    abnormality = grouped['abnormality'].min()\n    pathology = grouped['pathology'].min()\n    name = '%s%s' % (abnormality.title(), pathology.title()) if pathology_in_name else abnormality.title()\n    # iterate over cases\n    objs = []\n    for _, row in grouped.iterrows():\n        obj = '\\n'.join([\n            '    <object>',\n            '        <name>%s</name>' % name,\n            '        <difficult>0</difficult>',\n            '        <bndbox>',\n            '            <xmin>%d</xmin>' % row['min_x'],\n            '            <ymin>%d</ymin>' % row['min_y'],\n            '            <xmax>%d</xmax>' % row['max_x'],\n            '            <ymax>%d</ymax>' % row['max_y'],\n            '        </bndbox>',\n            '    </object>'\n        ])\n        objs.append(obj)\n    doc = '\\n'.join([\n        '<annotation>',\n        '    <folder>%s</folder>' % folder,\n        '    <filename>%s</filename>' % filename,\n        '    <laterality>%s</laterality>' % laterality,\n        '    <view>%s</view>' % view,\n        '    <breast_density>%s</breast_density>' % breast_density,\n        '    <birads>%s</birads>' % birads,\n        '    <birads_specific>%s</birads_specific>' % birads_specific,\n        '\\n'.join([\n            '    <size>',\n            '        <width>%s</width>' % width,\n            '        <height>%s</height>' % height,\n            '        <depth>3</depth>',\n            '    </size>'\n        ]),\n        '    <segmented>0</segmented>',\n        '\\n'.join(objs),\n        '</annotation>'\n    ])\n    return doc\n\n# write annotations xml\nannotations_dir = os.path.join(output_dir, 'Annotations')\nos.makedirs(annotations_dir, exist_ok=True)\nprint('writing annotations...')\nwrote = 0\nfor index, grouped in df.groupby('id'):\n    print('\\r%s' % index, end=\"\")\n    xml = to_xml(index, grouped)\n    xml_fn = os.path.join(annotations_dir, str(index) + '.xml')\n    with open(xml_fn, 'w') as f:\n        f.write(xml)\n    wrote += 1\nprint('')\nprint('')\nprint('wrote %d annotations files' % wrote)\n\n# write image sets\n\ntrainval = [str(x) for x in list(np.unique(df.index.values))]\ntest = trainval\n\nimage_sets_dir = os.path.join(output_dir, 'ImageSets', 'Main')\ntrainval_fn = os.path.join(image_sets_dir, 'trainval.txt')\ntest_fn = os.path.join(image_sets_dir, 'test.txt')\nos.makedirs(image_sets_dir, exist_ok=True)\n\nwith open(trainval_fn, 'w') as f:\n    f.write('\\n'.join(trainval))\nwith open(test_fn, 'w') as f:\n    f.write('\\n'.join(test))\n\n# images\n\njpeg_images_dir = os.path.join(output_dir, 'JPEGImages')\nos.makedirs(jpeg_images_dir, exist_ok=True)\n\nconvert_dcm_fn = os.path.join(jpeg_images_dir, 'convert_dcm.sh')\nwith open(convert_dcm_fn, 'w') as f:\n    for id, r in df.iterrows():\n        input_dcm_fn = os.path.join(dcm_dir, r['dcm_fn'])\n        output_jpg_fn = os.path.join(jpeg_images_dir, '%d.jpg' % id)\n        f.write('dcmj2pnm -d +oj +Jq 100 --min-max-window %s %s\\n' % (input_dcm_fn, output_jpg_fn))\n
","sub_path":"inbreast2pascal_voc.py","file_name":"inbreast2pascal_voc.py","file_ext":"py","file_size_in_byte":7602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"39417994","text":"\n\nfrom xai.brain.wordbase.nouns._date import _DATE\n\n#class header\nclass _DATES(_DATE, ):\n\tdef __init__(self,): \n\t\t_DATE.__init__(self)\n\t\tself.name = \"DATES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"date\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_dates.py","file_name":"_dates.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"408402126","text":"from model import *\nimport numpy as np\nimport tensorflow as tf\nimport time\nN = 128\n\n\nimg_ph = tf.placeholder(dtype=tf.float32, shape=[N, 
300, 300, 3])\nimg = np.ndarray(shape=(N, 300, 300, 3), dtype=np.float32, buffer=np.ones([N, 300, 300, 3]))\nret_cls, ret_loc = ssd_model(img_ph, False)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nt0 = time.time()\nsess.run([ret_cls, ret_loc], feed_dict={img_ph: img})\nprint(time.time() - t0)\nprint(np.shape(ret_cls), np.shape(ret_loc))\n","sub_path":"trash/testcode/test_forward_spped.py","file_name":"test_forward_spped.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"20514531","text":"input = open('input.txt', 'r')\ninput_list = input.read().splitlines()\ninput.close()\n\ninput_cable1 = input_list[0].split(',')\ninput_cable2 = input_list[1].split(',')\n\ndef get_points(cable) :\n x = 0\n y = 0\n answer = set()\n for pos in cable :\n operator = pos[0]\n distance = int(pos[1:])\n dx = {'L':-1, 'R':1, 'U':0, 'D':0}\n dy = {'L': 0, 'R': 0, 'U': 1, 'D': -1}\n\n for i in range(distance) :\n x += dx[operator]\n y += dy[operator]\n answer.add((x,y))\n\n return answer\n\nall_pos_cable1 = get_points(input_cable1)\nall_pos_cable2 = get_points(input_cable2)\njoin = all_pos_cable1&all_pos_cable2\n\ncommons = [(x,y) for (x,y) in join]\nlist_cable1 = list(all_pos_cable1)\nlist_cable2 = list(all_pos_cable2)\n\ndef get_steps(coords) :\n index1 = list_cable1.index(coords)\n index2 = list_cable2.index(coords)\n total_steps = len(list_cable1[:index1]) + len(list_cable2[:index2])\n return total_steps\n\nstep_counts = set()\nfor coords in commons :\n steps = get_steps(coords)\n step_counts.add(steps)\n\nanswer = min(step_counts)\nprint(answer)\n\n\n","sub_path":"advent-of-code/03/day3_part2.py","file_name":"day3_part2.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"193257943","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# To run in terminal\n# $ cd /home/james/Documents/Edoc/3Nohtyp/Python_By_Example/Example_058\n# $ python3 Example_058.py\n# $ python3 Example_058.py Output.txt\n\n\"\"\"\nPython by Example: Learning to Program in 150 Challenges by Nichola Lacey\n\n058\nMake a maths quiz that asks five questions by randomly\ngenerating two whole numbers to make the question\n(e.g. [num1] + [num2]). Ask the user to enter the\nanswer. If they get it right add a point to their score. At\nthe end of the quiz, tell them how many they got correct\nout of five.\n\n\"\"\"\n\nimport random\n\nprint(__doc__)\n\nscore = 0\ncount = 0\nwhile count < 5 : \n\n num_1 = random.randint(0,20)\n num_2 = random.randint(0,20)\n sum = num_1 + num_2\n answer = int(input(f'What is the sum of {num_1} and {num_2}? 
'))\n print(f'The input for the answer is {answer}.')\n print(f'The sum of {num_1} and {num_2} is {sum}.')\n if answer == sum : \n print(f'Your answer is correct.')\n score += 1\n else : \n print('Wrong answer.')\n count += 1\n print()\n \nprint(f'Your score is {score} out of 5.')\n","sub_path":"Example_058/Example_058.py","file_name":"Example_058.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"442223766","text":"# This file is part of the Adblock Plus web scripts,\n# Copyright (C) 2017-present eyeo GmbH\n#\n# Adblock Plus is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# Adblock Plus is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Adblock Plus. If not, see .\n\nimport sys\nfrom urllib import urlencode\nfrom urllib2 import urlopen, HTTPError\n\nimport pytest\nfrom wsgi_intercept import (urllib_intercept, add_wsgi_intercept,\n remove_wsgi_intercept)\n\n# We are mocking the functions that use MySQLdb, so it is not needed. This\n# is to prevent the tests from crashing when they try to import it.\nsys.modules['MySQLdb'] = sys\nfrom sitescripts.reports.web.updateReport import handleRequest\n\nLOCAL_HOST = 'test.local'\nREMOTE_HOST = 'reports.adblockplus.org'\nPORT = 80\nPLAINTEXT_GUID = '12345678-1234-1234-1234-123456789abc'\nUR_PATH = 'sitescripts.reports.web.updateReport.'\n\n\ndef intercept_fn(environ, start_response):\n assert environ['SERVER_NAME'] == REMOTE_HOST\n assert PLAINTEXT_GUID in environ['PATH_INFO']\n return 'Intercepted!'\n\n\n@pytest.fixture\ndef response_for():\n \"\"\"Register two intercepts, and return responses for them.\"\"\"\n urllib_intercept.install_opener()\n add_wsgi_intercept(LOCAL_HOST, PORT, lambda: handleRequest)\n add_wsgi_intercept(REMOTE_HOST, 443, lambda: intercept_fn)\n\n def response_for(data):\n url = 'http://{}:{}'.format(LOCAL_HOST, PORT)\n response = urlopen(url, urlencode(data))\n return response.code, response.read()\n\n yield response_for\n remove_wsgi_intercept()\n\n\n@pytest.fixture\ndef form_data():\n return {\n 'email': 'jane_doe@example.com',\n 'secret': '92b3e705f2abe74c20c1c5ea9abd9ba2',\n 'guid': PLAINTEXT_GUID,\n 'status': 'x' * 1025,\n 'usefulness': 0,\n 'notify': 'test NOTIFY',\n 'message': 'test MESSAGE',\n 'subject': 'test SUBJECT',\n 'name': 'test NAME',\n }\n\n\n@pytest.mark.parametrize('field,message', [\n (('guid', 'badGUID'), 'Invalid or missing report GUID'),\n (('secret', 'badSECRET'), 'Wrong secret value'),\n])\ndef test_http_errs(field, message, response_for, form_data, mocker):\n mocker.patch(UR_PATH + 'getReport', new=lambda *args: {'usefulness': 1})\n key, value = field\n form_data[key] = value\n with pytest.raises(HTTPError) as error:\n response_for(form_data)\n\n assert message in error.value.read()\n\n\ndef test_success(response_for, form_data, mocker):\n # These methods are patched to avoid the need for a MySQL database\n mocker.patch(UR_PATH + 'getReport', new=lambda *args: {'usefulness': 1,\n 'email': 'jane_doe@example.com'})\n sr_mock = mocker.patch(UR_PATH + 'saveReport')\n uuu_mock = mocker.patch(UR_PATH + 
'updateUserUsefulness')\n sun_mock = mocker.patch(UR_PATH + 'sendUpdateNotification')\n\n assert response_for(form_data) == (200, '\\nIntercepted!')\n\n assert sr_mock.call_count == 1\n for key in ['usefulness', 'email']:\n assert key in sr_mock.call_args[0][1]\n assert sr_mock.call_args[0][1][key] == str(form_data[key])\n\n assert '0' in uuu_mock.call_args[0] and 1 in uuu_mock.call_args[0]\n\n for key in ['email', 'status']:\n assert key in sun_mock.call_args[0][0]\n assert sun_mock.call_args[0][0]['email'] == form_data['email']\n\n # These should not be equal, because updateReport.py strips characters\n # over 1024, and form_data['status'] has 1025.\n assert str(sr_mock.call_args[0][1]['status']) != form_data['status']\n assert str(sun_mock.call_args[0][0]['status']) != form_data['status']\n\n\ndef test_get_report_error(response_for, form_data, mocker):\n mocker.patch(UR_PATH + 'getReport', new=lambda *args: None)\n with pytest.raises(HTTPError) as error:\n response_for(form_data)\n\n assert 'Report does not exist' in error.value.read()\n","sub_path":"sitescripts/reports/tests/test_updateReport.py","file_name":"test_updateReport.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"121261169","text":"from application.database.database import Database\nimport sqlite3\n\n\nclass SQLiteDatabase(Database):\n\n def __init__(self, db_schema=\":memory:\"):\n conn = sqlite3.connect(db_schema)\n super().__init__(conn)\n\n def run_scripts(self, sql_file):\n with open(sql_file) as sql_file:\n sql_scripts = sql_file.read()\n with self.conn as conn:\n conn.executescript(sql_scripts)\n","sub_path":"application/database/sqlite_database.py","file_name":"sqlite_database.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"46875404","text":"import tkinter as tk\nimport simpleaudio as sa\nimport cv2\n\n\ndef insert_header(container, text):\n heading = tk.Label(\n container,\n bg=\"white\",\n fg=\"black\",\n text=text,\n font='none 24 bold'\n )\n heading.place(relx=0.15, rely=0.1, anchor=tk.W)\n\n\ndef create_frame(container, row):\n frame = tk.Frame(container)\n frame.configure(bg='white')\n frame.place(\n relx=0.15,\n rely=(0.1 * row),\n anchor=tk.W\n )\n\n return frame\n\n\ndef create_label(master, text, row, col, fix_text=True):\n if (fix_text):\n label_text = tk.Label(\n master=master,\n text=text,\n bg=\"white\",\n fg=\"black\"\n )\n\n else:\n label_text = tk.Label(\n master=master,\n textvariable=text,\n bg=\"white\",\n fg=\"black\"\n )\n\n label_text.grid(row=row, column=col, sticky=tk.W)\n\n\ndef create_button(master, text, action, row, col):\n button = tk.Button(\n master=master,\n text=text,\n command=action,\n bg=\"white\",\n fg=\"black\"\n )\n button.grid(row=row, column=col, sticky=tk.W)\n\n\ndef create_check_button(master, text, variable, row, col):\n check_button = tk.Checkbutton(\n master=master,\n text=text,\n variable=variable,\n bg=\"white\",\n fg=\"black\"\n )\n check_button.grid(row=row, column=col, sticky=tk.W)\n\n\ndef create_radio_button(master, text, value, variable, row, col, command):\n radio_button = tk.Radiobutton(\n master=master,\n text=text,\n value=value,\n variable=variable,\n command=command,\n bg=\"white\",\n fg=\"black\"\n )\n radio_button.grid(row=row, column=col, sticky=tk.W)\n\n\ndef create_entry(master, default, row, col):\n\n entry = tk.Entry(master=master)\n 
entry.configure(bg=\"white\", fg=\"black\")\n    entry.grid(row=row, column=col, sticky=tk.W)\n\n    if (default != \"\"):\n        entry.insert(tk.END, default)\n\n    return entry\n","sub_path":"src/helper/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"587126029","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\n\np=input('Digite o valor de p: ')\nq=input('Digite o valor de q: ')\ncont1=0\ncont2=0\n\nfor i in range (1, p+1, 1) :\n    p%p==0\n    cont1=cont1+1\nfor i in range (1, q+1, 1) :\n    q%q==0\n    cont2=cont2+1\nif cont1==1 and cont2==1 :\n    if p==q+2 :\n        print ('S')\n    else :\n        print ('N')\nelse :\n    print ('os numeros inseridos não são primos')\n    ","sub_path":"moodledata/vpl_data/37/usersdata/83/12770/submittedfiles/questao3.py","file_name":"questao3.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"373625908","text":"import random\n\nnum_1 = 0\nnum_2 = 0\nnum_3 = 0\nnum_4 = 0\nnum_5 = 0\nnum_6 = 0\nnum_times = [x * 1 for x in range(1000)]\nfor i in num_times:\n    result = random.choice(range(1, 7))\n    if result == 1:\n        num_1 += 1\n    elif result == 2:\n        num_2 += 1\n    elif result == 3:\n        num_3 += 1\n    elif result == 4:\n        num_4 += 1\n    elif result == 5:\n        num_5 += 1\n    else:\n        num_6 += 1\nprint(num_1)\nprint(num_2)\nprint(num_3)\nprint(num_4)\nprint(num_5)\nprint(num_6)\n","sub_path":"Other/Python Scripts/gaus.py","file_name":"gaus.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"521862015","text":"'''Run this from the working directory!'''\nimport pygame\nimport time\nfrom storage import *\n\n\npygame.init()\n\nusr_name = ''\nscore = 0\nright_answers_loop_1 = 'count+1'\nright_answers_loop_2 = '10,0,-1', '100-1' \n\nW, H, FPS = 800, 600, 30\ncol_in = (0, 200, 200)\ncol_out = (255, 241, 55)\n\napp = pygame.display.set_mode((W, H))\npygame.display.set_caption('BAUMAN PYTHONERS')\npygame.display.set_icon(pygame.image.load(r'assets\\logo.png'))\nclock = pygame.time.Clock()\n\nback_img = pygame.image.load(r'assets\\menu.png')\nback_img_theme = pygame.image.load(r'assets\\bg.png')\n\nifelse_img_s = pygame.image.load(r'assets\\ifelse_s.jpg')\nifelse_img = pygame.image.load(r'assets\\ifelse.jpg')\nlist_img_s = pygame.image.load(r'assets\\list_s.jpg')\nlist_img = pygame.image.load(r'assets\\list.jpg')\nfunc_img_s = pygame.image.load(r'assets\\func_s.jpg')\nfunc_img = pygame.image.load(r'assets\\func.jpg')\nloop_img_s = pygame.image.load(r'assets\\loop_s.jpg')\nloop_img = pygame.image.load(r'assets\\loop.jpg')\nrocket_0_img = pygame.image.load(r'assets\\rocket0.png')\nrocket_1_img = pygame.image.load(r'assets\\rocket7.png')\nrocket_2_img = pygame.image.load(r'assets\\rocket6.png')\n\ns1_m, s2_m, s3_m, s4_m, s5_m = [pygame.image.load(r'assets\\star' + str(i) + '_s_w.png') for i in range(1, 6)] \ns1, s2, s3, s4, s5 = [pygame.image.load(r'assets\\star' + str(i) + '.png') for i in range(1, 6)] \n\n\npygame.mixer.music.set_volume(0.1)\n\n\ndef is_clicked_with_sound(need_delay=True):\n\tclicked = pygame.mouse.get_pressed()\n\tif clicked[0] == 1: \n\t\tpygame.mixer.music.load(r'assets\\click.mp3')\n\t\tpygame.mixer.music.play()\n\t\tif need_delay:\n\t\t\tpygame.time.delay(500)\n\treturn clicked\n\n\ndef button_txt(x, y, w, h, mes, action=None, f_size=30, x_sh=25, y_sh=25):\n\tmouse = 
pygame.mouse.get_pos()\n\tclicked = is_clicked_with_sound()\n\n\tif x < mouse[0] < x + w and y < mouse[1] < y + h:\n\t\tpygame.draw.rect(app, col_in, (x, y, w, h))\n\t\tif clicked[0] == 1:\n\t\t\tif action is not None:\n\t\t\t\taction()\n\t\t\t\tquit()\n\telse:\n\t\tpygame.draw.rect(app, col_out, (x, y, w, h))\n\t\n\tprint_text(mes, x + x_sh, y + y_sh, font_size=f_size) \n\n\ndef button_img(x_c, y_c, w_btn, h_btn, img, img_b, action=None):\n\tmouse = pygame.mouse.get_pos()\n\tclicked = is_clicked_with_sound(need_delay=False)\n\n\tw = 50\n\th = 50\n\tw_b = 100\n\th_b = 100\n\t\n\tx = x_c - w // 2\n\ty = y_c - h // 2\n\tx_b = x_c - w_b // 2\n\ty_b = y_c - h_b // 2\n\n\tif x < mouse[0] < x + w_btn and y < mouse[1] < y + h_btn:\n\t\tapp.blit(img_b, (x_b, y_b))\n\t\tif clicked[0] == 1:\n\t\t\tif action is not None:\n\t\t\t\taction()\n\t\t\t\tquit()\n\telse:\n\t\tapp.blit(img, (x, y))\n\t\n\ndef print_text(mes, x, y, font_col=(0, 0, 0), font_type=None, font_size=30):\n\tfont = pygame.font.Font(font_type, font_size)\n\ttext_surface = font.render(mes, True, font_col)\n\tapp.blit(text_surface, (x, y))\n\n\ndef draw_stars(is_animation=False):\n\tif is_animation is False:\n\t\tapp.blit(s1_m, (0, 35))\n\t\tapp.blit(s2_m, (70, 25))\n\t\tapp.blit(s3_m, (160, 15))\n\t\tapp.blit(s4_m, (260, 10))\n\t\tapp.blit(s5_m, (380, 0))\n\telse:\n\t\tapp.blit(s1, (0, 35))\n\t\tpygame.display.update()\n\t\tpygame.time.delay(500)\n\t\tapp.blit(s2, (70, 25))\n\t\tpygame.display.update()\n\t\tpygame.time.delay(500)\n\t\tapp.blit(s3, (160, 15))\n\t\tpygame.display.update()\n\t\tpygame.time.delay(500)\n\t\tapp.blit(s4, (260, 10))\n\t\tpygame.display.update()\n\t\tpygame.time.delay(500)\n\t\tapp.blit(s5, (380, 0))\n\t\tpygame.display.update()\n\t\tpygame.time.delay(500)\n\n\ndef loop():\n\tglobal score\n\n\tanswer_loop_1\t= ''\n\tis_end = False\n\tcount_ans = 0\n\tis_anim = False\n\twrong_ans = False\n\n\twhile True:\n\t\tif is_end:\n\t\t\tscore += 4 - count_ans\n\t\t\tloop_2()\n\t\t\tquit()\n\t\t\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\texit()\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tpygame.mixer.music.load(r'assets\\type.mp3')\n\t\t\t\tpygame.mixer.music.play()\n\n\t\t\t\tif event.key == pygame.K_RETURN:\n\t\t\t\t\tcount_ans += 1\n\t\t\t\t\tif ''.join(answer_loop_1.split()) == right_answers_loop_1:\n\t\t\t\t\t\tis_anim = True\n\t\t\t\t\t\tis_end = True\n\t\t\t\t\telse:\n\t\t\t\t\t\twrong_ans = True\n\t\t\t\telif event.key == pygame.K_BACKSPACE:\n\t\t\t\t\tanswer_loop_1 = answer_loop_1[:-1]\n\t\t\t\telse:\n\t\t\t\t\tif len(answer_loop_1) < 10:\n\t\t\t\t\t\tanswer_loop_1 += event.unicode\n\t\t\n\n\t\tkeys = pygame.key.get_pressed()\n\t\tif keys[pygame.K_ESCAPE]:\n\t\t\tpause()\n\n\t\tapp.blit(back_img_theme, (0, 0))\n\n\t\tx = 300\n\t\ty = 300\n\t\theight_line = 40\n\t\twith_tab = 30\n\t\tpygame.draw.rect(app, col_out, (x - 10, y - 10, 220, height_line * 4 + 20))\n\t\tpygame.draw.rect(app, (0, 0, 0), (x - 10, y - 10, 220, height_line * 4 + 20), 2)\n\t\tprint_text('count = 1', x, y, font_size=30)\n\t\tprint_text('while count < 6:', x, y + height_line * 1, font_size=30)\n\t\tprint_text('print(count)', x + with_tab, y + height_line * 2, font_size=30)\n\t\tprint_text('count = _________', x + with_tab, y + height_line * 3, font_size=30)\n\t\tbutton_txt(x + 25, y + height_line * 5.5, 150, 50, answer_loop_1, x_sh=20, y_sh=20)\n\t\t\n\t\tx = 100\n\t\ty = 180\n\t\theight_line = 30\n\n\t\tprint_text('В ряд стоят 5 звёздочек. 
Их нужно посчитать, ', x, y, font_size=30)\n\t\tprint_text('но вот незадача: из программы, которая должна была ', x, y + height_line, font_size=30)\n\t\tprint_text('это сделать потерялась часть строчки. Помоги ', x, y + height_line * 2, font_size=30)\n\t\tprint_text('её восстановить.', x, y + height_line * 3, font_size=30)\n\n\t\tif wrong_ans:\n\t\t\tif count_ans < 3:\n\t\t\t\tprint_text(\"Неверно!\", 330, 480, font_size=40, font_col=(200, 0, 0))\n\t\t\t\tpygame.display.update()\n\t\t\t\tpygame.time.delay(1000)\n\t\t\t\tanswer_loop_1 = ''\n\t\t\telse:\n\t\t\t\tprint_text(\"Попытки закончились (\", 260, 480, font_size=40, font_col=(200, 0, 0))\n\t\t\t\tpygame.display.update()\n\t\t\t\tpygame.time.delay(1000)\n\t\t\t\tis_end = True\n\t\t\twrong_ans = False\n\t\t\n\t\tdraw_stars(is_anim)\n\n\t\tpygame.display.update()\n\t\tclock.tick(FPS)\n\n\ndef loop_2():\n\tglobal score\n\n\tanswer_loop_2 = ''\n\tis_end = False\n\tcount_ans = 0\n\tis_anim = False\n\twrong_ans = False\n\n\tx_r = 100\n\ty_r = 450\n\tsign = 50\n\tspeed = 10\n\tvalue = 400\n\n\twhile True:\n\t\tif is_end:\n\t\t\tscore += 4 - count_ans\n\t\t\tend_app()\n\t\t\tquit()\n\t\t\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\texit()\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tpygame.mixer.music.load(r'assets\\type.mp3')\n\t\t\t\tpygame.mixer.music.play()\n\n\t\t\t\tif event.key == pygame.K_RETURN:\n\t\t\t\t\tcount_ans += 1\n\t\t\t\t\tif ''.join(answer_loop_2.split()) in right_answers_loop_2 or\\\n\t\t\t\t\t\t''.join(answer_loop_2.split(',')) in right_answers_loop_2:\n\t\t\t\t\t\tis_anim = True\n\t\t\t\t\telse:\n\t\t\t\t\t\twrong_ans = True\n\t\t\t\telif event.key == pygame.K_BACKSPACE:\n\t\t\t\t\tanswer_loop_2 = answer_loop_2[:-1]\n\t\t\t\telse:\n\t\t\t\t\tif len(answer_loop_2) < 10:\n\t\t\t\t\t\tanswer_loop_2 += event.unicode\n\t\t\n\n\t\tkeys = pygame.key.get_pressed()\n\t\tif keys[pygame.K_ESCAPE]:\n\t\t\tpause()\n\n\t\tapp.blit(back_img_theme, (0, 0))\n\n\t\tx = W // 2 - 100\n\t\ty = H // 2\n\t\theight_line = 40\n\t\twith_tab = 30\n\t\tpygame.draw.rect(app, (250, 250, 0), (x - 20, y - 20, 270, height_line * 4 + 20))\n\t\tpygame.draw.rect(app, (0, 0, 0), (x - 20, y - 20, 270, height_line * 4 + 20), 2)\n\t\tprint_text('for i in range(__, __, __):', x, y + height_line * 0, font_size=30)\n\t\tprint_text('print(i)', x + with_tab, y + height_line * 1, font_size=30)\n\t\tprint_text(\"print('Пуск!')\", x, y + height_line * 2, font_size=30)\n\t\tbutton_txt(x + 25, y + height_line * 5.5, 150, 50, answer_loop_2, x_sh=20, y_sh=20)\n\n\n\t\tprint_text('До запуска ракеты остается 10 секунд. Но из ', W // 4 - 40, H // 2 - 120, font_size=30)\n\t\tprint_text('программы, которая должна была вести отсчет до нуля, ', W // 4 - 40, H // 2 - 90, font_size=30)\n\t\tprint_text('потерялась часть условия. Помоги его восстановить. 
', W // 4 - 40, H // 2 - 60, font_size=30)\n\t\t\n\t\tapp.blit(rocket_1_img, (x_r, y_r))\n\n\t\tif is_anim:\n\t\t\ty_r -= speed // 10\n\t\t\tx_r += sign // 10\n\t\t\tsign *= -1\n\t\t\tif sign > 0:\n\t\t\t\tsign -= 1\n\t\t\t\tspeed += 1\n\t\t\tif y_r > value:\n\t\t\t\tapp.blit(rocket_0_img,(x_r, y_r)) \n\t\t\telse:\n\t\t\t\tapp.blit(rocket_2_img,(x_r, y_r))\n\t\t\tif y_r == -90:\n\t\t\t\tis_end = True\n\t\t\t\n\t\t\n\t\tif wrong_ans:\n\t\t\tif count_ans < 3:\n\t\t\t\tprint_text(\"Неверно!\", 330, 480, font_size=40, font_col=(200, 0, 0))\n\t\t\t\tpygame.display.update()\n\t\t\t\tpygame.time.delay(1000)\n\t\t\t\tanswer_loop_2 = ''\n\t\t\telse:\n\t\t\t\tprint_text(\"Попытки закончились (\", 260, 480, font_size=40, font_col=(200, 0, 0))\n\t\t\t\tpygame.display.update()\n\t\t\t\tpygame.time.delay(1000)\n\t\t\t\tis_end = True\n\t\t\twrong_ans = False\n\n\t\tpygame.display.update()\n\t\tclock.tick(FPS)\n\n\ndef chose_theme():\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\texit()\n\n\t\tkeys = pygame.key.get_pressed()\n\t\tif keys[pygame.K_ESCAPE]:\n\t\t\tpause()\n\n\t\tapp.blit(back_img, (0, 0))\n\n\t\tbutton_img(100, 200, 200, 100, loop_img_s, loop_img, action=loop)\n\t\tbutton_img(550, 200, 200, 100, ifelse_img_s, ifelse_img)\n\t\tbutton_img(100, 400, 200, 100, list_img_s, list_img)\n\t\tbutton_img(550, 400, 200, 100, func_img_s, func_img)\n\t\t\n\t\tpygame.display.update()\n\t\tclock.tick(FPS)\n\n\ndef pause():\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\texit()\n\n\t\tkeys = pygame.key.get_pressed()\n\n\t\tif keys[pygame.K_n]:\n\t\t\treturn\n\n\t\tapp.blit(back_img, (0, 0))\n\t\tprint_text('PAUSE', W // 2, H // 2, font_size=50)\n\t\tprint_text('Press n to continue', W // 2, H // 2 + 100, font_size=50)\n\t\t\n\t\tpygame.display.update()\n\t\tclock.tick(FPS)\n\n\ndef check_name_existance():\n\tlist_names = [user[0] for user in data.select_all()]\n\tif usr_name in list_names:\n\t\tprint_text(\"Такое имя уже занято!\", 280, 500, font_size=30, font_col=(200, 0, 0))\n\t\tpygame.display.update()\n\t\tpygame.time.delay(3000)\n\t\treturn True\n\telse:\n\t\treturn False\n\t\t\n\ndef usr_name_input():\n\tglobal usr_name\n\n\tusr_name = ''\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\texit()\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tpygame.mixer.music.load(r'assets\\type.mp3')\n\t\t\t\tpygame.mixer.music.play()\n\n\t\t\t\tif event.key == pygame.K_RETURN:\n\t\t\t\t\tif usr_name:\n\t\t\t\t\t\tif check_name_existance() is True:\n\t\t\t\t\t\t\tusr_name = ''\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tchose_theme()\n\t\t\t\telif event.key == pygame.K_BACKSPACE:\n\t\t\t\t\tusr_name = usr_name[:-1]\n\t\t\t\telse:\n\t\t\t\t\tif len(usr_name) < 10:\n\t\t\t\t\t\tusr_name += event.unicode\n\n\t\tapp.blit(back_img, (0, 0))\n\n\t\tprint_text('Пожалуйста, введи ник (меньше 10 символов)', W // 5 + 20, 200, font_size=30)\n\t\tprint_text('Затем нажми на \\'enter\\'', W // 3 + 15, 250, font_size=30)\n\n\t\tbutton_txt(320, 300, 150, 70, usr_name, x_sh=20, y_sh=30)\n\t\tbutton_txt(335, 400, 110, 70, 'Quit', x_sh=30, y_sh=30, action=exit)\n\n\t\tpygame.display.update()\n\t\tclock.tick(FPS // 2)\n\t\n\ndef menu():\n\tglobal score, usr_name\n\t\n\tscore = 0\n\tusr_name = ''\n\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\texit()\n\n\t\tapp.blit(back_img, (0, 0))\n\n\t\tbutton_txt(335, 200, 110, 70, 'Старт', x_sh=25, 
y_sh=27, action=usr_name_input)\n\t\tbutton_txt(335, 300, 110, 70, 'Выход', x_sh=20, y_sh=27, action=exit)\n\n\t\tpygame.display.update()\n\t\tclock.tick(FPS)\n\n\ndef show_list():\n\tglobal data\n\n\tlist_data\t= sorted(data.select_all(), key=lambda x: -x[1])[:5]\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\texit()\n\n\t\tapp.blit(back_img, (0, 0))\n\n\t\tfor i in range(len(list_data)):\n\t\t\tbutton_txt(W // 7 + W // 25 + 100, H // 3 + i * H // 10 - 50,\\\n\t\t\t\tW // 5, H // 9, list_data[i][0], x_sh=30, y_sh=20)\n\t\t\tbutton_txt(W // 7 + W // 25 + 250, H // 3 + i * H // 10 - 50,\\\n\t\t\t\tW // 5, H // 9, str(list_data[i][1]), x_sh=70, y_sh=20)\n\t\t\n\t\tbutton_txt(340, 470, 110, 50, 'Меню', x_sh = 25, y_sh = 20, action=menu)\n\t\tbutton_txt(340, 530, 110, 50, 'Выход', x_sh = 20, y_sh = 15, action=exit)\n\n\t\tpygame.display.update()\n\t\tclock.tick(FPS // 2)\n\n\ndef end_app():\n\tdata.insert_into(usr_name, score)\n\t\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\texit()\n\t\t\n\t\tapp.blit(back_img, (0, 0))\n\t\tif score > 5:\n\t\t\tprint_text('Молодец!', W // 2 - 50, 170, font_size=30)\n\t\telif score > 3:\n\t\t\tprint_text('Нeплохо.', W // 2 - 50, 170, font_size=30)\n\t\telse:\n\t\t\tprint_text('Знаю, ты можешь лучше!', W // 2 - 130, 170, font_size=30)\n\t\tx = 310\n\t\ty = H // 2 - 20\n\t\tprint_text(usr_name, x + 130 - len(usr_name) * 15, y - 40, font_size=30)\n\t\tprint_text('Ты набрал', x, y, font_size=30)\n\t\tprint_text(str(score), x + 115, y, font_size=30)\n\t\tif score > 5:\n\t\t\tprint_text('баллов', x + 130, y, font_size=30)\n\t\telif score >= 2:\n\t\t\tprint_text('балла', x + 130, y, font_size=30)\n\t\telse:\n\t\t\tprint_text('балл', x + 130, y, font_size=30)\n\n\t\tbutton_txt(340, 350, 110, 70, 'Топ', x_sh = 35, y_sh = 30, action=show_list)\n\t\tbutton_txt(340, 450, 110, 70, 'Выход', x_sh = 20, y_sh = 30, action=exit)\n\n\t\tpygame.display.update()\n\t\tclock.tick(FPS)\n\n\ndata = Storage()\nmenu()\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":12871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"268707498","text":"# Echo client program\r\nimport socket\r\n\r\nHOST = '127.0.0.1' \r\nPORT = 50007 \r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.connect((HOST, PORT))\r\ndata = 'Hello world'\r\ns.send(data.encode('utf-8'))\r\nprint('Send[1]: ', data)\r\ndata = s.recv(1024)\r\ns.close()\r\nprint('Received[4]: ', data)\r\n","sub_path":"labs/lab3_server/easy_client.py","file_name":"easy_client.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"285849212","text":"import importlib\nimport inspect\nimport pkgutil\n\nimport euler.problems\n\nfrom timeit import default_timer as timer\n\npackages_spec = {'path': euler.problems.__path__, \n 'prefix': euler.problems.__name__ + '.',\n 'onerror': lambda x: None}\n\npackages = [name for importer, name, ispkg \n in pkgutil.walk_packages(**packages_spec)]\n\nmodules = map(importlib.import_module, packages)\n\nclass_check = lambda m: inspect.isclass(m) and issubclass(m, euler.problems.Problem) and euler.problems.Problem is not m\nclasses = [member[1] for module in modules for member in inspect.getmembers(module, class_check)]\n\nproblem_objects = [c() for c in classes]\n\nproblems_index_by_id = {o.problem_id: o for o in 
problem_objects}\nproblems_index_by_name = {o.name.replace(' ', '_'): o for o in problem_objects}\n\nclass ProblemNotFound(KeyError):\n pass\n\nclass Euler:\n def find_by_name(self, name):\n try:\n return problems_index_by_name[name]\n except KeyError as e:\n raise ProblemNotFound(*e.args)\n\n def find(self, problem_id):\n try:\n return problems_index_by_id[problem_id]\n except KeyError as e:\n raise ProblemNotFound(*e.args)\n\n def list(self):\n return [dict(o) for o in problem_objects]\n\n def _timed(self, func):\n start = timer()\n result = func()\n end = timer()\n return (end - start, result)\n\n def run(self, problem_id):\n problem = self.find(problem_id)\n test_elapsed, test_result = self._timed(problem.solve_for_test)\n answer_elapsed, answer_result = self._timed(problem.solve_for_answer)\n\n result = dict(problem)\n result['test'] = {'elapsed': test_elapsed, 'result': test_result}\n result['answer'] = {'elapsed': answer_elapsed, 'result': answer_result}\n\n return result\n\n\n\n \n\n","sub_path":"euler-python/euler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"302185185","text":"from django.shortcuts import render, render_to_response, redirect\nfrom datetime import datetime, date, timedelta\nfrom sale.models import TransportType, EngineCapacity, PassengerSeats, \\\n BearingCapacity, PersonType, DocType, ClientData\nfrom calculator import operations, views\nimport utils\nimport json\n\n\ndef auto_step_1(request):\n\n if request.POST:\n transport_type = int(request.POST['car[type]'])\n engine_capacity = request.POST['car[more][1]']\n person_type = request.POST['person']\n fin = request.POST['fin']\n doc_serie = request.POST['doc[serie]']\n doc_num = request.POST['doc[num]']\n try:\n if request.POST['military'] == '1':\n military = True\n else:\n military = False\n except Exception:\n military = False\n try:\n voen = request.POST['voen']\n except Exception:\n voen = None;\n\n popular_mark_ids = (185, 200, 221, 229, 258, 278, 288, 308, 322, 332, 333, 360, 364, 379, 386, 399, 404,\n 463, 478, 500)\n car_manufacturers = operations.GetClassifier('docflow.AutoMark').filter(id__in=popular_mark_ids).order_by(\n 'title').execute()\n car_models_dict = {}\n\n for car_manufacturer in car_manufacturers:\n auto_models = utils.get_auto_models(transport_type, int(car_manufacturer.id))\n car_models = {}\n\n for auto_model in auto_models:\n car_models[auto_model.id] = str(auto_model.title)\n\n car_models_dict[car_manufacturer.id] = car_models\n\n\n return render_to_response('sale/auto/sale-step-2.html', {\n 'car_models_dict': json.dumps(car_models_dict),\n })\n\n transport_types = TransportType.objects.all().order_by('id')\n\n engine_capacitys = EngineCapacity.objects.all()\n # engine_capacitys = operations.GetClassifier('docflow.LibTable2_44').execute()\n\n passenger_seats = PassengerSeats.objects.all()\n # passenger_seats = operations.GetClassifier('docflow.LibTable2_48').execute()\n\n bearing_capasitys = BearingCapacity.objects.all()\n # bearing_capasitys = operations.GetClassifier('docflow.LibTable2_47').execute()\n\n person_types = PersonType.objects.all()\n doc_types = DocType.objects.all()\n js = '';\n for transport_type in transport_types:\n if transport_type.description == 'none':\n js = js + str(transport_type.id) + ': {'\n for person_type in person_types:\n js = js + str(person_type.id) + ': {'\n for doc_type in doc_types:\n if person_type.id == 1:\n js 
= js + str(doc_type.id) + ': ' + str(transport_type.premium_individual) + ','\n if person_type.id == 2:\n js = js + str(doc_type.id) + ': ' + str(transport_type.premium_entity) + ','\n js = js + '},'\n js = js + '},'\n if transport_type.description == '#carWeight':\n js = js + str(transport_type.id) + ': {'\n for bearing_capasity in bearing_capasitys:\n js = js + str(bearing_capasity.id) + ': {'\n for person_type in person_types:\n js = js + str(person_type.id) + ': {'\n for doc_type in doc_types:\n if person_type.id == 1:\n js = js + str(doc_type.id) + ': '+ str(transport_type.premium_individual+bearing_capasity.premium_individual)+', '\n if person_type.id == 2:\n js = js + str(doc_type.id) + ': '+ str(transport_type.premium_entity+bearing_capasity.premium_entity)+', '\n js = js+'},'\n js = js + '},'\n js = js + '},'\n if transport_type.description == '#carSize':\n js = js + str(transport_type.id) + ': {'\n for passenger_seat in passenger_seats:\n js = js + str(passenger_seat.id) + ': {'\n for person_type in person_types:\n js = js + str(person_type.id) + ': {'\n for doc_type in doc_types:\n if person_type.id == 1:\n js = js + str(doc_type.id) + ': '+ str(transport_type.premium_individual+passenger_seat.premium_individual)+', '\n if person_type.id == 2:\n js = js + str(doc_type.id) + ': '+ str(transport_type.premium_entity+passenger_seat.premium_entity)+', '\n js = js+'},'\n js = js + '},'\n js = js + '},'\n if transport_type.description == '#carEngine':\n js = js + str(transport_type.id) + ': {'\n for engine_capacity in engine_capacitys:\n js = js + str(engine_capacity.id) + ': {'\n for person_type in person_types:\n js = js + str(person_type.id) + ': {'\n for doc_type in doc_types:\n if person_type.id == 1:\n js = js + str(doc_type.id) + ': '+ str(transport_type.premium_individual+engine_capacity.premium_individual)+', '\n if person_type.id == 2:\n js = js + str(doc_type.id) + ': ' + str(\n transport_type.premium_entity + engine_capacity.premium_entity) + ', '\n js = js+'},'\n js = js + '},'\n js = js + '},'\n\n\n\n\n return render_to_response('sale/auto/sale-step-1.html', {\n 'transport_types': transport_types,\n 'engine_capacitys': engine_capacitys,\n 'passenger_seats': passenger_seats,\n 'bearing_capasitys': bearing_capasitys,\n 'person_types': person_types,\n 'doc_types': doc_types,\n 'js': js,\n\n })\n\n\ndef auto_step_2(request):\n\n transport_types = TransportType.objects.all()\n person_types = PersonType.objects.all()\n territoryes = operations.GetClassifier('docflow.Country').filter(id__in=(4, 60, 170, 213, 216)).execute(),\n\n\n # AUDI, BMW, CHEVROLET, DAEWOO, FORD, HONDA, HYUNDAI, KIA, LADA, LIFAN, MERCEDES BENZ, NISSAN, OPEL, LEXUS,\n # RANGE ROVER, SKODA, TOYOTA, VOLKSWGEN, MG, PORSHE\n popular_mark_ids = (185, 200, 221, 229, 258, 278, 288, 308, 322, 332, 333, 360, 364, 379, 386, 399, 404,\n 463, 478, 500)\n\n car_manufacturers = ''\n\n\n transport_selected = 1;\n percon_selected = 1;\n if request.POST:\n transport_selected = request.POST.get('car[type]')\n percon_selected = int(request.POST.get('person'))\n transport_type_selected = TransportType.objects.get(id=transport_selected)\n transport_types = TransportType.objects.all().filter(id=transport_selected)\n if transport_type_selected.value == 4:\n car_manufacturers = operations.GetClassifier('docflow.AutoMark').filter(id__in=popular_mark_ids).order_by(\n 'title').execute()\n else:\n try:\n car_manufacturers = 
operations.GetClassifier('docflow.AutoMark').filter(automodel__auto_type=transport_type_selected.value).distinct('title').order_by('title').execute()\n # car_models = operations.GetClassifier('docflow.AutoModel').filter(auto_type=transport_type_selected.value).order_by('title').execute()\n except Exception:\n return redirect('/sale/auto-step-1/')\n\n js = '';\n for car_manufacturer in car_manufacturers:\n js = js + str(car_manufacturer.id) + ': {'\n #car_models = CarModel.objects.all().filter(brand=car_manufacturer.id)\n car_models = operations.GetClassifier('docflow.AutoModel').filter(auto_type=transport_type_selected.value, mark=car_manufacturer.id).order_by('title').execute()\n for car_model in car_models:\n js = js + str(car_model.id) + ': { title: ' + '\"' + str(car_model.title)+'\"' + ', type: {'\n #car_type_car_models = CarTypeCarModel.objects.all().filter(car_model=car_model)\n for transport_type in transport_types:\n js = js + str(transport_type.value) + ': \"' + str(transport_type.transport_type.encode('utf-8').strip()) + '\",'\n js = js + '},'\n js = js +' more: {'\n for transport_type in transport_types:\n js = js + str(transport_type.value) + ': \"' + str(transport_type.description) + '\",'\n js = js + '},},'\n js = js + '},'\n\n return render_to_response('sale/auto/sale-step-2.html', {\n 'car_manufacturers': car_manufacturers,\n 'person_types': person_types,\n 'transport_types': transport_types,\n 'transport_selected': transport_selected,\n 'person_selected': percon_selected,\n 'territoryes': territoryes[0],\n 'js': js,\n })\n\n\ndef auto_step_3(request):\n\n if request.POST:\n car_engine = request.POST.get('car[engine]')\n car_size = request.POST.get('car[size]')\n car_weight = request.POST.get('car[weight]')\n value = ''\n\n if car_engine == '' and car_size == '':\n car_engine = 0\n car_size = 0\n\n type = str('Max weight'),\n value = car_weight\n\n elif car_size == '' and car_weight == '':\n car_size = 0\n car_weight = 0\n\n type = str('Capacity engine'),\n value = car_engine\n\n elif car_weight == '' and car_engine == '':\n car_weight = 0\n car_engine = 0\n\n type = str('Seats'),\n value = car_size\n\n\n client_data = ClientData(\n registration_number= request.POST.get('car[num]'),\n car_manufacturer= request.POST.get('car[make]'),\n car_model= request.POST.get('car[model]'),\n car_type= TransportType.objects.get(id=request.POST.get('car[type]')),\n car_engine= float(car_engine),\n car_size= float(car_size),\n car_weight= float(car_weight),\n person_type= PersonType.objects.get(id=request.POST.get('person[type]')),\n doc_type= DocType.objects.get(id=1),\n pin_code= request.POST.get('person[id]'),\n driver_license_series= request.POST.get('person[serie]'),\n driver_license_number= request.POST.get('person[num]'),\n start_date= datetime.strptime(str(request.POST.get('start')), '%d.%m.%Y'),\n territory= request.POST.get('territory'),\n phone= request.POST.get('tel'),\n email= request.POST.get('email'),\n )\n #client_data.set_premium();\n client_data.prem = 150;\n client_data.save()\n delta = timedelta(days=365)\n stop_date = client_data.start_date + delta\n car_manufacturer = operations.GetClassifier('docflow.AutoMark').filter(id=client_data.car_manufacturer).order_by('title').execute()\n car_model = operations.GetClassifier('docflow.AutoModel').filter(id=client_data.car_model).order_by('title').execute()\n territory = operations.GetClassifier('docflow.Country').filter(id=(int(client_data.territory))).execute()\n utils.get_tariff(client_data.car_type.value, 
client_data.car_engine, client_data.person_type.id,\n client_data.pin_code, client_data.driver_license_series, client_data.driver_license_number,\n\n )\n return render_to_response('sale/auto/sale-step-3.html', {\n 'client_data': client_data,\n 'type': type,\n 'value': value,\n 'stop_date': stop_date,\n 'car_manufacturer': car_manufacturer[0],\n 'car_model': car_model[0],\n 'territory': territory[0],\n })\n else:\n return redirect('/sale/auto-step-3/')\n\n","sub_path":"sale/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"498240873","text":"\"\"\"\nFont size used in this package.\n\"\"\"\n\n\n__all__ = [\n \"XXSMALL\", \"XSMALL\", \"SMALLER\",\n \"SMALL\", \"MEDIUM\", \"LARGE\",\n \"LARGER\", \"XLARGE\", \"XXLARGE\",\n]\n\n\nMEDIUM = 20\n\nSMALL = MEDIUM - 2\nXSMALL = SMALL - 2\nXXSMALL = XSMALL - 2\n\nLARGE = MEDIUM + 2\nXLARGE = LARGE + 2\nXXLARGE = XLARGE + 2\n\nLARGER = XLARGE\nSMALLER = XSMALL\n","sub_path":"FiguresForPublication/FontSize.py","file_name":"FontSize.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"614638562","text":"import json\n\n\nusername = input(\"What is your name ?\")\n\nfilename = 'username.json'\n\nwith open(filename,'w') as f:\n\tjson.dump(username,f)\n\tprint(f'we will rememember you when you come back {username}')","sub_path":"src/chapter09/remeber_me.py","file_name":"remeber_me.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"346064794","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom . 
import views\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'can.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'index/', 'app.views.index', name='index'),\n url(r'^app1/$', 'app.views.app1', name='app1'), #attention bien mettre $ car sinon la vue portfolio ne s'enclenche pas !\n url(r'login/', 'app.views.log_in', name='login'),\n url(r'logout/', 'app.views.log_out', name='logout'),\n url(r'^app1/portfolio/(?P\\d+)/$', views.portfolio, name='portfolio'),\n url(r'^app1/editportfolio/(?P\\d+)/$', views.edit_portfolio, name='edit_portfolio'),\n)\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"197254598","text":"import paho.mqtt.client as mqqt\nimport time\n\ndef on_message(client, userdata, message):\n print(\"Received massage: \", str(message.payload.decode(\"utf-8\")))\n\nmqqtBroker = \"mqtt.eclipseprojects.io\"\nclient = mqqt.Client(\"Smartphone\")\nclient.connect(mqqtBroker)\n\nclient.loop_start()\nclient.subscribe(\"TEMPERATURE\")\nclient.on_message = on_message\ntime.sleep(30)\nclient.loop_stop()","sub_path":"mqqt_subscribe.py","file_name":"mqqt_subscribe.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637015488","text":"from eval import Ev\nfrom string_distance.edit_distance import levenshtein\nimport time\nimport tqdm\nfrom vedastr.utils import Config\nfrom vedastr.runners import InferenceRunner\nimport cv2\nimport os\nimport sys\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))\n\n\nclass ocr:\n def __init__(self, config, weights) -> None:\n super().__init__()\n self.config = config\n self.weights = weights\n cfg = Config.fromfile(self.config)\n\n deploy_cfg = cfg['deploy']\n common_cfg = cfg.get('common')\n cfg['batch_max_length'] = 40\n runner = InferenceRunner(deploy_cfg, common_cfg)\n runner.load_checkpoint(self.weights)\n self.runner = runner\n\n def run(self, im):\n pred_str, probs = self.runner(im)\n return pred_str\n\n\nif __name__ == '__main__':\n ev = Ev()\n run = ocr(\"configs/small_satrn.py\",\n \"workdir/small_satrn/best_acc.pth\", ).run\n table_ocr_txt_path = \"../table_ocr/abs_val.txt\"\n with open(table_ocr_txt_path, \"r\") as f:\n gt_lines = f.readlines()\n for index, line in enumerate(gt_lines):\n name, value = line.strip(\"\\n\").split(\"\\t\")\n im = cv2.imread(name)\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n start = time.time()\n pre = ''.join(run(im))\n print(f\"{time.time()-start:.2f}\\t{pre}\\t{value}\")\n # ev.count(value, ''.join(pre))\n # print(f\"{time.time()-start:.2f}\\t{ev.socre()}\")\n","sub_path":"Predict.py","file_name":"Predict.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"68821293","text":"n = int(input())\nanswer=[]\nfor a in range(1001):\n for b in range(1667):\n if(5*a+3*b == n):\n answer.append(a+b)\nif(answer==[]):\n print(-1)\nelse:\n print(min(answer))","sub_path":"BOJ/python/2839(설탕 배달).py","file_name":"2839(설탕 배달).py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"84428417","text":"from json import JSONDecodeError\nfrom uuid import uuid4\nfrom django.core.validators import 
URLValidator\nfrom django.core.exceptions import ValidationError\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\n\nfrom main.rabbitmq_sender import publish_data_to_broker\n\n\ndef is_valid_url(url):\n    validate = URLValidator()\n    try:\n        validate(url)  # check if the url format is valid\n    except ValidationError:\n        return False\n\n    return True\n\n\n@csrf_exempt\n@require_http_methods(['POST', 'GET'])  # only get and post\ndef crawl(request):\n    # Post requests are for new crawling tasks\n    if request.method == 'POST':\n\n        # take the urls that come from the client.\n        try:\n            json_data = json.loads(request.body)\n            url_data = json_data['urls']\n            project_name = json_data['project_name']\n        except (JSONDecodeError, KeyError):\n            return JsonResponse({'Error': 'Missing URLs in the request payload or empty'})\n\n        if not url_data:\n            return JsonResponse({'Error': 'Missing urls key in the request payload'})\n\n        if not project_name:\n            return JsonResponse({'Error': 'Missing project_name key in the request payload'})\n\n        publish_url_ids = []\n        for url in url_data:\n            if not is_valid_url(url):\n                return JsonResponse({'error': url + ' URL is invalid'})\n\n            unique_id = str(uuid4())  # create a unique ID.\n            # serialize with json.dumps so the broker receives valid JSON\n            publish_data = json.dumps({'unique_id': unique_id, 'url': url, 'project_name': project_name})\n\n            try:\n                publish_data_to_broker(publish_data)\n                publish_url_ids.append(unique_id)\n            except Exception:\n                return JsonResponse({'status': \"500 BAD\", 'Exception': 'Can not publish data to the broker'})\n\n        return JsonResponse({'status': \"SUCCESS\", 'job_ids': publish_url_ids})\n\n","sub_path":"crawlerx_server/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"106524257","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport seaborn as sns\n\nd2_e = [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001, 0.00005, 0.00001, 0.000005]\nd2_acc = [0.7692, 0.7692, 0.7692, 0.7143, 0.7143, 0.4167, 0.303, 0.3226, 0.1163, 0.0437]\n\nd1_e = [0.1, 0.05, 0.01]\nd1_acc = [0.625, 0.625, 0.02]\n\npp = PdfPages(\"accept_rate_2d_1d.pdf\")\nf, ax = plt.subplots(figsize=(7, 7))\nsns.set(style=\"white\", palette=\"muted\", color_codes=True)\nplt.plot(d2_e, d2_acc, 'ro', color='g', label='2D')\nplt.plot(d1_e, d1_acc, 'bs', color='b', label='1D')\n\nax.set(xscale=\"log\")\n#plt.ylim([0, 0.5])\n#plt.xlim([0.0008, 11])\nplt.xlabel('Final epsilon')\nplt.ylabel('Acceptance rate')\nplt.legend(loc='upper left')\npp.savefig()\nplt.close()\npp.close()\n","sub_path":"pre-change/chapterABCFlow/images_flow_only/acceptance_rate_epsi.py","file_name":"acceptance_rate_epsi.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"652356512","text":"from django.shortcuts import render, redirect\nfrom homepage.models import Item, Lesson, Account, Chapter, Subject,UserDetail\nfrom datetime import datetime\nfrom django.http import JsonResponse\n\n# Create your views here.\n\ndef index(request):\n    if request.session.has_key('username'):\n        account = Account.objects.get(username = request.session['username'])\n        if account.accounttypeid.accounttypeid == 1:\n            items = Item.objects.all()\n            for item in items:\n                item.createdate = item.createdate\n                item.editdate = item.editdate\n            
userdetail=UserDetail.objects.get(accountid=account)\n context = {\n 'userdetail':userdetail,\n 'account':account,\n 'items': items}\n return render(request, 'adminitem/item_show.html', context)\n else:\n return redirect('homepage:index')\n else:\n return redirect('homepage:index')\n\ndef create(request):\n if request.session.has_key('username'):\n account = Account.objects.get(username = request.session['username'])\n if account.accounttypeid.accounttypeid == 1:\n if request.method == 'POST':\n item = Item( \n accountid = Account.objects.get(accountid = request.POST['accountid']),\n lessonid = Lesson.objects.get(lessonid = request.POST['lessonid']),\n itemname=request.POST['itemid'], \n createdate= datetime.now(), \n editdate= datetime.now(),\n description=request.POST['description'],\n content=request.POST['content'],\n order=request.POST['order'],\n isenable=request.POST['isenable'], \n note=request.POST['note'])\n item.save()\n return redirect('/adminitem/')\n else:\n lessons = Lesson.objects.all()\n accounts = Account.objects.all()\n subjects = Subject.objects.all()\n \n \n for lesson in lessons:\n lesson.createdate = lesson.createdate\n lesson.editdate = lesson.editdate\n\n for account in accounts:\n account.createdate = account.createdate\n account.editdate = account.editdate\n\n for subject in subjects:\n subject.createdate = subject.createdate\n subject.editdate = subject.editdate\n userdetail=UserDetail.objects.get(accountid=account)\n context = {\n 'userdetail':userdetail,\n 'account':account,\n 'lessons': lessons,\n 'accounts': accounts,\n 'subjects': subjects,\n }\n return render(request, 'adminitem/item_create.html', context)\n else:\n return redirect('homepage:index')\n else:\n return redirect('homepage:index')\n\ndef edit(request, id):\n if request.session.has_key('username'):\n account = Account.objects.get(username = request.session['username'])\n if account.accounttypeid.accounttypeid == 1:\n item = Item.objects.get(itemid=id)\n subjectid = item.lessonid.chapterid.subjectid\n\n item.createdate = item.createdate\n item.editdate = datetime.now()\n\n lessons = Lesson.objects.all()\n accounts = Account.objects.all()\n subjects = Subject.objects.all()\n for lesson in lessons:\n lesson.createdate = lesson.createdate\n lesson.editdate = lesson.editdate\n\n for account in accounts:\n account.createdate = account.createdate\n account.editdate = account.editdate\n\n for subject in subjects:\n subject.createdate = subject.createdate\n subject.editdate = subject.editdate\n\n userdetail=UserDetail.objects.get(accountid=account)\n context = {\n 'userdetail':userdetail,\n 'account':account,\n 'item': item,\n 'lessons': lessons,\n 'accounts': accounts,\n 'subjects': subjects,\n 'subjectid': subjectid,\n }\n return render(request, 'adminitem/item_edit.html', context)\n else:\n return redirect('homepage:index')\n else:\n return redirect('homepage:index')\n\ndef getNum(x):\n return int(''.join(ele for ele in x if ele.isdigit()))\n\ndef update(request, id):\n if request.session.has_key('username'):\n account = Account.objects.get(username = request.session['username'])\n if account.accounttypeid.accounttypeid == 1:\n item = Item.objects.filter(itemid = id).update(accountid = Account.objects.get(accountid = getNum(request.POST['accountid'])))\n item = Item.objects.filter(itemid = id).update(lessonid = Lesson.objects.get(lessonid = getNum(request.POST['lessonid'])))\n item = Item.objects.get(itemid=id)\n item.itemname=request.POST['itemid']\n item.createdate=item.createdate\n 
item.editdate=datetime.now()\n item.description=request.POST['description']\n item.content=request.POST['content']\n item.order=request.POST['order']\n item.isenable=request.POST['isenable']\n item.note=request.POST['note']\n item.save()\n return redirect('/adminitem/')\n else:\n return redirect('homepage:index')\n else:\n return redirect('homepage:index')\n\n\ndef delete(request, id):\n if request.session.has_key('username'):\n account = Account.objects.get(username = request.session['username'])\n if account.accounttypeid.accounttypeid == 1:\n item = Item.objects.get(itemid= id)\n item.delete()\n return redirect('/adminitem/')\n else:\n return redirect('homepage:index')\n else:\n return redirect('homepage:index')\n\n\n#lấy giá trị subject được nhập vào để giới hạn giá trị show ra của chapter\ndef validate_subjectitem(request):\n subject = request.GET.get('subject', None)\n chapters = Chapter.objects.filter(subjectid=subject)\n edit = request.GET.get('edit', False)\n if edit == '1': \n edit = True\n \n change = request.GET.get('change', False)\n if change == '1': \n change = True\n \n if edit == True:\n item = Item.objects.get(itemid = request.GET.get('item', None))\n \n if edit == False or change == True:\n s = ''\n else:\n if change == False:\n s= ' '\n \n temp = ''\n\n for chapter in chapters: \n if edit == True and change == False:\n if chapter.chapterid!=item.lessonid.chapterid.chapterid:\n temp = ' '\n else:\n temp = ' '\n s = s+temp\n\n data = {\n 'is_taken': s\n }\n\n return JsonResponse(data)\n\n\n#lấy giá trị chapter được nhập vào để giới hạn giá trị show ra của lesson\ndef validate_chapteritem(request):\n chapter = request.GET.get('chapter', None)\n \n lessons = Lesson.objects.filter(chapterid=chapter)\n \n edit = request.GET.get('edit', False)\n if edit == '1': \n edit = True\n \n change = request.GET.get('change', False)\n if change == '1': \n change = True\n\n if edit == True:\n item = Item.objects.get(itemid = request.GET.get('item', None))\n \n if edit == False or change == True:\n s = ''\n else:\n s= ' '\n \n temp = ''\n\n for lesson in lessons: \n if edit == True and change == False:\n if lesson.lessonid!=item.lessonid.lessonid:\n temp = ' '\n else:\n temp = ' '\n s = s+temp\n\n data = {\n 'is_taken': s\n }\n\n return JsonResponse(data)\n\ndef validate_lessonorderitem(request):\n lesson = request.GET.get('lesson', None)\n les = request.GET.get('les', None)\n items = Item.objects.filter(lessonid = lesson)\n ite = request.GET.get('ite', None)\n if ite != None:\n ite = Item.objects.get(itemid = ite)\n \n if len(items) == 0:\n s = 1\n else:\n listorder = []\n if les != None and lesson !=None and int(lesson) == int(les):\n s = ite.order\n else:\n for item in items:\n listorder.append(item.order)\n s = max(listorder) + 1\n \n data = {\n 'is_taken': s\n }\n return JsonResponse(data)","sub_path":"adminitem/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"534974844","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport os\n\nimport numpy as np\nfrom scipy import fft\nfrom scipy.io import wavfile\nfrom sklearn.linear_model import LogisticRegression\n\nlogging.basicConfig(format='%(asctime)s\\t%(name)s\\t%(levelname)s : %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nlogger.info('Loading data...')\n\nX = list()\ny = list()\n\ncategories = ['classical', 'country', 'jazz', 'metal', 'pop', 
'rock']\n\nfor fname in os.listdir('trainset'):\n fpath = os.path.join('trainset', fname)\n if os.path.isdir(fpath):\n continue\n fft_features = np.load(fpath)\n category = fname.split('.')[0]\n category_index = categories.index(category)\n X.append(fft_features)\n y.append(category_index)\n\nX = np.array(X)\ny = np.array(y)\n\nlogger.info('Training model...')\n\nmodel = LogisticRegression()\nmodel.fit(X, y)\n\nlogger.info('Read test wavfile...')\nfs, data = wavfile.read('trainset/sample/heibao-wudizirong-remix.wav')\nx_test = abs(fft(data))[:1000]\n\nlogger.info('Test model...')\ncategory_index = model.predict([x_test])[0]\n\nlogger.info('Test sample is classified as {}'.format(categories[category_index]))\n","sub_path":"battle/music_classify.py","file_name":"music_classify.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"160430565","text":"#!/usr/bin/env python\nfrom Tkinter import *\nimport os\nimport sys\nfrom libxml2 import thrDefDefaultBufferSize\n\nimport rospy\nimport Queue\nimport threading\n#from listener import getState\nfrom baxter_collaboration_msgs.msg import WOZ\n\nroot = Tk()\n\nstates = [0,0,0,0]\n\ndance = StringVar()\nwave = StringVar()\ngiggle = StringVar()\ncheat = StringVar()\n\ndef setMessage(i):\n scores = []\n scores.extend(states)\n scores[i] = not scores[i]\n try:\n talker(scores[0],scores[1],scores[2],scores[3])\n except rospy.ROSInterruptException: pass\n\ndef callback(data):\n global root\n rospy.loginfo(\"dance: %d \\n wave: %d \\n giggle: %d \\n cheat: %d\" % (data.dance, data.wave, data.giggle, data.cheat))\n\n q.put_nowait(data)\n\n'''The GUI update function '''\ndef update_gui():\n try:\n msg = q.get_nowait()\n except Queue.Empty:\n root.after(100, update_gui)\n return\n\n #update internal states\n states[0] = msg.dance\n states[1] = msg.wave\n states[2] = msg.giggle\n states[3] = msg.cheat\n\n #update GUI text\n if(msg.dance == 1):\n dance.set(\"Dance: Enabled\")\n else:\n dance.set(\"Dance: Disabled\")\n\n if (msg.wave == 1):\n wave.set(\"Wave: Enabled\")\n else:\n wave.set(\"Wave: Disabled\")\n\n if (msg.giggle == 1):\n giggle.set(\"Giggle: Enabled\")\n else:\n giggle.set(\"Giggle: Disabled\")\n\n if (msg.cheat == 1):\n cheat.set(\"Cheat: Enabled\")\n else:\n cheat.set(\"Cheat: Disabled\")\n\n # schedule the next run in 100 ms\n root.after(100, update_gui)\n\n\ndef talker(newDance,newWave,newGiggle,newCheat):\n \n msg = WOZ()\n \n msg.dance = newDance\n msg.wave = newWave\n msg.giggle = newGiggle\n msg.cheat = newCheat\n \n #rospy.loginfo(msg)\n pub.publish(msg)\n\ndef listening():\n #rospy.init_node('woz_listener', anonymous=True)\n sub = rospy.Subscriber(\"woz_st\", WOZ, callback)\n rospy.spin()\n\n\ndef on_close():\n pub.unregister()\n rospy .signal_shutdown(\"Closing thread to prevent process being kept open\")\n root.destroy()\n \n\nif __name__ == '__main__':\n pros = []\n pub = None\n sub = None\n q = Queue.Queue()\n\n # Create and pack the buttons\n txt = [\"Dance\", \"Wave\", \"Giggle\", \"Cheat\"]\n val = [0,1,2,3]\n\n rb0=Button(root,\n text=txt[0],\n width = 20,\n padx = 20,\n command=lambda : setMessage(0)).pack(anchor=W)\n\n rb1=Button(root,\n text=txt[1],\n width = 20,\n padx = 20,\n command=lambda : setMessage(1)).pack(anchor=W)\n\n rb2=Button(root,\n text=txt[2],\n width = 20,\n padx = 20,\n command=lambda : setMessage(2)).pack(anchor=W)\n\n rb3=Button(root,\n text=txt[3],\n width = 20,\n padx = 20,\n command=lambda : 
setMessage(3)).pack(anchor=W)\n\n # Create and pack the labels\n Label(root, textvariable = dance).pack(anchor=W)\n Label(root, textvariable = wave).pack(anchor=W)\n Label(root, textvariable = giggle).pack(anchor=W)\n Label(root, textvariable = cheat).pack(anchor=W)\n\n # default settings\n dance.set(\"Dance: Disabled\")\n wave.set(\"Wave: Disabled\")\n giggle.set(\"Giggle: Disabled\")\n cheat.set(\"Cheat: Disabled\")\n\n #initialize the node\n rospy.init_node('woz_gui', anonymous=False)\n #create publisher on main thread\n pub = rospy.Publisher('woz_cmd', WOZ, queue_size=1)\n\n #Added subscriber on worker thread\n sub_t = threading.Thread(target=listening, args=[])\n sub_t.setDaemon(True)\n sub_t.start()\n\n # schedule the first run of loop function\n root.after(100, update_gui)\n # schedule the exit handler.\n root.protocol(\"WM_DELETE_WINDOW\", on_close)\n root.mainloop()\n\n\n\n\n","sub_path":"src/baxter_examples/scripts/thing.py","file_name":"thing.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"228035382","text":"#!/usr/bin/env python\n\"\"\"\nukmohso-ingest.py.\n\nImport Historical Stations Observations from the UK Met Office. See\nhttps://www.metoffice.gov.uk/research/climate/maps-and-data/historic-station-data\nfor more details.\n\"\"\"\nimport argparse\nimport boto3\nimport gzip\nimport logging\nimport os\nimport sentry_sdk\nimport sys\nimport urllib.request\nimport yaml\n\nfrom pathlib import Path\nfrom urllib.request import Request\nfrom urllib.request import urlopen\n\n\nclass IngestHistoricStationData:\n \"\"\"Ingest Historic Station Data.\"\"\"\n\n def __init__(self, datafile):\n \"\"\"\n Initialise the object.\n\n args:\n datafile - The name of the data file containing the station names.\n\n \"\"\"\n logging.info(f'Opening {datafile}.')\n stream = open(datafile, 'r')\n data = yaml.safe_load(stream)\n stream.close()\n self.stations(data['stations'])\n self.s3bucket(data['s3bucket'])\n self.s3path(data['s3path'])\n\n def get_data(self, station):\n \"\"\"\n Download the data from the station URL.\n\n args:\n station - The name of the station.\n\n \"\"\"\n server = 'www.metoffice.gov.uk'\n path = 'pub/data/weather/uk/climate/stationdata'\n url = f'https://{server}/{path}/{station}data.txt'\n logging.debug(f'url={url}')\n req = Request(url)\n req.add_header('User-Agent', 'curl/7.64.1')\n data = urlopen(req).read()\n uncompressed_data_length = len(data)\n logging.debug(f'Downloaded {uncompressed_data_length} bytes')\n gzip_file_name = f'{station}data.txt.gz'\n logging.debug(f'Writing GZIP archive to {gzip_file_name}.')\n f = gzip.open(gzip_file_name, 'wb')\n f.write(data)\n f.close()\n compressed_data_length = os.path.getsize(gzip_file_name)\n percentage = compressed_data_length / uncompressed_data_length\n percentage *= 100.0\n percentage = 100.0 - percentage\n logmsg = 'Compressed down to %d bytes (%.02f%%).' 
% (\n compressed_data_length,\n percentage)\n logging.debug(logmsg)\n s3object = f'{self.s3path()}/{gzip_file_name}'\n logging.debug(f'Uploading to {self.s3bucket()}://{s3object}.')\n s3 = boto3.client('s3')\n f = open(gzip_file_name, 'rb')\n s3.upload_fileobj(f, self.s3bucket(), s3object)\n f.close()\n logging.debug(f'Removing file {gzip_file_name}.')\n p = Path(gzip_file_name)\n p.unlink()\n\n def s3bucket(self, s3bucket=None):\n \"\"\"Get/set the S3 Bucket Name.\"\"\"\n if s3bucket is not None:\n self._s3bucket = s3bucket\n return self._s3bucket\n\n def s3path(self, s3path=None):\n \"\"\"Get/set the S3 Path Name.\"\"\"\n if s3path is not None:\n self._s3path = s3path\n return self._s3path\n\n def stations(self, stations=None):\n \"\"\"\n Get or set the list of station names.\n\n args:\n stations - A list of station names.\n\n \"\"\"\n if stations is not None:\n self._stations = stations\n return self._stations\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Ingest Met Office Historic Station Data')\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-d\",\n \"--debug\",\n action=\"store_true\",\n help=\"Show DEBUG output.\")\n parser.add_argument(\"-f\",\n \"--file\",\n help=\"Specify the station data file.\",\n type=str)\n group.add_argument(\"-q\",\n \"--quiet\",\n action=\"store_true\",\n help=\"Show WARN/ERROR output only.\")\n args = parser.parse_args()\n\n if args.quiet:\n logging.basicConfig(level='WARN')\n elif args.debug:\n logging.basicConfig(level='DEBUG')\n logging.debug('Output at DEBUG level.')\n else:\n logging.basicConfig(level='INFO')\n\n if 'SENTRY_DSN' in os.environ:\n logging.info('Setting Sentry DSN.')\n sentry_sdk.init(os.environ['SENTRY_DSN'])\n else:\n logging.warn('Sentry not configured.')\n\n if args.file is None:\n parser.error('Please specify a file.')\n\n ingest = IngestHistoricStationData(args.file)\n\n for station in ingest.stations():\n logging.info(f'Downloading data for {station}.')\n ingest.get_data(station)\n","sub_path":"ukmohso-ingest.py","file_name":"ukmohso-ingest.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"425256012","text":"from datetime import datetime, timedelta, timezone\n\nclass Emprestimo(object):\n HORARIO_BRASILIA = timezone(timedelta(hours=-3))\n\n def __init__(self, nomeCliente, nomeloja, quantidade, modalidade) -> None:\n self.nome = nomeCliente\n self.loja = nomeloja # Loja de retirada das bicicletas\n self.quantidade = quantidade\n self.modalidade = modalidade\n self.retirada = datetime.now().astimezone(self.HORARIO_BRASILIA)\n self.devolucao = self.retirada\n self.valor = 0.0\n ","sub_path":"Emprestimo.py","file_name":"Emprestimo.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"459167702","text":"import unittest\nfrom utils import buildFromCode\nfrom lor_deckcodes import LoRDeck, CardCodeAndCount\nfrom utils import get_dataframe\n\nclass DeckBuilderTests(unittest.TestCase):\n\t\"\"\"docstring for DeckBuilderTests\"\"\"\n\n\tdef test_get_dataframe(self):\n\t\tdata = get_dataframe()\n\t\tself.assertEqual(len(data), 601)\n\t\tmax_num_copies = data['name'].value_counts().max()\n\t\tself.assertEqual(max_num_copies, 1)\n\t\t\n\tdef test_buildFromCode(self):\n\t\tdeck = get_dataframe()\n\t\t\n\t\ttestDeck = deck['cardCode'].value_counts().to_dict()\n\t\tf = lambda key, value: 
str(value) + ':' + key\n\n\t\tcardList = []\n\t\tfor key, value in zip(testDeck.keys(), testDeck.values()):\n\t\t\tcardList.append(f(key, value))\n\n\t\ttestDeck = LoRDeck(cardList)\n\t\tcode = testDeck.encode()\n\t\ttestDataframe = buildFromCode(code)\n\n\t\tvalid = deck['cardCode'].unique().tolist()\n\t\ttest = testDataframe['cardCode'].unique().tolist()\n\n\t\tvalid.sort()\n\t\ttest.sort()\n\n\t\tfor x, y in zip(valid, test):\n\t\t\tself.assertEqual(x, y)\n\nif __name__ == '__main__':\n\tunittest.main()","sub_path":"unittesttristan.py","file_name":"unittesttristan.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"58066288","text":"# Polynomial Regression\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os  # used to set the working directory\n\nos.chdir(\"D:\\Machine Learning A-Z Chinese Template Folder\\Part 2 - Regression\\Section 6 - Polynomial Regression\")\n# jump to the working directory automatically\n\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:2].values  # why does this need to become a matrix? (sklearn expects a 2D feature array)\nY = dataset.iloc[:, 2].values\n\n# Handling missing data\n#from sklearn.impute import SimpleImputer\n#imputer = SimpleImputer(missing_values =np.nan, strategy= 'mean')\n#imputer.fit(X[:, 1:3])\n#X[:, 1:3] = imputer.transform(X[:, 1:3])\n\n# Splitting the dataset into the Training set and Test set\n#from sklearn.model_selection import train_test_split  # automatically splits the data into training and test sets\n#X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.2, random_state = 0 )\n\n# Feature Scaling; standardization is the most common choice\n#from sklearn.preprocessing import StandardScaler\n#sc = StandardScaler()\n#X_train = sc.fit_transform(X_train)  # fit computes the mean and standard deviation, then standardizes\n#X_test = sc.transform(X_test)  # without fit, reuses the mean and standard deviation obtained from the fit above\n\n\n# Fitting Linear Regression to the dataset\nfrom sklearn.linear_model import LinearRegression\nlin_reg = LinearRegression()\nlin_reg.fit(X, Y)\n\n# Fitting Polynomial Regression to the dataset\nfrom sklearn.preprocessing import PolynomialFeatures\npoly_reg = PolynomialFeatures(degree = 2)\nX_poly = poly_reg.fit_transform(X)\n\nlin_reg_2 = LinearRegression()\nlin_reg_2.fit(X_poly, Y)\n\n\n# Visualising the Linear Regression results\nplt.scatter(X, Y, color = 'red')\nplt.plot(X, lin_reg.predict(X), color = 'blue')\nplt.title('Truth or Bluff (Linear Regression)')\nplt.xlabel('Position Level')\nplt.ylabel('Salary')\nplt.show()\n\n# Visualising the Polynomial Regression results\nX_grid = np.arange(min(X), max(X), 0.1)  # split the original X range into steps of 0.1\nX_grid = X_grid.reshape(len(X_grid), 1)  # reshape into a column; these two lines only make the curve look smoother\nplt.scatter(X, Y, color = 'red')\nplt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color = 'blue')\nplt.title('Truth or Bluff (Polynomial Regression)')\nplt.xlabel('Position Level')\nplt.ylabel('Salary')\nplt.show()\n\n# Predicting a new result with Linear Regression\nlin_reg.predict([[6.5]])\n\n# Predicting a new result with Polynomial Regression\nlin_reg_2.predict(poly_reg.fit_transform([[6.5]]))\n","sub_path":"HOMEWORK/2.6_Polynomial_Regression/多項式回歸.py","file_name":"多項式回歸.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"427034931","text":"import gkeepapi\nimport os\nimport sys\nfrom decouple import config\n\ndef removeFromLabel(label):\n    gnotes = keep.find(labels=[keep.findLabel(label)])\n    for gnote in gnotes:\n        gnote.delete()\n    keep.sync()\n\nif __name__ == '__main__':\n    gkeepapi.node.DEBUG = True\n    keep = gkeepapi.Keep()\n    
keep.login(config('GMAIL'), config('PWORD'))\n\n action = sys.argv[1]\n if action == 'label':\n label = sys.argv[2]\n removeFromLabel(label)","sub_path":"remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"8337264","text":"import os\nimport du\n\ntrial_name = os.path.basename(__file__)[:-3]\nwith du.trial.run_trial(trial_name=trial_name) as trial:\n\n BATCH_SIZE = 128\n NUM_EPOCHS = 82\n\n import numpy as np\n import tensorflow as tf\n import tfu\n import tfu.sandbox.batch_normalization as bn\n\n datamaps = du.tasks.image_tasks.cifar10(x_dtype=\"float32\",\n y_dtype=\"int64\",\n include_valid_split=False)\n datamaps = du.tasks.image_tasks.subtract_per_pixel_mean(datamaps)\n train, valid = datamaps\n\n x = tf.placeholder(tf.float32, [None, 32, 32, 3], name=\"x\")\n y = tf.placeholder(tf.int64, [None], name=\"y\")\n\n tfu.add_hook(tfu.hooks.reuse_variables(variable_scope=\"valid\",\n replace=[(\"valid\", \"train\")]))\n tfu.add_hook(tfu.inits.set_weight_init(tfu.inits.msr_normal))\n tfu.add_hook(tfu.inits.scale_weight_inits(scale=np.sqrt(2)))\n tfu.add_hook(tfu.hooks.default_kwargs_dsl(kwargs={\"filter_size\": (3, 3)},\n key=\"conv2d\"))\n\n epoch_size = len(train[\"x\"]) * 2 # multiply by 2 for flips\n batches_per_epoch = np.ceil(epoch_size / float(BATCH_SIZE))\n expected_count = NUM_EPOCHS * batches_per_epoch\n tfu.counter.make_default_counter(expected_count=expected_count)\n\n def model():\n norm = bn.ema_batch_normalization\n\n h = x\n with tfu.variable_scope(\"conv1\"):\n h = tfu.conv2d(h, num_filters=96)\n h = norm(h)\n h = tf.nn.relu(h)\n with tfu.variable_scope(\"conv2\"):\n h = tfu.conv2d(h, num_filters=96)\n h = norm(h)\n h = tf.nn.relu(h)\n with tfu.variable_scope(\"conv3\"):\n h = tfu.conv2d(h, num_filters=96, strides=(2, 2))\n h = norm(h)\n h = tf.nn.relu(h)\n with tfu.variable_scope(\"conv4\"):\n h = tfu.conv2d(h, num_filters=192)\n h = norm(h)\n h = tf.nn.relu(h)\n with tfu.variable_scope(\"conv5\"):\n h = tfu.conv2d(h, num_filters=192)\n h = norm(h)\n h = tf.nn.relu(h)\n with tfu.variable_scope(\"conv6\"):\n h = tfu.conv2d(h, num_filters=192, strides=(2, 2))\n h = norm(h)\n h = tf.nn.relu(h)\n with tfu.variable_scope(\"conv7\"):\n h = tfu.conv2d(h, num_filters=192)\n h = norm(h)\n h = tf.nn.relu(h)\n with tfu.variable_scope(\"conv8\"):\n h = tfu.conv2d(h, num_filters=192, filter_size=(1, 1))\n h = norm(h)\n h = tf.nn.relu(h)\n with tfu.variable_scope(\"conv9\"):\n h = tfu.conv2d(h, num_filters=10, filter_size=(1, 1))\n h = norm(h)\n\n h = tfu.global_avg_pool2d(h)\n cross_entropy = tf.reduce_mean(\n tfu.softmax_cross_entropy_with_logits(h, y))\n accuracy = tf.reduce_mean(tfu.categorical_accuracy(h, y))\n\n l2 = tfu.costs.l2()\n cost = cross_entropy + l2 * 2e-4\n\n tfu.summary.scalar(\"cost\", cost)\n tfu.summary.scalar(\"cross_entropy\", cross_entropy)\n tfu.summary.scalar(\"accuracy\", accuracy)\n\n return dict(\n cross_entropy=cross_entropy,\n accuracy=accuracy,\n cost=cost,\n )\n\n file_writer = tf.summary.FileWriter(trial.file_path(\"summary\"))\n\n train_summary = tfu.SummaryAccumulator()\n train_summary.add_file_writer(file_writer)\n\n valid_summary = tfu.SummaryAccumulator()\n valid_summary.add_file_writer(file_writer)\n\n updates = tfu.UpdatesAccumulator()\n\n with tfu.variable_scope(\"train\"), train_summary, updates:\n train_out = model()\n learning_rate = tfu.counter.discrete_scale_schedule(\n 0.1,\n scale=0.1,\n thresholds=[0.5, 0.75])\n 
tfu.summary.scalar(\"learning_rate\", learning_rate)\n tfu.updates.nesterov_momentum(train_out[\"cost\"],\n learning_rate=learning_rate)\n\n with tfu.variable_scope(\"valid\", deterministic=True), valid_summary:\n valid_out = model()\n\n # enable XLA\n config = tf.ConfigProto()\n config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1\n sess = tf.InteractiveSession(config=config)\n\n tfu.counter.set_session(sess)\n sess.run(tf.global_variables_initializer())\n\n summary_printer = tfu.SummaryPrinter()\n summary_printer.add_recipe(\"trial_info\", trial)\n summary_printer.add_recipe(\"progress\")\n summary_printer.add_recipe(\"iter\")\n summary_printer.add_recipe(\"time\")\n summary_printer.add_recipe(\"s_per_iter\")\n summary_printer.add_recipe(\"x min+iter\",\n \"valid_epoch/cost\",\n format=\"%.4g\")\n summary_printer.add_recipe(\"x max+iter\",\n \"valid_epoch/accuracy\",\n format=\"%.4g\")\n summary_printer.add_recipe(\"add_finals\",\n [\"train_epoch/cost\",\n \"train_epoch/accuracy\",\n \"valid_epoch/cost\",\n \"valid_epoch/accuracy\"],\n format=\"%.4g\")\n train_summary.add_summary_printer(summary_printer)\n valid_summary.add_summary_printer(summary_printer)\n\n train_fn = tfu.tf_fn(sess=sess,\n inputs={\"x\": x,\n \"y\": y},\n outputs=train_out,\n ops=[updates, train_summary, tfu.counter.step_op()])\n train_fn = tfu.wrap.output_nan_guard(train_fn)\n train_fn = tfu.wrap.split_input(train_fn, split_size=BATCH_SIZE)\n train_fn = tfu.wrap.format_output_keys(train_fn, \"train_epoch/%s\")\n train_fn = tfu.wrap.update_summary_printer(train_fn, summary_printer)\n\n valid_fn = tfu.tf_fn(sess=sess,\n inputs={\"x\": x,\n \"y\": y},\n outputs=valid_out,\n ops=[valid_summary])\n valid_fn = tfu.wrap.split_input(valid_fn, split_size=500)\n valid_fn = tfu.wrap.format_output_keys(valid_fn, \"valid_epoch/%s\")\n valid_fn = tfu.wrap.update_summary_printer(valid_fn, summary_printer)\n\n train_gen = du.tasks.image_tasks.gen_standard_cifar10_augmentation(train)\n\n while tfu.counter.get_count_value() < expected_count:\n with du.timer(\"epoch\"):\n train_epoch = train_gen.next()\n train_res = train_fn(train_epoch)\n valid_res = valid_fn(valid)\n print(summary_printer.to_org_list())\n if 0:\n tfu.serialization.dump_variables(trial.file_path(\"final_variables\"))\n","sub_path":"examples/cifar10_allconv_bn.py","file_name":"cifar10_allconv_bn.py","file_ext":"py","file_size_in_byte":6701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"440202073","text":"# config: utf-8\nimport pygame\nfrom pygame.locals import *\nimport sys\n\nSCREEN_SIZE = (640, 480)\n\n#Pygameを初期化\npygame.init()\n#SCREEN_SIZEの画面を作成\nscreen = pygame.display.set_mode(SCREEN_SIZE)\n#タイトルバーの文字列をセット\npygame.display.set_caption(\"ウィンドウを作成\")\nwhile True:\n screen.fill((0, 255, 0))\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == QUIT:\n sys.exit()","sub_path":"pygame_proc/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"34861428","text":"## 특정값의 모든 위치를 출력하는 프로그램\r\nimport random\r\nmyList = [random.randint(1,5) for _ in range(10)]\r\nprint(myList)\r\nNUMBERS = 5\r\nindex = 0\r\nfindList = []\r\n#try, except 잘 활용.\r\nwhile True :\r\n try:\r\n index = myList.index(NUMBERS, index)\r\n print(index)\r\n index += 1\r\n except:\r\n break\r\n","sub_path":"강의자료/2019-06-10/Code04-03 리스트 조작함수1.py","file_name":"Code04-03 
리스트 조작함수1.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"102094122","text":"import matplotlib.pylab as plt\nimport numpy as np\n\nX = np.linspace(-np.pi, np.pi, 256, endpoint=True)\nC,S = np.cos(X), np.sin(X)\n\nplt.plot(X,C)\nplt.plot(X,S)\nplt.axis('tight')\nplt.show()\n","sub_path":"MatplotLib/plot2.py","file_name":"plot2.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"529780100","text":"# -*- coding: utf-8 -*-\nfrom data_type_identifier_generator import DataTypeIdentifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.preprocessing import LabelEncoder\nfrom tensorflow.keras.models import load_model\nfrom sklearn.utils import shuffle\nfrom os.path import join\nfrom pandas import DataFrame, read_csv, concat\n\n\n\"\"\"\n############################################\n############# MAIN OBJECT ##################\n############################################\n\"\"\"\n# Using LabelEncoder implies that we want to keep a certain order of modalities while encoding a variable.\n# Here we have no order, but the target variable only has two modalities, so there is no problem using it.\n# We could also have used OneHotEncoder instead.\ndata_type_identifier = DataTypeIdentifier(LabelEncoder)\n\n\n\"\"\"\n############################################\n######## DATA PREPROCESSING ################\n############################################\n\"\"\"\n# 1-Loading data from two different sources\ncategorical_numerical_data = read_csv(join(\"data\",\"data.csv\"), sep=\",\", index_col=False)\nfeatures_transposed_2 = read_csv(join(\"data\",\"CC_training.csv\"), sep=\",\", index_col=False, encoding=\"ISO-8859-1\")\ntarget_variable_2 = read_csv(join(\"data\",\"y.csv\"), sep=\",\")\n\n# 2-Separating our features from our target variable\nfeatures = categorical_numerical_data.iloc[:,:-1]\ntarget_variable = categorical_numerical_data.iloc[:,-1]\n\n# 3-Transposing our feature data frame to implement a column type analysis\nfeatures_transposed = features.T\n\n# 4-Keeping the initial data type of every single feature\nfeatures_transposed = data_type_identifier.keep_initial_data_types(features_transposed)\nfeatures_transposed_2 = data_type_identifier.keep_initial_data_types(features_transposed_2)\n\n# 5-Building our training set\nfeatures_and_target = data_type_identifier.build_final_set(features_transposed, target_variable)\nfeatures_and_target_2 = data_type_identifier.build_final_set(features_transposed_2, target_variable_2)\nX_train = concat((features_and_target[\"new_features\"], features_and_target_2[\"new_features\"]))\ny_train = concat((features_and_target[\"target_variable_encoded\"], features_and_target_2[\"target_variable_encoded\"]))\nmappings = data_type_identifier.get_target_variable_class_mappings()  # 0 for categorical and 1 for numerical when this model was built\n\n# 6-Shuffling our data\nX_train, y_train = shuffle(X_train, y_train)\n\n\"\"\"\n############################################\n############## TRAINING ####################\n############################################\n\"\"\"\ndata_type_identifier_model=data_type_identifier.sigmoid_neuron(X=X_train,\n                                                               y=y_train,\n                                                               path=join(\"model_and_checkpoint\",\"data_type_identifier.h5\"),\n                                                               epoch=300,\n                                                               validation_split=0.1,\n                                                               batch_size=20)\n\n\"\"\"\n############################################\n##############SAVING 
VARIABLES##############\n############################################\n\"\"\"\ndata_type_identifier.save_variables(join(\"saved_variables\",\"mappings.pickle\"), mappings)\ndata_type_identifier.save_variables(join(\"saved_variables\",\"X_train.pickle\"), X_train)\ndata_type_identifier.save_variables(join(\"saved_variables\",\"y_train.pickle\"), y_train) \n\n\n\"\"\"\n############################################\n################ TESTING ###################\n############################################\n\"\"\"\n# 1-Loading important variables\nmappings = data_type_identifier.load_variables(join(\"saved_variables\",\"mappings.pickle\"))\n\n# 2-Loading the model and the test datasets\ndata_type_identifier_model = load_model(join(\"model_and_checkpoint\",\"data_type_identifier.h5\"))\nX_test = read_csv(join(\"data\",\"CC_training.csv\"), sep=\",\", encoding=\"ISO-8859-1\")\ny_test = read_csv(join(\"data\",\"y.csv\"), sep=\",\")\n\n# 3-Predictions on test set\nnew_test_set_predictions = data_type_identifier.predict(X_test, mappings, data_type_identifier_model)\n\n# 4-Classification report\nreport = classification_report(y_true=y_test, y_pred=new_test_set_predictions, output_dict=True)\nreport = DataFrame(report).transpose()\nreport.to_csv(join(\"data\",\"report.csv\"))\n\n\n","sub_path":"DataTypeIdentifier/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"229916885","text":"class BigRandom(object):\n def __init__(self):\n self.data = \"data.txt\"\n # add attributes if you need more\n\n def answer(self):\n noh = 0 # variable to store number of hashtag\n # ommiting line number's hashtag\n suc = 0 # variable to store sum of character's code in ascii,\n # ommiting line number and its hashtag\n\n # your algorithm\n total = 0 \n file = open(\"data.txt\",\"r\")\n for i in file:\n for j in i:\n if ( j != '#'):\n i = i[1: ]\n else :\n break\n for j in i[1:] :\n if (j=='#'):\n noh+=1\n suc+=ord(j)\n return(noh,suc)\n \n \n \n\njawab = BigRandom()\nprint (jawab.answer())\n#return (noh,suc)\n\n # add methods if you need more\n","sub_path":"bigrandom/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"362787452","text":"__author__ = 'joefalkson'\nimport cv2\nimport numpy as np\n\n\ndef warp(image, U, V):\n [M, N] = image.shape\n X, Y = np.meshgrid(xrange(N), xrange(M))\n map_x = (X + U).astype('float32')\n map_y = (Y + V).astype('float32')\n\n warp1 = cv2.remap(image, map_x, map_y, interpolation = cv2.INTER_LINEAR)\n warp2 = cv2.remap(image, map_x, map_y, interpolation = cv2.INTER_NEAREST)\n fail = np.isnan(warp1)\n warp1[fail] = warp2[fail]\n return warp1","sub_path":"ps5_python/warp.py","file_name":"warp.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"313295382","text":"from pytest import mark, raises\n\nfrom piptools.utils import (\n as_tuple,\n dedup,\n flat_map,\n format_requirement,\n format_specifier,\n get_hashes_from_ireq,\n is_pinned_requirement,\n name_from_req,\n)\n\n\ndef test_format_requirement(from_line):\n ireq = from_line(\"test==1.2\")\n assert format_requirement(ireq) == \"test==1.2\"\n\n\ndef test_format_requirement_editable(from_editable):\n ireq = from_editable(\"git+git://fake.org/x/y.git#egg=y\")\n assert format_requirement(ireq) == 
\"-e git+git://fake.org/x/y.git#egg=y\"\n\n\ndef test_format_requirement_ireq_with_hashes(from_line):\n ireq = from_line(\"pytz==2017.2\")\n ireq_hashes = [\n \"sha256:d1d6729c85acea5423671382868627129432fba9a89ecbb248d8d1c7a9f01c67\",\n \"sha256:f5c056e8f62d45ba8215e5cb8f50dfccb198b4b9fbea8500674f3443e4689589\",\n ]\n\n expected = (\n \"pytz==2017.2 \\\\\\n\"\n \" --hash=sha256:d1d6729c85acea542367138286\"\n \"8627129432fba9a89ecbb248d8d1c7a9f01c67 \\\\\\n\"\n \" --hash=sha256:f5c056e8f62d45ba8215e5cb8f5\"\n \"0dfccb198b4b9fbea8500674f3443e4689589\"\n )\n assert format_requirement(ireq, hashes=ireq_hashes) == expected\n\n\ndef test_format_requirement_ireq_with_hashes_and_markers(from_line):\n ireq = from_line(\"pytz==2017.2\")\n marker = 'python_version<\"3.0\"'\n ireq_hashes = [\n \"sha256:d1d6729c85acea5423671382868627129432fba9a89ecbb248d8d1c7a9f01c67\",\n \"sha256:f5c056e8f62d45ba8215e5cb8f50dfccb198b4b9fbea8500674f3443e4689589\",\n ]\n\n expected = (\n 'pytz==2017.2 ; python_version<\"3.0\" \\\\\\n'\n \" --hash=sha256:d1d6729c85acea542367138286\"\n \"8627129432fba9a89ecbb248d8d1c7a9f01c67 \\\\\\n\"\n \" --hash=sha256:f5c056e8f62d45ba8215e5cb8f5\"\n \"0dfccb198b4b9fbea8500674f3443e4689589\"\n )\n assert format_requirement(ireq, marker, hashes=ireq_hashes) == expected\n\n\ndef test_format_specifier(from_line):\n ireq = from_line(\"foo\")\n assert format_specifier(ireq) == \"\"\n\n ireq = from_line(\"foo==1.2\")\n assert format_specifier(ireq) == \"==1.2\"\n\n ireq = from_line(\"foo>1.2,~=1.1,<1.5\")\n assert format_specifier(ireq) == \"~=1.1,>1.2,<1.5\"\n ireq = from_line(\"foo~=1.1,<1.5,>1.2\")\n assert format_specifier(ireq) == \"~=1.1,>1.2,<1.5\"\n\n\ndef test_as_tuple(from_line):\n ireq = from_line(\"foo==1.1\")\n name, version, extras = as_tuple(ireq)\n assert name == \"foo\"\n assert version == \"1.1\"\n assert extras == ()\n\n ireq = from_line(\"foo[extra1,extra2]==1.1\")\n name, version, extras = as_tuple(ireq)\n assert name == \"foo\"\n assert version == \"1.1\"\n assert extras == (\"extra1\", \"extra2\")\n\n # Non-pinned versions aren't accepted\n should_be_rejected = [\"foo==1.*\", \"foo~=1.1,<1.5,>1.2\", \"foo\"]\n for spec in should_be_rejected:\n ireq = from_line(spec)\n with raises(TypeError):\n as_tuple(ireq)\n\n\ndef test_flat_map():\n assert [1, 2, 4, 1, 3, 9] == list(flat_map(lambda x: [1, x, x * x], [2, 3]))\n\n\ndef test_dedup():\n assert list(dedup([3, 1, 2, 4, 3, 5])) == [3, 1, 2, 4, 5]\n\n\ndef test_get_hashes_from_ireq(from_line):\n ireq = from_line(\n \"pytz==2017.2\",\n options={\n \"hashes\": {\n \"sha256\": [\n \"d1d6729c85acea5423671382868627129432fba9a89ecbb248d8d1c7a9f01c67\",\n \"f5c056e8f62d45ba8215e5cb8f50dfccb198b4b9fbea8500674f3443e4689589\",\n ]\n }\n },\n )\n expected = [\n \"sha256:d1d6729c85acea5423671382868627129432fba9a89ecbb248d8d1c7a9f01c67\",\n \"sha256:f5c056e8f62d45ba8215e5cb8f50dfccb198b4b9fbea8500674f3443e4689589\",\n ]\n assert get_hashes_from_ireq(ireq) == expected\n\n\n@mark.parametrize(\n (\"line\", \"expected\"),\n [\n (\"django==1.8\", True),\n (\"django===1.8\", True),\n (\"django>1.8\", False),\n (\"django~=1.8\", False),\n (\"django==1.*\", False),\n ],\n)\ndef test_is_pinned_requirement(from_line, line, expected):\n ireq = from_line(line)\n assert is_pinned_requirement(ireq) is expected\n\n\ndef test_is_pinned_requirement_editable(from_editable):\n ireq = from_editable(\"git+git://fake.org/x/y.git#egg=y\")\n assert not is_pinned_requirement(ireq)\n\n\ndef test_name_from_req(from_line):\n ireq = from_line(\"django==1.8\")\n 
assert name_from_req(ireq.req) == \"django\"\n\n\ndef test_name_from_req_with_project_name(from_line):\n ireq = from_line(\"foo==1.8\")\n ireq.req.project_name = \"bar\"\n assert name_from_req(ireq.req) == \"bar\"\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"273686604","text":"import pygame\ndisplay_width = 800\ndisplay_height = 600\nfrom random import randint\nclass character():\n def __init__(self, characterImg, startx, starty, imgW, imgH):\n self.hp = 3\n self.mana = 100\n self.armour = 50\n self.speed = 10\n self.charx = startx\n self.chary = starty\n self.characterImg = pygame.image.load(characterImg)\n self.forward = False\n self.rightways = False\n self.leftways = True\n self.backwards = False\n self.gravity = -10\n self.xlen = imgW\n self.ylen = imgH\n\n def moveRight(self):\n self.rightways = True\n self.leftways = False\n\n def moveLeft(self):\n self.rightways = False\n self.leftways = True\n\n def moveUp(self):\n self.backwards = False\n self.forward = True\n\n def moveDown(self):\n self.backwards = True\n self.forward = False\n\n def isAlive(self):\n if self.hp > 0:\n return True\n return False\n def hitLeftWall(self):\n if 0 >= self.charx or self.charx >= display_width:\n return True\n return False\n def moveAlien(self):\n self.charx += -10\n if randint(0, 25) == 10:\n self.chary += -randint(50, 400)\n def bossMove(self):\n self.charx += -5 * randint (-1, 3) * randint(1, 2)\n if randint(0, 180) == 10:\n self.chary += -randint(50, 100)\n def isOnGround(self):\n if self.chary + 120 <= display_height:\n return False\n return True\n def hasColided(self, character):\n if (self.charx <= character.charx <= self.charx + self.xlen or self.charx <= character.charx + character.xlen <= self.charx + self.xlen) and (self.chary <= character.chary <= self.chary + self.ylen or self.chary <= character.chary + character.ylen <= self.chary + self.ylen):\n return True\n return False","sub_path":"bbvgg/char.py","file_name":"char.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"21786137","text":"\"\"\"Author: Benson Pan\n Date: April 20, 2014\n Description: Scrambled Eggs - A Flappy Bird Remake\n\"\"\"\n#Import\nimport pygame, SEsprites\npygame.init()\npygame.mixer.init()\n\ndef main():\n #Display\n screen = pygame.display.set_mode((640, 480))\n pygame.display.set_caption(\"Scrambled Eggs\")\n\n #Entities\n background = pygame.Surface(screen.get_size())\n background.fill((255, 255, 255))\n\n #Load music and sound effect\n pygame.mixer.music.load(\"./Resources/song.mp3\")\n pygame.mixer.music.set_volume(0.250)\n pygame.mixer.music.play(-1)\n score_sound = pygame.mixer.Sound(\"./Resources/coin.wav\")\n score_sound.set_volume(0.5)\n press_sound = pygame.mixer.Sound(\"./Resources/press.wav\")\n press_sound.set_volume(0.125)\n crack_sound = pygame.mixer.Sound(\"./Resources/crack.wav\")\n\n #creates the title screen objects and puts them in a group\n overlay = SEsprites.Title_overlay(screen.get_width(), screen.get_height())\n play_button = SEsprites.Button(screen.get_width(), screen.get_height(), -70, 100, \"Play\")\n quit_button = SEsprites.Button(screen.get_width(), screen.get_height(), 70, 100, \"Quit\")\n titleGroup = pygame.sprite.Group(overlay, play_button, quit_button)\n\n #creates the game over objects and puts them in a group\n 
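# (these widgets are drawn only once end_screen becomes True after a finished game)\n    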
game_over = SEsprites.Game_over_overlay(screen.get_width(), screen.get_height())\n score = SEsprites.Score_label(screen.get_width(), screen.get_height(), \"score\")\n highscore = SEsprites.Score_label(screen.get_width(), screen.get_height(), \"highscore\")\n retry_button = SEsprites.Button(screen.get_width(), screen.get_height(), -70, 100, \"Retry\")\n exit_button = SEsprites.Button(screen.get_width(), screen.get_height(), 70, 100, \"Exit\")\n gameOverGroup = pygame.sprite.Group(game_over, score, highscore, retry_button, exit_button)\n\n #creates the scrolling background and puts it into its own group\n background1 = SEsprites.Background(screen.get_width())\n background2 = SEsprites.Background(screen.get_width())\n background2.set_x_zero() #runs this method to position the second background\n backgroundGroup = pygame.sprite.Group(background1, background2)\n\n #Creates the score_label and egg and puts it into a group\n egg = SEsprites.Egg()\n score_keeper = SEsprites.Score_keeper(screen.get_width())\n allGroup = pygame.sprite.Group(egg, score_keeper)\n\n #Creates the instructions label and puts it into its own group\n instructionsGroup = pygame.sprite.Group(SEsprites.Instructions(150, 150))\n\n #Creates 2 pairs of pipes, and joins them in a nested group\n top_pipe1 = SEsprites.Top_pipe(screen.get_width(), screen.get_height())\n bot_pipe1 = SEsprites.Bottom_pipe(screen.get_width(), screen.get_height(),\\\n top_pipe1.get_bottom())\n pipeGroup1 = pygame.sprite.Group(top_pipe1, bot_pipe1)\n\n top_pipe2 = SEsprites.Top_pipe(screen.get_width(), screen.get_height())\n bot_pipe2 = SEsprites.Bottom_pipe(screen.get_width(), screen.get_height(),\\\n top_pipe2.get_bottom())\n pipeGroup2 = pygame.sprite.Group(top_pipe2, bot_pipe2)\n\n pipeGroup = pygame.sprite.Group(pipeGroup1, pipeGroup2)\n\n #Action\n #Assign\n clock = pygame.time.Clock()\n keepGoing = True #Main game loop variable\n game_start = False #Space bar triggered variable to activate pipes\n obtain_score1 = True #Pipe score limiter\n obtain_score2 = False #Pipe score limiter\n play = False #variable before game_start; must be triggered before it can be triggered\n end_screen = False #game over screen\n instructions_visible = False\n pressed = pygame.key.get_pressed()\n zero_pressed = pressed #stores the no key press data for use in reseting\n\n #Loop\n while keepGoing:\n #Time\n clock.tick(60)\n\n #Events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n keepGoing = False\n\n\n #Mouse handler\n if not play and not end_screen:\n #Title Screen\n if play_button.get_rect().collidepoint(pygame.mouse.get_pos()):\n play_button.hover(True)\n if pygame.mouse.get_pressed()[0]:\n press_sound.play()\n play = True\n else:\n play_button.hover(False)\n\n if quit_button.get_rect().collidepoint(pygame.mouse.get_pos()):\n quit_button.hover(True)\n if pygame.mouse.get_pressed()[0]:\n press_sound.play()\n keepGoing = False\n else:\n quit_button.hover(False)\n elif end_screen:\n #Game Over Screen\n if retry_button.get_rect().collidepoint(pygame.mouse.get_pos()):\n retry_button.hover(True)\n if pygame.mouse.get_pressed()[0]:\n #Starts game directly by changing play to true\n press_sound.play()\n end_screen = False\n play = True\n else:\n retry_button.hover(False)\n\n if exit_button.get_rect().collidepoint(pygame.mouse.get_pos()):\n exit_button.hover(True)\n if pygame.mouse.get_pressed()[0]:\n press_sound.play()\n end_screen = False\n #Delay slightly, as to not trigger the quit button when changing screens\n pygame.time.delay(200)\n else:\n 
exit_button.hover(False)\n else:\n #Only allows press updates when they click the play button\n pressed = pygame.key.get_pressed()\n\n #Key handler\n if not game_start:\n instructions_visible = True\n if pressed[pygame.K_SPACE]:\n #Starts moving pipes when space is pressed once\n instructions_visible = False\n game_start = True\n egg.start()\n top_pipe1.move_pipe()\n bot_pipe1.move_pipe()\n\n if game_start:\n if pressed[pygame.K_SPACE]:\n #moves egg if space is pressed\n egg.jump()\n\n #Start moving the other set of pipes when one pair reached the middle and adds score\n if top_pipe1.get_x() <= (screen.get_width() / 2):\n top_pipe2.move_pipe()\n bot_pipe2.move_pipe()\n if obtain_score1:\n score_keeper.add_score()\n score_sound.play()\n obtain_score1 = False\n obtain_score2 = True\n elif top_pipe2.get_x() <= (screen.get_width() / 2):\n top_pipe1.move_pipe()\n bot_pipe1.move_pipe()\n if obtain_score2:\n score_keeper.add_score()\n score_sound.play()\n obtain_score2 = False\n obtain_score1 = True\n\n #Resets and stops pipes when they reach the end\n if top_pipe1.check_boundaires():\n bot_pipe1.reset(top_pipe1.get_bottom())\n elif top_pipe2.check_boundaires():\n bot_pipe2.reset(top_pipe2.get_bottom())\n\n if pygame.sprite.spritecollide(egg, pipeGroup, False) or \\\n (egg.get_top() <= 0) or (egg.get_bot() >= screen.get_height()):\n #FULL GAME SPRITE ATTRIBUTE RESET\n if egg.get_pic_num() == 1:\n #Animate the egg after pausing the pipes from moving\n crack_sound.play()\n top_pipe1.pause()\n bot_pipe1.pause()\n top_pipe2.pause()\n bot_pipe2.pause()\n egg.set_animate()\n if egg.get_pic_num() > 9:\n #After egg animation is complete, reset all attributes\n pygame.time.delay(500)\n highscore.update_highscore(score_keeper.get_score())\n score.update_highscore(score_keeper.get_score())\n top_pipe1.reset()\n top_pipe2.reset()\n bot_pipe1.reset(top_pipe1.get_bottom())\n bot_pipe2.reset(top_pipe2.get_bottom())\n egg.reset()\n score_keeper.reset()\n pressed = zero_pressed #resets the press, to prevent \"ghost jumps\"\n obtain_score1 = True #Ensures first pipe is first\n obtain_score2 = False\n game_start = False\n play = False\n end_screen = True\n\n #Refresh\n screen.blit(background, (0, 0))\n\n #Clear, update and draw is conditional, depending on what part of the game it is\n backgroundGroup.clear(screen,background)\n if play:\n #Main game items (in-game)\n pipeGroup.clear(screen,background)\n allGroup.clear(screen,background)\n if instructions_visible:\n #If player hasnt pressed space yet, instructions are visible\n instructionsGroup.clear(screen, background)\n elif not play and end_screen:\n #Game over screen items\n gameOverGroup.clear(screen, background)\n else:\n #Title screen items\n titleGroup.clear(screen,background)\n\n backgroundGroup.update()\n if play:\n pipeGroup.update()\n allGroup.update()\n if instructions_visible:\n instructionsGroup.update()\n elif not play and end_screen:\n gameOverGroup.update()\n else:\n titleGroup.update()\n\n backgroundGroup.draw(screen)\n if play:\n pipeGroup.draw(screen)\n allGroup.draw(screen)\n if instructions_visible:\n instructionsGroup.draw(screen)\n elif not play and end_screen:\n gameOverGroup.draw(screen)\n else:\n titleGroup.draw(screen)\n\n pygame.display.flip()\n\n #Fade out music and delays the program quit\n pygame.mixer.music.fadeout(1750)\n pygame.time.delay(2000)\n 
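# with the fade-out delay finished, shut pygame down cleanly before leaving main()\n    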
pygame.quit()\n\nmain()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"448444376","text":"#!/usr/bin/env python\n#\n# Copyright (C) 2015 Joel W. Dafoe\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\n# FOR PYTHON 3.x\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom gui import FrontEnd, ButtonTip\nfrom threading import Thread\nimport os\nimport pickle\nfrom string import ascii_uppercase\nfrom time import sleep\nimport sys\n\nfrom diskinfo import DiskInfo\nimport createdisk as createdisk\n\n\nclass Interface(FrontEnd):\n    def __init__(self, master, version):\n        FrontEnd.__init__(self, master, version)\n        # MAKE IT LOOK NICE\n        # style = ttk.Style()\n\n        # CREATE AND SET VARIABLES\n        self.logo = PhotoImage(file=os.path.join(datadir, 'DT.gif'))\n        self.description = \"A disk utility for viewing, cleaning, and creating various bootable and other drive types\" \\\n                           \" featuring advanced drive detection and customizable config options. This tool replaces \" \\\n                           \"the former USBtool.\"\n        self.label = StringVar()\n        self.src = StringVar()\n        self.fs = StringVar()\n        self.size = StringVar()\n        # self.status = StringVar()\n        self.progress = IntVar()\n        self.prog_bar = None\n        self.drv_checkboxes = {}\n        self.workers = {}\n        self.threads = {}\n        self.drives = {}\n        self.src_files = 1\n        self.mnt = ''\n        self.last_thread = None\n        self.task = None\n        self.disk_frame = None\n        self.settings_nb = None\n\n        # LOAD PROFILES FROM LOCAL DB\n        self.prof_db = os.path.join(datadir, 'prof.db')\n        if os.path.isfile(self.prof_db):\n            with open(self.prof_db, 'rb') as prof:\n                self.profiles = pickle.load(prof)\n            if 'default' not in self.profiles:\n                self.profiles['default'] = ['FAT32', '8192', '', '']\n        # OR CREATE THE DEFAULT\n        else:\n            self.profiles = {'default': ['FAT32', '8192', '', '']}\n        self.create_ui()\n\n    def create_ui(self):\n        # CONFIGURE ROOT MENUBAR\n        self.menu_file.insert_command(0, label='Settings...', command=self.get_settings)\n\n        # CONFIGURE BUTTON BAR\n        refresh = PhotoImage(file=os.path.join(datadir, 'refresh.gif'))\n        r_button = ttk.Button(self.button_bar, image=refresh, command=lambda: Thread(target=self.find_disks).start())\n        r_button.image = refresh\n        r_button.grid(row=0, column=0)\n        ButtonTip(r_button, 'Refresh list')\n        go = PhotoImage(file=os.path.join(datadir, 'go.gif'))\n        g_button = ttk.Button(self.button_bar, image=go, command=self.run_task)\n        g_button.image = go\n        g_button.grid(row=0, column=1)\n        ButtonTip(g_button, 'Start process')\n\n        # SETUP PRIMARY WIDGETS\n        ttk.Label(self.frame, text='Profile:').grid(row=1, column=0, sticky=E)\n        self.task = ttk.Combobox(self.frame, values=sorted(self.profiles), state='readonly')\n        self.task.bind('<<ComboboxSelected>>', self.load_profile)\n        self.task.set('default')\n        self.task.grid(row=1, column=1, padx=4, pady='16', sticky=W)\n\n        # MAIN PANE\n        
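# sunken frame that find_disks() fills with a checkbox and partition tree per detected drive\n        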
self.disk_frame = ttk.Frame(self.frame, width=350, height=120, padding='21 15 8 15', relief='sunken')\n self.disk_frame.grid(columnspan=2, padx=4, sticky='nsew')\n\n # CONFIGURE STATUS BAR\n self.prog_bar = ttk.Progressbar(self.status_bar, variable=self.progress)\n self.prog_bar.grid(row=0, column=1, sticky=EW)\n\n # CONFIGURE WINDOW RESIZING\n self.disk_frame.columnconfigure(0, weight=1, minsize=30)\n self.disk_frame.columnconfigure(1, weight=1)\n\n # DETECT ALL DRIVES\n Thread(target=self.find_disks).start()\n self.load_profile()\n\n def find_disks(self):\n self.master.wm_attributes(\"-topmost\", 1)\n self.progress.set(0)\n self.status_lbl.set('searching...')\n for cb in self.disk_frame.winfo_children():\n cb.destroy()\n self.drv_checkboxes = {}\n disks = DiskInfo()\n self.drives = disks.get_info()\n for index, drive in enumerate(sorted(self.drives)):\n self.drv_checkboxes[drive] = IntVar()\n if index == 0:\n cb_state = 'disabled'\n else:\n cb_state = '!disabled'\n capacity, description = self.drives[drive]['info']\n ttk.Checkbutton(self.disk_frame, variable=self.drv_checkboxes[drive], text=drive,\n state=cb_state).grid(row=index, column=0, pady=1, sticky='nse')\n tree = ttk.Treeview(self.disk_frame, show='', height=len(self.drives[drive]['partitions'])+1,\n column=('size', 'description', 'fs', 'mount'), padding='0 4', selectmode='none')\n tree.column('size', width=60, anchor=E)\n tree.column('fs', width=150, anchor='center')\n tree.column('mount', width=140)\n tree.insert('', 'end', iid=drive, values=(capacity, description,), open=True)\n for partition in self.drives[drive]['partitions']:\n size, label, mount, fs, boot, _cylinders = partition\n tree.insert(drive, 'end', values=(size, label, fs, mount), tags='detail')\n tree.tag_configure('detail', foreground='grey', font='tkDefault 8')\n tree.grid(row=index, column=1, sticky=NW)\n self.status_lbl.set('Ready')\n self.master.wm_attributes(\"-topmost\", 0)\n\n def load_profile(self, _event=None):\n name = self.task.get()\n if _event:\n print(name)\n fs, size, label, src = self.profiles[name]\n self.fs.set(fs)\n self.size.set(size)\n self.label.set(label)\n self.src.set(src)\n\n def cluster_size(self):\n if self.fs.get() == \"NTFS\":\n self.size.set('4096')\n elif self.fs.get() == \"FAT32\":\n self.size.set('8192')\n elif self.fs.get() == \"exFAT\":\n self.size.set('32K')\n elif self.fs.get() == \"FAT16\":\n self.size.set('64K')\n # self.update_profile()\n\n def run_task(self):\n self.master.wm_attributes(\"-topmost\", 1)\n self.workers = {}\n self.threads = {}\n no_mount = self.next_mnt()\n # READ ALL CONFIG FIELDS\n label = self.label.get()\n src = self.src.get()\n fs = self.fs.get()\n size = self.size.get()\n for disk in self.drv_checkboxes:\n if self.drv_checkboxes[disk].get():\n try:\n mnt = self.drives[disk]['partitions'][0][2]\n if mnt == \"n/a\":\n mnt = no_mount.pop(0)\n elif mnt[1] == \":\":\n mnt = mnt[0]\n except IndexError:\n mnt = no_mount.pop(0)\n self.workers[disk] = createdisk.DrvWorker(disk, mnt, fs, size, label, src)\n self.threads[disk] = Thread(target=self.workers[disk].format_disk)\n if len(self.threads) != 0:\n ask = messagebox.askokcancel(self.version[:-5], 'Data on selected drives will be destroyed. 
OK to proceed?')\n            if ask:\n                self.status_lbl.set('formatting...')\n                self.prog_bar['mode'] = 'indeterminate'\n                self.prog_bar.start()\n                last_disk = list(self.workers.keys())[-1]\n                self.workers[last_disk].clean_disk()\n                # self.prog_bar['mode'] = 'determinate'\n                for thread in self.threads:\n                    self.threads[thread].start()\n                    # self.last_thread = thread\n                Thread(target=self.monitor_status).start()\n        else:\n            messagebox.showerror(self.version[:-5], 'You did not select any disks for this operation.')\n        self.master.wm_attributes(\"-topmost\", 0)\n\n    def monitor_status(self):\n        if self.src.get() != \"\":\n            self.src_files = createdisk.get_size(self.src.get())\n        for thread in self.threads:\n            while self.threads[thread].is_alive():\n                sleep(2)\n                if self.workers[thread].src != \"\" and self.__count_files(self.workers[thread].mount+':/'):\n                    self.prog_bar.stop()\n                    self.prog_bar['mode'] = 'determinate'\n                    self.progress.set((createdisk.get_size(self.workers[thread].mount+':/'))*100 / self.src_files)\n                    size, units = createdisk.nice_size(self.src_files)\n                    self.status_lbl.set('copying {} {} of files...'.format(size, units))\n        self.prog_bar.stop()\n        self.prog_bar['mode'] = 'determinate'\n        self.progress.set(100)\n        self.status_lbl.set('Finished!')\n        messagebox.showinfo(self.version[:-5], 'Disk process was successfully completed.')\n        for cb in self.drv_checkboxes:\n            if self.drv_checkboxes[cb].get():\n                self.drv_checkboxes[cb].set(0)\n\n    @staticmethod\n    def next_mnt():\n        mounts = []\n        for mount in list(ascii_uppercase)[5:]:\n            if not os.path.isdir('{}:/'.format(mount)):\n                mounts.append(mount)\n        return mounts\n    \n    @staticmethod\n    def __count_files(path):\n        count = 0\n        for _base, _dir, files in os.walk(path):\n            count += len(files)\n        return count\n\n    def get_settings(self):\n        settings = Toplevel(root)\n        settings.title('Profile Settings')\n        settings.resizable(1, 0)\n        btn_bar = ttk.Frame(settings, relief='groove', padding='0 1 0 2')\n        btn_bar.grid(sticky=EW)\n        add = PhotoImage(file=os.path.join(datadir, 'add.gif'))\n        a_button = ttk.Button(btn_bar, image=add, command=self.name_profile)\n        a_button.image = add\n        a_button.grid()\n        ButtonTip(a_button, 'New profile')\n        self.settings_nb = notebook = ttk.Notebook(settings)\n        notebook.grid(padx='11', pady='19 10', sticky='nsew')\n        for profile in sorted(self.profiles):\n            prof_frame = ProfFrame(notebook, profile)\n            notebook.add(prof_frame, text=profile)\n            # DEFAULT PROFILE SHOULD ALWAYS BE FIRST\n            if profile == \"default\":\n                notebook.insert(0, prof_frame)\n        notebook.select(0) # SELECT THE FIRST (DEFAULT) PROFILE\n        settings.columnconfigure(0, weight=1)\n\n    def name_profile(self):\n        prof = Toplevel(root)\n        prof.title('New profile')\n        # prof.wm_overrideredirect(1)\n        prof.resizable(0, 0)\n        new_frame = ttk.Frame(prof, relief='raised', padding='17 18 20 17')\n        new_frame.grid(columnspan=2)\n        ttk.Label(new_frame, text='Name:').grid(row=0, column=0, padx='0 3')\n        name = ttk.Entry(new_frame)\n        name.grid(row=0, column=1)\n        name.focus_set()\n        ttk.Button(prof, text='Create', command=lambda: self.add_profile(name.get(), prof)).grid(row=1, column=0,\n                                                                                                padx='9 0', pady='7')\n        ttk.Button(prof, text='Cancel', command=prof.destroy).grid(row=1, column=1, padx='0 9', pady='7')\n\n    def add_profile(self, name, modal):\n        modal.destroy()\n        self.profiles[name] = list(self.profiles['default'])  # copy, so edits don't alias the default profile\n        prof_frame = ProfFrame(self.settings_nb, name)\n        self.settings_nb.insert(0, prof_frame, text=name)\n        self.settings_nb.select(0)\n        self.task['values'] = sorted(self.profiles)\n        self.task.set(name)\n        self.load_profile()\n        
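# persist the updated profile dict right away so the new entry survives a restart\n        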
self.save_config()\n\n def save_config(self):\n with open(self.prof_db, 'wb') as prof:\n pickle.dump(self.profiles, prof, -1)\n\n\nclass ProfFrame(ttk.Frame):\n def __init__(self, master, name):\n ttk.Frame.__init__(self, master, padding='52 15 16 15')\n self.master = master\n # BREAK-OUT THE PROFILE LIST INFORMATION\n self.name = name\n fs, size, label, source = app.profiles[self.name]\n self.label = StringVar()\n self.label.set(label)\n self.src = StringVar()\n self.src.set(source)\n self.fs = StringVar()\n self.fs.set(fs)\n self.size = StringVar()\n self.size.set(size)\n # SET LISTS TO BE USED FOR EACH PROFILE\n fs_list = ['FAT16', 'FAT32', 'NTFS', 'exFAT']\n unit_list = ['512', '1024', '2048', '4096', '8192', '16K', '32K', '64K']\n # ADD WIDGETS TO THE FRAME\n ttk.Label(self, text='Set label:')\n ttk.Entry(self, textvariable=self.label)\n # label_txt.insert(END, label)\n ttk.Label(self, text='Source:')\n ttk.Entry(self, textvariable=self.src)\n # source_txt.insert(END, source)\n ttk.Label(self, text='File system:')\n ttk.Combobox(self, textvariable=self.fs, values=fs_list, state='readonly')\n # fs_cb.bind('<>', self.cluster_size)\n # fs_cb.set(fs)\n ttk.Label(self, text='Cluster size:')\n ttk.Combobox(self, textvariable=self.size, values=unit_list, state='readonly')\n # size_cb.bind('<>', self.update_profile)\n # size_cb.set(size)\n r = 0\n c = 0\n for child in self.winfo_children():\n child.grid(row=r, column=c, padx=2, pady=5, sticky=EW)\n if c % 2:\n r += 1\n c = 0\n else:\n c += 1\n btn_frame = ttk.Frame(self, padding='0 13 0 0')\n btn_frame.grid(column=1, sticky=E)\n save = PhotoImage(file=os.path.join(datadir, 'save.gif'))\n s_button = ttk.Button(btn_frame, image=save, text='Save', compound=LEFT, command=self.update_profile)\n s_button.image = save\n s_button.grid(row=0, column=0, padx=2, sticky=E)\n delete = PhotoImage(file=os.path.join(datadir, 'delete.gif'))\n d_button = ttk.Button(btn_frame, image=delete, text='Delete', compound=LEFT, command=self.delete_profile)\n d_button.image = delete\n d_button.grid(row=0, column=1, padx=2, sticky=W)\n # ttk.Button(btn_frame2, text='new', command=self.name_profile).grid(pady=3)\n self.columnconfigure(1, weight=1)\n\n def delete_profile(self):\n del app.profiles[self.name]\n app.task['values'] = sorted(app.profiles)\n app.task.current(0)\n app.load_profile()\n app.save_config()\n self.master.forget(self)\n\n def update_profile(self):\n app.profiles[self.name] = [self.fs.get(), self.size.get(), self.label.get(), self.src.get()]\n app.save_config()\n\n\n# CHECK FOR FROZEN APPLICATION\nif getattr(sys, 'frozen', False):\n datadir = os.path.dirname(sys.executable)\nelse:\n datadir = os.path.dirname(__file__)\n# START THE GUI\nroot = Tk()\nif os.name == \"nt\":\n root.iconbitmap(os.path.join(datadir, 'DT.ico'))\napp = Interface(root, 'Drive Tool v5.2')\nroot.mainloop()\n","sub_path":"DrvTool.pyw","file_name":"DrvTool.pyw","file_ext":"pyw","file_size_in_byte":15365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"158739708","text":"import numpy as np\nimport scipy as sp\nfrom scipy import linalg\n\nfrom scipy.linalg.lapack import ztrsyl, dtrsyl\n\n\ndef sqrtm2(M):\n m, fb, fe, s = block_structure(M)\n n = M.shape[0]\n ris = sp.zeros((n,n))\n for i in range(0,m):\n ris[fb[i]:fe[i],fb[i]:fe[i]] = twobytworoot(M[fb[i]:fe[i],fb[i]:fe[i]])\n \n for j in range(1,m):\n for i in range(0,m-j):\n Tnoto = sp.copy(M[fb[i]:fe[i],fb[i+j]:fe[i+j]]) #dopo togliere il copy\n for k in range(i+1,i+j):\n Tnoto 
-= (ris[fb[i]:fe[i],fb[k]:fe[k]]).dot(ris[fb[k]:fe[k],fb[j+i]:fe[j+i]])\n if((M[fb[i]:fe[i],fb[i+j]:fe[i+j]]).shape==(1,1)):\n ris[fb[i]:fe[i],fb[i+j]:fe[i+j]] = Tnoto/(ris[fb[i]:fe[i],fb[i]:fe[i]] + ris[fb[i+j]:fe[i+j],fb[i+j]:fe[i+j]])\n else:\n Uii = ris[fb[i]:fe[i],fb[i]:fe[i]]\n Ujj = ris[fb[i+j]:fe[i+j],fb[i+j]:fe[i+j]]\n shapeUii = Uii.shape[0]\n shapeUjj = Ujj.shape[0]\n x, scale, info = dtrsyl(Uii, Ujj, Tnoto)\n ris[fb[i]:fe[i],fb[i+j]:fe[i+j]] = x * scale\n\n return ris\n\n\"\"\"\n else:\n print \"formaX\"\n print M[fb[i]:fe[i],fb[i+j]:fe[i+j]]\n print M[fb[i]:fe[i],fb[i]:fe[i]]\n print M[fb[i+j]:fe[i+j],fb[i+j]:fe[i+j]]\n return ris\n\"\"\"\n\ndef twobytworoot(matrix):\n if (matrix.shape[0]==2):\n a = sp.sqrt(sp.linalg.eigvals(matrix)[0]).real\n ris=sp.ndarray(shape=(2,2))\n ris[0,0] = a + (1/(4*a))*(matrix[0,0] - matrix[1,1])\n ris[1,1] = a - (1/(4*a))*(matrix[0,0] - matrix[1,1])\n ris[0,1] = (1/(2*a))*matrix[0,1]\n ris[1,0] = (1/(2*a))*matrix[1,0]\n else:\n return sp.sqrt(matrix)\n return ris\n\ndef block_structure(T):\n \"\"\"\n computes the block structure of the upper quasi-triangular matrix T\n m is the number of diagonal blocks\n fb is the array containing the begin of each block\n fe is the array containing the end of each block + 1\n s is an array containing the sizes of the diagonal blocks\n \"\"\"\n import scipy as sp\n\n n = len(T)\n tol = 1e-15\n\n i = 0\n v = []\n s = []\n\n while i < n-1:\n v.append(i)\n if abs(T[i+1,i]) int:\n if not nums: return -1\n lo, hi = 0, len(nums)-1\n L = len(nums)\n while lo != hi:\n mid = (lo+hi)//2\n if nums[mid] < nums[hi]:\n hi = mid\n else:\n lo = mid+1\n zero = lo\n lo, hi = 0, len(nums)-1\n\n while lo != hi:\n mid = (lo+hi)//2\n if target > nums[(mid+zero)%L]:\n lo = mid+1\n else:\n hi = mid\n # print(zero, lo)\n\n if nums[(lo+zero)%L] == target:\n return (lo+zero)%L\n else:\n return -1\n","sub_path":"0033.py","file_name":"0033.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"451342794","text":"#Nicholas Novak\n#Text-based Wheel of Fortune Game\n\n\nfrom IPython.display import clear_output\nimport random\nletters = \"qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM\"\n\n\n# # Python Code Sample: \"Wheel of Fortune\" Game\n# \n# ### This assignment is from my Python Coursework and is an implementation of a popular TV show game. Our game was called Circle of Value.\n# \n# ### The phrases are generted from a co-located text file called \"wheeloffortune.txt\"\n# \n# ### The game will perform like this:\n# \n# ### Prompt the user for their name, save it as a string\n# \n# ### The user starts off with 0 dollars at the beginning of the game, and their bankroll is updated as they either win money by correctly guessing, or pay 50 dollars to guess a vowel. The phrases are chosen at random.\n\n# ### I used three spaces between words in order to make the blank phrases more readable. Furthermore, I am using spaces between the letters to better denote the number of letters in a word, and tabs to space out the words during the puzzle display.\n# \n# ### Also, a string of the letters that have been guessed was created so the user will not guess the same letters.\n# \n# ### The game is played until either the user guesses the phrase (by solving the puzzle or having guessed all the letters) or they decide to quit or fail at solving the puzzle (they only get one shot). 
All user guesses are treated as uppercase regardless of what letter is entered in. Vowels include AEIOU; Y is considered not a vowel for this program. If the user guesses a letter in the word, they win the amount the wheel lands on times the number of letters that were in the phrase. Vowels merely cost 100 dollars regardless of how many are in the word. \n# \n# \n\n\n\n\n## First, this opens the file and randomly selects a line as our phrase for the game\n## Then, the file is closed and the randomly selected line is returned\ndef get_phrase():\n    wheel_file = open(\"wheeloffortune.txt\",\"r\") #Find and open list of phrases\n    \n    wheelall = wheel_file.readlines()\n    random.shuffle(wheelall) #Assign random line numbers\n    \n    wheelline = wheelall[0].rstrip() #Find a random line\n    \n    count = wheelline.index(\":\")\n    \n    global phrase #Set phrase as a global variable\n    phrase = \"\"\n    for p in wheelline[count+1::]: #Separate phrase from clue\n        \n        # Add spaces to phrase:\n        phrase += \" \"\n        phrase += p\n        phrase += \" \"\n        \n        \n    global clue #Set clue as a global variable\n    \n    clue = wheelline[:count:]\n    #Grab clue text\n    \n    wheel_file.close() #Close the file\n    return #End the function\n\n## Steps to take:\n## Get the randomly selected phrase\n## Ask user for their name\n## Loop until a lose condition occurs or until the puzzle is solved\n## Spin the wheel and get the result\n## Update the bankroll when either a letter has been guessed and is present in puzzle or vowel purchased\n## If you go bankrupt, bankroll should go to zero and turn ends, begin next turn\n## If logging is True, do not clear the screen otherwise by default it will clear the screen between turns\n\n\n\n\n\ndef play_game(logging=False):\n    get_phrase() #Run the get phrase function\n    \n    user_name = input(\"What is your name?\") #Determine name\n    \n    game = True #Break variable for while loop later used in game\n    \n    puzzle = \"\" #Assign variable for puzzle\n    \n    \n    for i in phrase:\n        if i in letters: #Create blank puzzle out of underscore characters\n            puzzle += \"_\"\n        \n        \n        else: #Insert spaces around any character not a letter.\n            puzzle += \"\"\n            puzzle += i\n            puzzle += \"\"\n    \n    bankroll = 0 #Starting money\n    \n    letters_guessed = \"\" #Assign variable for guessed letters\n    \n    while game == True: #Dangerous loop! (Hence break variable built into all outcomes)\n        \n        print(user_name,\"your current puzzle progress is\",puzzle,\"and you have guessed\",letters_guessed,\".\")\n        quit = input(\"Would you like to quit? Enter Y or N.\\n\")\n        if quit in \"Yy\": #Check for quit\n            print(\"Thanks for playing,\",user_name,\"! The phrase was: \",phrase)\n            game = False\n            return\n        \n        \n        else:\n            \n            print(\"Here are the letters you have guessed so far,\",user_name,\":\",letters_guessed,\"\\n\") #Show guessed letters\n            \n            print(\"You have\",bankroll,\"dollars in your bankroll\",\"\\n\") #Show money accumulated\n            \n            print(\"Your clue is:\",clue,\"\\n\") #Show clue\n            \n            print(\"Current Progress:\",puzzle,\"\\n\") #Show puzzle progress\n            \n            print(\"Spinning wheel.........now.\\n\") #Fun addition (serves no function except for the illusion of the program chugging/the wheel spinning in the background)\n            \n            spin_wheel() #Call spin wheel function\n            \n            print(\"Landed on $\",lander) #Give landed value\n            \n            if lander == \"BANKRUPT\":\n\n                print(\"Uh-oh! (Insert slide whistle) You got \",lander,\"! 
Spin again and see if you can earn all that \",bankroll,\" dollars back!\\n\")\n                bankroll = 0 #Exception for bankruptcy result (log not cleared for this)\n\n            \n            else:\n\n                solver = input(\"Would you like to solve the puzzle? If not hit enter, otherwise type in your guess. WARNING: IF YOU SOLVE INCORRECTLY, THE GAME WILL BE OVER\\n\")\n                solver = solver.upper() #Check if user wants to solve\n                \n\n\n                if solver == \"\": #If the user does not want to solve, continue\n                    \n                    \n                    print(\"What letter would you like to guess? Keep in mind you have to buy vowels (y not included).\\n\")\n                    guesser = input(\"Please type your guess here\\n\")\n                    guesser = guesser.upper() #Interpret input guess into uppercase\n                    \n                    \n                    if guesser in \"aeiouAEIOU\": #Vowel result\n                        bankroll += 0\n                        print(\"This roll of\",bankroll,\"is forfeited to buy the vowel\")\n                        \n\n                        if guesser in phrase: \n                            puzzle2 = \"\"\n                            count1 = -1 #Assign temp variables for this loop\n                            \n                            \n                            for i in phrase:\n                                count1 += 1\n                                if i == guesser: # If the letter matches the guessed letter\n                                    puzzle2 += i\n                                else:\n                                    puzzle2 += puzzle[count1] #Add guess to puzzle\n                            \n                            letters_guessed += guesser\n                            puzzle = puzzle2\n\n                        elif guesser not in phrase: #Result if incorrect guess\n                            print(\"Nooooo! That letter was not in the phrase!\")\n                            letters_guessed += guesser\n                    \n                    \n                    \n                    \n                    elif guesser in \"qwrtypsdfghjklzxcvbnmQWRTYPSDFGHJKLZXCVBNM\" and guesser not in letters_guessed: #Consonant results tree\n                        \n                        if guesser in phrase:\n                            puzzle2 = \"\"\n                            count2 = -1\n                            bankroll += lander \n                            for i in phrase: #Result if guess is in the phrase\n                                count2 += 1\n                                \n                                if i == guesser:\n                                    puzzle2 += i\n                                \n                                else: #Add letter to puzzle\n                                    puzzle2 += puzzle[count2]\n                            letters_guessed += guesser\n                            puzzle = puzzle2\n                            \n\n\n                        elif guesser not in phrase: #Result if incorrect guess\n                            print(\"Nooooo! That letter was not in the phrase!\")\n                            letters_guessed += guesser\n                            \n\n\n                    elif guesser in \"qwrtypsdfghjklzxcvbnmQWRTYPSDFGHJKLZXCVBNM\" and guesser in letters_guessed: #Check to see if letter has been guessed already\n                        print(\"You already guessed that letter! Spin again!\")\n                        \n                        \n                    else: #Check if guess is not in alphabet\n                        print(\"Please guess a letter!\")\n                        \n\n                elif solver == phrase: #Check if puzzle solved\n                    \n                    if lander != \"BANKRUPT\": #Add final value\n                        bankroll += int(lander)\n                    \n                    \n                    print(\"Congratulations \",user_name,\" you won\",bankroll,\"dollars! Go ahead and spend them all in one place.\") \n                    game = False #End game, game is won\n                    \n                    \n                    print(\"The phrase was\",phrase) #Show the phrase\n                    \n                    return\n\n                else: #Incorrect puzzle solution guess\n                    \n                    print(\"The phrase was,\",phrase,\"and you guessed\",solver,\":(. Better luck next time!\")\n                    game = False #End game, game is lost\n                    \n                    \n                    return\n        if puzzle == phrase: #Result if user wins through raw smarts and guessing power\n\n            game = False\n\n            print(\"Congratulations\",user_name,\"you won\",bankroll,\"dollars! 
Go ahead and spend them all in one place.\")\n            print(\"The phrase was\",phrase) #Show the phrase\n            \n            return\n        if logging == False:\n            clear_output() #Clear output between turns unless logging is set to True\n        \n    pass\n\n\n\n\n##Return a random element of this list whenever spin_wheel() is called\ndef spin_wheel(): #Randomized called value function\n    wheel = [300,500,1000,1500,400,\"BANKRUPT\",2250,2500,200,100] #Wheel values\n    \n    wheel_val = random.randint(0,9) #Random value generated\n    \n    global lander #Create lander as a global variable for use in the game function\n    lander = wheel[wheel_val] #Grab spin result\n    \n    return #End function without an output, since lander can be read at any time in the game loop\n\n\nplay_game(logging = False) #Run the game without logging previous turns\n","sub_path":"Circle_of_value_game.py","file_name":"Circle_of_value_game.py","file_ext":"py","file_size_in_byte":10795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"339786857","text":"'''\nWrite a function count_digits that takes one string as input and returns the \nnumber of digits 0123456789 in the string. You can use len() and the membership \noperator in but no other string functions. You should have loops!\n\nFor example:\nTest\tResult\nprint(count_digits(\"2527 MainSt\"))\n4\nprint(count_digits(\"Call me in 2 hours and 30 minutes\"))\n3\n'''\nimport string\n\ndef count_digits(string_input):\n    string_counter = 0\n    for i in range(len(string_input)):\n        if string_input[i] in string.digits:\n            string_counter = string_counter + 1\n    return string_counter","sub_path":"Wk4_June_11-17/LAB_7/LAB7_Q1.py","file_name":"LAB7_Q1.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"263378467","text":"\"\"\"\nMaximum Units on a Truck\n\nYou are assigned to put some amount of boxes onto one truck. You are given a 2D array boxTypes, where boxTypes[i] = [numberOfBoxesi, numberOfUnitsPerBoxi]:\n\nnumberOfBoxesi is the number of boxes of type i.\nnumberOfUnitsPerBoxi is the number of units in each box of the type i.\nYou are also given an integer truckSize, which is the maximum number of boxes that can be put on the truck. 
You can choose any boxes to put on the truck as long as the number of boxes does not exceed truckSize.\n\nReturn the maximum total number of units that can be put on the truck.\n\nInput: boxTypes = [[1,3],[2,2],[3,1]], truckSize = 4\nOutput: 8\nExplanation: There are:\n- 1 box of the first type that contains 3 units.\n- 2 boxes of the second type that contain 2 units each.\n- 3 boxes of the third type that contain 1 unit each.\nYou can take all the boxes of the first and second types, and one box of the third type.\nThe total number of units will be = (1 * 3) + (2 * 2) + (1 * 1) = 8.\n\nInput: boxTypes = [[5,10],[2,5],[4,7],[3,9]], truckSize = 10\nOutput: 91\n\nConstraints:\n\n1 <= boxTypes.length <= 1000\n1 <= numberOfBoxesi, numberOfUnitsPerBoxi <= 1000\n1 <= truckSize <= 106\n\"\"\"\n\nimport heapq\n\n\nclass Solution:\n def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:\n unit_count = 0\n units = []\n for box, unit in boxTypes:\n heapq.heappush(units, (-unit, box))\n while units:\n unit, box = heapq.heappop(units)\n box_unit = min(truckSize, box)\n unit_count += -unit * box_unit\n truckSize -= box_unit\n if truckSize == 0:\n break\n return unit_count\n","sub_path":"LeetCode/mock_interviews/amazon/maximum_units_on_a_truck.py","file_name":"maximum_units_on_a_truck.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"290362769","text":"from itertools import chain\nfrom . import BaseULObject\nfrom .component import resolve_reference, write_reference, Component\nfrom nineml import TopLevelObject\nfrom nineml.xmlns import NINEML, E\nfrom nineml.utils import expect_single, check_tag\nfrom nineml.annotations import annotate_xml, read_annotations\n\n\nclass Population(BaseULObject, TopLevelObject):\n \"\"\"\n A collection of spiking neurons all of the same type.\n\n **Arguments**:\n *name*\n a name for the population.\n *number*\n an integer, the number of neurons in the population\n *cell*\n a :class:`Component`, or :class:`Reference` to a component defining\n the cell type (i.e. 
the mathematical model and its\n parameterisation).\n *positions*\n TODO: need to check if positions/structure are in the v1 spec\n \"\"\"\n element_name = \"Population\"\n defining_attributes = (\"name\", \"number\", \"cell\", \"positions\")\n\n def __init__(self, name, number, cell, positions=None):\n super(Population, self).__init__()\n self.name = name\n self.number = number\n self.cell = cell\n if positions is not None:\n assert isinstance(positions, PositionList)\n self.positions = positions\n\n def __str__(self):\n return ('Population \"%s\": %dx\"%s\" %s' %\n (self.name, self.number, self.cell.name, self.positions))\n\n def __repr__(self):\n return (\"Population(name='{}', number={}, cell={}{})\"\n .format(self.name, self.number, self.cell.name,\n 'positions={}'.format(self.positions)\n if self.positions else ''))\n\n def get_components(self):\n \"\"\"\n Return a list of all components used by the population.\n \"\"\"\n components = []\n if self.cell:\n components.append(self.cell)\n components.extend(self.cell.properties.get_random_distributions())\n components.extend(\n self.cell.initial_values.get_random_distributions())\n if self.positions is not None:\n components.extend(self.positions.get_components())\n return components\n\n @property\n def attributes_with_units(self):\n return chain(*[c.attributes_with_units for c in self.get_components()])\n\n @write_reference\n @annotate_xml\n def to_xml(self):\n positions = [self.positions.to_xml()] if self.positions else []\n return E(self.element_name,\n E.Number(str(self.number)),\n E.Cell(self.cell.to_xml()),\n *positions,\n name=self.name)\n\n @classmethod\n @resolve_reference\n @read_annotations\n def from_xml(cls, element, document):\n check_tag(element, cls)\n layout_elem = element.find(NINEML + 'Layout')\n kwargs = {}\n if layout_elem:\n kwargs['positions'] = Component.from_xml(layout_elem, document)\n cell = expect_single(element.findall(NINEML + 'Cell'))\n cell_component = cell.find(NINEML + 'Component')\n if cell_component is None:\n cell_component = cell.find(NINEML + 'Reference')\n return cls(name=element.attrib['name'],\n number=int(element.find(NINEML + 'Number').text),\n cell=Component.from_xml(cell_component, document), **kwargs)\n\n\nclass PositionList(BaseULObject, TopLevelObject):\n \"\"\"\n Represents a list of network node positions. May contain either an explicit\n list of positions or a :class:`Structure` instance that can be used to\n generate positions.\n\n Either `positions` or `structure` should be provided. Providing both\n will raise an Exception.\n\n **Arguments**:\n *positions*\n a list of (x,y,z) tuples or a 3xN (Nx3?) numpy array.\n *structure*\n a :class:`Structure` component.\n \"\"\"\n element_name = \"Layout\"\n defining_attributes = []\n\n def __init__(self, positions=[], structure=None):\n \"\"\"\n Create a new PositionList.\n\n Either `positions` or `structure` should be provided. Providing both\n will raise an Exception.\n\n `positions` should be a list of (x,y,z) tuples or a 3xN (Nx3?) 
numpy\n array.\n `structure` should be a Structure componentclass.\n \"\"\"\n super(PositionList, self).__init__()\n if positions and structure:\n raise Exception(\"Please provide either positions or structure, \"\n \"not both.\")\n assert not isinstance(positions, Structure)\n self._positions = positions\n if isinstance(structure, Structure):\n self.structure = structure\n elif structure is None:\n self.structure = None\n else:\n raise Exception(\"structure is\", structure)\n\n def __eq__(self, other):\n if self._positions:\n return self._positions == other._positions\n else:\n return self.structure == other.structure\n\n def __str__(self):\n if self.structure:\n return \"positioned according to '%s'\" % self.structure.name\n else:\n return \"with explicit position list\"\n\n def get_positions(self, population):\n \"\"\"\n Return a list or 1D numpy array of (x,y,z) positions.\n \"\"\"\n if self._positions:\n assert len(self._positions) == population.number\n return self._positions\n elif self.structure:\n return self.structure.generate_positions(population.number)\n else:\n raise Exception(\"Neither positions nor structure is set.\")\n\n def get_components(self):\n if self.structure:\n return [self.structure]\n else:\n return []\n\n @write_reference\n @annotate_xml\n def to_xml(self):\n element = E(self.element_name)\n if self._positions:\n for pos in self._positions:\n x, y, z = pos\n element.append(E.position(x=str(x), y=str(y), z=str(z),\n units=\"um\"))\n elif self.structure:\n element.append(E.structure(self.structure.name))\n else:\n raise Exception(\"Neither positions nor structure is set.\")\n return element\n\n @classmethod\n @resolve_reference\n @read_annotations\n def from_xml(cls, element, document):\n if element is None:\n return None\n else:\n check_tag(element, cls)\n structure_element = element.find(NINEML + 'structure')\n if structure_element is not None:\n return cls(structure=document.resolve_ref(\n structure_element, Structure))\n else:\n positions = [(float(p.attrib['x']), float(p.attrib['y']),\n float(p.attrib['z']))\n for p in element.findall(NINEML + 'position')]\n return cls(positions=positions)\n\n\ndef qstr(obj):\n if isinstance(obj, basestring):\n return '\"%s\"' % obj\n else:\n return obj.__str__()\n\n\nclass Structure(Component):\n\n \"\"\"\n Component representing the structure of a network, e.g. 2D grid, random\n distribution within a sphere, etc.\n \"\"\"\n abstraction_layer_module = 'Structure'\n\n def generate_positions(self, number):\n \"\"\"\n Generate a number of node positions according to the network structure.\n \"\"\"\n raise NotImplementedError\n\n @property\n def is_csa(self):\n return self.get_definition().__module__ == 'csa.geometry' # probably need a better test @IgnorePep8\n\n def to_csa(self):\n if self.is_csa:\n return self.get_definition() # e.g. 
lambda size: csa.random2d(size, *self.properties) @IgnorePep8\n else:\n raise Exception(\"Structure cannot be transformed to CSA geometry \"\n \"function\")\n","sub_path":"lib9ml/python/nineml/user_layer/population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":7944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"543115090","text":"import sys\nimport numpy as np\nfrom collections import OrderedDict\nimport torch\nimport pyqtgraph as pg\nfrom PyQt5 import QtGui, QtCore\nfrom PyQt5.QtGui import QSlider,QVBoxLayout, QLabel, QDial, QWidget, QHBoxLayout, QComboBox, QGridLayout, QPushButton, QFileDialog\nimport net\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\n\ndecoder_path = \"models/decoder.pth\"\nvgg_path = \"models/vgg_normalised.pth\"\n\n\ndef init_torch():\n torch.cuda.set_device(0)\n\n\nclass NeuralNetworkManager():\n def __init__(self, input_dim):\n self.input_dimension = input_dim\n decoder = net.decoder\n vgg = net.vgg\n\n self.generate_model_dict(vgg, self.input_dimension)\n\n vgg.load_state_dict(torch.load(vgg_path))\n vgg = nn.Sequential(*list(vgg.children())[:31])\n decoder.load_state_dict(torch.load(decoder_path))\n self.net = net.Net(vgg, decoder)\n self.net.cuda()\n\n\n def set_model_dict(self, model_dict):\n self.model_dict = model_dict\n\n def set_mask(self, mask_list):\n self.masks = []\n for layer_index, conv_layer in enumerate(self.conv_dict.values()):\n _, out_channels, _, _, resolution = conv_layer\n mask_array = np.zeros( (out_channels, resolution, resolution) ).astype(np.float32)\n select = np.where(mask_list[layer_index]==1.)\n mask_array[ select, :, :] = 1.\n mask_tensor = torch.from_numpy(mask_array).cuda()\n mask_variable = Variable(mask_tensor.unsqueeze(0), volatile=True)\n\n self.masks.append(mask_variable)\n\n def set_input(self, input):\n input = np.transpose(input, (2, 1, 0))\n input_tensor = torch.from_numpy(input)\n input_tensor = input_tensor.cuda()\n self.input_variable = Variable(input_tensor.unsqueeze(0), volatile=True)\n\n\n def encode_decode(self):\n image = self.net.decode(self.net.encode(self.input_variable))\n frame = image.cpu().data.numpy()[0]\n frame = np.transpose(frame, (1, 2, 0))\n return frame\n\n def encode_decode_masked(self):\n\n encoding = self.net.encode_with_masking(self.input_variable, self.masks)\n image = self.net.decode(encoding)\n frame = image.cpu().data.numpy()[0]\n frame = np.transpose(frame, (1, 2, 0))\n return frame\n\n def generate_model_dict(self, network, input_resolution):\n\n layer_dict = OrderedDict()\n conv_dict = OrderedDict()\n res = input_resolution\n for i, layer in enumerate(network.children()):\n layer_name = \"{}-{}\".format(i, layer.__repr__())\n if \"Conv\" in layer_name:\n layer_dict[layer_name] = [layer.in_channels, layer.out_channels, layer.kernel_size, layer.stride]\n conv_dict[layer_name] = [layer.in_channels, layer.out_channels, layer.kernel_size, layer.stride, res]\n elif \"ReLU\" in layer_name:\n layer_dict[layer_name] = []\n elif \"MaxPool\" in layer_name:\n layer_dict[layer_name] = [layer.kernel_size, layer.stride]\n res = int(res/2)\n else:\n layer_dict[layer_name] = []\n self.layer_dict = layer_dict\n self.conv_dict =conv_dict\n\n\n\n\nclass NeuronGroup(pg.GraphicsObject):\n def __init__(self, layer, x, y):\n\n pg.GraphicsObject.__init__(self)\n self.x = x\n self.y =y\n self.layer = layer\n self.selected = True\n self.generatePicture()\n self.neurons = []\n\n def set_neurons(self, neuron_list):\n 
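# keep references to the member rects so a click on the group can toggle each one\n        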
self.neurons= neuron_list\n\n def click(self):\n self.selected = not self.selected\n for neuron in self.neurons:\n neuron.set_active(self.selected)\n\n self.generatePicture()\n self.update()\n\n def generatePicture(self):\n ## pre-computing a QPicture object allows paint() to run much more quickly,\n ## rather than re-drawing the shapes every time.\n self.picture = QtGui.QPicture()\n p = QtGui.QPainter(self.picture)\n p.setPen(pg.mkPen('w'))\n\n if self.selected:\n p.setBrush(pg.mkBrush('g'))\n else:\n p.setBrush(pg.mkBrush('r'))\n p.drawEllipse(self.x, self.y, 25, 25)\n p.end()\n\n def paint(self, p, *args):\n p.drawPicture(0, 0, self.picture)\n\n def boundingRect(self):\n ## boundingRect _must_ indicate the entire area that will be drawn on\n ## or else we will get artifacts and possibly crashing.\n ## (in this case, QPicture does all the work of computing the bouning rect for us)\n\n return QtCore.QRectF(self.picture.boundingRect())\n\n def getLayer(self):\n return self.layer\n\nclass NeuronRect(pg.GraphicsObject):\n def __init__(self, layer, index, x, y):\n pg.GraphicsObject.__init__(self)\n self.x =x\n self.y =y\n self.layer = layer\n self.index= index\n self.selected = True\n self.generatePicture()\n\n def generatePicture(self):\n ## pre-computing a QPicture object allows paint() to run much more quickly,\n ## rather than re-drawing the shapes every time.\n self.picture = QtGui.QPicture()\n p = QtGui.QPainter(self.picture)\n p.setPen(pg.mkPen('w'))\n\n if self.selected:\n p.setBrush(pg.mkBrush('g'))\n else:\n p.setBrush(pg.mkBrush('r'))\n p.drawRect(self.x,self.y,10,10)\n p.end()\n\n def set_active(self, active):\n self.selected = active\n self.generatePicture()\n self.update()\n\n def getLayerIndex(self):\n return self.layer, self.index\n\n def click(self):\n self.selected = not self.selected\n self.generatePicture()\n self.update()\n\n def paint(self, p, *args):\n p.drawPicture(0, 0, self.picture)\n\n def boundingRect(self):\n ## boundingRect _must_ indicate the entire area that will be drawn on\n ## or else we will get artifacts and possibly crashing.\n ## (in this case, QPicture does all the work of computing the bouning rect for us)\n\n return QtCore.QRectF(self.picture.boundingRect())\n\n\nclass MainWindow(QWidget):\n def __init__(self, input_dim):\n\n self.resolution =input_dim\n super(MainWindow, self).__init__()\n self.glw = pg.GraphicsLayoutWidget()\n\n self.noise_view = self.glw.addViewBox()\n self.noise_view.setAspectLocked()\n self.noise_img = pg.ImageItem()\n self.noise_view.addItem(self.noise_img)\n\n self.model_view = self.glw.addViewBox()\n self.model_view.setAspectLocked()\n self.output_view = self.glw.addViewBox()\n self.output_view.setAspectLocked()\n self.output_img = pg.ImageItem()\n self.output_view.addItem(self.output_img)\n\n\n self.glw.scene().sigMouseClicked.connect(self.onClick)\n #self.glw.sigSceneMouseMoved.connect(self.onClick)\n\n\n self.layout = QtGui.QGridLayout()\n self.setLayout(self.layout)\n self.layout.addWidget(self.glw)\n\n self.nnm = NeuralNetworkManager(input_dim)\n\n self.set_model(self.nnm.conv_dict)\n\n self.create_noise()\n # self.graphicsLayout = pg.GraphicsLayoutWidget()\n # self.setLayout(self.graphicsLayout.ci)\n\n def set_model(self, modelDict):\n self.model = modelDict\n self.layers = []\n for _, out_channels, _, _, _ in self.model.values():\n self.layers.append(np.ones((out_channels)))\n\n self.nnm.set_mask(self.layers)\n\n\n def create_noise(self):\n input = np.random.rand( self.resolution, self.resolution,3).astype(np.float32)\n 
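# show the raw noise in the input view and hand the same array to the network manager\n        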
self.noise_img.setImage(input)\n self.nnm.set_input(input)\n\n def visualize_model(self):\n column_step = 0\n numLayers = len(self.model.items())\n self.rects = []\n\n for layerIndex, (key,layer ) in enumerate(self.model.items()):\n rects =[]\n print(\"Layer {}/{}\".format(layerIndex,numLayers))\n if layerIndex >4 :continue\n in_channels, out_channels, kernel_size, stride, _ = layer\n neurons = []\n\n col_start = column_step\n for i in range(out_channels):\n height= i%32\n if (i%32) ==0:\n column_step+=15\n\n neuron= NeuronRect(layerIndex, i, column_step, height *15)\n neurons.append(neuron)\n self.model_view.addItem(neuron)\n\n neuron_group = NeuronGroup(layerIndex,col_start + (column_step-col_start)/2 , 32*15+10)\n neuron_group.set_neurons(neurons)\n self.model_view.addItem(neuron_group)\n column_step +=20\n\n def onClick(self, event):\n items = self.glw.scene().items(event.scenePos())\n for x in items:\n if isinstance(x, NeuronRect):\n x.click()\n layer, index = x.getLayerIndex()\n self.layers[layer][index]= 1. if x.selected else 0.\n self.nnm.set_mask(self.layers)\n self.update()\n\n if isinstance(x, NeuronGroup):\n x.click()\n layer = x.getLayer()\n self.layers[layer] = np.ones_like(self.layers[layer]) if x.selected else np.zeros_like(self.layers[layer])\n\n self.nnm.set_mask(self.layers)\n self.update()\n\n\n if x is self.noise_view:\n self.create_noise()\n self.update()\n\n def update(self):\n img = self.nnm.encode_decode_masked()\n self.output_img.setImage(img)\n\n\n\n\nif __name__ == \"__main__\":\n\n application = QtGui.QApplication([])\n\n\n mw = MainWindow(256)\n mw.visualize_model()\n mw.show()\n\n\n\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()","sub_path":"visualize_network.py","file_name":"visualize_network.py","file_ext":"py","file_size_in_byte":9808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"452068216","text":"#!/usr/bin/env python3\nimport sys\n\nif len(sys.argv) != 3:\n print(\"\\n\" + sys.argv[0] + \" inpute_file output_file\\n\")\n exit()\n\ninput_file = sys.argv[1]\noutput_file = sys.argv[2]\n\nwith open(output_file, \"w\") as out:\n\twith open(input_file, \"r\") as f:\n\t\tnext(f)\n\t\tfor line in f:\n\t\t\tname, data = line.rstrip().split(\":\")\n\t\t\tif name == \"jnetpred\":\n\t\t\t\tdata = data.split(\",\")\n\t\t\t\tprint(name, data)\n\t\t\t\tfor s in data[:-1]:\n\t\t\t\t\ta = 0.0\n\t\t\t\t\tb = 0.0\n\t\t\t\t\tif s == 'H':\n\t\t\t\t\t\ta = 1.0\n\t\t\t\t\telif s == 'E':\n\t\t\t\t\t\tb = 1.0\n\t\t\t\t\tout.write(str(round(a, 1)))\n\t\t\t\t\tout.write(' ')\n\t\t\t\t\tout.write(str(round(b, 1)))\n\t\t\t\t\tout.write('\\n')\n","sub_path":"GenSswight_from_mass_jpred.py","file_name":"GenSswight_from_mass_jpred.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"128382536","text":"from __future__ import print_function\nimport instagram_urls \nimport flickr_urls\nimport phhhoto_urls\nimport urllib\nimport random\nimport os\nimport MySQLdb\nimport urlparse\n\nMAX_DIRECTORY_SIZE = 300*1024*1024\n\n## this is a super simple implementation to get the URLs for all the pictures \n## in a public instagram or flickr account, then download the a random 200 images to a folder.\n\ndef connect_to_db():\n db = MySQLdb.connect(host='localhost',\n user='pictureplayer',\n passwd='pictureplayer',\n db='pictureplayer',\n charset='utf8', \n use_unicode='true') \n 
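# a brand-new connection is opened on every call; nothing is pooled or reused\n    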
return db\n\ndef get_usernames_for_service(service):\n\n    db = connect_to_db()\n    c = db.cursor()\n    sqltext = \"select user_name from user_names where service = '%s' group by 1;\" % (service)\n    c.execute(sqltext)\n    result = c.fetchall()\n\n    user_names_with_service = []\n    for row in result:\n        to_append = {}\n        to_append['service'] = service\n        to_append['username'] = row[0]\n        user_names_with_service.append(to_append)\n\n    return user_names_with_service\n\n\ndef get_services_needed():\n    db = connect_to_db()\n    c = db.cursor()\n    sqltext = \"select service from user_names group by 1;\"\n    c.execute(sqltext)\n    result = c.fetchall()\n\n    services = []\n    for row in result:\n        services.append(row[0])\n\n    return services\n\n\ndef set_refresh_flag():\n    db = connect_to_db()\n    c = db.cursor()\n    sqltext = \"update refresh_flag set refresh_flag = 'Y';\"\n    c.execute(sqltext)\n    # commit, otherwise the player process never sees the updated flag\n    db.commit()\n\ndef delete_photos_if_too_big():\n\n    # get the size of the images directory; os.listdir returns bare file names,\n    # so join them back onto the directory before stat-ing them\n    directory_size = sum(os.path.getsize(os.path.join('./images', f)) for f in os.listdir('./images') if os.path.isfile(os.path.join('./images', f)))\n    # if it's bigger than our maximum image size,\n    if directory_size > MAX_DIRECTORY_SIZE:\n        # delete a random half of the images stored\n        list_of_downloaded_photos = os.listdir('./images')\n        random.shuffle(list_of_downloaded_photos)\n        middlePhoto = len(list_of_downloaded_photos) // 2\n        half_list = list_of_downloaded_photos[:middlePhoto]\n        for item in half_list:\n            os.remove(os.path.join('./images', item))\n\ndef insert_photos_into_database(photo_urls, hashtag):\n    # hashtag is the tag these photo URLs were collected under, passed in\n    # explicitly rather than read from a global\n    for photo in photo_urls:\n        # don't want to insert anything that's already in there\n        db = connect_to_db()\n        c = db.cursor()\n        print(photo + ' ' + hashtag)\n        sqltext = '''insert into photos (photo_url, user_name, service)\n        select * from (select '%s', '%s', 'instagram-hashtag') a\n        where not exists (select photo_url from photos where photo_url = '%s')\n        ;\n        ''' % (photo, hashtag, photo)\n        c.execute(sqltext)\n        db.commit()\n\n\nif __name__ == '__main__':\n\n    # get the services we care about and the usernames on those services\n    services = get_services_needed()\n    user_names_and_services = []\n    for service in services:\n        user_names_and_services.append(get_usernames_for_service(service))\n\n    # get the URLs by service - new services will be added here\n    for service in user_names_and_services:\n        for x in service:\n            if x['service'] == 'instagram':\n                print('getting instagram')\n                instagram_urls.get_all_photos_for_user_name(x['username'])\n            elif x['service'] == 'instagram-hashtag':\n                print('getting instagram-hashtag')\n                instagram_urls.get_all_photos_for_hashtag(x['username'])\n            elif x['service'] == 'flickr-set':\n                print('getting flickr-set')\n                flickr_urls.get_all_photos_in_set(x['username'])\n            elif x['service'] == 'flickr-user':\n                print('getting flickr-user')\n                flickr_urls.get_all_photos_for_user_name(x['username'])\n            elif x['service'] == 'phhhoto-user':\n                print('getting phhhoto-user')\n                phhhoto_urls.get_all_photos_for_user_name(x['username'])\n\n    print('back in picturechecker')\n    db = connect_to_db()\n    c = db.cursor()\n\n    # get up to 200 random pictures that we don't already have:\n    # check each photo's filename against what's already downloaded\n    sqltext = 'select photo_url from photos order by rand() limit 200;'\n    c.execute(sqltext)\n    result = c.fetchall()\n\n    # get the list of the current photos that are downloaded\n    list_of_downloaded_photos = os.listdir('./images')\n    delete_photos_if_too_big()\n\n    for row in result:\n        # get the filename so we know where to 
save it\n print('Downloading file '+ urlparse.urlsplit(row[0]).path)\n filename = os.path.basename(urlparse.urlsplit(row[0]).path)\n # if we haven't already downloaded this, download that picture to this server\n if filename not in list_of_downloaded_photos:\n urllib.urlretrieve(row[0], \"images/\"+filename)\n\n \n # tell the pictureplayer to refresh itself\n set_refresh_flag()\n","sub_path":"python/picturechecker.py","file_name":"picturechecker.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"303032959","text":"from quatro import sql_query, scalar_data, tabular_data\n\n\ndef ord_no_orl_id(config, ord_no):\n sql_exp = f'SELECT orl_id FROM order_line WHERE ord_no = {ord_no} AND prt_no <> \\'\\''\n result_set = sql_query(sql_exp, config.sigm_db_cursor)\n orl_ids = tabular_data(result_set)\n return orl_ids\n\n\ndef orl_id_prt_no(config, orl_id):\n sql_exp = f'SELECT prt_no FROM order_line WHERE orl_id = {orl_id}'\n result_set = sql_query(sql_exp, config.sigm_db_cursor)\n prt_no = scalar_data(result_set)\n return prt_no\n\n\ndef prt_no_children(config, prt_no):\n sql_exp = f\"SELECT child_no FROM bom WHERE parent_no = '{prt_no}'\"\n result_set = sql_query(sql_exp, config.sigm_db_cursor)\n children = tabular_data(result_set)\n return children\n\n\ndef critical_parts(config):\n sql_exp = f\"SELECT prt_no FROM critical_quantities WHERE source IN ('PROD', 'ORDER')\"\n result_set = sql_query(sql_exp, config.sigm_db_cursor)\n prt_nos = tabular_data(result_set)\n return prt_nos\n","sub_path":"statements.py","file_name":"statements.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"606146700","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Tool name :tree\n# Module name :tree.py\n# Detail :The script to simulate Windows command \"tree\".\n# Implementer :R.Ishikawa\n# Version :1.4\n# Last update :2017/12/16\n\n# Version History\n# 1. Create New R.Ishikawa Ver.1.0\n# 2. display node, \"|-\" R.Ishikawa Ver.1.1\n# 3. Add messages R.Ishikawa Ver.1.2\n# 4. Changed node \"|-\" to \"|__ \"\n# Add \"@\" for directory R.Ishikawa Ver.1.3\n# 5. Add shebang, messages & comments R.Ishikawa Ver.1.4\n\nimport os\n\n# define export file\nexportfile = \"filetree.txt\"\n\n# define current directory\nsearchdir = \".\"\n\nprint (\"\\n\"+\"Export FileTree Start!\")\n\n# Scan directory and file\nwk = os.walk(searchdir)\n\nwith open(exportfile,mode=\"w\") as f:\n f.write(\"\\n\" + \"'@' is directory.\" + \"\\n\")\n f.write(\"FileTree is as follows... 
\" + \"\\n\" + \"\\n\")\n \n for dirpath, dirs, files in wk:\n path = dirpath.split(\"/\")\n # Export directory\n f.write(\"\\t\"*(len(path)-2) + \"@\" + \" \" + path[-1] + \"\\n\")\n for x in files:\n #Export node and file\n f.write(\"\\t\"*(len(path)-1) + \"|__\" + \" \" + x + \"\\n\")\n\nprint (\"\\n\"+\"Export FileTree Finished!\"+\"\\n\")\n","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"523820857","text":"import json\nimport os\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import TemplateView\nimport dateutil.parser as dparser\nimport os.path, time\n\nfrom pcinfo.models import PC, MOTHERBOARD, RAM, GPU, OS, HDD, CPU\n\n\nclass MainPage(TemplateView):\n pass\n\n\nclass PcinfoTable(TemplateView):\n def get_context_data(self, *args, **kwargs):\n \"\"\"\n Если мы получили GET запрос.\n \"\"\"\n context = super(PcinfoTable, self).get_context_data(**kwargs)\n path = 'D:\\\\pcinfo'\n # path = 'templates\\\\pcinfo'\n # f = open('C:\\\\Users\\\\admin\\\\PycharmProjects\\\\web_map\\\\templates\\\\pcinfo\\\\10.1.52.11.txt')\n files = []\n for d, dirs, fs in os.walk(path):\n if d == path:\n files = fs\n i = 0\n PCs = []\n for file in files:\n try:\n if file.split(\".\")[len(file.split(\".\")) - 1] == \"txt\":\n i += 1\n PC = []\n name = file\n filechanged = time.ctime(os.path.getmtime(path + \"\\\\\" + str(file)))\n f = open(path + \"\\\\\" + str(file))\n file = []\n for line in f:\n file.append(line)\n # print(str(i) + \". \" + f.name + \" | \" + file[2][file[2].rfind(':') + 1:len(file[2]) - 1])\n try:\n PC.append(i)\n PC.append(file[0][file[0].rfind(':') + 1:len(file[0]) - 1])\n PC.append(file[2][file[2].rfind(':') + 1:len(file[2]) - 1])\n PC.append(file[3][file[3].rfind(':') + 1:len(file[3]) - 1])\n PC.append(file[4][file[4].rfind(':') + 1:len(file[4]) - 1])\n PC.append(dparser.parse(file[5][file[5].rfind(':') + 1:len(file[5]) - 1]))\n PC.append(int(file[14][file[14].rfind(':') + 1:len(file[14]) - 1]))\n PC.append(file[11][file[11].rfind(':') + 1:len(file[11]) - 1])\n PC.append(dparser.parse(filechanged))\n PC.append(name)\n except Exception as e:\n print(e)\n PCs.append(PC)\n f.close()\n # PC['IP'] =\n except Exception as e:\n pass\n PCsOut = []\n for i in range(len(PCs)):\n finder = False\n for j in range(len(PCs)):\n if PCs[i][2] == PCs[j][2] and PCs[j][9] != PCs[i][9]:\n if PCs[i][8] > PCs[j][8]:\n finder = True\n if not finder:\n PCsOut.append(PCs[i])\n\n if \"sort_id\" in context:\n n = int(context['sort_id'])\n else:\n n = 0\n\n def sort_col(i):\n return i[n]\n\n PCsOut.sort(key=sort_col)\n context['PCs'] = PCsOut\n context['selected'] = [5, 11, 14, 15, 20, 21, 27]\n context['page'] = 1\n return context\n\n\nclass TableGenerator(TemplateView):\n def get_context_data(self, *args, **kwargs):\n \"\"\"\n Если мы получили GET запрос.\n \"\"\"\n context = super(PcinfoTable, self).get_context_data(**kwargs)\n path = '\\\\\\\\b1-fileshare\\\\pcinfo'\n # path = 'templates\\\\pcinfo'\n # f = open('C:\\\\Users\\\\admin\\\\PycharmProjects\\\\web_map\\\\templates\\\\pcinfo\\\\10.1.52.11.txt')\n files = []\n for d, dirs, fs in os.walk(path):\n files = fs\n i = 0\n\n PCs = []\n for file in files:\n try:\n if file.split(\".\")[len(file.split(\".\")) - 1] == \"txt\":\n i += 1\n PC = []\n name = 
file\n filechanged = time.ctime(os.path.getmtime(path + \"\\\\\" + str(file)))\n f = open(path + \"\\\\\" + str(file))\n file = []\n for line in f:\n file.append(line)\n # print(str(i) + \". \" + f.name + \" | \" + file[2][file[2].rfind(':') + 1:len(file[2]) - 1])\n try:\n PC.append(i)\n PC.append(file[0][file[0].rfind(':') + 1:len(file[0]) - 1])\n PC.append(file[2][file[2].rfind(':') + 1:len(file[2]) - 1])\n PC.append(file[3][file[3].rfind(':') + 1:len(file[3]) - 1])\n PC.append(file[4][file[4].rfind(':') + 1:len(file[4]) - 1])\n PC.append(dparser.parse(file[5][file[5].rfind(':') + 1:len(file[5]) - 1]))\n PC.append(int(file[14][file[14].rfind(':') + 1:len(file[14]) - 1]))\n PC.append(file[11][file[11].rfind(':') + 1:len(file[11]) - 1])\n PC.append(dparser.parse(filechanged))\n PC.append(name)\n except Exception as e:\n print(e)\n PCs.append(PC)\n f.close()\n # PC['IP'] =\n except Exception as e:\n pass\n PCsOut = []\n for i in range(len(PCs)):\n finder = False\n for j in range(len(PCs)):\n if PCs[i][2] == PCs[j][2] and PCs[j][9] != PCs[i][9]:\n if PCs[i][8] > PCs[j][8]:\n finder = True\n if not finder:\n PCsOut.append(PCs[i])\n\n if \"sort_id\" in context:\n n = int(context['sort_id'])\n else:\n n = 0\n\n def sort_col(i):\n return i[n]\n\n PCsOut.sort(key=sort_col)\n context['PCs'] = PCsOut\n context['selected'] = [5, 11, 14, 15, 20, 21, 27]\n context['page'] = 1\n return context\n\n\nclass Support(TemplateView):\n def get_context_data(self, **kwargs):\n context = super(Support).get_context_data(**kwargs)\n return context\n\n\nclass DataGetter(TemplateView):\n @method_decorator(csrf_exempt)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body.decode('cp866'))\n print(data)\n pc = PC.objects.create(\n ip=data['ip'],\n bios_manufacturer=data['bios']['manifacturer'],\n bios_version=data['bios']['version'],\n )\n for item in data['motherboard']:\n pc.motherboard.add(MOTHERBOARD.objects.create(\n name=data['motherboard'][item]['name'],\n manufacturer=data['motherboard'][item]['manufacturer'],\n chipset=data['motherboard'][item]['chipset'],\n serial_number=data['motherboard'][item]['serial_number'],\n ))\n for item in data['ram']:\n pc.ram.add(RAM.objects.create(\n name=data['ram'][item]['name'],\n manufacturer=data['ram'][item]['manufacturer'],\n capacity=data['ram'][item]['capacity'],\n form_factor=data['ram'][item]['form_factor'],\n clock_speed=data['ram'][item]['clock_speed'],\n ))\n for item in data['gpu']:\n pc.gpu.add(GPU.objects.create(\n name=data['gpu'][item]['name'],\n manufacturer=data['gpu'][item]['manufacturer'],\n adapter_ram=data['gpu'][item]['adapter_ram'],\n driver_version=data['gpu'][item]['driver_version'],\n video_processor=data['gpu'][item]['video_processor'],\n ))\n for item in data['os']:\n pc.os.add(OS.objects.create(\n name=data['os'][item]['name'],\n manufacturer=data['os'][item]['manufacturer'],\n caption=data['os'][item]['caption'],\n version=data['os'][item]['version'],\n computer_name=data['os'][item]['computer_name'],\n current_user=data['os'][item]['current_user'],\n install_date=data['os'][item]['install_date'],\n build_number=data['os'][item]['build_number'],\n boot_device=data['os'][item]['boot_device'],\n total_visible_memory=data['os'][item]['total_visible_memory'],\n serial_number=data['os'][item]['serial_number'],\n ))\n for item in data['hdd']:\n pc.hdd.add(HDD.objects.create(\n name=data['hdd'][item]['name'],\n ))\n for item in data['cpu']:\n 
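# same pattern as the motherboard/ram/gpu/os/hdd blocks above: create one\n            # row per reported CPU and attach it to the PC record\n            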
pc.cpu.add(CPU.objects.create(\n name=data['cpu'][item]['name'],\n manufacturer=data['cpu'][item]['manufacturer'],\n core_count=data['cpu'][item]['core_count'],\n clock_speed=data['cpu'][item]['clock_speed'],\n architecture=data['cpu'][item]['architecture'],\n ))\n pc.save()\n return HttpResponse('OK')\n","sub_path":"pcinfo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"284197130","text":"from typing import List\r\nimport discord\r\nfrom discord.ext import commands\r\nfrom .utils import checks\r\nfrom .utils.dataIO import dataIO, fileIO\r\nfrom .utils import checks, chat_formatting as cf\r\nfrom __main__ import send_cmd_help\r\nimport os\r\nimport time\r\nimport datetime\r\nimport asyncio\r\nimport operator\r\n\r\ndefault_settings = {\r\n \"join_message\": \"{0.mention} has joined the server.\",\r\n \"leave_message\": \"{0.mention} has left the server.\",\r\n \"ban_message\": \"{0.mention} has been banned.\",\r\n \"unban_message\": \"{0.mention} has been unbanned.\",\r\n \"join_mp\": \"Salut {0.mention}, bienvenue sur EK !\",\r\n \"on\": False,\r\n \"channel\": None\r\n}\r\n\r\nclass Tools:\r\n \"\"\"Ensemble d'outils.\"\"\"\r\n\r\n def __init__(self, bot: commands.Bot):\r\n self.bot = bot\r\n self.settings_path = \"data/membership/settings.json\"\r\n self.settings = dataIO.load_json(self.settings_path)\r\n self.live = dataIO.load_json(\"data/gen/live.json\")\r\n\r\n def compare_role(self, user, rolelist):\r\n for role in rolelist:\r\n if role in user.roles:\r\n return True\r\n else:\r\n return False\r\n\r\n def log_update(self, server, change: str):\r\n temps = time.strftime(\"%d/%m/%Y %H:%M:%S\", time.localtime())\r\n if server.id in self.live:\r\n self.live[server.id][\"UPDATE\"].append([temps, change])\r\n fileIO(\"data/gen/live.json\", \"save\", self.live)\r\n return True\r\n else:\r\n self.live[server.id] = {\"NOM\" : server.name,\r\n \"UPDATE\" : []}\r\n self.live[server.id][\"UPDATE\"].append([temps, change])\r\n fileIO(\"data/gen/live.json\", \"save\", self.live)\r\n return True\r\n\r\n @commands.command(pass_context=True)\r\n async def jp(self, ctx, nb: int=10):\r\n \"\"\"Affiche les X derniers changements de pseudo du serveur.\r\n\r\n Par défaut les 10 derniers.\"\"\"\r\n server = ctx.message.server\r\n clsm = []\r\n msg = \"**Derniers changements**\\n\"\r\n if server.id in self.live:\r\n for e in self.live[server.id][\"UPDATE\"]:\r\n clsm.append([e[0],e[1]])\r\n clsm = sorted(clsm, key=operator.itemgetter(0))\r\n clsm.reverse()\r\n if len(clsm) <= nb:\r\n nb = len(clsm)\r\n a = 0\r\n while a < nb:\r\n rang = clsm[a]\r\n temps = rang[0]\r\n update = rang[1]\r\n msg += \"__{}__ > {}\\n\".format(temps, update)\r\n a += 1\r\n await self.bot.say(msg)\r\n else:\r\n self.live[server.id] = {\"NOM\": server.name,\r\n \"UPDATE\": []}\r\n fileIO(\"data/gen/live.json\", \"save\", self.live)\r\n await self.bot.say(\"Aucun changement enregistré pour ce serveur.\")\r\n\r\n @commands.command(pass_context=True, no_pm=True)\r\n async def easter(self, ctx):\r\n \"\"\"Ceci n'est pas un easter egg.\"\"\"\r\n # Pour les gens qui cherchent des easter-eggs dans les codes :jpp:\r\n await self.bot.say(\"```css\\nCeci n'est en aucun cas un easter-egg...```\")\r\n await asyncio.sleep(66)\r\n await self.bot.say(\"Ou bien si...\")\r\n await asyncio.sleep(1)\r\n await 
self.bot.say(\"011001010110011101100111001000000110010101110011011101000010000001101100011000010010000001110010011001010111000001101111011011100111001101100101\")\r\n\r\n# AUTRE ------------------------------------------------------------\r\n\r\n @commands.command(pass_context=True)\r\n async def membernb(self, ctx):\r\n \"\"\"Renvoie le nombre de membres présents sur le serveur.\"\"\"\r\n server = ctx.message.server\r\n a = 0\r\n await self.bot.say(\"Il y a\" + str(len(server.members)) + \" membres sur le serveur.\")\r\n for member in server.members:\r\n if member.status is discord.Status.online:\r\n a += 1\r\n else:\r\n await self.bot.say(\"Dont {} actifs.\".format(a))\r\n \r\n @commands.command(pass_context=True)\r\n async def bans(self, ctx):\r\n \"\"\"Retrouve les bannis du serveur.\"\"\"\r\n server = ctx.message.server\r\n a = 0\r\n for member in await self.bot.get_bans(server):\r\n a +=1\r\n await self.bot.say(\"Il y a {} bannis sur ce serveur.\".format(a))\r\n\r\n @commands.command(pass_context=True)\r\n async def region(self, ctx):\r\n \"\"\"Affiche la région du serveur.\"\"\"\r\n await self.bot.say(ctx.message.server.region)\r\n\r\n @commands.command(pass_context=True)\r\n async def time(self, ctx, temps:int):\r\n \"\"\"Permet d'attendre pendant x secondes.\"\"\"\r\n now = int(time.time())\r\n await self.bot.say(\"Now : {}\".format(now))\r\n vise = now + temps\r\n while int(time.time()) != vise:\r\n pass\r\n else:\r\n await self.bot.say(\"Terminé : {}\".format(int(time.time())))\r\n\r\n @commands.command(pass_context=True)\r\n async def urole(self, ctx):\r\n \"\"\"Affiche l'ensemble des rôles liés aux utilisateurs du serveur.\"\"\"\r\n server = ctx.message.server\r\n msg = \"**Serveur {}:**\\n\".format(server.name)\r\n n = 1\r\n for member in server.members:\r\n clean = []\r\n for role in member.roles:\r\n clean.append(role.name)\r\n if clean != [\"@everyone\"]:\r\n msg += \"{} | {}\\n\".format(member.mention, clean)\r\n if len(msg) > 1900 * n:\r\n msg += \"!!\"\r\n n += 1\r\n else:\r\n listmsg = msg.split(\"!!\")\r\n for msg in listmsg:\r\n await self.bot.say(msg)\r\n\r\n @commands.command(pass_context=True)\r\n async def ancien(self, ctx, jours: int):\r\n \"\"\"Recherche les gens qui ont + de X jours sur le serveur.\"\"\"\r\n server = ctx.message.server\r\n msg = \"**Anciens (+{} jours):**\\n\".format(jours)\r\n n = 1\r\n for member in server.members:\r\n passed = (ctx.message.timestamp - member.joined_at).days\r\n if passed >= jours:\r\n clean = []\r\n for role in member.roles:\r\n clean.append(role.name)\r\n if \"Habitué\" in clean:\r\n msg += \"{}\\n\".format(member.mention)\r\n if len(msg) > 1900 * n:\r\n msg += \"!!\"\r\n n += 1\r\n else:\r\n listmsg = msg.split(\"!!\")\r\n for msg in listmsg:\r\n await self.bot.whisper(msg)\r\n \r\n @commands.command(pass_context=True)\r\n async def suser(self, ctx, *snom):\r\n \"\"\"Recherche un utilisateur.\"\"\"\r\n server = ctx.message.server\r\n nom = \" \".join(snom).lower()\r\n nb = 0\r\n for member in server.members: #On cherche le nombre de résultats [RECHERCHE ALL]\r\n if nom in member.name or nom in member.display_name:\r\n nb += 1\r\n if nb > 0:\r\n await self.bot.say(\"**{} Résultat(s)**\".format(nb))\r\n await asyncio.sleep(0.25)\r\n msg = \"**Quel mode voulez-vous utiliser ?**\\n\"\r\n msg += \"*ALL* - Affiche l'ensemble des résultats\\n\"\r\n msg += \"*DEB* - Affiche les résultats commençant par votre recherche\\n\"\r\n msg += \"*FIN* - Affiche les résultats finissant par votre recherche\\n\"\r\n msg += 
\"-----------------------\\n\"\r\n msg += \"*ACTIF* - Affiche seulement les membres actifs\\n\"\r\n msg += \"*INACT* - Affiche seulement les membres inactifs\\n\"\r\n msg += \"*ROLE* - Affiche seulement les membres possédant le rôle visé\\n\"\r\n msg += \"*STR* - Recherche stricte, la case est prise en compte\\n\"\r\n msg += \"\\n\" + \"**Assemblez les mots clefs ci-dessus pour lancer la recherche...**\\n*Note : N'oubliez pas qu'il faut au moins 'ALL','DEB' ou 'FIN' pour démarrer la recherche !*\"\r\n await self.bot.say(msg)\r\n verif = False\r\n while verif != True:\r\n rps = await self.bot.wait_for_message(author = ctx.message.author, channel = ctx.message.channel)\r\n rps = rps.content.lower()\r\n if rps != \"\":\r\n \r\n if \"role\" in rps:\r\n verif2 = False\r\n await self.bot.say(\"**Mentionne le(s) rôle(s) que tu veux rechercher:**\")\r\n while verif2 != True:\r\n rmsg = await self.bot.wait_for_message(author = ctx.message.author, channel = ctx.message.channel)\r\n if rmsg.content != \"\":\r\n rlist = rmsg.role_mentions\r\n verif2 = True\r\n else:\r\n await self.bot.say(\"Invalide, réessaye !\")\r\n continue\r\n \r\n if \"str\" in rps:\r\n nom = \" \".join(snom)\r\n \r\n if \"actif\" and \"inactif\" in rps:\r\n await self.bot.say(\"Certains mots clefs ne sont pas compatibles entre eux ! ('ACTIF' et 'INACT' reviennent à 'ALL' !)\\nRéessayez.\")\r\n continue\r\n \r\n #RECHERCHE\r\n verif = True\r\n if \"all\" in rps: #MODE FULL\r\n if \"actif\" in rps: #FULL ACTIF\r\n res = \"**Voici ce que j'ai trouvé pour [ALL, ACTIF]:**\\n\"\r\n for member in server.members:\r\n if nom in member.name or nom in member.display_name:\r\n if member.status is discord.Status.online:\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n elif \"inact\" in rps: #FULL INACTIF\r\n res = \"**Voici ce que j'ai trouvé pour [ALL, INACT]:**\\n\"\r\n for member in server.members:\r\n if nom in member.name or nom in member.display_name:\r\n if member.status is discord.Status.offline:\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n elif \"role\" in rps: #FULL ROLE\r\n res = \"**Voici ce que j'ai trouvé pour [ALL, ROLE] (Avec les rôles spécifiés):**\\n\"\r\n for member in server.members:\r\n if nom in member.name or nom in member.display_name:\r\n if self.compare_role(member, rlist):\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n else: #FULL SIMPLE\r\n res = \"**Voici ce que j'ai trouvé pour [ALL]:**\\n\"\r\n for member in server.members:\r\n if nom in member.name or nom in member.display_name:\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n await self.bot.say(res)\r\n elif \"deb\" in rps: #MODE DEBUT\r\n r = len(nom)\r\n if \"actif\" in rps: #DEBUT ACTIF\r\n res = \"**Voici ce que j'ai trouvé pour [DEB, ACTIF]:**\\n\"\r\n for member in server.members:\r\n if nom in member.name[:r] or nom in member.display_name[:r]:\r\n if member.status is discord.Status.online:\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n elif \"inact\" in rps: #DEBUT INACT\r\n res = \"**Voici ce que j'ai trouvé pour [DEB, INACT]:**\\n\"\r\n for member in server.members:\r\n if nom in member.name[:r] or nom in member.display_name[:r]:\r\n if member.status is discord.Status.offline:\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n elif \"role\" in rps: #DEBUT ROLE\r\n res = \"**Voici ce que j'ai trouvé pour [DEB, ROLE] (Avec les rôles spécifiés):**\\n\"\r\n for member in 
server.members:\r\n if nom in member.name[:r] or nom in member.display_name[:r]:\r\n if self.compare_role(member, rlist):\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n else: #DEBUT SIMPLE\r\n res = \"**Voici ce que j'ai trouvé pour [DEB]:**\\n\"\r\n for member in server.members:\r\n if nom in member.name[:r] or nom in member.display_name[:r]:\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n await self.bot.say(res)\r\n elif \"fin\" in rps: #MODE FIN\r\n r = len(nom)\r\n if \"actif\" in rps: #FIN ACTIF\r\n res = \"**Voici ce que j'ai trouvé pour [FIN, ACTIF]:**\\n\"\r\n for member in server.members:\r\n if nom in member.name[-r:] or nom in member.display_name[-r:]:\r\n if member.status is discord.Status.online:\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n elif \"inact\" in rps: #FIN INACT\r\n res = \"**Voici ce que j'ai trouvé pour [FIN, INACT]:**\\n\"\r\n for member in server.members:\r\n if nom in member.name[-r:] or nom in member.display_name[-r:]:\r\n if member.status is discord.Status.offline:\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n elif \"role\" in rps: #FIN ROLE\r\n res = \"**Voici ce que j'ai trouvé pour [FIN, ROLE] (Avec les rôles spécifiés):**\\n\"\r\n for member in server.members:\r\n if nom in member.name[-r:] or nom in member.display_name[-r:]:\r\n if self.compare_role(member, rlist):\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n else: #FIN SIMPLE\r\n res = \"**Voici ce que j'ai trouvé pour [FIN]:**\\n\"\r\n for member in server.members:\r\n if nom in member.name[-r:] or nom in member.display_name[-r:]:\r\n res += \"*{}* | {}\\n\".format(member.display_name, member.mention)\r\n await self.bot.say(res)\r\n else:\r\n await self.bot.say(\"Votre recherche ne donne aucun résultat. Vérifiez vos mots-clefs.\")\r\n continue\r\n else:\r\n await self.bot.say(\"Mettez des mots clefs pour commencer une recherche.\")\r\n continue\r\n\r\n @commands.command(pass_context=True)\r\n @checks.mod_or_permissions(ban_members=True)\r\n async def unban(self, ctx, user : discord.Member):\r\n \"\"\"Permet le déban d'un utilisateur.\"\"\"\r\n server = ctx.message.server\r\n if user in server.members:\r\n self.bot.unban(server, user)\r\n await self.bot.say(\"L'utilisateur a été débanni.\")\r\n else:\r\n await self.bot.say(\"L'utilisateur n'est pas sur le serveur.\")\r\n\r\n# MASSDM -----------------------------------------------------------\r\n\r\n def _member_has_role(self, member: discord.Member, role: discord.Role):\r\n return role in member.roles\r\n\r\n def _get_users_with_role(self, server: discord.Server,\r\n role: discord.Role) -> List[discord.User]:\r\n roled = []\r\n for member in server.members:\r\n if self._member_has_role(member, role):\r\n roled.append(member)\r\n return roled\r\n\r\n @commands.command(no_pm=True, pass_context=True, name=\"mdm\", aliases=[\"massdm\"])\r\n @checks.mod_or_permissions(ban_members=True)\r\n async def _mdm(self, ctx: commands.Context, role: discord.Role, *, message: str):\r\n \"\"\"Envoie un MP à toutes les personnes possédant un certain rôle.\r\n Permet certaines customisations:\r\n {0} est le membre recevant le message.\r\n {1} est le rôle au travers duquel ils sont MP.\r\n {2} est la personne envoyant le message.\r\n Exemple: Message provenant de {2}: Salut {0} du rôle {1} ! 
...\"\"\"\r\n server = ctx.message.server\r\n sender = ctx.message.author\r\n await self.bot.delete_message(ctx.message)\r\n dm_these = self._get_users_with_role(server, role)\r\n for user in dm_these:\r\n await self.bot.send_message(user,message.format(user, role, sender))\r\n\r\n#MEMBERSHIP ---------------------------------------------------------\r\n\r\n @commands.group(pass_context=True, no_pm=True, name=\"trigset\")\r\n @checks.admin_or_permissions(manage_server=True)\r\n async def _membershipset(self, ctx: commands.Context):\r\n \"\"\"Changement des paramétrages des triggers.\"\"\"\r\n server = ctx.message.server\r\n if server.id not in self.settings:\r\n self.settings[server.id] = default_settings\r\n self.settings[server.id][\"channel\"] = server.default_channel.id\r\n dataIO.save_json(self.settings_path, self.settings)\r\n if ctx.invoked_subcommand is None:\r\n await send_cmd_help(ctx)\r\n\r\n @_membershipset.command(pass_context=True, no_pm=True, name=\"join\",aliases=[\"greeting\", \"bienvenue\"])\r\n async def _join(self, ctx: commands.Context, *,\r\n format_str: str):\r\n \"\"\"Change le message d'arrivée du serveur.\r\n {0} est le membre\r\n {1} est le serveur\r\n \"\"\"\r\n await self.bot.type()\r\n server = ctx.message.server\r\n self.settings[server.id][\"join_message\"] = format_str\r\n dataIO.save_json(self.settings_path, self.settings)\r\n await self.bot.say(\"Message réglé.\")\r\n\r\n @_membershipset.command(pass_context=True, no_pm=True, name=\"mp\")\r\n async def _mp(self, ctx: commands.Context, *,\r\n format_str: str):\r\n \"\"\"Change le MP d'arrivée du serveur.\r\n {0} est le membre\r\n {1} est le serveur\r\n \"\"\"\r\n await self.bot.type()\r\n server = ctx.message.server\r\n self.settings[server.id][\"join_mp\"] = format_str\r\n dataIO.save_json(self.settings_path, self.settings)\r\n await self.bot.say(\"Message réglé.\")\r\n\r\n @_membershipset.command(pass_context=True, no_pm=True, name=\"leave\",aliases=[\"adieu\"])\r\n async def _leave(self, ctx: commands.Context, *,\r\n format_str: str):\r\n \"\"\"Change le message de départ du serveur.\r\n {0} est le membre\r\n {1} est le serveur\r\n \"\"\"\r\n await self.bot.type()\r\n server = ctx.message.server\r\n self.settings[server.id][\"leave_message\"] = format_str\r\n dataIO.save_json(self.settings_path, self.settings)\r\n await self.bot.say(\"Message reglé.\")\r\n\r\n @_membershipset.command(pass_context=True, no_pm=True, name=\"ban\")\r\n async def _ban(self, ctx: commands.Context, *, format_str: str):\r\n \"\"\"Change le message de ban du serveur.\r\n {0} est le membre\r\n {1} est le serveur\r\n \"\"\"\r\n await self.bot.type()\r\n server = ctx.message.server\r\n self.settings[server.id][\"ban_message\"] = format_str\r\n dataIO.save_json(self.settings_path, self.settings)\r\n await self.bot.say(\"Message reglé.\")\r\n\r\n @_membershipset.command(pass_context=True, no_pm=True, name=\"unban\")\r\n async def _unban(self, ctx: commands.Context, *, format_str: str):\r\n \"\"\"Change le message de débanissement du serveur.\r\n {0} est le membre\r\n {1} est le serveur\r\n \"\"\"\r\n await self.bot.type()\r\n server = ctx.message.server\r\n self.settings[server.id][\"unban_message\"] = format_str\r\n dataIO.save_json(self.settings_path, self.settings)\r\n await self.bot.say(\"Message reglé.\")\r\n\r\n @_membershipset.command(pass_context=True, no_pm=True, name=\"toggle\")\r\n async def _toggle(self, ctx: commands.Context):\r\n \"\"\"Active ou désactive les triggers serveur.\"\"\"\r\n\r\n await self.bot.type()\r\n 
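# the toggle itself is just flipping the stored boolean and saving it with\r\n        # dataIO.save_json below; the if/else only picks the confirmation message\r\n        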
server = ctx.message.server\r\n self.settings[server.id][\"on\"] = not self.settings[server.id][\"on\"]\r\n if self.settings[server.id][\"on\"]:\r\n await self.bot.say(\"Les events du trigger seront annoncés.\")\r\n else:\r\n await self.bot.say(\"Les events du trigger ne seront plus annoncés.\")\r\n dataIO.save_json(self.settings_path, self.settings)\r\n\r\n @_membershipset.command(pass_context=True, no_pm=True, name=\"channel\")\r\n async def _channel(self, ctx: commands.Context,\r\n channel: discord.Channel=None):\r\n \"\"\"Change le channel où doit être envoyé les messages d'activation de trigger.\r\n\r\n Par défaut le présent.\"\"\"\r\n\r\n await self.bot.type()\r\n server = ctx.message.server\r\n\r\n if not channel:\r\n channel = server.default_channel\r\n\r\n if not self.speak_permissions(server, channel):\r\n await self.bot.say(\r\n \"Je n'ai pas les permissions d'envoyer de message sur {0.mention}.\".format(channel))\r\n return\r\n\r\n self.settings[server.id][\"channel\"] = channel.id\r\n dataIO.save_json(self.settings_path, self.settings)\r\n channel = self.get_welcome_channel(server)\r\n await self.bot.send_message(channel,\"{0.mention}, \" + \"Je vais maintenant envoyer les messages d'annonce\" + \"sur {1.mention}.\".format(ctx.message.author, channel))\r\n\r\n @commands.command(pass_context=True, no_pm=True)\r\n @checks.admin_or_permissions(manage_server=True)\r\n async def upchan(self, ctx, channel: discord.Channel):\r\n \"\"\"Permet de mettre un serveur de publication des update profil.\"\"\"\r\n server = ctx.message.server\r\n self.settings[server.id][\"upchan\"] = channel.id\r\n dataIO.save_json(self.settings_path, self.settings)\r\n await self.bot.say(\"Channel réglé.\")\r\n\r\n async def member_join(self, member: discord.Member):\r\n server = member.server\r\n if server.id not in self.settings:\r\n self.settings[server.id] = default_settings\r\n self.settings[server.id][\"channel\"] = server.default_channel.id\r\n dataIO.save_json(self.settings_path, self.settings)\r\n\r\n if not self.settings[server.id][\"on\"]:\r\n return\r\n\r\n await self.bot.send_typing(\r\n self.bot.get_channel(self.settings[member.server.id][\"channel\"]))\r\n\r\n if server is None:\r\n print(\"Le serveur était considéré NONE, Erreur inconnue.\"\r\n \"L'utilisateur était {}.\".format(\r\n member.name))\r\n return\r\n\r\n channel = self.get_welcome_channel(server)\r\n if self.speak_permissions(server, channel):\r\n await self.bot.send_message(channel,\r\n self.settings[server.id][\r\n \"join_message\"]\r\n .format(member, server))\r\n await asyncio.sleep(0.25)\r\n await self.bot.send_message(member, self.settings[server.id][\r\n \"join_mp\"])\r\n else:\r\n print(\"Je n'ai pas eu les autorisations pour envoyer un message. L'utilisateur était {}.\".format(member.name))\r\n\r\n async def member_leave(self, member: discord.Member):\r\n server = member.server\r\n if server.id not in self.settings:\r\n self.settings[server.id] = default_settings\r\n self.settings[server.id][\"channel\"] = server.default_channel.id\r\n dataIO.save_json(self.settings_path, self.settings)\r\n\r\n if not self.settings[server.id][\"on\"]:\r\n return\r\n\r\n await self.bot.send_typing(\r\n self.bot.get_channel(self.settings[member.server.id][\"channel\"]))\r\n\r\n if server is None:\r\n print(\"Le serveur était NONE, c'était peut-être un MP. 
L'utilisateur était {}.\".format(member.name))\r\n return\r\n\r\n channel = self.get_welcome_channel(server)\r\n if self.speak_permissions(server, channel):\r\n await self.bot.send_message(channel,\r\n self.settings[server.id][\r\n \"leave_message\"]\r\n .format(member, server))\r\n else:\r\n print(\"J'ai essayé d'envoyer un message mais je n'ai pas pu, l'utilisateur était {}.\".format(member.name))\r\n\r\n async def member_ban(self, member: discord.Member):\r\n server = member.server\r\n if server.id not in self.settings:\r\n self.settings[server.id] = default_settings\r\n self.settings[server.id][\"channel\"] = server.default_channel.id\r\n dataIO.save_json(self.settings_path, self.settings)\r\n\r\n if not self.settings[server.id][\"on\"]:\r\n return\r\n\r\n await self.bot.send_typing(\r\n self.bot.get_channel(self.settings[member.server.id][\"channel\"]))\r\n\r\n if server is None:\r\n print(\"Le serveur était NONE, c'était peut-être un MP. L'utilisateur était {}.\".format(member.name))\r\n return\r\n\r\n channel = self.get_welcome_channel(server)\r\n if self.speak_permissions(server, channel):\r\n await self.bot.send_message(channel,\r\n self.settings[server.id][\"ban_message\"]\r\n .format(member, server))\r\n else:\r\n print(\"J'ai essayé d'envoyer un message mais je n'ai pas pu, l'utilisateur était {}.\".format(member.name))\r\n\r\n async def member_unban(self, member: discord.Member):\r\n server = member.server\r\n if server.id not in self.settings:\r\n self.settings[server.id] = default_settings\r\n self.settings[server.id][\"channel\"] = server.default_channel.id\r\n dataIO.save_json(self.settings_path, self.settings)\r\n\r\n if not self.settings[server.id][\"on\"]:\r\n return\r\n\r\n await self.bot.send_typing(\r\n self.bot.get_channel(self.settings[member.server.id][\"channel\"]))\r\n\r\n if server is None:\r\n print(\"Le serveur était NONE, c'était peut-être un MP. 
L'utilisateur était {}.\".format(\r\n member.name))\r\n return\r\n\r\n channel = self.get_welcome_channel(server)\r\n if self.speak_permissions(server, channel):\r\n await self.bot.send_message(channel,\r\n self.settings[server.id][\r\n \"unban_message\"]\r\n .format(member, server))\r\n else:\r\n print(\"J'ai essayé d'envoyer un message mais je n'ai pas pu, l'utilisateur était {}.\".format(member.name))\r\n\r\n async def member_update(self, before: discord.Member, after: discord.Member):\r\n server = after.server\r\n if server.id in self.settings:\r\n if before.nick != after.nick:\r\n if after.nick != None:\r\n if before.nick == None:\r\n self.log_update(server,\r\n \"**{}** a changé son surnom en **{}** (Pseudo *{}*)\".format(before.name,\r\n after.nick,\r\n after.name))\r\n return\r\n self.log_update(server, \"**{}** a changé son surnom en **{}** (Pseudo *{}*)\".format(before.nick,\r\n after.nick,\r\n after.name))\r\n else:\r\n self.log_update(server, \"**{}** a retiré son surnom (Pseudo *{}*)\".format(before.name,after.name))\r\n elif before.name != after.name:\r\n self.log_update(server,\r\n \"**{}** a changé son pseudo en **{}** (Pseudo *{}*)\".format(before.name, after.name,\r\n after.nick))\r\n else:\r\n pass\r\n else:\r\n pass\r\n\r\n def get_welcome_channel(self, server: discord.Server):\r\n return server.get_channel(self.settings[server.id][\"channel\"])\r\n\r\n def speak_permissions(self, server: discord.Server,\r\n channel: discord.Channel=None):\r\n if not channel:\r\n channel = self.get_welcome_channel(server)\r\n return server.get_member(\r\n self.bot.user.id).permissions_in(channel).send_messages\r\n\r\n \r\n #DEMARRAGE =================================================================\r\n\r\ndef check_folders():\r\n if not os.path.exists(\"data/membership\"):\r\n print(\"Création de data/membership directory...\")\r\n os.makedirs(\"data/membership\")\r\n\r\n\r\ndef check_files():\r\n f = \"data/membership/settings.json\"\r\n if not dataIO.is_valid_json(f):\r\n print(\"Création de data/membership/settings.json...\")\r\n dataIO.save_json(f, {})\r\n\r\n f = \"data/gen/sondage.json\"\r\n if not dataIO.is_valid_json(f):\r\n print(\"Création du fichier de Sondages...\")\r\n dataIO.save_json(f, {})\r\n\r\n f = \"data/gen/live.json\"\r\n if not dataIO.is_valid_json(f):\r\n print(\"Création du fichier Live...\")\r\n dataIO.save_json(f, {})\r\n\r\ndef setup(bot: commands.Bot):\r\n check_folders()\r\n check_files()\r\n n = Tools(bot)\r\n bot.add_listener(n.member_join, \"on_member_join\")\r\n bot.add_listener(n.member_leave, \"on_member_remove\")\r\n bot.add_listener(n.member_ban, \"on_member_ban\")\r\n bot.add_listener(n.member_unban, \"on_member_unban\")\r\n bot.add_listener(n.member_update, \"on_member_update\")\r\n bot.add_cog(n)\r\n","sub_path":"cogs/tools/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":30986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"268173143","text":"\nimport requests\nimport json\nimport pandas as pd\n\nvk_api = 'https://api.vk.com/method/'\ntoken = '(╯°□°)╯︵ ┻━┻'\n\n\nclass Params:\n\n def __init__(self, **kwargs):\n self.params_dict = {\n 'v': '5.7',\n 'filter': 'all',\n 'access_token': token}\n for key, value in kwargs.items():\n self.params_dict[key] = str(value)\n\n def get_dict(self):\n return self.params_dict\n\n\nmethod = 'photos.search'\nreserves = [\n ['Земля Леопарда', 43.23994, 131.36819, 8000],\n ['Национальный парк Бикин', 46.5403, 135.36522, 3500],\n 
['Удэгейская легенда', 45.75773, 135.47622, 10000],\n ['Сихотэ - Алийский заповедник', 45.3337, 136.262, 20000],\n ['Дальневосточный Морской заповедник', 43.19967, 131.92011, 300],\n ['Кедровая падь', 43.10331, 131.49025, 6000],\n ['Зов тигра', 43.55909, 134.25464, 15000],\n ['Сафари парк', 43.32466, 132.40369, 1100],\n ['Бухта петрова', 42.87271, 133.80187, 2300],\n ['Лазовский заповедник', 43.15432, 133.9992, 14000],\n ['Высота 611', 44.568938, 135.574703, 300],\n ['Владивосток', 43.111820, 131.926755, 3000]]\n\nall_reserves = pd.DataFrame()\nin_vdk = pd.DataFrame()\noffs = 100\n\nfor i in range(len(reserves)):\n buf_reserve = list()\n sch = 0\n while len(buf_reserve) >= (sch * (offs * 0.9)):\n params = Params(lat=str(reserves[i][1]), long=str(reserves[i][2]),\n radius=str(reserves[i][3]), offset=str(sch * offs)).get_dict()\n buf_reserve += json.loads(requests.get(vk_api + method, params=params).text)['response']['items']\n sch += 1\n\n buf_reserve = pd.DataFrame(buf_reserve).drop(columns={'text', 'user_id', 'post_id',\n 'id', 'album_id', 'photo_75', 'photo_130',\n 'photo_604', 'photo_807', 'photo_1280',\n 'photo_2560', 'width', 'height'})\n\n buf_reserve.drop_duplicates('owner_id', inplace=True)\n buf_reserve.drop(columns='owner_id').to_csv(f'csv/{reserves[i][0]}.csv', index=False)\n print(reserves[i][0], len(buf_reserve))\n\n if i != (len(reserves) - 1):\n all_reserves = pd.concat([all_reserves, buf_reserve], sort=False)\n else:\n print('Всего', len(all_reserves))\n all_reserves.drop_duplicates('owner_id', inplace=True)\n all_reserves.drop(columns={'owner_id'}).to_csv('csv/all reserves.csv', index=False)\n\n print(reserves[i][0], len(buf_reserve))\n in_vdk = buf_reserve[buf_reserve.owner_id.isin(all_reserves.owner_id)]\n\nin_vdk.drop(columns={'owner_id'}).to_csv('csv/Places in VDK.csv', index=False)\nprint('Мест во Владивостоке', len(in_vdk))\n","sub_path":"f_to_pay.py","file_name":"f_to_pay.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"2471137","text":"#! /usr/bin/python3\r\n# coding=utf-8\r\n\"\"\"\r\nHangman Game\r\n-----------------------------------\r\nHelper code\r\nYou don't need to understand this helper code, but you will have to know\r\nhow to use the functions (so be sure to read the docstrings!)\r\n\"\"\"\r\n\r\nimport random\r\nimport string\r\n\r\nWORDLIST_FILENAME = \"words.txt\"\r\n\r\ndef load_words():\r\n \"\"\"\r\n Returns a list of valid words. 
Words are strings of lowercase letters.\r\n\r\n Depending on the size of the word list, this function may\r\n take a while to finish.\r\n \"\"\"\r\n with open(WORDLIST_FILENAME, 'r') as fh:\r\n return fh.read().split()\r\n \r\ndef choose_word(wordlist):\r\n \"\"\"\r\n wordlist (list): list of words (strings)\r\n\r\n Returns a random word from wordlist.\r\n \"\"\"\r\n secret_word = random.choice(wordlist)\r\n return secret_word\r\n\r\n# end of helper code\r\n# -----------------------------------\r\n\r\n# Load the list of words into the variable wordlist\r\n# so that it can be accessed from anywhere in the program\r\nwordlist = load_words()\r\nword_so_far = ''\r\n\r\ndef is_word_guessed(secret_word, letters_guessed):\r\n \"\"\"\r\n secret_word (string): the word the user is guessing; assumes all letters\r\n are lowercase\r\n letters_guessed (list): which letters have been guessed so far; assumes\r\n that all letters are lowercase\r\n returns: boolean, True if all the letters of secret_word are in\r\n letters_guessed; False otherwise\r\n \"\"\"\r\n guessed = True\r\n for char in secret_word:\r\n if char not in letters_guessed:\r\n guessed = False\r\n break\r\n return guessed \r\n\r\ndef get_guessed_word(secret_word, letters_guessed):\r\n \"\"\"\r\n secret_word (string): the word the user is guessing\r\n letters_guessed (list): which letters have been guessed so far\r\n returns (string): comprised of letters, underscores (_), and spaces\r\n that represents which letters in secret_word have been guessed so far.\r\n \"\"\"\r\n word_so_far = list('_' * len(secret_word)) \r\n for char in letters_guessed:\r\n for index, letter in enumerate(secret_word):\r\n if letter == char:\r\n word_so_far[index] = char\r\n word_so_far = ' '.join(word_so_far)\r\n return word_so_far\r\n \r\ndef get_available_letters(letters_guessed):\r\n \"\"\"\r\n letters_guessed (list): which letters have been guessed so far\r\n returns: string (of letters), comprised of letters that represents\r\n which letters have not yet been guessed.\r\n \"\"\"\r\n import string\r\n available_letters = (' '.join(string.ascii_lowercase)).split()\r\n\r\n for char in available_letters:\r\n if char in letters_guessed:\r\n index = available_letters.index(char)\r\n available_letters[index] = '' \r\n\r\n available_letters = ''.join(available_letters) \r\n return available_letters\r\n\r\ndef hangman():\r\n \"\"\"\r\n secret_word (string): the secret word to guess.\r\n\r\n Starts up an interactive game of Hangman.\r\n \"\"\"\r\n \"\"\" * At the start of the game, let the user know how many letters the\r\n secret_word contains and how many guesses s/he starts with.\r\n \"\"\"\r\n secret_word = choose_word(wordlist)\r\n letters_guessed = []\r\n word_so_far = ''\r\n guesses = 6\r\n warnings = 3\r\n \r\n while guesses > 0: # while there are still guesses left \r\n if secret_word == word_so_far.replace(' ',''):\r\n print(\"Yay you won the game!\")\r\n break\r\n else:\r\n print (f\"You have {guesses} guess(es) left and {warnings} warning(s) left.\")\r\n print (f\"So far, your guessed word is: {get_guessed_word(secret_word, letters_guessed)}\")\r\n print (f\"Choose one letter from the following: {get_available_letters(letters_guessed)}\")\r\n guess = input(\"Enter a letter to guess: \")\r\n if guess.isalpha(): # input is a letter \r\n guess = guess.lower()\r\n if guess in secret_word:\r\n print (\"Yay you guessed correctly!\")\r\n else:\r\n print (\"Oh no, you guessed incorrectly. 
Try again.\")\r\n                    guesses -= 1\r\n                letters_guessed.append(guess)\r\n                word_so_far = get_guessed_word(secret_word, letters_guessed)\r\n            else:\r\n                print (\"Please enter a letter instead.\")\r\n                if warnings > 0:\r\n                    warnings -= 1\r\n                else:\r\n                    guesses -= 1\r\n\r\n    if secret_word != word_so_far.replace(' ', ''):\r\n        print (f\"You lost, GAME OVER. The word was '{secret_word.upper()}'.\")\r\n\r\n    \"\"\"* The user should start with 6 guesses\r\n\r\n    * Before each round, you should display to the user how many guesses\r\n    s/he has left and the letters that the user has not yet guessed.\r\n\r\n    * Ask the user to supply one guess per round. Remember to make\r\n    sure that the user puts in a letter!\r\n\r\n    * The user should receive feedback immediately after each guess\r\n    about whether their guess appears in the computer's word.\r\n\r\n    * After each guess, you should display to the user the partially\r\n    guessed word so far.\r\n\r\n    Follows the other limitations detailed in the problem write-up.\r\n    \"\"\"\r\n\r\n# When you've completed your hangman function, comment out the word\r\n# pass that follows the if __name__ == \"__main__\" line and uncomment\r\n# the two lines that follow it to test (hint: you might want to pick\r\n# your own secret_word while you're doing your own testing)\r\n\r\nif __name__ == \"__main__\":\r\n    hangman()\r\n\r\n    # To test part 2, comment out the pass line above and\r\n    # uncomment the following two lines.\r\n\r\n    # secret_word = choose_word(wordlist)\r\n    # hangman(secret_word)\r\n\r\n","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"564350407","text":"import pandas as pd\nimport numpy as np\nimport feather\n\ndef probe_df(file_path, chunksize=1000):\n    data = {}\n    rows = 0\n    for chunk in pd.read_csv(file_path, chunksize=chunksize):\n        # len(chunk) is the chunk's row count; chunk.count() would return a per-column Series\n        rows += len(chunk)\n        for column in chunk.columns:\n            if column in data:\n                data[column]['null_values'] += chunk[column].isnull().sum()\n                if chunk[column].dtypes == int or chunk[column].dtypes == float:\n                    data[column]['sum_val'] += chunk[column].sum()\n                    data[column]['count'] += chunk[column].count()\n            else:\n                data[column] = {}\n                data[column]['null_values'] = chunk[column].isnull().sum()\n                data[column]['dtype'] = chunk[column].dtypes\n                if chunk[column].dtypes == int or chunk[column].dtypes == float:\n                    data[column]['sum_val'] = chunk[column].sum()\n                    data[column]['count'] = chunk[column].count()\n    # derive exact column means from the accumulated sums and non-null counts\n    # (averaging chunk means pairwise would weight later chunks more heavily)\n    for column in data:\n        if data[column].get('count'):\n            data[column]['avg_val'] = data[column]['sum_val'] / data[column]['count']\n    return data\n\ndef write_df(file_path_read, file_path_write, chunksize=1000, missing_vals={}):\n    for num, chunk in enumerate(pd.read_csv(file_path_read, chunksize=chunksize)):\n        for column in missing_vals:\n            chunk[column].fillna(missing_vals[column], inplace=True)\n        if num == 0:\n            # first chunk creates the file and writes the header\n            chunk.to_csv(file_path_write, header=True)\n        else:\n            # later chunks must append; without mode='a' each chunk overwrites the file\n            chunk.to_csv(file_path_write, mode='a', header=False)\n","sub_path":"Homework/Unit2/studentprojects/ShellySeroussi/chunking 2.py","file_name":"chunking 2.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"353139503","text":"import re\nfrom collections import Counter\nimport itertools\nfrom gensim import models\nimport numpy as np\nimport pickle\n\ndef clean_str(string):\n    \"\"\"\n    Tokenization/string cleaning; original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n    \"\"\"\n    string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n    string = re.sub(r\"\\'s\", \" \\'s\", string)\n    string = re.sub(r\"\\'ve\", 
\" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()\n\ndef load_data(dataset_file):\n\tdataset = list(open(dataset_file,\"rb\").readlines())\n\tdataset = [s.decode('latin-1').strip() for s in dataset]\n\n\tdataset_text = [clean_str(sent) for sent in dataset]\n\tdataset_text = [s.split(\" \") for s in dataset_text]\n\n\t\n\treturn dataset_text\n\n# if real sentence length < sentence_length, add with \"\"\ndef pad_sentences(sentences, sentence_length, padding_word=\"\"):\n\t\"\"\"\n Pads all sentences to the same length. The length is defined by the longest sentence.\n Returns padded sentences.\n \"\"\"\n\tpadded_sentences = []\n\tfor i in range(len(sentences)):\n\t\tsentence = sentences[i]\n\t\tnum_padding = sentence_length - len(sentence)\n\t\tnew_sentence = sentence + [padding_word] * num_padding\n\t\tpadded_sentences.append(new_sentence)\n\treturn padded_sentences\n\ndef build_vocab_and_embeddings(sentences, vector, vocab_embedding_pickle):\n\tprint (\"Building vocabulary...\")\n\t# Build vocabulary\n\tword_counts = Counter(itertools.chain(*sentences))\n\t# Mapping from index to word\n\tvocabulary_inv = [x[0] for x in word_counts.most_common()]\n\t# Mapping from word to index\n\tvocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n\n\tif(vector == 'w2v'):\n\t\tprint (\"Loading w2v model...\")\n\t\tmodel = models.Word2Vec.load_word2vec_format('../CNN/GoogleNews-vectors-negative300.bin', binary = True)\n\n\t\tprint (\"Building embeddings...\")\n\t\tvocab_size = len(vocabulary)\n\t\tembeddings = np.zeros((vocab_size, 300))\n\t\t# a matrix of zero => 300 * vocab_size\n\t\t\n\t\tfor word in vocabulary:\n\t\t\tindex = vocabulary[word]\n\t\t\ttry:\n\t\t\t\tembeddings[index, :] = model[word].reshape((1,300))\n\t\t\texcept KeyError:\n\t\t\t\tembeddings[index, :] = np.random.uniform(-0.23, 0.23, [1,300])\n\t\t# -0.23,0.23 means number between -0.23 and 0.23\n\t\t# every word in vocabulary is 300 * 1, if w2v model contans word, uses model, if not, uses random.\n\n\t\tprint (\"Write data in a pickle...\")\n\t\tpickle_file = 'w2v_'+vocab_embedding_pickle+'.pickle'\n\t\ttry:\n\t\t\tfp = open(pickle_file, 'wb')\n\t\t\tsave = {\n\t\t\t\t'vocabulary': vocabulary,\n\t\t\t\t'embeddings': embeddings\n\t\t\t}\n\t\t\t\t\n\t\t\tpickle.dump(save, fp, pickle.HIGHEST_PROTOCOL)\n\t\t\tfp.close()\n\t\texcept Exception as e:\n\t\t\tprint ('Unable to save data to', pickle_file, ':', e)\n\t\t\traise\n\t\t\n\tif (vector == 'random'):\n\t\tvocab_size = len(vocabulary)\n\t\tembeddings = np.random.uniform(-1.0, 1.0, [vocab_size, 300])\n\t\t\t\n\t\tprint (\"Write data in a pickle...\")\n\t\tpickle_file = 'random_'+vocab_embedding_pickle+'.pickle'\n\t\ttry:\n\t\t\tfp = open(pickle_file, 'wb')\n\t\t\tsave = {\n\t\t\t\t'vocabulary': vocabulary,\n\t\t\t\t'embeddings': embeddings\n\t\t\t}\n\t\t\tpickle.dump(save, fp, pickle.HIGHEST_PROTOCOL)\n\t\t\tfp.close()\n\t\texcept Exception as e:\n\t\t\tprint ('Unable to save data to', pickle_file, ':', e)\n\t\t\traise\n\nif __name__ == \"__main__\":\n\n\tpos_dataset_file = 
\"classifier1c/sentPara-train-pos-a1.txt\"\n\tneg_dataset_file = \"classifier1c/sentPara-train-neg-a1.txt\"\n\n\tpos_dataset_file2 = \"classifier1c/relCorRep-pos-test-a1.txt\"\n\tneg_dataset_file2 = \"classifier1c/relCorRep-neg-test-a1.txt\"\n\n\t\"\"\"\n\tpos_dataset_file = \"classifier1b/sentsSubj-pos-a1.txt\"\n\tneg_dataset_file = \"classifier1b/sentsSubj-neg-a1.txt\"\n\t\"\"\"\n\t\"\"\"\n\tpos_dataset_file = \"classifier1/relCorReplaced-pos-a1.txt\"\n\tneg_dataset_file = \"classifier1/relCorReplaced-neg-a1.txt\"\n\t\"\"\"\n\n\tsentences_pos = load_data(pos_dataset_file)\n\tsentence_length_pos = max(len(x) for x in sentences_pos)\n\t\n\tsentences_neg = load_data(neg_dataset_file)\n\tsentence_length_neg = max(len(x) for x in sentences_neg)\n\n\tsentences_pos2 = load_data(pos_dataset_file2)\n\tsentence_length_pos2 = max(len(x) for x in sentences_pos2)\n\n\tsentences_neg2 = load_data(neg_dataset_file2)\n\tsentence_length_neg2 = max(len(x) for x in sentences_neg2)\n\t\n\tsentence_length = max(sentence_length_pos, sentence_length_neg,sentence_length_pos2,sentence_length_neg2)\n\tprint (sentence_length)\n\t# sentence_length: 59\n\t\n\tsentences_padded_pos = pad_sentences(sentences_pos, sentence_length)\n\tsentences_padded_neg = pad_sentences(sentences_neg, sentence_length)\n\n\tsentences_padded_pos2 = pad_sentences(sentences_pos2, sentence_length)\n\tsentences_padded_neg2 = pad_sentences(sentences_neg2, sentence_length)\n\t\n\tsentences_all = sentences_padded_pos + sentences_padded_neg + sentences_padded_pos2 + sentences_padded_neg2\n\t\n\tvectors = ['w2v', 'random']\n\t\n\tfor vector in vectors:\n\t\tbuild_vocab_and_embeddings(sentences_all, vector,'class1c-a1')","sub_path":"mycode/build_vocab_embed.py","file_name":"build_vocab_embed.py","file_ext":"py","file_size_in_byte":5221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"341030642","text":"import itertools\nfrom collections import OrderedDict\n\nimport util.util as util\nfrom torch.autograd import Variable\nfrom util.image_pool import ImagePool\n\nfrom . 
import networks\nfrom .base_model import BaseModel\nfrom .unet import *\n\n\nclass CycleGANModel(BaseModel):\n def name(self):\n return 'CycleGANModel'\n\n def initialize(self, opt):\n BaseModel.initialize(self, opt)\n nb = opt.batchSize\n size = opt.fineSize\n self.no_input = opt.no_input\n self.input_A1 = self.Tensor(nb, opt.input_nc, size, size) # store inputs in a tensor # DONE\n self.input_A2 = self.Tensor(nb, opt.input_nc2, size, size) # store inputs in a tensor # DONE\n self.input_A3 = self.Tensor(nb, opt.input_nc3, size, size) #HC\n self.input_B = self.Tensor(nb, opt.output_nc, size, size)\n\n # load/define networks\n # The naming conversion is different from those used in the paper\n # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n print([opt.output_nc, opt.input_nc])\n self.netG_A = (networks.define_G(opt.input_nc, opt.output_nc,\n opt.ngf, 'resnetMM', opt.norm, not opt.no_dropout, opt.init_type,\n self.gpu_ids))\n # inputs = torch.randn(1,3,256,256)\n # y = self.netG_A(Variable(inputs).cuda(),Variable(inputs).cuda())\n # g = make_dot(y)\n # g.view()\n self.netG_B = (networks.define_G(opt.input_nc, opt.output_nc,\n opt.ngf, 'resnetMMReverse', opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids))\n\n if self.isTrain:\n use_sigmoid = opt.no_lsgan\n self.netD_A = networks.define_D(opt.output_nc, opt.ndf,\n opt.which_model_netD,\n opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)\n self.netD_B1 = networks.define_D(opt.input_nc, opt.ndf,\n opt.which_model_netD,\n opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)\n self.netD_B2 = networks.define_D(opt.input_nc, opt.ndf,\n opt.which_model_netD,\n opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)\n self.netD_B3 = networks.define_D(opt.input_nc, opt.ndf,\n opt.which_model_netD,\n opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)\n\n if not self.isTrain or opt.continue_train:\n which_epoch = opt.which_epoch\n self.load_network(self.netG_A, 'G_A', which_epoch)\n self.load_network(self.netG_B, 'G_B', which_epoch)\n if self.isTrain:\n self.load_network(self.netD_A, 'D_A', which_epoch)\n self.load_network(self.netD_B1, 'D_B1', which_epoch)\n self.load_network(self.netD_B2, 'D_B2', which_epoch)\n self.load_network(self.netD_B3, 'D_B3', which_epoch)\n\n if self.isTrain:\n self.old_lr = opt.lr\n self.fake_A1_pool = ImagePool(opt.pool_size)\n self.fake_A2_pool = ImagePool(opt.pool_size)\n self.fake_A3_pool = ImagePool(opt.pool_size)\n self.fake_B_pool = ImagePool(opt.pool_size)\n # define loss functions\n self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)\n self.criterionCycle = torch.nn.L1Loss()\n self.criterionIdt = torch.nn.L1Loss()\n self.criterionLatent = torch.nn.L1Loss()\n # initialize optimizers\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),\n lr=1.5 * opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D_B = torch.optim.Adam(itertools.chain(self.netD_B1.parameters(), self.netD_B2.parameters(), self.netD_B3.parameters()), lr=opt.lr*0.1, betas=(opt.beta1, 0.999))\n self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers = []\n self.schedulers = []\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D_A)\n self.optimizers.append(self.optimizer_D_B)\n # for optimizer in self.optimizers:\n self.schedulers.append(networks.get_scheduler(self.optimizers[0], opt, lr=1.5))\n 
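# the lr factors here are matched to self.optimizers by position ([G, D_A, D_B]);\n            # note that the 0.1 factor below lands on optimizers[1] (D_A) even though it was\n            # D_B's Adam that was built with opt.lr * 0.1 above, which may be unintentional\n            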
self.schedulers.append(networks.get_scheduler(self.optimizers[1], opt, lr=0.1))\n self.schedulers.append(networks.get_scheduler(self.optimizers[2], opt, lr=1.0))\n print('---------- Networks initialized -------------')\n networks.print_network(self.netG_A)\n networks.print_network(self.netG_B)\n if self.isTrain:\n networks.print_network(self.netD_A)\n networks.print_network(self.netD_B1)\n networks.print_network(self.netD_B2)\n networks.print_network(self.netD_B3)\n print('-----------------------------------------------')\n\n def set_input(self, input):\n AtoB = self.opt.which_direction == 'AtoB'\n input_A1 = input['A1']\n input_A2 = input['A2']\n input_A3 = input['A3']\n input_B = input['B']\n self.input_A1.resize_(input_A1.size()).copy_(input_A1)\n self.input_A2.resize_(input_A2.size()).copy_(input_A2)\n self.input_A3.resize_(input_A3.size()).copy_(input_A3)\n self.input_B.resize_(input_B.size()).copy_(input_B)\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n\n def forward(self):\n self.real_A1 = Variable(self.input_A1)\n self.real_A2 = Variable(self.input_A2)\n self.real_A3 = Variable(self.input_A3)\n self.real_B = Variable(self.input_B)\n\n def test(self):\n self.real_A1 = Variable(self.input_A1, volatile=True)\n self.real_A2 = Variable(self.input_A2, volatile=True)\n self.real_A3 = Variable(self.input_A3, volatile=True)\n self.fake_B, _ = self.netG_A.forward(self.real_A1, self.real_A2, self.real_A3)\n self.rec_A1, self.rec_A2, self.rec_A3, _ = self.netG_B.forward(self.fake_B)\n\n self.real_B = Variable(self.input_B, volatile=True)\n self.fake_A1, self.fake_A2, self.fake_A3, _ = self.netG_B.forward(self.real_B)\n self.rec_B, _ = self.netG_A.forward(self.fake_A1, self.fake_A2, self.fake_A3)\n\n # get image paths\n def get_image_paths(self):\n return self.image_paths\n\n def backward_D_basic(self, netD, real, fake):\n # Real\n pred_real = netD.forward(real)\n loss_D_real = self.criterionGAN(pred_real, True)\n # Fake\n pred_fake = netD.forward(fake.detach())\n loss_D_fake = self.criterionGAN(pred_fake, False)\n # Combined loss\n loss_D = (loss_D_real + loss_D_fake) * 0.5\n # backward\n loss_D.backward()\n return loss_D\n\n def backward_D_A(self):\n fake_B = self.fake_B_pool.query(self.fake_B)\n self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)\n\n def backward_D_B(self):\n fake_A1 = self.fake_A1_pool.query(self.fake_A1)\n fake_A2 = self.fake_A2_pool.query(self.fake_A2)\n fake_A3 = self.fake_A3_pool.query(self.fake_A3)\n self.loss_D_B = 0.5 * (\n self.backward_D_basic(self.netD_B1, self.real_A1, fake_A1) + self.backward_D_basic(self.netD_B2,\n self.real_A2,\n fake_A2) +\n self.backward_D_basic(self.netD_B3, self.real_A3, fake_A3))\n\n def l1_loss(self, input, target):\n return torch.sum(torch.abs(input - target)) / input.data.nelement()\n\n def backward_G(self):\n lambda_idt = self.opt.identity\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n lambda_latent = 1.0\n # Identity loss\n # if lambda_idt > 0:\n # # G_A should be identity if real_B is fed.\n # self.idt_A = self.netG_A.forward(self.real_B)\n # self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n # # G_B should be identity if real_A is fed.\n # self.idt_B = self.netG_B.forward(self.real_A)\n # self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n # else:\n self.loss_idt_A = 0\n self.loss_idt_B = 0\n\n # GAN loss\n # D_A(G_A(A))\n self.fake_B, latent_fB = self.netG_A.forward(self.real_A1 ,self.real_A2, self.real_A3)\n 
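# adversarial term for G_A: D_A should score the translated fake_B as real\n 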
pred_fake = self.netD_A.forward(self.fake_B)\n self.loss_G_A = self.criterionGAN(pred_fake, True)\n\n self.fake_A1, self.fake_A2, self.fake_A3, latent_fA = self.netG_B.forward(self.real_B)\n pred_fake1 = self.netD_B1.forward(self.fake_A1)\n pred_fake2 = self.netD_B2.forward(self.fake_A2)\n pred_fake3 = self.netD_B3.forward(self.fake_A3)\n self.loss_G_B = (self.criterionGAN(pred_fake1, True) + self.criterionGAN(pred_fake2, True) + self.criterionGAN(\n pred_fake3, True))\n # Forward cycle loss\n self.rec_A1, self.rec_A2, self.rec_A3, latent_rA = self.netG_B.forward(self.fake_B)\n self.loss_cycle_A = (\n self.criterionCycle(self.rec_A1, self.real_A1) * lambda_A + self.criterionCycle(self.rec_A2,\n self.real_A2) * lambda_A +\n self.criterionCycle(self.rec_A3, self.real_A3) * lambda_A)\n\n self.rec_B, latent_rB = self.netG_A.forward(self.fake_A1, self.fake_A2, self.fake_A3)\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n self.latent_loss = lambda_latent * self.l1_loss(latent_fB, latent_rA) + lambda_latent * self.l1_loss(latent_fA,\n latent_rB)\n\n self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B + self.latent_loss\n self.loss_G.backward()\n\n def optimize_parameters(self):\n # forward\n self.forward()\n # G_A and G_B\n self.optimizer_G.zero_grad()\n self.backward_G()\n self.optimizer_G.step()\n # D_A\n self.optimizer_D_A.zero_grad()\n self.backward_D_A()\n self.optimizer_D_A.step()\n # D_B\n self.optimizer_D_B.zero_grad()\n self.backward_D_B()\n self.optimizer_D_B.step()\n\n def get_current_errors(self):\n D_A = self.loss_D_A.data.item()\n G_A = self.loss_G_A.data.item()\n Cyc_A = self.loss_cycle_A.data.item()\n D_B = self.loss_D_B.data.item() # =============================================================================\n G_B = self.loss_G_B.data.item()\n Cyc_B = self.loss_cycle_B.data.item()\n if self.opt.identity > 0.0:\n # the identity terms are currently disabled in backward_G, so loss_idt_* may be\n # plain zeros; float() handles both plain numbers and 0-dim tensors\n idt_A = float(self.loss_idt_A)\n idt_B = float(self.loss_idt_B)\n return OrderedDict([('D_A', D_A), ('G_A', G_A), ('Cyc_A', Cyc_A), ('idt_A', idt_A),\n ('D_B', D_B), ('G_B', G_B), ('Cyc_B', Cyc_B), ('idt_B', idt_B)])\n else:\n return OrderedDict([('D_A', D_A), ('G_A', G_A), ('Cyc_A', Cyc_A),\n ('D_B', D_B), ('G_B', G_B), ('Cyc_B', Cyc_B)])\n\n def get_current_visuals(self):\n real_A1 = util.tensor2im(self.real_A1.data)\n real_A2 = util.tensor2im(self.real_A2.data)\n real_A3 = util.tensor2im(self.real_A3.data)\n fake_B = util.tensor2im(self.fake_B.data)\n rec_A1 = util.tensor2im(self.rec_A1.data)\n rec_A2 = util.tensor2im(self.rec_A2.data)\n rec_A3 = util.tensor2im(self.rec_A3.data)\n real_B = util.tensor2im(self.real_B.data)\n fake_A1 = util.tensor2im(self.fake_A1.data)\n fake_A2 = util.tensor2im(self.fake_A2.data)\n fake_A3 = util.tensor2im(self.fake_A3.data)\n rec_B = util.tensor2im(self.rec_B.data)\n if self.opt.identity > 0.0 and hasattr(self, 'idt_A'):\n # idt_A/idt_B only exist if the identity branch in backward_G is re-enabled; this\n # model has three A channels, so report the A1/A2/A3 visuals here as well (the old\n # branch referenced undefined real_A/fake_A/rec_A names)\n idt_A = util.tensor2im(self.idt_A.data)\n idt_B = util.tensor2im(self.idt_B.data)\n return OrderedDict([('real_A1', real_A1), ('real_A2', real_A2), ('real_A3', real_A3), ('fake_B', fake_B),\n ('rec_A1', rec_A1), ('rec_A2', rec_A2), ('rec_A3', rec_A3), ('idt_B', idt_B),\n ('real_B', real_B), ('fake_A1', fake_A1), ('fake_A2', fake_A2), ('fake_A3', fake_A3),\n ('rec_B', rec_B), ('idt_A', idt_A)])\n else:\n return OrderedDict([('real_A1', real_A1), ('real_A2', real_A2), ('real_A3', real_A3), ('fake_B', fake_B),\n ('rec_A1', rec_A1), ('rec_A2', rec_A2), ('rec_A3', rec_A3),\n ('real_B', real_B), ('fake_A1', fake_A1), ('fake_A2', fake_A2), ('fake_A3', fake_A3),\n ('rec_B', rec_B)])\n\n def save(self, label):\n self.save_network(self.netD_B2, 'D_B2', label, self.gpu_ids)\n 
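# checkpoint the remaining discriminators and both generators under the same epoch label\n 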
self.save_network(self.netD_B3, 'D_B3', label, self.gpu_ids)\n self.save_network(self.netG_B, 'G_B', label, self.gpu_ids)\n self.save_network(self.netD_B1, 'D_B1', label, self.gpu_ids)\n self.save_network(self.netG_A, 'G_A', label, self.gpu_ids)\n self.save_network(self.netD_A, 'D_A', label, self.gpu_ids)\n","sub_path":"current/src/In2I/models/cycle_gan_model.py","file_name":"cycle_gan_model.py","file_ext":"py","file_size_in_byte":13661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"318418496","text":"#!/usr/bin/env python\nimport tkinter as tk\nfrom tkinter import ttk\n\ndef click_me():\n \n label_1.configure(text='button has been clicked!', foreground='red')\n\nif __name__ =='__main__':\n \n window = tk.Tk()\n window.title('This is just some more practice')\n\n label_1 = ttk.Label(window, text='Practice label...')\n label_1.grid(column=0, row=0)\n\n button_1 = ttk.Button(window, text='click me!', command=click_me)\n button_1.grid(column=0, row=1)\n\n window.mainloop()\n\n \n","sub_path":"concepts/gui/python_gui_book/chapter_1/practice/basic/basic_gui.py","file_name":"basic_gui.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"18319539","text":"import codecs\nimport numpy as np\nimport os\nimport multiprocessing\nimport re\n\n\nuseful_postag = {'v', 'm', 'd', 'a', 'n', 'i'}\nconj_words = {'因', '因为', '由于', '由'}\nrefers_words = {'由此', '因此', '这', '对此'}\nnoise_words = {'对', '对于', '给', '为'}\nneg_words = {\n '没有', '不能', '没法', '不会', '是否', '是否会', '不', '不可能',\n '尚未', '还没', '非', '并非', '不是', '并不', '无法'\n}\nneg_modifier = {'不', '非', '并非', '不是', '并不'}\npunctuation = {'“', '”', '《', '》', ':', ';', '、', '(', ')', '〈', '〉', ',', '。'}\ncontent_pos = {'v', 'm', 'd', 'a', 'n', 'i'}\nproject_source_path = os.path.join(os.path.expanduser('~'), 'Documents/sources/')\nltp_path = os.path.join(project_source_path, 'ltp_data/')\n\n\ndef split_cn_line(splitter, line, efface_number=False):\n results, response = splitter.split(line), []\n for r in results:\n r = re.sub('[^,。;:、()《》“”\\n0-9\\u4e00-\\u9fa5]', '', r)\n if efface_number and re.match('.*\\d+', r):\n continue\n if 7 < len(r) < 300:\n response.append(r)\n return response\n\n\nclass WordVec(object):\n def __contains__(self, item):\n return item in self.indices\n\n def __getitem__(self, item):\n if item not in self.indices:\n raise KeyError\n return self.vectors[self.indices[item]]\n\n def __init__(self, embedding_path, normalized=False):\n print('load embedding: {} ...'.format(embedding_path))\n self.words, self.vectors = [], []\n fin = codecs.open(embedding_path, 'r', 'utf-8')\n line = fin.readline()\n res = line.strip().split(' ')\n self.vocab_size, self.embedding_size = list(map(int, res))\n line = fin.readline()\n while line:\n r = line.strip().split(' ')\n vec = [float(v) for v in r[1:]]\n assert len(vec) == self.embedding_size\n if normalized:\n l2 = np.sqrt(sum(np.square(vec)))\n try:\n assert l2 != 0\n # vec is a plain list here, so divide via numpy instead of the\n # unsupported in-place list division\n vec = np.asarray(vec) / l2\n except AssertionError: # a zero vector cannot be normalized; skip this word\n line = fin.readline()\n continue\n # vec /= np.sqrt(sum(np.square(vec)))\n self.vectors.append(vec)\n self.words.append(r[0])\n line = fin.readline()\n assert len(self.vectors) == self.vocab_size\n assert len(self.words) == self.vocab_size\n self.vectors = np.array(self.vectors, dtype=np.float32)\n self.indices = {w: i for i, w in enumerate(self.words)}\n print('finished loading embeddings {}.'.format(embedding_path))\n\n # how to look a word up in O(1)\n def get_index(self, 
w):\n try:\n return self.indices[w]\n except KeyError:\n return -1\n\n def get_vec(self, w):\n if w in self.indices:\n return self.vectors[self.indices[w]]\n else:\n raise KeyError('key: {} not in vocab!.'.format(w))\n\n @staticmethod\n def __get_score__(v1, v2, norm):\n score = v1.dot(v2)\n return score/(np.sqrt(np.sum(np.square(v1)))*np.sqrt(np.sum(np.square(v2)))) if norm else score\n\n def __per_process__(self, queue, target, start, end, norm):\n d = {}\n for i in range(start, end):\n score = self.__get_score__(target, self.vectors[i], norm)\n if score < 1.0:\n d[self.words[i]] = score\n res = sorted(d.items(), key=lambda item: item[1], reverse=True)\n queue.put(res[0])\n\n def __find_sim_by_multi_process__(self, v, num_threads, norm):\n try:\n assert len(v) == self.embedding_size\n thread_list, queue = [], multiprocessing.Queue()\n num_vec_per_thread = int((self.vocab_size - 1) / num_threads) + 1\n for i in range(num_threads):\n s, e = i*num_vec_per_thread, min((i+1)*num_vec_per_thread, self.vocab_size)\n thread = multiprocessing.Process(target=self.__per_process__, args=(queue, v, s, e, norm))\n thread.start()\n thread_list.append(thread)\n for th in thread_list:\n th.join()\n\n result = []\n while not queue.empty():\n result.append(queue.get())\n res = sorted(result, key=lambda item: item[1], reverse=True)\n return res[0][0]\n except AssertionError:\n print('embedding size should be {}.!'.format(self.vocab_size))\n exit(0)\n\n def __find_sim_by_single_process__(self, v, norm):\n d = {}\n for w in self.indices:\n score = self.__get_score__(v, self.vectors[self.indices[w]], norm)\n if score < 1.0:\n d[w] = score\n # d = {w: self.__get_score__(v, self.vectors[self.indices[w]], norm) for w in self.indices}\n res = sorted(d.items(), key=lambda item: item[1], reverse=True)\n return res[0][0]\n\n def find_sim(self, x, num_threads, norm):\n if isinstance(x, str):\n try:\n if num_threads == 1:\n return self.__find_sim_by_single_process__(self.vectors[self.indices[x]], norm)\n else:\n return self.__find_sim_by_multi_process__(self.vectors[self.indices[x]], num_threads, norm)\n except KeyError:\n raise KeyError('key: {} not in vocab!.'.format(x))\n else:\n try:\n assert isinstance(x, np.ndarray)\n if num_threads == 1:\n return self.__find_sim_by_single_process__(x, norm)\n else:\n return self.__find_sim_by_multi_process__(x, num_threads, norm)\n except AssertionError:\n raise KeyError('please give input which has type of str or np.ndarray!'.format(x))\n\n","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":5861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"335919171","text":"\n\nfrom xai.brain.wordbase.nouns._snowstorm import _SNOWSTORM\n\n#calss header\nclass _SNOWSTORMS(_SNOWSTORM, ):\n\tdef __init__(self,): \n\t\t_SNOWSTORM.__init__(self)\n\t\tself.name = \"SNOWSTORMS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"snowstorm\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_snowstorms.py","file_name":"_snowstorms.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"652031235","text":"'''\n27/04/2020 (Solved ~3 minutes after reading the question whoo)\n\nStarting with the number 1 and moving to the right in a clockwise direction a 5 by 5 spiral is formed as follows:\n\n21 22 23 24 25\n20 7 8 9 10\n19 6 1 2 11\n18 5 4 3 12\n17 16 15 14 13\n\nIt can be verified that the sum of the numbers on 
the diagonals is 101.\n\nWhat is the sum of the numbers on the diagonals in a 1001 by 1001 spiral formed in the same way?\n\n'''\n\n# Start at 1\n# Then 3,5,7,9 are separated by 2 (3x3 square)\n# 13, 17, 21, 25 are separated by 4 (5x5 square)\n# 31, 37, 43, 49 are separated by 6 (7x7 square)\n\nend_sum = 1\nnumber = 1\nincrement = 2\nfor _ in range(500):\n for _ in range(4):\n number += increment\n end_sum += number\n increment += 2\n\nprint(end_sum)","sub_path":"problem28.py","file_name":"problem28.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"134952047","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 12 17:13:42 2021\r\n\r\n@author: BMCL\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 11 23:29:18 2021\r\n\r\n@author: BMCL\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 6 12:06:18 2021\r\n\r\n@author: BMCL\r\n\"\"\"\r\n\r\nimport os\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\n\r\nimport pandas as pd\r\nimport pandas_datareader as pdr\r\nimport scipy.io as sio\r\nfrom datetime import datetime\r\nimport glob\r\nfrom sklearn.model_selection import StratifiedKFold\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.model_selection import train_test_split\r\nimport tensorflow.keras as tfk\r\nimport tensorflow.keras.backend as K\r\nfrom tensorflow.keras.callbacks import EarlyStopping\r\nfrom tensorflow.keras import initializers\r\nfrom tensorflow.keras.optimizers import Adam\r\nimport random\r\nfrom scipy.io import savemat\r\nfrom tqdm import tqdm\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.layers import Input\r\nfrom tensorflow.keras.models import Model, Sequential\r\nfrom tensorflow.keras.layers import Dense, Dropout, Conv1D, BatchNormalization\r\nfrom tensorflow.keras.layers import LeakyReLU, Reshape, Flatten, Conv1DTranspose\r\nfrom tensorflow.keras.datasets import mnist\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras import initializers\r\nimport time\r\nfrom scipy import signal\r\n\r\n# Configure Keras to use TensorFlow as its backend.\r\nos.environ[\"KERAS_BACKEND\"] = \"tensorflow\"\r\n\r\n# Fix the random seed so the experiment can be reproduced with identical results.\r\nnp.random.seed(10)\r\n\r\n#=============================================\r\ngpus = tf.config.experimental.list_physical_devices('GPU')\r\nif gpus: \r\n try: # Currently, memory growth needs to be the same across GPUs\r\n for gpu in gpus:\r\n tf.config.experimental.set_memory_growth(gpu, True)\r\n logical_gpus = tf.config.experimental.list_logical_devices('GPU') \r\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\r\n except RuntimeError as e: # Memory growth must be set before GPUs have been initialized\r\n print(e)\r\n#====================================================================== \r\n\r\n# Build the generator\r\n# These are helper loss objects used to compute the cross-entropy and L1 losses below.\r\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\r\nmean_abs_err = tf.keras.losses.MeanAbsoluteError()\r\n\r\nfs=100\r\n\r\ndef get_generator():\r\n generator = Sequential()\r\n generator.add(Input(shape=(3000,1)))\r\n# =============================================================================\r\n# generator.add(Conv1D(16, 2, padding='same'))\r\n# generator.add(LeakyReLU(0.2))\r\n# \r\n# \r\n# generator.add(Conv1D(64, 2, 
padding='same'))\r\n# generator.add(LeakyReLU(0.2))\r\n# \r\n# \r\n# generator.add(Conv1D(256, 2, padding='same'))\r\n# generator.add(LeakyReLU(0.2))\r\n# \r\n# \r\n# generator.add(Conv1D(256,2, padding='same'))\r\n# generator.add(LeakyReLU(0.2))\r\n# \r\n# \r\n# generator.add(Conv1D(64,2, padding='same'))\r\n# generator.add(LeakyReLU(0.2))\r\n# generator.add(Dropout(0.5))\r\n# \r\n# generator.add(Conv1D(16,2, padding='same'))\r\n# generator.add(LeakyReLU(0.2))\r\n# \r\n# generator.add(Conv1D(10,2, padding='same'))\r\n# =============================================================================\r\n\r\n#x= tfk.layers.Conv1D(256,3, padding='same')(x)\r\n generator.add(Dense(1024))\r\n generator.add(BatchNormalization())\r\n generator.add(LeakyReLU(alpha=0.3))\r\n \r\n \r\n generator.add(Conv1D(512,3, padding='same'))\r\n generator.add(Dropout(0.25))\r\n generator.add(LeakyReLU(alpha=0.3))\r\n \r\n generator.add(Dropout(0.25))\r\n \r\n generator.add(Conv1D(256, 3, padding='same'))\r\n generator.add(Dropout(0.25))\r\n generator.add(LeakyReLU(alpha=0.3))\r\n \r\n generator.add(Conv1D(64, 3, padding='same'))\r\n generator.add(BatchNormalization())\r\n generator.add(LeakyReLU(alpha=0.3))\r\n \r\n generator.add(Conv1D(16, 3, padding='same'))\r\n generator.add(Dropout(0.25))\r\n generator.add(LeakyReLU(alpha=0.3))\r\n \r\n generator.add(Conv1D(10, 3, padding='same'))\r\n generator.summary()\r\n #generator.compile(loss='binary_crossentropy', optimizer='adam')\r\n return generator\r\n\r\ndef generator_loss(fake_output, real_output):\r\n return mean_abs_err(fake_output, real_output)\r\n #return cross_entropy(tf.ones_like(fake_output), fake_output)\r\n\r\n\r\n\r\n#generated_signal = generator.predict(sig[1,:,0].reshape((1, 3000,1)))\r\n#plt.plot(generated_signal[0,3000,0,3])\r\n\r\n# Build the discriminator\r\ndef get_discriminator():\r\n discriminator = Sequential()\r\n discriminator.add(Input(shape=(3000,10)))\r\n # discriminator.add(Reshape(target_shape=(3000,10)))\r\n discriminator.add(Conv1D(32,2, padding='same', activation='relu'))\r\n discriminator.add(Dropout(0.5))\r\n discriminator.add(Conv1D(128,2, padding='same', activation='relu'))\r\n discriminator.add(Dropout(0.5))\r\n discriminator.add(Conv1D(512,2, padding='same', activation='relu'))\r\n discriminator.add(Dropout(0.5))\r\n discriminator.add(Conv1D(1,1, padding='same', activation='relu'))\r\n discriminator.add(Dropout(0.5))\r\n discriminator.add(Flatten())\r\n discriminator.add(Dropout(0.5))\r\n discriminator.add(Dense(1, activation='sigmoid'))\r\n discriminator.summary()\r\n #discriminator.compile(loss='binary_crossentropy', optimizer='adam')\r\n return discriminator\r\n\r\ndef discriminator_loss(real_output, fake_output):\r\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\r\n fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\r\n total_loss = real_loss + fake_loss\r\n return total_loss\r\n\r\n#decision = discriminator.predict(generated_signal)\r\n\r\n \r\n\r\n# Note how `tf.function` is used here:\r\n# this decorator \"compiles\" the function into a TensorFlow graph.\r\n@tf.function\r\n# =============================================================================\r\n# def train_step1(signa, real_imf):\r\n# \r\n# with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\r\n# fake_imf = generator(signa, training=False)\r\n# \r\n# real_output = discriminator(real_imf, training=True)\r\n# fake_output = discriminator(fake_imf, training=True)\r\n# \r\n# gen_loss = generator_loss(fake_output, real_output)\r\n# disc_loss = 
discriminator_loss(real_output, fake_output)\r\n# \r\n# #gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)\r\n# gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\r\n# \r\n# #generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\r\n# discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\r\n# =============================================================================\r\n\r\ndef train_step2(signa, real_imf):\r\n \r\n with tf.GradientTape() as gen_tape: #, tf.GradientTape() as disc_tape:\r\n fake_imf = generator(signa, training=True)\r\n\r\n #real_output = discriminator(real_imf, training=False)\r\n #fake_output = discriminator(fake_imf, training=False)\r\n\r\n # per-channel time-domain L1 plus spectral L1 over the 10 IMF channels; the slower\r\n # IMFs get progressively larger frequency-loss weights here\r\n gen_loss, freq_loss = imf_losses(real_imf, fake_imf, (1, 1, 2, 3, 4, 5, 6, 7, 8, 9))\r\n total_loss = gen_loss + 5*freq_loss\r\n #disc_loss = discriminator_loss(real_output, fake_output)\r\n\r\n gradients_of_generator = gen_tape.gradient(total_loss, generator.trainable_variables)\r\n #gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\r\n\r\n generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\r\n #discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\r\n\r\ndef imf_losses(real_imf, fake_imf, freq_weights):\r\n # replaces the ten copy-pasted gen_loss*/real_frq*/fake_frq*/freq_loss* blocks:\r\n # time-domain L1 per IMF channel plus weighted L1 between the magnitude spectra\r\n gen_loss = tf.add_n([tf.cast(generator_loss(real_imf[None, :, i], fake_imf[None, :, i]), tf.float64)\r\n for i in range(10)])\r\n freq_loss = tf.add_n([w * generator_loss(tf.cast(tf.abs(tf.signal.rfft(real_imf[None, :, i])), tf.float64),\r\n tf.cast(tf.abs(tf.signal.rfft(fake_imf[None, :, i])), tf.float64))\r\n for i, w in enumerate(freq_weights)])\r\n #fake_frq = tf.convert_to_tensor(signal.periodogram(fake_imf, fs)[1])\r\n #real_frq = tf.convert_to_tensor(signal.periodogram(real_imf, fs)[1])\r\n return gen_loss, freq_loss\r\n\r\ndef train(training_set, training_label, epochs, BATCH_SIZE):\r\n for epoch in range(epochs):\r\n start = time.time()\r\n\r\n batch_count = np.shape(training_set)[0] // BATCH_SIZE\r\n for ii in range(batch_count):\r\n signa = training_set[ii*BATCH_SIZE:(ii+1)*BATCH_SIZE, :,:]\r\n real_imf = training_label[ii*BATCH_SIZE:(ii+1)*BATCH_SIZE, :,:]\r\n #train_step1(signa, real_imf)\r\n train_step2(signa, real_imf)\r\n if (epoch + 1) % 5 == 0:\r\n fake_imf = generator.predict(signa)\r\n # recompute the losses for logging (all frequency weights are 1 here,\r\n # matching the original monitoring block)\r\n gen_loss, freq_loss = imf_losses(real_imf, fake_imf, (1, 1, 1, 1, 1, 1, 1, 1, 1, 1))\r\n total_loss = gen_loss + 5*freq_loss\r\n# =============================================================================\r\n# decision0=discriminator.predict(fake_imf)\r\n# print(decision0)\r\n# fake_imf = generator(signa, training=False)\r\n# \r\n# real_output = discriminator(real_imf, training=False)\r\n# fake_output = discriminator(fake_imf, training=False)\r\n# =============================================================================\r\n print(r'gen_loss={}'.format(gen_loss))\r\n print(r'frq_loss={}'.format(freq_loss))\r\n print(r'total_loss={}'.format(total_loss))\r\n# =============================================================================\r\n# if (np.mean(decision0)>0.99) & (disc_loss<0.01):\r\n# break\r\n# \r\n# =============================================================================\r\n print('Time for epoch {} is {} sec'.format(epoch + 1, 
time.time()-start))\r\n\r\n \r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n fileLoc = \"F:\\Sleep_data\\public_data\\sleep-edfx_data\\sleep-edfx-mat\\SC_BEMD\\*.mat\"\r\n filelist = glob.glob(fileLoc)\r\n \r\n Label = np.zeros((1,1,1));\r\n sig = np.zeros((1,3000,1));\r\n imf = np.zeros((1,3000,10));\r\n \r\n indices = np.random.randint(0,160949, size=200)\r\n np.array(filelist)[indices.astype(int)]\r\n #for loc in filelist[indices.astype(int)]:\r\n for loc in np.array(filelist)[indices.astype(int)]:\r\n mat =sio.loadmat(loc)\r\n Label0 = mat['Label']\r\n sig0 = mat['sig'] \r\n imf0 = mat['imf']\r\n Label0 = Label0.reshape((1, 1, 1))\r\n sig0 = sig0.reshape((1,3000,1))\r\n imf0 = imf0.T.reshape((1,3000,10))\r\n \r\n Label = np.append(Label, Label0, axis = 0)\r\n sig = np.append(sig, sig0, axis = 0)\r\n imf = np.append(imf, imf0, axis = 0)\r\n \r\n Label = np.delete(Label, 0, axis=0) \r\n sig = np.delete(sig, 0, axis=0) \r\n imf = np.delete(imf, 0, axis=0) \r\n \r\n training_set, test_set, training_label, test_label = train_test_split(sig, imf, train_size = 0.7, random_state=777)\r\n \r\n generator = get_generator()\r\n #discriminator = get_discriminator()\r\n generator_optimizer = tf.keras.optimizers.Adadelta(1e-3)\r\n #discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)\r\n \r\n train(training_set, training_label,6000 ,4)\r\n\r\n\r\n \r\n \r\n# =============================================================================\r\n signa = training_set[0:4,:,:]\r\n real_imf = training_label[0:4,:,:]\r\n fake_imf = generator.predict(signa)\r\n plt.subplot(421)\r\n plt.plot(real_imf[0,:,1])\r\n \r\n plt.subplot(422)\r\n plt.plot(fake_imf[0,:,1])\r\n \r\n plt.subplot(423)\r\n plt.plot(real_imf[0,:,4])\r\n \r\n plt.subplot(424)\r\n plt.plot(fake_imf[0,:,4])\r\n\r\n plt.subplot(425)\r\n plt.plot(real_imf[0,:,7])\r\n \r\n plt.subplot(426)\r\n plt.plot(fake_imf[0,:,7])\r\n \r\n plt.subplot(427)\r\n plt.plot(real_imf[0,:,9])\r\n \r\n plt.subplot(428) \r\n plt.plot(fake_imf[0,:,9])\r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"BEMD-CNN_freq_loss2.py","file_name":"BEMD-CNN_freq_loss2.py","file_ext":"py","file_size_in_byte":20002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"201385182","text":"#!/usr/bin/env python3\n\"\"\"\nRemoves a trailing duplicate newline from text files in the directory.\n\"\"\"\nimport os\ns_dir = os.path.dirname(os.path.abspath(__file__))\npaths = []\nfor dirpath, dirnames, filenames in os.walk(s_dir):\n paths_dir = []\n for f in filenames:\n path = os.path.join(dirpath, f)\n paths_dir.append(path)\n paths.extend(paths_dir)\npaths.sort()\nprint(\"number of files in directory:\", len(paths))\ndef is_text(path):\n result = True\n if path.endswith(\".out\"):\n result = False\n elif path.endswith(\".o\"):\n result = False\n elif \".git\" in path:\n result = False\n return result\npaths = list(filter(is_text, paths))\nprint(\"number of files after filtering:\", len(paths))\ndef is_update(path):\n with open(path, \"r\") as file:\n try:\n s = file.read()\n except UnicodeDecodeError as e:\n print(\"UnicodeDecodeError at\", path)\n exit()\n result = s.endswith(\"\\n\\n\")\n return result\npaths = list(filter(is_update, paths))\nprint(\"number of files to update:\", len(paths))\nn_update = 0\nfor path in paths:\n size_before = os.path.getsize(path)\n with open(path, \"r\") as file:\n s = file.read()\n with open(path, \"w\") as file:\n file.write(s[:-1])\n size_after = os.path.getsize(path)\n print(\"updated {} (size {} -> 
{})\".format(path, size_before, size_after))\n n_update += 1\nprint(\"number of updated files:\", n_update)\n","sub_path":"src/py/remove_newline.py","file_name":"remove_newline.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"289498846","text":"from QA.Utilities.PerformAction import PerformActions\nfrom QA.PageObjects.Test_Locators import PCN\nfrom QA.Utilities.CommonLib import CommonFunctions\nfrom QA.Base.Config import MyConfigFiles\nfrom QA.BusinessLogic.Home_Page import Home\nfrom QA.BusinessLogic.Filter_Page import Filter\nfrom QA.BusinessLogic.CreateJPCN_Page import createJPCN_Page\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom pathlib import Path\nfrom selenium import webdriver\nimport sys\nimport datetime\nfrom pytz import timezone\nimport time\n\nclass ClosureAssessment_Page():\n global objCommon, objActions ,objConfig,objFilter,objHome,objCreateJPCN\n objCommon = CommonFunctions()\n objActions = PerformActions()\n objConfig=MyConfigFiles()\n objFilter=Filter()\n objHome=Home()\n objCreateJPCN = createJPCN_Page()\n\n\n def Closure_Assesement_DashBorad_Link(self):\n time.sleep(2)\n objActions.clickElement(PCN.DashBoard_ContextCE_ClosureAssessment_WebElement_xapth, \"xpath\")\n time.sleep(3)\n objCommon.capture_screenshot(\"Closure Assessment Page intiiated\")\n\n def ValidationcheckFor_JPNMPNInfor_Section(self):\n objCreateJPCN.ClickOnJPN_MPN_Exapnd()\n time.sleep(3)\n global flag\n time.sleep(2)\n strPages = objActions.getText(PCN.ContextCE_TablePage_Count_xpath, \"xpath\")\n strPagesCount = strPages.split(\" \")\n intPagesCount = int(strPagesCount[4])\n flag = False\n for pages in range(1, intPagesCount + 1):\n objTable = MyConfigFiles.driver.find_elements_by_xpath(\"//table[@id='upload-jpn']/tbody/tr\")\n intRowCount = len(objTable)\n for i in range(2, intRowCount + 1):\n stri = str(i)\n objTablecol = MyConfigFiles.driver.find_elements_by_xpath(\"//table[@id='upload-jpn']/tbody/tr/th\")\n intColCount = len(objTablecol)\n for j in range(i, intColCount + 1):\n strj = str(j)\n strchoosefile = objActions.getText(\"//table[@id='upload-jpn']/tbody/tr[\" + stri + \"]/td[2]\", \"xpath\")\n if strchoosefile == \"MAXIM\":\n time.sleep(2)\n objActions.clickElement(\"//table[@id='upload-jpn']/tbody/tr[\" + stri + \"]/td[2]//a[1]\", \"xpath\")\n time.sleep(2)\n LTB_Decision = MyConfigFiles.driver.find_element_by_xpath(\"//div[contains(@class,'supplier-info-section padding-around show-it')]//select[@id='selectDrpDwn']\")\n LTB_DStatus= LTB_Decision.is_enabled()\n LTB_Comments = MyConfigFiles.driver.find_element_by_xpath(\"//tr[8]//td[2]//div[1]//div[1]//div[1]//input[1]\")\n LTB_CStatus=LTB_Comments.is_enabled()\n if (LTB_DStatus) and (LTB_CStatus) == True:\n print(\"LTB Decision and Comments are Editable\")\n objActions.selectDropdown(\"//div[contains(@class,'supplier-info-section padding-around show-it')]//select[@id='selectDrpDwn']\", \"xpath\", \"visibletext\", \"Bridge Buy\")\n time.sleep(3)\n objActions.enterText(\"//tr[8]//td[2]//div[1]//div[1]//div[1]//input[1]\", \"xpath\", \"Comments\")\n time.sleep(3)\n objCommon.capture_screenshot(\"LTB Decision and LTB comments sucessfully edited and saved\" )\n objActions.clickElement(PCN.Submit_SR_button_xpath, \"xpath\")\n time.sleep(3)\n else:\n print(\"LTB Decision and Comments are not Editable\")\n 
objCommon.capture_screenshot(\"LTB Decision and Comments are not Editable\")\n time.sleep(3)\n objActions.clickElement(PCN.Cancel_Button_xpath, \"xpath\")\n flag =True\n break\n\n def ValidationcheckFor_CONTEXTCE_DETAILS_Section_ClosureAssessment(self,PCNStatus):\n time.sleep(2)\n objActions.selectDropdown(PCN.Validate_PCNStatus_Value_name, \"name\", \"visibletext\", PCNStatus)\n html = MyConfigFiles.driver.find_element_by_tag_name('html')\n html.send_keys(Keys.TAB)\n time.sleep(2)\n strIPCNS_Act = objActions.getText(PCN.Validation_msg_ClosureAssesment_WebElement_xapth, \"xpath\")\n time.sleep(2)\n strIPCNS_Exp = \"Invalid pcn status\"\n if strIPCNS_Act == strIPCNS_Exp:\n print(\"Passed - Validation Message is dispalying as::\" \" \" + strIPCNS_Act)\n else:\n print(\"Failed -NO validation message is displayed\")\n assert strIPCNS_Act == strIPCNS_Exp\n\n def Fill_CONTEXTCE_Drtails_Section_ClosureAssessment(self, PCNStatus,pcnComments,pcnClosedComments):\n time.sleep(2)\n objActions.selectDropdown(PCN.Validate_PCNStatus_Value_name, \"name\", \"visibletext\", PCNStatus)\n time.sleep(3)\n objActions.enterText(PCN.Validate_PCNStatusComment_Value_name, \"name\", pcnComments)\n time.sleep(3)\n objActions.enterText(PCN.PCNStatusClose_Comments_name, \"name\", pcnClosedComments)\n time.sleep(3)\n objActions.clickElement(PCN.Close_Assessment_WebElement_xpath, \"xpath\")\n # Waring_Msg=objActions.getText(PCN.Close_Assessment_Warning_WebElement_xpath, \"xpath\")\n # print(Waring_Msg)\n # assert (objActions.AssertObjectExists(PCN.Close_Assessment_Warning_WebElement_xpath, \"xpath\"))\n\n\n\n def EDit_PR_Section_ClosureAssessment(self,PCNStatus,pcnComments,pcnClosedComments):\n time.sleep(3)\n global flag\n time.sleep(4)\n objTable = MyConfigFiles.driver.find_elements_by_xpath(\"//div[@class='context-ce-details-section padding-around show-it']//div[@class='inline']//div//table[@id='pr']/tbody/tr\")\n intRowCount = len(objTable)\n for i in range(2, intRowCount + 1):\n stri = str(i)\n strEcoMcoStatus = objActions.getText(\"//div[@class='context-ce-details-section padding-around show-it']//div[@class='inline']//div//table[@id='pr']/tbody/tr[\" + stri + \"]/td[7]\", \"xpath\")\n if strEcoMcoStatus != '':\n objActions.clickElement( \"//div[@class='context-ce-details-section padding-around show-it']//div[@class='inline']//div//table[@id='pr']/tbody/tr[\" + stri + \"]/td[1]\", \"xpath\")\n time.sleep(2)\n MyConfigFiles.driver.find_element_by_name(\"mcoEco\").clear()\n objActions.enterText(PCN.AddNewRecord_ECOMCO_WebEdit_name, \"name\", \"Invailddata\")\n objCommon.capture_screenshot(\"MCOECO Values is edited Sucessfully with invalid data\")\n objActions.clickElement(PCN.AddNewAttachement_SubmitBtn_xpath, \"xpath\")\n time.sleep(1)\n objActions.selectDropdown(PCN.Validate_PCNStatus_Value_name, \"name\", \"visibletext\", PCNStatus)\n time.sleep(2)\n objActions.enterText(PCN.Validate_PCNStatusComment_Value_name, \"name\", pcnComments)\n time.sleep(2)\n objActions.enterText(PCN.PCNStatusClose_Comments_name, \"name\", pcnClosedComments)\n time.sleep(2)\n objActions.clickElement(PCN.Close_Assessment_WebElement_xpath, \"xpath\")\n Waring_Msg=objActions.getText(PCN.Close_Assessment_Warning_WebElement_xpath, \"xpath\")\n print(Waring_Msg)\n assert (objActions.AssertObjectExists(PCN.Close_Assessment_Warning_WebElement_xpath, \"xpath\"))\n objActions.clickElement(PCN.Completed_Page_OK_button_xpath, \"xpath\")\n time.sleep(2)\n objActions.clickElement(\"//div[@class='context-ce-details-section padding-around 
show-it']//div[@class='inline']//div//table[@id='pr']/tbody/tr[\" + stri + \"]/td[1]\", \"xpath\")\n time.sleep(2)\n MyConfigFiles.driver.find_element_by_name(\"mcoEco\").clear()\n objActions.clickElement(PCN.AddNewAttachement_SubmitBtn_xpath, \"xpath\")\n time.sleep(2)\n objActions.clickElement(PCN.Close_Assessment_WebElement_xpath, \"xpath\")\n break\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"SCO_Automation/QA/BusinessLogic/ClosureAssessment_Page.py","file_name":"ClosureAssessment_Page.py","file_ext":"py","file_size_in_byte":8316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"21955377","text":"#!/usr/bin/env python3\n\n\nimport sys\nimport math\nfrom calc import *\nfrom additional import *\n\n\ndef main():\n root = check_parameters()\n ar = []\n new = []\n i = 2\n while (i < len(sys.argv)):\n for j in range(root):\n new.append(sys.argv[i])\n i += 1\n ar.append(new)\n new = []\n get_calc(ar, root)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"360230944","text":"from numpy import *\r\nimport unittest\r\n\r\n\r\nclass NpshapeTest(unittest.TestCase):\r\n\r\n\tdef test_shape(this):\r\n\t\t# shape check\r\n\t\tarr1 = array([[1, 2], [2, 3], [3, 4]])\r\n\t\tprint(arr1.shape[0]) # 3, size of the first dimension\r\n\t\tprint(arr1.shape[1]) # 2, size of the second dimension\r\n\r\n\t\"\"\"\r\n\tcheck tile\r\n\t\"\"\"\r\n\tdef test_shape2(this):\r\n\t\tarr2 = tile(2.1, (5, 2))\r\n\t\tprint(arr2)\r\n\t\t\"\"\"\r\n\t\t[[2.1 2.1]\r\n\t\t [2.1 2.1]\r\n\t\t [2.1 2.1]\r\n\t\t [2.1 2.1]\r\n\t\t [2.1 2.1]]\r\n\t\t\"\"\"\r\n\t\tprint(shape(arr2)) # (5, 2)\r\n\r\n\t\tarr3 = array([\r\n\t\t\t\t\t[\r\n\t\t\t\t\t\t[1, 1], \r\n\t\t\t\t\t\t[2, 2], \r\n\t\t\t\t\t\t[3, 3]\r\n\t\t\t\t\t], \r\n\t\t\t\t\t[\r\n\t\t\t\t\t\t[4, 4], \r\n\t\t\t\t\t\t[5, 5], \r\n\t\t\t\t\t\t[6, 6]\r\n\t\t\t\t\t]\r\n\t\t\t\t])\r\n\t\tprint(shape(arr3)) # (2, 3, 2)\r\n\r\n\tdef test_tile2(this):\r\n\t\t\"\"\"\r\n\t\tapply tile to a list\r\n\t\t\"\"\"\r\n\t\tlist1 = array([1, 2])\r\n\t\tarr1 = tile(list1, (3, 1))\r\n\t\tprint(arr1)\r\n\t\t\"\"\"\r\n\t\t[[1 2]\r\n\t\t [1 2]\r\n\t\t [1 2]]\r\n\t\t\"\"\"\r\n\r\n\tdef test_shape_mat(this):\r\n\t\t# shape on a matrix; arr2 was a local of test_shape2, so rebuild it here\r\n\t\tarr2 = tile(2.1, (5, 2))\r\n\t\tmat2 = mat(arr2)\r\n\t\tprint(shape(mat2)) # (5, 2)\r\n\r\n\tdef test_zeros(this):\r\n\t\t# the type returned by zeros\r\n\t\tmat3 = zeros((2, 3))\r\n\t\tprint(type(mat3)) # <class 'numpy.ndarray'>\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tsuite = unittest.TestSuite()\r\n\tsuite.addTest(NpshapeTest('test_tile2'))\r\n\tsuite.addTest(NpshapeTest('test_shape2'))\r\n\r\n\tunittest.TextTestRunner(verbosity=2).run(suite)\r\n","sub_path":"numpydemo/npshapedemo.py","file_name":"npshapedemo.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"150723820","text":"# Python Module\nimport os\nimport pandas\nimport numpy\nimport rpy2.robjects as robjects\nfrom rpy2.robjects import pandas2ri\npandas2ri.activate()\nimport math\n\n# Directory path\nroot_dir = \"/home/marie/Documents/\"\n\n# Built-in Module\nos.chdir(\"%sTNBC_Drug_response/SRC/Python\"%root_dir)\nfrom R_functions_plot import *\nfrom Functions_misc import *\n\n################################################################################\n# STEP 1b - mRNA = Normalization and Scaling\n################################################################################\nos.chdir(\"%sINPUT/Cellcount_20150707\"%root_dir)\nraw_design_files 
= os.listdir(\"%sINPUT/Cellcount_20150707\"%root_dir)\nstep_1_files = os.listdir(\"%sPROCESSED/Step_1_mRNA\"%root_dir)\nwhole_design = pandas.read_table('20150707_RNAseqDGE_PlateID.tsv')\n\n# Default settings\nexp = \"mRNA\"\n\n# Which design to analyse?\n# print(\"Which design to analyse?\")\nprint(pandas.Series.unique(whole_design.DesignNumber))\ndesign_number = 2#int(input(\"Which design to analyse? -> \"))\n\n# Which time point ?\n# print(\"Which time point?\")\nprint(pandas.Series.unique(whole_design.Time))\ntime_point = 3#int(input(\"Which time point? -> \"))\n\n# Which type of data\nprint(\"Which type of data?\")\nprint([\"total\",\"umi\"])\ndata_type = \"umi\"\n\n# Load corresponding plate design\ndesign_cond = [x for x in raw_design_files if ('Design' and \"_%d.tsv\"%design_number) in x]\nprint(design_cond)\nprint(\"Loading design table.\")\nDesign = pandas.read_table(design_cond[0])\nDesign.index = Design.loc[:,'Well']\n\n# Load corresponding count table\nselected_plate = whole_design.loc[((whole_design.Experiment == exp) &\n (whole_design.DesignNumber == str(design_number)) &\n (whole_design.Time == time_point)),:]\n\n################################################################################\nfor i in range(0,selected_plate.shape[0],1):\n # Load corresponding plate\n number = int(selected_plate.iloc[i,0][11:13])\n print(number)\n file_name = \"M%d.unq.refseq.%s.dat\"%(number,data_type)\n print(\"Loading read count table: %s\"%file_name)\n os.chdir(\"%sINPUT/DGE_20150707\"%root_dir)\n raw_count = pandas.read_table(\"M%d.unq.refseq.%s.dat\"%(number,data_type))\n raw_count.columns=['0']+list(Design.sort_values(by='Well').loc[:,'Well'])\n\n # Load corresponding sum count and lQ wells\n os.chdir(\"%sPROCESSED/Step_1_mRNA\"%root_dir)\n raw_sum_count = pandas.read_table(\"M%d_%s_sum_count.tsv\"%(number,data_type),names=['well','sum'])\n raw_sum_count.index = raw_sum_count.loc[:,'well']\n design_cond = list_search('M%d_.*%s_lowQWells'%(number,data_type),step_1_files)\n lowQ_wells = pandas.read_table(design_cond[0])\n\n # Discard low quality wells\n select = list(set(raw_count.columns.values) - set(lowQ_wells.loc[:,'wells']) -set(['0']))\n select.sort()\n select_design = Design.loc[select,:]\n select_sum_count = raw_sum_count.loc[select,:]\n select.insert(0,'0')\n select_count = raw_count.loc[:,select]\n\n # Reads count Normalization\n print(\"Normalization processing ...\")\n divided_count = select_count\n for j in range(1,select_count.shape[1]):\n divided_count.iloc[:,j]=select_count.iloc[:,j]/select_sum_count.iloc[j-1,1]\n\n norm_count = divided_count\n norm_count.iloc[:,1:select_count.shape[1]] = divided_count.iloc[:,1:select_count.shape[1]] * numpy.mean(select_sum_count.iloc[:,1])\n\n\n # Log transform\n print(\"Scaling processing ...\")\n add_count = norm_count\n add_count.iloc[:,1:select_count.shape[1]] = norm_count.iloc[:,1:select_count.shape[1]] + 4\n\n tmp_mat = add_count.iloc[:,1:select_count.shape[1]]\n df = pandas2ri.py2ri(tmp_mat)\n r_log = robjects.r.log2(df)\n\n log_count = pandas2ri.ri2py(r_log)\n log_count.index = add_count.iloc[:,0]\n\n # SAVE\n log_count.to_csv(\"M%d_%s_logCount.tsv\"%(number,data_type),sep=\"\\t\")\n select_design.to_csv(\"Design_%d_%d_%s_QC.tsv\"%(design_number,time_point,data_type),sep=\"\\t\")\n","sub_path":"SRC/Python/Step_1b_mRNA.py","file_name":"Step_1b_mRNA.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"402851853","text":"\"\"\"\n1249. 
Minimum Remove to Make Valid Parentheses\nGiven a string s of '(' , ')' and lowercase English characters. \n\nYour task is to remove the minimum number of parentheses ( '(' or ')', in any positions ) so that the resulting parentheses string is valid and return any valid string.\n\nFormally, a parentheses string is valid if and only if:\n\nIt is the empty string, contains only lowercase characters, or\nIt can be written as AB (A concatenated with B), where A and B are valid strings, or\nIt can be written as (A), where A is a valid string.\n\nExample 1:\nInput: s = \"lee(t(c)o)de)\"\nOutput: \"lee(t(c)o)de\"\nExplanation: \"lee(t(co)de)\" , \"lee(t(c)ode)\" would also be accepted.\n\nExample 2:\nInput: s = \"a)b(c)d\"\nOutput: \"ab(c)d\"\n\nExample 3:\nInput: s = \"))((\"\nOutput: \"\"\nExplanation: An empty string is also valid.\n\nExample 4:\nInput: s = \"(a(b(c)d)\"\nOutput: \"a(b(c)d)\"\n\nConstraints:\n1 <= s.length <= 10^5\ns[i] is one of '(' , ')' and lowercase English letters.\n\"\"\"\n# Time complexity: O(N)\n# Space complexity: O(N)\nclass Solution:\n def minRemoveToMakeValid(self, s: str) -> str:\n opening_brackets = 0\n new_s = []\n for c in s:\n if c == \"(\":\n opening_brackets += 1\n new_s.append(\"(\")\n elif c == \")\":\n if opening_brackets > 0:\n opening_brackets -= 1\n new_s.append(\")\")\n else:\n new_s.append(c)\n \n for i in reversed(range(len(new_s))):\n if opening_brackets == 0:\n break\n if new_s[i] == \"(\":\n new_s[i] = \"\"\n opening_brackets -= 1\n \n return \"\".join(new_s)\n","sub_path":"Leetcode/1249. Minimum Remove to Make Valid Parentheses.py","file_name":"1249. Minimum Remove to Make Valid Parentheses.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"51577732","text":"# -*- coding: utf-8 -*-\n\"\"\"\n__mktime__ = '2019/4/22'\n__author__ = 'Just'\n__filename__ = 'banner'\n\"\"\"\nfrom datetime import datetime\nfrom app.model.base import MixinJSONSerializer\n\n\nclass BannerViewModel(MixinJSONSerializer):\n \"\"\"单个主题详细信息\"\"\"\n def __init__(self, banner, items):\n self.id = banner.id\n self.name = banner.name\n self.description = banner.description\n self.update_time = datetime.fromtimestamp(banner.update_time)\n self.count = len(items)\n self.items = items\n self._fields = ['id', 'name', 'description', 'update_time', 'count', 'items']\n\n\nclass BannerCollection(MixinJSONSerializer):\n \"\"\"一组Banner\"\"\"\n def __init__(self):\n self.total = None\n self.banners = []\n self._fields = ['banners', 'total']\n\n def fill(self, collection):\n \"\"\"\n 一组list对象\n :param collection: zip包装list banner对象\n \"\"\"\n self.banners = [BannerViewModel(item[0], item[1]) for item in collection]\n self.total = len(self.banners)\n","sub_path":"app/viewmodel/banner.py","file_name":"banner.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"157685392","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'Report'\n\nurlpatterns = [\n path('request', views.request_list, name='request'),\n path('request/', views.request_detail, name='request-detail'),\n path('list', views.list_list, name='list'),\n path('list/', views.list_detail, name='list-detail'),\n path('list//comment/', views.delete_comment,\n name='delete_comment'),\n path('list/search', views.search_list, name='search_list'),\n path('list/filter', views.filter_list, name='filter_list')\n]\n","sub_path":"app/Report/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"74267377","text":"# coding: utf-8\nfrom django.shortcuts import render\n\n# To inherit from template view\nfrom django.views.generic import TemplateView\nfrom django.views.generic import FormView\nfrom django.template import RequestContext\nfrom django.template.loader import get_template\n\n#Read Settings\nfrom django.conf import settings\n\nfrom .forms import *\nfrom .models import *\n\n#For sending emails\nfrom django.core.mail import EmailMessage\nfrom django.core.mail import EmailMultiAlternatives\n\n#Translate\nfrom django.utils.translation import ugettext as _\n\n#To hide fields\nfrom django import forms\n\n#Geodjango & geojson goodies\nfrom django.contrib.gis.geos import Point\nfrom django.core.serializers import serialize\n\n\n\n\n# Create your views here.\n\n#\n# My stuff\n#\n\nclass infoView(TemplateView):\n template_name = \"walkingtempo/info.html\"\n\nclass mapMainView(TemplateView):\n template_name = \"walkingtempo/map_main.html\"\n def get(self, request):\n return render(request, self.template_name, {'fullmap': True}) \n\nclass mapActivityView(TemplateView):\n template_name = \"walkingtempo/map_activities.html\"\n def get(self, request):\n return render(request, self.template_name, {'fullmap': True})\n\nclass quizView(TemplateView):\n template_name = \"walkingtempo/quiz.html\"\n def get(self, request):\n return render(request, self.template_name, {'fullmap': True}) \n \nclass mapEmbedView(TemplateView):\n template_name = \"walkingtempo/map_embed.html\"\n \n def get(self, request):\n return render(request, self.template_name, {'wohnstrassen': True})\n\nclass mapRetrospectiveView(TemplateView):\n template_name = \"walkingtempo/map_embed.html\"\n \n def get(self, request):\n return render(request, self.template_name, {'wohnstrassen': True, 'restrospective': True})\n\nclass genActivitiesJsonView(TemplateView):\n template_name = \"walkingtempo/past-activities.json\"\n \n def get(self, request):\n\n all_activities = activity.objects.all()\n\n context = {'all_activities': all_activities}\n \n features = []\n feature_collection = serialize('geojson', activity.objects.all(),\n geometry_field='location',\n fields=('title','address','start_date','end_date','copyright','pic','text') )\n\n # for myActivity in all_activities:\n\n #feature_collection = serialize('geojson', myActivity.objects.all(),\n # geometry_field='location',\n # fields=('title',) )\n\n# myPoint = activity.location\n# \n# myTitle = myActivity.title\n # myAddress = myActivity.address\n ## myStartDate = myActivity.start_date \n # myEndDate = myActivity.end_date \n # myPic = str( request.META['HTTP_HOST'] ) + str (myActivity.pic.url) \n # myCopyright = myActivity.copyright\n # myText = myActivity.text \n \n \n #features.append(Feature(geometry=myPoint, properties={\"Title\": \"myTitle\"}))\n \n #feature_collection = FeatureCollection(features)\n \n 
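# serialize('geojson', ...) above already produced a GeoJSON FeatureCollection string,\n # so it can be handed to the template context as-is\n 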
context['geojson'] = feature_collection\n \n return render(request, self.template_name, context) \n\n '''\n \n here for each activity: get the point, the things and make html. Dump it into the properties of the json.\n \n '''\n\n '''\n from geojson import Point, Feature, FeatureCollection, dump\n \n point = Point((-115.81, 37.24))\n \n features = []\n features.append(Feature(geometry=point, properties={\"country\": \"Spain\"}))\n \n # add more features...\n # features.append(...)\n \n feature_collection = FeatureCollection(features)\n \n with open('myfile.geojson', 'w') as f:\n dump(feature_collection, f)\n '''\n\nclass activityFormView(FormView):\n template_name = 'walkingtempo/form.html'\n form_class = activityForm\n\n def get(self, request):\n\n form = self.form_class()\n context = request.GET.dict()\n \n if (context.__contains__('autoFill') and context.__contains__('locLon') and context.__contains__('locLat') ):\n \n #context = request.GET.dict()\n if (context['locLon'] != '' and context['locLat'] != ''):\n \n form = self.form_class( initial = context )\n form.fields['locLat'].widget = forms.HiddenInput()\n form.fields['locLon'].widget = forms.HiddenInput()\n return render(request, self.template_name, {'form': form})\n \n else:\n\n self.template_name = \"walkingtempo/map_form.html\"\n return render(request, self.template_name, {'form': form, 'fullmap': True, 'form_invalid' : True})\n \n else:\n \n self.template_name = \"walkingtempo/map_form.html\"\n return render(request, self.template_name, {'form': form, 'fullmap': True, 'form_invalid' : True})\n\n\n\n def post(self, request):\n \n form = self.form_class(request.POST, request.FILES)\n\n if form.is_valid(): \n\n context = request.POST.dict()\n \n lon = float(context['locLon'])\n lat = float(context['locLat'])\n \n activityPnt = Point(x=lon, y=lat) #, srid=3857)\n \n form.instance.location = activityPnt\n \n myActivity = form.save()\n \n context['pic'] = str( request.META['HTTP_HOST'] ) + str (myActivity.pic.url)\n\n return render(request, 'walkingtempo/form-confirmation.html', context )\n \n else:\n \n print(\"--- Form was not validated ---\") \n\n return render(request, self.template_name, { 'form' : form } )\n ","sub_path":"walkingtempo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"626302739","text":"import re\nimport os.path\nimport operator\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle as pic\nfrom collections import Counter\nfrom nltk.corpus import stopwords\nfrom nltk.corpus import words as woc\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\n\nclass Trainer:\n\n spam = {}\n ham = {}\n\n def __init__(self):\n pass\n\n def clear_string(self, string):\n res = re.compile('[^a-zA-Z]')\n return res.sub('', string)\n\n def get_words(self, string):\n replacements = ('.', ',', '-', '!', '?')\n for r in replacements:\n string = string.replace(r, ' ')\n string = string.split()\n for i in range(0, len(string)):\n if re.search('\\d+', string[i]):\n string[i] = ''\n return string\n\n def filter_file(self):\n file_set = open('english_big.txt', 'r')\n lmt = WordNetLemmatizer()\n for line in file_set:\n words = self.get_words(line)\n for w in words[:-1]:\n w = lmt.lemmatize(lmt.lemmatize(self.clear_string(w).lower()), 'v')\n if (w not in stopwords.words('english')) and (w != ''):\n if words[-1] == 'spam':\n if w in self.spam:\n self.spam[w] = self.spam.get(w) + 1\n else:\n if (w in 
woc.words()) and ((len(w) > 2) or (w == 'go')):\n self.spam[w] = 1\n elif words[-1] == 'ham':\n if w in self.ham:\n self.ham[w] = self.ham.get(w) + 1\n else:\n if w in woc.words() and ((len(w) > 2) or (w == 'go')):\n self.ham[w] = 1\n file_set.close()\n\n def write_to_files(self):\n if os.path.exists('spam_words.pkl') and os.path.exists('ham_words.pkl'):\n sw = open('spam_words.pkl', 'rb')\n self.spam = pic.load(sw)\n sw.close()\n hw = open('ham_words.pkl', 'rb')\n self.ham = pic.load(hw)\n hw.close()\n return\n self.filter_file()\n sw = open('spam_words.pkl', 'wb')\n pic.dump(self.spam, sw, 2)\n sw.close()\n hw = open('ham_words.pkl', 'wb')\n pic.dump(self.ham, hw, 2)\n hw.close()\n return\n\n def draw_plot(self):\n word_s = (sorted(self.spam.items(), key=operator.itemgetter(1)))[-11:-1]\n df_s = pd.DataFrame(word_s, columns=['word', 'frequency'])\n df_s.plot(kind='bar', x='word', title='spam')\n word_h = (sorted(self.ham.items(), key=operator.itemgetter(1)))[-11:-1]\n df_h = pd.DataFrame(word_h, columns=['word', 'frequency'])\n df_h.plot(kind='bar', x='word', title='ham')\n plt.show()\n\n def calc_pos(self, string):\n words = self.get_words(string)\n lmt = WordNetLemmatizer()\n spam_all = sum(self.spam.values())\n ham_all = sum(self.ham.values())\n spam_p = 1\n ham_p = 1\n added_s = 0\n added_h = 0\n l = 0\n for i in range(0, len(words)):\n words[i] = lmt.lemmatize(lmt.lemmatize(self.clear_string(words[i]).lower()), 'v')\n if (words[i] in stopwords.words('english')) or (words[i] == '') or (words[i] not in woc.words()):\n words[i] = ''\n if words[i] != '':\n l = l + 1\n count = Counter(words)\n for key in count.keys():\n if (key not in self.spam) and key != '':\n added_s = added_s + count[key]\n if (key not in self.ham) and key != '':\n added_h = added_h + count[key]\n for key in count.keys():\n if key != '':\n if key in self.spam:\n #spam_p = spam_p * (count[key] / self.spam[key])\n spam_p = spam_p * (self.spam[key] / (spam_all + added_s))\n else:\n spam_p = spam_p * (count[key] / (added_s + spam_all))\n if key in self.ham:\n #ham_p = ham_p * (count[key] / self.ham[key])\n ham_p = ham_p * (self.ham[key] / (ham_all + added_h))\n else:\n ham_p = ham_p * (count[key] / (added_h + ham_all))\n #d = spam_p * (l / spam_all) + ham_p * (l / ham_all)\n print(spam_p * (spam_all / (spam_all + ham_all)))\n print(ham_p * (ham_all / (spam_all + ham_all)))\n return spam_p * (spam_all / (spam_all + ham_all)), ham_p * (ham_all / (spam_all + ham_all))\n\n#def main():\n #print('go' in woc.words())\n #tr = Trainer()\n #word = tr.opFile()\n #print(word)\n #tr.write_to_files()\n #print(tr.calc_pos('Hi are you dating today?'))\n #lmt = WordNetLemmatizer()\n #print(lmt.lemmatize(lmt.lemmatize(tr.clear_string('g').lower()), 'v') in stopwords.words('english'))\n #tr.draw_plot()\n #return\n\n#if __name__ == \"__main__\":\n #main()\n","sub_path":"Trainer.py","file_name":"Trainer.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"525771751","text":"import unittest\nimport json\nfrom unittest.mock import patch, MagicMock\nfrom services.mitre import getMalwareRecords\n\nmock_files = {\n \"/attack-patterns/file1.json\": {\"id\": 1, \"content\": \"foo\", \"name\": \"zzz\"},\n \"/attack-patterns/file2.json\": {\"id\": 2, \"content\": \"bar\", \"name\": \"yyy\"},\n}\n\nmock_repository = {\n \"/attack-patterns\": [\n {\"isFile\": False, \"name\": \"folder\"},\n {\"isFile\": True, \"name\": \"file1.json\"},\n {\"isFile\": True, 
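The `calc_pos` method in the Trainer record above multiplies raw word probabilities, which underflows toward zero on long messages. A minimal stand-alone sketch of the usual remedy, summing log probabilities with Laplace smoothing (the counts and priors below are made-up illustrations, not the pickled spam/ham dictionaries):

```python
import math

def log_posterior(words, class_counts, class_total, vocab_size, prior):
    # Laplace-smoothed log P(class) + sum of log P(word | class); adding logs
    # avoids the numeric underflow that multiplying raw probabilities causes.
    score = math.log(prior)
    for w in words:
        score += math.log((class_counts.get(w, 0) + 1) / (class_total + vocab_size))
    return score

spam_counts = {"free": 30, "win": 20}
ham_counts = {"meeting": 25, "lunch": 15}
vocab_size = len(set(spam_counts) | set(ham_counts))
message = ["free", "lunch"]
s = log_posterior(message, spam_counts, sum(spam_counts.values()), vocab_size, 0.5)
h = log_posterior(message, ham_counts, sum(ham_counts.values()), vocab_size, 0.5)
print("spam" if s > h else "ham")
```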
\"name\": \"file2.json\"},\n {\"isFile\": True, \"name\": \"file3.pdf\"},\n ],\n \"/other-folder\": [],\n}\n\n\nclass TestMitreService(unittest.TestCase):\n @patch(\n \"repository_navigation.RepositoryNavigator.RepositoryNavigator.listRepository\",\n side_effect=lambda p: mock_repository[p],\n )\n @patch(\n \"repository_navigation.RepositoryNavigator.RepositoryNavigator.getFileFromRepository\",\n side_effect=lambda n: json.dumps(mock_files[n]),\n )\n def test_get_malware_records(self, *args):\n response = getMalwareRecords(\n \"/attack-patterns\", [\"id\", \"content\", \"other\", \"content.other\"]\n )\n\n expected = [{\"id\": 1, \"content\": \"foo\"}, {\"id\": 2, \"content\": \"bar\"}]\n self.assertEqual(response, expected)\n","sub_path":"src/__tests__/services/test_mitre.py","file_name":"test_mitre.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"278540243","text":"\nimport random\nimport math\nimport copy\ncheckerboard = [[],[],[],[],[],[],[],[]]\nfor i in range(len(checkerboard)):\n for j in range(8):\n checkerboard[i].append(random.randint(0,1))\ndef create_visual(grid):\n print('\\n',\"----- \"*8)\n for row in grid:\n print('|', end= '')\n for value in row:\n if value == 0:\n print(' H |', end = '')\n else:\n print(' |', end = '')\n\n print('\\n',\"----- \"*8)\ncreate_visual(checkerboard)\n\ngoal = (random.randint(0,7),random.randint(0,7))\n\ndef point_to_index(x, y):\n return x * 8 + y\n\ndef point_to_binary(x, y):\n return bin(point_to_index(x, y))[2:].rjust(6,'0')\n\nbinary_goal = point_to_binary(goal[0], goal[1])\n\nprint(\"The coin you want is number: \", point_to_index(goal[0], goal[1]))\ncoordinate = [int(x) - 1 for x in input(\"What is the coordinate of the coin you want to flip, enter it as a coordinate pair X,Y: \").split(',')]\ncoordinate = [coordinate[-1],coordinate[0]]\n\n\ndef check_value(checkerboard):\n checkerboard_value = \"\"\n num = [0,0,0,0,0,0]\n for i in range(len(checkerboard)):\n for j in range(len(checkerboard)):\n if j %2 == 0:\n if checkerboard[i][j] == 0:\n num[0] += 1\n if j < 2 or (j > 3 and j < 6):\n if checkerboard[i][j] == 0:\n num[1] += 1\n if j < 4:\n if checkerboard[i][j] == 0:\n num[2] += 1\n if i %2 == 0:\n if checkerboard[i][j] == 0:\n num[3] += 1\n if i < 2 or (i > 3 and i < 6):\n if checkerboard[i][j] == 0:\n num[4] += 1\n if i < 4:\n if checkerboard[i][j] == 0:\n num[5] += 1\n for value in num:\n checkerboard_value += \"1\" if value % 2 == 0 else \"0\"\n\n return checkerboard_value\n\n\nprint(\"Original checkerboard value\", check_value(checkerboard))\nog_checkerboard_value =check_value(checkerboard)\n\nnew_checkerboard = copy.deepcopy(checkerboard)\nif new_checkerboard[coordinate[0]][coordinate[1]] == 0:\n new_checkerboard[coordinate[0]][coordinate[1]] = 1\nelse:\n new_checkerboard[coordinate[0]][coordinate[1]] = 0\n\ncreate_visual(new_checkerboard)\nprint(\"New checkerboard value\", check_value(new_checkerboard))\n\nprint(\"binary goal\", binary_goal)\n\ndef same(x,y):\n if x == y:\n return True\n else:\n return False\n\ndef flip(x,y):\n global checkerboard\n if checkerboard[x][y] == 0:\n checkerboard[x][y] = 1\n else:\n checkerboard[x][y] = 0\n\n\n\nif same(check_value(new_checkerboard),binary_goal):\n print(\"Good Job!\")\nelse:\n i = 0\n j = 0\n while not same(check_value(checkerboard),binary_goal):\n flip(i,j)\n if same(check_value(checkerboard),binary_goal):\n break\n else:\n flip(i,j)\n if j == 7:\n i += 1\n j = 0\n elif i == 7 and j 
== 7:\n                print(\"You messed up\")\n                break\n            else:\n                j += 1\n\n\n    print(\"The checkerboard value you wanted to change was\", (i * 8 + j + 1))\n    print(\"That numbers binary value is\", bin(i * 8 + j + 1)[2:].rjust(6,'0'))\n    print(\"Original Value\", og_checkerboard_value)\n","sub_path":"Checkerboard Escape/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"41580834","text":"def say_check(csv, column=r'丁目番地'):\n    from .chdir import chdir\n    from .labeltopos import label_to_pos\n    from csv import reader as csv_reader\n    chdir()\n    with open(csv, mode=r'rt', encoding=r'UTF-8') as istream:\n        r = csv_reader(istream)\n        title = next(r)\n        column = label_to_pos(column, title)\n        try:\n            while True:\n                row = next(r); row\n                name, post = row[0], row[column]\n                pre = next(r)[column]\n                if pre != '':\n                    confirm(name, pre, post)\n                next(r)\n        except StopIteration:\n            return\n\ndef confirm(name, pre, post):\n    from .keybykey import key_by_key\n    print(name, post, sep='\\n', end='\\n\\n')\n    while True:\n        _say(pre, post)\n        k = key_by_key()\n        if k == r' ':\n            break\n\ndef _say(*text):\n    from subprocess import call\n    for y in text:\n        call((r'say', y))\n","sub_path":"ken2015nov/saycheck.py","file_name":"saycheck.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"515058505","text":"import cv2\nimport numpy as np\n\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\n# recognizer = cv2.createLBPHFaceRecognizer() # in OpenCV 2\nrecognizer.read('trainner/trainner.yml')\n# recognizer.load('trainner/trainner.yml') # in OpenCV 2\n\ncascade_path = r\"cv2data\\haarcascade_frontalface_default.xml\"\nface_cascade = cv2.CascadeClassifier(cascade_path)\ncam = cv2.VideoCapture(0)\n# font = cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 1, 1) # in OpenCV 2\nfont = cv2.FONT_HERSHEY_SIMPLEX\nz=True\nwhile z:\n    ret, im = cam.read()\n    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n    faces = face_cascade.detectMultiScale(gray, 1.2, 5)\n    for (x, y, w, h) in faces:\n        cv2.rectangle(im, (x - 50, y - 50), (x + w + 50, y + h + 50), (225, 0, 0), 2)\n        img_id, conf = recognizer.predict(gray[y:y + h, x:x + w])\n        if conf > 61:\n            if img_id == 1:\n                print(\"Hi, welcome.\")\n                z = False\n                break\n        else:\n            print(conf)\n            img_id = \"Unknown\"\n        # cv2.cv.PutText(cv2.cv.fromarray(im), str(Id), (x, y + h), font, 255)\n        cv2.putText(im, str(img_id), (x, y + h), font, 0.55, (0, 255, 0), 1)\n    cv2.imshow('im', im)\n    if cv2.waitKey(10) & 0xFF == ord('q'):\n        break\ncam.release()\ncv2.destroyAllWindows()\nimport cv2\nimport os\nif not 'trainner' in os.listdir():\n    try:\n        os.mkdir('trainner')  # os.mkdir requires the directory name\n    except:\n        pass\nimport numpy as np\nfrom PIL import Image\n\n# recognizer = cv2.createLBPHFaceRecognizer()\ndetector = cv2.CascadeClassifier(r\"cv2data\\haarcascade_frontalface_default.xml\")\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\n\n\ndef get_images_and_labels(path):\n    image_paths = [os.path.join(path, f) for f in os.listdir(path)]\n    face_samples = []\n    ids = []\n\n    for image_path in image_paths:\n        image = Image.open(image_path).convert('L')\n        image_np = np.array(image, 'uint8')\n        if os.path.split(image_path)[-1].split(\".\")[-1] != 'jpg':\n            continue\n        image_id = int(os.path.split(image_path)[-1].split(\".\")[1])\n        faces = detector.detectMultiScale(image_np)\n        for (x, y, w, h) in faces:\n            face_samples.append(image_np[y:y + h, x:x + w])\n            ids.append(image_id)\n\n    return 
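The checkerboard record above brute-forces all 64 squares to find the coin to flip. Under the common XOR formulation of this puzzle (a different board encoding than the six-region parity scheme in `check_value`, so this is an alternative sketch rather than a drop-in replacement), the square to flip has a closed form:

```python
import random
from functools import reduce

board = [random.randint(0, 1) for _ in range(64)]  # 1 = heads
goal = random.randrange(64)                        # index of the hidden coin

# XOR of the indices of all heads-up squares encodes the board as one number.
state = reduce(lambda acc, i: acc ^ i, (i for i, c in enumerate(board) if c), 0)
flip = state ^ goal   # toggling this square XORs `flip` into the encoding
board[flip] ^= 1

check = reduce(lambda acc, i: acc ^ i, (i for i, c in enumerate(board) if c), 0)
assert check == goal  # one flip always suffices, no search needed
```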
face_samples, ids\n\n\nfaces, Ids = get_images_and_labels('dataSet')\nrecognizer.train(faces, np.array(Ids))\nrecognizer.save('trainner/trainner.yml')\n# os.remove() does not expand wildcards, so delete the .jpg files one by one\nfor f in os.listdir(\"dataSet\"):\n    if f.endswith(\".jpg\"):\n        os.remove(os.path.join(\"dataSet\", f))\n","sub_path":"Remember_Faces.py","file_name":"Remember_Faces.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"260504233","text":"from os import listdir, rename\r\n\r\nto_fix = 'Firefly'\r\nfiles = listdir('.')\r\nepisode = 0\r\n\r\n# runs through all the files in the current dir\r\n# and finds the ones with the prefix you want\r\n# to change, then it changes them!\r\n# mine is currently set up to rename episodes\r\n# of Firefly I got off amazon so Plex will\r\n# parse it properly\r\n\r\nfor f in files:\r\n    if f.startswith(to_fix):\r\n        episode += 1\r\n        if episode < 10:\r\n            ep = '0' + str(episode)\r\n        else:\r\n            ep = str(episode)\r\n        rename(f, 'Firefly - s01e' + ep) \r\n\r\n","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"593901073","text":"from django.test import TestCase\nfrom apps.hello.models import MyData, RequestKeeperModel\n\n\nclass MyDataModelTests(TestCase):\n\n    def test_str(self):\n        \"\"\" check value that return MyData \"\"\"\n        data = MyData(name='Name')\n        self.assertEqual(str(data), u'Name ')\n\n\nclass RequestKeeperModelTests(TestCase):\n\n    def test_str(self):\n        \"\"\" check value that return requests \"\"\"\n        info = RequestKeeperModel(name='/')\n        self.assertEqual(str(info), u'/')\n","sub_path":"apps/hello/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"157652877","text":"#!/usr/bin/env python3\n\nfrom daf.utils.decorators import cli_decorator\nfrom daf.command_line.move.move_utils import MoveBase\n\n\nclass AngleMove(MoveBase):\n    DESC = \"\"\"Move the diffractometer by direct change in the angles\"\"\"\n\n    EPI = \"\"\"\n    Eg:\n        daf.amv --del 30 --eta 15\n        daf.amv -d 30 -e 15\n        daf.amv -d CEN\n        daf.amv -d MAX -co roi1\n        \"\"\"\n\n    def __init__(self):\n        super().__init__()\n        self.parsed_args = self.parse_command_line()\n        self.parsed_args_dict = vars(self.parsed_args)\n        self.exp = self.build_exp()\n\n    def parse_command_line(self):\n        super().parse_command_line()\n        self.motor_inputs()\n        self.parser.add_argument(\n            \"-co\",\n            \"--counter\",\n            metavar=\"counter\",\n            type=str,\n            help=\"choose the counter to be used when inputting CEN or MAX\",\n        )\n        args = self.parser.parse_args()\n        return args\n\n    def write_angles(self, parsed_args_dict: dict) -> dict:\n        \"\"\"Replace CEN/MAX/FWHM/MIN keywords in the passed angles with the corresponding statistic from the last scan\"\"\"\n        dict_ = self.experiment_file_dict[\"scan_stats\"]\n        if dict_:\n            if parsed_args_dict[\"counter\"] is not None:\n                FWHM = dict_[\"fwhm\"][parsed_args_dict[\"counter\"]]\n                CEN = dict_[\"com\"][parsed_args_dict[\"counter\"]]\n                MAX = dict_[\"max\"][parsed_args_dict[\"counter\"]][0]\n                MIN = dict_[\"min\"][parsed_args_dict[\"counter\"]][0]\n\n            elif self.experiment_file_dict[\"main_scan_counter\"]:\n                FWHM = dict_[\"fwhm\"][self.experiment_file_dict[\"main_scan_counter\"]]\n                CEN = dict_[\"com\"][self.experiment_file_dict[\"main_scan_counter\"]]\n                MAX = dict_[\"max\"][self.experiment_file_dict[\"main_scan_counter\"]][0]\n                MIN = dict_[\"min\"][self.experiment_file_dict[\"main_scan_counter\"]][0]\n            else:\n                values_view = 
dict_.keys()\n                value_iterator = iter(values_view)\n                first_key = next(value_iterator)\n                FWHM = dict_[\"fwhm\"][first_key]\n                CEN = dict_[\"com\"][first_key]\n                MAX = dict_[\"max\"][first_key][0]\n                MIN = dict_[\"min\"][first_key][0]\n            stat_dict = {\"FWHM\": FWHM, \"CEN\": CEN, \"MAX\": MAX, \"MIN\": MIN}\n            dict_parsed_with_counter_stats = {\n                key: (\n                    stat_dict[value] if (value in [\"FWHM\", \"CEN\", \"MAX\", \"MIN\"]) else value\n                )\n                for key, value in parsed_args_dict.items()\n            }\n            return dict_parsed_with_counter_stats\n\n    def run_cmd(self) -> None:\n        \"\"\"Method to be defined by each subclass; this is the method\n        that should be run when calling the cli interface\"\"\"\n        motor_dict = self.write_angles(self.parsed_args_dict)\n        self.write_to_experiment_file(motor_dict, is_motor_set_point=True, write=False)\n        pseudo_dict = self.get_pseudo_angles_from_motor_angles()\n        self.update_experiment_file(pseudo_dict)\n        self.write_to_experiment_file(motor_dict, is_motor_set_point=True)\n\n\n@cli_decorator\ndef main() -> None:\n    obj = AngleMove()\n    obj.run_cmd()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"daf/command_line/move/ang_move.py","file_name":"ang_move.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"348917466","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport pdb\nimport numpy as np\nimport quadprog\nimport miosqp\nimport scipy as sp\nimport scipy.sparse as spa\nfrom .common import MLP, ResNet18\nimport random\n\n\n# no replay when training the first class\nclass Net(nn.Module):\n    def __init__(self,\n                 n_inputs,\n                 n_outputs,\n                 n_tasks,\n                 args):\n        super(Net, self).__init__()\n        nl, nh = args.n_layers, args.n_hiddens\n        self.is_cifar = ('cifar10' in args.data_file)\n        if self.is_cifar:\n            self.net = ResNet18(n_outputs, bias=args.bias)\n        else:\n            self.net = MLP([n_inputs] + [nh] * nl + [n_outputs])\n        self.ce = nn.CrossEntropyLoss()\n        self.n_outputs = n_outputs\n        self.opt = optim.SGD(self.parameters(), args.lr)\n        self.n_memories = args.n_memories\n        self.n_sampled_memories = args.n_sampled_memories\n        self.n_constraints = args.n_constraints\n        self.gpu = args.cuda\n        self.batch_size=args.batch_size\n        self.n_iter = args.n_iter\n\n        # allocate ring buffer\n        self.memory_data = torch.FloatTensor(self.n_memories, n_inputs)\n        self.memory_labs = torch.LongTensor(self.n_memories)\n        # allocate buffer for the current task\n        self.sampled_memory_data = None\n        self.sampled_memory_labs = None\n        # allocate buffer for each task\n        self.sampled_task_data = {}\n        self.sampled_task_labs = {}\n        if args.cuda:\n            self.memory_data = self.memory_data.cuda()\n            self.memory_labs = self.memory_labs.cuda()\n        \n\n        self.observed_tasks = []\n        self.old_task = -1\n        self.mem_cnt = 0\n        self.n_task=0\n        self.n_old_task=0\n        self.task_buffer_size=0\n        self.sample_size_list=[]\n\n\n    def forward(self, x, t=0):\n        output = self.net(x)\n        return output\n\n    def select_random_samples(self):\n\n        if self.sampled_memory_data is None:\n            self.sampled_memory_data = self.memory_data.clone()\n            self.sampled_memory_labs = self.memory_labs.clone()\n\n        else:\n            sampled_memory_size=self.sampled_memory_labs.size(0)\n            total_size=sampled_memory_size+self.n_memories\n\n            self.sampled_memory_data = torch.cat(\n                (self.sampled_memory_data, self.memory_data), dim=0)\n            self.sampled_memory_labs = torch.cat(\n                (self.sampled_memory_labs, self.memory_labs), dim=0)\n\n            if total_size>self.task_buffer_size:\n                
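For reference, the keyword substitution that `AngleMove.write_angles` performs above reduces to a small dict comprehension: any motor value given as a statistic keyword is swapped for that statistic from the last scan, and plain numbers pass through. A toy illustration with made-up values:

```python
stats = {"FWHM": 0.12, "CEN": 10.5, "MAX": 10.7, "MIN": 9.9}  # fake scan stats
parsed = {"del": "CEN", "eta": 15.0, "counter": None}         # fake CLI input
resolved = {k: (stats[v] if v in stats else v) for k, v in parsed.items()}
print(resolved)  # {'del': 10.5, 'eta': 15.0, 'counter': None}
```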
shuffeled_inds=torch.randperm(total_size)\n self.sampled_memory_data = self.sampled_memory_data[shuffeled_inds[0:self.task_buffer_size]]\n self.sampled_memory_labs = self.sampled_memory_labs[shuffeled_inds[0:self.task_buffer_size]]\n\n\n def observe(self, x, t, y, pretrained=None):\n\n # identify a new task\n if t!=self.old_task:\n # update the counter and list\n self.old_task=t\n self.observed_tasks.append(t)\n self.mem_cnt = 0\n self.n_old_task=self.n_task\n self.n_task+=1\n self.task_buffer_size=int(self.n_sampled_memories/self.n_task)\n\n if self.n_task>1:\n # determine the sample size from each task\n remainder=self.n_constraints%self.n_old_task\n quotient=int(self.n_constraints/self.n_old_task)\n self.sample_size_list=[quotient for _ in range(self.n_old_task)]\n for i in range(remainder):\n self.sample_size_list[i]=self.sample_size_list[i]+1\n\n # update the task buffer\n task_id=self.observed_tasks[self.n_old_task-1]\n self.sampled_task_data[task_id]=self.sampled_memory_data\n self.sampled_task_labs[task_id]=self.sampled_memory_labs\n\n # shrink the buffer size\n for index in range(self.n_old_task):\n task_id=self.observed_tasks[index]\n self.sampled_task_data[task_id]=self.sampled_task_data[task_id][:self.task_buffer_size]\n self.sampled_task_labs[task_id]=self.sampled_task_labs[task_id][:self.task_buffer_size]\n\n # intialize the cluster for new task\n self.sampled_memory_data=None\n self.sampled_memory_labs=None\n\n # Update ring buffer storing examples from current task\n bsz = y.data.size(0)\n endcnt = min(self.mem_cnt + bsz, self.n_memories)\n effbsz = endcnt - self.mem_cnt\n self.memory_data[self.mem_cnt: endcnt].copy_(\n x.data[: effbsz])\n if bsz == 1:\n self.memory_labs[self.mem_cnt] = y.data[0]\n else:\n self.memory_labs[self.mem_cnt: endcnt].copy_(\n y.data[: effbsz])\n self.mem_cnt += effbsz\n\n\n for iter_i in range(self.n_iter):\n # update model on the new data\n self.zero_grad()\n loss = self.ce(self.forward(x), y)\n loss.backward()\n self.opt.step()\n \n #----update model on the old data----#\n if self.n_task>1:\n\n batch_x=None\n batch_y=None\n\n # sample from each task\n for index in range(self.n_old_task):\n task_id=self.observed_tasks[index]\n sample_size=self.sample_size_list[index]\n random_inds=random.sample(range(0,self.task_buffer_size),sample_size)\n\n if batch_x is None:\n batch_x=self.sampled_task_data[task_id][random_inds]\n batch_y=self.sampled_task_labs[task_id][random_inds]\n else:\n batch_x=torch.cat((batch_x,self.sampled_task_data[task_id][random_inds]),dim=0)\n batch_y=torch.cat((batch_y,self.sampled_task_labs[task_id][random_inds]),dim=0)\n\n \n self.zero_grad()\n loss = self.ce(self.forward(batch_x), batch_y)\n loss.backward()\n self.opt.step()\n\n\n\n \n if self.mem_cnt == self.n_memories:\n self.mem_cnt = 0\n self.select_random_samples()\n","sub_path":"model/random2.py","file_name":"random2.py","file_ext":"py","file_size_in_byte":6293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"1515321","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport cv2\nfrom utils import *\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndata=np.load('data/temple.npz')\n\n# for key in data:\n# \tprint(key)\n\nK1= data['K1']\nK2= data['K2']\n\nUV=data['pts1']\nUV_prime = data['pts2']\n\nF=cv2.findFundamentalMat(UV,UV_prime,method=cv2.FM_8POINT)[0]\n\nE = np.dot(np.dot(K2.T,F),K1)\n\nprint(E)\nR1,R2,t = cv2.decomposeEssentialMat(E)\n\n#R1 -t\n\nIO=np.hstack((np.eye(3), np.zeros((3,1))))\nRt = 
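The `select_random_samples` buffer in the record above approximates a uniform sample by concatenating, shuffling and truncating after each chunk. The classic one-pass alternative is reservoir sampling; a hedged stand-alone sketch (the helper name and the toy stream are my own, not part of the `Net` class API):

```python
import random

def reservoir_update(buffer, item, n_seen, capacity):
    # Keeps `buffer` a uniform random sample of the first n_seen stream items:
    # item number n_seen replaces a random slot with probability capacity / n_seen.
    if len(buffer) < capacity:
        buffer.append(item)
    else:
        j = random.randrange(n_seen)
        if j < capacity:
            buffer[j] = item

buf = []
for n, x in enumerate(range(1000), start=1):
    reservoir_update(buf, x, n, capacity=10)
print(len(buf))  # 10, with each stream element equally likely to be kept
```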
np.hstack((R2,-t))\n\nP1 = np.dot(K1,IO)\nP2 = np.dot(K2,Rt)\nprint(P1)\nprint(P2)\ntri_points=cv2.triangulatePoints(P1,P2,UV.T.astype(float),UV_prime.T.astype(float))\n\n\ntri_points_tran=tri_points.T\ntri3 = tri_points_tran[:,3]\ntri3 = tri3[:,None]\ntri_tran_norm=tri_points_tran/tri3\n\ntri_tran_norm = tri_tran_norm[:,:3]\n\nvisualize_pcd(tri_tran_norm)\n\n# fig = plt.figure()\n# ax = fig.add_subplot(111, projection='3d')\n# ax.scatter(tri_tran_norm[0,:], tri_tran_norm[1,:], tri_tran_norm[2,:], c='r', marker='o')\n","sub_path":"hw6/part3.py","file_name":"part3.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"276895700","text":"import git\nimport math\nfrom copy import deepcopy\nfrom dateutil.relativedelta import relativedelta\n\n\ndef diffusion_metrics_extraction(commit_list):\n diffusion_list_to_return = []\n for commit in commit_list:\n commit_dict = commit.stats.files\n ns = 0\n nd = 0\n nf = 0\n all_line_modded = 0\n change_in_files = []\n for keys, details in commit_dict.items():\n keys = str(keys)\n change_in_files.append(details['lines'])\n all_line_modded = all_line_modded + details['lines']\n modified_changes = keys.split(\"/\")\n if len(modified_changes) == 1:\n nf = nf+1\n if len(modified_changes) == 2:\n nf = nf+1\n ns = ns+1\n if len(modified_changes) >= 3:\n nf = nf+1\n ns = ns+1\n nd = nd+1\n if ns == 0 or nd == 0:\n ns = 1\n nd = 1\n\n entropy = 0\n for change_count in change_in_files:\n file_bias =(change_count/all_line_modded)\n entropy = -file_bias* math.log(file_bias,2) + entropy\n\n # diffusion_metrics_dictionary = {'ns': ns,'nd': nd, 'nf': nf, 'entropy': entropy}\n diffusion_list_to_return.append({'commit': commit.hexsha, 'ns': ns,'nd': nd, 'nf': nf, 'entropy': entropy})\n\n return diffusion_list_to_return\n\n\ndef size_metrics_extraction(commit_list):\n size_list_to_return=[]\n list_of_files_name = []\n file_line_dict = {}\n unmodded_file_line_dict = []\n\n for i in range(len(commit_list)):\n lt = 0\n la = 0\n ld = 0\n commit=commit_list[i]\n this_file_line_dict = {}\n commit_dict = commit.stats.files\n all_line_modded = 0\n change_in_files = []\n\n for keys, details in commit_dict.items():\n keys = str(keys)\n insertions = details['insertions']\n la = la + insertions\n deletions = details['deletions']\n ld = ld + deletions\n change_in_files.append(details['lines'])\n all_line_modded = all_line_modded + details['lines']\n modified_changes = keys.split(\"/\")\n if list_of_files_name.count(modified_changes[-1]) != 0:\n file_line_dict[modified_changes[-1]] = file_line_dict[modified_changes[-1]]+ insertions-deletions\n this_file_line_dict[modified_changes[-1]] = file_line_dict[modified_changes[-1]]+ insertions- deletions\n else:\n list_of_files_name.append(modified_changes[-1])\n file_line_dict[modified_changes[-1]] = details['lines']\n this_file_line_dict[modified_changes[-1]] = details['lines']\n\n copy_file_dict = deepcopy(file_line_dict)\n unmodded_file_line_dict.append(copy_file_dict)\n\n if i == 0:\n lt = 0\n la = 0\n ld = 0\n # size_metrics_dict={'lt': lt,'la': la,'ld':ld}\n size_list_to_return.append({'commit': commit.hexsha,'lt': lt,'la': la,'ld':ld})\n else:\n lt_dict=unmodded_file_line_dict[i-1]\n for key, value in lt_dict.items():\n if key in this_file_line_dict:\n lt = lt + value\n if lt == 0:\n la = 0\n ld = 0\n # size_metrics_dict = {'lt': lt, 'la': la, 'ld': ld}\n size_list_to_return.append({'commit': commit.hexsha, 'lt': lt, 'la': la, 'ld': ld})\n 
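The triangulation script above hard-codes `(R2, -t)` out of the four poses that an essential matrix admits, which happens to suit this dataset. A hedged sketch of the general cheirality test, which keeps the pose that places the most triangulated points in front of both cameras (the function and variable names are my own):

```python
import numpy as np
import cv2

def pick_pose(E, K1, K2, pts1, pts2):
    # decomposeEssentialMat yields R1, R2 and t; the four candidate poses are
    # (R1, t), (R1, -t), (R2, t), (R2, -t). Only one puts (almost) all
    # correspondences at positive depth in both camera frames.
    R1, R2, t = cv2.decomposeEssentialMat(E)
    P1 = K1 @ np.hstack((np.eye(3), np.zeros((3, 1))))
    best, best_count = None, -1
    for R, tt in ((R1, t), (R1, -t), (R2, t), (R2, -t)):
        P2 = K2 @ np.hstack((R, tt))
        X = cv2.triangulatePoints(P1, P2, pts1.T.astype(float), pts2.T.astype(float))
        X = X / X[3]                  # dehomogenize
        depth2 = (R @ X[:3] + tt)[2]  # z coordinate in the second camera frame
        count = int(np.sum((X[2] > 0) & (depth2 > 0)))
        if count > best_count:
            best, best_count = (R, tt), count
    return best
```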
else:\n la = la/lt\n ld = ld/lt\n # size_metrics_dict = {'lt': lt, 'la': la, 'ld': ld}\n size_list_to_return.append({'commit': commit.hexsha,'lt': lt, 'la': la, 'ld': ld})\n\n\n return size_list_to_return\n\ndef purpose_metrics_extraction(commit_list):\n purpose_list_to_return = []\n purpose = 0\n for commit in commit_list:\n if 'bug' in commit.message or 'defect' in commit.message or 'fix' in commit.message or 'patch' in commit.message :\n purpose = 1\n purpose_metrics_dict = {'commit': commit.hexsha,'purpose':purpose}\n purpose_list_to_return.append(purpose_metrics_dict)\n return purpose_list_to_return\n\n\n\ndef file_related_information(commit_list):\n file_name_list = []\n for commit in commit_list:\n for key, details in commit.stats.files.items():\n file_name_list.append(key)\n file_name_list = list(set(file_name_list))\n file_touched_dict_list = []\n file_touched_commit_list = []\n this_commit_ndev = []\n for file in file_name_list:\n committer_list = []\n commits_list = []\n for commits in commit_list:\n this_commit_ndev.extend(committer_list)\n for key in commits.stats.files:\n if file == key:\n committer_list.append(commits.committer.name)\n commits_list.append(commits)\n\n commits_list.sort(key=lambda x: x.committed_datetime, reverse=False)\n\n file_touched_dict_list.append({'file': file, 'committer_name': committer_list})\n file_touched_commit_list.append({'file': file, 'commit': commits_list})\n\n return (file_touched_dict_list , file_touched_commit_list)\n\n\ndef history_dimention_metrics_extraction(commit_list):\n list_to_return = []\n for i in range(len(commit_list)):\n ndev_list = []\n nuc_list = []\n age = 0\n prev_commitlist = commit_list[ :i]\n file_touched_dict_list, file_touched_commit_list = file_related_information(prev_commitlist)\n commit = commit_list[i]\n this_files = []\n for key in commit.stats.files:\n this_files.append(key)\n\n for this_dict in file_touched_dict_list:\n if this_dict['file'] in this_files:\n ndev_list.extend(this_dict['committer_name'])\n for this_commit_dict in file_touched_commit_list:\n if this_commit_dict['file'] in this_files:\n age =age + (commit.committed_datetime.date()-this_commit_dict['commit'][-1].committed_datetime.date()).days\n for commit in this_commit_dict['commit']:\n nuc_list.append(commit.hexsha)\n age = age/len(this_files)\n ndev = len(list(set(ndev_list)))\n nuc = len(list(set(nuc_list)))\n list_to_return.append({'commit': commit.hexsha, 'age':age ,'ndev':ndev,'nuc':nuc})\n return list_to_return\n\n\ndef experiance_metrics_extraction(commit_list):\n list_to_return = []\n for i in range(len(commit_list)):\n devs_exp = {}\n devs_list = []\n devs_rexp_list = []\n prev_commit_list = commit_list[:i+1]\n number_of_years = relativedelta(commit_list[i].committed_datetime, prev_commit_list[0].committed_datetime).years\n for commit in prev_commit_list:\n devs_list.append(commit.committer.name)\n for j in range(number_of_years+1):\n committer_list = []\n for commit in prev_commit_list:\n committer_list.append(commit.committer.name)\n devs_rexp_list.append(j)\n devs_rexp_list.append(committer_list)\n devs_exp = {i: devs_list.count(i) for i in devs_list}\n file_touched_dict_list, file_touched_commit_list = file_related_information(prev_commit_list)\n subsystem_dev_list = []\n sub_dict_list=[]\n sub_sys_name_set = set()\n for file_info in file_touched_dict_list:\n if len(file_info['file'].split('/')) >= 2:\n subsystem_dev_list = file_info['committer_name']\n 
sub_dict_list.append({'sub_sys':file_info['file'].split('/')[0],'subsys_devs':subsystem_dev_list})\n                sub_sys_name_set.add(file_info['file'].split('/')[0])\n        aggrigated_sub_dict_list = []\n        for subsysname in sub_sys_name_set:\n            aggrigated_dev_list = []\n            for dictionary in sub_dict_list:\n                if dictionary['sub_sys'] == subsysname:\n                    aggrigated_dev_list.extend(dictionary['subsys_devs'])\n            aggrigated_sub_dict_list.append({'sub_sys': subsysname, 'devs': aggrigated_dev_list})\n\n\n        exp = 0\n        rexp = 0\n        sexp = 0\n        subsysname = None\n        for key in commit_list[i].stats.files:\n            if len(key.split('/')) >=2:\n                subsysname = key.split('/')[0]\n        for dictionary in aggrigated_sub_dict_list:\n            if dictionary['sub_sys'] == subsysname:\n                sexp = sexp + dictionary['devs'].count(commit_list[i].committer.name)\n\n        for k in range(int(len(devs_rexp_list)/2)):  # k, not i: the outer commit index i must not be shadowed here\n            j = (k*2)\n            rexp = rexp + devs_rexp_list[j+1].count(commit_list[i].committer.name)/(devs_rexp_list[j]+1)\n\n        exp = devs_exp[str(commit_list[i].committer.name)]\n        list_to_return.append({'commit': commit_list[i].hexsha, 'exp': exp, 'rexp': rexp, 'sexp': sexp})\n    return list_to_return\n\n\n\n\n\ndef main():\n    repository_name = \"GithubDataExtractor/\"\n    repo = git.Repo(repository_name)\n    commit_list = list(repo.iter_commits())\n    commit_list.sort(key=lambda x: x.committed_datetime, reverse=False)\n    diffusion_metrics = diffusion_metrics_extraction(commit_list)\n    size_metrics = size_metrics_extraction(commit_list)\n    purpose_metrics = purpose_metrics_extraction(commit_list)\n    history = history_dimention_metrics_extraction(commit_list)\n    exp = experiance_metrics_extraction(commit_list)\n    extracted_list = []\n    for i in range(len(purpose_metrics)):\n        dictionary = {}\n        dictionary.update(diffusion_metrics[i])\n        dictionary.update(size_metrics[i])\n        dictionary.update(purpose_metrics[i])\n        dictionary.update(history[i])\n        dictionary.update(exp[i])\n        extracted_list.append(dictionary)\n\n    print(extracted_list)\n\n\nif __name__ == \"__main__\":\n    main()\n# TODO : history and experiance metrics and integration","sub_path":"kamei2013.py","file_name":"kamei2013.py","file_ext":"py","file_size_in_byte":9901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"161148382","text":"#  transformer_chatbot\n#  Copyright (C) 2018 Golovanov, Tselousov\n#\n#  This program is free software: you can redistribute it and/or modify\n#  it under the terms of the GNU Affero General Public License as published by\n#  the Free Software Foundation, either version 3 of the License, or\n#  (at your option) any later version.\n#\n#  This program is distributed in the hope that it will be useful,\n#  but WITHOUT ANY WARRANTY; without even the implied warranty of\n#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n#  GNU Affero General Public License for more details.\n#\n#  You should have received a copy of the GNU Affero General Public License\n#  along with this program.  
If not, see <https://www.gnu.org/licenses/>.\n\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .transformer_module import TransformerModule\n\n\nclass MultiInputModel(nn.Module):\n    def __init__(self, config, vocab, n_segments=None):\n        super(MultiInputModel, self).__init__()\n        self.config = config\n        self.vocab = vocab\n\n        self.transformer_module = TransformerModule(\n            config.n_layers, len(vocab), config.n_pos_embeddings, config.embeddings_size, vocab.pad_id,\n            config.n_heads, config.dropout, config.embed_dropout, config.attn_dropout, config.ff_dropout, n_segments)\n        self.pre_softmax = nn.Linear(config.embeddings_size, len(vocab), bias=False)\n        self.pre_softmax.weight = self.transformer_module.embeddings.weight\n\n    def forward(self, x, contexts=[]):\n        enc_contexts = [self.encode(c) for c in contexts]\n        return self.decode(x, enc_contexts)\n\n    def encode(self, x):\n        return self.transformer_module(x)\n\n    def generate(self, enc_x):\n        return self.pre_softmax(enc_x)\n\n    def decode(self, x, enc_contexts=[]):\n        x, _ = self.transformer_module(x, enc_contexts)\n        return self.generate(x)\n\n    def predict(self, contexts=[]):\n        enc_contexts = [self.encode(c) for c in contexts]\n        prediction = self.beam_search(enc_contexts)\n        return prediction\n\n    def predict_beam(self, contexts=[]):\n        enc_contexts = [self.encode(c) for c in contexts]\n        prediction = self.beam_search(enc_contexts, return_beams=True)\n\n        return prediction\n\n    def _length_penalty(self, sequence_lengths):\n        \"\"\"https://arxiv.org/abs/1609.08144\"\"\"\n        return (5 + sequence_lengths) ** self.config.length_penalty / (5 + 1) ** self.config.length_penalty\n\n    def predict_next(self, enc_contexts=[], return_beams=False, prefix=[]):\n        with torch.no_grad():\n            if len(enc_contexts) == 0:\n                return []\n\n            batch_size = enc_contexts[0][0].shape[0]\n            device = next(self.parameters()).device\n\n            ind = len(prefix)\n            if ind:\n                assert batch_size == 1\n                prefix_sentence = [self.vocab.bos_id] + prefix\n                prevs = torch.LongTensor(prefix_sentence).to(device)\n                prevs = prevs.expand(self.config.beam_size, ind + 1)\n            else:\n                prevs = torch.full((batch_size * self.config.beam_size, 1), fill_value=self.vocab.bos_id, dtype=torch.long,\n                                   device=device)\n            beam_enc_contexts = []\n            for c, p in enc_contexts:\n                c = c.unsqueeze(1).repeat(1, self.config.beam_size, 1, 1)\n                c = c.view(-1, c.shape[2], c.shape[3])\n                p = p.unsqueeze(1).repeat(1, self.config.beam_size, 1)  # config.beam_size, as in beam_search below\n                p = p.view(-1, p.shape[2])\n                beam_enc_contexts.append((c, p))\n            outputs, _ = self.transformer_module(prevs, beam_enc_contexts)\n            logits = self.generate(outputs[:, -1, :])\n            probs = F.softmax(logits, dim=-1)\n            return probs[0].tolist()\n\n    def beam_search(self, enc_contexts=[], return_beams=False):\n        with torch.no_grad():\n            if len(enc_contexts) == 0:\n                return []\n\n            batch_size = enc_contexts[0][0].shape[0]\n            device = next(self.parameters()).device\n\n            prevs = torch.full((batch_size * self.config.beam_size, 1), fill_value=self.vocab.bos_id, dtype=torch.long,\n                               device=device)\n\n            beam_scores = torch.zeros(batch_size, self.config.beam_size, device=device)\n            beam_lens = torch.ones(batch_size, self.config.beam_size, dtype=torch.long, device=device)\n            is_end = torch.zeros(batch_size, self.config.beam_size, dtype=torch.uint8, device=device)\n\n            beam_enc_contexts = []\n            for c, p in enc_contexts:\n                c = c.unsqueeze(1).repeat(1, self.config.beam_size, 1, 1)\n                c = c.view(-1, c.shape[2], c.shape[3])\n                p = p.unsqueeze(1).repeat(1, self.config.beam_size, 1)\n                p = p.view(-1, p.shape[2])\n                beam_enc_contexts.append((c, p))\n\n            
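For reference, `_length_penalty` above is the GNMT length normalization from the cited paper, lp(len) = (5 + len)^alpha / 6^alpha, where `config.length_penalty` plays the role of alpha. A tiny worked example with an illustrative alpha:

```python
alpha = 0.6  # illustrative; the model reads this from config.length_penalty
for length in (1, 5, 10, 20):
    lp = (5 + length) ** alpha / (5 + 1) ** alpha
    print(length, lp)  # grows sublinearly: lp(1) = 1.0, lp(10) is about 1.73
```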
current_sample_prob = 1\n group_size = self.config.beam_size // self.config.diversity_groups\n diversity_penalty = torch.zeros((batch_size, len(self.vocab)), device=device)\n\n # zrs:\n repeat = [{} for i in range(batch_size * self.config.beam_size)]\n # **********\n for i in range(self.config.max_seq_len):\n outputs, _ = self.transformer_module(prevs, beam_enc_contexts)\n\n logits = self.generate(outputs[:, -1, :])\n log_probs = F.log_softmax(logits, dim=-1)\n # zrs: remove n repeat. prevs: (batch_size*beam_size, 1)\n for idx in range(batch_size * self.config.beam_size):\n for key in repeat[idx]:\n for value in repeat[idx][key]:\n log_probs[idx][value] = -1000\n # **********\n log_probs = log_probs.view(batch_size, self.config.beam_size, -1)\n\n beam_scores = beam_scores.unsqueeze(-1) + log_probs * (1 - is_end.float().unsqueeze(-1))\n # zrs, log_probs: batch * beam * dim\n ba, be, dim = beam_scores.shape\n for ba_idx in range(ba):\n for be_idx in range(be):\n if int(torch.max(beam_scores[ba_idx][be_idx]) == torch.min(beam_scores[ba_idx][be_idx])):\n temp = float(beam_scores[ba_idx][be_idx][0])\n beam_scores[ba_idx][be_idx] = -float('inf')\n beam_scores[ba_idx][be_idx][0] = temp\n # **********\n penalty = self._length_penalty(beam_lens.float() + 1 - is_end.float())\n penalty = penalty.unsqueeze(-1).repeat(1, 1, len(self.vocab))\n beam_scores = beam_scores / penalty\n\n if i == 0:\n penalty = penalty[:, 0, :]\n beam_scores = beam_scores[:, 0, :]\n\n beam_scores, idxs = beam_scores.topk(self.config.beam_size, dim=-1)\n beam_idxs = torch.zeros((batch_size, self.config.beam_size), dtype=torch.long, device=device)\n else:\n\n penalty = penalty.view(batch_size, self.config.diversity_groups, group_size, -1)\n beam_scores = beam_scores.view(batch_size, self.config.diversity_groups, group_size, -1)\n\n all_scores, all_idxs = [], []\n for g in range(self.config.diversity_groups):\n g_beam_scores = beam_scores[:, g, :, :]\n g_penalty = penalty[:, g, :, :]\n g_beam_scores -= self.config.diversity_coef * diversity_penalty.unsqueeze(1) / g_penalty\n g_beam_scores = g_beam_scores.view(batch_size, -1)\n\n if random.random() < current_sample_prob:\n # print('*********')\n beam_probas = F.softmax(g_beam_scores/self.config.temperature, dim=-1)\n if self.config.annealing_topk is not None:\n beam_probas, sample_idxs = beam_probas.topk(self.config.annealing_topk, dim=-1)\n g_idxs = torch.multinomial(beam_probas, group_size)\n g_idxs = torch.gather(sample_idxs, 1, g_idxs)\n else:\n g_idxs = torch.multinomial(beam_probas, group_size)\n else:\n # print('|||||||||')\n _, g_idxs = g_beam_scores.topk(group_size, dim=-1)\n\n g_scores = torch.gather(beam_scores[:, g, :, :].view(batch_size, -1), 1, g_idxs)\n g_idxs += g * group_size * len(self.vocab)\n\n all_scores.append(g_scores)\n all_idxs.append(g_idxs)\n\n diversity_penalty.scatter_add_(1, torch.fmod(g_idxs, len(self.vocab)),\n torch.ones((batch_size, group_size), device=device))\n\n diversity_penalty.fill_(0)\n penalty = penalty.view(batch_size, -1)\n beam_scores = torch.cat(all_scores, dim=-1)\n idxs = torch.cat(all_idxs, dim=-1)\n\n beam_idxs = (idxs.float() / len(self.vocab)).long()\n\n penalty = torch.gather(penalty, 1, idxs)\n sym_idxs = torch.fmod(idxs, log_probs.shape[-1])\n is_end = torch.gather(is_end, 1, beam_idxs).bool()\n beam_lens = torch.gather(beam_lens, 1, beam_idxs)\n\n sym_idxs[is_end] = self.vocab.pad_id\n beam_lens[~is_end] += 1\n is_end[sym_idxs == self.vocab.eos_id] = 1\n\n sym_idxs = sym_idxs.view(batch_size * self.config.beam_size, 1)\n 
prevs = prevs.view(batch_size, self.config.beam_size, -1)\n prevs = torch.gather(prevs, 1, beam_idxs.unsqueeze(-1).repeat(1, 1, prevs.shape[-1]))\n prevs = prevs.view(batch_size * self.config.beam_size, -1)\n prevs = torch.cat([prevs, sym_idxs], dim=1)\n\n # zrs:\n prevs_list = prevs.tolist()\n for b in range(batch_size * self.config.beam_size):\n b_list = prevs_list[b]\n if len(b_list) > 2 and b_list[-1] != self.vocab.pad_id and b_list[-1] != self.vocab.eos_id:\n key = (int(b_list[-3]), int(b_list[-2]))\n if key in repeat[b]:\n repeat[b][key].append(int(b_list[-1]))\n else:\n repeat[b][key] = [int(b_list[-1])]\n # ********\n\n if all(is_end.view(-1)):\n break\n\n beam_scores *= penalty\n current_sample_prob *= self.config.annealing\n\n predicts = []\n result = prevs.view(batch_size, self.config.beam_size, -1)\n\n if return_beams:\n bests = torch.argsort(beam_scores, dim=-1, descending=True)\n for i in range(batch_size):\n temp = []\n for j in range(self.config.beam_size):\n best_len = beam_lens[i, bests[i][j]]\n best_seq = result[i, bests[i][j], 1:best_len - 1]\n temp.append(best_seq.tolist())\n predicts.append(temp)\n return predicts\n\n if self.config.sample:\n probs = F.softmax(beam_scores, dim=-1)\n bests = torch.multinomial(probs, 1).view(-1)\n else:\n bests = beam_scores.argmax(dim=-1)\n\n for i in range(batch_size):\n best_len = beam_lens[i, bests[i]]\n best_seq = result[i, bests[i], 1:best_len - 1]\n predicts.append(best_seq.tolist())\n\n return predicts\n","sub_path":"src/model/model_multi_input.py","file_name":"model_multi_input.py","file_ext":"py","file_size_in_byte":11971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"164768242","text":"\"\"\"setup database with flask and created 3 models\n\nRevision ID: 36acd3e36ed6\nRevises: \nCreate Date: 2020-03-12 23:10:30.790448\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '36acd3e36ed6'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('puppies',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.Text(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('owners',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.Text(), nullable=True),\n sa.Column('puppy_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['puppy_id'], ['puppies.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('toys',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('item_name', sa.Text(), nullable=True),\n sa.Column('puppy_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['puppy_id'], ['puppies.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('toys')\n op.drop_table('owners')\n op.drop_table('puppies')\n # ### end Alembic commands ###\n","sub_path":"relationshipsinDB/migrations/versions/36acd3e36ed6_setup_database_with_flask_and_created_3_.py","file_name":"36acd3e36ed6_setup_database_with_flask_and_created_3_.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"92699549","text":"import matplotlib as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pymysql\n\nclass MyManager:\n def __init__(self):\n self.conn = pymysql.connect(host='localhost', user='root', password='java', db='python', charset='utf8')\n self.curs = self.conn.cursor()\n \n def __del__(self): \n self.conn.close()\n \n def getAllScode(self):\n sql = \"SELECT s_code FROM stock GROUP BY s_code \"\n self.curs.execute(sql)\n rows = self.curs.fetchall()\n\n codes = [] \n for row in rows :\n codes.append(row[0])\n return codes\n \n \n def getPrices(self,s_name):\n sql = \"select s_price,in_time from stock WHERE s_name = '\"+s_name+\"' order by in_time desc \"\n self.curs.execute(sql)\n rows = self.curs.fetchall()\n\n prices = [] \n for row in rows :\n prices.append(row[0])\n return prices\n \n def getPricesPer(self,s_name):\n sql = \"select s_price,in_time from stock WHERE s_name = '\"+s_name+\"' order by in_time desc \"\n self.curs.execute(sql)\n rows = self.curs.fetchall()\n\n prices = [] \n p_init = 0;\n for idx, row in enumerate(rows) :\n if idx == 0:\n p_init = row[0]\n prices.append((row[0]/p_init)*100)\n return prices\n \n def getPricesPerNumpy(self,s_name):\n sql = \"select s_price,in_time from stock WHERE s_name = '\"+s_name+\"' order by in_time desc \"\n self.curs.execute(sql)\n rows = self.curs.fetchall()\n\n prices = [] \n p_init = 0;\n for idx, row in enumerate(rows) :\n if idx == 0:\n p_init = row[0]\n prices.append((row[0]/p_init)*100)\n return np.array(prices)\n \n def getPricesPerFromCode(self,s_code):\n sql = \"select s_price,in_time from stock WHERE s_code = '\"+s_code+\"' order by in_time desc \"\n self.curs.execute(sql)\n rows = self.curs.fetchall()\n\n prices = [] \n p_init = 100;\n for idx, row in enumerate(rows) :\n if idx == 0:\n if row[0] > 0:\n p_init = row[0]\n \n per = (row[0]/p_init)*100\n \n if per == 0:\n per = 96\n \n prices.append(per)\n return np.array(prices)\n \n \nmm = MyManager()\nfig = plt.figure()\nax = fig.gca(projection='3d')\ncodes = mm.getAllScode()\nprint(len(codes))\n\nzs = []\nfor code in codes:\n print(\"code:\",code)\n zs.append(mm.getPricesPerFromCode(code))\n \nx = np.zeros(len(zs[0]))\ny = np.array(range(len(zs[0])))\n\n# zs.append(mm.getPricesPerFromCode(\"000020\"))\n# zs.append(mm.getPricesPerFromCode(\"005930\"))\n# zs.append(mm.getPricesPerFromCode(\"000220\"))\n\nfor i,z in enumerate(zs):\n ax.plot(x+i , y, z)\n \n# ax.plot(x+0 , y, zs[0])\n# ax.plot(x+1 , y, zs[1])\n# ax.plot(x+2 , y, zs[2])\n\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Z')\n\nplt.show()","sub_path":"python/day11/mygraph4percentall.py","file_name":"mygraph4percentall.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"288547044","text":"from collections import defaultdict\nimport pickle\n\nif __name__ == '__main__':\n co_occur = defaultdict(lambda: 0)\n count_word = defaultdict(lambda: 0)\n count_context = defaultdict(lambda: 0)\n with open('tokens82.txt', 'r') as 
i_f:\n for i, line in enumerate(i_f):\n t, c = line.split('\\t')\n c = c.split()\n for c_s in c:\n co_occur[t + '\\t' + c_s] += 1\n count_word[c_s] += 1\n count_context[t] += 1\n N = i + 1\n print(N)\n with open('f.dumps', 'wb') as fb:\n pickle.dump((dict(co_occur), dict(count_word), dict(count_context), N), fb)\n","sub_path":"naruhisa/chapter09/knock83.py","file_name":"knock83.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"622977406","text":"#!/usr/bin/env python\n#\n# Test OpenTripPlanner\nimport os\nimport argparse\nfrom datetime import datetime\nimport time\nimport json\nimport logging\nimport requests\nimport sys\nimport socket\nfrom collections import defaultdict\n\n# DEFAULTS\nDEFAULT_URL = 'http://localhost:8080/opentripplanner-api-webapp/ws/plan'\nDATE_TIME_FORMAT = \"%Y-%m-%dT%H:%M:%S\"\nTIMEOUT = 5000 # milliseconds\n\n# GLOBAL USED FOR KEEPING TRACK OF VALIDATION\nVALIDATION = {}\n\nlogger = logging.getLogger('test-otp')\n\n# CONFIG FOR GRAYLOG2\nGELFHOST = ''\nGELFPORT = 0\n\nif (GELFHOST and GELFPORT):\n try:\n # $ pip install gelfHandler\n # From: https://github.com/stewrutledge/gelfHandler/\n from gelfHandler import gelfHandler\n gHandler = gelfHandler(host=GELFHOST,port=GELFPORT,proto='UDP',fromHost=socket.gethostname())\n logger.addHandler(gHandler)\n except ImportError:\n logger.warn(\"GelfHandler not importer, not logging to Gelf server\")\n\n\ndef test_otp(options):\n instream = open(options.input, 'r') if options.input != '-' else sys.stdin\n outstream = open(options.output, 'w', 1) if options.output != '-' else sys.stdout\n\n tests = json.load(instream)\n before_all_tests(tests, options)\n\n for i, test in enumerate(tests):\n before_each_test(test, options, i) # adds test['url'] and test['test_identifier']\n\n # OUT: start of array or seperator\n outstream.write(',\\n' if i > 0 else '[\\n')\n\n logger.info('RUNNING: %s on %s' % (test['test_identifier'], test['url']), extra={'gelfProps':{'test':test['test_identifier'], 'url': test['url']}})\n\n try:\n response = requests.get(test['url'], timeout=options.requesttimeout/1000) \n resultjson = response.json()\n except requests.exceptions.RequestException as e: # This is the correct syntax\n logger.error('REQUESTEXCEPTION: %s on %s' % (test['test_identifier'], test['url']), extra={'gelfProps':{'test':test['test_identifier'], 'url': test['url'], 'requestException': str(e)}})\n resultjson = {}\n\n result = parse_result(test, resultjson)\n\n # OUT: actual result\n json.dump(result, outstream, indent=2, sort_keys=True)\n\n after_each_test(test, result, options, i)\n\n after_all_tests(tests, options)\n\n # OUT: end of array\n outstream.write('\\n]\\n')\n\n if instream is not sys.stdin: instream.close()\n if outstream is not sys.stdout: outstream.close()\n\n\ndef before_all_tests(tests, options):\n VALIDATION['startTime'] = int(round(time.time() * 1000))\n VALIDATION['errorsFound'] = 0\n VALIDATION['highestTestDuration'] = 0\n\n logger.info('BEFOREALLTESTS %s' % options.url,\n extra={'gelfProps':{\n 'startTimestamp': VALIDATION['startTime']\n }})\n \ndef after_all_tests(tests, options):\n VALIDATION['endTime'] = int(round(time.time() * 1000))\n VALIDATION['totalTestDuration'] = (VALIDATION['endTime'] - VALIDATION['startTime'])\n # TODO: Improve actual test duration calculation?\n # VALIDATION['totalTestDurationAbsolute'] = 0\n # for key, val in VALIDATION.items():\n # if 'testDuration' in val:\n # 
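The counts pickled by the knock83 record above (`co_occur`, `count_word`, `count_context`, `N`) are the usual ingredients for pointwise mutual information in the follow-up exercises; that downstream use is inferred, not shown in this corpus. A sketch of positive PMI over the dumped file:

```python
import math
import pickle

with open('f.dumps', 'rb') as fb:  # the file written by knock83 above
    co_occur, count_word, count_context, N = pickle.load(fb)

def ppmi(t, c):
    # positive PMI: max(0, log(N * f(t,c) / (f(t) * f(c)))), mirroring the key
    # layout above: co_occur is keyed by "t\tc", count_context by the target t.
    f_tc = co_occur.get(t + '\t' + c, 0)
    if f_tc == 0:
        return 0.0
    return max(0.0, math.log(N * f_tc / (count_context[t] * count_word[c])))
```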
VALIDATION['totalTestDurationAbsolute'] += val['testDuration']\n \n logger.info('AFTERALLTESTS %s: %s' % (options.url, VALIDATION['totalTestDuration']),\n extra={'gelfProps':{ \n 'url': options.url,\n 'startTimestamp': VALIDATION['startTime'],\n 'endTimestamp': VALIDATION['endTime'],\n 'totalTestDuration': VALIDATION['totalTestDuration'],\n 'errorsFound': VALIDATION['errorsFound'],\n 'highestTestDuration': VALIDATION['highestTestDuration']\n }})\n\n if options.output:\n fileName, fileExtension = os.path.splitext(options.output)\n validationOutputName = '%s_validation%s' % (fileName, fileExtension)\n validationOutput = open(validationOutputName, 'w', 1)\n json.dump(VALIDATION, validationOutput, indent=2, sort_keys=True)\n validationOutput.close()\n\n\n\ndef before_each_test(test, options, i):\n # Extend test object with url and test_identifier\n test['test_identifier'] = readable_test_identifier(test)\n test['url'] = build_url(test, options)\n\n VALIDATION[test['id']] = {\n 'id': test['id'],\n 'startTime': int(round(time.time() * 1000)),\n 'url': test['url'],\n 'test_identifier': test['test_identifier']\n }\n\n\ndef after_each_test(test, result, options, i):\n VALIDATION[test['id']]['endTime'] = int(round(time.time() * 1000))\n VALIDATION[test['id']]['testDuration'] = (VALIDATION[test['id']]['endTime'] - VALIDATION[test['id']]['startTime'])\n VALIDATION[test['id']]['isError'] = result['isError']\n VALIDATION[test['id']]['itineraryDuration'] = 0 if result['isError'] else result['duration']\n VALIDATION[test['id']]['itineraryTransfers'] = 0 if result['isError'] else result['transfers']\n\n if (VALIDATION[test['id']]['testDuration'] > VALIDATION['highestTestDuration']):\n VALIDATION['highestTestDuration'] = VALIDATION[test['id']]['testDuration']\n if (result['isError']):\n VALIDATION['errorsFound'] += 1\n\n logger.info('AFTERTEST %s' % test['id'],\n extra={ 'gelfProps': VALIDATION[test['id']] })\n\n\n\n# UTILS\ndef readable_test_identifier(test):\n return \"Test %s: from %s (%s, %s) to %s (%s, %s)\" % (test['id'],\n test['from']['description'], test['from']['latitude'], test['from']['longitude'],\n test['to']['description'], test['to']['latitude'], test['to']['longitude'])\n\n\ndef build_url(test, options):\n time = datetime.strptime(test['time'], DATE_TIME_FORMAT)\n\n if options.today:\n now = datetime.now()\n time = now.replace(hour=time.hour, minute=time.minute)\n\n coords = lambda c: '%f,%f' % (c['latitude'], c['longitude'])\n params = {\n 'fromPlace': coords(test['from']),\n 'toPlace': coords(test['to']),\n 'date': time.strftime('%Y-%m-%d'),\n 'time': time.strftime('%H:%M:%S'),\n 'arriveBy': (test['timeType'] == 'A'),\n 'maxWalkDistance': 5000,\n 'optimize': 'QUICK',\n 'mode': (test.get('mode') if test.get('mode') else 'WALK,TRANSIT'),\n 'walkSpeed': 1.389,\n 'numItineraries': 1,\n }\n url = options.url + '?' 
+ '&'.join('%s=%s' % (k, v) for k, v in params.items())\n    return url\n\n\ndef parse_result(test, result):\n    if not result or result.get('error') is not None:\n        return parse_error(test, result)\n    else:\n        return parse_itinerary(test, result)\n\n\ndef parse_itinerary(test, result):\n    itinerary = result.get('plan', {}).get('itineraries')[0]\n    return {\n        'id': test['id'],\n        'isError': False,\n        'OTPTotalComputationTime': result.get(\"debug\", {}).get(\"totalTime\"),\n        'OTPTimedout': result.get(\"debug\", {}).get(\"timedOut\"),\n        'transfers': itinerary['transfers'],\n        'departureTime': jsonDateTime(itinerary['startTime']),\n        'arrivalTime': jsonDateTime(itinerary['endTime']),\n        'duration': itinerary['duration'] / 60,  # seconds to minutes\n        'legs': [parse_leg(leg) for leg in itinerary['legs']],\n    }\n\n\ndef parse_leg(leg):\n    d = defaultdict(lambda: 'UNKNOWN')\n    d.update(leg)\n    if leg['mode'] == 'WALK':\n        line = 'walk'\n    else:\n        line = '%(route)s' % d  # format against d so a missing 'route' falls back to 'UNKNOWN'\n    return {\n        'departureTime': jsonDateTime(leg['startTime']),\n        'arrivalTime': jsonDateTime(leg['endTime']),\n        'line': line,\n    }\n\n\ndef parse_error(test, result):\n    return {\n        'id': test.get('id'),\n        'isError': True,\n        'OTPTotalComputationTime': result.get(\"debug\", {}).get(\"totalTime\"),\n        'OTPTimedout': result.get(\"debug\", {}).get(\"timedOut\"),\n        'error': result.get('error', {}).get('msg'),\n    }\n\n\ndef jsonDateTime(timestamp):\n    time = datetime.fromtimestamp(timestamp / 1000)  # milliseconds to seconds\n    return datetime.strftime(time, DATE_TIME_FORMAT)\n\n# Command line handling\n\ndef parse_args(args=None):\n    parser = argparse.ArgumentParser(\n        description='Test OpenTripPlanner using planning data from a test file.')\n    parser.add_argument('input', metavar='INPUT', nargs='?', default='-',\n        help='the test input file (default: stdin)')\n    parser.add_argument('output', metavar='OUTPUT', nargs='?', default='-',\n        help='the test output file (default: stdout)')\n    parser.add_argument('-u', '--url', metavar='URL', default=DEFAULT_URL,\n        help='the OpenTripPlanner URL (default: ' + DEFAULT_URL + ')')\n    parser.add_argument('-d', '--debug', action='store_true',\n        help='show debugging output')\n    parser.add_argument('-t', '--today', action='store_true',\n        help='overrule the dates given in the test data to be on today')\n    parser.add_argument('-r', '--requesttimeout', default=TIMEOUT, type=int,\n        help='the maximum time (in ms) that a request is allowed to have taken (default: ' + str(TIMEOUT) + ')')\n    return parser.parse_args(args)\n\n\ndef main():\n    args = parse_args()\n\n    logging.basicConfig(format='%(message)s', level=logging.WARN)\n    logger.setLevel(logging.DEBUG if args.debug else logging.INFO)\n\n    test_otp(args)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"mmri/test_otp.py","file_name":"test_otp.py","file_ext":"py","file_size_in_byte":9294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"397651441","text":"__author__ = \"Alexander Chang\"\n__jhed__ = \"achang56\"\n__email__ = \"achang56@jhu.edu\"\n__class__ = \"cs475\"\n\n\nimport numpy as np\nfrom activation import Sigmoid\n\n\nclass CostFunction(object):\n\n    def cost(self, W, x, y):\n        raise NotImplementedError()\n\n\nclass LogLoss(CostFunction):\n\n    # J = (1/m) * sum{1}{m}[((-y * log(h(x)) - ((1 - y) * log(1 - h(x)))] + (lambda/2*m)*||(W)||^2\n    # h(x) = g(x, W) = sigmoid(x, W)\n    # dJ/dWij = (1/m)*sum{1}{m}[(h(x) - y) * x] + (lambda/m)*[(L * W)]\n    # L = I (with L[0,0] = 0)\n    def cost(self, W, x, y):\n        \"\"\"\n        Computes the cross entropy 
loss and the gradient with the sigmoid activation function for the batch\n        :param W: Weights ∈ R^num_features\n        :param x: the inputs ∈ R^m*num_features\n        :param y: the outputs ∈ R^m\n        :return: the cost, the gradient\n        \"\"\"\n\n        # the activation function\n        g = Sigmoid()\n\n        # get the number of examples in this batch size\n        m = np.size(y, 0)\n\n        # calculate unregularized cost\n        z = np.matmul(x, W)\n\n        # calculates sum(-y * log(h(x))); matmul sums over the batch, whereas an\n        # elementwise * of a (1, m) and an (m, 1) array would broadcast to (m, m)\n        cost1 = np.matmul(-y.T, np.log(g.activation_function(z)))\n\n        # calculates sum((1 - y) * log(1 - h(x)))\n        cost2 = np.matmul(np.transpose(np.ones((m, 1)) - y), np.log(np.ones((m, 1)) - g.activation_function(z)))\n\n        j = (1 / m) * (cost1 - cost2)\n\n        # calculates the gradient\n        # (1/m)*(h(x) - y) * x\n        grad = np.multiply(1 / m, np.matmul(x.T, g.activation_function(z) - y))\n\n        return j, grad\n\n\nclass ZeroOneLoss(CostFunction):\n\n    # J = sum{1}{m}(max(-y * W' * x))\n    # dJ/dWij = (y - y_hat)x = (y - W' * x) * x\n    def cost(self, W, x, y):\n        \"\"\"\n        Calculates the zero one loss for the batch\n        :param W: Weights ∈ R^num_features\n        :param x: the inputs ∈ R^m*num_features\n        :param y: the outputs ∈ R^m\n        :return: the cost, the gradient\n        \"\"\"\n\n        z = np.matmul(x, W)\n        # does y_hat = sign(W' * x)\n        for idx in range(len(z)):\n            if z[idx] >= 0:\n                z[idx] = 1\n            else:\n                z[idx] = -1\n\n        # does sum{1}{m}(-y * W' * x)\n        j = np.matmul(-y.T, z)\n        # does max(-y * W' * x) - actually the order might be wrong but it works on batch size of 1\n        j = max(np.array([0]), j)\n\n        # computes y * x, if cost > 0 - it's wrong\n        grad = np.zeros((np.size(x, 1), 1))\n        if j != 0:\n            grad = x.T * y\n\n        return j, -grad\n","sub_path":"cost_function.py","file_name":"cost_function.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"381633630","text":"from tastypie import fields\nfrom tastypie.authorization import ReadOnlyAuthorization\n\nfrom dashboard.models import Call\nfrom .fixture import FixtureResource\nfrom dashboard.resources.base import BaseModelResource\n\n\nclass CallResource(BaseModelResource):\n    fixture = fields.ToOneField(FixtureResource, 'fixture', full=True, null=True)\n\n    class Meta:\n        queryset = Call.objects.all()\n        resource_name = 'call'\n        authorization = ReadOnlyAuthorization()\n        limit = 60\n        excludes = ('meta',)\n\n    def dehydrate(self, bundle):\n        from json import loads\n        json_fields = [\n            'callheaders',\n            'cookies',\n            'get',\n            'post',\n            'raw_post'\n        ]\n\n        for field in json_fields:\n            try:\n                bundle.data[field] = loads(bundle.data[field])\n                if 'callheaders' == field and 'HTTP_COOKIE' in bundle.data['callheaders']:\n                    del bundle.data[field]['HTTP_COOKIE']\n            except ValueError as e:\n                bundle.data[field] = ''\n\n        if bundle.obj.fixture is not None:\n            bundle.data['fixture'].data['call_response'] = bundle.obj.fixture_response\n\n        return bundle","sub_path":"dashboard/resources/call.py","file_name":"call.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"121676735","text":"# This code does most of the work for the application. The @app.route code\n# \"listens\" to the website to see what page is being requested.  
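A quick way to sanity-check the analytic gradient in `LogLoss.cost` above is a central-difference comparison. A self-contained sketch with a plain sigmoid standing in for the unshown `activation.Sigmoid` (data and shapes below are made up):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def loss(W, x, y):
    h = sigmoid(x @ W)
    return float(-(y.T @ np.log(h) + (1 - y).T @ np.log(1 - h)) / y.shape[0])

def grad(W, x, y):
    return x.T @ (sigmoid(x @ W) - y) / y.shape[0]

rng = np.random.default_rng(0)
x = rng.normal(size=(8, 3))
y = (rng.random((8, 1)) > 0.5).astype(float)
W = rng.normal(size=(3, 1))

# central difference: perturb each weight by +/- eps and difference the losses
eps = 1e-6
num = np.zeros_like(W)
for i in range(W.size):
    e = np.zeros_like(W)
    e.flat[i] = eps
    num.flat[i] = (loss(W + e, x, y) - loss(W - e, x, y)) / (2 * eps)
print(np.max(np.abs(num - grad(W, x, y))))  # should be ~1e-9 or smaller
```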
If the page's url\n# matches the @app.route then it runs the function defined below it.\n\nfrom app import app\nfrom flask import render_template, redirect, url_for, request, session, flash\nfrom app.classes.data import User, Computer\nfrom app.classes.forms import ComputerForm\n\n@app.route('/newcomputer/<studid>', methods=['GET', 'POST'])\ndef newcomputer(studid):\n form = ComputerForm()\n stud=User.objects.get(pk=studid)\n if form.validate_on_submit():\n # Create the new computer document\n newcomputer = Computer(\n number=form.number.data, \n rmnum=form.rmnum.data, \n servicetag=form.servicetag.data\n )\n\n # Embed the computer Document\n stud.update(\n computer=newcomputer\n )\n return redirect(url_for('computers'))\n if stud.computer:\n form.number.data = stud.computer.number\n form.rmnum.data = stud.computer.rmnum\n form.servicetag.data = stud.computer.servicetag\n\n flash('Fill out the form to create a new computer')\n return render_template('computerform.html', form=form, stud=stud) \n\n@app.route('/computers')\ndef computers():\n computerstuds = User.objects(computer__number__gt = 0)\n return render_template('computers.html', computerstuds=computerstuds)","sub_path":"app/routes/computers.py","file_name":"computers.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"271286841","text":"# Given a rope of length n, cut it into m integer-length segments (m and n are integers, n > 1 and m > 1), and denote the segment lengths k[0], k[1] ... k[m-1]. What is the maximum possible product k[0]*k[1]*...*k[m-1]? For example, when the rope length is 8, cutting it into three segments of lengths 2, 3 and 3 gives the maximum product 18.\n\n\n# Method 1: greedy\n# By mathematical derivation: when n < 4, return n - 1; when n == 4, split into 2 and 2 and return 4; when n > 4, cut off as many length-3 segments as possible\nclass Solution:\n def cuttingRope(self, n: int) -> int:\n if n < 4:\n return n - 1\n if n == 4:\n return n\n res = 1\n while n > 4:\n res *= 3\n n -= 3\n return res * n\n\n\n# Method 2: dynamic programming\nclass Solution:\n def cuttingRope(self, n: int) -> int:\n dp = [0] * (n + 1)\n dp[1] = 1\n for i in range(2, n + 1):\n for j in range(1, i):\n dp[i] = max(dp[i], j * (i - j), j * dp[i - j])\n return dp[-1]","sub_path":"leetcode/sword_to_offer14_1.py","file_name":"sword_to_offer14_1.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"285053634","text":"class MyClass:\n _instance = None\n some_var = None\n\n def __new__(cls, *args, **kwargs):\n if cls._instance is None:\n print('Creating the object')\n cls._instance = super().__new__(cls)\n # Put any initialization here\n return cls._instance\n\n\nif __name__ == '__main__':\n mc = MyClass()\n mc1 = MyClass()\n print(mc is mc1)\n mc.some_var = 'hello'\n print(mc.some_var)\n print(mc1.some_var)","sub_path":"creational_patterns/singleton/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"256575220","text":"from game.spell import Spell\r\n\r\n\r\nclass Heal(Spell):\r\n def __init__(self):\r\n super(Heal, self).__init__()\r\n self.name = 'Heal'\r\n self.manacost = 10\r\n self.require['level'] = 2\r\n self.group = 'HEAL'\r\n self.factor = {'health': 0.05,'base_heal':5,'level':0.1}\r\n\r\n\r\nheal = Heal()\r\n","sub_path":"rpg/game/data/spells/spell_list/heal.py","file_name":"heal.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"373244109","text":"# Copyright 2020 Alexis Lopez Zubieta\n#\n# Permission is hereby granted, free of charge, to any 
person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\nimport logging\n\nimport yaml\nimport os\n\nfrom appimagebuilder.inspector.inspector import Inspector\n\n\nclass BundleInfo:\n def __init__(self, app_dir):\n self.app_dir = app_dir\n self.data = {}\n\n def generate(self):\n # self._fetch_bundlers_report()\n self._fetch_dependencies()\n\n path = self.get_file_name()\n with open(path, \"w\") as f:\n logging.info(\n \"Writing bundle info to: %s\" % os.path.relpath(path, self.app_dir)\n )\n\n app_yaml = yaml.dump(self.data)\n f.write(app_yaml)\n\n def _fetch_dependencies(self):\n inspector = Inspector(self.app_dir)\n self.data[\"dependencies\"] = list(inspector.get_bundle_needed_libs())\n\n def get_file_name(self):\n return os.path.join(self.app_dir, \".bundle.yml\")\n","sub_path":"appimagebuilder/app_dir/app_info/bundle_info.py","file_name":"bundle_info.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"362430023","text":"#!/usr/bin/env python3\nimport ipaddress\nimport os\nimport re\nimport logging\n\n\nclass Unbound:\n def __init__(self, args):\n self.args = args\n\n self.valid_networks = set([\n ipaddress.ip_network(zone)\n for zone\n in args.unbound.split(',')\n ])\n self.route53_domains = [\n '..', # '.' + trailing dot\n '...'\n ]\n self.logger = logging.getLogger('Unbound')\n self.logger.setLevel(self.args.logging)\n\n ch = logging.StreamHandler()\n ch.setLevel(self.args.logging)\n\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n\n self.logger.addHandler(ch)\n\n self.retrieve_all()\n\n def parse_unbound_config(self, name):\n self.logger.info(f'Loading UNBOUND config {name}')\n with open(name) as f:\n current_zone = {\n 'forward-addr': set(),\n 'name': None\n }\n for line in f:\n if line.find(':') == -1:\n continue\n line = [\n part.strip()\n for part in line.split(':')\n ]\n if line[0] == 'forward-zone':\n if current_zone['name'] and current_zone['forward-addr']:\n yield current_zone\n current_zone = {\n 'forward-addr': set(),\n 'name': None\n }\n elif line[0] == 'name':\n temp = re.sub('^[\"\\']*', '', line[1])\n temp = re.sub('[\"\\']*$', '', temp) # trim trail\n temp = re.sub('\\\\.*$', '', temp) # trim trail dot if any\n current_zone['name'] = temp.lower() + '.' 
# append dot\n elif line[0] == 'forward-addr':\n current_zone['forward-addr'].add(\n ipaddress.ip_address(line[1]))\n\n # make sure that last zone is reported\n if current_zone['name'] and current_zone['forward-addr']:\n yield current_zone\n\n def retrieve_all(self):\n self.data = {}\n for name in os.listdir(self.args.path):\n for zone in self.parse_unbound_config(f'{self.args.path}/{name}'):\n if zone['name'] not in self.data:\n self.data[zone['name']] = set()\n self.data[zone['name']].update(zone['forward-addr'])\n\n def zones(self):\n value = set()\n # set arithmetic won't work as we have networks vs addresses\n\n for zone in self.data.items():\n if zone[0] in self.route53_domains:\n continue # exclude '.', '..', etc.\n for valid in self.valid_networks:\n for forward_addr in zone[1]:\n if forward_addr in valid:\n value.add(zone[0])\n\n return value\n","sub_path":"Unbound.py","file_name":"Unbound.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"368842558","text":"# GUI\nimport tkinter\nimport socket\nimport select\nfrom tkinter import ttk\nfrom tkinter import scrolledtext\nfrom tkinter import messagebox\nfrom tkinter import filedialog\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nremoteSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\n# Connects to central server\n# Error checks user input\n# Sends username, hostname, and connection speed to server\ndef connectServer():\n servIP = serverHostText.get()\n port = portText.get()\n username = usernameText.get()\n hostname = userHostText.get()\n speed = speedOps.get()\n\n if len(servIP) > 0 and len(port) > 0 and len(username) > 0 and len(hostname) > 0 and len(speed) > 0:\n serverAddress = (str(servIP), int(port))\n sock.connect(serverAddress)\n messagebox.showinfo(\"Connection\", \"Connected to central server\")\n\n msg = str(username) + \" \" + str(hostname) + \" \" + str(16242) + \" \" + str(speed)\n sock.sendall(msg.encode())\n\n disButton[\"state\"] = tkinter.NORMAL\n connButton[\"state\"] = tkinter.DISABLED\n else:\n messagebox.showerror(\"Invalid Input\", \"Please fill in all fields\")\n\n\n# Browse for a description file\ndef browse():\n description = tkinter.filedialog.askopenfilename()\n filepathText.insert(0, description)\n\n\n# File Description Upload\ndef shareFile():\n path = filepathText.get()\n\n if len(path) > 0:\n try:\n msg = \"FILE_DESC \" + path\n sock.sendall(msg.encode())\n file = open(path, \"rb\")\n sock.sendall(file.read(1024))\n messagebox.showinfo(\"File Description\", \"Description file successfully uploaded\")\n except:\n messagebox.showerror(\"File Error\", \"Description file failed to upload\")\n else:\n messagebox.showerror(\"File Error\", \"Description file not found\")\n\n\n# Disconnect from the central server\ndef disconnect():\n try:\n msg = \"QUIT\"\n sock.sendall(msg.encode())\n sock.close()\n messagebox.showinfo(\"Disconnection\", \"Disconnected from central server\")\n connButton[\"state\"] = tkinter.NORMAL\n disButton[\"state\"] = tkinter.DISABLED\n except:\n messagebox.showerror(\"ERROR\", \"Failure to disconnect from central server\")\n\n\n# Keyword Search\n# Sends keyword to the central server and displays feedback in modified treeview widget\ndef keywordSearch():\n key = searchText.get()\n totalData = []\n dataArray = []\n i = 0\n j = 0\n\n # print(\"HERE \", key)\n\n if len(key) > 0:\n\n # Clear table\n for x in searchResult.get_children():\n searchResult.delete(x)\n\n msg 
= \"KEYWORD_SEARCH \" + key\n sock.sendall(msg.encode())\n\n while (True):\n ready = select.select([sock], [], [], 2)\n if (ready[0]):\n data = sock.recv(1024).decode()\n else:\n break\n totalData.append(data)\n\n # Extract Data\n dataArray = totalData[0].split(\";\")\n\n # Check last item is not an empty item.\n if dataArray[len(dataArray) - 1] == '':\n dataArray.pop(len(dataArray) - 1)\n\n # Place items in the table.\n while (j < ((len(dataArray)) / 4)):\n searchResult.insert(\"\", j + 1, text=dataArray[i],\n values=(dataArray[i + 1], dataArray[i + 2], dataArray[i + 3]))\n j += 1\n i += 4\n else:\n messagebox.showerror(\"Invalid Input\", \"Please enter in a keyword\")\n\n\n# Connection with Remote Host\n# Host can connect, quit, and retrieve files from the remote host\ndef remoteHost():\n command = ftpText.get()\n if len(command) > 0:\n command = command.split()\n if command[0].upper() == \"CONNECT\" and len(command) == 3:\n ftpResult.insert(tkinter.INSERT, \"->\" + ftpText.get() + \"\\n\")\n ftpResult.insert(tkinter.INSERT, \"Connecting...\\n\")\n try:\n ftpAddress = (str(command[1]), int(command[2]))\n remoteSock.connect(ftpAddress)\n ftpResult.insert(tkinter.INSERT, \"Connected to \" + command[1] + \" on port \" + command[2] + \"\\n\")\n except:\n ftpResult.insert(tkinter.INSERT, \"Connection Failed\\n\")\n messagebox.showerror(\"FTP ERROR\", \"Could not connect to the server\")\n elif command[0].upper() == \"RETRIEVE\" and len(command) == 2:\n ftpResult.insert(tkinter.INSERT, \"->\" + ftpText.get() + \"\\n\")\n ftpResult.insert(tkinter.INSERT, \"Retrieving File....\\n\")\n try:\n msg = \"RETRIEVE \" + command[1]\n remoteSock.sendall(msg.encode())\n\n file = open(command[1], 'w')\n data = ''\n\n while (True):\n ready = select.select([remoteSock], [], [], 2)\n if (ready[0]):\n data = remoteSock.recv(1024)\n file.write(data)\n else:\n break\n file.close()\n ftpResult.insert(tkinter.INSERT, \"Retrieved \" + command[1] + \"\\n\")\n except:\n ftpResult.insert(tkinter.INSERT, \"Retrieval Failed\\n\")\n messagebox.showerror(\"FTP ERROR\", \"Could not retrieve file from the server\")\n elif command[0].upper() == \"QUIT\":\n ftpResult.insert(tkinter.INSERT, \"->\" + ftpText.get() + \"\\n\")\n ftpResult.insert(tkinter.INSERT, \"Terminating Connection....\\n\")\n try:\n msg = \"QUIT\"\n remoteSock.sendall(msg.encode())\n remoteSock.close()\n ftpResult.insert(tkinter.INSERT, \"Connection Terminated\\n\")\n except:\n ftpResult.insert(tkinter.INSERT, \"Termination Failed\\n\")\n messagebox.showerror(\"FTP ERROR\", \"Could not terminate connection to the server\")\n elif command[0].upper() == \"HELP\":\n ftpResult.insert(tkinter.INSERT, \"->\" + ftpText.get() + \"\\n\")\n ftpResult.insert(tkinter.INSERT, \"CONNECT \\n\")\n ftpResult.insert(tkinter.INSERT, \"RETRIEVE \\n\")\n ftpResult.insert(tkinter.INSERT, \"QUIT\\n\")\n ftpResult.insert(tkinter.INSERT, \"HELP\\n\")\n else:\n messagebox.showerror(\"Invalid Command\", \"Please enter a command or type HELP\")\n else:\n messagebox.showerror(\"Invalid Command\", \"Please enter a command or type HELP\")\n\n\ngui = tkinter.Tk()\ngui.title(\"NAP Host GUI\")\ngui.geometry(\"750x525\")\n\n##################### CONNECTING TO CENTRAL SERVER ###########################\ncLabel = tkinter.Label(gui, text=\"Connection\", font=(\"-weight bold\", 13))\ncLabel.grid(column=0, row=0, padx=10, sticky=\"W\", columnspan=6)\n\nserverHostLabel = tkinter.Label(gui, text=\"Server Hostname: \")\nserverHostLabel.grid(column=0, row=1)\nserverHostText = tkinter.Entry(gui, 
width=25)\nserverHostText.grid(column=1, row=1, sticky=\"W\")\n\nportLabel = tkinter.Label(gui, text=\"Port: \")\nportLabel.grid(column=2, row=1)\nportText = tkinter.Entry(gui, width=6)\nportText.grid(column=3, row=1, sticky=\"W\")\n\nconnButton = tkinter.Button(gui, text=\"Connect\", width=10, command=connectServer)\nconnButton.grid(column=4, row=1, pady=10, padx=5)\n\ndisButton = tkinter.Button(gui, text=\"Disconnect\", width=10, command=disconnect)\ndisButton[\"state\"] = tkinter.DISABLED\ndisButton.grid(column=5, row=1, pady=10, padx=5)\n\nusernameLabel = tkinter.Label(gui, text=\"Username: \")\nusernameLabel.grid(column=0, row=2)\nusernameText = tkinter.Entry(gui, width=15)\nusernameText.grid(column=1, row=2, sticky=\"W\")\n\nuserHostLabel = tkinter.Label(gui, text=\"Hostname: \")\nuserHostLabel.grid(column=2, row=2)\nuserHostText = tkinter.Entry(gui, width=25)\nuserHostText.grid(column=3, row=2, sticky=\"W\")\n\nspeedLabel = tkinter.Label(gui, text=\"Speed: \")\nspeedLabel.grid(column=4, row=2, padx=10)\nspeedOps = ttk.Combobox(gui, width=10)\nspeedOps[\"values\"] = (\"Ethernet\", \"Fiber Optic\", \"Analog\")\nspeedOps.current(0)\nspeedOps.grid(column=5, row=2, sticky=\"W\")\n\nfilepathLabel = tkinter.Label(gui, text=\"File Description Pathway: \")\nfilepathLabel.grid(column=0, row=3)\nfilepathText = tkinter.Entry(gui, width=50)\nfilepathText.grid(column=1, row=3, columnspan=5, sticky=\"W\")\n\nbrowseFile = tkinter.Button(gui, text=\"Browse\", width=10, command=browse)\nbrowseFile.grid(column=4, row=3, pady=10, padx=5)\n\nsendFile = tkinter.Button(gui, text=\"Send\", width=10, command=shareFile)\nsendFile.grid(column=5, row=3, pady=10, padx=5)\n##############################################################################\n\n######################### KEYWORD SEARCHING ##################################\nsLabel = tkinter.Label(gui, text=\"Keyword Search\", font=(\"-weight bold\", 13))\nsLabel.grid(column=0, row=4, pady=5, padx=10, sticky=\"W\")\n\nsearchLabel = tkinter.Label(gui, text=\"Search: \")\nsearchLabel.grid(column=0, row=5)\nsearchText = tkinter.Entry(gui, width=50)\nsearchText.grid(column=1, row=5, sticky=\"W\", columnspan=3)\n\nsearchButton = tkinter.Button(gui, text=\"Search\", width=20, command=keywordSearch)\nsearchButton.grid(column=4, row=5, columnspan=2)\n\nsearchResult = ttk.Treeview(gui, columns=(\"hostname\", \"port\", \"speed\"), height=4)\nsearchResult.column(\"#0\", width=195, minwidth=195, stretch=tkinter.NO)\nsearchResult.heading(\"#0\", text=\"Filename\", anchor=tkinter.W)\nsearchResult.column(\"hostname\", width=195, minwidth=195, stretch=tkinter.NO)\nsearchResult.heading(\"hostname\", text=\"Hostname\", anchor=tkinter.W)\nsearchResult.column(\"port\", width=120, minwidth=120, stretch=tkinter.NO)\nsearchResult.heading(\"port\", text=\"Port\", anchor=tkinter.W)\nsearchResult.column(\"speed\", width=120, minwidth=120, stretch=tkinter.NO)\nsearchResult.heading(\"speed\", text=\"Speed\", anchor=tkinter.W)\nsearchResult.grid(column=0, row=6, pady=10, padx=20, columnspan=10, rowspan=5)\n##############################################################################\n\n############################# FTP COMMANDS ###################################\nfLabel = tkinter.Label(gui, text=\"FTP\", font=(\"-weight bold\", 13))\nfLabel.grid(column=0, row=11, pady=5, padx=10, sticky=\"W\")\n\nftpLabel = tkinter.Label(gui, text=\"Enter Command: \")\nftpLabel.grid(column=0, row=12)\nftpText = tkinter.Entry(gui, width=50)\nftpText.grid(column=1, row=12, sticky=\"W\", 
columnspan=3)\n\nftpButton = tkinter.Button(gui, text=\"Enter\", width=20, command=remoteHost)\nftpButton.grid(column=4, row=12, columnspan=2)\n\nftpResult = scrolledtext.ScrolledText(gui, width=75, height=7)\nftpResult.grid(column=0, row=13, pady=5, padx=30, columnspan=6, rowspan=5)\n##############################################################################\n\ngui.mainloop()\n","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":10662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123301734","text":"from itertools import combinations\nfrom helpers.misc_functions import prod\n\n\nRUN_TEST = False\nTEST_SOLUTION = 241861950\nTEST_INPUT_FILE = 'test_input_day_01.txt'\nINPUT_FILE = 'input_day_01.txt'\n\nCOMBO_LENGTH = 3\n\n\ndef main(input_file, combo_length):\n with open(input_file) as file:\n lines = file.readlines()\n\n nums = list(map(int, lines))\n num_combinations = combinations(nums, combo_length)\n solution_num_combo = next(filter(lambda num_combo: sum(num_combo) == 2020, num_combinations))\n solution = prod(solution_num_combo) # product of the combo_length numbers that sum to 2020\n\n return solution\n\n\nif __name__ == '__main__':\n if RUN_TEST:\n solution = main(TEST_INPUT_FILE, COMBO_LENGTH)\n assert (TEST_SOLUTION == solution)\n else:\n solution = main(INPUT_FILE, COMBO_LENGTH)\n\n print(solution)\n","sub_path":"day 01/day01_part2.py","file_name":"day01_part2.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"212662924","text":"import argparse \n\n\ndef readTree (inputFile):\n\t\"\"\"\n\treadTree reads the lines in the input file, and forms a tree using dictionaries.\n\tThe result tree is a dictionary, whose keys are the indexes of nodes and the values of the dictionary are pairs of the value of nodes and the list of their children. 
For example tree[1] = (15, [2, 4]) means that node 1 has value 15 and it has two children with indexes of 2 and 4.\n\tAlso, tree[2] = (20, [] ) means that node 2 has value 20, and it has no children (it is a leaf).\n\t\"\"\"\n\ttree = {}\n\tfor line in inputFile:\n\t\titems = line.split();\n\t\tid = int(items[0]);\n\t\tvalue = int(items[1]);\n\t\tchildrenList = [];\n\t\tif len(items) > 2:\n\t\t\tfor it in items[2:]:\n\t\t\t\tchild = int(it);\n\t\t\t\tchildrenList.append(child);\n\t\ttree[id] = (value, childrenList);\t\t\t\t \n\n\treturn tree # read the file\n\ndef printTree(tree, index, level):\n \"\"\"\n\tprintTree prints the tree in the given format.\n\tindex is the index of the current node in the tree \n and level is the level of the current node.\n \"\"\"\n # do recursively\n # access the tuple stored at this index\n value, nodes = tree[index] # value: integer, nodes: list of child indexes\n print(level, index, \";\", value)\n # if nodes is []: # base cases : node has no children\n # return\n for child in nodes: # if nodes is empty, this loop never executes\n printTree(tree, child, level + '-')\n \ndef main():\t\n\tparser = argparse.ArgumentParser(description=\"Print a tree illustration of the given input data\")\n\tparser.add_argument(\"treefile\", type=argparse.FileType('r'), help=\"tree data file\")\n\targs = parser.parse_args();\n\ttree = readTree(args.treefile)\n\t#We suppose that the root of the tree has index 1 and level 0.\n\tprintTree(tree, 1, \"\"); # empty string: the first level has no dash.\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"CIS210 Computer Science I/Labs/lab_6/tree_starter.py","file_name":"tree_starter.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"459414472","text":"#!/usr/bin/python\r\n\r\nimport numpy as np\r\nimport sys\r\n\r\ndef covariance_function(function_name):\r\n '''\r\n Definition of the covariance functions\r\n '''\r\n if function_name == 'gm1':\r\n function = func_gm1\r\n function_name_long = 'Gauss-Markov 1st order'\r\n elif function_name == 'gm2':\r\n function = func_gm2\r\n function_name_long = 'Gauss-Markov 2nd order'\r\n elif function_name == 'reilly':\r\n function = func_reilly\r\n function_name_long = 'Reilly model'\r\n elif function_name == 'hirvonen':\r\n function = func_hirvonen\r\n function_name_long = 'Hirvonen\\'s formula'\r\n elif function_name == 'markov1':\r\n function = func_markov1\r\n function_name_long = 'Markov 1st order'\r\n elif function_name == 'markov2':\r\n function = func_markov2\r\n function_name_long = 'Markov 2nd order'\r\n elif function_name == 'tri':\r\n function = func_tri\r\n function_name_long = 'Triangular model'\r\n elif function_name == 'lauer':\r\n function = func_lauer\r\n function_name_long = 'Lauer'\r\n elif function_name == 'vestol':\r\n function = func_vestol\r\n function_name_long = 'Vestol'\r\n elif function_name == 'gauss':\r\n function = func_gauss\r\n function_name_long = 'Gauss'\r\n elif function_name == 'log':\r\n function = func_log\r\n function_name_long = 'Logarithmic'\r\n else:\r\n sys.exit('ERROR: Chosen function is not supported.')\r\n return function, function_name_long;\r\n\r\n\r\ndef func_gauss(dist, C0, alpha):\r\n '''\r\n Gaussian function with the factor alpha\r\n '''\r\n return C0 * np.exp(-1 * alpha**2 * dist**2);\r\n\r\n\r\ndef func_gm1(dist, C0, d0):\r\n '''\r\n First-order Gauss-Markov process\r\n '''\r\n return C0 * np.exp(-1 * dist / d0);\r\n\r\n\r\ndef func_gm2(dist, C0, d0):\r\n '''\r\n Second-order Gauss-Markov 
process\r\n '''\r\n return C0 * np.exp(-1 * dist**2 / d0**2);\r\n\r\n\r\ndef func_reilly(dist, C0, d0):\r\n '''\r\n Reilly covariance function\r\n '''\r\n return C0 * (1 - 0.5 * (dist / d0)**2) * np.exp(-0.5 * (dist / d0)**2);\r\n\r\n\r\ndef func_hirvonen(dist, C0, d0):\r\n '''\r\n Hirvonen covariance function (usually used for gravity data)\r\n '''\r\n return C0 * (d0**2 / (d0**2 + dist**2));\r\n\r\n\r\ndef func_log(dist, C0, d0, m):\r\n '''\r\n Logarithmic covariance function (m=2 is Hirvonen function)\r\n '''\r\n return C0 * (d0**m / (d0**m + dist**m));\r\n\r\n\r\ndef func_markov1(dist, C0, d0):\r\n '''\r\n First-order Markov covariance function\r\n '''\r\n return C0 * (1 + (dist / d0)) * np.exp(-1 * dist / d0);\r\n\r\n\r\ndef func_markov2(dist, C0, d0):\r\n '''\r\n Second-order Markov covariance function\r\n '''\r\n return C0 * (1 + (dist / d0) + (dist**2 / (3 * d0**2))) * np.exp(-1 * dist / d0);\r\n\r\n\r\ndef func_vestol(dist, C0, a, b):\r\n '''\r\n Covariance function used by Olav Vestol with a=10/400^2 and b=8/400\r\n '''\r\n return C0 * ((a * dist**2) + (b * dist) + 1);\r\n\r\n\r\ndef func_tri(dist, C0, d0):\r\n '''\r\n Triangular covariance function\r\n '''\r\n return C0 * (1 - (dist / (2 * d0)));\r\n\r\n\r\ndef func_lauer(dist, C0, d0):\r\n '''\r\n Lauer covariance function\r\n '''\r\n return C0 / (dist**d0);\r\n","sub_path":"covariance/function_covariance.py","file_name":"function_covariance.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"261600404","text":"import cv2\nimport numpy as np\n\ndef trans():\n img=cv2.imread(r'c:\\\\opencv\\your\\xia_screw.png')\n\n height = img.shape[0];\n width = img.shape[1];\n min_val = 255;\n for i in range(0,height):\n for j in range(0,width):\n val = (int(img[i][j][0])+img[i][j][1]+img[i][j][2])/3\n if min_val > val:\n min_val = val\n img_a = np.full((height,width,1),255,dtype=np.uint8);\n for i in range(0,height):\n for j in range(0,width):\n if (img[i][j] == (255,255,255)).all():\n img_a[i][j] = 0;\n else:\n val = (int(img[i][j][0])+img[i][j][1]+img[i][j][2])/3;\n op_val = int(255-val)*255/(255-min_val)\n if (op_val > 255):\n op_val = 255\n img_a[i][j] = op_val\n b,g,r = cv2.split(img)\n img_t = cv2.merge((b,g,r,img_a))\n rlt = cv2.imwrite(r'c:\\\\opencv\\your\\tmp.png',img_t)\n print(min_val)\n\nimg=cv2.imread(r'c:\\\\opencv\\your\\Untitled.png')\n\nheight = img.shape[0];\nwidth = img.shape[1];\nimg_t = np.full((810,1440,3),255,dtype=np.uint8);\n\nfor i in range(0,810):\n for j in range(0,1440):\n img_t[i][j] = img[i+3][j+4]\n \ncv2.imwrite(r'c:\\\\opencv\\your\\whole.png',img_t)","sub_path":"opencv/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"227082951","text":"# WEBSITE: HackerRank\n# EXERCISE: Sorting: Comparator (Cracking the Coding Interview)\n# SOURCE: https://www.hackerrank.com/challenges/ctci-comparator-sorting\n# LANGUAGE: Python 3 \n\n# RULES: Given an array of n Player objects, write a comparator that sorts them in order of decreasing score; \n# if 2 or more players have the same score, sort those players alphabetically by name. 
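# NOTE (COMPARATOR CONTRACT): functools.cmp_to_key wraps a two-argument
# comparator (negative return -> a sorts first, positive -> b sorts first,
# zero -> tie) into a key object usable by sorted(). A self-contained sketch
# using plain (name, score) tuples instead of the Player class below:
#
#     from functools import cmp_to_key
#     def by_score_then_name(a, b):
#         if a[1] != b[1]:
#             return b[1] - a[1]                # higher score first
#         return -1 if a[0] < b[0] else (1 if a[0] > b[0] else 0)
#     players = [('amy', 100), ('aleksa', 150), ('david', 100)]
#     print(sorted(players, key=cmp_to_key(by_score_then_name)))
#     # [('aleksa', 150), ('amy', 100), ('david', 100)]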
\n\n# SAMPLE INPUT:\n# 5\n# amy 100\n# david 100\n# heraldo 50\n# aakansha 75\n# aleksa 150\n#\n# EXPECTED OUTPUT: \n# aleksa 150\n# amy 100\n# david 100\n# aakansha 75\n# heraldo 50\n\n#!/bin/python3\n\nfrom functools import cmp_to_key\n\nclass Player:\n def __init__(self, name, score):\n self.name = name\n self.score = score\n \n def __repr__(self):\n print(\"%s %s\" %(self.name, self.score))\n\n def comparator(a, b):\n if a.score == b.score:\n if sorted([a.name, b.name]) == [a.name, b.name]:\n return -1\n else:\n return 1\n else:\n return b.score - a.score\n\nn = int(input())\ndata = []\nfor i in range(n):\n name, score = input().split()\n score = int(score)\n player = Player(name, score)\n data.append(player)\n \ndata = sorted(data, key=cmp_to_key(Player.comparator))\nfor i in data:\n print(i.name, i.score)","sub_path":"cracking_the_coding_interview/CtCI_Sorting-Comparator.py","file_name":"CtCI_Sorting-Comparator.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"231054091","text":"import bpy\nimport blf\nimport bgl\nimport gpu\nfrom bpy_extras import view3d_utils\nfrom gpu_extras.batch import batch_for_shader\nfrom mathutils import Vector, Euler\nfrom .cui_functions import *\nfrom .cui_shapes import *\n\n#\n#\n#\n\n\nclass CUIItem(CUIRectWidget):\n def __init__(self, height):\n self.color = (0.0, 0.0, 0.35, 0.5)\n self.color_hover = (0.0, 0.0, 0.4, 0.75)\n\n super().__init__()\n\n self.parts = []\n self.shapes = []\n self.custom_id = None\n\n self.item_type = ''\n\n self.height = height\n self.hover_highlight = True\n self.click_down = False\n\n self.click_down_function = None\n self.click_up_function = None\n\n self.draw_box = False\n\n return\n\n #\n\n def update_batches(self, position):\n super().update_batches(position)\n for part in self.parts:\n part.update_batches(\n [position[0]+self.scale_pos_offset[0], position[1]+self.scale_pos_offset[1]])\n for shape in self.shapes:\n shape.update_batches(\n [position[0]+self.scale_pos_offset[0], position[1]+self.scale_pos_offset[1]])\n return\n\n def draw(self, color_override=None):\n super().draw(color_override)\n for part in self.parts:\n part.draw(color_override, self.click_down)\n\n return\n\n #\n\n def click_down_move(self, mouse_co, shift, pos, arguments=None):\n self.test_hover(mouse_co, pos)\n if self.hover == False:\n self.click_down = False\n return\n\n def click_down_func(self, mouse_co, shift, pos, arguments=None):\n if self.click_down_function:\n click_status = self.click_down_function(self, arguments)\n if click_status:\n return [click_status, self.custom_id]\n return [self.item_type, self.custom_id]\n\n def click_up_func(self, mouse_co, shift, pos, arguments=None):\n if self.click_up_function:\n click_status = self.click_up_function(self, arguments)\n if click_status:\n return [click_status, self.custom_id]\n return [self.item_type, self.custom_id]\n\n def test_hover(self, mouse_co, pos):\n super().test_hover(mouse_co, [pos[0], pos[1]])\n if self.hover:\n for part in self.parts:\n part.test_hover(\n mouse_co, [pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]])\n return\n\n #\n\n def reset_item_states(self, clear_hover):\n if clear_hover:\n self.clear_hover()\n\n self.click_down = False\n self.create_shape_data()\n return\n\n def clear_hover(self):\n self.hover = False\n for part in self.parts:\n part.hover = False\n return\n\n #\n\n def set_scale(self, scale):\n super().set_scale(scale)\n for part in 
self.parts:\n part.set_scale(scale)\n for shape in self.shapes:\n shape.set_scale(scale)\n return\n\n def set_custom_id(self, id):\n self.custom_id = id\n return\n\n def set_font_size(self, size):\n for part in self.parts:\n part.font_size = size\n return\n\n def set_color(self, color=None, color_hover=None, color_outline=None, color_click=None, color_font=None):\n super().set_color(color=color, color_hover=color_hover, color_outline=color_outline)\n for part in self.parts:\n part.set_color(color=color, color_hover=color_hover, color_outline=color_outline,\n color_click=color_click, color_font=color_font)\n\n return\n\n def set_click_up_func(self, func):\n self.click_up_function = func\n return\n\n def set_click_down_func(self, func):\n self.click_down_function = func\n return\n\n #\n\n def __str__(self):\n return 'CUI Item Widget'\n\n\nclass CUIItemWidget(CUIRectWidget):\n def __init__(self, height, text):\n self.color = (0.0, 0.0, 0.35, 0.5)\n self.color_hover = (0.0, 0.0, 0.4, 0.75)\n\n self.color_font = (0.0, 0.0, 1.0, 1.0)\n self.color_click = (0.0, 0.0, 0.6, 1.0)\n\n self.color_font_render = None\n self.color_click_render = None\n\n super().__init__()\n\n self.height = height\n self.hover_highlight = True\n\n self.font_id = 0\n self.font_size = 12\n self.scale_font_size = 12\n\n self.text = text\n self.text_render = text\n self.text_pos = [0, 0]\n self.text_pos_offset = [0, 0]\n # self.text_vert_alignment = 'CENTER'\n # self.text_hor_alignment = 'CENTER'\n self.text_auto_y = True\n self.text_margin = 2\n\n self.shader_img = gpu.shader.from_builtin('2D_IMAGE')\n\n self.icon_pos = [0, 0]\n self.icon_pos_offset = [0, 0]\n self.icon_height = 20\n self.icon_width = 20\n self.icon_img = None\n self.icon_text_side = 'RIGHT'\n self.icon_visible = True\n return\n\n #\n\n def create_shape_data(self, value='', text=None, text_pos=0):\n super().create_shape_data()\n # TEXT\n wid_mid = self.width/2\n icon_w = 0\n if self.icon_img:\n icon_w = self.icon_width\n\n # Icon is bigger than current width so no text or icon will be drawn\n avail_wid = self.width-self.text_margin*2\n if icon_w > avail_wid:\n self.text_render = ''\n self.icon_visible = False\n\n else:\n self.icon_visible = True\n\n # Get current text\n self.text_render = self.text\n if value != '':\n self.text_render = self.text + ': ' + str(value)\n if text != None:\n self.text_render = text\n\n size_width = 0\n size_height = 0\n cur_size = self.font_size\n if self.text_render != '':\n blf.size(self.font_id, self.font_size, 72)\n blf.position(self.font_id, 0, 0, 0)\n size_w = blf.dimensions(self.font_id, self.text_render)\n size_h = blf.dimensions(self.font_id, 'T')\n\n targ_width = size_w[0] * self.scale\n cur_size = self.font_size\n if targ_width != 0 and self.scale != 1.0:\n cur_width = 0\n cur_size = 0\n while cur_width <= targ_width:\n cur_size += 1\n\n blf.size(self.font_id, cur_size, 72)\n cur_width, cur_height = blf.dimensions(\n self.font_id, self.text_render)\n cur_size -= 1\n blf.size(self.font_id, cur_size, 72)\n size_w = blf.dimensions(self.font_id, self.text_render)\n size_h = blf.dimensions(self.font_id, 'T')\n\n # Test that icon and text will fit in item width if not then clip text\n if size_w[0] + (icon_w)*self.scale > avail_wid*self.scale:\n clip_width = 0\n cur_pos = -1\n while clip_width < avail_wid*self.scale:\n cur_pos += 1\n size_w = blf.dimensions(\n self.font_id, self.text_render[:cur_pos] + '...')\n clip_width = size_w[0] + (icon_w)*self.scale\n cur_pos -= 1\n size_w = blf.dimensions(\n self.font_id, 
self.text_render[:cur_pos] + '...')\n\n if cur_pos >= 0:\n self.text_render = self.text_render[:cur_pos] + '...'\n else:\n self.text_render = ''\n size_w = [0, 0]\n size_h = [0, 0]\n size_width = size_w[0]/self.scale\n size_height = size_h[1]/self.scale\n x_co = (wid_mid - (size_width + icon_w)/2)\n\n if self.icon_text_side == 'RIGHT':\n x_co += icon_w\n\n self.scale_font_size = cur_size\n self.text_pos_offset = [x_co, -self.height/2 - size_height/2]\n # ICON\n if self.icon_img:\n x_co = wid_mid - (size_width + icon_w)/2\n y_co = -self.height/2 + self.icon_height/2\n\n if self.icon_text_side == 'LEFT':\n x_co += size_width\n self.icon_pos_offset[0] = x_co\n self.icon_pos_offset[1] = y_co\n\n self.icon_pos = [\n [0, 0],\n [icon_w, 0],\n [icon_w, -self.icon_height],\n [0, -self.icon_height],\n ]\n\n return\n\n def update_batches(self, position):\n super().update_batches(position)\n pos = [position[0]+self.scale_pos_offset[0],\n position[1]+self.scale_pos_offset[1]]\n if self.icon_img:\n i_pos = [pos[0]+self.icon_pos_offset[0]*self.scale,\n pos[1]+self.icon_pos_offset[1]*self.scale]\n points = draw_cos_offset(i_pos, self.scale, self.icon_pos)\n tex_cos = [[0, 0], [1, 0], [1, -1], [0, -1]]\n\n self.batch_icon = batch_for_shader(self.shader_img, 'TRI_FAN', {\n \"pos\": points, \"texCoord\": tex_cos})\n\n if self.text_render != '':\n\n self.text_pos = [pos[0]+self.text_pos_offset[0] *\n self.scale, pos[1]+self.text_pos_offset[1]*self.scale]\n\n return\n\n def update_color_render(self):\n super().update_color_render()\n self.color_font_render = hsv_to_rgb_list(self.color_font)\n self.color_click_render = hsv_to_rgb_list(self.color_click)\n return\n\n def init_shape_data(self):\n self.icon_pos = []\n super().init_shape_data()\n return\n\n def draw(self, color_override=None, click_down=False):\n if color_override:\n super().draw(color_override)\n elif self.hover and click_down:\n super().draw(self.color_click_render)\n else:\n super().draw()\n\n self.icon_draw()\n self.text_draw()\n return\n\n def icon_draw(self):\n if self.visible == True and self.icon_img:\n if self.icon_img.gl_load():\n raise Exception()\n\n disable = bgl.glIsEnabled(bgl.GL_BLEND)\n if disable == False:\n bgl.glEnable(bgl.GL_BLEND)\n bgl.glActiveTexture(bgl.GL_TEXTURE0)\n bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.icon_img.bindcode)\n self.shader_img.bind()\n self.shader_img.uniform_int(\"image\", 0)\n self.batch_icon.draw(self.shader_img)\n if disable == False:\n bgl.glDisable(bgl.GL_BLEND)\n return\n\n def text_draw(self):\n if self.visible == True and self.text_render != '':\n blf.size(self.font_id, self.scale_font_size, 72)\n blf.position(self.font_id, self.text_pos[0], self.text_pos[1], 0)\n blf.color(\n self.font_id, self.color_font_render[0], self.color_font_render[1], self.color_font_render[2], self.color_font_render[3])\n blf.draw(self.font_id, self.text_render)\n return\n\n #\n\n def clear_hover(self):\n self.hover = False\n return\n\n #\n\n def set_custom_id(self, id):\n self.custom_id = id\n return\n\n def set_text(self, text):\n self.text = text\n return\n\n def set_color(self, color=None, color_hover=None, color_outline=None, color_click=None, color_font=None):\n if color_font:\n self.color_font = color_font\n if color_click:\n self.color_click = color_click\n\n super().set_color(color=color, color_hover=color_hover, color_outline=color_outline)\n\n return\n\n def set_icon_data(self, image=None, width=None, height=None, text_side=None):\n if image != None:\n self.icon_img = image\n if width != None:\n 
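            # The loop above grew cur_pos until prefix + '...' plus the icon
            # overflowed the available width, then stepped back one character,
            # so size_w now measures the longest prefix that still fits.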
self.icon_width = width\n if height != None:\n self.icon_height = height\n if text_side != None:\n self.icon_text_side = text_side\n return\n\n #\n\n def __str__(self):\n return 'CUI Item Widget'\n\n\nclass CUICheckWidget(CUIItemWidget):\n def __init__(self, height, default_val=False):\n self.color_check = (0.0, 0.0, 1.0, 0.75)\n self.color_bool = (0.62, 0.5, 0.75, 0.75)\n\n self.color_check_render = None\n self.color_bool_render = None\n\n super().__init__(height, '')\n\n self.item_type = 'CHECKBOX'\n\n self.icon_img_false = None\n self.icon_img_true = None\n\n self.bool_val = default_val\n self.bool_box_size = height-6\n self.bool_thickness = 3\n\n self.draw_check = True\n self.text_margin = 0\n\n self.bevel_size = 4\n self.bevel_res = 3\n return\n\n #\n\n def create_shape_data(self):\n offset = (self.height-self.bool_box_size)/2\n\n super().create_shape_data()\n\n bool_fac = int(self.bool_box_size*0.25)\n self.check_lines = [\n [self.bool_box_size-bool_fac, -offset-bool_fac],\n [int(self.bool_box_size*.4), -offset-self.bool_box_size+bool_fac],\n [int(self.bool_box_size*.4), -offset-self.bool_box_size+bool_fac],\n [bool_fac, -offset-self.bool_box_size+int(self.bool_box_size*.5)]\n ]\n\n return\n\n def init_shape_data(self):\n self.check_lines = []\n super().init_shape_data()\n return\n\n def update_batches(self, position):\n pos = [position[0]+self.scale_pos_offset[0],\n position[1]+self.scale_pos_offset[1]]\n\n lines = draw_cos_offset(pos, self.scale, self.check_lines)\n\n self.batch_check = batch_for_shader(\n self.shader, 'LINES', {\"pos\": lines})\n super().update_batches(position)\n return\n\n def update_color_render(self):\n super().update_color_render()\n self.color_check_render = hsv_to_rgb_list(self.color_check)\n self.color_bool_render = hsv_to_rgb_list(self.color_bool)\n return\n\n def draw(self, color_override=None, click_down=False):\n if self.bool_val:\n super().draw(self.color_bool_render, click_down=click_down)\n else:\n super().draw(click_down=click_down)\n if self.bool_val and self.draw_check:\n\n bgl.glLineWidth(self.bool_thickness)\n self.shader.bind()\n self.shader.uniform_float(\"color\", self.color_check_render)\n self.batch_check.draw(self.shader)\n\n return\n\n #\n\n def set_color(self, color=None, color_hover=None, color_outline=None, color_click=None, color_font=None, color_check=None, color_bool=None):\n if color_check:\n self.color_check = color_check\n if color_bool:\n self.color_bool = color_bool\n\n super().set_color(color=color, color_hover=color_hover,\n color_outline=color_outline, color_click=color_click, color_font=color_font)\n\n return\n\n def set_true_icon(self, image):\n self.icon_img_true = image\n return\n\n def set_false_icon(self, image):\n self.icon_img_false = image\n return\n\n def set_bool(self, status):\n self.bool_val = status\n if self.icon_img_true != None and status:\n super().set_icon_data(image=self.icon_img_true)\n\n if self.icon_img_false != None and not status:\n super().set_icon_data(image=self.icon_img_false)\n\n return\n\n #\n\n def __str__(self):\n return 'CUI Check Widget'\n\n\n#\n#\n#\n\n\nclass CUIButton(CUIItem):\n def __init__(self, height, text):\n self.color_bool = (0.62, 0.5, 0.75, 0.75)\n\n self.color_bool_render = None\n\n super().__init__(height)\n\n self.item_type = 'BUTTON'\n\n self.bool = False\n\n button = CUIItemWidget(height, text)\n button.hover_highlight = self.hover_highlight\n\n self.parts.append(button)\n return\n\n #\n\n def update_color_render(self):\n super().update_color_render()\n 
self.color_bool_render = hsv_to_rgb_list(self.color_bool)\n return\n\n def create_shape_data(self, value='', text=None, text_pos=0, x_offset=0):\n super().create_shape_data()\n for part in self.parts:\n part.height = self.height\n part.width = self.width\n part.create_shape_data()\n for shape in self.shapes:\n shape.create_shape_data()\n return\n\n def draw(self):\n if self.bool:\n for part in self.parts:\n part.draw(self.color_bool_render, click_down=self.click_down)\n else:\n for part in self.parts:\n part.draw(click_down=self.click_down)\n\n for shape in self.shapes:\n shape.draw()\n\n return\n\n #\n\n def add_poly_shape(self, coords):\n shape = CUIPolyWidget()\n shape.set_base_points(coords)\n\n self.shapes.append(shape)\n return shape\n\n #\n\n def get_text(self):\n return self.parts[0].text\n\n #\n\n def set_bool(self, status):\n self.bool = status\n return\n\n def test_hover(self, mouse_co, pos):\n super().test_hover(mouse_co, [pos[0], pos[1]])\n status = None\n if self.hover:\n status = 'BUTTON'\n return status\n\n def set_text(self, text=None):\n if text != None:\n self.parts[0].set_text(text)\n return\n\n def set_draw_box(self, status):\n self.parts[0].draw_box = status\n return\n\n def set_bool_color(self, color):\n self.color_bool = color\n\n self.update_color_render()\n return\n\n def set_icon_data(self, image=None, width=None, height=None, text_side=None):\n self.parts[0].set_icon_data(\n image=image, width=width, height=height, text_side=text_side)\n return\n\n #\n\n def __str__(self):\n return 'CUI Button'\n\n\nclass CUIBoolProp(CUIItem):\n def __init__(self, height, text, default_val=False):\n super().__init__(height)\n\n self.item_type = 'BOOLEAN'\n\n self.bool_val = default_val\n self.use_button = True\n\n check_box = CUICheckWidget(height, default_val)\n button = CUIItemWidget(height, text)\n check_box.hover_highlight = self.hover_highlight\n button.hover_highlight = self.hover_highlight\n\n self.parts.append(check_box)\n self.parts.append(button)\n return\n\n #\n\n def create_shape_data(self):\n super().create_shape_data()\n offset = (self.height-self.parts[0].bool_box_size)/2\n\n for p, part in enumerate(self.parts):\n part.pos_offset = [0, 0]\n if p == 0:\n part.pos_offset[1] = -offset\n\n part.height = part.bool_box_size\n part.width = part.bool_box_size\n if p == 1:\n part.pos_offset[0] = self.parts[0].bool_box_size + offset\n\n part.height = self.height\n part.width = self.width - self.parts[0].bool_box_size - offset\n\n part.create_shape_data()\n return\n\n def draw(self):\n if self.visible == True:\n for p, part in enumerate(self.parts):\n if p == 0:\n part.draw(click_down=self.click_down)\n else:\n if self.use_button:\n part.draw(click_down=self.click_down)\n\n return\n\n #\n\n def click_up_func(self, mouse_co, shift, pos, arguments=None):\n self.toggle_bool()\n status = super().click_up_func(mouse_co, shift, pos, arguments)\n return status\n\n def test_hover(self, mouse_co, pos):\n super().test_hover(mouse_co, [pos[0], pos[1]])\n status = None\n if self.hover:\n status = 'BOOLEAN'\n self.parts[0].hover = True\n self.parts[1].hover = True\n else:\n self.parts[0].hover = False\n self.parts[1].hover = False\n return status\n\n def toggle_bool(self):\n self.bool_val = not self.bool_val\n self.parts[0].set_bool(self.bool_val)\n return\n\n #\n\n def set_check_icon(self, image_true=None, image_false=None):\n if image_true != None:\n self.parts[0].set_true_icon(image_true)\n if image_false != None:\n self.parts[0].set_false_icon(image_false)\n\n if self.bool_val:\n 
self.parts[0].set_icon_data(\n image=image_true, width=self.parts[0].bool_box_size-2, height=self.parts[0].bool_box_size-2)\n else:\n self.parts[0].set_icon_data(\n image=image_false, width=self.parts[0].bool_box_size-2, height=self.parts[0].bool_box_size-2)\n\n self.parts[0].draw_check = False\n return\n\n def set_use_button(self, status):\n self.use_button = status\n return\n\n def set_bool(self, status):\n self.bool_val = status\n self.parts[0].set_bool(status)\n return\n\n #\n\n def __str__(self):\n return 'CUI Boolean Prop'\n\n\nclass CUINumProp(CUIItem):\n def __init__(self, height, text, default, decimals, step, min, max):\n self.color_perc_bar = (0.0, 0.0, 0.3, 0.75)\n self.color_perc_bar_hover = (0.0, 0.0, 0.4, 0.75)\n self.color_arrow_box = (0.0, 0.0, 0.4, 1)\n self.color_arrow_box_hover = (0.0, 0.0, 0.5, 1)\n self.color_arrow = (0.0, 0.0, 1.0, 0.75)\n\n self.color_arrow_render = None\n\n super().__init__(height)\n\n self.item_type = 'NUMBER'\n\n self.value_change_function = None\n\n self.arrow_width = self.height-8\n\n self.draw_box = True\n\n self.slidable = True\n\n self.init_click_loc = [0, 0]\n self.sliding = False\n self.typing = False\n\n self.type_string = ''\n self.type_pos = 0\n\n self.value = default\n self.round_decis = decimals\n self.value_min = min\n self.value_max = max\n self.slide_fac = 5.0\n self.value_step = step\n self.shift_value_step = step * 2\n self.slide_value_step = step * .1\n\n round_min = 1\n for i in range(decimals):\n round_min *= .1\n if self.slide_value_step < round_min:\n self.slide_value_step = round_min\n\n self.draw_backdrop = True\n\n perc_bar = CUIItemWidget(height, '')\n left_arrow = CUIItemWidget(height, '')\n right_arrow = CUIItemWidget(height, '')\n text_bar = CUIItemWidget(height, text)\n\n left_arrow.bevel_inds = [0, 3]\n right_arrow.bevel_inds = [1, 2]\n\n perc_bar.hover_highlight = self.hover_highlight\n left_arrow.hover_highlight = self.hover_highlight\n right_arrow.hover_highlight = self.hover_highlight\n text_bar.hover_highlight = self.hover_highlight\n\n text_bar.draw_box = False\n\n self.parts.append(perc_bar)\n self.parts.append(left_arrow)\n self.parts.append(right_arrow)\n self.parts.append(text_bar)\n return\n\n #\n\n def create_shape_data(self):\n self.init_shape_data()\n\n arrow_box_size = self.height\n if self.width < self.height*3:\n arrow_box_size = self.width/3\n\n for p, part in enumerate(self.parts):\n part.height = self.height\n part.pos_offset = [0, 0]\n if p == 0:\n perc = (self.value-self.value_min) / \\\n (self.value_max-self.value_min)\n part.set_color(color=self.color_perc_bar,\n color_hover=self.color_perc_bar_hover)\n part.pos_offset[0] = arrow_box_size\n part.width = (self.width - arrow_box_size*2) * perc\n part.create_shape_data()\n if p == 1:\n part.set_color(color=self.color_arrow_box,\n color_hover=self.color_arrow_box_hover)\n part.width = arrow_box_size\n part.create_shape_data()\n if p == 2:\n part.set_color(color=self.color_arrow_box,\n color_hover=self.color_arrow_box_hover)\n part.pos_offset[0] = self.width - arrow_box_size\n part.width = arrow_box_size\n part.create_shape_data()\n if p == 3:\n part.pos_offset[0] = arrow_box_size\n part.width = self.width - arrow_box_size*2\n if self.typing:\n string = self.type_string[:self.type_pos] + \\\n '|' + self.type_string[self.type_pos:]\n part.create_shape_data(\n value=self.value, text=string, text_pos=self.type_pos)\n else:\n part.create_shape_data(value=self.value)\n\n if arrow_box_size < self.arrow_width+2:\n self.arrow_pos = [[0, 0], [0, 0], [0, 0], 
[0, 0], [0, 0], [0, 0], ]\n else:\n offset = (arrow_box_size - self.arrow_width)/2\n self.arrow_pos = [[offset+self.arrow_width, -offset], [offset, -self.height/2], [offset+self.arrow_width, -self.height+offset],\n [self.width-arrow_box_size+offset, -offset], [self.width-offset, -self.height/2], [self.width-arrow_box_size+offset, -self.height+offset], ]\n return\n\n def init_shape_data(self):\n self.arrow_pos = []\n self.arrow_tris = [[0, 1, 2], [3, 4, 5]]\n super().init_shape_data()\n return\n\n def update_batches(self, position):\n pos = [position[0]+self.scale_pos_offset[0],\n position[1]+self.scale_pos_offset[1]]\n\n arrows = draw_cos_offset(pos, self.scale, self.arrow_pos)\n\n self.batch_arrows = batch_for_shader(\n self.shader, 'TRIS', {\"pos\": arrows}, indices=self.arrow_tris)\n super().update_batches(position)\n return\n\n def update_color_render(self):\n super().update_color_render()\n\n self.color_arrow_render = hsv_to_rgb_list(self.color_arrow)\n return\n\n def draw(self):\n if self.visible == True:\n # if self.parts[0].hover:\n # super().draw(self.color_hover)\n # else:\n # super().draw()\n\n for p, part in enumerate(self.parts):\n if p == 0:\n if self.typing == False:\n part.draw(click_down=self.click_down)\n elif p < 4:\n part.draw(click_down=self.click_down)\n\n self.shader.bind()\n self.shader.uniform_float(\"color\", self.color_arrow_render)\n self.batch_arrows.draw(self.shader)\n\n return\n\n #\n\n def click_down_func(self, mouse_co, shift, pos, arguments=None):\n self.typing = False\n for p, part in enumerate(self.parts):\n if part.hover:\n self.init_click_loc = mouse_co\n # perc bar\n if p == 0:\n return ['NUMBER_BAR', self.custom_id]\n # left arrow\n if p == 1:\n return ['NUMBER_L_ARROW', self.custom_id]\n # right arrow\n if p == 2:\n return ['NUMBER_R_ARROW', self.custom_id]\n return None\n\n def click_up_func(self, mouse_co, shift, pos, arguments=None):\n for p, part in enumerate(self.parts):\n skip_update = False\n if part.hover:\n self.init_click_loc = [0, 0]\n\n if self.sliding:\n if self.value_change_function:\n self.value_change_function(self, arguments)\n self.sliding = False\n bpy.context.window.cursor_modal_set('DEFAULT')\n return ['NUMBER_SLIDE', self.custom_id]\n\n else:\n if p == 0:\n self.typing = True\n self.create_shape_data()\n return ['NUMBER_BAR_TYPE', self.custom_id]\n\n if p == 1:\n if shift:\n self.offset_value(-self.shift_value_step)\n else:\n self.offset_value(-self.value_step)\n\n if self.value_change_function:\n skip_update = self.value_change_function(\n self, arguments)\n\n if not skip_update:\n self.create_shape_data()\n self.update_batches(pos)\n return ['NUMBER_L_ARROW', self.custom_id]\n\n if p == 2:\n if shift:\n self.offset_value(self.shift_value_step)\n else:\n self.offset_value(self.value_step)\n\n if self.value_change_function:\n skip_update = self.value_change_function(\n self, arguments)\n\n if not skip_update:\n self.create_shape_data()\n self.update_batches(pos)\n return ['NUMBER_R_ARROW', self.custom_id]\n return None\n\n def click_down_move(self, mouse_co, shift, pos, arguments=None):\n for p, part in enumerate(self.parts):\n if p == 3:\n continue\n skip_update = False\n if part.hover:\n if self.sliding == False:\n if abs(self.init_click_loc[0] - mouse_co[0]) > 5 and self.slidable:\n self.sliding = True\n bpy.context.window.cursor_modal_set('NONE')\n else:\n self.test_hover(mouse_co, pos)\n\n else:\n # calc if slide is far enough to iterate\n diff = mouse_co[0] - self.init_click_loc[0]\n\n min_val = 1.0 * self.slide_fac\n\n 
if shift:\n diff *= .1\n\n if abs(diff) >= min_val:\n iters = int(abs(diff)/min_val)\n if iters == 0:\n iters = 1\n\n if diff >= 0:\n self.offset_value(self.slide_value_step*iters)\n else:\n self.offset_value(-self.slide_value_step*iters)\n\n if self.value_change_function:\n skip_update = self.value_change_function(\n self, arguments)\n\n if not skip_update:\n self.create_shape_data()\n self.update_batches(pos)\n\n bpy.context.window.cursor_warp(\n bpy.context.region.x + self.init_click_loc[0], bpy.context.region.y + self.init_click_loc[1])\n\n return\n\n def test_hover(self, mouse_co, pos):\n super().test_hover(mouse_co, [pos[0], pos[1]])\n status = None\n if self.hover:\n status = 'NUMBER'\n if self.parts[1].hover == False and self.parts[2].hover == False:\n self.parts[0].hover = True\n return status\n\n def offset_value(self, offset):\n self.set_value(self.value + offset)\n return\n\n #\n\n def reset_item_states(self, clear_hover):\n # if clear_hover:\n # self.clear_hover()\n self.sliding = False\n # bpy.context.window.cursor_modal_set('DEFAULT')\n self.type_string = ''\n self.type_pos = 0\n self.typing = False\n # self.click_down = False\n super().reset_item_states(clear_hover)\n return\n\n #\n\n def type_add_key(self, key):\n if self.typing:\n change = False\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', ]\n operands = ['+', '-', '/', '*', ]\n\n if key in numbers:\n self.type_string = self.type_string[:self.type_pos] + \\\n key + self.type_string[self.type_pos:]\n change = True\n\n if key in operands:\n existing = False\n for op in operands:\n if op in self.type_string:\n existing = True\n\n if existing == False:\n self.type_string = self.type_string[:self.type_pos] + \\\n key + self.type_string[self.type_pos:]\n change = True\n\n if key == '.':\n op_found = False\n for op in operands:\n if op in self.type_string:\n op_found = True\n sides = self.type_string.split(op)\n\n if len(sides) >= self.type_pos:\n if '.' not in sides[0]:\n self.type_string = self.type_string[:self.type_pos] + \\\n key + self.type_string[self.type_pos:]\n change = True\n else:\n if '.' not in sides[1]:\n self.type_string = self.type_string[:self.type_pos] + \\\n key + self.type_string[self.type_pos:]\n change = True\n\n if op_found == False:\n if '.' 
not in self.type_string:\n self.type_string = self.type_string[:self.type_pos] + \\\n key + self.type_string[self.type_pos:]\n change = True\n\n if change:\n self.type_pos += 1\n self.create_shape_data()\n return\n\n def type_backspace_key(self):\n if self.typing:\n if self.type_pos > 0:\n self.type_string = self.type_string[:self.type_pos -\n 1] + self.type_string[self.type_pos:]\n self.type_pos -= 1\n self.create_shape_data()\n return\n\n def type_delete_key(self):\n if self.typing:\n if self.type_pos < len(self.type_string):\n self.type_string = self.type_string[:self.type_pos] + \\\n self.type_string[self.type_pos+1:]\n self.create_shape_data()\n return\n\n def type_move_pos(self, value):\n if self.typing:\n self.type_pos += value\n if self.type_pos < 0:\n self.type_pos = 0\n if self.type_pos > len(self.type_string):\n self.type_pos = len(self.type_string)\n self.create_shape_data()\n return\n\n def type_confirm(self, arguments=None):\n if self.typing and self.type_string:\n operands = ['+', '-', '/', '*', ]\n op_found = False\n for op in operands:\n if op in self.type_string:\n op_found = True\n sides = self.type_string.split(op)\n value = self.value\n\n if op == '+':\n value = float(sides[0])+float(sides[1])\n\n if op == '-':\n value = float(sides[0])-float(sides[1])\n\n if op == '/':\n if float(sides[1]) != 0.0:\n value = float(sides[0])/float(sides[1])\n\n if op == '*':\n value = float(sides[0])*float(sides[1])\n self.set_value(value)\n if self.value_change_function:\n self.value_change_function(self, arguments)\n\n if op_found == False:\n value = float(self.type_string)\n self.set_value(value)\n if self.value_change_function:\n self.value_change_function(self, arguments)\n\n self.type_string = ''\n self.type_pos = 0\n self.typing = False\n self.create_shape_data()\n return\n\n def type_cancel(self):\n self.type_string = ''\n self.type_pos = 0\n self.typing = False\n self.create_shape_data()\n return\n\n #\n\n def set_slidable(self, status):\n self.slidable = status\n return\n\n def set_value_step(self, step=None, shift_step=None):\n if step != None:\n self.value_step = step\n if shift_step != None:\n self.shift_value_step = shift_step\n return\n\n def set_slide_value_step(self, step):\n self.slide_value_step = step\n return\n\n def set_value(self, value):\n self.value = value\n\n if self.value_min != None:\n if self.value < self.value_min:\n self.value = self.value_min\n\n if self.value_max != None:\n if self.value > self.value_max:\n self.value = self.value_max\n\n self.value = round(self.value, self.round_decis)\n if self.round_decis == 0:\n self.value = int(self.value)\n return\n\n def set_slide_factor(self, fac):\n self.slide_fac = fac\n return\n\n def set_arcolor_row(self, color_box=None, color_box_hover=None):\n for p, part in enumerate(self.parts):\n if p == 1 or p == 2:\n if color_box:\n self.color = color_box\n if color_box_hover:\n self.color_hover = color_box_hover\n\n self.update_color_render()\n return\n\n def set_bev(self, size, res):\n self.parts[1].set_bev(size, res)\n self.parts[2].set_bev(size, res)\n super().set_bev(size, res)\n return\n\n def set_value_change_func(self, func):\n self.value_change_function = func\n return\n\n #\n\n def __str__(self):\n return 'CUI Number Prop'\n\n\nclass CUILabel(CUIItem):\n def __init__(self, height, text):\n super().__init__(height)\n\n self.item_type = 'LABEL'\n\n txt_box = CUIItemWidget(height, text)\n txt_box.hover_highlight = False\n txt_box.draw_box = False\n\n self.parts.append(txt_box)\n return\n\n #\n\n def 
create_shape_data(self, value='', text=None, text_pos=0, x_offset=0):\n super().create_shape_data()\n for part in self.parts:\n part.height = self.height\n part.width = self.width\n part.create_shape_data()\n return\n\n #\n\n def click_down_func(self, mouse_co, shift, pos, arguments=None):\n return None\n\n def click_up_func(self, mouse_co, shift, pos, arguments=None):\n return None\n\n def test_hover(self, mouse_co, pos):\n return None\n\n #\n\n def reset_item_states(self, clear_hover):\n return\n\n #\n\n def draw(self):\n super().draw()\n return\n\n def set_text(self, text):\n self.parts[0].set_text(text)\n return\n\n def set_icon_data(self, image=None, width=None, height=None, text_side=None):\n self.parts[0].set_icon_data(\n image=image, width=width, height=height, text_side=text_side)\n return\n\n #\n\n def __str__(self):\n return 'CUI Label'\n\n\n#\n#\n#\n\n\nclass UIGizmoContainer:\n def __init__(self, index, mat, size, axis, scale):\n self.shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')\n self.index = index\n self.gizmos = []\n self.matrix = mat\n self.scale_factor = None\n self.size = size\n\n self.visible = False\n\n self.scale = scale\n\n return\n\n def update_size(self, size):\n self.size = size\n for giz in self.gizmos:\n giz.size = self.size\n self.create_shape_data(self.matrix)\n return\n\n def create_shape_data(self, matrix):\n for gizmo in self.gizmos:\n gizmo.create_shape_data()\n\n self.update_position(matrix)\n return\n\n def update_position(self, matrix, ang=0):\n self.matrix = matrix\n self.scale_factor = None\n for gizmo in self.gizmos:\n self.scale_factor = gizmo.update_position(\n matrix, self.scale_factor, ang)\n return\n\n def update_rot(self, ang, start_ang):\n for gizmo in self.gizmos:\n if gizmo.in_use:\n gizmo.update_rot_fan(\n self.matrix, self.scale_factor, ang, start_ang)\n return\n\n def update_orientation(self, matrix):\n self.matrix = matrix\n for gizmo in self.gizmos:\n gizmo.update_position(self.matrix, self.scale_factor)\n return\n\n def draw(self):\n if self.visible:\n for i in range(len(self.gizmos)):\n if self.gizmos[i*-1-1].active:\n self.gizmos[i*-1-1].draw()\n return\n\n def clear_hover(self):\n for giz in self.gizmos:\n giz.hover = False\n return\n\n def test_hover(self, mouse_co):\n if self.visible:\n\n for giz in self.gizmos:\n if giz.active:\n hov = giz.test_hover(mouse_co)\n if hov:\n return True\n return False\n\n def set_scale(self, scale):\n self.scale = scale\n for giz in self.gizmos:\n giz.set_scale(self.scale)\n self.create_shape_data(self.matrix)\n return\n\n def set_visibility(self, status):\n self.visible = status\n return\n\n\nclass UIRotateGizmo:\n def __init__(self, index, size, scale, axis, giz_type, color, thickness=6):\n self.shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')\n self.index = index\n\n self.size = size\n self.scale = scale\n self.resolution = 36\n self.points = []\n self.mat_points = []\n self.axis = axis\n self.color = color\n self.active = True\n self.hover = False\n self.thickness = thickness\n self.type = giz_type\n self.in_use = False\n self.prev_screen_size = size\n\n return\n\n def test_hover(self, mouse_co):\n region = bpy.context.region\n rv3d = bpy.context.region_data\n\n mouse_co = Vector((mouse_co[0], mouse_co[1]))\n\n min_x = 0\n max_x = 0\n min_y = 0\n max_y = 0\n rcos = []\n for c, co in enumerate(self.mat_points):\n rco = view3d_utils.location_3d_to_region_2d(region, rv3d, co)\n if rco == None:\n rcos.append(None)\n continue\n\n if c == 0:\n min_x = rco[0]\n max_x = rco[0]\n min_y = 
rco[1]\n max_y = rco[1]\n else:\n if rco[0] < min_x:\n min_x = rco[0]\n if rco[0] > max_x:\n max_x = rco[0]\n if rco[1] < min_y:\n min_y = rco[1]\n if rco[1] > max_y:\n max_y = rco[1]\n\n rcos.append(rco)\n\n self.hover = False\n if min_x < mouse_co[0] < max_x and min_y < mouse_co[1] < max_y:\n for tri in self.tris:\n co1 = rcos[tri[0]]\n co2 = rcos[tri[1]]\n co3 = rcos[tri[2]]\n\n if co1 == None or co2 == None or co3 == None:\n continue\n\n vec1 = mouse_co - co1\n vec2 = mouse_co - co2\n vec3 = mouse_co - co3\n\n tri_vec1 = co2-co1\n tri_vec2 = co3-co1\n tri_vec3 = co2-co3\n\n t_ang = tri_vec1.angle_signed(tri_vec2)\n m_ang = tri_vec1.angle_signed(vec1)\n if t_ang < 0.0:\n if m_ang < t_ang or m_ang > 0.0:\n continue\n else:\n if m_ang > t_ang or m_ang < 0.0:\n continue\n\n t_ang = tri_vec3.angle_signed(-tri_vec2)\n m_ang = tri_vec3.angle_signed(vec3)\n if t_ang < 0.0:\n if m_ang < t_ang or m_ang > 0.0:\n continue\n else:\n if m_ang > t_ang or m_ang < 0.0:\n continue\n\n self.hover = True\n return self.hover\n\n return self.hover\n\n def init_shape_data(self):\n self.points = []\n self.tris = []\n self.mat_points = []\n self.fan_points = []\n self.fan_tris = []\n self.fan_lines = []\n self.mat_fan_points = []\n self.mat_fan_lines = []\n return\n\n def create_shape_data(self):\n self.init_shape_data()\n if self.resolution <= 12:\n self.resolution = 12\n ang = 360/self.resolution\n co1 = [0, (1+self.thickness/2)*self.scale]\n co2 = [0, 1*self.scale]\n co3 = [0, (1-self.thickness/2)*self.scale]\n co4 = [0, 1*self.scale]\n for i in range(self.resolution):\n new_co1 = rotate_2d([0, 0], co1, math.radians(ang*i)).to_3d()\n new_co2 = rotate_2d([0, 0], co2, math.radians(ang*i)).to_3d()\n new_co3 = rotate_2d([0, 0], co3, math.radians(ang*i)).to_3d()\n new_co4 = rotate_2d([0, 0], co4, math.radians(ang*i)).to_3d()\n\n new_co2[2] += self.thickness/2*self.scale\n new_co4[2] -= self.thickness/2*self.scale\n\n new_co1 *= .01\n new_co2 *= .01\n new_co3 *= .01\n new_co4 *= .01\n\n if self.axis == 0:\n self.points.append(new_co1.zyx)\n self.points.append(new_co2.zyx)\n self.points.append(new_co3.zyx)\n self.points.append(new_co4.zyx)\n if self.axis == 1:\n self.points.append(new_co1.xzy)\n self.points.append(new_co2.xzy)\n self.points.append(new_co3.xzy)\n self.points.append(new_co4.xzy)\n if self.axis == 2:\n self.points.append(new_co1)\n self.points.append(new_co2)\n self.points.append(new_co3)\n self.points.append(new_co4)\n\n if i < self.resolution-1:\n self.tris += [\n [i*4+1, i*4, i*4+4], [i*4+1, i*4+4, i*4+5], [i *\n 4+2, i*4+1, i*4+5], [i*4+2, i*4+5, i*4+6],\n [i*4+3, i*4+2, i*4+6], [i*4+3, i*4+6, i*4 +\n 7], [i*4, i*4+3, i*4+7], [i*4, i*4+7, i*4+4]\n ]\n else:\n # Wrap-around segment: indices i*4+4..i*4+7 map back to 0..3.\n self.tris += [\n [i*4+1, i*4, 0], [i*4+1, 0, 1], [i *\n 4+2, i*4+1, 1], [i*4+2, 1, 2],\n [i*4+3, i*4+2, 2], [i*4+3, 2, 3], [i*4, i*4+3, 3], [i*4, 3, 0]\n ]\n\n ang = 180/(int(self.resolution/2)-1)\n co = [0, 1*self.scale]\n self.fan_points.append(Vector((0, 0, 0)))\n for i in range(int(self.resolution/2)+1):\n new_co = rotate_2d([0, 0], co, math.radians(ang*i)).to_3d()\n new_co *= .01\n\n if self.axis == 0:\n self.fan_points.append(new_co.zyx)\n if self.axis == 1:\n self.fan_points.append(new_co.xzy)\n if self.axis == 2:\n self.fan_points.append(new_co)\n\n if i < int(self.resolution/2):\n self.fan_tris.append([0, i+1, i+2])\n\n self.fan_lines.append(self.fan_points[0])\n self.fan_lines.append(self.fan_points[1])\n self.fan_lines.append(self.fan_points[0])\n self.fan_lines.append(self.fan_points[-1])\n return\n\n def update_position(self, 
matrix, scale_fac, angle=0):\n region = bpy.context.region\n rv3d = bpy.context.region_data\n\n self.mat_points.clear()\n for p in range(len(self.points)):\n self.mat_points.append(matrix @ self.points[p])\n\n if scale_fac == None:\n region = bpy.context.region\n rv3d = bpy.context.region_data\n\n min_x = 0\n max_x = 0\n min_y = 0\n max_y = 0\n for c, co in enumerate(self.mat_points):\n rco = view3d_utils.location_3d_to_region_2d(region, rv3d, co)\n if rco == None:\n continue\n\n if c == 0:\n min_x = rco[0]\n max_x = rco[0]\n min_y = rco[1]\n max_y = rco[1]\n else:\n if rco[0] < min_x:\n min_x = rco[0]\n if rco[0] > max_x:\n max_x = rco[0]\n if rco[1] < min_y:\n min_y = rco[1]\n if rco[1] > max_y:\n max_y = rco[1]\n\n height = max_y - min_y\n width = max_x - min_x\n\n if height > width:\n max_size = height\n else:\n max_size = width\n\n if max_size == 0:\n max_size = self.prev_screen_size\n\n self.prev_screen_size = max_size\n max_size += 1\n\n scale_fac = self.size/max_size\n\n self.mat_points.clear()\n for p in range(len(self.points)):\n co = matrix @ (self.points[p]*scale_fac)\n self.mat_points.append(co)\n\n self.batch = batch_for_shader(\n self.shader, 'TRIS', {\"pos\": self.mat_points}, indices=self.tris)\n\n self.update_rot_fan(matrix, scale_fac, angle)\n return scale_fac\n\n def update_rot_fan(self, matrix, scale_fac, angle, start_ang=0):\n region = bpy.context.region\n rv3d = bpy.context.region_data\n\n if self.resolution <= 12:\n self.resolution = 12\n\n point_per_rot = int(360/(self.resolution/2))\n rotations = int(math.degrees(abs(angle))/360)\n point_num = point_per_rot*(rotations+1)\n ang = angle/point_num\n\n co = [0, 1*self.scale]\n self.fan_points.clear()\n self.fan_tris.clear()\n self.fan_points.append(Vector((0, 0, 0)))\n for i in range(point_num+1):\n new_co = rotate_2d([0, 0], co, ang*i+start_ang).to_3d()\n new_co *= .01\n\n if self.axis == 0:\n eul = Euler((math.radians(90.0), 0.0,\n math.radians(90.0)), 'XYZ')\n new_co.rotate(eul)\n self.fan_points.append(new_co)\n if self.axis == 1:\n eul = Euler((math.radians(90.0), 0.0, 0.0), 'XYZ')\n new_co.rotate(eul)\n self.fan_points.append(new_co)\n if self.axis == 2:\n self.fan_points.append(new_co)\n\n if i < point_num:\n self.fan_tris.append([0, i+1, i+2])\n\n self.fan_lines.clear()\n self.fan_lines.append(self.fan_points[0])\n self.fan_lines.append(self.fan_points[1])\n self.fan_lines.append(self.fan_points[0])\n self.fan_lines.append(self.fan_points[-1])\n\n self.mat_fan_points.clear()\n for p in range(len(self.fan_points)):\n self.mat_fan_points.append(matrix @ (self.fan_points[p]*scale_fac))\n\n self.mat_fan_lines.clear()\n for p in range(len(self.fan_lines)):\n self.mat_fan_lines.append(matrix @ (self.fan_lines[p]*scale_fac))\n\n self.batch_fan = batch_for_shader(\n self.shader, 'TRIS', {\"pos\": self.mat_fan_points}, indices=self.fan_tris)\n self.batch_fan_lines = batch_for_shader(\n self.shader, 'LINES', {\"pos\": self.mat_fan_lines})\n\n return\n\n def draw(self):\n if self.active:\n bgl.glDepthRange(0, 0.01)\n\n if self.in_use:\n self.shader.bind()\n self.shader.uniform_float(\n \"color\", [self.color[0], self.color[1], self.color[2], 0.2])\n self.batch_fan.draw(self.shader)\n\n self.shader.bind()\n self.shader.uniform_float(\"color\", [1.0, 1.0, 1.0, 1.0])\n self.batch_fan_lines.draw(self.shader)\n\n self.shader.bind()\n if self.hover and self.in_use == False:\n self.shader.uniform_float(\"color\", [1.0, 1.0, 1.0, 1.0])\n else:\n self.shader.uniform_float(\"color\", self.color)\n self.batch.draw(self.shader)\n\n 
bgl.glDepthRange(0, 1.0)\n\n return\n\n def set_scale(self, scale):\n self.scale = scale\n return\n","sub_path":"cui_classes/cui_items.py","file_name":"cui_items.py","file_ext":"py","file_size_in_byte":52652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"128650490","text":"# #UDP is connectionless\n# from socket import *\n# ip_port = ('172.24.36.5',8000)\n# buffer_size = 1021\n#\n# udp_client = socket(AF_INET,SOCK_DGRAM) #SOCK_DGRAM: datagram socket\n# udp_client.sendto('HelloWorld'.encode('utf8'),ip_port)\n\n\n\n#UDP is connectionless\nfrom socket import *\nip_port = ('172.24.36.5',8000)\nbuffer_size = 1021\n\nudp_client = socket(AF_INET,SOCK_DGRAM) #SOCK_DGRAM: datagram socket\nwhile True:\n msg = input('>>>>>:').strip()\n udp_client.sendto(msg.encode('utf8'),ip_port)\n\n data, addr = udp_client.recvfrom(buffer_size)\n print(data.decode('utf8'))\n\n\n\n\n","sub_path":"7-31/UDP客户端2.py","file_name":"UDP客户端2.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"457127694","text":"import pickle\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\nfrom handeye_4dof import Calibrator4DOF\n\n\n\"\"\"\n Exactly the same as example.py except we load precomputed \n motions instead, resulting in faster completion.\n\"\"\"\n\n\nnp.set_printoptions(suppress=True)\n\n\ndef main():\n with open(\"../example_data/pose_samples.pkl\", \"rb\") as f:\n try:\n base_to_hand, camera_to_marker = pickle.load(f)\n except UnicodeDecodeError:\n # python 2 to python 3 pickle in case sampling was done in ROS\n base_to_hand, camera_to_marker = pickle.load(f, encoding='latin1')\n\n with open(\"../example_data/paired_poses.pkl\", \"rb\") as f:\n motions = pickle.load(f)\n\n # Initialize calibrator with precomputed motions.\n cb = Calibrator4DOF(motions)\n\n # Our camera and end effector z-axes are antiparallel so we apply a 180deg x-axis rotation.\n dq_x = cb.calibrate(antiparallel_screw_axes=True)\n\n # Hand to Camera TF obtained from handeye calibration.\n ca_hand_to_camera = np.linalg.inv(dq_x.as_transform())\n\n # Hand to Camera TF obtained from post nonlinear refinement.\n nl_hand_to_camera = cb.nonlinear_refinement(base_to_hand, camera_to_marker, ca_hand_to_camera)\n\n ca_rotation = np.rad2deg(R.from_matrix(ca_hand_to_camera[:3, :3]).as_euler('xyz'))\n nl_rotation = np.rad2deg(R.from_matrix(nl_hand_to_camera[:3, :3]).as_euler('xyz'))\n\n # Ground Truth Hand to Camera\n gt_translation = [-0.456, -0.037, -0.112]\n gt_rotation = [180, 0, 0]\n\n # NOTE: (1) Ground Truth itself may be inaccurate (manually measured).\n # (2) z-translation is an invalid number.\n np.set_printoptions(precision=5)\n print(\"Hand to Camera Transform Comparisons\")\n print(\"Translations: Calibration {}\".format(ca_hand_to_camera[:3, -1]))\n print(\" Nonlinear {}\".format(nl_hand_to_camera[:3, -1]))\n print(\" Ground Truth {}\".format(gt_translation))\n print(\"Rotations: Calibration {}\".format(ca_rotation))\n print(\" Nonlinear {}\".format(nl_rotation))\n print(\" Ground Truth {}\".format(gt_rotation))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/example_using_precomputed_motions.py","file_name":"example_using_precomputed_motions.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"172770262","text":"from functools import reduce\nfrom collections import defaultdict \nimport re\n\ndef 
readinput_dict_as_ints(filename): \n input = {}\n file = open(filename, \"r\")\n for line in file:\n input[int(line)] = int(line)\n return input\n\ndef readinput_lines(filename): \n file = open(filename, \"r\") \n return [line.strip() for line in file]\n\ndef readinput_lines_as_ints(filename): \n file = open(filename, \"r\") \n input=[]\n for line in [line.strip() for line in file]:\n input.append(int(line)) \n return input\n\n\"\"\" A Python Class\nA simple Python graph class, demonstrating the essential \nfacts and functionalities of graphs.\n\"\"\"\n\nclass Graph(object):\n\n def __init__(self, graph_dict=None):\n \"\"\" initializes a graph object \n If no dictionary or None is given, an empty dictionary will be used\n \"\"\"\n if graph_dict == None:\n graph_dict = {}\n self.__graph_dict = graph_dict\n\n def vertices(self):\n \"\"\" returns the vertices of a graph \"\"\"\n return list(self.__graph_dict.keys())\n\n def edges(self):\n \"\"\" returns the edges of a graph \"\"\"\n return self.__generate_edges()\n\n def add_vertex(self, vertex):\n \"\"\" If the vertex \"vertex\" is not in \n self.__graph_dict, a key \"vertex\" with an empty\n list as a value is added to the dictionary. \n Otherwise nothing has to be done. \n \"\"\"\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []\n\n def add_edge(self, edge):\n \"\"\" assumes that edge is of type set, tuple or list; \n between two vertices can be multiple edges! \n \"\"\"\n edge = set(edge)\n vertex1 = edge.pop()\n if edge:\n # not a loop\n vertex2 = edge.pop()\n else:\n # a loop\n vertex2 = vertex1\n if vertex1 in self.__graph_dict:\n self.__graph_dict[vertex1].append(vertex2)\n else:\n self.__graph_dict[vertex1] = [vertex2]\n\n def __generate_edges(self):\n \"\"\" A static method generating the edges of the \n graph \"graph\". Edges are represented as sets \n with one (a loop back to the vertex) or two \n vertices \n \"\"\"\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges\n\n def __str__(self):\n res = \"vertices: \"\n for k in self.__graph_dict:\n res += str(k) + \" \"\n res += \"\\nedges: \"\n for edge in self.__generate_edges():\n res += str(edge) + \" \"\n return res\n\n def find_isolated_vertices(self):\n \"\"\" returns a list of isolated vertices. 
\"\"\"\n graph = self.__graph_dict\n isolated = []\n for vertex in graph:\n print(isolated, vertex)\n if not graph[vertex]:\n isolated += [vertex]\n return isolated\n\n def find_path(self, start_vertex, end_vertex, path=[]):\n \"\"\" find a path from start_vertex to end_vertex \n in graph \"\"\"\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return path\n if start_vertex not in graph:\n return None\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_path = self.find_path(vertex, \n end_vertex, \n path)\n if extended_path: \n return extended_path\n return None\n \n\n def find_all_paths(self, start_vertex, end_vertex, path=[]):\n \"\"\" find all paths from start_vertex to \n end_vertex in graph \"\"\"\n graph = self.__graph_dict \n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n if start_vertex not in graph:\n return []\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_paths(vertex, \n end_vertex, \n path)\n for p in extended_paths: \n paths.append(p)\n return paths\n\n def is_connected(self, \n vertices_encountered = None, \n start_vertex=None):\n \"\"\" determines if the graph is connected \"\"\"\n if vertices_encountered is None:\n vertices_encountered = set()\n gdict = self.__graph_dict \n vertices = gdict.keys() \n if not start_vertex:\n # chosse a vertex from graph as a starting point\n start_vertex = vertices[0]\n vertices_encountered.add(start_vertex)\n if len(vertices_encountered) != len(vertices):\n for vertex in gdict[start_vertex]:\n if vertex not in vertices_encountered:\n if self.is_connected(vertices_encountered, vertex):\n return True\n else:\n return True\n return False\n\n def vertex_degree(self, vertex):\n \"\"\" The degree of a vertex is the number of edges connecting\n it, i.e. the number of adjacent vertices. Loops are counted \n double, i.e. every occurence of vertex in the list \n of adjacent vertices. \"\"\" \n adj_vertices = self.__graph_dict[vertex]\n degree = len(adj_vertices) + adj_vertices.count(vertex)\n return degree\n\n def degree_sequence(self):\n \"\"\" calculates the degree sequence \"\"\"\n seq = []\n for vertex in self.__graph_dict:\n seq.append(self.vertex_degree(vertex))\n seq.sort(reverse=True)\n return tuple(seq)\n\n @staticmethod\n def is_degree_sequence(sequence):\n \"\"\" Method returns True, if the sequence \"sequence\" is a \n degree sequence, i.e. a non-increasing sequence. 
\n Otherwise False is returned.\n \"\"\"\n # check if the sequence sequence is non-increasing:\n return all( x>=y for x, y in zip(sequence, sequence[1:]))\n \n\n def delta(self):\n \"\"\" the minimum degree of the vertices \"\"\"\n min = 100000000\n for vertex in self.__graph_dict:\n vertex_degree = self.vertex_degree(vertex)\n if vertex_degree < min:\n min = vertex_degree\n return min\n \n def Delta(self):\n \"\"\" the maximum degree of the vertices \"\"\"\n max = 0\n for vertex in self.__graph_dict:\n vertex_degree = self.vertex_degree(vertex)\n if vertex_degree > max:\n max = vertex_degree\n return max\n\n def density(self):\n \"\"\" method to calculate the density of a graph \"\"\"\n g = self.__graph_dict\n V = len(g.keys())\n E = len(self.edges())\n return 2.0 * E / (V *(V - 1))\n\n def diameter(self):\n \"\"\" calculates the diameter of the graph \"\"\"\n \n v = self.vertices() \n pairs = [ (v[i],v[j]) for i in range(len(v)-1) for j in range(i+1, len(v))]\n smallest_paths = []\n for (s,e) in pairs:\n paths = self.find_all_paths(s,e)\n smallest = sorted(paths, key=len)[0]\n smallest_paths.append(smallest)\n\n smallest_paths.sort(key=len)\n\n # longest path is at the end of list, \n # i.e. diameter corresponds to the length of this path\n diameter = len(smallest_paths[-1])\n return diameter\n\n @staticmethod\n def erdoes_gallai(dsequence):\n \"\"\" Checks if the condition of the Erdoes-Gallai inequality \n is fullfilled \n \"\"\"\n if sum(dsequence) % 2:\n # sum of sequence is odd\n return False\n if Graph.is_degree_sequence(dsequence):\n for k in range(1,len(dsequence) + 1):\n left = sum(dsequence[:k])\n right = k * (k-1) + sum([min(x,k) for x in dsequence[k:]])\n if left > right:\n return False\n else:\n # sequence is increasing\n return False\n return True\n \nclass Binary:\n def get_binary_as_string(self,val,length=0):\n bval = bin(int(val)).replace(\"0b\",\"\") \n return \"\".ljust(length-len(bval), \"0\") + bval\n\n def get_binary_as_string_from_mask(self,val,mask,match):\n bval = self.get_binary_as_string(val,len(mask))\n binstr = \"\"\n for i in range(len(bval)): \n if mask[i] == match:\n binstr+=bval[i]\n else:\n binstr+=mask[i]\n return binstr\n \n def split_binary_as_list(self,val,match):\n splitted = []\n matches = pow(2,val.count(match))\n for i in range(matches):\n splitted.append(\"\") \n for n in range(len(val)): \n v=False \n splitted.sort() \n for i in range(matches): \n v = v == False \n if val[n] == match:\n splitted[i] += str(int(v))\n else:\n splitted[i] += val[n] \n return splitted\n\n def get_int_from_binary_string(self,s):\n return int(s,2)\n\n def get_int_from_binary_reversed_string(self,s):\n return int(s[::-1],2)\n \n\n\n\n def __init__(self, graph_dict=None):\n \"\"\" initializes a graph object \n If no dictionary or None is given, \n an empty dictionary will be used\n \"\"\"\n if graph_dict == None:\n graph_dict = {}\n self.__graph_dict = graph_dict\n\n def vertices(self):\n \"\"\" returns the vertices of a graph \"\"\"\n return list(self.__graph_dict.keys())\n\n def edges(self):\n \"\"\" returns the edges of a graph \"\"\"\n return self.__generate_edges()\n\n def add_vertex(self, vertex):\n \"\"\" If the vertex \"vertex\" is not in \n self.__graph_dict, a key \"vertex\" with an empty\n list as a value is added to the dictionary. \n Otherwise nothing has to be done. 
\n \"\"\"\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []\n\n def add_edge(self, edge):\n \"\"\" assumes that edge is of type set, tuple or list; \n between two vertices can be multiple edges! \n \"\"\"\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.__graph_dict:\n self.__graph_dict[vertex1].append(vertex2)\n else:\n self.__graph_dict[vertex1] = [vertex2]\n\n def __generate_edges(self):\n \"\"\" A static method generating the edges of the \n graph \"graph\". Edges are represented as sets \n with one (a loop back to the vertex) or two \n vertices \n \"\"\"\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges\n\n def find_all_paths(self, start_vertex, end_vertex, path=[]):\n \"\"\" find all paths from start_vertex to \n end_vertex in graph \"\"\"\n graph = self.__graph_dict \n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n if start_vertex not in graph:\n return []\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_paths(vertex, \n end_vertex, \n path)\n for p in extended_paths: \n paths.append(p)\n return paths\n\n def __str__(self):\n res = \"vertices: \"\n for k in self.__graph_dict:\n res += str(k) + \" \"\n res += \"\\nedges: \"\n for edge in self.__generate_edges():\n res += str(edge) + \" \"\n return res\n\nclass FileHelper:\n \n def readinput_dict_as_ints(self,filename): \n input = {}\n file = open(filename, \"r\")\n for line in file:\n input[int(line)] = int(line)\n return input\n\n def readinput_lines(self,filename): \n file = open(filename, \"r\") \n return [line.strip() for line in file]\n \n def readinput_lines_and_replace(self,filename,replaces): \n #Usage : input = file.readinput_lines_and_replace(r\"Day17\\input_ex.txt\",[[\".\",\"0\"],[\"#\",\"1\"]]) \n file = open(filename, \"r\") \n lines = []\n for line in [line.strip() for line in file]:\n for replace in replaces:\n line = line.replace(replace[0],replace[1])\n lines.append(line)\n\n return lines\n\n def readinput_lines_as_list_ints(self,filename): \n file = open(filename, \"r\") \n input=[]\n for line in [line.strip() for line in file]:\n for i in line:\n input.append(int(i)) \n return input\n \n def readinput_lines_as_ints(self,filename): \n file = open(filename, \"r\") \n input=[]\n for line in [line.strip() for line in file]:\n input.append(int(line)) \n return input\n\n def get_arrays_from_separator(self,lines,separator):\n # Reads all lines and creates array for each seperator found (mostly blanc line)\n arrays = [] \n lineid = 0\n \n while lineid < len(lines):\n arr = []\n while lineid < len(lines) and lines[lineid]:\n if lines[lineid]==separator: break\n arr.append(lines[lineid].strip())\n lineid += 1\n lineid += 1\n arrays.append(arr)\n\n return arrays\nclass Compass:\n compasspoints = {'N': (0, 1), 'E': (1, 0), 'S': (0, -1), 'W': (-1, 0)} # Can be used for north/south, east/west calculation\n hexaspoints= {\"E\": (1, 0), \"W\": (-1, 0), \"SE\": (0, 1), \"SW\": (-1, 1), \"NE\": (1, -1), \"NW\": (0, -1)}\n def turnCompassPoint(self,currentdirection,turndirection,degrees): \n degrees = (degrees // 90) \n if turndirection == \"L\":\n degrees=-degrees\n dirs = list(self.compasspoints.keys())\n idx = dirs.index(currentdirection) + degrees\n idx %= len(dirs)\n return (dirs[idx:] + dirs[:idx])[0] \n\nclass GridHelper:\n def 
get_suroundings(self,grid,x,y,count):\n start =-1\n end = count -1 + start\n # Gets a new grid with offset\n return [grid[y + dy][x + dx] \n \n for dx in range(start, end) \n for dy in range(start, end) \n if 0 <= y + dy < len(grid) # Check y bounds\n and 0 <= x + dx < len(grid[x]) # check x bounds\n and (dx, dy) != (0, 0)] # not self\n \n def join_lines_from_list(self,mylist): \n return \"\".join(\"\".join(row) for row in mylist)\n\n\n def calculate_combinations(self,data,offset):\n\n dist = [1]\n data.sort()\n for val in range(1, len(data)):\n total = 0\n for tot in range(val):\n if data[tot] + offset >= data[val]:\n total += dist[tot]\n dist.append(total) \n \n return dist\n \n def get_int_combinations(self,data,offsets):\n # =============================================================\n # Data is list of int's.\n # Each val in list needs to connect within the offset parameter\n # =============================================================\n\n combis = {}\n combi_vals = {}\n data.sort()\n for offset in offsets:\n combi = []\n combi_val = [] \n start = 0\n for val in data:\n if val - start == offset:\n combi.append(1) \n combi_val.append(val) \n start = val\n \n combis[offset] = combi\n combi_vals[offset] = combi_val \n \n return combis,combi_vals\nclass ChineseReminder():\n def calculate_chinese_remainder(self,rem, mod):\n #\n # Solves and finds X for a system of congruences:\n # X = a_1 (mod n_1)\n # X = a_2 (mod n_2)\n # ...\n # X = a_N (mod n_N)\n #\n # Solutions afterwards can be made by adding/subtracting by MOD\n # Returns X (the initial value), and MOD (the interval where it repeats)\n #\n # Additional Info: https://brilliant.org/wiki/chinese-remainder-theorem/\n\n a1 = rem[0]\n m1 = mod[0]\n for a2, m2 in zip(rem[1:], mod[1:]):\n gcd, x, y = self.extended_gcd(m1, m2)\n if a1 % gcd != a2 % gcd:\n raise ValueError(\"No solutions for given input.\")\n _, x, y = self.extended_gcd(m1 // gcd, m2 // gcd)\n MOD = m1 // gcd * m2\n X = (a1 * (m2 // gcd) * y + a2 * (m1 // gcd) * x) % MOD\n a1 = X\n m1 = MOD\n return a1, MOD\n \n def extended_gcd(self,a, b):\n x, y, u, v = 0, 1, 1, 0\n while a != 0:\n q, r = b // a, b % a\n m, n = x - u * q, y - v * q\n b, a, x, y, u, v = a, r, u, v, m, n\n gcd = b\n return gcd, x, y # x, y are for [ax + by = gcd]\n\nclass RegexHelper():\n def is_string_numeric_regex(self,s):\n return re.search('^[0-9]+$',s)\n \n def is_list_numeric_regex(self,l):\n for s in l:\n if not re.search('^[0-9]+$',s):\n return False\n\n return True\n \n def has_string_numeric_regex(self, s):\n for i in s:\n if re.search('^[0-9]+$',i):\n return True\n\n return False\n \n def has_list_numeric_regex(self,l):\n for s in l:\n if re.search('^[0-9]+$',s):\n return True\n \n return False\n","sub_path":"AOCHelper.py","file_name":"AOCHelper.py","file_ext":"py","file_size_in_byte":17479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"425080906","text":"# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-\n### BEGIN LICENSE\n# This file is in the public domain\n### END LICENSE\n\nimport os\nimport subprocess\n\nimport gettext\nfrom gettext import gettext as _\ngettext.textdomain('interface')\n\nfrom gi.repository import Gtk # pylint: disable=E0611\nimport logging\nlogger = logging.getLogger('interface')\n\nfrom interface_lib import Window\n\n#Import the others dialogs\nfrom interface.DirectoryDialog\t\t\t import DirectoryDialog\nfrom interface.GenerateinterfaceDialog\t import GenerateinterfaceDialog\nfrom 
interface.GenerateunitDialog\t\t import GenerateunitDialog\nfrom interface.MutantsdualDialog\t\t\timport MutantsdualDialog\nfrom interface.MutantsequivalentsDialog\t import MutantsequivalentsDialog\nfrom interface.MutantsexecDialog\t\t\timport MutantsexecDialog\nfrom interface.MutantsselectinterfaceDialog import MutantsselectinterfaceDialog\nfrom interface.MutantsselectunitDialog\t import MutantsselectunitDialog\nfrom interface.MutantsviewDialog\t\t\timport MutantsviewDialog\nfrom interface.NewDialog\t\t\t\t\timport NewDialog\nfrom interface.PreferencesInterfaceDialog import PreferencesInterfaceDialog\nfrom interface.TestaddDialog\t\t\t\timport TestaddDialog\nfrom interface.TestdeleteDialog\t\t\t import TestdeleteDialog\nfrom interface.TesteffectiveDialog\t\t import TesteffectiveDialog\nfrom interface.TestimportDialog\t\t\t import TestimportDialog\nfrom interface.TestminimizeDialog\t\t import TestminimizeDialog\nfrom interface.TestviewDialog\t\t\t import TestviewDialog\nfrom interface.StatusDialog\t\t\t\t import StatusDialog\nfrom interface.PropertiesDialog\t\t\t import PropertiesDialog\nfrom interface.ReportDialog\t\t\t\t import ReportDialog\n\n# See interface_lib.Window.py for more details about how this class works\nclass InterfaceWindow(Window):\n\t__gtype_name__ = \"InterfaceWindow\"\n\t\n\tdef finish_initializing(self, builder): # pylint: disable=E1002\n\t\t\"\"\"Set up the main window\"\"\"\n\t\tsuper(InterfaceWindow, self).finish_initializing(builder)\n\n\t\t#Proteum variables\n\t\tself.session \t = None\n\t\tself.directory = None\n\t\tself.source_code = None \n\t\tself.source\t = None #Source code without \".c\"\n\t\tself.executable = None\n\t\tself.compile_command = None\n\t\tself.type_session\t= None\n\n\t\t\n\t\t#Program Dialogs\n\t\tself.dir_dialog\t\t\t = DirectoryDialog()\t\t\t\t\t \n\t\tself.new_dialog\t\t\t = NewDialog()\t\t \n\t\n\t\t#Mutants Dialogs\t\t\n\t\tself.gen_interface_dialog\t = GenerateinterfaceDialog() \n\t\tself.gen_unit_dialog\t\t = GenerateunitDialog()\n\t\tself.mutants_dual_dialog\t = MutantsdualDialog() \n\t\tself.mutants_equiv_dialog\t = MutantsequivalentsDialog() \n\t\tself.mutants_exec_dialog\t = MutantsexecDialog() \n\t\tself.mutants_sel_inter_dialog = MutantsselectinterfaceDialog() \n\t\tself.mutants_sel_unit_dialog = MutantsselectunitDialog() \n\t\tself.mutants_view_dialog\t = MutantsviewDialog() \n\t\t\n\t\tself.preferences_interface_dialog = PreferencesInterfaceDialog() #NA\n\t\t\n\t\t#Test Dialogs\t\t\n\t\tself.test_add_dialog\t\t = TestaddDialog() \n\t\tself.test_del_dialog\t\t = TestdeleteDialog() \n\t\tself.test_effect_dialog\t = TesteffectiveDialog() \n\t\tself.test_import_dialog\t = TestimportDialog() \n\t\tself.test_view_dialog\t\t = TestviewDialog()\n\n\n\t\t#Status dialogs\n\t\tself.status_dialog = StatusDialog()\n\n\t\t#Properties dialogs\n\t\tself.properties_dialog = PropertiesDialog()\n\n\t\t#Report dialog\n\t\tself.report_dialog = ReportDialog()\n\t \n\t\t#Program Test Menu itens\n\t\tself.mnu_load = self.builder.get_object(\"mnu_load\")\n\t\tself.mnu_new = self.builder.get_object(\"mnu_new\")\n\t\tself.mnu_close = self.builder.get_object(\"mnu_close\")\n\n\t\t#Test Case Menu itens\n\t\tself.mnu_tcase\t = self.builder.get_object(\"test_case\")\n\n\t\tself.mnu_add\t = self.builder.get_object(\"mnu_add\")\n\t\tself.mnu_view\t = self.builder.get_object(\"mnu_view\")\n\t\tself.mnu_delete\t= self.builder.get_object(\"mnu_delete\")\n\t\tself.mnu_effective = self.builder.get_object(\"mnu_effective\")\n\t\tself.mnu_import\t= 
self.builder.get_object(\"mnu_import\")\n\n\n\t\t#Mutants Menu itens\n\t\tself.mnu_mutants\t = self.builder.get_object(\"mutants\")\n\t\t\n\t\tself.mnu_gen_unit\t = self.builder.get_object(\"mnu_gen_unit\")\n\t\tself.mnu_gen_interface = self.builder.get_object(\"mnu_gen_interface\")\n\t\tself.mnu_view_mutant = self.builder.get_object(\"mnu_view_mutant\")\n\t\tself.mnu_equivalents = self.builder.get_object(\"mnu_equivalents\")\n\t\tself.mnu_execute\t = self.builder.get_object(\"mnu_execute\")\n\t\tself.mnu_exec_dual\t = self.builder.get_object(\"mnu_exec_dual\")\n\t\tself.mnu_exec_parallel = self.builder.get_object(\"mnu_exec_parallel\")\n\t\tself.mnu_sel_unit\t = self.builder.get_object(\"mnu_sel_unit\")\n\t\tself.mnu_sel_interface = self.builder.get_object(\"mnu_sel_interface\")\n\n\t\t#Report menu itens\n\t\tself.mnu_report = self.builder.get_object(\"reports\")\n\n\t\tself.mnu_report_tcase = self.builder.get_object(\"mnu_report_tcases\")\n\n\t\t#Propertie item\n\t\tself.properties = self.builder.get_object(\"properties\")\n\n\t\t#Status item\n\t\tself.mnu_status = self.builder.get_object(\"status\")\n\t\t\n\t\t#Quit item\n\t\tself.mnu_quit = self.builder.get_object(\"quit\")\n\n\t\t#Text view and Scroll\n\t\tself.main_text_view = self.builder.get_object(\"main_text_view\")\n\t\t\n\n\n################# EXTRA actions #############################\t\n\t\"\"\"\n\t\tCreates a dialog with message. Types: Gtk.MessageType.[INFO, WARNING, QUESTION ,ERROR] \n\t\"\"\"\n\tdef dialog_info(self, type_message, message):\n\t\tdialog = Gtk.MessageDialog(None,\n\t\tGtk.DialogFlags.MODAL,\n\t\ttype_message,\n\t\tGtk.ButtonsType.OK,\n\t\tmessage)\n\n\t\tresponse = dialog.run()\n\t\tdialog.hide()\n\n\n\n\t\"\"\"\n\t\tReads the PTM file \n\t\"\"\"\n\tdef read_ptmFile(self,session):\n\t\t\n\t\tinfo = [None] * 4\n\t\t\n\t\tfile_name = session + '.PTM'\n\t\t\n\t\tptm_file = open(file_name,'r')\n\n\t\t#Read 4 empty lines\n\t\tfor i in range(4):\n\t\t\tptm_file.readline()\n\n\t\t#Read userfull information\n\t\tinfo[0] = ptm_file.readline() #source code\n\t\tinfo[1]\t= ptm_file.readline() #executable file\n\t\tinfo[2] = ptm_file.readline() #compilation command\n\t\tinfo[3]\t= ptm_file.readline() #Test Type [Test, Research]\n\n\t\treturn info\n\n\n\t\"\"\"\n\t\tCreates a report file and read the information\n\t\"\"\"\n\tdef read_report(self,session):\n\t\t\n\t\treport = \"/tmp/g-report\"\n\t\t\n\t\tstatement = \"report -g %s >%s\" % (session,report)\n\t\tos.system(statement)\n\t\t\n\t\tReportGFile = open(report, \"r\")\n\t\t\n\t\tqtd = ReportGFile.readline() #ignores empty line\n\n\t\tqtd\t= ReportGFile.readline() # reads number test cases\n\t\ttcases = int(qtd)\n\t\t\n\t\tqtd = ReportGFile.readline() #total mutants\n\t\ttotal = int(qtd)\n\t\t\n\t\tqtd = ReportGFile.readline() # reads the number of ACTIVE mutants\n\t\tactiv = int(qtd) \n\t\t\n\t\tqtd = ReportGFile.readline() # read number equiv mutants\n\t\tequiv = int(qtd)\n\t\t\n\t\tqtd = ReportGFile.readline() # read Mutation Score\n\t\tms = float(qtd)\n\n\t\tqtd = ReportGFile.readline() # read alive mutants\n\t\talive = int(qtd)\n\t\t\n\t\tqtd = ReportGFile.readline() #read anomalous mutants\n\t\tanom = int(qtd)\n\t\t\n\t\t#Test Cases, Total, Active, Equivalents, Mutation Score, Alive, Anomalous\n\t\tresults = [tcases,total,activ,equiv,ms,alive,anom]\t\t\n\t\t\n\t\treturn results\n\n\n\t\"\"\"\n\t\tAutomatic scrolls down the textview. 
It's executed inside size-allocate signal\n\t\"\"\"\n\tdef scrolls_text_view(self, widget, event, data=None):\n\t\tadj = widget.get_vadjustment()\n\t\tadj.set_value(adj.get_upper() - adj.get_page_size())\n################# EXTRA actions #############################\n\n\n\n\n################# Program Test Menu actions #############################\n\t\"\"\"\n\t\tLoad proteum existing session\n\t\"\"\"\n\tdef on_mnu_load_activate(self, widget, data=None):\n\n\t\tdirectory,session = None, None\n\n\t\twhile True:\n\n\t\t\tresult = self.dir_dialog.run()\n\n\t\t\t#If Cancel close the dialog\n\t\t\tif result == Gtk.ResponseType.CANCEL:\n\t\t\t\tself.dir_dialog.hide()\n\t\t\t\treturn\n\n\t\t\t#Get the result and session values\n\t\t\telif result == Gtk.ResponseType.OK:\n\n\t\t\t\tself.directory, self.session = self.dir_dialog.get_entry_information()\n\t\t\t\t\n\t\t\t\t#Verifies if the dir exists\n\t\t\t\tif not (os.path.isdir(self.directory)):\n\t\t\t\t\tself.dialog_info(Gtk.MessageType.ERROR, \"This Directory Does Not Exist!\")\t\t\n\n\t\t\t\t#Verifies if the session with that name exists\n\t\t\t\telif self.session in (None, \"\"):\n\t\t\t\t\tself.dialog_info(Gtk.MessageType.ERROR, \"Invalid Program Session Name!\") \n\n\t\t\t\t#Verifies if the PTM file exists\n\t\t\t\telif not (os.path.isfile(self.directory+'/'+self.session+'.PTM')):\n\t\t\t\t\tself.dialog_info(Gtk.MessageType.ERROR, \"Program Session Does Not Exist\")\n \t\t\n\t\t\t\t#Continue the execution\n\t\t\t\telse:\n\t\t\t\n\t\t\t\t\t#Enable TestCase, Mutants, Reports, Status Menu\n\t\t\t\t\tself.mnu_mutants.set_sensitive(True)\n\t\t\t\t\tself.mnu_tcase.set_sensitive(True)\n\t\t\t\t\tself.mnu_report.set_sensitive(True)\n\t\t\t\t\tself.mnu_status.set_sensitive(True)\n\t\t\t\t\t\t\n\t\t\t\t\t#Hide the dialog\n\t\t\t\t\tself.dir_dialog.hide()\n\t\t\t\t\tbreak\n\n\t\t#Enter in the directory\n\t\tos.chdir(self.directory)\n\n\t\t#Getting information about the session\n\t\tinfo = self.read_ptmFile(self.session)\n\n\t\t#Update Attributes values > The [:-1] remove the \\n at the end of the string\n\t\tself.source_code = str(info[0][:-1])\t\t\n\t\tself.executable = str(info[1][:-1])\n\t\tself.compile_command = str(info[2][:-1])\n\t\tself.type_session\t= str(info[3][:-1])\n\n\n\n\t\t#Add information to Main Text View\n\t\tinfo = \"\\nPROTEUM> Load the test session: %s \\n\" % (self.session)\n\t\tinfo2 = \"\\nPROTEUM> Session stored at: %s \\n\" % (self.directory) \n\t\tself.main_text_view.get_buffer().insert(self.main_text_view.get_buffer().get_end_iter(),info) \n\t\tself.main_text_view.get_buffer().insert(self.main_text_view.get_buffer().get_end_iter(),info2)\n\n\n\n\t\"\"\"\n\t\tCreates a new proteum session\n\t\"\"\"\t \n\tdef on_mnu_new_activate(self, widget, data=None):\n\n\t\tentrys = []\n\n\t\twhile True:\n\n\t\t\tresult = self.new_dialog.run()\n\n\t\t\t#If Cancel close the dialog\n\t\t\tif result == Gtk.ResponseType.CANCEL:\n\t\t\t\tself.new_dialog.hide()\n\t\t\t\treturn\n\t\t\n\t\t\t#Get the result and session values\n\t\t\telif result == Gtk.ResponseType.OK:\n\n\t\t\t\tentrys = self.new_dialog.get_entry_info()\t\t\t\n\t\t\t\n\t\t\t\t#Verifies if the dir exists\n\t\t\t\tif not (os.path.isdir(entrys[0])):\n\t\t\t\t\tself.dialog_info(Gtk.MessageType.ERROR, \"This Directory Does Not Exist!\")\t\n\n\t\t\t\t#Verifies if the session is empty\n\t\t\t\telif entrys[1] in (None, \"\"):\n\t\t\t\t\tself.dialog_info(Gtk.MessageType.ERROR, \"Invalid Program Session Name!\") \n\n\t\t\t\t#Verifies if source code name is empty\n\t\t\t\telif entrys[2] in (None, 
\"\"):\n\t\t\t\t\tself.dialog_info(Gtk.MessageType.ERROR, \"Invalid Source Code Name!\")\n\n\t\t\t\t#Verifys executable is empty\n\t\t\t\telif entrys[3] in (None, \"\"):\n\t\t\t\t\tself.dialog_info(Gtk.MessageType.ERROR, \"Invalid Executable Name!\") \t\t\n\n\t\t\t\t#Verifys if compilation command is empty\n\t\t\t\telif entrys[4] in (None, \"\"):\n\t\t\t\t\tself.dialog_info(Gtk.MessageType.ERROR, \"Invalid Compilation Command!\") \t\t\n\t\t\t\n\t\t\t\telse:\n\n\t\t\t\t\t#Enable TestCase, Mutants, Reports, Status Menu\n\t\t\t\t\tself.mnu_mutants.set_sensitive(True)\n\t\t\t\t\tself.mnu_tcase.set_sensitive(True)\n\t\t\t\t\tself.mnu_report.set_sensitive(True)\n\t\t\t\t\tself.mnu_status.set_sensitive(True)\n\t\t\t\t\t\t\n\t\t\t\t\t#Hide the dialog\n\t\t\t\t\tself.new_dialog.hide()\n\t\t\t\t\tbreak\n\t\t\n\t\t#Entrys = [dir,session,source,exec,compile,type]\n\t\tself.directory = entrys[0] \n\t\tself.session\t = entrys[1]\n\t\tself.source_code = entrys[2]\n\t\tself.source\t = source_code[:-2] #remove the \".c\"\n\t\tself.executable = entrys[3]\n\t\tself.compile_command = entrys[4]\n\t\tself.type_session\t= entrys[5].lower() #Lower case\"\n\n\t\t#Creates a new test session\n\t\tstatment1 = self.compile_command\n\t\tstatment2 = 'test-new -%s -S %s -E %s -C \"%s\" -D %s %s' % (self.type_session, self.source, self.executable, self.compile_command, self.directory, self.session)\n\t\tstatment3 = 'instrum -D %s -EE %s __%s' % (self.directory, self.session, self.source)\n\t\tstatment4 = 'instrum -build __%s %s' % (self.source, self.session)\n\n\t\tstatments = [statment1,statment2,statment3,statment4]\n\n\t\t#Enter in the directory\n\t\tos.chdir(self.directory)\n\n\t\t#Execute the statments\n\t\tfor statment in statments:\n\n\t\t\t#Process each statment\n\t\t\tresult = str(subprocess.check_output(statment, shell=True))\n\t\t\tself.main_text_view.get_buffer().insert(self.main_text_view.get_buffer().get_end_iter(),result)\n\t\t\t\n\n\n\t\"\"\"\n\t\tEnds the execution\n\t\"\"\"\n\tdef on_mnu_close_activate(self, widget, data=None):\n\t\tself.destroy()\n################# Program Test Menu actions end #########################\n\n\n\n\n################# Test Case Menu actions #############################\n\n\t\"\"\"\n\t\tAdd one test case to the session\n\t\"\"\"\n\tdef on_mnu_add_activate(self, widget, data=None):\n\t\tresult = self.test_add_dialog.run()\n\n\t\t#If it's cancel, close the window\n\t\tif result == Gtk.ResponseType.CANCEL:\n\t\t\tself.test_add_dialog.hide()\n\n\t\t#If it's ok, grab the inputs and add the test case\n\t\telif result == Gtk.ResponseType.OK:\n\n\t\t\tinputs = self.test_add_dialog.get_entry_param()\n\n\t\t\tstatment = 'tcase-add -timeout 300 -trace -p \"%s\" %s' % (inputs, self.session)\n\t\t\t\n\t\t\t#Enter in the directory\n\t\t\tos.chdir(self.directory)\n\t\t\t\n\t\t\t#Process each statment\n\t\t\tresult = str(subprocess.check_output(statment, shell=True))\n\n\t\t\t#Show in textview\n\t\t\tinfo = \"\\nPROTEUM> %s \\n\" % (statment)\n\t\t\tself.main_text_view.get_buffer().insert(self.main_text_view.get_buffer().get_end_iter(),info)\n\t\t\tself.main_text_view.get_buffer().insert(self.main_text_view.get_buffer().get_end_iter(),result)\n\n\t\t\t#Hide the dialog\n\t\t\tself.test_add_dialog.hide() \n\n\n\n\t\"\"\"\n\t\tView test cases for that session\n\t\"\"\"\n\tdef on_mnu_view_activate(self, widget, data=None):\n\n\n\t\t#Always start with the information of the first test case\n\t\tself.test_view_dialog.tcase_id = 1\n\t\tstatment = 'tcase -l -D %s -x %d %s' % (self.directory, 
self.test_view_dialog.tcase_id, self.session)\n\n\t\t#Process the statment\n\t\tresult = str(subprocess.check_output(statment, shell=True))\n\t\t\n\t\t#There arent test cases to view\t\t\n\t\tif not result:\n\t\t\tself.dialog_info(Gtk.MessageType.WARNING, \"No Test Case to View!\")\n\n\t\telse:\n\t\t\t#Print into textview\n\t\t\tinfo = \"\\nPROTEUM> %s \\n\" % (statment)\n\t\t\tself.main_text_view.get_buffer().insert(self.main_text_view.get_buffer().get_end_iter(),info)\n\t\t\tself.main_text_view.get_buffer().insert(self.main_text_view.get_buffer().get_end_iter(),result)\n\n\t\t\t#Extract the info to pass to the entrys\n\t\t\ttcase_info = []\n\t\t\ttcase_info = self.test_view_dialog.get_tcase_info(result) #Use the result to extract the tcase information\n\t\t\t\n\t\t\t#Set the information in the entrys\n\t\t\tself.test_view_dialog.set_entrys(tcase_info) #Use the tcase info to set the values into the entrys in test_view_dialog\n\n\t\t\t#Information used to run proteum modules and text view to update the content in principal dialog\n\t\t\tinformation = [self.session, self.directory, self.main_text_view]\n\n\t\t\t#Send information to Textview Dialog instance\n\t\t\tself.test_view_dialog.recive_information(information)\n\n\t\t\t#Run the dialog\n\t\t\tresult = self.test_view_dialog.run()\n\n\t\t\t#If it's cancel, close the window\n\t\t\tif result == Gtk.ResponseType.CANCEL:\n\t\t\t\tself.test_view_dialog.hide()\n\n\t\t\t#If it's ok, hide windows too..\n\t\t\telif result == Gtk.ResponseType.OK:\n\t\t\t\tself.test_view_dialog.hide()\n\n\n\t\"\"\"\n\t\tDelete some test cases\n\t\"\"\"\n\tdef on_mnu_delete_activate(self, widget, data=None):\n\t\t\n\t\t#Define Proteum statment\n\t\tstatment = \"tcase -l -D %s %s\" % (self.directory,self.session)\n\n\t\t#Get shell output return from Proteum\n\t\tshell_output = str(subprocess.check_output(statment, shell=True))\n\n\t\t#Retrive the testcase information \n\t\ttest_set = self.test_del_dialog.get_tcase_info(shell_output)\n\n\t\t#There arent testcases to delete\n\t\tif len(test_set) < 1:\n\t\t\tself.dialog_info(Gtk.MessageType.WARNING, \"No Test Cases to delete!\")\n\t\t\n\t\t#There are some test cases..\n\t\telse:\n\n\t\t\t#Print into textview\n\t\t\tinfo = \"\\nPROTEUM> %s \\n\" % (statment)\n\t\t\tself.main_text_view.get_buffer().insert(self.main_text_view.get_buffer().get_end_iter(),info)\n\n\t\t\t#Send test_set to TestdialogView\n\t\t\tself.test_del_dialog.recive_test_set(test_set)\n\n\t\t\twhile True:\n\n\t\t\t\t#Run the dialog\n\t\t\t\tresult = self.test_del_dialog.run()\n\n\t\t\t\t#If it's cancel, close the window\n\t\t\t\tif result == Gtk.ResponseType.CANCEL:\n\t\t\t\t\tself.test_del_dialog.hide()\n\t\t\t\t\tbreak #get off the while\n\n\t\t\t\t#If it's ok, delete some test cases..\n\t\t\t\telif result == Gtk.ResponseType.OK:\n\n\t\t\t\t\t#Get information from Test del dialog\n\t\t\t\t\tstart,end = self.test_del_dialog.get_infos()\n\n\t\t\t\t\t#It's a valid interval?\n\t\t\t\t\tif start > end:\n\t\t\t\t\t\tself.dialog_info(Gtk.MessageType.WARNING, \"Invalid Range!\")\n\t\t\t\t\t\n\t\t\t\t\t#Everything ok.. 
delete the testcases\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\t\n\t\t\t\t\t\t#Create the statement\n\t\t\t\t\t\tstatment = \"tcase -d -D %s -f %d -t %d %s\" % (self.directory,start,end,self.session)\n\t\t\t\t\t\t\n\t\t\t\t\t\t#Process the statment\n\t\t\t\t\t\tresult = str(subprocess.check_output(statment, shell=True))\n\n\t\t\t\t\t\t#Print into textview\n\t\t\t\t\t\tinfo = \"\\nPROTEUM> %s \\n\" % (statment)\n\t\t\t\t\t\tself.main_text_view.get_buffer().insert(self.main_text_view.get_buffer().get_end_iter(),info)\n\n\t\t\t\t\t\t#Hide the dialog\t\t\t\t\t\t\n\t\t\t\t\t\tself.test_del_dialog.hide()\n\t\t\t\n\t\t\t\t\t\tbreak #get of the while\n\t\t\t\n\t\t\t#Reset default values when close the dialog\n\t\t\tself.test_del_dialog.recive_test_set(test_set)\n\n\n\n\t\"\"\"\n\t\tSet active test cases\n\t\"\"\"\n\tdef on_mnu_effective_activate(self, widget, data=None):\n\t\t\n\t\tstatment = ''\n\n\t\twhile True:\n\t\t\t\n\t\t\tresult = self.test_effect_dialog.run()\n\n\t\t\t#Get combo value\n\t\t\tvalue = self.test_effect_dialog.combo_value \n\n\t\t\t#If it's cancel, close the window\n\t\t\tif result == Gtk.ResponseType.CANCEL:\n\t\t\t\tself.test_effect_dialog.hide()\n\t\t\t\tbreak\n\n\t\t\t#If it's ok, delete or disable some test cases..\n\t\t\telif result == Gtk.ResponseType.OK:\n\t\t\n\t\t\t\t#Disable testcases\n\t\t\t\tif value == 'Disable':\n\n\t\t\t\t\tstatment = \"list-good -i -%s -D %s %s\" % (self.type_session.lower(), self.directory, self.session)\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t#Delete testcases\n\t\t\t\telif value == 'Delete':\n\t\t\t\t\tstatment = \"list-good -d -%s -D %s %s\" % (self.type_session.lower(), self.directory, self.session)\n\t\t\t\t\tbreak\n\n\t\t\t\telse:\n\t\t\t\t\tself.dialog_info(Gtk.MessageType.WARNING, \"Invalid Option!\")\n\n\t\t#Hide the dialog\n\t\tself.test_effect_dialog.hide()\n\n\t\t#Process the statment\n\t\tresult = str(subprocess.check_output(statment, shell=True))\n\n\t\t#Print into textview\n\t\tinfo = \"\\nPROTEUM> %s \\n\" % (statment)\n\t\tself.main_text_view.get_buffer().insert(self.main_text_view.get_buffer().get_end_iter(),info)\n\n\n\n\t\"\"\"\n\t\tImport test cases from file\n\t\"\"\"\n\tdef on_mnu_import_activate(self, widget, data=None):\n\t\tresult = self.test_import_dialog.run()\n\n################# Test Case Menu actions End #############################\n\n\n\n\n\t\t\n################# Mutants Menu actions #############################\n\tdef on_mnu_gen_unit_activate(self, widget, data=None):\n\t\tresult = self.gen_unit_dialog.run()\n\n\tdef on_mnu_gen_interface_activate(self, widget, data=None):\n\t\tresult = self.gen_interface_dialog.run()\n\n\tdef on_mnu_view_mutant_activate(self, widget, data=None):\n\t\tresult = self.mutants_sel_inter_dialog.run()\n\n\tdef on_mnu_equivalents_activate(self, widget, data=None):\n\t\tresult = self.mutants_equiv_dialog.run()\n\n\tdef on_mnu_execute_activate(self, widget, data=None):\n\t\tpass\n\n\tdef on_mnu_exec_dual_activate(self, widget, data=None):\n\t\tpass\n\n\tdef on_mnu_exec_parallel_activate(self, widget, data=None):\n\t\tpass\n\n\tdef on_mnu_sel_unit_activate(self, widget, data=None):\n\t\tresult = self.mutants_sel_unit_dialog.run()\n\n\tdef on_mnu_sel_interface_activate(self, widget, data=None):\n\t\tresult = self.mutants_view_dialog.run()\n################# Mutants Menu actions End #############################\n\n\n\n\n################# Properties Menu actions\t #############################\n\tdef on_mnu_report_tcases_activate(self, widget, 
data=None):\n\t\tself.report_dialog.run()\n################# Properties Menu actions End #############################\n\n\n\n\n################# Properties Menu actions\t #############################\n\tdef on_properties_activate(self, widget, data=None):\n\t\tself.properties_dialog.run()\n################# Properties Menu actions End #############################\n\n\n\n\n################# Status Menu actions\t #############################\n\tdef on_status_activate(self, widget, data=None):\n\t\tos.chdir(self.directory)\n\n\t\tinfo = self.read_ptmFile(self.session) \n\t\tresult = self.read_report(self.session)\n\n\t\t#Add extra information to result\n\t\tresult.append(self.directory)\n\t\tresult.append(self.session)\n\t\t\t\t\n\t\tself.status_dialog.set_entrys(info,result)\n\n\t\tself.status_dialog.run()\n################# Status Menu actions End #############################\n\n\n\n\n################# Quit Menu actions\t #############################\n\tdef on_quit_activate(self, widget, data=None):\n\t\tself.destroy()\n################# Quit Menu actions End #############################\n\n\n\n\n\n","sub_path":"interface/InterfaceWindow.py","file_name":"InterfaceWindow.py","file_ext":"py","file_size_in_byte":20210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"302304128","text":"D = {\"rose\":(\"red\",\"black\",\"purple\"), \"lily\":(\"red\",\"white\",\"violet\")}\r\n\r\nL = list() \r\nfor i in D:\r\n L.append(len(D[i]))\r\n \r\nMax = max(L)\r\n\r\nfor i in D:\r\n if len(D[i]) == Max:\r\n print(i)\r\n break\r\n \r\nl1 = list()\r\nl2 = list()\r\nfor i in D.values():\r\n for j in i:\r\n l1.append(j)\r\n \r\nfor i in l1:\r\n if l1.count(i) not in l2:\r\n l2.append(l1.count(i))\r\n\r\nMAX = max(l2)\r\n\r\nfor i in l1:\r\n if l1.count(i) == MAX:\r\n print(i)\r\n break","sub_path":"Answer 33.py","file_name":"Answer 33.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"146646291","text":"# coding=utf-8\n# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom pants.backend.native.config.environment import (Assembler, CCompiler, CppCompiler,\n GCCCCompiler, GCCCppCompiler, Linker,\n LLVMCCompiler, LLVMCppCompiler, Platform)\nfrom pants.backend.native.subsystems.binaries.binutils import Binutils\nfrom pants.backend.native.subsystems.binaries.gcc import GCC\nfrom pants.backend.native.subsystems.binaries.llvm import LLVM\nfrom pants.backend.native.subsystems.libc_dev import LibcDev\nfrom pants.backend.native.subsystems.xcode_cli_tools import XCodeCLITools\nfrom pants.engine.rules import RootRule, rule\nfrom pants.engine.selectors import Get, Select\nfrom pants.subsystem.subsystem import Subsystem\nfrom pants.util.memo import memoized_property\n\n\nclass NativeToolchain(Subsystem):\n \"\"\"Abstraction over platform-specific tools to compile and link native code.\n\n When this subsystem is consumed, Pants will download and unpack archives (if necessary) which\n together provide an appropriate \"native toolchain\" for the host platform: a compiler and linker,\n usually. 
This subsystem exposes the toolchain through `@rule`s, which tasks then request during\n setup or execution (synchronously, for now).\n\n NB: Currently, on OSX, Pants will find and invoke the XCode command-line tools, or error out with\n installation instructions if the XCode tools could not be found.\n \"\"\"\n\n options_scope = 'native-toolchain'\n\n @classmethod\n def subsystem_dependencies(cls):\n return super(NativeToolchain, cls).subsystem_dependencies() + (\n Binutils.scoped(cls),\n GCC.scoped(cls),\n LibcDev.scoped(cls),\n LLVM.scoped(cls),\n XCodeCLITools.scoped(cls),\n )\n\n @memoized_property\n def _binutils(self):\n return Binutils.scoped_instance(self)\n\n @memoized_property\n def _gcc(self):\n return GCC.scoped_instance(self)\n\n @memoized_property\n def _llvm(self):\n return LLVM.scoped_instance(self)\n\n @memoized_property\n def _xcode_cli_tools(self):\n return XCodeCLITools.scoped_instance(self)\n\n @memoized_property\n def _libc_dev(self):\n return LibcDev.scoped_instance(self)\n\n\n@rule(Linker, [Select(Platform), Select(NativeToolchain)])\ndef select_linker(platform, native_toolchain):\n # TODO(#5933): make it possible to yield Get with a non-static\n # subject type and use `platform.resolve_platform_specific()`, something like:\n # linker = platform.resolve_platform_specific({\n # 'darwin': lambda: Get(Linker, XCodeCLITools, native_toolchain._xcode_cli_tools),\n # 'linux': lambda: Get(Linker, Binutils, native_toolchain._binutils),\n # })\n if platform.normalized_os_name == 'darwin':\n # TODO(#5663): turn this into LLVM when lld works.\n linker = yield Get(Linker, XCodeCLITools, native_toolchain._xcode_cli_tools)\n else:\n linker = yield Get(Linker, Binutils, native_toolchain._binutils)\n\n libc_dirs = native_toolchain._libc_dev.get_libc_dirs(platform)\n\n # NB: We need to link through a provided compiler's frontend, and we need to know where all the\n # compiler's libraries/etc are, so we set the executable name to the C++ compiler, which can find\n # its own set of C++-specific files for the linker if necessary. Using e.g. 'g++' as the linker\n # appears to produce byte-identical output when linking even C-only object files, and also\n # happens to work when C++ is used.\n gcc_c_compiler = yield Get(GCCCCompiler, NativeToolchain, native_toolchain)\n c_compiler = gcc_c_compiler.c_compiler\n gcc_cpp_compiler = yield Get(GCCCppCompiler, NativeToolchain, native_toolchain)\n cpp_compiler = gcc_cpp_compiler.cpp_compiler\n\n # NB: If needing to create an environment for process invocation that could use either a compiler\n # or a linker (e.g. 
when we compile native code from `python_dist()`s), use the environment from\n # the linker object (in addition to any further customizations), which has the paths from the C\n # and C++ compilers baked in.\n # FIXME(#5951): we need a way to compose executables more hygienically.\n linker = Linker(\n path_entries=(\n c_compiler.path_entries +\n cpp_compiler.path_entries +\n linker.path_entries),\n exe_filename=cpp_compiler.exe_filename,\n library_dirs=(\n libc_dirs +\n c_compiler.library_dirs +\n cpp_compiler.library_dirs +\n linker.library_dirs))\n\n yield linker\n\n\n@rule(LLVMCCompiler, [Select(Platform), Select(NativeToolchain)])\ndef select_llvm_c_compiler(platform, native_toolchain):\n original_llvm_c_compiler = yield Get(LLVMCCompiler, LLVM, native_toolchain._llvm)\n provided_clang = original_llvm_c_compiler.c_compiler\n\n if platform.normalized_os_name == 'darwin':\n xcode_clang = yield Get(CCompiler, XCodeCLITools, native_toolchain._xcode_cli_tools)\n clang_with_xcode_paths = CCompiler(\n path_entries=(provided_clang.path_entries + xcode_clang.path_entries),\n exe_filename=provided_clang.exe_filename,\n library_dirs=(provided_clang.library_dirs + xcode_clang.library_dirs),\n include_dirs=(xcode_clang.include_dirs + provided_clang.include_dirs))\n final_llvm_c_compiler = LLVMCCompiler(clang_with_xcode_paths)\n else:\n gcc_c_compiler = yield Get(GCCCCompiler, GCC, native_toolchain._gcc)\n provided_gcc = gcc_c_compiler.c_compiler\n clang_with_gcc_libs = CCompiler(\n path_entries=provided_clang.path_entries,\n exe_filename=provided_clang.exe_filename,\n # We need this version of GLIBCXX to be able to run, unfortunately.\n library_dirs=(provided_gcc.library_dirs + provided_clang.library_dirs),\n include_dirs=(provided_clang.include_dirs + provided_gcc.include_dirs))\n final_llvm_c_compiler = LLVMCCompiler(clang_with_gcc_libs)\n\n yield final_llvm_c_compiler\n\n\n@rule(LLVMCppCompiler, [Select(Platform), Select(NativeToolchain)])\ndef select_llvm_cpp_compiler(platform, native_toolchain):\n original_llvm_cpp_compiler = yield Get(LLVMCppCompiler, LLVM, native_toolchain._llvm)\n provided_clangpp = original_llvm_cpp_compiler.cpp_compiler\n\n if platform.normalized_os_name == 'darwin':\n xcode_clang = yield Get(CppCompiler, XCodeCLITools, native_toolchain._xcode_cli_tools)\n clang_with_xcode_paths = CppCompiler(\n path_entries=(provided_clangpp.path_entries + xcode_clang.path_entries),\n exe_filename=provided_clangpp.exe_filename,\n library_dirs=(provided_clangpp.library_dirs + xcode_clang.library_dirs),\n include_dirs=(xcode_clang.include_dirs + provided_clangpp.include_dirs))\n final_llvm_cpp_compiler = LLVMCppCompiler(clang_with_xcode_paths)\n else:\n gcc_cpp_compiler = yield Get(GCCCppCompiler, GCC, native_toolchain._gcc)\n provided_gpp = gcc_cpp_compiler.cpp_compiler\n clang_with_gpp_libs = CppCompiler(\n path_entries=provided_clangpp.path_entries,\n exe_filename=provided_clangpp.exe_filename,\n # We need this version of GLIBCXX to be able to run, unfortunately.\n library_dirs=(provided_gpp.library_dirs + provided_clangpp.library_dirs),\n include_dirs=(provided_clangpp.include_dirs + provided_gpp.include_dirs))\n final_llvm_cpp_compiler = LLVMCppCompiler(clang_with_gpp_libs)\n\n yield final_llvm_cpp_compiler\n\n\n@rule(GCCCCompiler, [Select(Platform), Select(NativeToolchain)])\ndef select_gcc_c_compiler(platform, native_toolchain):\n original_gcc_c_compiler = yield Get(GCCCCompiler, GCC, native_toolchain._gcc)\n provided_gcc = original_gcc_c_compiler.c_compiler\n\n # GCC needs an 
assembler, so we provide that (platform-specific) tool here.\n if platform.normalized_os_name == 'darwin':\n xcode_tools_assembler = yield Get(Assembler, XCodeCLITools, native_toolchain._xcode_cli_tools)\n assembler_paths = xcode_tools_assembler.path_entries\n\n # GCC needs access to some headers that are only provided by the XCode toolchain\n # currently (e.g. \"_stdio.h\"). These headers are unlikely to change across versions, so this is\n # probably safe.\n # TODO: we should be providing all of these (so we can eventually phase out XCodeCLITools\n # entirely).\n # This mutual recursion with select_llvm_c_compiler() works because we only pull in gcc in that\n # method if we are on Linux.\n xcode_clang = yield Get(CCompiler, XCodeCLITools, native_toolchain._xcode_cli_tools)\n\n new_library_dirs = provided_gcc.library_dirs + xcode_clang.library_dirs\n new_include_dirs = xcode_clang.include_dirs + provided_gcc.include_dirs\n else:\n binutils_assembler = yield Get(Assembler, Binutils, native_toolchain._binutils)\n assembler_paths = binutils_assembler.path_entries\n\n new_library_dirs = provided_gcc.library_dirs\n new_include_dirs = provided_gcc.include_dirs\n\n gcc_with_assembler = CCompiler(\n path_entries=(provided_gcc.path_entries + assembler_paths),\n exe_filename=provided_gcc.exe_filename,\n library_dirs=new_library_dirs,\n include_dirs=new_include_dirs)\n\n final_gcc_c_compiler = GCCCCompiler(gcc_with_assembler)\n yield final_gcc_c_compiler\n\n\n@rule(GCCCppCompiler, [Select(Platform), Select(NativeToolchain)])\ndef select_gcc_cpp_compiler(platform, native_toolchain):\n original_gcc_cpp_compiler = yield Get(GCCCppCompiler, GCC, native_toolchain._gcc)\n provided_gpp = original_gcc_cpp_compiler.cpp_compiler\n\n if platform.normalized_os_name == 'darwin':\n xcode_tools_assembler = yield Get(Assembler, XCodeCLITools, native_toolchain._xcode_cli_tools)\n assembler_paths = xcode_tools_assembler.path_entries\n\n xcode_clangpp = yield Get(CppCompiler, XCodeCLITools, native_toolchain._xcode_cli_tools)\n\n new_library_dirs = provided_gpp.library_dirs + xcode_clangpp.library_dirs\n new_include_dirs = xcode_clangpp.include_dirs + provided_gpp.include_dirs\n else:\n binutils_assembler = yield Get(Assembler, Binutils, native_toolchain._binutils)\n assembler_paths = binutils_assembler.path_entries\n\n new_library_dirs = provided_gpp.library_dirs\n new_include_dirs = provided_gpp.include_dirs\n\n gcc_with_assembler = CppCompiler(\n path_entries=(provided_gpp.path_entries + assembler_paths),\n exe_filename=provided_gpp.exe_filename,\n library_dirs=new_library_dirs,\n include_dirs=new_include_dirs)\n\n final_gcc_cpp_compiler = GCCCppCompiler(gcc_with_assembler)\n yield final_gcc_cpp_compiler\n\n\n@rule(CCompiler, [Select(NativeToolchain)])\ndef select_c_compiler(native_toolchain):\n llvm_c_compiler = yield Get(LLVMCCompiler, NativeToolchain, native_toolchain)\n yield llvm_c_compiler.c_compiler\n\n\n@rule(CppCompiler, [Select(NativeToolchain)])\ndef select_cpp_compiler(native_toolchain):\n llvm_cpp_compiler = yield Get(LLVMCppCompiler, NativeToolchain, native_toolchain)\n yield llvm_cpp_compiler.cpp_compiler\n\n\ndef create_native_toolchain_rules():\n return [\n select_linker,\n select_llvm_c_compiler,\n select_llvm_cpp_compiler,\n select_gcc_c_compiler,\n select_gcc_cpp_compiler,\n select_c_compiler,\n select_cpp_compiler,\n RootRule(NativeToolchain),\n 
]\n","sub_path":"src/python/pants/backend/native/subsystems/native_toolchain.py","file_name":"native_toolchain.py","file_ext":"py","file_size_in_byte":11096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"142099716","text":"import csv\nimport json\nimport pandas\n\ndef csv_reader(file_obj):\n \"\"\"\n Read a csv file\n \"\"\"\n reader = csv.DictReader(file_obj)\n formatted = []\n for row in reader:\n if (row[\"reviews.rating\"] == \"5\" or row[\"reviews.rating\"] == \"1\"):\n formatted.append([row[\"reviews.text\"], row[\"reviews.rating\"]])\n for i in range(int(len(formatted)/10), len(formatted)):\n formatted[i][1] = ''\n\n jsonData = []\n for r in formatted:\n new_r = {}\n new_r['text'] = r[0]\n if (len(r[1])):\n new_r['label'] = r[1]\n jsonData.append(new_r)\n with open('hotels_data.csv', 'w') as outfile:\n for js in jsonData:\n outfile.write(\"\\\"\" + js['text'].replace('\"', '\\\\\"') + '\\\"\\n')\n\n with open('hotels_labels.csv', 'w') as outfile:\n outfile.write('text,label\\n')\n for js in jsonData:\n if (js.get('label')):\n outfile.write(\"\\\"\" + js['text'].replace('\"', '\\\\\"') + '\\\",\\\"' + js['label'] + '\\\"\\n')\n\n \nif __name__ == \"__main__\":\n csv_path = \"hotel_reviews.csv\"\n with open(csv_path, \"r\") as f_obj:\n csv_reader(f_obj)","sub_path":"data/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"475506887","text":"import os\nimport sys\nimport gym\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\n\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.utils import to_categorical\n\nfrom utils.continuous_environments import Environment\nfrom utils.networks import get_session\n\ngym.logger.set_level(40)\n\nclass DDPG:\n \"\"\" Deep Deterministic Policy Gradient (DDPG) Helper Class\n \"\"\"\n\n def __init__(self, act_dim, env_dim, act_range, k, buffer_size = 20000, gamma = 0.99, lr = 0.00005, tau = 0.001):\n \"\"\" Initialization\n \"\"\"\n # Environment and A2C parameters\n self.act_dim = act_dim\n self.act_range = act_range\n self.env_dim = (k,) + env_dim\n self.gamma = gamma\n self.lr = lr\n # Create actor and critic networks\n self.actor = Actor(self.env_dim, act_dim, act_range, 0.1 * lr, tau)\n self.critic = Critic(self.env_dim, act_dim, lr, tau)\n self.buffer = MemoryBuffer(buffer_size)\n\n def policy_action(self, s):\n \"\"\" Use the actor to predict value\n \"\"\"\n return self.actor.predict(s)[0]\n\n def bellman(self, rewards, q_values, dones):\n \"\"\" Use the Bellman Equation to compute the critic target\n \"\"\"\n critic_target = np.asarray(q_values)\n for i in range(q_values.shape[0]):\n if dones[i]:\n critic_target[i] = rewards[i]\n else:\n critic_target[i] = rewards[i] + self.gamma * q_values[i]\n return critic_target\n\n def memorize(self, state, action, reward, done, new_state):\n \"\"\" Store experience in memory buffer\n \"\"\"\n self.buffer.memorize(state, action, reward, done, new_state)\n\n def sample_batch(self, batch_size):\n return self.buffer.sample_batch(batch_size)\n\n def update_models(self, states, actions, critic_target):\n \"\"\" Update actor and critic networks from sampled experience\n \"\"\"\n # Train critic\n self.critic.train_on_batch(states, actions, critic_target)\n # Q-Value Gradients under Current Policy\n actions = self.actor.model.predict(states)\n grads = 
self.critic.gradients(states, actions)\n # Train actor\n self.actor.train(states, actions, np.array(grads).reshape((-1, self.act_dim)))\n # Transfer weights to target networks at rate Tau\n self.actor.transfer_weights()\n self.critic.transfer_weights()\n\n def train(self, env, args, summary_writer):\n results = []\n\n # First, gather experience\n tqdm_e = tqdm(range(args.nb_episodes), desc='Score', leave=True, unit=\" episodes\")\n for e in tqdm_e:\n\n # Reset episode\n time, cumul_reward, done = 0, 0, False\n old_state = env.reset()\n actions, states, rewards = [], [], []\n noise = OrnsteinUhlenbeckProcess(size=self.act_dim)\n\n while not done:\n if args.render: env.render()\n # Actor picks an action (following the deterministic policy)\n a = self.policy_action(old_state)\n # Clip continuous values to be valid w.r.t. environment\n a = np.clip(a+noise.generate(time), -self.act_range, self.act_range)\n # Retrieve new state, reward, and whether the state is terminal\n new_state, r, done, _ = env.step(a)\n # Add outputs to memory buffer\n self.memorize(old_state, a, r, done, new_state)\n # Sample experience from buffer\n states, actions, rewards, dones, new_states, _ = self.sample_batch(args.batch_size)\n # Predict target q-values using target networks\n q_values = self.critic.target_predict([new_states, self.actor.target_predict(new_states)])\n # Compute critic target\n critic_target = self.bellman(rewards, q_values, dones)\n # Train both networks on sampled batch, update target networks\n self.update_models(states, actions, critic_target)\n # Update current state\n old_state = new_state\n cumul_reward += r\n time += 1\n\n # Gather stats every episode for plotting\n if(args.gather_stats):\n mean, stdev = gather_stats(self, env)\n results.append([e, mean, stdev])\n\n # Export results for Tensorboard\n score = tfSummary('score', cumul_reward)\n summary_writer.add_summary(score, global_step=e)\n summary_writer.flush()\n # Display score\n tqdm_e.set_description(\"Score: \" + str(cumul_reward))\n tqdm_e.refresh()\n\n return results\n\n def save_weights(self, path):\n path += '_LR_{}'.format(self.lr)\n self.actor.save(path)\n self.critic.save(path)\n\n def load_weights(self, path_actor, path_critic):\n self.critic.load_weights(path_critic)\n self.actor.load_weights(path_actor)\n\n\nclass Actor:\n \"\"\" Actor Network for the DDPG Algorithm\n \"\"\"\n\n def __init__(self, inp_dim, out_dim, act_range, lr, tau):\n self.env_dim = inp_dim\n self.act_dim = out_dim\n self.act_range = act_range\n self.tau = tau\n self.lr = lr\n self.model = self.network()\n self.target_model = self.network()\n self.adam_optimizer = self.optimizer()\n\n def network(self):\n \"\"\" Actor Network for Policy function Approximation, using a tanh\n activation for continuous control. 
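(tanh bounds each raw action to [-1, 1]; the final Lambda layer then\n        multiplies by act_range, so an act_range of 2 yields actions on [-2, 2];\n        a worked note on the scaling below, not extra behavior.) 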
We add parameter noise to encourage\n exploration, and balance it with Layer Normalization.\n \"\"\"\n inp = Input((self.env_dim))\n #\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #\n x = Flatten()(x)\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n #\n out = Dense(self.act_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n #\n return Model(inp, out)\n\n def predict(self, state):\n \"\"\" Action prediction\n \"\"\"\n return self.model.predict(np.expand_dims(state, axis=0))\n\n def target_predict(self, inp):\n \"\"\" Action prediction (target network)\n \"\"\"\n return self.target_model.predict(inp)\n\n def transfer_weights(self):\n \"\"\" Transfer model weights to target model with a factor of Tau\n \"\"\"\n W, target_W = self.model.get_weights(), self.target_model.get_weights()\n for i in range(len(W)):\n target_W[i] = self.tau * W[i] + (1 - self.tau)* target_W[i]\n self.target_model.set_weights(target_W)\n\n def train(self, states, actions, grads):\n \"\"\" Actor Training\n \"\"\"\n self.adam_optimizer([states, grads])\n\n def optimizer(self):\n \"\"\" Actor Optimizer\n \"\"\"\n action_gdts = K.placeholder(shape=(None, self.act_dim))\n params_grad = tf.gradients(self.model.output, self.model.trainable_weights, -action_gdts)\n grads = zip(params_grad, self.model.trainable_weights)\n return K.function([self.model.input, action_gdts], [tf.train.AdamOptimizer(self.lr).apply_gradients(grads)])\n\n def save(self, path):\n self.model.save_weights(path + '_actor.h5')\n\n def load_weights(self, path):\n self.model.load_weights(path)\n\n\nclass Critic:\n \"\"\" Critic for the DDPG Algorithm, Q-Value function approximator\n \"\"\"\n\n def __init__(self, inp_dim, out_dim, lr, tau):\n # Dimensions and Hyperparams\n self.env_dim = inp_dim\n self.act_dim = out_dim\n self.tau, self.lr = tau, lr\n # Build models and target models\n self.model = self.network()\n self.target_model = self.network()\n self.model.compile(Adam(self.lr), 'mse')\n self.target_model.compile(Adam(self.lr), 'mse')\n # Function to compute Q-value gradients (Actor Optimization)\n self.action_grads = K.function([self.model.input[0], self.model.input[1]], K.gradients(self.model.output, [self.model.input[1]]))\n\n def network(self):\n \"\"\" Assemble Critic network to predict q-values\n \"\"\"\n state = Input((self.env_dim))\n action = Input((self.act_dim,))\n x = Dense(256, activation='relu')(state)\n x = concatenate([Flatten()(x), action])\n x = Dense(128, activation='relu')(x)\n out = Dense(1, activation='linear', kernel_initializer=RandomUniform())(x)\n return Model([state, action], out)\n\n def gradients(self, states, actions):\n \"\"\" Compute Q-value gradients w.r.t. 
states and policy-actions\n \"\"\"\n return self.action_grads([states, actions])\n\n def target_predict(self, inp):\n \"\"\" Predict Q-Values using the target network\n \"\"\"\n return self.target_model.predict(inp)\n\n def train_on_batch(self, states, actions, critic_target):\n \"\"\" Train the critic network on batch of sampled experience\n \"\"\"\n return self.model.train_on_batch([states, actions], critic_target)\n\n def transfer_weights(self):\n \"\"\" Transfer model weights to target model with a factor of Tau\n \"\"\"\n W, target_W = self.model.get_weights(), self.target_model.get_weights()\n for i in range(len(W)):\n target_W[i] = self.tau * W[i] + (1 - self.tau)* target_W[i]\n self.target_model.set_weights(target_W)\n\n def save(self, path):\n self.model.save_weights(path + '_critic.h5')\n\n def load_weights(self, path):\n self.model.load_weights(path)\n\n\nclass Environment(object):\n \"\"\" Environment Helper Class (Multiple State Buffer) for Continuous Action Environments\n (MountainCarContinuous-v0, LunarLanderContinuous-v2, etc..), and MujuCo Environments\n \"\"\"\n def __init__(self, gym_env, action_repeat):\n self.env = gym_env\n self.timespan = action_repeat\n self.gym_actions = 2 #range(gym_env.action_space.n)\n self.state_buffer = deque()\n\n def get_action_size(self):\n return self.env.action_space.n\n\n def get_state_size(self):\n return self.env.observation_space.shape\n\n def reset(self):\n \"\"\" Resets the game, clears the state buffer.\n \"\"\"\n # Clear the state buffer\n self.state_buffer = deque()\n x_t = self.env.reset()\n s_t = np.stack([x_t for i in range(self.timespan)], axis=0)\n for i in range(self.timespan-1):\n self.state_buffer.append(x_t)\n return s_t\n\n def step(self, action):\n x_t1, r_t, terminal, info = self.env.step(action)\n previous_states = np.array(self.state_buffer)\n s_t1 = np.empty((self.timespan, *self.env.observation_space.shape))\n s_t1[:self.timespan-1, :] = previous_states\n s_t1[self.timespan-1] = x_t1\n # Pop the oldest frame, add the current frame to the queue\n self.state_buffer.popleft()\n self.state_buffer.append(x_t1)\n return s_t1, r_t, terminal, info\n\n def render(self):\n return self.env.render()\n\n\n\n\n\ndef main(args=None):\n\n summary_writer = tf.summary.FileWriter(args.type + \"/tensorboard_\" + args.env)\n env = Environment(gym.make('LunarLanderContinuous-v2'))\n env.reset()\n state_dim = env.get_state_size()\n action_space = gym.make(args.env).action_space\n action_dim = action_space.high.shape[0]\n act_range = action_space.high\n\n algo = DDPG(action_dim, state_dim, act_range, args.consecutive_frames)\n stats = algo.train(env, args, summary_writer)\n\n df = pd.DataFrame(np.array(stats))\n df.to_csv(args.type + \"/logs.csv\", header=['Episode', 'Mean', 'Stddev'], float_format='%10.5f')\n\n # Save weights and close environments\n exp_dir = 'models/'\n if not os.path.exists(exp_dir):\n os.makedirs(exp_dir)\n\n algo.save_weights(exp_dir)\n env.env.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"DDPG/ddpg_lander/ddpglander.py","file_name":"ddpglander.py","file_ext":"py","file_size_in_byte":11950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"265453656","text":"class col:\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n\n\nsandwich = {\"ingredients\": [\"ham\", \"bread\", \"cheese\", \"tomatoes\"],\n \"meal\": \"lunch\",\n \"prep_time\": 10\n }\ncake = {\"ingredients\": [\"flour\", \"sugar\", \"eggs\"],\n 
\"meal\": \"dessert\",\n \"prep_time\": 60\n }\nsalad = {\"ingredients\": [\"avocado\", \"arugula\", \"tomatoes\", \"spinach\"],\n \"meal\": \"lunch\",\n \"prep_time\": 15\n }\ncookbook = {}\ncookbook.update({'sandwich': sandwich})\ncookbook.update({'cake': cake})\ncookbook.update({'salad': salad})\n\n\ndef display_help():\n print(col.OKBLUE, end=\"\")\n print(\"Please select an option by typing the corresponding number:\")\n print(\"1. Add a recipe\")\n print(\"2: Delete a recipe\")\n print(\"3: Print a recipe\")\n print(\"4: Print the cookbook\")\n print(\"5: Quit\\033[0m\")\n\n\ndef cookbook_add(name, ing, meal, time):\n recipe = {\n \"ingredients\": ing,\n \"meal\": meal,\n \"prep_time\": time\n }\n cookbook.update({name: recipe})\n\n\ndef cookbook_del(ssel):\n if ssel in cookbook:\n cookbook.pop(ssel)\n else:\n print(col.FAIL)\n print(\"Recipe for \", ssel, \" does not exist.\", col.ENDC, sep=\"\")\n print()\n\n\ndef cookbook_display(ssel):\n if ssel in cookbook:\n print(\"Recipe for \", ssel, \":\", sep=\"\")\n print(\"Ingredients list:\", cookbook[ssel][\"ingredients\"])\n print(\"To be eaten for \", cookbook[ssel][\"meal\"], \".\", sep=\"\")\n print(\"Takes\", cookbook[ssel][\"prep_time\"], \"minutes of cooking.\")\n else:\n print(col.FAIL, end=\"\")\n print(\"Recipe for \", ssel, \" does not exist.\", col.ENDC, sep=\"\")\n print()\n\n\ndef cookbook_display_all():\n for ssel in cookbook:\n cookbook_display(ssel)\n display_help()\n\n\ndisplay_help()\nwhile(1):\n sel = input()\n print()\n if (sel == \"1\"):\n name = input(\"Please enter the recipe's name to add:\\n\")\n ing = []\n print(\"Please enter the recipe's ingredients:\")\n print(\"(Use \"\"exit\"\" when you are done)\")\n while (1):\n in_ing = input()\n if (in_ing == \"exit\"):\n break\n ing.append(in_ing)\n meal = input(\"Please enter the recipe's type:\\n\")\n time = input(\"Please enter the recipe's preparation time in min:\\n\")\n cookbook_add(name, ing, meal, time)\n display_help()\n elif (sel == \"2\"):\n ssel = input(\"Please enter the recipe's name to delete it:\\n\")\n cookbook_del(ssel)\n display_help()\n elif (sel == \"3\"):\n ssel = input(\"Please enter the recipe's name to get its details:\\n\")\n print()\n cookbook_display(ssel)\n display_help()\n elif (sel == \"4\"):\n cookbook_display_all()\n elif (sel == \"5\"):\n print(\"Cookbook closed.\")\n break\n else:\n print(col.FAIL, end=\"\")\n print(\"This option does not exist, \", end=\"\")\n print(\"please type the corresponding number.\", col.ENDC)\n print(\"To exit, enter 5.\")\n","sub_path":"ex06/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"532680980","text":"# Maple Adjustment Period (57458) | Kanna 2nd Job\n\nhaku = 9130081\n\ncacophonous = 1142507\n\nsm.removeEscapeButton()\nsm.setSpeakerID(haku)\nsm.setBoxChat()\nsm.sendNext(\"Finally, your real skills are coming back! I'm tired of doing all the work!\")\n\nsm.flipBoxChat()\nsm.flipBoxChatPlayerAsSpeaker()\nsm.sendNext(\"What exactly do you do, other than sleep?\")\n\nsm.setSpeakerID(haku)\nsm.setBoxChat()\nsm.sendNext(\"I do all kinds of stuff... when I have enough Mana, \"\n\"which, might I add, I am still waiting for!\")\n\nsm.flipBoxChat()\nsm.flipBoxChatPlayerAsSpeaker()\nsm.sendNext(\"That reminds me. I should release some of the Mana I've stored up. \"\n\"The weak magic I've been using won't get me very far.\")\nsm.sendNext(\"Time to buff up my magic. 
I'll be stronger in no time!\")\n\nsm.setSpeakerID(haku)\nsm.setBoxChat()\nsm.sendNext(\"Hey! Are you trying to starve me to death? \"\n\"Without Mana, I might as well be a house cat!\")\n\nsm.flipBoxChat()\nsm.flipBoxChatPlayerAsSpeaker()\nsm.sendNext(\"Relax, furball. We have to be careful about how we use Mana in this new world. \"\n\"There's no telling what it could do.\")\nsm.sendNext(\"(There's no way this will be enough to overthrow Nobunaga and rescue the princess. \"\n\"I'll have to train to become more powerful.)\")\n\nif sm.canHold(cacophonous):\n sm.jobAdvance(4210)\n sm.startQuest(parentID)\n sm.completeQuest(parentID)\n sm.giveItem(cacophonous)\nelse:\n sm.sendNext(\"Please make space in your Equip inventory.\")\n","sub_path":"scripts/quest/q57458s.py","file_name":"q57458s.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"149387503","text":"##############################################################################\n#\n# Copyright (c) 2002-2011 Nexedi SA and Contributors. All Rights Reserved.\n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsibility of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n# End users who are looking for a ready-to-use solution with commercial\n# guarantees and support are strongly adviced to contract a Free Software\n# Service Company\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc. 
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n##############################################################################\n\n\nfrom Products.ERP5Type.tests.ERP5TypeTestCase import ERP5TypeTestCase\nfrom zExceptions import Unauthorized\nfrom DateTime import DateTime\nimport json\n\n\nclass TestUNGSecurity(ERP5TypeTestCase):\n \"\"\"Test Suite to validate all cases of permissions in UNG\"\"\"\n\n def getTitle(self):\n return \"Test UNG Security\"\n\n def beforeTearDown(self):\n person_module = self.getPersonModule()\n person_module.manage_delObjects(list(person_module.objectIds()))\n self.tic()\n\n def afterSetUp(self):\n if self.portal.portal_preferences.ung_preference.getPreferenceState() != \"global\":\n self.portal.portal_preferences.ung_preference.enable()\n person = self.portal.person_module.newContent(portal_type='Person',\n reference=\"ung_user\")\n assignment = person.newContent(portal_type='Assignment')\n assignment.setFunction(\"function/ung_user\")\n assignment.open()\n self.tic()\n\n def testERP5Site_createNewWebDocumentAsAnonymous(self):\n \"\"\"Test use script ERP5Site_createNewWebDocument as Anonymous User\"\"\"\n self.logout()\n self.changeSkin(\"UNGDoc\")\n self.assertRaises(Unauthorized,\n self.portal.ERP5Site_createNewWebDocument,\n (\"web_page_template\"))\n\n def testERP5Site_createNewWebDocumentWithUNGRole(self):\n \"\"\"Test use script ERP5Site_createNewWebDocument when a erp5 user have role\n to create and edit document in UNG\"\"\"\n self.login(\"ung_user\")\n web_page = self.portal.portal_catalog.getResultValue(portal_type=\"Web Page\")\n self.assertEqual(web_page, None)\n self.changeSkin(\"UNGDoc\")\n self.portal.ERP5Site_createNewWebDocument(\"web_page_template\")\n self.tic()\n web_page = self.portal.portal_catalog.getResultValue(portal_type=\"Web Page\")\n self.assertEqual(web_page.getReference(), \"default-Web.Page.Reference\")\n self.assertEqual(len(self.portal.web_page_module.searchFolder()), 1)\n\n def testShareDocument(self):\n \"\"\"Test the document sharing between erp5 users\"\"\"\n person = self.portal.person_module.newContent(portal_type='Person',\n reference=\"ung_user2\",\n first_name=\"Gabriel\")\n assignment = person.newContent(portal_type='Assignment')\n assignment.setFunction(\"function/ung_user\")\n assignment.open()\n self.tic()\n self.login(\"ung_user\")\n self.changeSkin(\"UNGDoc\")\n self.portal.ERP5Site_createNewWebDocument(\"web_table_template\")\n self.tic()\n web_table = self.portal.portal_catalog.getResultValue(portal_type=\"Web Table\")\n web_table.setReference(\"share-Web.Table\")\n self.tic()\n self.login(\"ung_user2\")\n self.assertEqual(len(self.portal.web_page_module.searchFolder()), 0)\n ung_web_site = self.portal.web_site_module.ung\n self.changeSkin(\"UNGDoc\")\n web_table = ung_web_site.ERP5Site_userFollowUpWebPage(\"share-Web.Table\")\n self.tic()\n self.assertNotEquals(web_table.getFollowUpList(), [])\n self.assertEqual(len(self.portal.web_page_module.searchFolder()), 1)\n web_table = self.portal.web_page_module.searchFolder()[0]\n self.assertEqual(web_table.getPortalType(), \"Web Table\")\n self.login(\"ERP5TypeTestCase\")\n self.assertEqual(web_table.getFollowUpValue().getFirstName(), \"Gabriel\")\n\n def testBase_updateCalendarEventListWithERP5User(self):\n \"\"\" Test script Base_updateCalendarEventList with erp5 user\"\"\"\n self.logout()\n self.changeSkin(\"UNGDoc\")\n self.assertEqual('{\"events\": []}',\n self.portal.Base_updateCalendarEventList(\"list\"))\n self.login(\"ung_user\")\n 
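# The Web Table is created and shared as ung_user below; the test then\n    # switches to ung_user2 and pulls the document in via the follow-up helper.\n    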
self.changeSkin(\"UNGDoc\")\n event_list = json.loads(self.portal.Base_updateCalendarEventList(\"list\"))\n self.assertEqual(event_list.get(\"events\"), [])\n event = self.portal.event_module.newContent(portal_type=\"Note\")\n event.setStartDate(DateTime())\n event.setStopDate(DateTime()+1)\n self.tic()\n self.changeSkin(\"UNGDoc\")\n event_dict = json.loads(self.portal.Base_updateCalendarEventList(\"list\"))\n event_list = event_dict.get(\"events\")\n self.assertEqual(event_list[0][-2], \"Note\")\n form_dict = dict(CalendarStartTime=DateTime().strftime(\"%m/%d/%Y %H:%M\"),\n CalendarEndTime=DateTime().strftime(\"%m/%d/%Y %H:%M\"),\n CalendarTitle=\"One Sample\",\n portal_type=\"Web Message\")\n self.portal.REQUEST.form.update(form_dict)\n self.changeSkin(\"UNGDoc\")\n self.portal.Base_updateCalendarEventList(\"add\")\n self.tic()\n web_message = self.portal.portal_catalog.getResultValue(portal_type=\"Web Message\")\n self.assertEqual(web_message.getTitle(), \"One Sample\")\n self.portal.REQUEST.form.clear()\n form_dict = dict(CalendarStartTime=DateTime().strftime(\"%m/%d/%Y %H:%M\"),\n CalendarEndTime=DateTime().strftime(\"%m/%d/%Y %H:%M\"),\n title=\"Buy Coffee\",\n event_id=web_message.getId())\n self.portal.REQUEST.form.update(form_dict)\n self.changeSkin(\"UNGDoc\")\n self.portal.Base_updateCalendarEventList(\"update\")\n self.tic()\n self.assertEqual(web_message.getTitle(), \"Buy Coffee\")\n self.portal.REQUEST.form.clear()\n form_dict = dict(title=web_message.getTitle(),\n id=web_message.getId())\n self.portal.REQUEST.form.update(form_dict)\n self.changeSkin(\"UNGDoc\")\n self.portal.Base_updateCalendarEventList(\"remove\")\n self.tic()\n web_message = self.portal.portal_catalog.getResultValue(portal_type=\"Web Message\")\n self.assertEqual(web_message, None)\n \n def testERPSite_createUNGUser(self):\n \"\"\"Test if is possible create one user as Anonymous user\"\"\"\n self.logout()\n form_dict = dict(firstname=\"My First Name\",\n lastname=\"My Last Name\",\n password=\"ung_password\")\n self.portal.REQUEST.form.update(form_dict)\n self.changeSkin(\"UNGDoc\")\n self.portal.ERPSite_createUNGUser()\n self.tic()\n self.login(\"ERP5TypeTestCase\")\n person = self.portal.portal_catalog.getResultValue(portal_type=\"Person\")\n self.assertEqual(person.getLastName(), \"My Last Name\")\n self.assertEqual(person.getValidationState(), \"validated\")\n\n def testBase_getPreferencePathList(self):\n \"\"\"Test if with normal user the paths of preference objects are returned correctly\"\"\"\n person = self.portal.person_module.newContent(portal_type='Person',\n reference=\"ung_user2\")\n assignment = person.newContent(portal_type='Assignment')\n assignment.setFunction(\"function/ung_user\")\n assignment.open()\n self.tic()\n self.login(\"ung_user\")\n self.changeSkin(\"UNGDoc\")\n preference_dict = json.loads(self.portal.Base_getPreferencePathList())\n self.assertEqual(preference_dict, {u'preference': u'portal_preferences/1'})\n self.login(\"ung_user2\")\n self.changeSkin(\"UNGDoc\")\n preference_dict = json.loads(self.portal.Base_getPreferencePathList())\n self.assertEqual(preference_dict, {u'preference': u'portal_preferences/2'})\n \n def testWebPage_shareDocument(self):\n \"\"\" \"\"\"\n self.login(\"ung_user\")\n self.changeSkin(\"UNGDoc\")\n self.portal.ERP5Site_createNewWebDocument(\"web_page_template\")\n self.tic()\n web_page = self.portal.portal_catalog.getResultValue(portal_type=\"Web Page\")\n self.assertEqual(web_page.getValidationState(), \"draft\")\n self.changeSkin(\"UNGDoc\")\n 
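# Sharing should hand back the public URL (portal URL plus the reference\n    # key) and flip the page's workflow state from 'draft' to 'shared'.\n    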
response = web_page.WebPage_shareDocument()\n self.tic()\n self.assertEqual(response, \"\".join((self.portal.absolute_url(),\n \"/?key=\",\n web_page.getReference())))\n self.assertEqual(web_page.getValidationState(), \"shared\")\n","sub_path":"bt5/erp5_web_ung_role/TestTemplateItem/portal_components/test.erp5.testUNGSecurity.py","file_name":"test.erp5.testUNGSecurity.py","file_ext":"py","file_size_in_byte":9037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"124234926","text":"from convertColor import *\n\nclass myIO():\n \"\"\" My IO class for reading and saving data\n \n \"\"\"\n def __init__(self, speicherpfad):\n \"\"\" Initialisation by choosing the file path\n \n \"\"\"\n self.speicherpfad = speicherpfad\n self.cC = Convert_to_color()\n\n def save_2D_list(self, mylist, header, speichername='test.txt', **kwargs):\n \"\"\" Needs documentation and reprogramming\n \n \"\"\"\n if 'speicherpfad' not in kwargs.keys():\n speicherpfad = self.speicherpfad + speichername\n else:\n speicherpfad = kwargs['speicherpfad']\n import os\n if not os.path.exists(speicherpfad):\n os.makedirs(speicherpfad)\n speicherpfad = speicherpfad + speichername\n header = header\n data = mylist\n #umwandeln der liste in text\n for index in range(len(data)):\n data[index] = str(data[index])\n data[index] = data[index].lstrip('[')\n data[index] = data[index].rstrip(']')\n data[index] = data[index].replace(', ', '\\t')\n with open(speicherpfad, 'wb') as f:\n f.write(header + '\\n')\n for entry in data:\n f.write(entry+'\\n')\n\n def save_2D_list_oH(self, mylist, speichername='test.txt'):\n \"\"\" Needs documentation and reprogramming\n \n \"\"\"\n speicherpfad = self.speicherpfad + speichername\n data = mylist\n #umwandeln der liste in text\n for index in range(len(data)):\n data[index] = str(data[index])\n data[index] = data[index].lstrip('[')\n data[index] = data[index].rstrip(']')\n data[index] = data[index].replace(', ', '\\t')\n with open(speicherpfad, 'wb') as f:\n for entry in data:\n f.write(entry+'\\n')\n\n def save_ndarray_as_Image(self, ndarray, speichername, **kwargs):\n \"\"\" Needs documentation and reprogramming\n \n \"\"\"\n \"\"\"\n kwargs argumente:\n speicherpfad = str,\n normiert = bool,\n color = bool\n \"\"\"\n resultImage = PILImage.fromarray(ndarray)\n resultImage = resultImage.convert(\"L\")\n try:\n if kwargs['normiert'] == True:\n import ImageOps\n resultImage = ImageOps.autocontrast(resultImage, cutoff=0)\n except:\n pass\n \n try:\n if kwargs['color'] == True:\n resultImage = self.cC.convert_to_rgb(resultImage)\n except:\n pass\n \n \n if 'speicherpfad' not in kwargs.keys():\n speicherpfad = self.speicherpfad + speichername\n else:\n speicherpfad = kwargs['speicherpfad']\n import os\n if not os.path.exists(speicherpfad):\n os.makedirs(speicherpfad)\n speicherpfad = speicherpfad + speichername\n resultImage.save(speicherpfad)\n\n\n\n\n\n","sub_path":"myIO.py","file_name":"myIO.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"177775868","text":"# Problem 1: Task Scheduler\n# Philip Chow\n\nimport json\nimport database\nimport datetime as dt\nimport pytz\n\ndef read_config(filepath):\n # Parses a task scheduler config file. This assumes the config JSON file is structured\n # with one key value pair in the entire file called task_config. task_config is\n # a JSON array, where each element in the array consists of five fields. 
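A minimal,\n    # hypothetical example (every value here is illustrative, not taken from a\n    # real config):\n    #\n    #   {\"task_config\": [{\"task_name\": \"nightly_report\",\n    #                     \"task_location\": \"C:/jobs/report.py\",\n    #                     \"task_interval\": 86400,\n    #                     \"task_scheduled_time\": \"01/01/2020 02:00:00 EST\",\n    #                     \"task_scheduled\": \"true\"}]}\n    #\n    # 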
Any other\n # fields in the config are ignored.\n #\n # task_name - a string to name a task. This must be unique, though different task\n # names can call the same task location\n # task_location - a string indicating where a program is located on the filesystem\n # task_interval - an int specifying time between runs in seconds. If 0, then the task\n # is run only once. If this value is blank, it is assumed to be 0.\n # task_scheduled_time - a string indicating a time to run the first iteration of the\n # task. If blank, the task runs immediately. If task_scheduled is false, this value\n # is ignored\n # task_scheduled - bool (formatted as a str) indicating if a task is intended to be\n # scheduled. If false, the value in task_scheduled_time is ignored and the task\n # runs immediately.\n #\n # This function returns a dict containing all configs, where each config only has the\n # five fields listed above\n \n # Open the file and start parsing it\n with open(filepath) as f:\n data = json.load(f)\n \n # Check that task_config exists in the file, and stop if it doesn't\n if 'task_config' not in data.keys():\n raise Exception('The key ''task_config'' doesn''t exist in the JSON file')\n \n config_list_orig = data['task_config']\n \n # Check length of list. If zero, stop processing\n if len(config_list_orig) == 0:\n raise Exception('No configurations found in ''task_config''')\n \n # Initialize new variable for holding config data. This is the output of this\n # function.\n config_list = []\n \n # Counter variable to allow for easier debugging on config file\n count = 0\n \n # Extract just the config data that we want\n for obj in config_list_orig:\n # Reinitialize tmp on every iteration so it's obvious if it didn't work\n tmp = {}\n \n # Assign keys from original list to tmp object\n # Force variable types and also check for existence of value. 
If the key\n # doesn't exist, keep going and let user know in console\n try:\n tmp['task_name'] = str(obj['task_name'])\n except KeyError:\n print('Couldn''t find the key ''task_name'' at iteration ' + str(count))\n \n try:\n tmp['task_location'] = str(obj['task_location'])\n except KeyError:\n print('Couldn''t find the key ''task_location'' at iteration ' + str(count))\n \n try:\n # Check if task_interval is blank\n if obj['task_interval'] == '':\n tmp['task_interval'] = 0\n else:\n tmp['task_interval'] = int(obj['task_interval'])\n except KeyError:\n print('Couldn''t find the key ''task_interval'' at iteration ' + str(count))\n except ValueError:\n print('Couldn''t convert ''task_interval'' to an int at iteration ' + str(count))\n \n try:\n tmp['task_scheduled_time'] = str(obj['task_scheduled_time'])\n except KeyError:\n print('Couldn''t find the key ''task_scheduled_time'' at iteration ' + str(count))\n \n # If task_scheduled is a string, set it to lowercase and match it with true/false\n # If it's not a string or a bool, just error out\n try:\n if type(obj['task_scheduled']) == str:\n if obj['task_scheduled'].lower() == 'true':\n tmp['task_scheduled'] = True\n elif obj['task_scheduled'].lower() == 'false':\n tmp['task_scheduled'] = False\n else:\n raise ValueError()\n elif type(obj['task_scheduled']) == bool:\n tmp['task_scheduled'] = (obj['task_scheduled'])\n else:\n raise ValueError()\n except ValueError:\n print('Couldn''t find the key ''task_scheduled'' at iteration ' + str(count))\n \n # Assign data to final config_list if it has 5 fields (having an issue with\n # tmp being appended even though exceptions should remove it, specifically at\n # task_scheduled checkpoint)\n if len(tmp) == 5:\n config_list.append(tmp)\n \n # Increment counter\n count = count + 1\n \n return config_list\n\ndef get_jobs_to_run(config_list):\n # Given a list of job configurations, identify which ones need to be run based on the\n # scheduled time provided in the configuration. It is assumed that this scheduler runs\n # once a minute, so only jobs with expected start times within one minute of now are\n # kicked off.\n \n # Initialize variables\n utcTz = pytz.timezone(\"UTC\") # Always use UTC\n now = utcTz.localize(dt.datetime.now())\n to_run_list = []\n \n # Loop through all objects in config_list\n for obj in config_list:\n try:\n # If task_scheduled is false, the job runs\n # Otherwise, need to do some time comparisons\n if obj['task_scheduled'] == False:\n to_run_list.append(obj)\n elif obj['task_scheduled'] == True:\n # If task_scheduled_time has values, parse it\n if obj['task_scheduled_time'] == '':\n print('Couldn''t find any values for task_scheduled_time')\n raise Exception()\n \n # The following line of code doesn't work even though %Z should handle EST, per\n # strptime documentation. 
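A timezone-aware alternative\n            # (an untested sketch, not the author's code) is to drop the literal\n            # ' EST' suffix and localize with pytz:\n            #   est = pytz.timezone('America/New_York')\n            #   naive = dt.datetime.strptime(obj['task_scheduled_time'][:-4], '%m/%d/%Y %H:%M:%S')\n            #   aware = est.localize(naive).astimezone(utcTz)\n            # 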
Hard-coding EST for now.\n                # This might be an issue between Python 3.6 and whatever the online documentation is for\n                # runtime = dt.datetime.strptime(obj['task_scheduled_time'], '%m/%d/%Y %H:%M:%S %Z')\n                \n                runtimeEST = dt.datetime.strptime(obj['task_scheduled_time'], '%m/%d/%Y %H:%M:%S EST')\n                runtimeUTC = utcTz.localize(runtimeEST + dt.timedelta(hours = 5)) # Force it to time aware in UTC\n                \n                # If the start time is before now, then we need to march the time forward\n                # (guard on a positive interval so a one-shot job cannot loop forever)\n                while runtimeUTC < now and obj['task_interval'] > 0:\n                    runtimeUTC = runtimeUTC + dt.timedelta(seconds = obj['task_interval'])\n                    print(runtimeUTC)\n                \n                # Per the docstring, only jobs due within one minute of now are kicked off\n                if (runtimeUTC - now).total_seconds() < 60:\n                    to_run_list.append(obj)\n            else:\n                # This shouldn't happen, but put it here just in case\n                print('Error: task_scheduled is neither true nor false')\n                raise Exception()\n        except Exception as error:\n            print('Failed to identify jobs to run (get_jobs_to_run): ' + str(error))\n    \n    return to_run_list\n\ndef check_job_runtime(to_run_list, running_list):\n    # Given a list of job configurations and jobs that are already running, make sure\n    # that there are no collisions. i.e. if a job is already running, don't try to kick\n    # it off again.\n    \n    final_run_list = []\n    \n    # Assumes each entry in running_list carries the same 'task_name' key as the\n    # config entries; any job whose name is already running is skipped.\n    running_names = set(job.get('task_name') for job in running_list)\n    for obj in to_run_list:\n        if obj['task_name'] not in running_names:\n            final_run_list.append(obj)\n    \n    return final_run_list\n\ndef run_jobs(final_run_list):\n    # Dummy function to run some jobs\n    \n    for obj in final_run_list:\n        name = obj['task_name']\n        program = obj['task_location']\n        print('Task ' + name + ' at ' + program + ' kicked off')\n        \n        # For Windows, I choose to use os.system. Leaving this commented out because\n        # the functions don't actually exist\n        # os.system(program)\n\ndef scheduler(CONFIG_FILE = 'config.json'):\n    # Given a config.json file (assumed to share the same directory as this file), then\n    # do some scheduling tasks. The scheduler first parses the config file to identify\n    # all tasks that need to be run, as well as associated parameters\n    #\n    # Assumes a default CONFIG_FILE name of 'config.json'. This can be modified by the\n    # user when calling the function if so desired\n    \n    # Initialize database\n    db = database.csvDB()\n    \n    # Get list of configurations\n    config_list = read_config(CONFIG_FILE)\n    \n    # Check what needs to be run now based on config_list\n    to_run_list = get_jobs_to_run(config_list)\n    \n    # Check what is already running (from database)\n    running_list = db.get_running()\n    \n    # If there is a collision in task name between what is running now and what needs to\n    # kick off, don't kick off the new task.\n    final_run_list = check_job_runtime(to_run_list, running_list)\n    \n    # Otherwise, kick off the new task\n    run_jobs(final_run_list)\n    \n    # Insert new jobs into the database\n    # db.add_task(SOME_DATA)\n    \n    # Check job status for all jobs previously identified as running. If the job is\n    # done or errored out, update it accordingly in the database\n    \n    # Return variables for debugging purposes\n    return config_list, running_list, to_run_list\n\nif __name__ == \"__main__\":\n    # Run the function. 
Also fetch output for debug purposes\n output = scheduler()","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":9495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"453445286","text":"from dpl.libs.shift_reg_buffered import ShiftRegBuffered\n\nfrom dpl.core.connections import Connection, ConnectionFactory\nfrom dpl.core.connections import ConnectionRegistry\nfrom dpl.libs.shift_reg_gpio import ShiftRegGPIO\n\n\nclass ShiftRegGPIOBuffered(ShiftRegBuffered, Connection):\n def __init__(self, con_params: dict):\n Connection.__init__(self)\n ShiftRegBuffered.__init__(\n self\n , ShiftRegGPIO(**con_params)\n )\n\n\nclass ShiftRegGPIOBufferedFactory(ConnectionFactory):\n @staticmethod\n def build(config: dict) -> ShiftRegGPIOBuffered:\n return ShiftRegGPIOBuffered(\n config[\"con_params\"]\n )\n\n\nConnectionRegistry.register_factory(\n \"shift_reg\",\n ShiftRegGPIOBufferedFactory()\n)\n","sub_path":"dpl/specific/platforms/shift_reg_gpio/shift_reg_gpio_buffered.py","file_name":"shift_reg_gpio_buffered.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"632564415","text":"from flask import Flask, request, render_template,jsonify\n\napp = Flask(__name__)\n\n# This route will show a form to perform an AJAX request\n# jQuery is loaded to execute the request and update the\n# value of the operation\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/is_running', methods=['POST'])\ndef isRunning():\n #process_name = request.values.get('name', None)\n return \"HELLO WORLD\"\n ''' the content '''\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=4000)\n","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"92568439","text":"import pygame\nimport sys\nfrom Laberinto import Laberinto\nfrom PantallaJuego import PantallaJuego\nfrom Jugador import Jugador\nfrom Enemigo import Enemigo\nfrom Bala import bala\nfrom BloqueDestructible import BloqueDestructible\nfrom BloqueIndestructible import BloqueIndestructible\nfrom Objetivo import Objetivo\nfrom Bala import balaMovimiento\nclass Controlador():\n\n def __init__(self):\n '''Inicializa las variables del controlador'''\n pygame.init()\n self.entidades = pygame.sprite.Group()\n self.ventanaJuego = None\n self.laberinto = None\n self.jugador = None\n\n\n def ejecutarMenu(self):\n pass\n\n def crearLaberinto(self, filas, columnas, tamanoCuadro):\n '''Inicia el laberinto del Controlador\n Parametros:\n filas (int): Cantidad de filas del laberinto que se quiere crear\n columnas (int): Cantidad de columnas del laberinto que se quiere crear\n tamanoCuadro (int): Tamano de los cuadros que conforman el laberinto\n '''\n self.laberinto = Laberinto(filas, columnas, tamanoCuadro)\n\n def setLaberinto(self, laberinto):\n '''Asigna un nuevo laberinto al Controlador\n Parametros:\n laberinto (laberinto): Es el nuevo laberinto que se le quiere asignar al Controlador'''\n self.laberinto = laberinto\n\n def getLaberinto(self):\n '''Retorna el laberinto del controlador'''\n return self.laberinto\n\n def getEntidades(self):\n '''Retorna el grupo de entidades del Controlador'''\n return self.entidades\n\n def crearVentanaJuego(self,anchoVentana, alturaVentana):\n '''Inicializa la ventana del juego\n Parametros:\n 
anchoVentana (int): Ancho de la ventana de juego\n alturaVentana (int): Ancho de la ventana de juego\n '''\n self.ventanaJuego = PantallaJuego(anchoVentana,alturaVentana)\n \n def agregarEntidad(self, entidadNueva):\n '''Agrega una entidad al grupo de entidades del Controlador'''\n self.entidades.add(entidadNueva)\n\n\n def crearEnemigo(self, posx, posy, velX, velY, ubicacionArchivo, nombreArchivo):\n '''Crea un nuevo enemigo\n Parametros:\n posx (int): Posicion en el eje x del enemigo\n posy (int): Posicion en el eje y del enemigo\n vida (int): Vida del enemigo\n velX (int): Velocidad en el eje x del enemigo\n velY (int): Velocidad en el eje y del enemigo\n ubicacionArchivo (string): Ubicacion de la imagen del enemigo\n nombreArchivo (string): Nombre de la imagen del enemigo\n\n Retorno:\n enemigoNuevo (Enemigo): Retorna el nuevo enemigo\n '''\n enemigoNuevo = Enemigo(posx,posy,velX,velY,ubicacionArchivo, nombreArchivo, self.ventanaJuego.ancho, self.ventanaJuego.altura)\n self.agregarEntidad(enemigoNuevo)\n return enemigoNuevo\n \n\n def crearJugador(self, posx, posy,vida, velX, velY, ubicacionArchivo, nombreArchivo,imagenBala):\n '''Crea un nuevo jugador\n Parametros:\n posx (int): Posicion en el eje x del jugador\n posy (int): Posicion en el eje y del jugador\n vida (int): Vida del jugador\n velX (int): Velocidad en el eje x del jugador\n velY (int): Velocidad en el eje y del jugador\n ubicacionArchivo (string): Ubicacion de la imagen del jugador\n nombreArchivo (string): Nombre de la imagen del jugador\n\n Retorno:\n jugador (Jugador): Retorna el nuevo jugador\n '''\n\n self.jugador = Jugador(posx,posy,vida,velX,velY,ubicacionArchivo, nombreArchivo, self.ventanaJuego.ancho, self.ventanaJuego.altura,imagenBala)\n self.agregarEntidad(self.jugador) \n return self.jugador\n\n def crearEnemigo(self, posx, posy,vida, velX, velY, ubicacionArchivo, nombreArchivo,imagenBala):\n enemigoNuevo = Enemigo(posx,posy,vida,velX,velY,ubicacionArchivo, nombreArchivo, self.ventanaJuego.ancho, self.ventanaJuego.altura,imagenBala)\n self.agregarEntidad(enemigoNuevo)\n return enemigoNuevo\n\n def eliminarEntidad(entidad):\n '''Elimina una entidad\n Parametros:\n entidad (entidad): Entidad que se quiere eliminar'''\n entidad.morir()\n\n def ejecutarJuego(self):\n '''Contiene el ciclo principal del juego'''\n if self.ventanaJuego is not None :\n done = False\n while not done: \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n pygame.quit()\n sys.exit(0)\n\n self.ventanaJuego.ventana.fill((0,0,1))\n #Dibuja en la vista todas las entidades que se han creado\n if(self.laberinto is not None):\n self.ventanaJuego.dibujarElementos(self.laberinto.objetos)\n\n self.ventanaJuego.dibujarElementos(self.entidades)\n self.ventanaJuego.actualizarPantalla()\n\n '''revisa las colisiones entre el jugador y el laberinto'''\n for entidad in self.laberinto.objetos:\n if entidad != self.jugador:\n if entidad.rect.colliderect(self.jugador) == True:\n entidad.colisionConJugador(self.jugador)\n\n '''revisa las coliones entre el jugador y las demas entidades'''\n for entidad in self.entidades:\n if entidad != self.jugador:\n if entidad.rect.colliderect(self.jugador) == True:\n entidad.colisionConJugador(self.jugador)\n\n for entidad in self.entidades:\n \n if type(entidad) is balaMovimiento:\n \n for entidad2 in self.entidades:\n \n if entidad2.rect.colliderect(entidad) == True:\n \n \n entidad2.colisionBala(entidad)\n\n #'Le da vida' a todos las entidades que se han creado. 
Llama el metodo update de cada entidad\n self.entidades.update()\n\n\n\n else:\n print(\"Error: No existe una ventana para el juego.\")\n\n \n ","sub_path":"Dig-Dug-Software/DigDug/DigDug/Controlador.py","file_name":"Controlador.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"273406044","text":"# -*- coding: utf-8 -*-\n\ndef countletters(str_in):\n str_in = list(str_in.lower())\n occurences = []\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n for letter in alphabet:\n k = 0\n while letter in str_in:\n k = k + 1\n str_in.remove(letter)\n if k != 0:\n occurences.append([letter,k])\n occurences.sort()\n return occurences\n\nprint(countletters(\"a bb ccc dddd eeeee ffffff\"))\n","sub_path":"strings/countletters(str_in).py","file_name":"countletters(str_in).py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"643126211","text":"# -*- coding: utf-8 -*-\n\nfrom discord.ext import commands\nimport discord\nimport ujson\nimport asyncio\n\nclass Guilds:\n def __init__(self, bot):\n self.bot = bot\n self.config = bot.config\n\n async def update_guild_count(self):\n await self.bot.change_presence(activity=discord.Activity(name=f'=>help | {len(self.bot.guilds)} guilds', type=discord.ActivityType.listening))\n payload = {'server_count': sum(1 for g in self.bot.guilds)}\n await self.bot.session.request('POST', f'https://ls.terminal.ink/api/v1/bots/{self.bot.user.id}', data=ujson.dumps(payload, ensure_ascii=True),\n headers={'Content-Type': 'application/json', 'Authorization': self.bot.config.dbl})\n await self.bot.session.request('POST', f'https://discordbots.org/api/bots/{self.bot.user.id}/stats', data=ujson.dumps(payload, ensure_ascii=True),\n headers={'Content-Type': 'application/json', 'Authorization': self.bot.config.terminal})\n await asyncio.sleep(1)\n\n async def on_ready(self):\n if self.bot.config.dbl or self.bot.config.terminal:\n await self.update_guild_count()\n \n async def on_guild_join(self, guild):\n if self.bot.config['dbl'] or self.bot.config.terminal:\n await self.update_guild_count()\n \n async def on_guild_remove(self, guild):\n if self.bot.config['dbl'] or self.bot.config.terminal:\n await self.update_guild_count()\n\ndef setup(bot):\n bot.add_cog(Guilds(bot))\n","sub_path":"cogs/guilds.py","file_name":"guilds.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"85215191","text":"# -*- coding: utf-8 -*-\n# @author Masakaze Sato\n# @file SensorDirector\n# @note\n\nimport math\n\nfrom ...System.ObjectRegionDirectorBase import ObjectRegionDirectorBase\n\nclass SensorDirector(ObjectRegionDirectorBase):\n\n def __init__(self):\n super().__init__()\n # def __init__\n\n def _update_region(self, objs_in_region):\n len_obj = len(objs_in_region)\n # 総当たりチェック\n for i in range(len_obj):\n for j in range(i+1, len_obj, 1):\n self._update_obj_sensor(objs_in_region[i], objs_in_region[j])\n # for j\n # for i\n\n def _update_obj_sensor(self, obj1, obj2):\n sensors1 = obj1.get_game_logic_component(\"Sensor\")\n if sensors1 == None:\n return None\n sensors2 = obj2.get_game_logic_component(\"Sensor\")\n if sensors2 == None:\n return None\n\n pos1 = obj1.get_object_component(\"Physics\").pos\n pos2 = obj2.get_object_component(\"Physics\").pos\n\n for sensor1 in sensors1.sensors:\n for sensor2 in sensors2.sensors:\n 
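# Two spherical sensors register a hit when the distance between their\n                # centers is at most the sum of their radii; the block below computes the\n                # plain Euclidean distance in 3D for that test.\n                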
sensor1_offset = sensor1.offset\n sensor2_offset = sensor2.offset\n # 確定位置で計算\n diff_x = (pos1[0] + sensor1_offset[0]) - (pos2[0] + sensor2_offset[0])\n diff_y = (pos1[1] + sensor1_offset[1]) - (pos2[1] + sensor2_offset[1])\n diff_z = (pos1[2] + sensor1_offset[2]) - (pos2[2] + sensor2_offset[2])\n diff = math.sqrt(\n math.pow(diff_x, 2) + math.pow(diff_y, 2) + math.pow(diff_z, 2)\n )\n\n radius1 = sensor1.radius\n radius2 = sensor2.radius\n\n if diff > radius1 + radius2:\n continue\n\n sensor1.hit_sensors.append(sensor2)\n sensor2.hit_sensors.append(sensor1)\n # for sensor2\n # for sensor1\n # def _update_obj_sensor\n\n# class SensorDirector\n\nif __name__ == \"__main__\":\n\n arr = [[] for x in range(2)]\n for x in range(2):\n arr[x] = [[] for y in range(3)]\n for y in range(3):\n arr[x][y] = [[] for obj in range(4)]\n\n print(arr)\n print(len(arr))\n print(len(arr[0]))\n print(len(arr[0][0]))\n","sub_path":"Simulator/Sandbox/Logic/GameLogicComponents/Sensor/SensorDirector.py","file_name":"SensorDirector.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"430010579","text":"from sys import argv\nimport pandas as pd\n\ndef arg_handler():\n Rfam_clasf=argv[1]\n infernal_data=argv[2]\n output_path=argv[3] \n\n return Rfam_clasf,infernal_data,output_path\n \ndef append_categories(category_data,infernal_data,level):\n infernal_data=infernal_data.copy()\n category_data=category_data.copy()\n infernal_data[\"Category\"]=\"-\"\n\n for row in category_data.index:\n family=category_data.loc[row,\"family\"]\n try:\n category=category_data.loc[row,\"category\"].split(\";\")[level]\n except IndexError:\n category=category_data.loc[row,\"category\"].split(\";\")[0]\n #print(family,category)\n infernal_data.loc[infernal_data.loc[:,\"accession\"]==family,\"Category\"]=category\n\n return infernal_data\n \ndef main():\n Rfam_clasf,infernal_data,output_path=arg_handler()\n Rfam_clasf=pd.read_csv(Rfam_clasf, sep=\"\\t\",names=[\"family\",\"desc\",\"category\"])\n infernal_data=pd.read_csv(infernal_data, sep=\"\\t\")\n \n data=append_categories(Rfam_clasf,infernal_data,0)\n ax=data.loc[:,\"Category\"].value_counts().plot.pie(figsize=(10,10))\n ax.figure.savefig(output_path+\"_1.png\")\n ax.cla()\n \n data_1=append_categories(Rfam_clasf,infernal_data,1)\n data_1=data_1.copy()\n ax_1=data_1.loc[:,\"Category\"].value_counts().plot.pie(figsize=(10,10))\n ax_1.figure.savefig(output_path+\"_2.png\")\n \nmain()","sub_path":"20211/Andres_Florian_ncRNA/infernal_pie_chart.py","file_name":"infernal_pie_chart.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"578004613","text":"#coding:utf-8\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import MultiCursor\n\n#plt.figure(1) # 创建图表1\nfig = plt.figure(2) # 创建图表2\nax1 = plt.subplot(211) # 在图表2中创建子图1\nax2 = plt.subplot(212) # 在图表2中创建子图2\n\nx = np.linspace(0, 3, 100)\nfor i in xrange(5):\n # plt.figure(1) #? # 选择图表1\n # plt.plot(x, np.exp(i*x/3))\n plt.sca(ax1) #? 
# 选择图表2的子图1\n plt.plot(x, np.sin(i*x))\n plt.sca(ax2) # 选择图表2的子图2\n plt.plot(x, np.cos(i*x),label=str(i))\nplt.legend(loc='best') #绘制图例\nplt.ylim(-2, 2)\nmulti = MultiCursor(fig.canvas,(ax1,ax2),color='r',lw=1)\n\nplt.show()","sub_path":"hurst/draw2.py","file_name":"draw2.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"615275302","text":"from __future__ import annotations\n\nimport logging\nfrom datetime import datetime\nfrom hashlib import md5\nfrom typing import Any, Mapping, Optional, Tuple, TypedDict, cast\n\nimport sentry_sdk\nfrom django.conf import settings\nfrom django.db import transaction\n\nfrom sentry import eventstream\nfrom sentry.constants import LOG_LEVELS_MAP\nfrom sentry.event_manager import (\n GroupInfo,\n _process_existing_aggregate,\n _save_grouphash_and_group,\n get_event_type,\n)\nfrom sentry.eventstore.models import Event\nfrom sentry.issues.issue_occurrence import IssueOccurrence, IssueOccurrenceData\nfrom sentry.models import GroupHash, Release\nfrom sentry.ratelimits.sliding_windows import Quota, RedisSlidingWindowRateLimiter, RequestedQuota\nfrom sentry.types.issues import GROUP_TYPE_TO_CATEGORY\nfrom sentry.utils import metrics\n\nissue_rate_limiter = RedisSlidingWindowRateLimiter(\n **settings.SENTRY_ISSUE_PLATFORM_RATE_LIMITER_OPTIONS\n)\n# This should probably be configurable per team\nISSUE_QUOTA = Quota(3600, 60, 5)\n\nlogger = logging.getLogger(__name__)\n\n\ndef save_issue_occurrence(\n occurrence_data: IssueOccurrenceData, event: Event\n) -> Tuple[IssueOccurrence, Optional[GroupInfo]]:\n process_occurrence_data(occurrence_data)\n # Convert occurrence data to `IssueOccurrence`\n occurrence = IssueOccurrence.from_dict(occurrence_data)\n if occurrence.event_id != event.event_id:\n raise ValueError(\"IssueOccurrence must have the same event_id as the passed Event\")\n # Note: For now we trust the project id passed along with the event. Later on we should make\n # sure that this is somehow validated.\n occurrence.save(event.project_id)\n\n # TODO: Pass release here\n group_info = save_issue_from_occurrence(occurrence, event, None)\n if group_info:\n send_issue_occurrence_to_eventstream(event, occurrence, group_info)\n # TODO: Create group related releases here\n\n return occurrence, group_info\n\n\ndef process_occurrence_data(occurrence_data: IssueOccurrenceData) -> None:\n # Hash fingerprints to make sure they're a consistent length\n occurrence_data[\"fingerprint\"] = [\n md5(part.encode(\"utf-8\")).hexdigest() for part in occurrence_data[\"fingerprint\"]\n ]\n\n\nclass IssueArgs(TypedDict):\n platform: Optional[str]\n message: str\n level: Optional[int]\n culprit: str\n last_seen: datetime\n first_seen: datetime\n active_at: datetime\n type: int\n data: OccurrenceMetadata\n first_release: Optional[Release]\n\n\ndef _create_issue_kwargs(\n occurrence: IssueOccurrence, event: Event, release: Optional[Release]\n) -> IssueArgs:\n kwargs: IssueArgs = {\n \"platform\": event.platform,\n # TODO: Figure out what message should be. 
Or maybe we just implement a platform event and\n # define it in `search_message` there.\n \"message\": event.search_message,\n # TODO: Not sure what to put here\n # \"logger\": job[\"logger_name\"],\n # TODO: Level override from occurrence?\n \"level\": LOG_LEVELS_MAP.get(event.data[\"level\"]),\n \"culprit\": occurrence.subtitle,\n \"last_seen\": event.datetime,\n \"first_seen\": event.datetime,\n \"active_at\": event.datetime,\n \"type\": cast(int, occurrence.type.value),\n \"first_release\": release,\n \"data\": materialize_metadata(occurrence, event),\n }\n kwargs[\"data\"][\"last_received\"] = event.datetime\n return kwargs\n\n\nclass OccurrenceMetadata(TypedDict):\n type: str\n culprit: str\n metadata: Mapping[str, Any]\n title: str\n location: Optional[str]\n last_received: datetime\n\n\ndef materialize_metadata(occurrence: IssueOccurrence, event: Event) -> OccurrenceMetadata:\n \"\"\"\n Returns the materialized metadata to be merged with issue.\n \"\"\"\n\n event_type = get_event_type(event.data)\n event_metadata = dict(event_type.get_metadata(event.data))\n event_metadata = dict(event_metadata)\n event_metadata[\"title\"] = occurrence.issue_title\n\n return {\n \"type\": event_type.key,\n # Not totally sure if this makes sense?\n \"culprit\": occurrence.subtitle,\n \"metadata\": event_metadata,\n \"title\": occurrence.issue_title,\n \"location\": event.location,\n \"last_received\": event.datetime,\n }\n\n\n@metrics.wraps(\"issues.ingest.save_issue_from_occurrence\")\ndef save_issue_from_occurrence(\n occurrence: IssueOccurrence, event: Event, release: Optional[Release]\n) -> Optional[GroupInfo]:\n project = event.project\n issue_kwargs = _create_issue_kwargs(occurrence, event, release)\n\n # TODO: For now we will assume a single fingerprint. 
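(Supporting several would\n    # presumably mean one GroupHash row per extra hash pointing at the same group,\n    # e.g. GroupHash.objects.get_or_create(project=project, hash=h, defaults={'group': group});\n    # a hypothetical sketch, not what this function does today.) 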
We can expand later if necessary.\n # Note that additional fingerprints won't be used to generated additional issues, they'll be\n # used to map the occurrence to a specific issue.\n new_grouphash = occurrence.fingerprint[0]\n existing_grouphash = (\n GroupHash.objects.filter(project=project, hash=new_grouphash)\n .select_related(\"group\")\n .first()\n )\n if not existing_grouphash:\n with metrics.timer(\"issues.save_issue_from_occurrence.check_write_limits\"):\n granted_quota = issue_rate_limiter.check_and_use_quotas(\n [RequestedQuota(f\"issue-platform-issues:{project.id}\", 1, [ISSUE_QUOTA])]\n )[0]\n\n if not granted_quota.granted:\n # Log how many issues we dropped due to rate limiting\n metrics.incr(\"issues.issue.dropped\")\n return None\n\n with sentry_sdk.start_span(\n op=\"issues.save_issue_from_occurrence.transaction\"\n ) as span, metrics.timer(\n \"issues.save_issue_from_occurrence.transaction\",\n tags={\"platform\": event.platform or \"unknown\", \"type\": occurrence.type.value},\n sample_rate=1.0,\n ) as metric_tags, transaction.atomic():\n group, is_new = _save_grouphash_and_group(\n project, event, new_grouphash, **cast(Mapping[str, Any], issue_kwargs)\n )\n is_regression = False\n span.set_tag(\"save_issue_from_occurrence.outcome\", \"new_group\")\n metric_tags[\"save_issue_from_occurrence.outcome\"] = \"new_group\"\n metrics.incr(\n \"group.created\",\n skip_internal=True,\n tags={\"platform\": event.platform or \"unknown\", \"type\": occurrence.type.value},\n )\n group_info = GroupInfo(group=group, is_new=is_new, is_regression=is_regression)\n else:\n group = existing_grouphash.group\n if group.issue_category != GROUP_TYPE_TO_CATEGORY[occurrence.type]:\n logger.error(\n \"save_issue_from_occurrence.category_mismatch\",\n extra={\n \"issue_category\": group.issue_category,\n \"event_type\": \"platform\",\n \"group_id\": group.id,\n },\n )\n return None\n\n is_new = False\n # Note: This updates the message of the issue based on the event. 
Not sure what we want to\n    # store there yet, so we may need to revisit that.\n    is_regression = _process_existing_aggregate(group, event, issue_kwargs, release)\n    group_info = GroupInfo(group=group, is_new=is_new, is_regression=is_regression)\n\n    return group_info\n\n\ndef send_issue_occurrence_to_eventstream(\n    event: Event, occurrence: IssueOccurrence, group_info: GroupInfo\n) -> None:\n    group_event = event.for_group(group_info.group)\n    group_event.occurrence = occurrence\n\n    eventstream.insert(\n        event=group_event,\n        is_new=group_info.is_new,\n        is_regression=group_info.is_regression,\n        is_new_group_environment=group_info.is_new_group_environment,\n        primary_hash=occurrence.fingerprint[0],\n        received_timestamp=group_event.data.get(\"received\") or group_event.datetime,\n        skip_consume=False,\n        group_states=[\n            {\n                \"id\": group_info.group.id,\n                \"is_new\": group_info.is_new,\n                \"is_regression\": group_info.is_regression,\n                \"is_new_group_environment\": group_info.is_new_group_environment,\n            }\n        ],\n    )\n","sub_path":"src/sentry/issues/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":8162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"513229128","text":"month_by_numbers = list(range(1, 13))\n\nmonth_by_word = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October',\n                 'November', 'December', ]\n\nmonth_dict = {}\n\ni = 0\n\nwhile i < len(month_by_numbers):\n    month_dict[month_by_numbers[i]] = month_by_word[i]\n    i += 1\n\npicked_month = int(input(\"Enter the month as an integer from 1 to 12: \"))\n\nif picked_month in range(1, 13):\n    print(month_dict[picked_month])","sub_path":"exercise_3.py","file_name":"exercise_3.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"493990740","text":"from ..maze_logic import grids\nfrom ..managers import cell_type_manager as ct\nfrom ..managers import space_rep_manager as sp_rep\nfrom ..managers import algorithm_manager\n\n\nclass GridManager:\n    grid = None\n\n    def generate_grid(props) -> None:\n        self = GridManager\n\n        grid = None\n        maze_dimension = int(props.maze_space_dimension)\n        if props.cell_type == ct.POLAR:\n            self.grid = grids.GridPolar(\n                rows=props.maze_rows_or_radius,\n                columns=0,\n                levels=props.maze_levels if maze_dimension == int(sp_rep.REP_REGULAR) else 1,\n                cell_size=1 - props.cell_inset,\n                space_rep=maze_dimension,\n                branch_polar=props.maze_polar_branch)\n            return\n        elif props.cell_type == ct.HEXAGON:\n            grid = grids.GridHex\n        elif props.cell_type == ct.TRIANGLE:\n            grid = grids.GridTriangle\n        elif props.cell_type == ct.OCTOGON:\n            grid = grids.GridOctogon\n        elif props.cell_type == ct.DODECAGON:\n            grid = grids.GridDodecagon\n        else:\n            if props.maze_weave:\n                self.grid = grids.GridWeave(\n                    rows=props.maze_rows_or_radius,\n                    columns=props.maze_columns,\n                    levels=1,\n                    cell_size=1 - max(0.2, props.cell_inset),\n                    use_kruskal=algorithm_manager.is_kruskal_random(props.maze_algorithm),\n                    weave=props.maze_weave,\n                    space_rep=maze_dimension)\n                return\n            elif maze_dimension == int(sp_rep.REP_BOX):\n                rows = props.maze_rows_or_radius\n                cols = props.maze_columns\n                self.grid = grids.Grid(\n                    rows=3 * rows,\n                    columns=2 * cols + 2 * rows,\n                    levels=props.maze_levels if maze_dimension == int(sp_rep.REP_REGULAR) else 1,\n                    cell_size=1 - props.cell_inset,\n                    space_rep=maze_dimension,\n                    mask=[\n                        (0, 0, rows - 1, rows - 1),\n                        (rows + cols, 0, 2 * rows + 2 * cols - 1, 
rows - 1),\n                        (0, 2 * rows, rows - 1, 3 * rows - 1),\n                        (rows + cols, 2 * rows, 2 * rows + 2 * cols - 1, 3 * rows - 1)])\n                return\n            else:\n                grid = grids.Grid\n        self.grid = grid(\n            rows=props.maze_rows_or_radius,\n            columns=props.maze_columns,\n            levels=props.maze_levels if maze_dimension == int(sp_rep.REP_REGULAR) else 1,\n            cell_size=1 - props.cell_inset,\n            space_rep=maze_dimension)\n","sub_path":"Maze Generator/managers/grid_manager.py","file_name":"grid_manager.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"282302988","text":"#\r\n# @lc app=leetcode.cn id=1319 lang=python3\r\n#\r\n# [1319] Number of Operations to Make Network Connected\r\n#\r\n# https://leetcode-cn.com/problems/number-of-operations-to-make-network-connected/description/\r\n#\r\n# algorithms\r\n# Medium (41.68%)\r\n# Likes:    4\r\n# Dislikes: 0\r\n# Total Accepted:    1K\r\n# Total Submissions: 2.5K\r\n# Testcase Example:  '4\\n[[0,1],[0,2],[1,2]]'\r\n#\r\n# Computers are connected into a network with ethernet cables; the computers are\r\n# numbered from 0 to n-1. The cables are given as connections, where\r\n# connections[i] = [a, b] connects computers a and b.\r\n#\r\n# Any computer in the network can reach any other computer in the same network,\r\n# directly or indirectly.\r\n#\r\n# Given the initial wiring connections of this network, you may unplug the cable\r\n# between any two directly connected computers and use it to connect a pair of\r\n# computers that are not directly connected. Compute and return the minimum\r\n# number of such operations needed to connect all the computers; if it is\r\n# impossible, return -1.\r\n#\r\n#\r\n#\r\n# Example 1:\r\n#\r\n#\r\n#\r\n# Input: n = 4, connections = [[0,1],[0,2],[1,2]]\r\n# Output: 1\r\n# Explanation: Unplug the cable between computers 1 and 2 and plug it into\r\n# computers 1 and 3.\r\n#\r\n#\r\n# Example 2:\r\n#\r\n#\r\n#\r\n# Input: n = 6, connections = [[0,1],[0,2],[0,3],[1,2],[1,3]]\r\n# Output: 2\r\n#\r\n#\r\n# Example 3:\r\n#\r\n# Input: n = 6, connections = [[0,1],[0,2],[0,3],[1,2]]\r\n# Output: -1\r\n# Explanation: There are not enough cables.\r\n#\r\n#\r\n# Example 4:\r\n#\r\n# Input: n = 5, connections = [[0,1],[0,2],[3,4],[2,3]]\r\n# Output: 0\r\n#\r\n#\r\n#\r\n#\r\n# Constraints:\r\n#\r\n#\r\n# 1 <= n <= 10^5\r\n# 1 <= connections.length <= min(n*(n-1)/2, 10^5)\r\n# connections[i].length == 2\r\n# 0 <= connections[i][0], connections[i][1] < n\r\n# connections[i][0] != connections[i][1]\r\n# There are no repeated connections.\r\n# No two computers are connected by more than one cable.\r\n#\r\n#\r\n#\r\n\r\n\r\n# @lc code=start\r\nclass Solution:\r\n    def makeConnected(self, n: int, connections: List[List[int]]) -> int:\r\n        # Union-find: count how many independent components there are; the\r\n        # answer is that count minus 1.\r\n        # First make sure there are at least n - 1 cables.\r\n        if len(connections) < n - 1:\r\n            return -1\r\n        pre = {}\r\n\r\n        def find(a):\r\n            if a not in pre:\r\n                pre[a] = a\r\n            elif pre[a] != a:\r\n                pre[a] = find(pre[a])\r\n            return pre[a]\r\n\r\n        def union(a, b):\r\n            pre[find(a)] = find(b)\r\n\r\n        for c in connections:\r\n            union(c[0], c[1])\r\n        s = set()\r\n        for i in range(n):\r\n            s.add(find(i))\r\n        return len(s) - 1\r\n\r\n\r\n# @lc code=end\r\n","sub_path":"Medium/1319.连通网络的操作次数.py","file_name":"1319.连通网络的操作次数.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"105187847","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('problem', '0005_problem_manager'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='ClassResource',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n                ('name', models.CharField(max_length=30)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Resource',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n                ('name', models.CharField(max_length=30)),\n                ('classResource', 
models.ForeignKey(to='problem.ClassResource')),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n    ]\n","sub_path":"problem/migrations/0006_classresource_resource.py","file_name":"0006_classresource_resource.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"325087831","text":"# coding:utf-8\nimport contextlib\nimport os\nimport sys\nimport random\nimport time\nimport logging\nimport pymysql\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\nfrom MagicGoogle import MagicGoogle\n\n################################################\n# \"\"\"\n# cd MagicGoogle\n# python Examples/search_result.py\n# \"\"\"\n#################################################\n\nPROXIES = [{\n    'http': 'http://152.204.130.86:3128',\n    'https': 'https://152.204.130.86:3128',\n}]\n\n# Or MagicGoogle()\nmg = MagicGoogle()\n\nlogging.getLogger(\"urllib3\").setLevel(logging.WARNING)\nlogging.getLogger(\"chardet\").setLevel(logging.WARNING)\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s: %(message)s')\nLOGGER = logging.getLogger('google_search.py')\n\n\n# Context manager: commits and closes the database connection automatically after use\n@contextlib.contextmanager\ndef mysql(host='127.0.0.1', port=3306, user='root', passwd='wlw@601_SK', db='amazon_db2', charset='utf8'):\n    conn = pymysql.connect(host=host, port=port, user=user, passwd=passwd, db=db, charset=charset)\n    cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n    try:\n        yield cursor\n    finally:\n        conn.commit()\n        cursor.close()\n        conn.close()\n\n\ndef insert_mysql(offer_dict_list, table_name):\n    insert_into_sql = \"INSERT INTO \" + table_name + \"(\"\n    insert_into_sql_s = \"\"\n    datas = []\n    try:\n        if offer_dict_list and offer_dict_list[0]:\n            keys = offer_dict_list[0].keys()\n            for j in keys:\n                insert_into_sql = insert_into_sql + j + \",\"\n                insert_into_sql_s = insert_into_sql_s + \"%s,\"\n            insert_into_sql = insert_into_sql.rstrip(\",\") + \") VALUES (\" + insert_into_sql_s.rstrip(',') + \")\"\n        else:\n            return\n    except Exception as e:\n        LOGGER.info(\"Splicing insert_into_\" + table_name + \"_sql errors:{}\".format(e))\n\n    try:\n        for i in offer_dict_list:\n            data = tuple(i.values())\n            datas.append(data)\n    except Exception as e:\n        LOGGER.info(\"Splicing insert_into_\" + table_name + \"_data errors:{}\".format(e))\n\n    try:\n        with mysql() as cursor:\n            cursor.executemany(insert_into_sql, datas)\n    except Exception as e:\n        LOGGER.info(insert_into_sql)\n        LOGGER.info(datas)\n        LOGGER.info(\"INSERT \" + table_name + \" errors:{}\".format(e))\n\n\ndef google_search():\n    postfix = open('/root/qq/config/postfix', 'r')\n    postfix = postfix.readline()\n\n    with mysql() as cursor:\n        try:\n            row_count = cursor.execute(\n                \"SELECT DISTINCT key_state, key_word from key_word_us where key_state is null limit 1000\")\n            LOGGER.info(\"----------------ALL KEYWORD:\" + str(row_count) + \"-----------------\")\n            for row in cursor.fetchall():\n                keywordone = []\n                keyword = row[\"key_word\"]\n\n                if row[\"key_state\"]:\n                    row[\"key_state\"] = row[\"key_state\"] + \"US;\"\n                else:\n                    row[\"key_state\"] = \"US;\"\n                num = 400\n                results_per_page = 100\n                if num % results_per_page == 0:\n                    pages = num / results_per_page\n                else:\n                    pages = num / results_per_page + 1\n                result_keyword = []\n                for p in range(0, pages):\n                    start = p * results_per_page\n                    get_url_sleep_time = random.randint(2, 5)\n                    result_keyword_one, result_num = mg.search(query=keyword + postfix, 
num=results_per_page,\n                                                               language='en', start=start,\n                                                               pause=get_url_sleep_time, keyword=keyword,)\n                    result_keyword = result_keyword + result_keyword_one\n                    insert_mysql(result_keyword_one, \"listing_google_us\")\n                    time.sleep(get_url_sleep_time)\n                    if result_num < start + 100:\n                        break\n                row[\"key_count\"] = len(result_keyword)\n                keywordone.append(row)\n                update_key_word(keywordone)\n                LOGGER.info('KEY:' + keyword + '('+str(row[\"key_count\"])+') ')\n            os.system(\"nohup python2 /root/ff/filter_listing.py >/root/ff/outputtest 2>&1 &\")\n        except Exception as e:\n            LOGGER.info(\"--------------KEYWORD ERROR:\" + str(row_count) + \"------------{}\".format(e))\n            LOGGER.exception(e)\n            os.system(\"nohup python2 /root/ff/filter_listing.py >/root/ff/outputtest 2>&1 &\")\n\n\ndef google_search_for_bigkey():\n    postfix = open('/root/qq/config/postfix', 'r')\n    postfixStr = postfix.readline()\n    keywords = open('/root/qq/Google/bigkey', 'r')\n    for keyword in keywords:\n        keyword = keyword.strip()\n        num = 400\n        results_per_page = 100\n        if num % results_per_page == 0:\n            pages = num / results_per_page\n        else:\n            pages = num / results_per_page + 1\n\n        for p in range(0, pages):\n            start = p * results_per_page\n            get_url_sleep_time = random.randint(2, 5)\n            result_keyword_one, result_num = mg.search(query=keyword + postfixStr, num=results_per_page,\n                                                       language='en', start=start,\n                                                       pause=get_url_sleep_time, keyword=keyword)\n            insert_mysql(result_keyword_one, \"listing_google_us\")\n            time.sleep(get_url_sleep_time)\n            if result_num < start + 100:\n                break\n\n\ndef update_key_word(keywords):\n    update_sql = \"UPDATE key_word_us set key_state = %s,key_count = %s where key_word = %s\"\n    datas = []\n    try:\n        for i in keywords:\n            data = (i[\"key_state\"], i[\"key_count\"], i[\"key_word\"])\n            datas.append(data)\n    except Exception as e:\n        LOGGER.info(\"Splicing UPDATE key_word_us data errors:{}\".format(e))\n    try:\n        with mysql() as cursor:\n            row_count = cursor.executemany(update_sql, datas)\n    except Exception as e:\n        LOGGER.info(\"UPDATE key_word_us errors {}:{}\".format(keywords, e))\n\n\ndef google_search_for_brand_all():\n    google_search_for_brand('title')\n    google_search_for_brand('url')\n    google_search_for_brand('search')\n\n\ndef google_search_for_brand(keytype):\n    postfix = open('/root/qq/config/postfix', 'r')\n    postfix = postfix.readline()\n\n    with mysql() as cursor:\n        try:\n            sql = \"SELECT key_word,key_\" + keytype + \" from key_brand where key_\" + keytype + \" = 0 ORDER BY key_utime desc\"\n            row_count = cursor.execute(sql)\n            LOGGER.info(\"-------------ALL Brand \" + keytype + \":\" + str(row_count) + \"---------------\")\n            for row in cursor.fetchall():\n                keywordone = []\n                keyword = row[\"key_word\"]\n                # Total data number\n                num = 400\n                results_per_page = 100\n                if num % results_per_page == 0:\n                    pages = num / results_per_page\n                else:\n                    pages = num / results_per_page + 1\n\n                result_keyword = []\n                if keytype == 'search':\n                    query = keyword + postfix\n                else:\n                    query = postfix + ' in' + keytype + ':' + keyword\n\n                for p in range(0, pages):\n                    start = p * results_per_page\n                    get_url_sleep_time = random.randint(2, 5)\n                    result_keyword_one, result_num = mg.search(query=query, num=results_per_page,\n                                                               language='en', start=start,\n                                                               pause=get_url_sleep_time, keyword=keyword,\n                                                               keytype=keytype)\n                    result_keyword = result_keyword + result_keyword_one\n                    insert_mysql(result_keyword_one, \"listing_google_us\")\n                    LOGGER.info('Stop some time:' + str(get_url_sleep_time))\n                    time.sleep(get_url_sleep_time)\n                    if result_num < start + 100:\n                        break\n\n                
row[keytype] = len(result_keyword)\n                keywordone.append(row)\n                update_key_brand(keywordone, keytype)\n        except Exception as e:\n            LOGGER.info(\"-----------------------KEYWORD ERROR:\" + str(row_count) + \"---------------------{}\".format(e))\n            LOGGER.exception(e)\n\n\ndef update_key_brand(keywords, keytype):\n    update_sql = \"UPDATE key_brand set key_\"+keytype+\" = 1, key_\"+keytype+\"_count = %s where key_word = %s\"\n    datas = []\n    try:\n        for i in keywords:\n            data = (i[keytype], i[\"key_word\"])\n            datas.append(data)\n    except Exception as e:\n        LOGGER.info(\"Splicing UPDATE key_brand data errors:{}\".format(e))\n    try:\n        with mysql() as cursor:\n            row_count = cursor.executemany(update_sql, datas)\n            LOGGER.info(\"UPDATE key_brand {}/{} rows succeeded\".format(row_count, len(keywords)))\n    except Exception as e:\n        LOGGER.info(\"UPDATE key_brand errors {}:{}\".format(keywords, e))\n\n\ngoogle_search()\n","sub_path":"google_search.py","file_name":"google_search.py","file_ext":"py","file_size_in_byte":9386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"407741592","text":"# -*- coding: utf-8 -*-\n\nif __name__ == '__main__':\n    n = int( input() )\n    array = input().split(' ')\n\n    array = [ int(i) for i in array ]\n\n    min_val = min(array)\n    max_val = max(array)\n    sum_val = sum(array)\n\n    print( str(min_val) + ' ' + str(max_val) + ' ' + str(sum_val) )\n","sub_path":"python/aizu/itp/problem1_4D.py","file_name":"problem1_4D.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"437205986","text":"# Your code here\n\nfrom collections import Counter\n\nwith open(\"applications/histo/robin.txt\", \"r\") as f:\n    content = f.read()\n\ns = content.translate(str.maketrans(content, content, ':,;.-+=/\\\|[]{}()*^&\"'))\n\nbackslash_strings = [\"\\n\", \"\\t\", \"\\r\"]\nfor bs in backslash_strings:\n    s = s.replace(bs, \" \") \n\nif len(s) == 0:\n    print(\"Text not found\")\n\nwords = [es.lower() for es in s.split(\" \") if es != \"\"]\n\ncounts = Counter(words)\nindentation = len(max(words, key=len)) + 2\nsorted_list_of_words = sorted(counts, key=lambda k: (-counts[k], k))\n\nfor word in sorted_list_of_words:\n    spacing = (indentation - len(word)) * \" \"\n    pound_signs = counts[word] * \"#\"\n    print(f\"{word}{spacing}{pound_signs}\")\n","sub_path":"applications/histo/histo.py","file_name":"histo.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"331475722","text":"import traceback\n\n\ndef boxPrint(symbol, width, height):\n    try:\n        if len(symbol) != 1:\n            raise Exception('\"symbol\" needs to be a string of length 1')\n    except:\n        errorFile = open('..\\\Resources\\\error_log.txt', 'a')\n        errorFile.write(traceback.format_exc())\n        errorFile.close()\n        print('The traceback info was written to error_log.txt')\n    if (width < 2) or (height < 2):\n        raise Exception('\"width\" and \"height\" must be greater or equal to 2')\n\n    print(symbol * width)\n\n    for i in range(height - 2):\n        print(symbol + ' ' * (width - 2) + symbol)\n\n    print(symbol * width)\n\nboxPrint('*', 15, 10)\nboxPrint('o', 10, 8)\n\n","sub_path":"PythonScripts/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"72972045","text":"\n'''\n\nA way to map the fingerprintability score we found from 
classifying the traffic trace features to high-level site\nfeatures that a designer of a hidden service page can use to make their site less-identifiable.\n\nAn instance is a visit.\n\nWe use the fingerprintability score as the label - which we get from analyzing the low level features.\nFeatures - high level features - things that can be changed on a site.\n\n@author bekah\n'''\n\nimport argparse\nimport datetime\nimport os\nimport sys\n\nimport numpy as np\nimport pylab as pl\nfrom numpy import arange\nfrom scipy import stats, random\nfrom sklearn import cross_validation\nfrom sklearn.datasets import load_svmlight_file\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.linear_model import LinearRegression,Ridge,LassoCV,Lasso\nfrom sklearn.preprocessing import StandardScaler\n\nfeat_names = []\nrandom.seed(14)\ni_to_name = []\n\ndef save_insts_to_svm_file(instances, feat_file):\n    print('saving insts to svm')\n    dir = os.path.dirname(feat_file)\n    if not os.path.exists(dir):\n        print(\"making: \"+dir)\n        os.makedirs(dir)\n\n\n    with open(feat_file, 'w') as f:\n        first = True\n        for url in instances.keys():\n            inst = instances[url]\n            if not first:\n                f.write('\\n')\n            first = False\n            f.write(str(inst.label)[0:5] + ' ')\n            vals = inst.get_feature_vector()\n            for j in range(1, len(vals)):\n                val = str(vals[j]).strip()\n                if val == '':\n                    val = '0'\n                f.write(str(j) + ':' + val + ' ')\n            f.write('#'+str(url))\n        f.close()\n\n\ndef average(vect):\n    return np.average(vect)\n\n\ndef mode(vect):\n    return stats.mode(vect)[0][0]\n\n\ndef median(vect):\n    for i in range(0,len(vect)):\n        try:\n            vect[i] = float(vect[i])\n        except:\n            print('COULD NOT CONVERT: ' + str(vect))\n            vect[i] = 0\n    return np.median(vect)\n\n\ndef var(vect):\n    for i in range(0, len(vect)):\n        try:\n            vect[i] = float(vect[i])\n        except:\n            print('COULD NOT CONVERT: ' + str(vect))\n            vect[i] = 0\n    return np.var(vect)\n\n\ndef num_unique(vect):\n    return len(np.unique(vect))\n\n\nclass Instance:\n    feature_names = []\n    def __init__(self, label, url):\n        self.label = float(label)\n        self.url = url\n        self.visits = []\n\n    def add(self,line):\n        self.visits.append(line)\n\n    def set_feature_names(self, feature_names):\n        self.feature_names = feature_names\n\n    def get_feature_vector(self):\n        vect = {}\n\n        for visit in self.visits:\n            for i in range(0,len(visit)):\n                if i in vect:\n                    t = vect[i]\n                    t.append(visit[i])\n                    vect[i] = t\n                else:\n                    t = []\n                    t.append(visit[i])\n                    vect[i] = t\n        final_vect = []\n        c = 0\n\n        for i1 in range(0, len(vect)):\n            feat = self.feature_names[i1]\n            feat_type = feat.split('_')[0]\n            if 'i' == feat_type:\n                continue\n            if not aggregated_feat_file:\n                if 'mo' == feat_type:\n                    final_vect.append(mode(vect[i1]))\n                elif 'med' == feat_type:\n                    final_vect.append(median(vect[i1]))\n                elif 'made' == feat_type:\n                    final_vect.append(mode(vect[i1]))\n                elif 'var' == feat_type:\n                    final_vect.append(var(vect[i1]))\n                else:\n                    print('unknown feat_type: '+feat_type + ':'+feat)\n            else:\n                final_vect.append(vect[i1][0])\n\n        return final_vect\n\n    def __str__(self):\n        return str(self.label) + ',' + self.url +':' + str(self.get_feature_vector())\n\n\ndef parse_files(fability_score, feats_file):\n    with open(fability_score, 'rU') as f1, open(feats_file, 'rU') as f2:\n        feats = {}\n\n        line = f1.next().split(',')\n        for i in range(0,len(line)):\n            try:\n                line[i] = float(line[i])\n            except ValueError:\n                pass\n\n        for i in range(0,len(line)):\n            if str(type) in str(line[i]):\n                index = 
i\n break\n\n for line in f1:\n line = line.split(',')\n url = line[0]\n try:\n label = line[index]\n except:\n print(\"Could not find index of metric: \"+type)\n quit()\n try:\n feats[url] = Instance(float(label), url)\n except:\n feats[url] = Instance(float(0), url)\n\n feat_names = []\n firstline =f2.next()\n for f in firstline.replace(\"\\\"\",\"\").split('\\t'):\n feat_names.append(f)\n\n for line in f2:\n line = line.strip().replace(\"\\\"\",\"\").split('\\t')\n url = line[0]\n inst = feats[url]\n inst.set_feature_names(feat_names)\n inst.add(line)\n\n return feats\n\n\ndef save_original_to_svm_file(instances, feat_file):\n with open(feat_file, 'w') as f:\n for url in instances.keys():\n inst = instances[url]\n f.write(str(inst.label)[0:5] + ' ')\n visits = inst.visits\n for vals in visits:\n for j in range(0, len(vals)):\n f.write(str(j) + ':' + str(vals[j]).strip() + ' ')\n f.write('#'+str(url)+'\\n')\n f.write('\\n')\n f.close()\n\n\ndef info_gain(labels, features, info_gain_res_file, type=''):\n print(\"**** Info Gain ****\")\n print('Total features: %d' % len(features.toarray()[0]))\n total_score =0\n n_folds = 10\n n_labels = len(list(labels))\n kfolds = cross_validation.KFold(n_labels, n_folds=n_folds, shuffle=True)\n feat_imp = [0]*len(features.toarray()[0])\n\n for fold, (train, test) in enumerate(kfolds, start=1):\n print(\"Fold %d\" % fold)\n X_train, X_test = features[train], features[test]\n y_train, y_test = labels[train], labels[test]\n # rf = RandomForestRegressor(n_estimators=250, random_state=26)\n rf = RandomForestClassifier(n_estimators=250, random_state=26)\n rf.fit(X_train, y_train)\n\n importances = rf.feature_importances_\n indices = np.argsort(importances)[::-1]\n\n for f in range(X_train.shape[1]):\n feat_imp[indices[f]] = feat_imp[indices[f]] + importances[indices[f]]\n score = rf.score(X_test, y_test)\n # print(\"Fold Score: \"+str(score))\n total_score += score\n # print(\"Total Score (\"+str(type)+\"): \"+str(total_score/n_folds))\n\n add_colm_to_file(info_gain_res_file, [type]+feat_imp)\n\n\ndef add_colm_to_file(file, colm):\n if os.path.exists(file):\n cur = []\n with open(file, \"rU\") as f:\n for line in f:\n cur.append(line.strip())\n f.close()\n with open(file, \"w\") as f:\n for i in range(0, len(colm)):\n f.write(cur[i] + \",\"+str(colm[i])+\"\\n\")\n f.close()\n else:\n with open(file, \"w\") as f:\n for i in range(0, len(colm)):\n f.write(str(colm[i]) + \"\\n\")\n f.close()\n\n\ndef bucket_list(inf, outf): #hehe\n with open (inf, 'r') as f1, open(outf, 'w') as f2:\n for line in f1:\n n = line.split('#')[1].strip()\n l = line.split(' ')[0].strip()\n f2.write(l + \",\" + n + '\\n')\n f1.close()\n f2.close()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Meta Learner')\n parser.add_argument('-f', '--featurefile',\n help=\"Filename with site level features (csv)\")\n parser.add_argument('-s', '--fabilityscorefile',\n help=\"Filename with fability score (csv)\")\n parser.add_argument('-n', '--columnheader',\n help=\"Column header to look for the fability score csv file (e.g. 
ens_tpr)\")\n    parser.add_argument('-t', '--topnum', default=0.95,\n                        help=\"The threshold for positive classes\")\n    parser.add_argument('-b', '--bottomnum', default=0.33,\n                        help=\"The threshold for negative classes\")\n    parser.add_argument('-o', '--output',\n                        help=\"Output file DIRECTORY\")\n\n    # Parse arguments\n    args = parser.parse_args()\n\n    fability_score = args.fabilityscorefile\n    feats_file = args.featurefile\n    type = args.columnheader\n    top_num = args.topnum\n    bottom_num = args.bottomnum\n    out = args.output\n\n    svmfile = feats_file.replace('.csv','.svm')\n    feats = parse_files(fability_score, feats_file)\n    save_insts_to_svm_file(feats, svmfile)\n\n    bucketed_svm = even_classes(svmfile, [bottom_num,top_num], type, change_labels=False)\n    # out = 'res/' + str(threshold) + '_hi_level_res_' + type + '.csv'\n    # read the bucketed instances back in for the feature importance ranking\n    features, labels = load_svmlight_file(bucketed_svm)\n\n    info_gain(labels, features, os.path.join(out, 'feat_res.csv'), type)\n\n    # saves info about the buckets if you want to\n    # outf = 'bucket_lists/bucket_list_'+type+'.csv'\n    # bucket_list(bucketed_svm, outf)\n","sub_path":"variance/classification/MetaLearner.py","file_name":"MetaLearner.py","file_ext":"py","file_size_in_byte":9196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"178736738","text":"# coding:utf-8\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .forms import TextForm\nfrom ctypes import *\n\ndef index(request):\n    if request.method == 'POST':  # when the form is submitted\n\n        form = TextForm(request.POST)  # form holds the submitted data\n\n        if form.is_valid():  # if the submitted data is valid\n            a = form.cleaned_data['a']\n            b = form.cleaned_data['b']\n            dll = cdll.LoadLibrary('hello.dll')\n            c = dll.IntAdd(a, b)\n            return HttpResponse(str(c))\n\n    else:  # on a normal (GET) visit\n        form = TextForm()\n    return render(request, 'index.html', {'form': form})","sub_path":"py36dll/dll/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"324399646","text":"\"\"\"empty message\n\nRevision ID: a348563f6f71\nRevises: 27b26ea9d40e\nCreate Date: 2020-02-21 15:58:53.551979\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a348563f6f71'\ndown_revision = '27b26ea9d40e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('settings_strict')\n    op.add_column('fields', sa.Column('is_strict', sa.Boolean(), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('fields', 'is_strict')\n op.create_table('settings_strict',\n sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('is_strict', sa.BOOLEAN(), autoincrement=False, nullable=True),\n sa.Column('field_id', sa.INTEGER(), autoincrement=False, nullable=True),\n sa.ForeignKeyConstraint(['field_id'], ['fields.id'], name='settings_strict_field_id_fkey', ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id', name='settings_strict_pkey'),\n sa.UniqueConstraint('field_id', name='unique_field')\n )\n # ### end Alembic commands ###\n","sub_path":"src/migrations/versions/a348563f6f71_.py","file_name":"a348563f6f71_.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"262217487","text":"#!/usr/bin/python\n\nimport pickle\nimport sys\nimport matplotlib.pyplot\nsys.path.append(\"../tools/\")\nfrom feature_format import featureFormat, targetFeatureSplit\n\n\n### read in data dictionary, convert to numpy array\ndata_dict = pickle.load( open(\"../final_project/final_project_dataset.pkl\", \"r\") )\n\nitem = data_dict.items()\nfoutlier = filter(lambda x:x[1]['salary'] > 2.5e7 and not(x[1]['salary'] == 'NaN'), item)\nfoutlier2 = filter(lambda x:x[1]['salary'] > 1e6 and not(x[1]['salary'] == 'NaN') and x[1]['bonus'] > 5e6 \\\n and not(x[1]['bonus'] == 'NaN'), item)\n\ndata_dict.pop('TOTAL', 0 )\n\nfeatures = [\"salary\", \"bonus\"]\ndata = featureFormat(data_dict, features)\n\n\n### your code below\n\nfor point in data:\n salary = point[0]\n bonus = point[1]\n matplotlib.pyplot.scatter( salary, bonus )\n\nmatplotlib.pyplot.xlabel(\"salary\")\nmatplotlib.pyplot.ylabel(\"bonus\")\nmatplotlib.pyplot.show()\n\n\n","sub_path":"ud120-projects-master/outliers/enron_outliers.py","file_name":"enron_outliers.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"30037353","text":"import streamlit as st\r\n# To make things easier later, we're also importing numpy and pandas for\r\n# working with sample data.\r\nimport numpy as np\r\nimport pandas as pd\r\n# import seaborn as sns\r\n# import matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\n# from jinja2 import Environment, FileSystemLoader\r\n# import uuid\r\n# from github import Github\r\n# from dotenv import load_dotenv\r\nimport os\r\n# import collections\r\n\r\n# import utils\r\n\r\n\r\n\r\n#*************************************#\r\n# Set up github access for \"Open in Colab\" button.\r\n# TODO: Maybe refactor this to another file.\r\n# load_dotenv() # load environment variables from .env file\r\n# if os.getenv(\"GITHUB_TOKEN\") and os.getenv(\"REPO_NAME\"):\r\n# g = Github(os.getenv(\"GITHUB_TOKEN\"))\r\n# repo = g.get_repo(os.getenv(\"REPO_NAME\"))\r\n# colab_enabled = True\r\n\r\n# def add_to_colab(notebook):\r\n# \"\"\"Adds notebook to Colab by pushing it to Github repo and returning Colab link.\"\"\"\r\n# notebook_id = str(uuid.uuid4())\r\n# repo.create_file(\r\n# f\"notebooks/{notebook_id}/generated-notebook.ipynb\",\r\n# f\"Added notebook {notebook_id}\",\r\n# notebook,\r\n# )\r\n# colab_link = f\"http://colab.research.google.com/github/{os.getenv('REPO_NAME')}/blob/main/notebooks/{notebook_id}/generated-notebook.ipynb\"\r\n# return colab_link\r\n\r\n\r\n# else:\r\n# colab_enabled = False\r\n \r\n#*************************************#\r\n\r\n#*************************************#\r\n\r\n\r\nimg_dirs = 
pd.DataFrame()\r\nimg_dirs['dir'] = ['obama','daw_su','jackie_chan','messi']\r\n\r\ntemplate_dirs = pd.DataFrame()\r\ntemplate_dirs['dir'] = None\r\n# list_dir = []\r\n# for f in os.scandir(\"templates\"):\r\n#     for img in os.scandir(f):\r\n#         list_dir.append(img)\r\n# template_dirs['dir'] = list_dir\r\n\r\n\r\n\r\nwith st.sidebar:\r\n    option = st.sidebar.selectbox(\r\n        'Select One Person',img_dirs['dir'])\r\n    \r\n    def load_img(name):\r\n        for f in os.scandir(\"templates\"):\r\n            if (f.is_dir() and f.name == name):\r\n                list_dir = []\r\n                for img in os.scandir(f):\r\n                    list_dir.append(img.name)\r\n                template_dirs['dir'] = list_dir\r\n    \r\n    load_img(option)\r\n    \r\n    img_file = st.selectbox(\"Choose any one image\", template_dirs['dir'])\r\n    st.info(\"Copyright@Anonymous\")\r\n\r\n\r\n\r\n\r\n\r\n#*****************************************#\r\n#loading model file and test\r\n\r\n# import urllib.request\r\n# import zipfile\r\n# from io import BytesIO\r\n\r\n#url = 'https://github.com/Rajkap/Streamlit_app/blob/691694146b2baf55ed03dead842aa2b2d3e90224/model_file.zip'\r\n#z = zipfile.ZipFile(BytesIO(urllib.request.urlopen(url).read()))\r\n#z.extractall()\r\n\r\n\r\n\r\nimport os\r\nfrom zipfile import ZipFile\r\nwork_dir = os.getcwd() #Saves the current working directory.\r\nprint(work_dir)\r\n# st.write(work_dir)\r\nwith ZipFile(os.path.join(work_dir ,'model_file.zip'),'r') as zipobject:\r\n    zipobject.extractall() \r\npath = os.scandir(work_dir)\r\n# st.write(path)\r\nmodel_path = None\r\nfor f in path:\r\n    if f.name == 'model_face_recog_eg1 - Copy.h5':\r\n        model_path = f\r\n\r\n\r\nmodel_file = tf.keras.models.load_model(model_path)\r\n\r\ntemplate ='templates'\r\nreal_path = os.path.join(work_dir ,template,option,img_file)\r\n# st.write(real_path)\r\n#path = input('Enter the path of your image in order to predict:')\r\n# img_path = 'https://github.com/Rajkap/Streamlit_app/blob/691694146b2baf55ed03dead842aa2b2d3e90224/templates/'+option+'/'+img_file\r\nimg = tf.keras.preprocessing.image.load_img(real_path, target_size=(160,160))\r\nst.image(tf.keras.preprocessing.image.load_img(real_path),width=250)\r\ndictionary = {0:'Daw Aung San SuuKyi',1:'Jackie Chan',2:'Messi',3:'Barack Obama'}\r\nx = tf.keras.preprocessing.image.img_to_array(img)\r\nx = np.expand_dims(x,axis=0)\r\nx /= 255.0\r\nimages = np.vstack([x])\r\nclasses = model_file.predict(x)\r\ny_classes=classes.argmax(axis=-1)\r\nlabel = y_classes[0]#9\r\n#print(label)\r\nst.write(\"The model predicts the answer is \",dictionary[label], \".\")\r\n","sub_path":"some.py","file_name":"some.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"434030916","text":"\"\"\"Models for Classes, Specs, Spells and Roles.\"\"\"\n\n# IMPORT LOCAL LIBRARIES\nfrom lorgs import utils\nfrom lorgs.models import base\n\n\nclass WowRole(base.Model):\n    \"\"\"A role like Tank, Healer, DPS.\"\"\"\n\n    def __init__(self, name, code=\"\"):\n        # self.id = id\n        self.name = name\n        self.code = code or name.lower()\n\n        self.icon = f\"roles/{self.name.lower()}.jpg\"\n        self.specs = []\n\n    def __repr__(self):\n        return f\"<WowRole({self.name})>\"\n\n    def __lt__(self, other):\n        return self.code < other.code\n\n    @property\n    def metric(self):\n        \"\"\"str: the preferred metric. aka: dps for all. 
hps for healers.\"\"\"\n        return \"hps\" if self.code == \"heal\" else \"dps\"\n\n\nclass WowClass(base.Model):\n    \"\"\"A playable class in wow.\"\"\"\n\n    def __init__(self, id: int, name: str, color: str=\"\"):\n\n        # int: class id, mostly used for sorting\n        self.id = id\n        self.name = name\n        self.color = color\n        self.specs = []\n\n        self.name_slug_cap = self.name.replace(\" \", \"\")\n        self.name_slug = utils.slug(self.name)\n\n        #: bool: flag for the trinkets/potions groups\n        self.is_other = self.name.lower() == \"other\"\n\n    def __repr__(self):\n        return f\"<WowClass({self.name})>\"\n\n    def __lt__(self, other):\n        return self.id < other.id\n\n    def add_spell(self, **kwargs):\n        for spec in self.specs:\n            spec.add_spell(**kwargs)\n\n\nclass WowSpec(base.Model):\n    \"\"\"docstring for Spec\"\"\"\n\n    def __init__(self, wow_class: WowClass, name: str, role: WowRole, short_name: str = \"\"):\n        super().__init__()\n        self.name = name\n        self.role = role\n\n        self.spells = []\n\n        self.wow_class = wow_class\n        self.wow_class.specs.append(self)\n\n        # bool: whether this spec is currently supported\n        self.supported = True\n\n        # Generate some names\n        self.full_name = f\"{self.name} {self.wow_class.name}\"\n        self.short_name = short_name or self.name  # to be overwritten\n\n        # slugified names\n        self.name_slug = utils.slug(self.name)\n        self.full_name_slug = f\"{self.wow_class.name_slug}-{self.name_slug}\"\n\n        # str: Spec Name without spaces, but still capCase.. eg.: \"BeastMastery\"\n        self.name_slug_cap = self.name.replace(\" \", \"\")\n\n        self.icon = f\"specs/{self.full_name_slug}.jpg\"\n\n\n    def __repr__(self):\n        return f\"<WowSpec({self.full_name})>\"\n\n    def __lt__(self, other):\n\n        def sort_key(obj):\n            return (obj.role, obj.wow_class, obj.name)\n\n        return sort_key(self) < sort_key(other)\n\n    ##########################\n    # Methods\n    #\n\n    def add_spell(self, **kwargs):\n        kwargs.setdefault(\"color\", self.wow_class.color)\n        kwargs.setdefault(\"group\", self)\n\n        spell = WowSpell(**kwargs)\n        spell.spec = self\n        self.spells.append(spell)\n\n        return spell\n\n\nclass WowSpell(base.Model):\n    \"\"\"Container to define a spell.\"\"\"\n\n    # yoink\n    ICON_ROOT = \"https://wow.zamimg.com/images/wow/icons/medium\"\n\n    def __init__(self, spell_id: int, cooldown: int = 0, duration: int = 0, show: bool = True, **kwargs):\n        self.spell_id = spell_id\n        self.cooldown = cooldown\n        self.duration = duration\n\n        self.spec = None\n        self.icon = kwargs.get(\"icon\") or \"\"\n        self.name = kwargs.get(\"name\") or \"\"\n        self.show = show\n        self.color = kwargs.get(\"color\") or \"\"\n        self.group = kwargs.get(\"group\")\n\n        \"\"\"str: info used for the wowhead tooltips.\"\"\"\n        self.wowhead_data = kwargs.get(\"wowhead_data\") or f\"spell={self.spell_id}\"\n\n    def __repr__(self):\n        return f\"<WowSpell({self.spell_id}, {self.name})>\"\n\n    ##########################\n    # Methods\n    #\n\n    def as_dict(self):\n\n        return {\n            \"spell_id\": self.spell_id,\n            \"duration\": self.duration,\n            \"cooldown\": self.cooldown,\n\n            # display attributes\n            \"name\": self.name,\n            \"icon\": self.icon,\n            \"color\": self.color,\n            \"show\": self.show,\n        }\n\n    @property\n    def icon_path(self):\n        \"\"\"str: url to the image path.\"\"\"\n        # for overwrites with custom images\n        if self.icon.startswith(\"/\"):\n            return self.icon\n        return f\"{self.ICON_ROOT}/{self.icon}\"\n","sub_path":"lorgs/models/specs.py","file_name":"specs.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"539009483","text":"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-\n# vi: set 
ft=python sts=4 ts=4 sw=4 noet :\n\n# This file is part of Fail2Ban.\n#\n# Fail2Ban is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Fail2Ban is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Fail2Ban; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.\n\n# Author: Cyril Jaquier\n#\n\n__author__ = \"Cyril Jaquier\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier\"\n__license__ = \"GPL\"\n\nimport logging\nfrom configreader import ConfigReader\nfrom jailreader import JailReader\n\n# Gets the instance of the logger.\nlogSys = logging.getLogger(\"fail2ban.client.config\")\n\nclass JailsReader(ConfigReader):\n\n\tdef __init__(self, force_enable=False, **kwargs):\n\t\t\"\"\"\n\t\tParameters\n\t\t----------\n\t\tforce_enable : bool, optional\n\t\t  Passed to JailReader to force enable the jails.\n\t\t  It is for internal use\n\t\t\"\"\"\n\t\tConfigReader.__init__(self, **kwargs)\n\t\tself.__jails = list()\n\t\tself.__force_enable = force_enable\n\n\tdef getJails(self):\n\t\treturn self.__jails\n\n\tdef read(self):\n\t\treturn ConfigReader.read(self, \"jail\")\n\n\tdef getOptions(self, section=None):\n\t\t\"\"\"Reads configuration for jail(s) and adds enabled jails to __jails\n\t\t\"\"\"\n\t\topts = []\n\t\tself.__opts = ConfigReader.getOptions(self, \"Definition\", opts)\n\n\t\tif section is None:\n\t\t\tsections = self.sections()\n\t\telse:\n\t\t\tsections = [ section ]\n\n\t\t# Get the options of all jails.\n\t\tparse_status = True\n\t\tfor sec in sections:\n\t\t\tjail = JailReader(sec, basedir=self.getBaseDir(),\n\t\t\t\t\t\t\t  force_enable=self.__force_enable)\n\t\t\tjail.read()\n\t\t\tret = jail.getOptions()\n\t\t\tif ret:\n\t\t\t\tif jail.isEnabled():\n\t\t\t\t\t# We only add enabled jails\n\t\t\t\t\tself.__jails.append(jail)\n\t\t\telse:\n\t\t\t\tlogSys.error(\"Errors in jail %r. Skipping...\" % sec)\n\t\t\t\tparse_status = False\n\t\treturn parse_status\n\n\tdef convert(self, allow_no_files=False):\n\t\t\"\"\"Convert the previously read __opts and jails into the commands stream\n\n\t\tParameters\n\t\t----------\n\t\tallow_no_files : bool\n\t\t  Whether to allow log files to be missing entirely. 
Primarily is\n\t\t used for testing\n\t\t\"\"\"\n\n\t\tstream = list()\n\t\tfor opt in self.__opts:\n\t\t\tif opt == \"\":\n\t\t\t\tstream.append([])\n\t\t# Convert jails\n\t\tfor jail in self.__jails:\n\t\t\tstream.extend(jail.convert(allow_no_files=allow_no_files))\n\t\t# Start jails\n\t\tfor jail in self.__jails:\n\t\t\tstream.append([\"start\", jail.getName()])\n\n\t\treturn stream\n\n","sub_path":"rootfs/usr/share/fail2ban/client/jailsreader.py","file_name":"jailsreader.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"157521367","text":"import cv2\nimport numpy as np\n\ndef main():\n\timg = cv2.imread('VOC2010_1.jpg')\n\twith open('yolo_labels/VOC2010_1.txt') as f:\n\t\tcontent = f.readlines()\n\t\th, w = img.shape[:2]\n\t\tfor box in content:\n\t\t\tbox = box.split()\n\t\t\txCenter = float(box[1])\n\t\t\tyCenter = float(box[2])\n\t\t\twidth = float(box[3])\n\t\t\theight = float(box[4])\n\n\t\t\txCenter = xCenter * w\n\t\t\tyCenter = yCenter * h\n\t\t\twidth = width * w\n\t\t\theight = height * h\n\n\n\t\t\tp1x = int(xCenter - width/2)\n\t\t\tp1y = int(yCenter - height/2)\n\t\t\tp2x = int(xCenter + width/2)\n\t\t\tp2y = int(yCenter + height/2)\n\t\t\t# BUG, SWITCH THE Y AND X\n\t\t\t# BUG 2, hand sizes for our original images are off\n\t\t\tcv2.rectangle(img, (p1x, p1y), (p2x, p2y), (255,0,0), 5)\n\n\t\tcv2.imshow('image', img)\n\t\tcv2.waitKey(0)\n\t\tcv2.destroyAllWindows()\n\nif __name__ == '__main__':\n\tmain()","sub_path":"voc_unprocessed/img/draw_box.py","file_name":"draw_box.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"208125338","text":"import os\nimport ctypes\nimport dandan\n\ndirname = os.path.dirname(os.path.abspath(__file__))\ndllname = os.path.join(dirname, r\"..\\x64\\Debug\\clib.dll\")\nlogger = dandan.logger.getLogger()\n\nclib = ctypes.cdll.LoadLibrary(dllname)\n\nlogger.debug(\"load clib %s\", clib)\n\nclib.hello()\n\nfirst = 3\nsecond = 5\nlogger.debug(\"compute %s + %s = %s\", first, second, clib.add(first, second) )","sub_path":"hybrid/python-cpp/hello/main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"11397341","text":"import re\n\nwith open(\"inputs/day4.txt\") as file: \n\tpps = file.read().split(\"\\n\\n\")\n\nkeywords = [\"byr\",\"iyr\",\"eyr\",\"hgt\",\"hcl\",\"ecl\",\"pid\"]\ncount1 = 0\ncount2 = 0\nfor pp in pps:\n\tif all(x in pp for x in keywords):\n\t\tcount1 += 1\n\t\t#part two \n\t\tfields = pp.split()\n\t\t# delete cid from list \n\t\tfields = [x for x in fields if not x.startswith('cid')]\n\t\tfields.sort()\n\t\tstr1 = \"\".join(fields).strip()\t\t\n\t\trex = \"byr:(19[2-9][0-9]|200[0-2])+\"\\\n\t\t\t\t \"ecl:(amb|blu|brn|gry|grn|hzl|oth)+\"\\\n\t\t\t\t \"eyr:(202[0-9]|2030)+\"\\\n\t\t\t\t \"hcl:#([a-z]|[0-9]){6}\"\\\n\t\t\t\t \"hgt:((1[5-8][0-9]|19[0-3])+cm|(59|6[0-9]|7[0-6])+in)+\"\\\n\t\t\t\t \"iyr:(201[0-9]|2020)+\"\\\n\t\t\t\t \"pid:([0-9]){9}$\"\n\t\tregexp = re.compile(rex)\n\t\tif regexp.search(str1):\n\t\t\tcount2 += 1\n\t\t\nprint(\"part one\", count1)\nprint(\"part two\", count2)\n","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"214545410","text":"import queue\nimport 
http.client, urllib.parse, json\nimport urllib.request\nfrom threading import Thread\nfrom configuration import Configuration\n\nconf = Configuration()\n\n\nclass BingSearchWorker(Thread):\n def __init__(self, queue_input, queue_output):\n Thread.__init__(self)\n self.setDaemon(True)\n self.__terms = queue_input\n self.__queue_output = queue_output\n self.__current_term = None\n\n if len(conf.search(\"subscription_key\")) != 32:\n print(\"Invalid Bing Search API subscription key!\")\n print(\"Please paste yours into the source code.\")\n\n def run(self):\n \"Performs a Bing image search and returns the results.\"\n\n count = 100\n offset = 0\n while True:\n try:\n self.__current_term = self.__terms.get(block=True, timeout=1)\n self.search(self.__current_term, count, 0)\n offset = count\n\n except queue.Empty:\n if self.__current_term:\n self.search(self.__current_term, count, offset)\n offset = offset+count\n\n def search(self, term, count, offset):\n if term==None:\n return\n\n print(\"perform new search term: \" + term + \" index: \"+str(offset))\n\n headers = {'Ocp-Apim-Subscription-Key': conf.search(\"subscription_key\")}\n conn = http.client.HTTPSConnection(conf.search(\"host\"))\n query = urllib.parse.quote(self.__current_term)\n conn.request(\"GET\", conf.search(\"uri\") + \"?q=\" + query + \"&count=\"+str(count) + \"&offset=\"+str(offset), headers=headers)\n response = conn.getresponse()\n response = response.read().decode(\"utf8\")\n result = json.loads(response)\n for img in result[\"value\"]:\n self.__queue_output.put({\n 'url': img[\"contentUrl\"],\n 'width': img[\"width\"],\n 'height': img[\"height\"]\n })\n\n\n","sub_path":"src/010-crawler/search/impl.py","file_name":"impl.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"119918997","text":"from django.test import TestCase\n\nfrom whwn.models import WHWNUser\nfrom whwn.factories import (TeamFactory, ItemSKUFactory, UserFactory,\n UserProfileFactory, ItemFactory)\n\nclass TeamTestCase(TestCase):\n\n def setUp(self):\n self.team = TeamFactory.create()\n self.sku = ItemSKUFactory.create(team=self.team)\n self.user1 = UserFactory.create(userprofile__team=self.team)\n self.user2 = UserFactory.create(userprofile__team=self.team)\n self.item1 = ItemFactory.create(sku=self.sku, quantity=5)\n self.item2 = ItemFactory.create(sku=self.sku, quantity=4, possessor=self.user1)\n\n def test_get_items(self):\n self.assertEquals(len(self.team.items()), 2)\n items = self.team.items()\n self.assertTrue(self.item1 in items and self.item2 in items) \n\n def test_get_members(self):\n members = self.team.members()\n self.assertTrue(self.user1 in members and self.user2 in members)\n\n def test_first_member_set_as_primary_user(self):\n teamX = TeamFactory.create()\n userX = UserFactory.create(userprofile__team=teamX)\n self.assertEquals(WHWNUser.objects.get(id=teamX.primary_user.id), userX)\n","sub_path":"app/whwn/models/tests/test_team.py","file_name":"test_team.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"411208595","text":"# Copyright 2015 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport base64\nimport collections\nimport copy\nimport httplib\nimport json\nimport logging\nimport re\nimport socket\nimport time\n\nimport httplib2\nimport oauth2client.client\n\nfrom googleapiclient import errors\nfrom infra_libs.ts_mon.common import http_metrics\n\n# TODO(nxia): crbug.com/790760 upgrade oauth2client to 4.1.2.\noauth2client_util_imported = False\ntry:\n from oauth2client import util\n oauth2client_util_imported = True\nexcept ImportError:\n pass\n\n\n# default timeout for http requests, in seconds\nDEFAULT_TIMEOUT = 30\n\n\nclass AuthError(Exception):\n pass\n\n\nclass DelegateServiceAccountCredentials(\n oauth2client.client.AssertionCredentials):\n \"\"\"Authorizes an HTTP client with a service account for which we are an actor.\n\n This class uses the IAM API to sign a JWT with the private key of another\n service account for which we have the \"Service Account Actor\" role.\n \"\"\"\n\n MAX_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds\n _SIGN_BLOB_URL = 'https://iam.googleapis.com/v1/%s:signBlob'\n\n def __init__(self, http, service_account_email, scopes, project='-'):\n \"\"\"\n Args:\n http: An httplib2.Http object that is authorized by another\n oauth2client.client.OAuth2Credentials with credentials that have the\n service account actor role on the service_account_email.\n service_account_email: The email address of the service account for which\n to obtain an access token.\n scopes: The desired scopes for the token.\n project: The cloud project to which service_account_email belongs. The\n default of '-' makes the IAM API figure it out for us.\n \"\"\"\n if not oauth2client_util_imported:\n raise AssertionError('Failed to import oauth2client.util.')\n super(DelegateServiceAccountCredentials, self).__init__(None)\n self._service_account_email = service_account_email\n self._scopes = util.scopes_to_string(scopes)\n self._http = http\n self._name = 'projects/%s/serviceAccounts/%s' % (\n project, service_account_email)\n\n def sign_blob(self, blob):\n response, content = self._http.request(\n self._SIGN_BLOB_URL % self._name,\n method='POST',\n body=json.dumps({'bytesToSign': base64.b64encode(blob)}),\n headers={'Content-Type': 'application/json'})\n if response.status != 200:\n raise AuthError('Failed to sign blob as %s: %d %s' % (\n self._service_account_email, response.status, response.reason))\n\n data = json.loads(content)\n return data['keyId'], data['signature']\n\n def _generate_assertion(self):\n # This is copied with small modifications from\n # oauth2client.service_account._ServiceAccountCredentials.\n\n header = {\n 'alg': 'RS256',\n 'typ': 'JWT',\n }\n\n now = int(time.time())\n payload = {\n 'aud': self.token_uri,\n 'scope': self._scopes,\n 'iat': now,\n 'exp': now + self.MAX_TOKEN_LIFETIME_SECS,\n 'iss': self._service_account_email,\n }\n\n assertion_input = (\n self._urlsafe_b64encode(header) + b'.' +\n self._urlsafe_b64encode(payload))\n\n # Sign the assertion.\n _, rsa_bytes = self.sign_blob(assertion_input)\n signature = rsa_bytes.rstrip(b'=')\n\n return assertion_input + b'.' 
+ signature\n\n def _urlsafe_b64encode(self, data):\n # Copied verbatim from oauth2client.service_account.\n return base64.urlsafe_b64encode(\n json.dumps(data, separators=(',', ':')).encode('UTF-8')).rstrip(b'=')\n\n\nclass RetriableHttp(object):\n \"\"\"A httplib2.Http object that retries on failure.\"\"\"\n\n def __init__(self, http, max_tries=5, backoff_time=1,\n retrying_statuses_fn=None):\n \"\"\"\n Args:\n http: an httplib2.Http instance\n max_tries: a number of maximum tries\n backoff_time: a number of seconds to sleep between retries\n retrying_statuses_fn: a function that returns True if a given status\n should be retried\n \"\"\"\n self._http = http\n self._max_tries = max_tries\n self._backoff_time = backoff_time\n self._retrying_statuses_fn = retrying_statuses_fn or \\\n set(range(500,599)).__contains__\n\n def request(self, uri, method='GET', body=None, *args, **kwargs):\n for i in range(1, self._max_tries + 1):\n try:\n response, content = self._http.request(uri, method, body, *args,\n **kwargs)\n\n if self._retrying_statuses_fn(response.status):\n logging.info('RetriableHttp: attempt %d receiving status %d, %s',\n i, response.status,\n 'final attempt' if i == self._max_tries else \\\n 'will retry')\n else:\n break\n except (ValueError, errors.Error,\n socket.timeout, socket.error, socket.herror, socket.gaierror,\n httplib2.HttpLib2Error) as error:\n logging.info('RetriableHttp: attempt %d received exception: %s, %s',\n i, error, 'final attempt' if i == self._max_tries else \\\n 'will retry')\n if i == self._max_tries:\n raise\n time.sleep(self._backoff_time)\n\n return response, content\n\n def __getattr__(self, name):\n return getattr(self._http, name)\n\n def __setattr__(self, name, value):\n if name in ('request', '_http', '_max_tries', '_backoff_time',\n '_retrying_statuses_fn'):\n self.__dict__[name] = value\n else:\n setattr(self._http, name, value)\n\n\nclass InstrumentedHttp(httplib2.Http):\n \"\"\"A httplib2.Http object that reports ts_mon metrics about its requests.\"\"\"\n\n def __init__(self, name, time_fn=time.time, timeout=DEFAULT_TIMEOUT,\n **kwargs):\n \"\"\"\n Args:\n name: An identifier for the HTTP requests made by this object.\n time_fn: Function returning the current time in seconds. 
Use for testing\n purposes only.\n \"\"\"\n\n super(InstrumentedHttp, self).__init__(timeout=timeout, **kwargs)\n self.fields = {'name': name, 'client': 'httplib2'}\n self.time_fn = time_fn\n\n def _update_metrics(self, status, start_time):\n status_fields = {'status': status}\n status_fields.update(self.fields)\n http_metrics.response_status.increment(fields=status_fields)\n\n duration_msec = (self.time_fn() - start_time) * 1000\n http_metrics.durations.add(duration_msec, fields=self.fields)\n\n def request(self, uri, method=\"GET\", body=None, *args, **kwargs):\n request_bytes = 0\n if body is not None:\n request_bytes = len(body)\n http_metrics.request_bytes.add(request_bytes, fields=self.fields)\n\n start_time = self.time_fn()\n try:\n response, content = super(InstrumentedHttp, self).request(\n uri, method, body, *args, **kwargs)\n except socket.timeout:\n self._update_metrics(http_metrics.STATUS_TIMEOUT, start_time)\n raise\n except (socket.error, socket.herror, socket.gaierror):\n self._update_metrics(http_metrics.STATUS_ERROR, start_time)\n raise\n except (httplib.HTTPException, httplib2.HttpLib2Error) as ex:\n status = http_metrics.STATUS_EXCEPTION\n if 'Deadline exceeded while waiting for HTTP response' in str(ex):\n # Raised on Appengine (gae_override/httplib.py).\n status = http_metrics.STATUS_TIMEOUT\n self._update_metrics(status, start_time)\n raise\n http_metrics.response_bytes.add(len(content), fields=self.fields)\n\n self._update_metrics(response.status, start_time)\n\n return response, content\n\n\nclass HttpMock(object):\n \"\"\"Mock of httplib2.Http\"\"\"\n HttpCall = collections.namedtuple('HttpCall', ('uri', 'method', 'body',\n 'headers'))\n\n def __init__(self, uris):\n \"\"\"\n Args:\n uris(dict): list of (uri, headers, body). `uri` is a regexp for\n matching the requested uri, (headers, body) gives the values returned\n by the mock. Uris are tested in the order from `uris`.\n `headers` is a dict mapping headers to value. The 'status' key is\n mandatory. 
`body` is a string.\n        Ex: [('.*', {'status': 200}, 'nicely done.')]\n    \"\"\"\n    self._uris = []\n    self.requests_made = []\n\n    for value in uris:\n      if not isinstance(value, (list, tuple)) or len(value) != 3:\n        raise ValueError(\"'uris' must be a sequence of (uri, headers, body)\")\n      uri, headers, body = value\n      compiled_uri = re.compile(uri)\n      if not isinstance(headers, dict):\n        raise TypeError(\"'headers' must be a dict\")\n      if not 'status' in headers:\n        raise ValueError(\"'headers' must have 'status' as a key\")\n\n      new_headers = copy.copy(headers)\n      new_headers['status'] = int(new_headers['status'])\n\n      if not isinstance(body, basestring):\n        raise TypeError(\"'body' must be a string, got %s\" % type(body))\n      self._uris.append((compiled_uri, new_headers, body))\n\n  # pylint: disable=unused-argument\n  def request(self, uri,\n              method='GET',\n              body=None,\n              headers=None,\n              redirections=1,\n              connection_type=None):\n    self.requests_made.append(self.HttpCall(uri, method, body, headers))\n    headers = None\n    body = None\n    for candidate in self._uris:\n      if candidate[0].match(uri):\n        _, headers, body = candidate\n        break\n    if not headers:\n      raise AssertionError(\"Unexpected request to %s\" % uri)\n    return httplib2.Response(headers), body\n","sub_path":"src/third_party/chromite/third_party/infra_libs/httplib2_utils.py","file_name":"httplib2_utils.py","file_ext":"py","file_size_in_byte":9570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"47923740","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 25 15:25:52 2018\r\n\r\n@author: srikant nayak\r\n\"\"\"\r\n\r\nfrom gd_assent import gradient_assent\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\nmy_data=np.genfromtxt('data1.csv',delimiter=',')\r\nw_gda=np.genfromtxt('w1_gda.csv',delimiter=',')\r\nnp.random.shuffle(my_data)\r\nmy_data=np.insert(my_data,0,1,axis=1)\r\nrow,col=my_data.shape\r\ntrain=math.ceil(.8*row)\r\ntest=row-train\r\ntraining,testing=my_data[:train,:],my_data[train:,:]\r\nxtrain=training[0:,0:-1]\r\nxtest=testing[0:,0:-1]\r\nytest=testing[0:,-1]\r\nytrain=training[0:,-1]\r\nprob=[]\r\ntpr=np.zeros(xtest.shape[0])\r\nfpr=np.zeros(xtest.shape[0])\r\ny_pred=np.zeros(xtest.shape[0])\r\n#w=gradient_assent(xtrain,ytrain,alpha=.0001,eps=.00001)\r\n\r\n\r\nfor i in range(test):\r\n    b=np.dot(w_gda.T,xtest[i])\r\n    f=(1 / (1 + np.exp(-b)))\r\n    prob.append(f)\r\n    \r\n\r\n#prob=np.array(prob)\r\nzipped=zip(prob,ytest)\r\nsorted_zipped=sorted(zipped, key=lambda x: x[0],reverse=True)\r\nprob1,y_test=zip(*sorted_zipped)\r\n\r\n\r\ndef div(x,y):\r\n    if (y == 0):\r\n        return 0\r\n    else:\r\n        return x/y\r\n    \r\n\r\ndef performance(prob,ytest,threshold):\r\n    y_pred=np.zeros(len(ytest))\r\n    tp,tn,fp,fn=0,0,0,0\r\n    for j in range(len(y_pred)):\r\n        if(threshold <= prob[j]):\r\n            y_pred[j]=1\r\n        else:\r\n            y_pred[j]=0\r\n        if(y_pred[j]==ytest[j]==1):\r\n            tp=tp+1\r\n        elif(y_pred[j]==1 and ytest[j]==0):\r\n            fp=fp+1 \r\n        elif(y_pred[j]==0 and ytest[j]==0):\r\n            tn=tn+1\r\n        else:\r\n            fn=fn+1 \r\n    sensitivity=div(tp,(tp+fn)) \r\n    fall_out=div(fp,(tn+fp))  # false positive rate, i.e. 1 - specificity\r\n    return [sensitivity,fall_out]\r\n    \r\nfor i in range(test): \r\n    sensitivity,fall_out =performance(prob1,y_test,prob1[i])\r\n    tpr=np.append(tpr,sensitivity)\r\n    fpr=np.append(fpr,fall_out)\r\nplt.plot(fpr,tpr)\r\nplt.show()\r\n","sub_path":"gda_roc.py","file_name":"gda_roc.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"273548489","text":"import urllib\nimport urllib.request\nimport json\nfrom math import sqrt\nfrom geopy.distance import great_circle\nfrom 
geopy.distance import geodesic\n\nfrom numpy import empty\nimport createNewData.data.config as in_config\nimport pyodbc\n\nclass UrlHandler():\n def __init__(self, in_config):\n self.in_config = in_config\n \n def __call__(self, *args):\n if args[0] == \"mineElevationData\":\n return self.mineElevationData(args[1])\n elif args[0] == \"callURL\":\n return self.callURL(args[1], args[2], args[3])\n elif args[0] == \"generateLocationRequest\":\n return self.generateLocationRequest(args[1])\n elif args[0] == \"EuclideanDist\":\n return self.EuclideanDist(args[1], args[2], \n args[3],args[4], \n args[5], args[6])\n else:\n return \"Object does not exist.\"\n\n def callURL(self, url, body, headers):\n \"\"\"Send request to URL and return response.\"\"\"\n if body:\n req = urllib.request.Request(url, body, headers)\n else:\n req = urllib.request.Request(url, headers=headers)\n response = urllib.request.urlopen(req, timeout=2000)\n return response\n \n def generateLocationRequest(self, shapeData):\n \"\"\"Generate Json request from dataframe input for elevations.\"\"\"\n listofLocations = []\n locationDict = {}\n for index,row in shapeData.iterrows():\n longlatdict = {}\n longlatdict[\"latitude\"] = row[1]\n longlatdict[\"longitude\"] = row[2]\n listofLocations.append(longlatdict.copy())\n locationDict[\"locations\"] = listofLocations\n return locationDict\n\n def mineElevationData(self, shapeData):\n \"\"\"Convert returned request into Json file type.\"\"\"\n body = str.encode(json.dumps(shapeData))\n response = self.callURL(in_config.url, body, in_config.elevHeaders)\n jsonReadyData = response.read().decode('utf8').replace(\"'\", '\"')\n elevationData = json.loads(jsonReadyData)\n return elevationData\n\n def EuclideanDist(self, alt1, alt2, lon1, lat1, lon2, lat2):\n \"\"\"Determine Euclidean distance between two coordinates and their elevations.\"\"\"\n alt_1 = alt1\n alt_2 = alt2\n dalt = alt_1-alt_2\n p1 = (lon1, lat1)\n p2 = (lon2, lat2)\n calt = geodesic(p1, p2).meters\n trueDistance = sqrt(calt**2 + dalt**2)\n return trueDistance","sub_path":"_pipeline/pypackages/urlHandler.py","file_name":"urlHandler.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"177303963","text":"import boto3 \nimport logging \n\n#setup simple logging for INFO \nlogger = logging.getLogger() \nlogger.setLevel(logging.INFO) \n\n#define the connection \nec2 = boto3.resource('ec2', region_name='us-east-1') \nclient = boto3.client('autoscaling')\n\ndef lambda_handler(event, context): \n # Use the filter() method of the instances collection to retrieve \n # all running EC2 instances with the AutoOff tag set to true. \n filters = [{ \n 'Name': 'tag:AutoOff', \n 'Values': ['true','True'] \n }, \n { \n 'Name': 'instance-state-name', \n 'Values': ['running'] \n } \n ] \n \n #filter the instances \n instances = ec2.instances.filter(Filters=filters) \n \n #locate all running instances \n RunningInstances = [instance.id for instance in instances] \n \n print (\"Running instances {0}\".format(RunningInstances)) # Logging\n \n # Make sure there are actually instances to shut down. 
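    # Suspending the scaling processes before stopping matters: without it the
    # ASG would mark the stopped instances as unhealthy and launch replacements.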
\n if len(RunningInstances) > 0: \n \n # Pause Autoscaling actions\n for instance in instances:\n # Find if the instance is in an ASG\n instanceAsgInfo = client.describe_auto_scaling_instances(InstanceIds=[instance.id])\n # Obtain all of the instances ASG details\n instanceAsgStatus = instanceAsgInfo['AutoScalingInstances']\n \n if len(instanceAsgStatus) > 0:\n # Filter to the ASG name for the instance\n instanceAsgName = instanceAsgStatus[0]['AutoScalingGroupName']\n # Pause ASG actions\n pauseASG = client.suspend_processes(\n AutoScalingGroupName=instanceAsgName,\n ScalingProcesses=[\n 'AlarmNotification',\n 'Launch',\n 'Terminate',\n 'ReplaceUnhealthy',\n ],\n )\n print (\"Instance {0} is in an ASG - Pausing ASG processes.\".format(instance.id)) # Logging\n print (pauseASG) # Logging\n\n else:\n print (\"Instance {0} is not in an ASG - Skipping.\".format(instance.id)) # Logging\n \n # Shutdown the instances \n shuttingDown = ec2.instances.filter(InstanceIds=RunningInstances).stop() \n print (\"Shutting down instances\")\n print (shuttingDown)\n\n else: \n print (\"Nothing to shutdown.\")\n","sub_path":"StartandStopInstances/StopInstances.py","file_name":"StopInstances.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"493261680","text":"__author__ = 'dustinlee'\n\nimport json\n\nfrom pico2d import *\n\nclass TileMap:\n\n\n def get_tile_image_rect(self, id):\n y = self.tile_rows - id // self.tile_cols - 1\n x = id % self.tile_cols\n return self.image_margin+x*(self.tile_width+self.image_spacing), \\\n self.image_margin+y*(self.tile_height+self.image_spacing), \\\n self.tile_width, self.tile_height\n\n def draw_to_origin(self, left, bottom, w=None, h=None):\n if w == None and h == None:\n w,h = self.map_width, self.map_height\n\n for y in range(h):\n for x in range(w):\n id = self.map2d[y][x]\n self.tileset_image.clip_draw_to_origin(*self.get_tile_image_rect(id), x=(x+left)*self.tile_width, y=(y+bottom)*self.tile_height)\n\n\n\n def clip_draw_to_origin(self, left, bottom, width, height, target_left, target_bottom, w=None, h=None):\n if w == None and h == None:\n w,h = width, height\n\n for y in range(h):\n for x in range(w):\n id = self.map2d[bottom+y][left+x]\n self.tileset_image.clip_draw_to_origin(*self.get_tile_image_rect(id), x=(x+target_left)*self.tile_width, y=(y+target_bottom)*self.tile_height)\n\n\ndef load_tile_map(name):\n with open(name) as f:\n data = json.load(f)\n\n tile_map = TileMap()\n\n base_tile_width = data['tilewidth']\n base_tile_height = data['tileheight']\n\n tileset = data['tilesets'][0]\n tileset_image_file_name = tileset['image']\n image_height = tileset['imageheight']\n image_width = tileset['imagewidth']\n image_margin = tileset['margin']\n image_spacing = tileset['spacing']\n tile_height = tileset['tileheight']\n tile_width = tileset['tilewidth']\n first_gid = tileset['firstgid']\n num_tiles = len(tileset['tiles'])\n\n tile_cols = (image_width - 1) // (tile_width + 1)\n tile_rows = (image_height - 1) // (tile_height + 1)\n\n layers = data['layers']\n layer = layers[0]\n #print(layer['type'])\n #print(data['renderorder'])\n map_height = layer['height']\n map_width = layer['width']\n tile_data = layer['data']\n render_order = data['renderorder']\n\n '''\n h = 4\n w = 8\n tile_data = [i for i in range(h*w)]\n '''\n\n map2d = []\n\n if render_order == 'right-up':\n for i in range(map_height):\n line = [x - first_gid for x in 
tile_data[i*map_width:i*map_width+map_width]]\n map2d.append(line)\n else:\n for i in reversed(range(map_height)):\n line = [x - first_gid for x in tile_data[i*map_width:i*map_width+map_width]]\n map2d.append(line)\n\n tile_map.tileset_image = load_image(tileset_image_file_name)\n tile_map.map2d = map2d\n tile_map.image_margin = image_margin\n tile_map.image_spacing = image_spacing\n tile_map.map_width = map_width\n tile_map.map_height = map_height\n tile_map.tile_width = tile_width\n tile_map.tile_height = tile_height\n tile_map.tile_rows = tile_rows\n tile_map.tile_cols = tile_cols\n return tile_map","sub_path":"Labs/Lab10 - Scroll and Tiling/tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"277477771","text":"# coding: utf-8\n\n\"\"\"\n Swiss Corporate API\n\n This is the release candidate version of the \\\"Swiss Corporate API\\\" specification. # noqa: E501\n\n OpenAPI spec version: 1.0.0.2-SNAPSHOT\n Contact: swisscorpapi@six-group.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass DirectoryProductName(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'de': 'str',\n 'en': 'str',\n 'fr': 'str',\n 'it': 'str'\n }\n\n attribute_map = {\n 'de': 'de',\n 'en': 'en',\n 'fr': 'fr',\n 'it': 'it'\n }\n\n def __init__(self, de=None, en=None, fr=None, it=None): # noqa: E501\n \"\"\"DirectoryProductName - a model defined in Swagger\"\"\" # noqa: E501\n\n self._de = None\n self._en = None\n self._fr = None\n self._it = None\n self.discriminator = None\n\n self.de = de\n self.en = en\n self.fr = fr\n self.it = it\n\n @property\n def de(self):\n \"\"\"Gets the de of this DirectoryProductName. # noqa: E501\n\n\n :return: The de of this DirectoryProductName. # noqa: E501\n :rtype: str\n \"\"\"\n return self._de\n\n @de.setter\n def de(self, de):\n \"\"\"Sets the de of this DirectoryProductName.\n\n\n :param de: The de of this DirectoryProductName. # noqa: E501\n :type: str\n \"\"\"\n if de is None:\n raise ValueError(\"Invalid value for `de`, must not be `None`\") # noqa: E501\n if de is not None and len(de) > 50:\n raise ValueError(\"Invalid value for `de`, length must be less than or equal to `50`\") # noqa: E501\n\n self._de = de\n\n @property\n def en(self):\n \"\"\"Gets the en of this DirectoryProductName. # noqa: E501\n\n\n :return: The en of this DirectoryProductName. # noqa: E501\n :rtype: str\n \"\"\"\n return self._en\n\n @en.setter\n def en(self, en):\n \"\"\"Sets the en of this DirectoryProductName.\n\n\n :param en: The en of this DirectoryProductName. # noqa: E501\n :type: str\n \"\"\"\n if en is None:\n raise ValueError(\"Invalid value for `en`, must not be `None`\") # noqa: E501\n if en is not None and len(en) > 50:\n raise ValueError(\"Invalid value for `en`, length must be less than or equal to `50`\") # noqa: E501\n\n self._en = en\n\n @property\n def fr(self):\n \"\"\"Gets the fr of this DirectoryProductName. # noqa: E501\n\n\n :return: The fr of this DirectoryProductName. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._fr\n\n @fr.setter\n def fr(self, fr):\n \"\"\"Sets the fr of this DirectoryProductName.\n\n\n :param fr: The fr of this DirectoryProductName. # noqa: E501\n :type: str\n \"\"\"\n if fr is None:\n raise ValueError(\"Invalid value for `fr`, must not be `None`\") # noqa: E501\n if fr is not None and len(fr) > 50:\n raise ValueError(\"Invalid value for `fr`, length must be less than or equal to `50`\") # noqa: E501\n\n self._fr = fr\n\n @property\n def it(self):\n \"\"\"Gets the it of this DirectoryProductName. # noqa: E501\n\n\n :return: The it of this DirectoryProductName. # noqa: E501\n :rtype: str\n \"\"\"\n return self._it\n\n @it.setter\n def it(self, it):\n \"\"\"Sets the it of this DirectoryProductName.\n\n\n :param it: The it of this DirectoryProductName. # noqa: E501\n :type: str\n \"\"\"\n if it is None:\n raise ValueError(\"Invalid value for `it`, must not be `None`\") # noqa: E501\n if it is not None and len(it) > 50:\n raise ValueError(\"Invalid value for `it`, length must be less than or equal to `50`\") # noqa: E501\n\n self._it = it\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(DirectoryProductName, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DirectoryProductName):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"python-client/swagger_client/models/directory_product_name.py","file_name":"directory_product_name.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"112556558","text":"import logging\n\nfrom pylons import config, request, response, session, tmpl_context as c, url\n\nfrom web.lib.base import BaseController, render\nfrom web.lib.helpers import *\nimport web.lib.fieldmap as fn\n\nimport solr\n\nimport sys\nif '../../workspace/indexing/src' not in sys.path:\n sys.path.insert(0, '../../workspace/indexing/src') # Add workspace files into path. 
TODO: Fix!\n \nimport solrconfig\n\nlog = logging.getLogger(__name__)\n\n##-----------------------------------------------------------------------------------------------\nclass HomeController(BaseController):\n\n##-----------------------------------------------------------------------------------------------\n\n def index(self):\n \n #\n # Get main stats\n #\n sol_all = solr.SolrConnection( solrconfig.solr_urls[\"all\"] )\n \n catalogue_fn = fn.get_catalogue_fieldname()\n organisation_fn = fn.get_is_organisation_fieldname()\n\n facet_fields = ['object_type', catalogue_fn, organisation_fn]\n\n sol_response_all = sol_all.query( \"*:*\", rows=0, fl=\"-\", score=False, facet='true', facet_limit=1000, facet_field=facet_fields)\n sol_all.close()\n\n c.stats = {\n 'works' : {\n 'number': 0\n },\n 'people' : {\n 'number': 0,\n 'url' : '/browse/people'\n },\n 'locations' : {\n 'number': 0,\n 'url' : '/browse/locations'\n },\n 'organisations' : {\n 'number': 0,\n 'url' : '/browse/organisations'\n },\n 'repositories' : {\n 'number': 0,\n 'url' : '/browse/institutions'\n },\n 'manifestations' : {\n 'number': 0\n },\n 'images' : {\n 'number': 0\n },\n 'comments' : {\n 'number': 0\n },\n 'related resources' : {\n 'number': 0\n },\n 'catalogues' : {\n 'number' : 0,\n 'url' : 'http://emlo-portal.bodleian.ox.ac.uk/collections/?page_id=480'\n }\n }\n\n for stat, num in sol_response_all.facet_counts['facet_fields']['object_type'].iteritems():\n\n if stat == 'institution' :\n c.stats['repositories']['number'] = max(0,num)\n elif stat == 'comment':\n c.stats['comments']['number'] = num\n elif stat == 'image':\n c.stats['images']['number'] = num\n elif stat == 'location':\n c.stats['locations']['number'] = num\n elif stat == 'manifestation':\n c.stats['manifestations']['number'] = num\n elif stat == 'resource':\n c.stats['related resources']['number'] = num\n elif stat == 'work':\n c.stats['works']['number'] = num\n\n\n if \"true\" in sol_response_all.facet_counts['facet_fields'][organisation_fn] :\n c.stats['organisations']['number'] = sol_response_all.facet_counts['facet_fields'][organisation_fn]['true']\n else:\n c.stats['organisations']['number'] = 0\n\n if 'person' in sol_response_all.facet_counts['facet_fields']['object_type'] :\n c.stats['people']['number'] = sol_response_all.facet_counts['facet_fields']['object_type']['person'] - c.stats['organisations']['number']\n\n\n catalogue_dict = sol_response_all.facet_counts[ 'facet_fields' ][catalogue_fn]\n if 'No catalogue specified' not in catalogue_dict.keys() :\n c.stats['catalogues']['number'] = len( catalogue_dict )\n else :\n c.stats['catalogues']['number'] = len( catalogue_dict ) - 1\n\n\n # tweak numbers - none of these numbers will change frequently, if ever. 
(\"Number of everything\" minus \"Number of ones we want\")\n\n if config['project'] == \"EMLO\" :\n if c.stats['people']['number'] >= 21985 :\n c.stats['people']['number'] -= (21985-20097) # Remove people who have no connection to a letter\n if c.stats['organisations']['number'] >= 948 :\n c.stats['organisations']['number'] -= (948-806) # Remove orginisations who have no connection to a letter\n if c.stats['images']['number'] >= 48661 :\n c.stats['images']['number'] -= (48661) # Remove images of the bodleian card catalogue\n if c.stats['locations']['number'] > 5931 :\n c.stats['locations']['number'] -= (5931-5190) # Remove locations which have no connection to a letter\n \n return render( '/main/home.mako' )\n\n##-----------------------------------------------------------------------------------------------\n \n def updating(self):\n return \"
The website is currently being updated.\" \\\n            + \" It will be back up as soon as possible. Thanks for waiting.
\"\n\n##-----------------------------------------------------------------------------------------------\n","sub_path":"pylons-emlo/emlo/pylons/web/web/controllers/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"546529782","text":"\nimport bs4\nimport re\nimport time\nimport requests\nimport csv\npagedata = requests.get(\n \"https://www.eia.gov/naturalgas/weekly/archivenew_ngwu/2020/05_14/#tabs-supply-2\")\n\nsoup = bs4.BeautifulSoup(pagedata.text)\ndemandTable = soup.findAll(\"div\", {\"id\": \"tabs-supply-2\"})\n# print(demandTable)\nusConsumption = soup.find(\"td\", text=\"U.S. consumption\")\nusConsumptionByWeeks = usConsumption.find_next_siblings()\noutput_row = []\nfor usConsumptionByWeek in usConsumptionByWeeks:\n div = usConsumptionByWeek.find('div')\n output_row.append(div.text)\n print(div.text)\n\nwith open('demand.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(output_row)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"241350886","text":"def heapify(arr, n, i):\n \"\"\"This is to max heapify\n Parameters:\n arr(list): List of integers\n n(int): Length of list\n i(int): Root element\n \"\"\"\n largest = i \n left = 2 * i + 1 \n right = 2 * i + 2 \n \n if left < n and arr[i] < arr[left]: \n largest = left \n \n if right < n and arr[largest] < arr[right]: \n largest = right \n \n if largest != i: \n arr[i],arr[largest] = arr[largest],arr[i] # swap \n heapify(arr, n, largest) \n\n \ndef heapSort(arr):\n \"\"\"This function sort the integers.\n Parameters:\n arr(list): List of integers\n Returns:\n None\n \"\"\"\n n = len(arr) \n \n # Build a maxheap. 
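    # Indices >= n // 2 are leaves (they have no children), so heapify() is a
    # no-op for them; the loop could equivalently start at n // 2 - 1.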
\n for i in range(n, -1, -1): \n heapify(arr, n, i) \n \n # One by one extract elements \n for i in range(n-1, 0, -1): \n arr[i], arr[0] = arr[0], arr[i] # swap \n heapify(arr, i, 0) \n\n\ndef main():\n arr = [ 12, 11, 13, 5, 6, 7] \n heapSort(arr) \n print(\"Sorted array is\",arr)\n\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"Algorithms/Searching & Sorting/Heap Sort/heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"176304593","text":"\ndef main(j, args, params, tags, tasklet):\n params.merge(args)\n\n if params.tags.tagExists(\"height\"):\n height = int(params.tags.tagGet(\"height\"))\n else:\n height = 400\n\n if params.tags.tagExists(\"docname\"):\n docname = params.tags.tagGet(\"docname\")\n doc = params.doc.preprocessor.docGet(docname)\n path = j.system.fs.getDirName(doc.path)\n else:\n path = j.system.fs.getDirName(params.doc.path)\n\n if j.system.fs.exists(j.system.fs.joinPaths(path, \"files\")):\n path = j.system.fs.joinPaths(path, \"files\")\n\n if params.tags.tagExists(\"readonly\") or params.tags.labelExists(\"readonly\"):\n readonly = \" readonly\"\n else:\n readonly = \"\"\n\n path = path.replace(\":\", \"+\")\n\n out = \"{{explorer: ppath:%s height:%s key:%s %s}}\" % (path, height, params.doc.getPageKey(), readonly)\n\n params.result = (out, params.doc)\n\n return params\n\n\ndef match(j, args, params, tags, tasklet):\n return True\n","sub_path":"apps/portalbase/macros/wiki/pageexplorer/1_main.py","file_name":"1_main.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"100885867","text":"import sys\nimport math\nimport numpy as np\n\n\ndef num_to_chr(i):\n if i == 0:\n return 'P'\n elif i == 1:\n return 'R'\n else :\n return 'S'\n\nT = int(sys.stdin.readline())\nfor t in range(1, T+1):\n line = sys.stdin.readline()\n (N, R, P, S) = map(int, line.split(' '))\n NN = 2 ** N\n \n found = False\n s = np.zeros(NN)\n for c in range(3):\n s[0] = c\n n = 1\n while n < NN:\n for i in range(n):\n index = i*(NN // n)\n s[index + (NN // (2*n))] = (s[index]+1) % 3 \n n = 2 * n\n if (np.count_nonzero(s == 0) == P) and (np.count_nonzero(s == 1) == R):\n found = True\n break\n\n if found == False:\n print(\"Case #{}: \".format(t), end=\"\")\n print(\"IMPOSSIBLE\")\n continue\n sl = s.tolist()\n n = 1\n while n < NN:\n i = 0\n while i < NN:\n if sl[i:(i+n)] > sl[(i+n):(i+2*n)]:\n h = sl[i:(i+n)]\n sl[i:(i+n)] = sl[(i+n):(i+2*n)]\n sl[(i+n):(i+2*n)] = h\n i = i+2*n\n n = 2*n\n so = ''.join(num_to_chr(i) for i in sl)\n print(\"Case #{}: \".format(t), end=\"\")\n print(so)\n\n\n\n","sub_path":"codes/BuildLinks1.03/test_input/CodeJam/2A/python/Simonsch_prob3a.py","file_name":"Simonsch_prob3a.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"352872789","text":"import chainer\nimport chainer.links as L\nimport chainer.functions as F\nimport numpy as np\nimport argparse\n\nfrom pathlib import Path\nfrom chainer import cuda, optimizers, serializers\nfrom model import Generator, Discriminator\nfrom dataset import DatasetLoader\nfrom utils import set_optimizer\nfrom visualize import Visualizer\n\nxp=cuda.cupy\ncuda.get_device(0).use()\n\n\nclass InstaGANLossFunction:\n def __init__(self):\n pass\n\n @staticmethod\n def adversarial_dis_loss(discriminator, y, 
y_mask, t, t_mask):\n y_dis = discriminator(y, y_mask)\n t_dis = discriminator(t, t_mask)\n\n return F.mean(F.softplus(y_dis)) + F.mean(F.softplus(-t_dis))\n\n @staticmethod\n def adversarial_gen_loss(discriminator, y, y_mask):\n y_dis = discriminator(y, y_mask)\n\n return F.mean(F.softplus(-y_dis))\n\n @staticmethod\n def cycle_consistency_loss(y, y_mask, t, t_mask):\n loss = F.mean_absolute_error(y, t)\n loss += F.mean_absolute_error(y_mask, t_mask)\n\n return 10.0 * loss\n\n @staticmethod\n def identity_mapping_loss(y, y_mask, t, t_mask):\n loss = F.mean_absolute_error(y, t)\n loss += F.mean_absolute_error(y_mask, t_mask)\n\n return 10.0 * loss\n\n @staticmethod\n def context_preserving_loss(y, y_mask, t, t_mask):\n weight = F.tile(xp.ones_like(t_mask) - t_mask * y_mask, (1, 3, 1, 1))\n loss = F.mean_absolute_error(weight * t, weight * y)\n\n return 10.0 * loss\n\n\ndef train(epochs,\n iterations,\n batchsize,\n validsize,\n src_path,\n tgt_path,\n extension,\n img_size,\n outdir,\n modeldir,\n lr_dis,\n lr_gen,\n beta1,\n beta2):\n\n # Dataset definition\n dataset = DatasetLoader(src_path, tgt_path, extension, img_size)\n print(dataset)\n x_val, x_mask_val, y_val, y_mask_val = dataset.valid(validsize)\n\n # Model & Optimizer definition\n generator_xy = Generator()\n generator_xy.to_gpu()\n gen_xy_opt = set_optimizer(generator_xy, lr_gen, beta1, beta2)\n\n generator_yx = Generator()\n generator_yx.to_gpu()\n gen_yx_opt = set_optimizer(generator_yx, lr_gen, beta1, beta2)\n\n discriminator_y = Discriminator()\n discriminator_y.to_gpu()\n dis_y_opt = set_optimizer(discriminator_y, lr_dis, beta1, beta2)\n\n discriminator_x = Discriminator()\n discriminator_x.to_gpu()\n dis_x_opt = set_optimizer(discriminator_x, lr_dis, beta1, beta2)\n\n # Loss Function definition\n lossfunc = InstaGANLossFunction()\n\n # Visualizer definition\n visualize = Visualizer()\n\n for epoch in range(epochs):\n sum_gen_loss = 0\n sum_dis_loss = 0\n\n for batch in range(0, iterations, batchsize):\n x, x_mask, y, y_mask = dataset.train(batchsize)\n\n # discriminator update\n xy, xy_mask = generator_xy(x, x_mask)\n yx, yx_mask = generator_yx(y, y_mask)\n\n xy.unchain_backward()\n xy_mask.unchain_backward()\n yx.unchain_backward()\n yx_mask.unchain_backward()\n\n dis_loss = lossfunc.adversarial_dis_loss(discriminator_y, xy, xy_mask, y, y_mask)\n dis_loss += lossfunc.adversarial_dis_loss(discriminator_x, yx, yx_mask, x, x_mask)\n\n discriminator_y.cleargrads()\n discriminator_x.cleargrads()\n dis_loss.backward()\n dis_y_opt.update()\n dis_x_opt.update()\n\n sum_dis_loss += dis_loss.data\n\n # generator update\n xy, xy_mask = generator_xy(x, x_mask)\n yx, yx_mask = generator_yx(y, y_mask)\n\n xyx, xyx_mask = generator_yx(xy, xy_mask)\n yxy, yxy_mask = generator_xy(yx, yx_mask)\n\n x_id, x_mask_id = generator_yx(x, x_mask)\n y_id, y_mask_id = generator_xy(y, y_mask)\n\n gen_loss = lossfunc.adversarial_gen_loss(discriminator_y, xy, xy_mask)\n gen_loss += lossfunc.adversarial_gen_loss(discriminator_x, yx, yx_mask)\n\n gen_loss += lossfunc.cycle_consistency_loss(xyx, xyx_mask, x, x_mask)\n gen_loss += lossfunc.cycle_consistency_loss(yxy, yxy_mask, y, y_mask)\n\n gen_loss += lossfunc.identity_mapping_loss(x_id, x_mask_id, x, x_mask)\n gen_loss += lossfunc.identity_mapping_loss(y_id, y_mask_id, y, y_mask)\n\n gen_loss += lossfunc.context_preserving_loss(xy, xy_mask, x, x_mask)\n gen_loss += lossfunc.context_preserving_loss(yx, yx_mask, y, y_mask)\n\n generator_xy.cleargrads()\n generator_yx.cleargrads()\n gen_loss.backward()\n 
gen_xy_opt.update()\n gen_yx_opt.update()\n\n sum_gen_loss += gen_loss.data\n\n if batch == 0:\n serializers.save_npz(f\"{modeldir}/generator_xy_{epoch}.model\", generator_xy)\n serializers.save_npz(f\"{modeldir}/generator_yx_{epoch}.model\", generator_yx)\n\n xy, xy_mask = generator_xy(x_val, x_mask_val)\n yx, yx_mask = generator_yx(y_val, y_mask_val)\n\n x = x_val.data.get()\n x_mask = x_mask_val.data.get()\n xy = xy.data.get()\n xy_mask = xy_mask.data.get()\n\n visualize(x, x_mask, xy, xy_mask, outdir, epoch, validsize, switch=\"mtot\")\n\n y = y_val.data.get()\n y_mask = y_mask_val.data.get()\n yx = yx.data.get()\n yx_mask = yx_mask.data.get()\n\n visualize(y, y_mask, yx, yx_mask, outdir, epoch, validsize, switch=\"ttom\")\n\n print(f\"epoch: {epoch}\")\n print(f\"dis loss: {sum_dis_loss / iterations} gen loss: {sum_gen_loss / iterations}\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"InstaGAN\")\n parser.add_argument(\"--e\", type=int, default=1000, help=\"the number of epochs\")\n parser.add_argument(\"--i\", type=int, default=2000, help=\"the interval of snapshot\")\n parser.add_argument(\"--b\", type=int, default=16, help=\"batch size\")\n parser.add_argument(\"--v\", type=int, default=4, help=\"valid size\")\n parser.add_argument(\"--ext\", type=str, default=\".png\", help=\"extension of training images\")\n parser.add_argument(\"--size\", type=int, default=128, help=\"the size of training images\")\n parser.add_argument(\"--outdir\", type=Path, default='outdir', help=\"output directory\")\n parser.add_argument(\"--modeldir\", type=Path, default='modeldir', help=\"model output directory\")\n parser.add_argument(\"--lrdis\", type=float, default=0.0001, help=\"discriminator alpha of Adam\")\n parser.add_argument(\"--lrgen\", type=float, default=0.0002, help=\"generator alpha of Adam\")\n parser.add_argument(\"--b1\", type=float, default=0.5, help=\"beta1 of Adam\")\n parser.add_argument(\"--b2\", type=float, default=0.999, help=\"beta2 of Adam\")\n parser.add_argument(\"--src_path\", type=Path, help=\"path which contains source images\")\n parser.add_argument(\"--tgt_path\", type=Path, help=\"path which contains target images\")\n\n args = parser.parse_args()\n\n outdir = args.outdir\n outdir.mkdir(exist_ok=True)\n\n modeldir = args.modeldir\n modeldir.mkdir(exist_ok=True)\n\n train(args.e, args.i, args.b, args.v, args.src_path, args.tgt_path, args.ext, args.size,\n args.outdir, args.modeldir, args.lrdis, args.lrgen, args.b1, args.b2)\n","sub_path":"InstaGAN/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"178785213","text":"from apps.auth.models import User\r\n\r\nimport json\r\n\r\nfrom django.http import HttpResponse\r\nfrom apps.magency.rowmapper import RowMapper\r\n\r\nfrom core.views import SecuredView, SmartView\r\nfrom core.utils.misc import convertDatetimeToString\r\n\r\n\r\nclass AjaxView(SmartView):\r\n\r\n def get(self, request, *args, **kwargs):\r\n ret = ''\r\n jsondata = ''\r\n if \"get\" in request.GET:\r\n what = request.GET.get('get')\r\n if what == \"users\":\r\n jsondata = self.get_users(request)\r\n elif what == \"newspaperads\":\r\n jsondata = self.get_news_ads(request)\r\n elif what == \"internetads\":\r\n jsondata = self.get_websitead_list(request)\r\n elif what == \"bills\":\r\n jsondata = self.get_bills_list(request)\r\n\r\n elif what == \"televisionads\":\r\n jsondata = 
self.get_tvad_list(request)\r\n elif what == \"billboardsads\":\r\n jsondata = self.get_billboard_list(request)\r\n\r\n jsonx = json.JSONEncoder(indent=4)\r\n ret = jsonx.encode(jsondata)\r\n\r\n return HttpResponse(ret, mimetype=\"application/json\")\r\n\r\n def post(self, request, *args, **kwargs):\r\n ret = ''\r\n if \"del\" in request.POST:\r\n what = request.POST.get('del')\r\n ret = what\r\n idx = request.POST.get('id')\r\n\r\n return HttpResponse(ret)\r\n\r\n def get_billboard_list(self, request):\r\n\r\n idx = 0\r\n if not request.user.is_staff:\r\n idx = request.user.id\r\n\r\n res = RowMapper.call_proc('billboardads(%s, :curr)' % idx,\r\n ['id', 'title', 'validity', 'bill', 'name', 'billboards'])\r\n jsr = []\r\n for a in res:\r\n if idx == 0:\r\n jsr.append([\r\n a.name,\r\n a.title,\r\n a.billboards,\r\n a.bill,\r\n convertDatetimeToString(a.validity)\r\n ])\r\n else:\r\n jsr.append([\r\n a.title,\r\n a.billboards,\r\n a.bill,\r\n convertDatetimeToString(a.validity)\r\n ])\r\n\r\n return {'aaData': jsr}\r\n\r\n def get_bills_list(self, request):\r\n\r\n idx = 0\r\n if not request.user.is_staff:\r\n idx = request.user.id\r\n\r\n res = RowMapper.call_proc('bills(%s, :curr)' % idx,\r\n ['id', 'title', 'name', 'ammount', 'paid_by', 'time'])\r\n\r\n jsr = []\r\n for a in res:\r\n\r\n if idx == 0:\r\n jsr.append([\r\n a.name,\r\n a.title,\r\n a.ammount,\r\n a.paid_by,\r\n convertDatetimeToString(a.time),\r\n ])\r\n else:\r\n jsr.append([\r\n a.title,\r\n a.ammount,\r\n a.paid_by,\r\n convertDatetimeToString(a.time),\r\n ])\r\n\r\n return {'aaData': jsr}\r\n\r\n\r\n def get_websitead_list(self, request):\r\n\r\n idx = 0\r\n if not request.user.is_staff:\r\n idx = request.user.id\r\n\r\n res = RowMapper.call_proc('internetads(%s, :curr)' % idx,\r\n ['id', 'title', 'validity', 'bill', 'name', 'websites'])\r\n jsr = []\r\n for a in res:\r\n if idx == 0:\r\n jsr.append([\r\n a.name,\r\n a.title,\r\n a.websites,\r\n a.bill,\r\n convertDatetimeToString(a.validity)\r\n ])\r\n else:\r\n jsr.append([\r\n a.title,\r\n a.websites,\r\n a.bill,\r\n convertDatetimeToString(a.validity)\r\n ])\r\n\r\n return {'aaData': jsr}\r\n\r\n def get_news_ads(self, request):\r\n idx = 0\r\n if not request.user.is_staff:\r\n idx = request.user.id\r\n\r\n res = RowMapper.call_proc('newspaperads(%s, :curr)' % idx,\r\n ['id', 'title', 'validity', 'bill', 'name', 'width', 'height', 'color', 'newspapers'])\r\n jsr = []\r\n for a in res:\r\n if idx == 0:\r\n jsr.append([\r\n a.name,\r\n a.title,\r\n a.width,\r\n a.height,\r\n a.color,\r\n a.newspapers,\r\n a.bill,\r\n convertDatetimeToString(a.validity)\r\n ])\r\n else:\r\n jsr.append([\r\n a.title,\r\n a.width,\r\n a.height,\r\n a.color,\r\n a.newspapers,\r\n a.bill,\r\n convertDatetimeToString(a.validity)\r\n ])\r\n\r\n return {'aaData': jsr}\r\n\r\n\r\n def get_tvad_list(self, request):\r\n\r\n idx = 0\r\n if not request.user.is_staff:\r\n idx = request.user.id\r\n\r\n res = RowMapper.call_proc('televisionads(%s, :curr)' % idx,\r\n ['id', 'title', 'validity', 'bill', 'name', 'duration', 'televisions'])\r\n jsr = []\r\n for a in res:\r\n if idx == 0:\r\n jsr.append([\r\n a.name,\r\n a.title,\r\n a.duration,\r\n a.televisions,\r\n a.bill,\r\n convertDatetimeToString(a.validity)\r\n ])\r\n else:\r\n jsr.append([\r\n a.title,\r\n a.duration,\r\n a.televisions,\r\n a.bill,\r\n convertDatetimeToString(a.validity)\r\n ])\r\n\r\n return {'aaData': jsr}\r\n\r\n def get_users(self, request):\r\n users = User.objects.filter(is_staff=False).order_by('-date_joined')\r\n jsr = 
[]\r\n for a in users:\r\n\r\n jsr.append([\r\n a.name,\r\n a.phone,\r\n a.email,\r\n convertDatetimeToString(a.date_joined),\r\n convertDatetimeToString(a.last_login),\r\n ])\r\n\r\n return {'aaData': jsr}","sub_path":"apps/magency/views/ajax.py","file_name":"ajax.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"334791665","text":"# 1보다 큰 자연수 중에서 1과 자기 자신을 제외한 약수가 없는 자연수를 소수라고 한다. 예를 들어, 5는 1과 5를 제외한 약수가 없기 때문에 소수이다. 하지만, 6은 6 = 2 × 3 이기 때문에 소수가 아니다.\n\n# 골드바흐의 추측은 유명한 정수론의 미해결 문제로, 2보다 큰 모든 짝수는 두 소수의 합으로 나타낼 수 있다는 것이다. 이러한 수를 골드바흐 수라고 한다. 또, 짝수를 두 소수의 합으로 나타내는 표현을 그 수의 골드바흐 파티션이라고 한다. 예를 들면, 4 = 2 + 2, 6 = 3 + 3, 8 = 3 + 5, 10 = 5 + 5, 12 = 5 + 7, 14 = 3 + 11, 14 = 7 + 7이다. 10000보다 작거나 같은 모든 짝수 n에 대한 골드바흐 파티션은 존재한다.\n\n# 2보다 큰 짝수 n이 주어졌을 때, n의 골드바흐 파티션을 출력하는 프로그램을 작성하시오. 만약 가능한 n의 골드바흐 파티션이 여러 가지인 경우에는 두 소수의 차이가 가장 작은 것을 출력한다.\n\nn = int(input())\n\ndef check_dividable(n):\n if n == 1:\n return True\n for i in range(2,n):\n if n%i == 0:\n return True\n return False\n\nfor i in range(n):\n a = int(input())\n if a%2 ==0 : b1, b2 = int(a/2),int(a/2)\n else: b1, b2, = int(a/2),int(a/2)+1\n while(1):\n if check_dividable(b1) == False and check_dividable(b2) == False:\n break\n b1 -= 1\n b2 += 1\n\n print(\"{} {}\".format(b1,b2))","sub_path":"Python/1주차_정렬,재귀/정글_1_9020.py","file_name":"정글_1_9020.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"537136815","text":"import sys\n\nsys.path.append('..')\nprint('sys path', sys.path)\n\n\n\nfrom pygame.math import Vector2\nfrom enum import IntEnum\nimport random\nfrom math import sqrt, pi\n\n\nfrom common.params import ObstacleAvoidanceParams, WallAvoidanceParams, WanderParams, BehaviorParams,FlockingParams\n \nfrom common.behavior import Behavior\nfrom common.geometry import line_intersection_get_distance_point\n\nimport common.transformations as Tx\nimport pygame as pg\n\nimport pygame.gfxdraw\nimport pgzrun\n\n##class Behavior(IntEnum):\n## NONE = 0,\n## SEEK = 2,\n## FLEE = 4,\n## ARRIVE = 8,\n## WANDER = 16,\n## COHESION = 32,\n## SEPARATION = 64,\n## ALIGNMENT = 128,\n## OBSTACLE_AVOIDANCE = 256,\n## WALL_AVOIDANCE = 512,\n## FOLLOW_PATH = 1024,\n## PURSUIT = 2048,\n## EVADE = 4096,\n## INTERPOSE = 8192,\n## HIDE = 16384,\n## FLOCK = 32768,\n## OFFSET_PURSUIT = 65536\n \nclass Decelaration(IntEnum):\n SLOW = 3.0,\n NORMAL = 2.0,\n FAST = 1.0\n\n##class Params:\n## jitter = 5.0\n## radius = 50.0\n## distance = 50.0\n## target = Vector2(0,-1)\n \nclass SteeringBehaviors:\n \"\"\"\n Class that encapsulates a host of steering behaviors that can be applied to\n an entity. Entitiies are of type Actor. See the Actor documentation for how to use\n \"\"\"\n def __init__(self, entity):\n \"\"\"\n Create a steering behaviors for entity. 
Entity is expected to be of\n type MovingEntity.\n \n \"\"\"\n self._entity = entity\n self._steering_force = Vector2()\n self._flags = Behavior.NONE\n\n## self.wander_params = WanderParams()\n self.obstacle_params = ObstacleAvoidanceParams()\n self.wall_params = WallAvoidanceParams()\n self.behavior_params = BehaviorParams()\n## self.view_distance = BehaviorParams.view_distance\n\n #create a random point on the wander circle\n self.wander_target = Tx.random_vector2() * WanderParams.radius\n \n self.feelers = [0,0,0]\n self.path = None\n \n\n def calculate(self):\n self._steering_force *= 0\n entity = self._entity\n \n # Tag vehicles within range\n # as a pre-step for calculating flocking\n if self.is_on(Behavior.ALIGNMENT) \\\n or self.is_on(Behavior.SEPARATION) \\\n or self.is_on(Behavior.COHESION):\n \n entity.world.tag_vehicles_in_view_range(entity, entity.world.agents, FlockingParams.view_distance)\n \n\n if self.is_on(Behavior.SEEK):\n self._steering_force += self.seek( self._entity.world.crosshair )\n\n if self.is_on(Behavior.ARRIVE): \n self._steering_force += self.arrive( self._entity.world.crosshair)\n\n if self.is_on(Behavior.FLEE): \n self._steering_force += self.flee( self._entity.world.crosshair)\n \n if self.is_on(Behavior.PURSUIT): \n self._steering_force += self.pursuit( self._entity.pursuit_target )\n \n if self.is_on(Behavior.EVADE): \n self._steering_force += self.evade( self._entity.evade_target )\n\n if self.is_on(Behavior.WANDER): \n self._steering_force += self.wander()\n\n if self.is_on(Behavior.OBSTACLE_AVOIDANCE):\n self._steering_force += self.obstacle_avoidance( self._entity.world.obstacles )\n \n if self.is_on(Behavior.WALL_AVOIDANCE):\n self._steering_force += self.wall_avoidance( self._entity.world.walls )\n\n if self.is_on(Behavior.INTERPOSE):\n self._steering_force += self.interpose( *self._entity.targets )\n\n if self.is_on(Behavior.HIDE):\n self._steering_force += self.hide( self._entity.hunter, self._entity.world.obstacles )\n\n if self.is_on(Behavior.FOLLOW_PATH):\n self._steering_force += self.follow_path( self.path )\n \n if self.is_on(Behavior.OFFSET_PURSUIT):\n self._steering_force += self.offset_pursuit( self._entity.leader,\n Vector2(BehaviorParams.offset_pursuit_offset))\n \n if self.is_on(Behavior.SEPARATION):\n self._steering_force += self.separation( self._entity.world.agents )\n\n if self.is_on(Behavior.ALIGNMENT):\n self._steering_force += self.alignment( self._entity.world.agents )\n \n \n \n return self._steering_force\n\n def on(self, behavior):\n self._flags |= behavior\n\n def off(self, behavior):\n if self.is_on(behavior):\n self.toggle_behavior(behavior)\n\n def all_off(self):\n self._flags = Behavior.NONE\n \n def is_on(self, behavior):\n return self._flags & behavior > 0\n \n def toggle_behavior(self, behavior):\n self._flags ^= behavior\n\n\n ## ##\n ## ##\n # Behavior methods #\n ## ##\n ## ##\n \n\n ## ---------------------------------------------------\n ## Seek\n ## \n ##\n ## \n def seek(self,target):\n desiredVelocity = target - self._entity.exact_pos\n \n if desiredVelocity.length() > 0.0001:\n desiredVelocity.normalize_ip()\n desiredVelocity *= self._entity.max_speed\n\n return desiredVelocity - self._entity.velocity\n\n\n ## ---------------------------------------------------\n ## Arrive\n ## \n ##\n ## \n def arrive(self,target, decelaration=Decelaration.NORMAL):\n toTarget = target - self._entity.exact_pos\n dist = toTarget.length()\n## print(dist * dist)\n if (dist * dist) > 1.0 :\n decelarationTweak = 0.3\n speed = 
dist * (decelaration * decelarationTweak )\n\n speed = min(speed, self._entity.max_speed)\n\n desiredVelocity = toTarget * ( speed / dist )\n return desiredVelocity - self._entity.velocity\n else:\n return Vector2()\n\n\n\n ## ---------------------------------------------------\n ## Flee\n ## \n ##\n ## \n def flee(self,target):\n desiredVelocity = self._entity.exact_pos - target\n \n if desiredVelocity.length() > 0.0001:\n desiredVelocity.normalize_ip()\n desiredVelocity *= self._entity.max_speed\n\n return desiredVelocity - self._entity.velocity\n \n\n ## ---------------------------------------------------\n ## Pursuit\n ## \n ##\n ## \n def pursuit(self, evader):\n # vector from us to the evader\n to_evader = evader.exact_pos - self._entity.exact_pos\n\n # how similar are our headings?\n relative_heading = self._entity.heading.dot(evader.heading)\n\n # are we pointed towards the evader?\n is_headed_towards = to_evader.dot(self._entity.heading) > 0\n\n # if we are headedTowards them and they are coming towards them\n # go to their position\n # acos(0.95)= 18 degrees\n if is_headed_towards and relative_heading < -0.95:\n return self.seek(evader.exact_pos)\n\n # if we are here then the evader is not ahead of us\n # we will try to anticipate where they are with a lookahead time\n # the lookahead time is proportional to the distance between the evader\n # and us; (the further we are from one another the further in the future we want\n # to look in order to predict where the evader is going to be\n # it is inversely proportional to the sum of our speed and the evader's speed:\n # the faster we are both moving the shorter the lookahead time (because we\n # will cover more distance in a shorter time since we are going faster.)\n\n look_ahead_time = to_evader.length() / ( self._entity.max_speed + evader.speed )\n return self.seek(evader.exact_pos + evader.velocity * look_ahead_time)\n\n ## ---------------------------------------------------\n ## Evade\n ## \n ##\n ## \n def evade(self, pursuer):\n entity = self._entity\n to_pursuer = pursuer.exact_pos - entity.exact_pos\n\n #Threat Range; uncomment to have agent only pursue entities within range\n threat_range = BehaviorParams.threat_scan_distance or 10000\n \n if to_pursuer.length_squared() > (threat_range * threat_range):\n return Vector2()\n\n look_ahead_time = to_pursuer.length() / ( entity.max_speed + pursuer.speed )\n return self.flee( pursuer.pos + ( pursuer.velocity * look_ahead_time ) )\n\n\n\n ## ---------------------------------------------------\n ## Wander\n ## \n ##\n ## \n def wander(self):\n entity = self._entity\n\n params = WanderParams #self.wander_params\n\n ## from M.Buckland:\n ## this behavior is dependent on the update rate, so this line must\n ## be included when using time independent framerate. 
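        ## i.e. the commented line below scales the jitter by the frame's
        ## elapsed time so the wander target drifts at a constant rate per
        ## second; the unscaled form actually used further down jitters
        ## faster as the frame rate rises.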
\n## jitter_this_time_slice = params.jitter * entity.time_elapsed\n\n## jitter_this_time_slice = params.jitter\n \n## randomVec = Vector2( (random.random()*2.0-1.0) * WanderParams.jitter * entity.time_elapsed, \\\n## (random.random()*2.0-1.0) * WanderParams.jitter * entity.time_elapsed )\n\n self.wander_target.x += (random.random()*2.0-1.0) * WanderParams.jitter #* entity.time_elapsed\n self.wander_target.y += (random.random()*2.0-1.0) * WanderParams.jitter #* entity.time_elapsed \n\n## params.target += randomVec\n\n # project the target onto a point on the unit circle\n## params.target.normalize_ip()\n self.wander_target.normalize_ip()\n\n # increase the length of the vector to the same radius\n # of the wander circle\n## params.target *= params.radius\n self.wander_target *= WanderParams.radius\n\n \n # move the wander circle in front of us\n## target = params.target + Vector2(params.distance,0)\n target_for_this_iteration = self.wander_target + Vector2(WanderParams.distance,0) \n\n # tramsform the target into world coordinates\n target_for_this_iteration.rotate_ip(entity.angle)\n\n\n return target_for_this_iteration\n\n\n ## ---------------------------------------------------\n ## Obstacle Avoidance\n ## \n ##\n ## \n def obstacle_avoidance(self, obstacles):\n params = self.obstacle_params\n entity = self._entity\n steering_force = Vector2()\n \n self.d_box_length = params.min_detection_box_length\n # the scale of the box length is dependent on our max_speed ratio\n speed_ratio = entity.speed / entity.max_speed\n self.d_box_length = self.d_box_length + speed_ratio * self.d_box_length\n\n #tag obstacles that are within range of our detection box\n entity.world.tag_obstacles_in_view_range( entity, obstacles, self.d_box_length )\n\n## closest_intersecting_obstacle\n closest_obstacle = None\n \n # track the distance to the cib\n dist_to_closest_intersection_point = 1000000\n\n local_pos_of_closest_obstacle = Vector2()\n\n for o in entity.world.obstacles:\n if o.tagged:\n## point, agent_heading, agent_side, agent_position): \n local_pos = Tx.point_to_local_space(o.exact_pos,\n entity.heading,\n entity.side,\n entity.exact_pos)\n\n # if local_pos is less than 0 it is behind us so we can ignore it\n if local_pos.x >= 0:\n # if the distance from the x axis to the object's position is\n # less than its radius + half the width of the detection box then\n # there is a potential intersection\n expanded_radius = o.bounding_radius + entity.bounding_radius\n\n if abs(local_pos.y) < expanded_radius:\n # now do a line/circle intersection test. The center of the circle\n # is (cx,cy) The intersection points are\n # given by the formula x = cx +/- sqrt(r^2 - cy^2) for y = 0.\n # We only need to look at the smallest positive value of x because\n # that will be the closest point of intersection\n cx = local_pos.x\n cy = local_pos.y \n sqrt_part = sqrt(expanded_radius * expanded_radius - cy * cy)\n\n ip = cx - sqrt_part\n\n if ip <= 0.0:\n ip = cx + sqrt_part\n\n # if this intersection point is smaller thatn our current closest\n # then update our tracking variables with the information for this\n # obstacle\n if ip < dist_to_closest_intersection_point:\n dist_to_closest_intersection_point = ip\n closest_obstacle = o\n local_pos_of_closest_obstacle = local_pos\n\n\n # if we have an object that is closest (i.e. 
within range)\n if closest_obstacle != None:\n # the closer the actor is to the object, the stronger the\n # steering force should be\n multiplier = 1.0 + ( self.d_box_length - local_pos_of_closest_obstacle.x) / self.d_box_length\n\n #calculate the lateral force\n steering_force.y = multiplier * \\\n (closest_obstacle.bounding_radius - local_pos_of_closest_obstacle.y)\n\n #apply a braking force proportional to the obstacle's distance from the vehicle\n braking_weight = 0.2\n\n steering_force.x = braking_weight * \\\n (closest_obstacle.bounding_radius - local_pos_of_closest_obstacle.x)\n\n #convert the steering force from local to world space\n return Tx.vector_to_world_space(steering_force, entity.heading, entity.side)\n\n ## ---------------------------------------------------\n ## Wall Avoidance\n ## \n ##\n ## \n def wall_avoidance(self, walls):\n self.create_feelers()\n entity = self._entity\n ## variables for keeping track of the information as we\n ## iterate through the list of walls\n # dist to intersection point\n dist_ip = 0\n dist_to_closest_ip = 1000000\n\n closest_wall = None\n\n steering_force = Vector2()\n point = Vector2()\n closest_point = Vector2()\n\n\n # for each feeler\n # find the closest wall\n## intersecting_walls = [ wall for feeler, wall in feelers,walls if line_intersection()]\n\n for feeler in self.feelers:\n for wall in walls:\n intersecting,_dt,_pt = line_intersection_get_distance_point(entity.exact_pos,\n feeler,\n wall.phrom,\n wall.to)\n\n if intersecting:\n if _dt < dist_to_closest_ip:\n dist_to_closest_ip = _dt\n closest_wall = wall\n closest_point = _pt\n # if we found a wall then calculate a steering force based on how far\n # the feeler cut into the wall\n if closest_wall != None:\n over_shoot = feeler - closest_point\n steering_force = wall.normal * over_shoot.length() * self.wall_params.repel_multiplier\n \n return steering_force\n\n ## ---------------------------------------------------\n ## Interpose\n ## \n ##\n ## \n def interpose(self, *agents):\n assert len(agents) >= 2, 'need to pass in at least 2 agents, suckah' + str(len(agents))\n target1,target2 = agents[:2]\n entity = self._entity\n \n # first we need to figure out where the two agents are going to be at time T in the future.\n # This is approximated by determining the time taken to reach the mid way point at the\n # current time at max speed\n\n mid_point = (target1.exact_pos + target2.exact_pos ) * 0.5\n time_to_reach_mid_point = (entity.exact_pos.distance_to( mid_point) ) / entity.max_speed\n\n future1 = target1.exact_pos + target1.velocity * time_to_reach_mid_point\n future2 = target2.exact_pos + target2.velocity * time_to_reach_mid_point\n\n mid_point = (future1 + future2) * 0.5\n\n return self.arrive(mid_point,Decelaration.FAST)\n\n\n ## ---------------------------------------------------\n ## Hide\n ## \n ##\n ## \n def hide(self, hunter, obstacles):\n entity = self._entity\n world = self._entity.world\n\n # record keeping variables\n dist_to_closest = sys.float_info.max\n best_hiding_spot = None\n closest_obstacle = None\n\n for o in world.obstacles:\n hiding_spot = self.get_hiding_position( o.exact_pos, o.bounding_radius, hunter.exact_pos )\n dist = entity.exact_pos.distance_to( hiding_spot )\n if dist < dist_to_closest:\n best_hiding_spot = hiding_spot\n closest_obstacle = o\n dist_to_closest = dist\n\n if dist_to_closest == sys.float_info.max:\n return self.evade(hunter)\n\n return self.arrive(best_hiding_spot,Decelaration.FAST)\n\n\n def get_hiding_position(self, 
blocking_obj_pos, blocking_obj_bounding_radius, baddie_pos, spacing_from_obj = 30): \n \"\"\"\n Given the position of a hunter, and position and bounding radius of an obstacle, this method\n finds the position that puts the boundary between its location and the hunter\n \"\"\"\n # calculate the spacing \n dist_away = blocking_obj_bounding_radius + spacing_from_obj\n\n # calculate the heading from the object to the hunter\n to = (blocking_obj_pos - baddie_pos).normalize()\n \n # scale the distance\n return (blocking_obj_pos) + to * dist_away\n\n\n\n ## ---------------------------------------------------\n ## Follow Path\n ## \n ##\n ## \n def follow_path(self, path):\n entity = self._entity\n if entity.exact_pos.distance_to( self.path.current_way_point ) < \\\n BehaviorParams.path_follow_scan_distance:\n\n self.path.next()\n\n if self.path.at_end():\n return self.seek(self.path.current_way_point)\n else:\n return self.arrive(self.path.current_way_point, Decelaration.NORMAL)\n\n\n ## ---------------------------------------------------\n ## Offset Pursuit\n ## \n ##\n ## \n def offset_pursuit(self, leader, offset):\n entity = self._entity\n world_offset_pos = Tx.point_to_world_space(offset,\n leader.exact_pos,\n leader.heading,\n leader.side)\n\n to_offset = world_offset_pos - entity.exact_pos\n\n # the lookahead time is proportional to the distance between the leader\n # and the pursuer; and is inversely proportional to the sum of both agent's\n # velocities\n lookahead_time = to_offset.length() / ( entity.max_speed + leader.speed )\n\n # now arrive at the predicted future position of the offset\n \n return self.arrive( world_offset_pos + leader.velocity * lookahead_time, Decelaration.FAST)\n\n\n ## ---------------------------------------------------\n ## Separation\n ## \n ##\n ## \n def separation(self, neighbors):\n steering_force = Vector2()\n\n def is_evade_target(neighbor):\n if self._entity.evade_target == None:\n return False\n else:\n return neighbor.id == self._entity.evade_target.id\n\n #filter out neighbors that are us, or not tagged, or someone that we should be evading\n for n in neighbors:\n if n.tagged \\\n and n.id != self._entity.id \\\n and not is_evade_target(n):\n\n to = self._entity.exact_pos - n.exact_pos\n # make the force inversely proportional to the distance squared\n # if the distance is small then distance * distance will be even smaller\n # divide a constant by this distance squared factor number to get the force\n steering_force += to * ( FlockingParams.separation_multiplier / to.length_squared() ) #to.normalize() / to.length() \n\n\n return steering_force\n\n\n ## ---------------------------------------------------\n ## Aignment\n ## \n ##\n ## \n def alignment(self, neighbors):\n avg_heading = Vector2()\n\n neighbor_count = 0\n\n\n def is_evade_target(neighbor):\n if self._entity.evade_target == None:\n return False\n else:\n return neighbor.id == self._entity.evade_target.id \n\n #filter out neighbors that are us, or not tagged, or someone that we should be evading\n for n in neighbors:\n if n.tagged \\\n and n.id != self._entity.id \\\n and not is_evade_target(n):\n\n avg_heading += n.heading\n neighbor_count += 1\n \n if neighbor_count > 0:\n## print(\"entity{0} total_heading (before averaging) = {1}\".format(self._entity.id,avg_heading)) \n avg_heading /= neighbor_count\n## print(\" avg_alignment_heading = {0}\".format(avg_heading)) \n avg_heading -= self._entity.heading\n## print(\" alignment adjusted for ME = {0}\".format(avg_heading))\n\n avg_heading 
*= FlockingParams.alignment_multiplier\n\n \n return avg_heading\n \n\n\n ## ---------------------------------------------------\n ## Cohesion\n ## \n ##\n ## \n def cohesion(self, neighbors):\n pass \n \n \n ## ---------------------------------------------------\n ## Create Feelers\n ## \n ## \n def create_feelers(self):\n entity = self._entity\n fl = self.wall_params.detection_feeler_length\n \n self.feelers[0] = entity.exact_pos + fl * entity.heading\n \n temp = entity.heading.rotate( -50) #3.5 * 0.5 * pi)\n self.feelers[1] = entity.exact_pos + fl * 0.95 * temp\n \n temp = entity.heading.rotate( 50 ) #0.5 * pi)\n self.feelers[2] = entity.exact_pos + fl * 0.95 * temp\n \n \n\n \n\n def render_aids(self,surface):\n entity = self._entity\n world = entity.world\n\n # Render Steering Force\n if world.show_steering_force:\n pass\n\n # Render Wander if relevant \n if world.show_wander_circle \\\n and self.is_on(Behavior.WANDER):\n params = WanderParams #self.wander_params\n\n #get the wander circle center in world coordinates\n wander_center = Vector2(params.distance,0)\n wander_center.rotate_ip(self._entity.angle)\n wander_center += self._entity.exact_pos\n \n # draw the wander cirle\n pygame.gfxdraw.aacircle(surface, \\\n int(wander_center.x),\n int(wander_center.y),\n int(WanderParams.radius),\n (255,0,0))\n \n\n #draw the wander target\n wander_target = self.wander_target.rotate(self._entity.angle) + wander_center #WanderParams.target.rotate(self._entity.angle)\n #wander_target += wander_center #self._entity.exact_pos\n pygame.gfxdraw.filled_circle(surface,\n int(wander_target.x),\n int(wander_target.y),\n 5,\n (255,255,0))\n\n # Render Detection Box if relevant\n\n #only for main vehicle\n if entity.is_behavior_on(Behavior.OBSTACLE_AVOIDANCE) and world.show_detection_box: # and self.id == 0:\n minLen = ObstacleAvoidanceParams.min_detection_box_length\n # the length is a function of how fast we are going\n # the closer we get to maximum speed the longer the length\n\n # calculate the verts for the detection box in local space coordinates, relative\n # to the vehicle of interest\n if entity.max_speed > 0:\n speed_ratio = entity.speed / entity.max_speed\n else:\n speed_ratio = 1.0\n \n length = minLen + (minLen * speed_ratio )\n tl = Vector2( 0, -entity.bounding_radius )\n tr = tl + Vector2(length, 0)\n br = tr + Vector2(0, 2 * entity.bounding_radius)\n bl = br - Vector2(length, 1)\n close = bl + Vector2(0,-2 * entity.bounding_radius)\n\n #get rectangle in world space coordinates for rendering\n\n \n rect_verts_world_space = Tx.point_to_world_space([tl,tr,br,bl],\n entity.exact_pos,\n entity.heading,\n entity.side\n )\n \n pygame.gfxdraw.aapolygon( surface, rect_verts_world_space, (0,200,0))\n\n\n # wall_avoidance aids\n if entity.is_behavior_on(Behavior.WALL_AVOIDANCE):\n if world.show_feelers:\n for feeler in self.feelers:\n pygame.gfxdraw.line( surface,\n int(entity.exact_pos.x),\n int(entity.exact_pos.y),\n int(feeler.x),\n int(feeler.y),\n pg.Color(\"orange\"))\n \n if world.show_walls:\n for wall in world.walls:\n if world.show_wall_normals:\n wall.draw_with_normals(surface)\n else:\n wall.draw(surface)\n\n if entity.is_behavior_on(Behavior.FOLLOW_PATH):\n if world.show_path:\n self.path.draw(surface)\n\n\n\n \n","sub_path":"steering_behaviors/common/steering.py","file_name":"steering.py","file_ext":"py","file_size_in_byte":27181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"277155100","text":"#!/usr/bin/env 
python3\n\"\"\"\nCalculates the intersection of obtaining this data\n\"\"\"\nfrom math import factorial\n\n\ndef likelihood(x, n, P):\n \"\"\"\n Returns: a 1D numpy.ndarray containing the likelihood\n of obtaining the data, x and n, for each probability in P\n \"\"\"\n res = []\n if P.ndim != 1:\n raise TypeError(\"P must be a 1D numpy.ndarray\")\n if 0 < P.any() > 1:\n raise ValueError(\"All values in P must be in the range [0, 1]\")\n for theta in P:\n res.append((factorial(n) / (factorial(x)\n * factorial(n - x))) * (theta ** x)\n * ((1 - theta) ** (n - x)))\n return res\n\n\ndef intersection(x, n, P, Pr):\n \"\"\"\n Returns: a 1D numpy.ndarray containing the intersection\n \"\"\"\n res = likelihood(x, n, P) * Pr\n return res\n\n\ndef marginal(x, n, P, Pr):\n \"\"\"\n Returns: the marginal probability of obtaining x and n\n \"\"\"\n inter = intersection(x, n, P, Pr)\n return sum(inter)\n","sub_path":"math/0x07-bayesian_prob/2-marginal.py","file_name":"2-marginal.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"582293507","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Structure of dbpn.\"\"\"\n\nimport mindspore\nfrom mindspore import nn\n\nfrom src.model.base_network import ConvBlock, UpBlock, DownBlock, DDownBlock, \\\n DUpBlock\n\n\nclass Net(nn.Cell):\n \"\"\"Structure of dbpn network\"\"\"\n\n def __init__(self, num_channels, base_filter, feat, num_stages, scale_factor):\n super(Net, self).__init__()\n kernel = 8\n stride = 4\n padding = 2\n if scale_factor == 2:\n kernel = 6\n stride = 2\n padding = 2\n elif scale_factor == 4:\n kernel = 8\n stride = 4\n padding = 2\n elif scale_factor == 8:\n kernel = 12\n stride = 8\n padding = 2\n # Initial Feature Extraction\n self.feat0 = ConvBlock(num_channels, feat, 3, 1, 1, activation='prelu', norm=None)\n self.feat1 = ConvBlock(feat, base_filter, 1, 1, 0, activation='prelu', norm=None)\n\n # Back-projection stages\n self.up1 = UpBlock(base_filter, kernel, stride, padding)\n self.down1 = DownBlock(base_filter, kernel, stride, padding)\n self.up2 = UpBlock(base_filter, kernel, stride, padding)\n self.down2 = DDownBlock(base_filter, kernel, stride, padding, 2)\n self.up3 = DUpBlock(base_filter, kernel, stride, padding, 2)\n self.down3 = DDownBlock(base_filter, kernel, stride, padding, 3)\n self.up4 = DUpBlock(base_filter, kernel, stride, padding, 3)\n self.down4 = DDownBlock(base_filter, kernel, stride, padding, 4)\n self.up5 = DUpBlock(base_filter, kernel, stride, padding, 4)\n self.down5 = DDownBlock(base_filter, kernel, stride, padding, 5)\n self.up6 = DUpBlock(base_filter, kernel, stride, padding, 5)\n self.down6 = DDownBlock(base_filter, kernel, stride, padding, 6)\n self.up7 = DUpBlock(base_filter, kernel, stride, padding, 6)\n\n # Reconstruction\n self.output_conv = ConvBlock(num_stages * 
base_filter, num_channels, 3, 1, 1, activation=None, norm=None)\n\n def construct(self, x):\n \"\"\"ddbpn compute graph\n Args:\n x(Tensor): low resolution image\n Outputs:\n Tensor\n \"\"\"\n x = self.feat0(x)\n x = self.feat1(x)\n\n h1 = self.up1(x)\n l1 = self.down1(h1)\n h2 = self.up2(l1)\n\n op = mindspore.ops.Concat(1)\n\n concat_h = op((h2, h1))\n l = self.down2(concat_h)\n\n concat_l = op((l, l1))\n h = self.up3(concat_l)\n\n concat_h = op((h, concat_h))\n l = self.down3(concat_h)\n\n concat_l = op((l, concat_l))\n h = self.up4(concat_l)\n\n concat_h = op((h, concat_h))\n l = self.down4(concat_h)\n\n concat_l = op((l, concat_l))\n h = self.up5(concat_l)\n\n concat_h = op((h, concat_h))\n l = self.down5(concat_h)\n\n concat_l = op((l, concat_l))\n h = self.up6(concat_l)\n\n concat_h = op((h, concat_h))\n l = self.down6(concat_h)\n\n concat_l = op((l, concat_l))\n h = self.up7(concat_l)\n\n concat_h = op((h, concat_h))\n x = self.output_conv(concat_h)\n\n return x\n","sub_path":"research/cv/DBPN/src/model/ddbpn.py","file_name":"ddbpn.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"504308523","text":"# Given a circular array C of integers represented by A, find the maximum possible sum of a non-empty subarray of C.\n\n# Here, a circular array means the end of the array connects to the beginning of the array.\n# (Formally, C[i] = A[i] when 0 <= i < A.length, and C[i+A.length] = C[i] when i >= 0.)\n\n# Also, a subarray may only include each element of the fixed buffer A at most once.\n# (Formally, for a subarray C[i], C[i+1], ..., C[j], there does not exist i <= k1, k2 <= j with k1 % A.length = k2 % A.length.)\n\n# Example 1:\n# Input: [1,-2,3,-2]\n# Output: 3\n# Explanation: Subarray [3] has maximum sum 3\n\n# Example 2:\n# Input: [5,-3,5]\n# Output: 10\n# Explanation: Subarray [5,5] has maximum sum 5 + 5 = 10\n\n# Example 3:\n# Input: [3,-1,2,-1]\n# Output: 4\n# Explanation: Subarray [2,-1,3] has maximum sum 2 + (-1) + 3 = 4\n\n# Example 4:\n# Input: [3,-2,2,-3]\n# Output: 3\n# Explanation: Subarray [3] and [3,-2,2] both have maximum sum 3\n\n# Example 5:\n# Input: [-2,-3,-1]\n# Output: -1\n# Explanation: Subarray [-1] has maximum sum -1\n\ndef maxSubarraySumCircular(A):\n maxSubArraySum = maxSubArray(A)\n totalSum = 0\n for i in range(len(A)):\n totalSum += A[i]\n A[i] = -A[i]\n totalSum = totalSum + maxSubArray(A)\n if totalSum>maxSubArraySum and totalSum!=0:\n return totalSum\n else:\n return maxSubArraySum\n\ndef maxSubArray(nums):\n maxSubArraySum = nums[0]\n sumAtIndex = nums[0]\n for num in nums[1:]:\n sumAtIndex = max(num, sumAtIndex+num)\n maxSubArraySum = max(maxSubArraySum, sumAtIndex)\n return maxSubArraySum\n\n# Alternate Solution:\n# def maxSubarraySumCircular(A):\n# maxElement = max(A)\n# if maxElement<0:\n# return maxElement\n# maxSubArraySum1 = maxSubArray(A)\n# maxSubArraySum2 = sum(A) + maxSubArray([-num for num in A])\n# return max(maxSubArraySum1, maxSubArraySum2)\n\n\n# def maxSubArray(nums):\n# maxSubArraySum = nums[0]\n# sumAtIndex = nums[0]\n# for num in nums[1:]:\n# sumAtIndex = max(num, sumAtIndex+num)\n# maxSubArraySum = max(maxSubArraySum, sumAtIndex)\n# return maxSubArraySum\n\n# Alternate Solution\n# def maxSubarraySumCircular(A):\n# curMax = maxSum = curMin = minSum = total = A[0]\n# for num in A[1:]:\n# curMax = max(curMax + num, num)\n# maxSum = max(maxSum, curMax)\n# curMin = min(curMin + num, num)\n# minSum = min(curMin, minSum)\n# total += num\n# 
return max(maxSum, total - minSum) if maxSum > 0 else maxSum\n","sub_path":"may2020/solutions/day15_MaximumSumCircularSubarray.py","file_name":"day15_MaximumSumCircularSubarray.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"546944066","text":"from discord.ext import commands\nfrom discord import Embed\nimport sqlite3\n\n\nclass WarStrats(commands.Cog):\n    \"\"\"\n    A cog where war strategies can be viewed and added to the database.\n    \"\"\"\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command(name=\"strats\")\n    async def war_strats(self, ctx, townhall, *, attack_name=None):\n        \"\"\"\n        This command is used to view a war strategy by specifying the townhall and attack name,\n        or all attack strategies can be viewed by just passing the townhall level.\n        \"\"\"\n        if int(townhall) > 12:\n            return await ctx.send(\"Invalid townhall level!\")\n        embed = Embed()\n        embed.description = \"\"\n        embed.colour = 0x01d277\n        conn = sqlite3.connect(\"goldengators.db\")\n        c = conn.cursor()\n        if attack_name is None:\n            c.execute(\"SELECT * FROM strategies WHERE townhall=\"+townhall+\"\")\n            all_strats = c.fetchall()\n            if len(all_strats) == 0:\n                embed.title = f'There are no attack strategies available for {townhall} :'\n                embed.description += 'None'\n            else:\n                embed.title = f'Available strategies for TownHall {townhall} are :'\n                for strat in all_strats:\n                    embed.description += strat[1].capitalize() + '\\n'\n            conn.close()\n\n        else:\n            c.execute(\"SELECT * FROM strategies WHERE townhall=\"+townhall+\" and name='\"+attack_name.lower()+\"'\")\n            strat = c.fetchone()\n            if strat is None:\n                return await ctx.send(f\"No such strategy called {attack_name}\")\n            embed.title = f'{attack_name} for TownHall {townhall} :'\n            embed.description = f\"{strat[2]} \\n Reference : {strat[3]}\"\n\n        await ctx.send(embed=embed)\n\n    @commands.command(name=\"addstrat\")\n    async def add_strategy_to_db(self, ctx, *, info):\n        \"\"\"\n        Add an attack strategy to the database.\n        syntax : bot.addstrat townhall-name-description-reference\n        \"\"\"\n        townhall, name, description, reference = info.split('-')\n        if int(townhall) > 12:\n            return await ctx.send(\"Invalid townhall level!\")\n        conn = sqlite3.connect(\"goldengators.db\")\n        c = conn.cursor()\n        c.execute(\"SELECT * FROM strategies WHERE townhall=\" + townhall + \" and name='\" + name.lower() + \"'\")\n        strat = c.fetchone()\n        if strat is None:\n            c.execute(\"\"\"INSERT INTO strategies VALUES (\n                \"\"\"+townhall+\"\"\",\n                '\"\"\"+name.lower()+\"\"\"',\n                '\"\"\"+description+\"\"\"',\n                '\"\"\"+reference+\"\"\"'\n                )\"\"\")\n            conn.commit()\n            conn.close()\n            await ctx.send(f'Attack strategy {name} for townhall {townhall} has been added !')\n        else:\n            await ctx.send(\"Attack strategy already registered.\")\n\n    @commands.command(name=\"removestrat\")\n    @commands.has_any_role(255403079669645312, 255409211385708546, 255419757895876608)\n    async def remove_strat_from_db(self, ctx, *, info):\n        \"\"\"\n        Removes a strat from the database.\n        argument : Info : townhall-name\n        \"\"\"\n        townhall, name = info.split('-')\n        conn = sqlite3.connect(\"goldengators.db\")\n        c = conn.cursor()\n        c.execute(\"DELETE FROM strategies WHERE townhall=\" + townhall + \" and name='\" + name.lower() + \"'\")\n        conn.commit()\n        conn.close()\n        await ctx.send(f\"Attack strategy {name} has been removed for townhall {townhall} from the database.\")\n\n    @war_strats.error\n    async def war_strats_error(self, ctx, error):\n        if isinstance(error, commands.CommandInvokeError):\n            return await 
ctx.send(\"```Please also specify the townhall level, in the correct order!```\")\n        else:\n            await ctx.send(str(error))\n\n    async def on_command_error(self, ctx, error):\n        await ctx.send(str(error))\n\n\ndef setup(bot):\n    bot.add_cog(WarStrats(bot))\n","sub_path":"bot/cogs/war_strats.py","file_name":"war_strats.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"29099672","text":"import os\nimport random\n\nif __name__ == \"__main__\":\n    os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"lyh.settings\")\n    import django\n\n    django.setup()\n    from app import models\n\n    l1 = []\n    for i in range(1, 101):\n        obj = models.Customer(\n            qq=''.join([str(i) for i in random.choices(range(1, 10), k=11)]),\n            name='lihua' + str(i),\n            sex=random.choice(['male', 'female']),\n            source=random.choice(['qq', 'referral', 'website']),\n            course=random.choice(['LinuxL', 'PythonFullStack']),\n        )\n        l1.append(obj)\n    models.Customer.objects.bulk_create(l1)\n","sub_path":"lyh/leading.py","file_name":"leading.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"584270387","text":"''' 1.Every node is immediately aware of its successor and the successor's successor\n    2.When entering the network, a newly joined node needs to specify those two nodes above\n    3.Predecessors are made aware via ping requests from them\n    4.Global variables for peers: sucnode_1, sucnode_2, prenode_1,\n    5.One UDP listener for ping sending and receiving\n    6.Shortcut vars for storing the shortcut\n    7.One TCP listener for file transmission, requests and storage\n    8.Command Line params: sucnode_1 prenode (with UDP port number)\n'''\nimport sys\nimport threading\nimport socket\nimport time\nimport select\nimport os\nimport datetime\n\nUDP_PORT_BASE = 30000;#port base for udp transmission\nTCP_PORT_BASE = 50000;#port base for tcp transmission\nFILE_PORT_BASE = 60000;#port base for file transmission\nHAVE_SUCNODE2 = False;#currently got the sucnode_2\nPRENODE_INFORMED = False #inform the prenode to change the sucnode_1\nSTATUS_PING_TIMEOUT = 5.0; #timeout for a liveness ping\nBUFFER = 1024;\nSHOW_TRIVAL_MSG = False;#show the ping message or not\nSHORTCUT_AVA = False;#the shortcut node is alive or not\nSUCNODE1_AVA = False;#the sucnode_1 is alive or not\nSTATUS_PING_INTERVAL = 3.0;\nNODE_TIMEOUT_INTERVAL = 5.0;#the maximum time a node has to respond to a ping req\nMAX_TCP_CONN = 5; #Max TCP connections that could be handled at the same time\nMAX_ID = 255;#Max number of peers\nFILE_ALLOCATED_TO_SELF = 5\nFILE_NOT_ALLOCATED_TO_SELF = 8\nTRAN = 1 #Mode for file transmission, give the file to the destination\nDOWNLOAD = 0 #Mode for file download, get the file from destination\nSHORTCUT_NUMBER = 0\nSEND_SCT_ACK = False\nLOCALHOST = socket.gethostbyname(socket.gethostname())\n\n\ndef initialization():\n    if len(sys.argv) - 1 < 2:\n        print(\"usage: [successor node(IP:port#)] [predecessor node(IP:port#)]\")\n        exit(1)\n\n    global sucnode_1, sucnode_2,prenode,shortcutnode,self_identifier,pre_id,suc_id,SUCNODE1_AVA\n    try:\n        #Get self_identifier by subtracting the UDP port base from the UDP port number\n        #If the sucnode is not specified, just add 120 to the prenode's ID and make it our own\n        sucnode_1 = None\n        
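# sucnode_1 is the immediate successor and sucnode_2 its successor; both start unset here and are filled from argv below or discovered later via ASKNEXT\n        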
sucnode_2 = None\n prenode = None\n if sys.argv[2] == \"null\" and sys.argv[1] == \"null\":\n SUCNODE1_AVA = False\n self_identifier = 1\n elif sys.argv[1] == \"null\":\n SUCNODE1_AVA = False\n prenode = ((sys.argv[2].split(\":\")[0]), int((sys.argv[2].split(\":\")[1])))\n pre_id = prenode[1] - UDP_PORT_BASE\n self_identifier = pre_id + 120\n\n #If it is the first node than we assign 1 to selfID\n\n #If we have both suc and pre node ,than just calculate the avg of their ID as own ID\n else:\n sucnode_1 = ((sys.argv[1].split(\":\")[0]), int((sys.argv[1].split(\":\")[1])))\n prenode = ((sys.argv[2].split(\":\")[0]), int((sys.argv[2].split(\":\")[1])))\n pre_id = prenode[1] - UDP_PORT_BASE\n suc_id = sucnode_1[1] - UDP_PORT_BASE\n self_identifier = (pre_id + suc_id) / 2\n\n except Exception as e:\n print(\"Input strings seems to be unable to parse\\n\" + e.message)\n exit(1)\n main_procedure()\n\n\ndef UrgentContact():\n global sucnode_1,HAVE_SUCNODE2,SUCNODE1_AVA,last_suc_reply,suc_id\n if not HAVE_SUCNODE2:\n print(\"Unforunately We Don't have the successor node 2\")\n return\n urgent_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n urgent_socket.settimeout(STATUS_PING_TIMEOUT)\n urgent_socket.sendto(\"SEQ\", sucnode_2)\n u_data, u_addr = urgent_socket.recvfrom(BUFFER)\n if u_data == \"ACK\" and u_addr == sucnode_2:\n sucnode_1 = sucnode_2\n suc_id = sucnode_1[1] - UDP_PORT_BASE\n HAVE_SUCNODE2 = False\n last_suc_reply = time.time()\n SUCNODE1_AVA = True\n print(\"Fortunately Successor node 2 is Online! Setting successor node 1 to him!\")\n else:\n SUCNODE1_AVA = False\n HAVE_SUCNODE2 = False\n\ndef printbycom(str,show_trival_msg):\n if(show_trival_msg):\n print(str)\n else:\n return\n\ndef myhash(target):\n SUM = 0\n for i in target:\n SUM += ord(i)\n return (SUM * 5) % MAX_ID\n\ndef Contact_and_Transfer(src_ip,src_port,mode,filename):\n sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n sock.connect((src_ip,src_port))\n # if self had been requested to transfer a file allocated to self, then we\n # firstly scan the directory the whether we have that file, than if we have it,\n # Transmit the command with the beginning of \"FILE_READY\" otherwise we say \"FILE_NOT_AVA\"\n if mode == TRAN:\n fpath = \"localdata/\" + filename\n if os.path.isfile(fpath):\n # Send Ready and transmit\n sock.send(\"FILE_READY\")\n printbycom(\"We have the file they want! 
sending back confirmation\",SHOW_TRIVAL_MSG)\n response = sock.recv(BUFFER)\n if response == \"RECV_READY\":\n fo = open(fpath, 'rb')\n assert fo\n while True:\n filedata = fo.read(1024)\n if not filedata:\n break\n sock.send(filedata)\n fo.close()\n sock.close()\n printbycom(\"Transmision completed!\",SHOW_TRIVAL_MSG)\n else:\n printbycom(\"We do not have the file here in local data maybe there is an error\",SHOW_TRIVAL_MSG)\n sock.send(\"FILE_NOT_AVA\")\n sock.close()\n\n # if we are supposed to store the file ,just simply tell the source we are ready and receive\n # the file\n else:\n printbycom(\"We are supposed to store the file,waiting on receiveing......\",SHOW_TRIVAL_MSG)\n sock.send(\"RECV_READY\")\n fpath = \"localdata/\" + filename\n fo = open(fpath,'wb')\n while True:\n recvdata = sock.recv(BUFFER)\n if not recvdata:\n break\n fo.write(recvdata)\n printbycom(\"Successfully received file from\" + bytes(src_ip) + \":\" + bytes(src_port),SHOW_TRIVAL_MSG)\n sock.close()\n\n\n\ndef Send_TCP_msg(msg,ip,port):\n sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n sock.connect((ip,int(port)))\n sock.send(msg)\n sock.close()\ndef Send_UDP_msg(msg,ip,port):\n sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n sock.sendto(msg,(ip,port))\n sock.close()\n\n\ndef Status_monitor():\n '''\n // 1.Ping the sucnode_1 only, if it is surely dead, ping sucnode_2, then if sucnode_2 is dead too,\n ask the user to specify a new successor\n '''\n global sucnode_1,sucnode_2,prenode,shortcutnode,self_identifier,pre_id,suc_id\n global SUCNODE1_AVA,SHORTCUT_AVA,HAVE_SUCNODE2\n global SEND_SCT_ACK,shortcutpre\n\n global last_suc_reply\n last_suc_reply = time.time()#the time last suc ack arrived\n last_suc_sent = 0 #the time that last ping to sucnode_1 was sent\n global last_sct_reply#Set to time() when need shortcut so that they won't be judged as timeout in the first round\n last_sct_sent = 0\n\n udp_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n udp_socket.setblocking(False)\n udp_socket.bind((socket.gethostname(),UDP_PORT_BASE + self_identifier))\n\n inputs = [udp_socket, ]\n outputs = [udp_socket,]\n while True:\n try:\n data = None\n addr = None\n r_list, w_list, e_list = select.select(inputs, outputs, [], 1)\n for event in r_list:\n try:\n data,addr = event.recvfrom(BUFFER)\n ip = addr[0]\n port = addr[1]\n msg_type = data\n if msg_type == \"SEQ\":\n # handle incoming ping request by send back ack\n printbycom(\"Incomming ping message from\" + bytes(addr), SHOW_TRIVAL_MSG)\n # Set the prenode to the node that is pingping current node\n prenode = addr\n pre_id = prenode[1] - UDP_PORT_BASE\n udp_socket.sendto(\"ACK\", (ip, port))\n if msg_type == \"ACK\" and addr == sucnode_1:\n last_suc_reply = time.time()\n printbycom(\"Successor node \" + bytes(sucnode_1) + \" is online\", SHOW_TRIVAL_MSG)\n\n if msg_type == \"SCTACK\":\n SHORTCUT_AVA = True\n shortcutnode = addr\n last_sct_reply = time.time()\n printbycom(\"Short cut node \" + bytes(shortcutnode) + \" is online\", SHOW_TRIVAL_MSG)\n\n if msg_type == \"SCTSEQ\":\n udp_socket.sendto(\"SCTACK\", (ip, port))\n printbycom(\"We have become a short cut node for\" + bytes(addr), SHOW_TRIVAL_MSG)\n break\n except Exception as e:\n print(\"Exception happens among socket receiving\" + e.message)\n for w_events in w_list:\n if SEND_SCT_ACK:\n w_events.sendto(\"SCTACK\",shortcutpre)\n SEND_SCT_ACK =False\n\n if SUCNODE1_AVA and (time.time() - last_suc_sent > STATUS_PING_INTERVAL):\n last_suc_sent = time.time()\n udp_socket.sendto(\"SEQ\", 
sucnode_1)\n printbycom(\"Sending ping to test \" + bytes(sucnode_1),SHOW_TRIVAL_MSG)\n\n if SUCNODE1_AVA and (time.time() - last_suc_reply > NODE_TIMEOUT_INTERVAL):#if the sucnode is timeout ,try to contact sucnode_2\n print(\"Successor node 1 is proved to be offline, trying to contact successor node 2\")\n SUCNODE1_AVA = False\n\n thread = threading.Thread(target=UrgentContact)\n thread.setDaemon(True)\n thread.start()\n\n if SHORTCUT_AVA and (time.time() - last_sct_sent > STATUS_PING_INTERVAL):\n last_sct_sent = time.time()\n udp_socket.sendto(\"SCTSEQ\", shortcutnode)\n printbycom(\"Sending ping to test Short cut\" + bytes(shortcutnode), SHOW_TRIVAL_MSG)\n\n if SHORTCUT_AVA and (time.time() - last_sct_reply > NODE_TIMEOUT_INTERVAL): # if the sucnode is timeout try to contact sucnode_2\n SHORTCUT_AVA = False\n Send_TCP_msg(\"SCT:\" + bytes(SHORTCUT_NUMBER) + \":\" + bytes(\n LOCALHOST) + \":\" + bytes(\n UDP_PORT_BASE + self_identifier), sucnode_1[0], suc_id + TCP_PORT_BASE)\n printbycom(\"Short cut node 1 is proved to be offline We are trying to find a new one\",SHOW_TRIVAL_MSG)\n\n except socket.timeout as e:\n print(\"Socket timout\")\n except Exception as e:\n print(\"Exception happened during udp ping receiving\" + e.message)\n continue\n\n\ndef Command_monitor():\n\n global last_sct_reply, last_suc_reply, sucnode_1,self_identifier,HAVE_SUCNODE2,SUCNODE1_AVA,sucnode_2,suc_id\n global JOINING_NETWORK\n global SEND_SCT_ACK,shortcutpre\n tcp_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n tcp_sock.bind((socket.gethostname(),TCP_PORT_BASE + self_identifier))\n tcp_sock.setblocking(False)\n tcp_sock.listen(MAX_TCP_CONN)\n\n inputs = [tcp_sock, ]\n while True:\n #TCP monitor need to be none blocking\n #command as the first string before ':'\n while True:\n r_list, w_list, e_list = select.select(inputs, [], [], 1)\n for event in r_list:\n if event == tcp_sock:\n new_sock, addr = event.accept()\n inputs.append(new_sock)\n else:\n data = event.recv(BUFFER)\n command = bytes(data).split(\":\")[0]\n if data:\n if command == \"QUIT\":\n print(\"Our successor node \" + bytes(sucnode_1) + \"is leaving the network\")\n # if we do not have a suc_node 2, just let the user to specify a new successor\n if not HAVE_SUCNODE2:\n SUCNODE1_AVA = False\n print(\"Sucnode has quit and we do not have the sucnode 2\", SHOW_TRIVAL_MSG)\n # if we do have a sucnode 2, make it the sucnode_1 and update the suc_id\n else:\n HAVE_SUCNODE2 = False\n sucnode_1 = sucnode_2\n suc_id = sucnode_1[1] - UDP_PORT_BASE\n print(\"Now \" + bytes(sucnode_1) + \"is my successor node\")\n break\n\n # Handling the new node joining the network\n # Set the sucnode via the info and set have sucnode2 to false to find a new backupnode\n if command == \"JOIN\":\n ip = data.split(\":\")[1]\n port = int(data.split(\":\")[2])\n sucnode_1 = (ip,port)\n suc_id = sucnode_1[1] - UDP_PORT_BASE\n HAVE_SUCNODE2 = False\n print(\"A new Node\" + bytes(sucnode_1) + \" has joined the network\")\n # Handling the new predecessor's request to ask for next node\n if command == \"ASKNEXT\":\n # Tell the pre node\n if SUCNODE1_AVA:\n event.send(\"NEXT:\" + bytes(sucnode_1[0]) + \":\" + bytes(sucnode_1[1]))\n printbycom(\"PreNode \" + bytes(addr[0]) + \":\" + bytes(addr[1]) + \"is asking for next node\",\n SHOW_TRIVAL_MSG)\n else:\n event.send(\"NEXT:NULL\")\n break\n # Handling the File storing request, if we are supposed to store the file, than store it\n # Else we transmit it to next node\n if command == \"STORE\" or command == \"REQ\":\n # 
Compare the myhash value with the self ID and suc_node ID\n filename = data.split(\":\")[1]\n src_ip = data.split(\":\")[2]\n src_port = int(data.split(\":\")[3])\n\n printbycom(\"Node\" + bytes(src_ip) + \":\" + bytes(\n src_port) + \"is \" + command.lower() + \" for\" + filename, SHOW_TRIVAL_MSG)\n if Check_File_Ava(filename) == FILE_ALLOCATED_TO_SELF:\n printbycom(\"File is avaliable here\", SHOW_TRIVAL_MSG)\n # TODO Multi threading\n if command == \"REQ\":\n Contact_and_Transfer(src_ip, src_port, TRAN,filename)\n else:\n Contact_and_Transfer(src_ip, src_port, DOWNLOAD,filename)\n break\n # If the myhash value is greater than both self and the successor node ID we forward the command\n elif Check_File_Ava(filename) == FILE_NOT_ALLOCATED_TO_SELF:\n # If the successor node is ava than forward the message\n if SUCNODE1_AVA:\n printbycom(\"File is not ava here ,forwarding the request\", SHOW_TRIVAL_MSG)\n Send_TCP_msg(data, sucnode_1[0], suc_id + TCP_PORT_BASE)\n break\n # Handle shortcut searching request\n if command == \"SCT\":\n searchcount = int(data.split(\":\")[1])\n src_ip = data.split(\":\")[2]\n src_port = int(data.split(\":\")[3])\n printbycom(\"Incomming SCT request from\" + bytes(addr) +\n \"with the message from \" + bytes(src_ip) + bytes(src_port), SHOW_TRIVAL_MSG)\n # If the search count is down to 1 than we know that self is the shortcut node\n # that the node is looking for\n if searchcount == 1:\n printbycom(\"Shortcut searching hit! Responding back\", SHOW_TRIVAL_MSG)\n\n\n # Just send the SCTACK to the dest ip and port to notify him we are the\n # short cut node his looking for\n\n shortcutpre = (src_ip,src_port)\n SEND_SCT_ACK = True\n else:\n # Else we just decrease the search count by 1 and forward the request\n searchcount = searchcount - 1\n if SUCNODE1_AVA:\n Send_TCP_msg(\"SCT:\" + bytes(searchcount) + \":\" + bytes(src_ip) + \":\" + bytes(src_port),sucnode_1[0], suc_id + TCP_PORT_BASE)\n break\n else:\n inputs.remove(event)\n #After the iteration we send out a message to ask for sucnode2 if we do not have one\n if SUCNODE1_AVA and not HAVE_SUCNODE2:\n Get_nextnode(sucnode_1[0], suc_id + TCP_PORT_BASE)\n\n\ndef Check_File_Ava(filename):\n myhashvalue = myhash(filename)\n\n if myhashvalue == self_identifier:\n return FILE_ALLOCATED_TO_SELF\n\n #If we are at the end of the circle just allocate to self\n elif self_identifier > suc_id:\n return FILE_ALLOCATED_TO_SELF\n\n # If One of self and the predecessor node is meant to store the file, than just store it cause we have compared in the cases below\n elif self_identifier > myhashvalue > pre_id:\n return FILE_ALLOCATED_TO_SELF\n\n elif self_identifier < myhashvalue < suc_id:\n # If One of self and the successor node is meant to store the file, than compare which one is closer\n if abs(myhashvalue - self_identifier) < abs(myhashvalue - suc_id):\n return FILE_ALLOCATED_TO_SELF\n else:\n return FILE_NOT_ALLOCATED_TO_SELF\n #Just return not allocated to self otherwise\n else:\n return FILE_NOT_ALLOCATED_TO_SELF\n\ndef Get_nextnode(ip,port):\n addr = (ip,port)\n global HAVE_SUCNODE2,sucnode_2\n try:\n sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n sock.connect(addr)\n sock.send(\"ASKNEXT\")\n printbycom(\"Sending out request to ask for sucnode 2\",SHOW_TRIVAL_MSG)\n data = sock.recv(BUFFER)\n if data:\n if data.split(\":\")[1] != \"NULL\":\n HAVE_SUCNODE2 = True\n sucnode_2 = (data.split(\":\")[1],int(data.split(\":\")[2]))\n sock.close()\n else:\n printbycom(\"It seems our succseeor node does not have 
successor\",SHOW_TRIVAL_MSG)\n HAVE_SUCNODE2 = False\n sock.close()\n except Exception as e:\n print(\"Exception happens during getting the next node \" + e.message)\n\n\ndef main_procedure():\n global sucnode_1, sucnode_2,prenode,shortcutnode,self_identifier,pre_id,suc_id,SUCNODE1_AVA\n global SHORTCUT_NUMBER,HAVE_SUCNODE2,SHOW_TRIVAL_MSG,last_suc_reply,last_sct_reply\n global JOINING_NETWORK\n if sucnode_1 != None and prenode != None:\n print(\"Attempting to joining the network with successor \" + sucnode_1[0] + \" and the predecessor \" + prenode[0])\n try:\n Send_TCP_msg(\"JOIN:\" + bytes(LOCALHOST) + \":\" + bytes(UDP_PORT_BASE + self_identifier),prenode[0],pre_id + TCP_PORT_BASE)\n SUCNODE1_AVA = True\n except Exception as e:\n print(\"Exception happens when trying to join the network \" + e.message)\n sys.exit(1)\n\n\n #File Storing and Requesting Command Format 'STORE:[filename]:[localhost_name]:[localhost_port]'\n #Shortcut searching command format 'SCT:[localhost_name]:[localhost_name]:[search_count]'\n #Joining the network\n thread_1 = threading.Thread(target=Status_monitor)\n thread_1.setDaemon(True)\n thread_1.start()\n thread_2 = threading.Thread(target=Command_monitor)\n thread_2.setDaemon(True)\n thread_2.start()\n\n #variable to store the shortcut count we need\n\n while True:\n\n if not SUCNODE1_AVA:\n try:\n print(\"It seems our sucessor node is now not available please specify a new one\")\n str = raw_input(\"input the suc_node ip and UDP port number divided by ':'\")\n sucnode_1 = (str.split(\":\")[0], int(str.split(\":\")[1]))\n suc_id = sucnode_1[1] - UDP_PORT_BASE\n SUCNODE1_AVA = True\n HAVE_SUCNODE2 = False\n last_suc_reply = time.time()\n continue\n except Exception as e:\n print(\"exception happened when parsing the address please retry\")\n continue\n\n command = raw_input(\"Please input next command\")\n if command == \"exit\":\n try:\n Send_TCP_msg(\"QUIT\", prenode[0], pre_id + TCP_PORT_BASE)\n print(\"successfully quited the network\")\n sys.exit(1)\n except Exception as e:\n print(\"Exception happened when quiting the network\")\n elif command.split(\" \")[0] == \"set\":\n param = command.split(\" \")[1]\n if param == \"shortcut\":\n SHORTCUT_NUMBER = int(command.split(\" \")[2])\n Send_TCP_msg(\"SCT:\" + bytes(SHORTCUT_NUMBER) + \":\" + bytes(LOCALHOST) + \":\" + bytes(\n UDP_PORT_BASE + self_identifier), sucnode_1[0], suc_id + TCP_PORT_BASE)\n continue\n elif param == \"sucnode\":\n try:\n HAVE_SUCNODE2 = False\n sucnode_1 = (command.split(\" \")[2].split(\":\")[0],int (command.split(\" \")[2].split(\":\")[1]))\n last_suc_reply = time.time()\n continue\n except Exception as e:\n print(\"Exception happens during parsing\" + e.message)\n continue\n elif param == \"showpingmsg\":\n switch = command.split(\" \")[2]\n if switch == \"on\":\n SHOW_TRIVAL_MSG = True\n print(\"Now the ping message should be seen\")\n else :\n SHOW_TRIVAL_MSG = False\n print(\"Now the ping message is not visible\")\n elif command.split(\" \")[0] == \"get\":\n param = command.split(\" \")[1]\n if param == \"self_udp_port\":\n print(\"UDP port:\" + bytes(UDP_PORT_BASE + self_identifier))\n continue\n elif param == \"self_tcp_port\":\n print(\"TCP port:\" + bytes(TCP_PORT_BASE + self_identifier))\n continue\n elif param == \"prenode\":\n print(\"prenode is:\" + bytes(prenode))\n elif param == \"sucnode\":\n print(\"sucnode is:\" + bytes(sucnode_1))\n elif param == \"backupnode\":\n str = bytes(sucnode_2) if HAVE_SUCNODE2 else \"no backup node avaliable\"\n print(\"backupnode is:\" + 
str)\n elif param == \"shortcutnode\":\n str = bytes(shortcutnode) if SHORTCUT_AVA else \"no shortcut node avaliable\"\n print (\"shortcut node is:\" + str)\n elif command.split(\" \")[0] == \"store\":\n fpath = command.split(\" \")[1]\n filename = fpath.split(\"/\")[len(fpath.split(\"/\")) - 1]\n if not os.path.isfile(fpath):\n print(\"File path not valid\")\n continue\n else:\n sock_get = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n sock_get.bind((LOCALHOST, self_identifier + FILE_PORT_BASE))\n sock_get.listen(1)\n if not SHORTCUT_AVA:\n Send_TCP_msg(\"STORE:\" + filename + \":\" + bytes(LOCALHOST) + \":\" +\n bytes(self_identifier + FILE_PORT_BASE),sucnode_1[0],suc_id + TCP_PORT_BASE)\n else:\n Send_TCP_msg(\"STORE:\" + filename + \":\" + bytes(LOCALHOST) + \":\" +\n bytes(self_identifier + FILE_PORT_BASE), shortcutnode[0], (shortcutnode[1] - UDP_PORT_BASE) + TCP_PORT_BASE)\n print(\"Sending out storing request,listening for reply\")\n conn,addr = sock_get.accept()\n info = conn.recv(BUFFER)\n fo = open(fpath,\"rb\")\n if info == \"RECV_READY\":\n print(\"Request responded.Attempting to transmit file\")\n while True:\n filedata = fo.read(BUFFER)\n if not filedata:\n break\n conn.send(filedata)\n fo.close()\n conn.close()\n sock_get.close()\n print(\"File storing success!\")\n\n elif command.split(\" \")[0] == \"req\":\n # TODO Muiltithreading\n sock_rec = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n sock_rec.bind((LOCALHOST, self_identifier + FILE_PORT_BASE))\n sock_rec.listen(1)\n filename = command.split(\" \")[1]\n if not SHORTCUT_AVA:\n #TODO test the shortcut search sending\n Send_TCP_msg(\"REQ:\" + filename + \":\" + bytes(LOCALHOST) + \":\" +\n bytes(self_identifier + FILE_PORT_BASE), sucnode_1[0], suc_id + TCP_PORT_BASE)\n else:\n Send_TCP_msg(\"REQ:\" + filename + \":\" + bytes(LOCALHOST) + \":\" +\n bytes(self_identifier + FILE_PORT_BASE), shortcutnode[0],\n (shortcutnode[1] - UDP_PORT_BASE) + TCP_PORT_BASE)\n print(\"Sending out request\")\n conn,addr = sock_rec.accept()\n info = conn.recv(BUFFER)\n if info == \"FILE_READY\":\n conn.send(\"RECV_READY\")\n print(\"File is found ! 
Receiving \")\n fpath = \"localrecv/\" + bytes(datetime.datetime.now()) + \"_\" + filename\n fo = open(fpath,'wb')\n while True:\n data = conn.recv(BUFFER)\n if not data:\n break\n fo.write(data)\n print(\"File transmission completed!\")\n fo.close()\n conn.close()\n sock_rec.close()\n else:\n print(\"File not found\")\n sock_rec.close()\n\n\n else:\n print(\"Invalid command please reinput\\n\")\n continue\n\n\n\n\nif __name__ == '__main__':\n initialization()\n\n","sub_path":"Submission/dht_node_4.py","file_name":"dht_node_4.py","file_ext":"py","file_size_in_byte":27272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"487221485","text":"def ensure_unicode(value):\n if value is None:\n return None\n if type(value) is not unicode:\n value = value.decode('utf8')\n return value\n\nclass Argument(object):\n def __init__(self, offset, val):\n self.offset = offset\n self.val = ensure_unicode(val)\n\nclass Dictable(object):\n include_type = False\n \n def to_dict(self):\n result = {}\n if self.include_type:\n result['type'] = self.type\n for name, value in self.__dict__.items():\n if isinstance(value, BasicStatement):\n value = value.to_dict()\n if type(value) is list:\n new_value = []\n for element in value:\n if isinstance(element, BasicStatement):\n new_value.append(element.to_dict())\n else:\n new_value.append(element)\n value = new_value\n if value not in ([], None):\n result[name] = value\n return result\n \nclass BasicStatement(Dictable):\n def __init__(self, comments, keyword, name, line):\n self.comments = comments\n self.keyword = ensure_unicode(keyword)\n self.name = ensure_unicode(name)\n self.line = line\n\n def line_range(self):\n if self.comments:\n first = self.comments[0].line\n else:\n first = self.first_non_comment_line()\n return first, self.line\n\n def first_non_comment_line(self):\n return self.line\n \nclass DescribedStatement(BasicStatement):\n def __init__(self, comments, keyword, name, description, line):\n super(DescribedStatement, self).__init__(comments, keyword, name, line)\n self.description = ensure_unicode(description)\n\nclass TagStatement(DescribedStatement):\n def __init__(self, comments, tags, keyword, name, description, line):\n super(TagStatement, self).__init__(comments, keyword, name, description,\n line)\n self.tags = tags\n\n def first_non_comment_line(self):\n if self.tags:\n return self.tags[0].line\n return self.line\n\nclass Replayable(object):\n type = None\n\n def replay(self, formatter):\n getattr(formatter, self.type)(self)\n\nclass Feature(TagStatement, Replayable):\n type = \"feature\"\n\nclass Background(DescribedStatement, Replayable):\n type = \"background\"\n include_type = True\n\nclass Scenario(TagStatement, Replayable):\n type = \"scenario\"\n include_type = True\n\nclass ScenarioOutline(TagStatement, Replayable):\n type = \"scenario_outline\"\n include_type = True\n\nclass Examples(TagStatement, Replayable):\n type = \"examples\"\n\n def __init__(self, comments, tags, keyword, name, description, line, rows):\n super(Examples, self).__init__(comments, tags, keyword, name,\n description, line)\n self.rows = rows\n\nclass Step(BasicStatement, Replayable):\n type = \"step\"\n\n def __init__(self, comments, keyword, name, line):\n super(Step, self).__init__(comments, keyword, name, line)\n self.rows = None\n self.doc_string = None\n\n def line_range(self):\n lrange = super(Step, self).line_range()\n if self.rows:\n return (lrange[0], self.rows[-1].line)\n elif self.doc_string:\n return 
(lrange[0], self.doc_string.line_range()[1])\n return lrange\n\n def outline_args(self):\n start = 0\n end = 0\n arguments = []\n while True:\n start = self.name.find(u'<', end)\n if start == -1:\n break\n end = self.name.find(u'>', start)\n arguments.append(Argument(start, self.name[start:end + 1]))\n return arguments\n\nclass Comment(Dictable):\n def __init__(self, value, line):\n self.value = ensure_unicode(value)\n self.line = line\n\nclass Tag(Dictable):\n def __init__(self, name, line):\n self.name = ensure_unicode(name)\n self.line = line\n\n def __eq__(self, other):\n return self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\nclass DocString(Dictable):\n def __init__(self, content_type, value, line):\n self.content_type = ensure_unicode(content_type)\n self.value = ensure_unicode(value)\n self.line = line\n\n def line_range(self):\n line_count = len(self.value.splitlines())\n return (self.line, self.line + line_count + 1)\n\nclass Row(Dictable):\n def __init__(self, comments, cells, line):\n self.comments = comments\n self.cells = [ensure_unicode(c) for c in cells]\n self.line = line\n\nclass Match(Dictable, Replayable):\n type = \"match\"\n\n def __init__(self, arguments, location):\n self.arguments = arguments\n self.location = location\n\nclass Result(Dictable, Replayable):\n type = \"result\"\n\n def __init__(self, status, duration, error_message):\n self.status = ensure_unicode(status)\n self.duration = duration\n self.error_message = ensure_unicode(error_message)\n","sub_path":"python/gherkin/formatter/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"318978276","text":"\"\"\"-----------------------------------------------------------------------------\nPURPOSE : This script will evaluate all instruments contained\n within a specified compound portfolio. 
If there's an\n instrument containing valid trades, expiring today or\n within a set nr of business days, then the script will\n send out an email notification to the specified\n destinations as well as save down the report.\nDEPATMENT AND DESK : AAM\nREQUESTER : Suvarn Naidoo\nDEVELOPER : Rohan van der Walt\nCR NUMBER : 2419446\n--------------------------------------------------------------------------------\n\nHISTORY\n================================================================================\nDate Change no Developer Description\n--------------------------------------------------------------------------------\n2014-10-30 2419446 Rohan vd Walt Initial Implementation\n2019-02-11 CHG1001378099 Iryna Shcherbina Do not send email if no expiring\n (FAPE-37) instruments found; some refactoring\n2019-02-26 FAPE-51 Iryna Shcherbina Add trade portfolios' positions on\n demand; switch to html body\n2019-10-14 FAPE-118 Iryna Shcherbina Ignore Void and Simulated trades\n\"\"\"\n\nfrom collections import defaultdict\nfrom operator import methodcaller\nfrom textwrap import dedent\n\nimport acm\n\nfrom at import INST_DEPOSIT\nfrom at_ael_variables import AelVariableHandler\nfrom at_email import EmailHelper\nfrom at_logging import getLogger\nfrom at_time import bankingday_timediff\nfrom PS_Functions import is_child_portf\n\n\nLOGGER = getLogger(__name__)\nDATE_TODAY = acm.Time.DateToday()\nOPEN_END_STATUS_TEXT = 'Open End'\n\n\ndef email_hook(selected):\n email_destinations = ael_variables.get('email_destinations')\n email_destinations.enabled = selected.value\n email_destinations.mandatory = selected.value\n\n\nael_variables = AelVariableHandler()\nael_variables.add(\n 'portfolio',\n mandatory=True,\n cls='FCompoundPortfolio',\n multiple=True,\n label='Portfolio',\n alt='Compound portfolios that should be checked for expiring instruments'\n)\nael_variables.add(\n 'days_to_expiry',\n mandatory=True,\n cls='int',\n label='Business Days to Expiry',\n alt='How many days before expiry should the instrument be included in the report'\n)\nael_variables.add(\n 'excl_ins_type',\n mandatory=False,\n multiple=True,\n label='Exclude Instrument Types',\n collection=acm.FEnumeration['enum(InsType)'].Values(),\n alt=('This will exclude the selected instrument types\\n'\n 'Call Accounts (Open End Deposits) will always be excluded')\n)\nael_variables.add(\n 'excl_trade_status',\n mandatory=False,\n multiple=True,\n label='Exclude Trade Statuses',\n collection=acm.FEnumeration['enum(TradeStatus)'].Values()\n)\nael_variables.add_bool(\n 'show_positions',\n label='Show Positions',\n default=False,\n alt='Include the list of trade portfolios and positions'\n)\nael_variables.add_bool(\n 'send_email',\n label='Send Email',\n default=True,\n hook=email_hook,\n alt='This will send email to recipients in addition to the file report'\n)\nael_variables.add(\n 'email_destinations',\n mandatory=True,\n label='Email Destinations',\n alt='Email Destinations - Comma Separated'\n)\nael_variables.add(\n 'output_location',\n mandatory=False,\n label='Report Output Location',\n alt='Location where report will be saved',\n)\n\n\nclass ExpiryReport(object):\n\n SUBJECT = 'Instrument Expiry Notification - {date_today} - {env}'\n REPORT_BODY = dedent(\"\"\"\\\n \n \n \n \n \n

Instrument Expiry Notification: {date_today}
\n
\n Checking within the following portfolios:
\n

{portfolios}
\n
\n The following instruments are expiring within {business_days} business day(s):
\n
{instruments}
\n

\n \n \n \"\"\")\n POSITIONS = dedent(\"\"\"\n \n \n \n \n \n {}\n
Trades PortfoliosPrime_Expiries
\n \"\"\")\n\n def __init__(self, portfolios, business_days, show_positions, ignore_trades=()):\n self.instruments = set()\n self.portfolios = portfolios\n self.business_days = business_days\n self.show_positions = show_positions\n self.ignore_trade_statuses = ignore_trades\n\n def _sort_instruments_by(self, method):\n return sorted(self.instruments, key=methodcaller(method))\n\n def _positions(self, instrument):\n ratio = 1.0 / instrument.ContractSize()\n portfolio_to_position = defaultdict(float)\n for trade in instrument.Trades():\n if trade.Status() in self.ignore_trade_statuses:\n continue\n portfolio = trade.Portfolio()\n if any(is_child_portf(portfolio, parent) for parent in self.portfolios):\n portfolio_to_position[portfolio.Name()] += trade.Position() * ratio\n\n if any(portfolio_to_position.values()):\n return self.POSITIONS.format(''.join(\n '{}{:,}'.format(portfolio, pos)\n for portfolio, pos in sorted(portfolio_to_position.items()) if pos))\n else:\n return ''\n\n def _instrument_line(self, instrument):\n line = 'Expiring: {} - {}'.format(\n instrument.ExpiryDateOnly(),\n instrument.Name())\n if self.show_positions:\n line += self._positions(instrument)\n return line\n\n def add_instrument(self, instrument):\n self.instruments.add(instrument)\n\n def get_body(self):\n return self.REPORT_BODY.format(\n date_today=DATE_TODAY,\n business_days=self.business_days,\n portfolios='
'.join(portfolio.Name() for portfolio in self.portfolios),\n instruments='
'.join(\n self._instrument_line(instrument) for instrument in\n self._sort_instruments_by('ExpiryDateOnly')))\n\n def write_to_file(self, full_path):\n with open(full_path, \"w+\") as output_file:\n output_file.write(self.get_body())\n\n def send_mail(self, email_to):\n message = EmailHelper(\n body=self.get_body(),\n subject=self.SUBJECT.format(date_today=DATE_TODAY, env=get_env_name()),\n mail_to=email_to.split(','),\n sender_type=EmailHelper.SENDER_TYPE_SMTP,\n host=EmailHelper.get_acm_host())\n message.send()\n\n\ndef get_env_name():\n return acm.FInstallationData.Select('').At(0).Name()\n\n\ndef expires_within(instrument, business_days):\n expiry_date = instrument.ExpiryDate()\n if expiry_date and DATE_TODAY <= expiry_date:\n timediff = bankingday_timediff(\n instrument.Currency().Calendar(), DATE_TODAY, instrument.ExpiryDate())\n return timediff.days <= business_days\n else:\n return False\n\n\ndef is_valid(instrument, invalid_instypes):\n \"\"\"Check if the instrument type is valid given the list of invalid types.\n\n Always exclude call account expires - it does not\n make sense since they will show up daily.\n \"\"\"\n return not (\n instrument.InsType() in invalid_instypes or\n (instrument.InsType() == INST_DEPOSIT and\n instrument.OpenEnd() == OPEN_END_STATUS_TEXT)\n )\n\n\ndef collect_instruments(portfolios, exclude_trade_statuses, exclude_ins_types):\n instrument_collection = set()\n\n for portfolio in portfolios:\n LOGGER.info('Checking portfolio: {}'.format(portfolio.Name()))\n for trade in portfolio.Trades():\n if (trade.Status() not in exclude_trade_statuses and\n is_valid(trade.Instrument(), exclude_ins_types)):\n instrument_collection.add(trade.Instrument())\n\n return instrument_collection\n\n\ndef ael_main(ael_dict):\n LOGGER.msg_tracker.reset()\n LOGGER.info('Collecting Instruments In Portfolios')\n instruments = collect_instruments(\n ael_dict['portfolio'],\n ael_dict['excl_trade_status'],\n ael_dict['excl_ins_type']\n )\n expiry_report = ExpiryReport(\n ael_dict['portfolio'],\n ael_dict['days_to_expiry'],\n ael_dict['show_positions'],\n ael_dict['excl_trade_status'],\n )\n\n LOGGER.info('Checking Expiry Dates')\n for instrument in instruments:\n if expires_within(instrument, ael_dict['days_to_expiry']):\n LOGGER.info('{} expires on {}'.format(\n instrument.Name(), instrument.ExpiryDateOnly()))\n expiry_report.add_instrument(instrument)\n\n if not expiry_report.instruments:\n LOGGER.info(\n 'There are no instruments expiring within '\n '{} business day(s)'.format(ael_dict['days_to_expiry']))\n return\n\n if ael_dict['output_location']:\n LOGGER.info('Writing to file')\n try:\n expiry_report.write_to_file(ael_dict['output_location'])\n except (IOError, OSError) as err:\n LOGGER.error('Failed to write to file: {}'.format(err))\n else:\n LOGGER.info('Wrote to {}'.format(ael_dict['output_location']))\n\n if ael_dict['send_email']:\n LOGGER.info('Sending email')\n try:\n expiry_report.send_mail(ael_dict['email_destinations'])\n except Exception as err:\n LOGGER.error('Failed to send email notification: {}'.format(err))\n else:\n LOGGER.info('Email sent')\n\n if LOGGER.msg_tracker.errors_counter:\n raise RuntimeError(\"ERRORS occurred. 
Please check the log.\")\n\n LOGGER.info(\"Completed successfully.\")\n","sub_path":"Python modules/Ops_InsExpiryNotification.py","file_name":"Ops_InsExpiryNotification.py","file_ext":"py","file_size_in_byte":10070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"222523528","text":"#!/usr/bin/env python3\nn = int(input())\ndef is_prime_like(n):\n if n == 1:\n return False\n if n in [2, 3, 5]:\n return True\n if int(list(str(n)).pop()) in [0, 2, 4, 5, 6, 8]:\n return False\n if sum(map(int,list(str(n)))) % 3 == 0:\n return False\n return True\nif not is_prime_like(n):\n print('Not ', end='')\nprint('Prime')\n","sub_path":"arc/044/a/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"575991731","text":"import torch\nimport pickle \nimport copy\nimport sys\nimport pdb\nimport time\n\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import preprocessing\nfrom sklearn.metrics import (confusion_matrix, precision_score, recall_score, f1_score,\\\n fbeta_score, roc_auc_score, precision_recall_curve, auc,\\\n brier_score_loss)\nfrom collections import Counter\n\nsys.path.insert(1, '../')\nfrom focal_loss import FocalLoss\n\nfrom load_data import *\n\n# network related constants \nITERATION = 200\nNUM_CLASSES = 2\nETA = 1e-4\n\n# two running environment options below:\n# device = torch.device(\"cpu\")\ndevice = torch.device(\"cuda:0\")\ndtype = torch.float\n\nclass model_ca(nn.Module):\n def __init__(self):\n super(model_ca, self).__init__()\n self.class_count = NUM_CLASSES\n # self.hidden = [1977, 1000, 200]\n self.hidden = [8112,4000, 800] # previously more previously 799 \n\n self.mp4 = nn.MaxPool1d(4)\n self.mp2 = nn.MaxPool1d(2)\n\n # self.conv1 = nn.Conv1d(1,1,256, stride=1, dilation=1)\n # self.conv2 = nn.Conv1d(1,1,8, stride=1, dilation=1)\n self.conv3 = nn.Conv1d(1,1,16, stride=1, dilation=1)\n self.conv4 = nn.Conv1d(1,1,32, stride=1, dilation=1)\n self.conv5 = nn.Conv1d(1,1,64, stride=1, dilation=1)\n self.conv6 = nn.Conv1d(1,1,128, stride=1, dilation=1)\n\n \n\n self.fc1 = nn.Linear(self.hidden[0], self.hidden[1])\n self.fc2 = nn.Linear(self.hidden[1], self.hidden[2])\n self.fc3 = nn.Linear(self.hidden[2], self.class_count)\n\n def forward(self, x_):\n x = x_.data.unsqueeze(1)\n\n # x1 = self.mp4(F.relu(self.conv1(x))).squeeze()\n # x2 = self.mp4(F.relu(self.conv2(x))).squeeze()\n x3 = self.mp4(F.relu(self.conv3(x))).squeeze()\n x4 = self.mp4(F.relu(self.conv4(x))).squeeze()\n x5 = self.mp4(F.relu(self.conv5(x))).squeeze()\n x6 = self.mp4(F.relu(self.conv6(x))).squeeze()\n \n x = torch.cat((x3,x4,x5,x6), dim= 1).data.unsqueeze(1)\n\n # x = self.dropout(x)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n\n return F.softmax(x.squeeze(), dim=1)\n\n# custom weight initialization\ndef init_weights(m):\n if type(m) == nn.Linear:\n torch.nn.init.xavier_uniform_(m.weight)\n m.bias.data.fill_(0.01)\n elif type(m) == nn.Conv1d:\n torch.nn.init.xavier_uniform_(m.weight)\n m.bias.data.fill_(0.01)\n\n\nprint()\nprint(\"********** Classifier Model Training **********\")\nprint()\n\nauroc_folds = []\naupr_folds = []\n\nseed = 2294\nprint(\"Seed: \", seed)\n\n# valid_index = np.random.randint(low=1, high=9)\n# print(\"Validation fold is: \", valid_index)\ntest_indices = [1,2,3,4,5,6,7,8]\n# test_indices.remove(valid_index)\n\nfor 
test_index in test_indices:\n\n torch.cuda.manual_seed_all(seed)\n\n train_indices = copy.deepcopy(test_indices)\n train_indices.remove(test_index)\n\n print()\n print(\"************ Test Fold \" + str(test_index) + \" ************\")\n print()\n\n test_hydrogens = eval(\"fold_\" + str(test_index) + \"_hydrogens\")\n test_labels = eval(\"fold_\" + str(test_index) + \"_labels\")\n\n # valid_hydrogens = eval(\"fold_\" + str(valid_index) + \"_hydrogens\")\n # valid_labels = eval(\"fold_\" + str(valid_index) + \"_labels\")\n\n train_hydrogens_tuple = tuple([eval('fold_' + str(x) + '_hydrogens') for x in train_indices])\n train_labels_tuple = tuple([eval('fold_' + str(x) + '_labels') for x in train_indices])\n train_hydrogens = np.vstack(train_hydrogens_tuple)\n train_labels = np.vstack(train_labels_tuple)\n\n test_fold_data = torch.from_numpy(test_hydrogens).float()\n train_fold_data = torch.from_numpy(train_hydrogens).float()\n # valid_fold_data = torch.from_numpy(valid_hydrogens).float()\n test_fold_labels = torch.from_numpy(test_labels).long()\n train_fold_labels = torch.from_numpy(train_labels).long()\n # valid_fold_labels = torch.from_numpy(valid_labels).long()\n\n model = model_ca()\n model.apply(init_weights)\n model = model.to(device)\n\n # optimizers\n adam = torch.optim.Adam(model.parameters(), lr=ETA, weight_decay=0.001)\n sgd = torch.optim.SGD(model.parameters(), lr=ETA, weight_decay=0.001, momentum=0.85, nesterov=True)\n optimizer = adam # selected optimizer\n\n # learning rate scheduler\n # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.4, patience=20, verbose=True,min_lr=1e-5)\n\n # weighted cross entropy loss function for training \n counter = Counter(train_fold_labels.numpy().T.reshape(1,-1)[0,:].tolist())\n mw = max([counter[x] for x in range(NUM_CLASSES)]) \n weight = torch.tensor([mw/counter[x] for x in range(NUM_CLASSES)]).to(device)\n # print (\"Weights: \", [mw/counter[x] for x in range(NUM_CLASSES)])\n loss_fn = torch.nn.CrossEntropyLoss(weight=weight)\n # loss_fn = FocalLoss(class_num=2, gamma=1.2, alpha=weight)\n\n # # weighted cross entropy loss for validation dataset\n # counter = Counter(valid_fold_labels.numpy().T.reshape(1,-1)[0,:].tolist())\n # mw = max([counter[x] for x in range(NUM_CLASSES)]) \n # weight = torch.tensor([mw/counter[x] for x in range(NUM_CLASSES)]).to(device)\n # # valid_loss_fn = torch.nn.CrossEntropyLoss(weight=weight)\n # valid_loss_fn = FocalLoss(class_num=2, gamma=1, alpha=weight)\n\n # scale all samples according to training set\n scaler = preprocessing.MinMaxScaler().fit(train_fold_data.numpy())\n train_fold_data_normalized = torch.from_numpy(scaler.transform(train_fold_data.numpy())).float().to(device)\n test_fold_data_normalized = torch.from_numpy(scaler.transform(test_fold_data.numpy())).float().to(device)\n # valid_fold_data_normalized = torch.from_numpy(scaler.transform(valid_fold_data.numpy())).float().to(device)\n\n # convert to test set to torch variables\n test_data_torch = test_fold_data_normalized\n test_labels_torch = test_fold_labels\n test_labels_torch = torch.transpose(test_labels_torch, 0, 1)\n test_labels = test_labels_torch.to(device)\n\n # convert to test set to torch variables\n # valid_data_torch = valid_fold_data_normalized\n # valid_labels_torch = valid_fold_labels\n # valid_labels_torch = torch.transpose(valid_labels_torch, 0, 1)\n # valid_labels = valid_labels_torch.to(device)\n\n train_labels = train_fold_labels.to(device)\n\n # training and validation log related\n 
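# per-epoch buffers: loss/accuracy on the training folds, AUROC/AUPR on the held-out test fold\n    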
train_loss_history = []\n train_acc_history = []\n valid_loss_history = []\n aupr_history = []\n auc_history = []\n max_accuracy = 0\n\n # in order to find the best model based on validation loss\n best_model = model\n min_validation_loss = 1e1\n\n model.train()\n for epoch in range(ITERATION):\n # Forward pass\n train_fold_pred = model(train_fold_data_normalized)\n # Compute and save loss.\n loss = loss_fn(train_fold_pred, train_labels.squeeze())\n train_loss_history.append(loss.item())\n # compute training accuracy\n train_acc = (torch.transpose(train_labels, 0, 1) == torch.max(train_fold_pred,1)[1]).sum().cpu().numpy()/float(len(train_fold_labels))\n train_acc_history.append(train_acc)\n # try model on test set\n with torch.no_grad():\n model.eval()\n\n # predict test dataset\n test_pred = model(test_data_torch)\n test_labels_pred = torch.max(test_pred,1)[1]\n test_labels_pred = test_labels_pred.cpu().numpy()\n # calculate auroc\n auc_ = roc_auc_score(test_labels.cpu()[0], test_pred.cpu().numpy()[:,1])\n auc_history.append(auc_)\n # calculate aupr\n precision, recall, thresh = precision_recall_curve(test_labels.cpu().numpy().T, test_pred.cpu().numpy()[:,1])\n aupr = auc(recall, precision)\n aupr_history.append(aupr)\n # calculate validation set loss\n # valid_fold_pred = model(valid_fold_data_normalized)\n # valid_pred = model(valid_data_torch)\n # valid_labels_pred = torch.max(valid_pred,1)[1]\n # valid_labels_pred = valid_labels_pred.cpu().numpy()\n # valid_loss = valid_loss_fn(valid_fold_pred, valid_labels.squeeze())\n\n model.train()\n\n # print(\"Epoch: \", epoch, \"\\tTraining Loss: \", loss.item())\n # print(\"Epoch: \", epoch, \"\\tTraining Loss: \", loss.item(), \"Validation Loss: \", valid_loss.item())\n # clear gradient history\n optimizer.zero_grad()\n # Backward pass\n loss.backward()\n # parameter update\n optimizer.step()\n # scheduler.step(valid_loss)\n # find best model based on validation loss\n # if epoch == 0:\n # min_validation_loss = valid_loss.item()\n # elif valid_loss.item() < min_validation_loss:\n # min_validation_loss = valid_loss.item()\n # best_model = copy.deepcopy(model)\n \n\n\n # clear gradient history\n optimizer.zero_grad()\n\n # use best model\n model = best_model.eval()\n\n # predict test set \n test_labels_pred_ = model(test_data_torch)\n test_labels_pred = torch.max(test_labels_pred_,1)[1].detach().cpu()\n test_labels_pred = test_labels_pred.numpy()\n\n test_labels = test_labels.cpu().numpy()\n\n # calculate various metrics\n # all metrics are calculated by taking aggressive as positive class\n auroc = auc_history[len(auc_history) - 1]\n aupr = aupr_history[len(aupr_history) - 1]\n\n # record the calculated metrics \n auroc_folds.append(auroc)\n aupr_folds.append(aupr)\n\n\n # class classification rate from confusion matrix\n print(\"AUROC: \", auroc)\n print(\"AUPR: \", aupr)\n\n model = None\n optimizer = None\n loss_fn = None\n loss_fn_2 = None\n valid_fold_data = None\n valid_fold_labels = None\n valid_labels_pred = None\n valid_pred = None\n train_fold_pred = None\n adam = None\n rmsprop = None\n scaler = None\n\nwith open(\"./logs/CNN/auroc_scores.txt\", \"w\") as f:\n for auroc in auroc_folds:\n f.write(\"%f\\n\" % (auroc))\nwith open(\"./logs/CNN/aupr_scores.txt\", \"w\") as f:\n for aupr in aupr_folds:\n f.write(\"%f\\n\" % 
(aupr))","sub_path":"reproduce/figure_2/benign_aggressive/seed_2251/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":10236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"288436455","text":"class Solution(object):\n def largestRectangleArea(self, heights):\n \"\"\"\n :type heights: List[int]\n :rtype: int\n \"\"\"\n stack = []\n maxarea=0\n i=0\n while(i<=len(heights)):\n if len(stack)==0 or (iheights[stack[0]]):\n stack.insert(0, i)\n i+=1\n else:\n popped=stack.pop(0)\n if(len(stack)==0):\n maxarea=max(maxarea, heights[popped]*i)\n else:\n maxarea=max(maxarea, heights[popped]*(i-stack[0]-1))\n return maxarea\n \n","sub_path":"Largest Rectangle in Histogram.py","file_name":"Largest Rectangle in Histogram.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"607062329","text":"n = int(input(\"Enter the length of the sequence: \")) # Do not change this line\n#næsta tala í röðinni er\n\nfirst_number = 0\nsecond_number = 1\nthird_number = 2\nmain_number = 0\nnum = 1\n\nwhile num <= n:\n if num == 1:\n print(1)\n num += 1\n elif num == 2:\n print(2)\n num += 1\n elif num > 2 and num <= n:\n main_number = first_number + second_number + third_number\n print(main_number)\n first_number = second_number\n second_number = third_number\n third_number = main_number\n num += 1\n\n\n ","sub_path":"sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"223558748","text":"#!/usr/bin/python\n# ----------------------------------------------------------------\n# File: add_gateways.py\n#\n# This script downloads the list of TTN gateways around a certain central point.\n\nimport os\n\nfilePath = '/var/www/html/coverage/js/gateways.js';\n \n# As file at filePath is deleted now, so we should check if file exists or not not before deleting them\nif os.path.exists(filePath):\n os.remove(filePath)\n\nimport urllib, json\nurl = \"https://www.thethingsnetwork.org/gateway-data/location?latitude=55.6599740&longitude=12.5912461&distance=2000000\"\nresponse = urllib.urlopen(url)\ndata = json.loads(response.read())\n#print data\nprint(\"The following Gateways are added to the coverage map:\")\n\nfor key in data:\n if 'description' in data[key].keys(): gateway_name=data[key]['description']\n else: gateway_name = \"unknown\"\n gtw_id=data[key]['id']\n print(gtw_id) \n print(gateway_name)\n lat = data[key]['location']['latitude'] \n lon = data[key]['location']['longitude']\n alt = data[key]['location']['altitude'] \n file = open(\"/var/www/html/coverage/js/gateways.js\",\"a\")\n file.write('markers.addLayer(L.marker([')\n file.write(\"%f,\" % lat)\n file.write(\"%f,\" % lon)\n file.write(\"]));\")\n file.close()\n\n\n\n","sub_path":"home/add_gateways.py","file_name":"add_gateways.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"317329902","text":"from aws_cdk import (\n core,\n aws_iam as iam,\n aws_s3 as s3,\n)\n\nfrom infra.build_pipeline_construct import BuildPipelineConstruct\nfrom infra.batch_pipeline_construct import BatchPipelineConstruct\nfrom infra.deploy_pipeline_construct import DeployPipelineConstruct\n\n\nclass PipelineStack(core.Stack):\n def __init__(\n self,\n scope: core.Construct,\n construct_id: str,\n 
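# feature flags selecting which sub-pipelines (build / batch / deploy) to provision\n        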
build_pipeline: bool,\n batch_pipeline: bool,\n deply_pipeline: bool,\n **kwargs,\n ) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Define required parmeters\n project_name = core.CfnParameter(\n self,\n \"SageMakerProjectName\",\n type=\"String\",\n description=\"The name of the SageMaker project.\",\n min_length=1,\n max_length=32,\n )\n project_id = core.CfnParameter(\n self,\n \"SageMakerProjectId\",\n type=\"String\",\n min_length=1,\n max_length=16,\n description=\"Service generated Id of the project.\",\n )\n\n # Get drift-pipeline parameters\n seed_bucket = self.resolve_ssm_parameter(\"CodeCommitSeedBucket\")\n seed_build_key = self.resolve_ssm_parameter(\"CodeCommitBuildKey\")\n seed_batch_key = self.resolve_ssm_parameter(\"CodeCommitBatchKey\")\n seed_deploy_key = self.resolve_ssm_parameter(\"CodeCommitDeployKey\")\n\n # Create the s3 artifact (name must be < 63 chars)\n artifact_bucket_name = (\n f\"sagemaker-project-{project_id.value_as_string}-{self.region}\"\n )\n s3_artifact = s3.Bucket(\n self,\n \"S3Artifact\",\n bucket_name=artifact_bucket_name,\n removal_policy=core.RemovalPolicy.DESTROY,\n )\n\n core.CfnOutput(self, \"ArtifactBucket\", value=s3_artifact.bucket_name)\n\n # Get the service catalog role for all permssions (if None CDK will create new roles)\n # CodeBuild and CodePipeline resources need to start with \"sagemaker-\" to be within default policy\n products_use_role_name = self.node.try_get_context(\"drift:ProductsUseRoleName\")\n if products_use_role_name:\n service_catalog_role = iam.Role.from_role_arn(\n self,\n \"ProductsUseRole\",\n f\"arn:{self.partition}:iam::{self.account}:role/{products_use_role_name}\",\n )\n # Use the service catalog role for all roles\n sagemaker_execution_role = service_catalog_role\n code_pipeline_role = service_catalog_role\n code_build_role = service_catalog_role\n cloudformation_role = service_catalog_role\n lambda_role = service_catalog_role\n event_role = service_catalog_role\n else:\n # Create unique scope roles per service, so that permissions can be added in build/deploy stacks\n sagemaker_execution_role = iam.Role(\n self,\n \"SageMakerExecutionRole\",\n assumed_by=iam.ServicePrincipal(\"sagemaker.amazonaws.com\"),\n path=\"/service-role/\",\n )\n code_pipeline_role = iam.Role(\n self,\n \"CodePipelineRole\",\n assumed_by=iam.ServicePrincipal(\"codepipeline.amazonaws.com\"),\n path=\"/service-role/\",\n )\n code_build_role = iam.Role(\n self,\n \"CodeBuildRole\",\n assumed_by=iam.ServicePrincipal(\"codebuild.amazonaws.com\"),\n path=\"/service-role/\",\n )\n cloudformation_role = iam.Role(\n self,\n \"CloudFormationRole\",\n assumed_by=iam.ServicePrincipal(\"cloudformation.amazonaws.com\"),\n path=\"/service-role/\",\n )\n lambda_role = iam.Role(\n self,\n \"LambdaRole\",\n assumed_by=iam.ServicePrincipal(\"lambda.amazonaws.com\"),\n path=\"/service-role/\",\n )\n event_role = iam.Role(\n self,\n \"EventRole\",\n assumed_by=iam.ServicePrincipal(\"events.amazonaws.com\"),\n path=\"/service-role/\",\n )\n\n # Add cloudformation to allow creating CW rules for re-training, and passing event role\n cloudformation_role.add_to_policy(\n iam.PolicyStatement(\n actions=[\n \"events:DeleteRule\",\n \"events:DescribeRule\",\n \"events:PutRule\",\n \"events:PutTargets\",\n \"events:RemoveTargets\",\n ],\n resources=[\"arn:aws:events:*:*:rule/sagemaker-*\"],\n )\n )\n cloudformation_role.add_to_policy(\n iam.PolicyStatement(\n actions=[\n \"iam:PassRole\",\n ],\n resources=[event_role.role_arn],\n )\n )\n\n # Add 
cloudwatch logs\n logs_policy = iam.PolicyStatement(\n actions=[\n \"logs:CreateLogGroup\",\n \"logs:CreateLogStream\",\n \"logs:PutLogEvents\",\n ],\n resources=[\"*\"],\n )\n lambda_role.add_to_policy(logs_policy)\n\n # Create a policy statement for SM and ECR pull\n sagemaker_policy = iam.Policy(\n self,\n \"SageMakerPolicy\",\n document=iam.PolicyDocument(\n statements=[\n logs_policy,\n iam.PolicyStatement(\n actions=[\"sagemaker:*\"],\n not_resources=[\n \"arn:aws:sagemaker:*:*:domain/*\",\n \"arn:aws:sagemaker:*:*:user-profile/*\",\n \"arn:aws:sagemaker:*:*:app/*\",\n \"arn:aws:sagemaker:*:*:flow-definition/*\",\n ],\n ),\n iam.PolicyStatement(\n actions=[\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:Describe*\",\n \"ecr:GetAuthorizationToken\",\n \"ecr:GetDownloadUrlForLayer\",\n ],\n resources=[\"*\"],\n ),\n iam.PolicyStatement(\n actions=[\n \"cloudwatch:PutMetricData\",\n ],\n resources=[\"*\"],\n ),\n iam.PolicyStatement(\n actions=[\n \"s3:AbortMultipartUpload\",\n \"s3:DeleteObject\",\n \"s3:GetBucket*\",\n \"s3:GetObject*\",\n \"s3:List*\",\n \"s3:PutObject*\",\n ],\n resources=[\n s3_artifact.bucket_arn,\n f\"{s3_artifact.bucket_arn}/*\",\n ],\n ),\n iam.PolicyStatement(\n actions=[\"iam:PassRole\"],\n resources=[sagemaker_execution_role.role_arn],\n ),\n ]\n ),\n )\n # # SageMaker needs to manage pipelines, model package groups\n sagemaker_policy.attach_to_role(sagemaker_execution_role)\n # Code build needs to query model package groups and artifacts\n sagemaker_policy.attach_to_role(code_build_role)\n # CloudFormation creates models and endpoints\n sagemaker_policy.attach_to_role(cloudformation_role)\n # Lambda needs to describe SM and put metrics\n sagemaker_policy.attach_to_role(lambda_role)\n\n # Define an environment object to pass to build\n env = core.Environment(account=self.account, region=self.region)\n\n # Define the repository name and branch\n branch_name = \"main\"\n\n if build_pipeline:\n # Require a schedule parameter (must be cron, otherwise will trigger every time rate is enabled/disabled)\n # https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html\n retrain_schedule = core.CfnParameter(\n self,\n \"RetrainSchedule\",\n type=\"String\",\n description=\"The expression to retrain schedule. Defaults to first day of the month.\",\n default=\"cron(0 12 1 * ? *)\", # 1st of the month at 12am\n min_length=1,\n )\n BuildPipelineConstruct(\n self,\n \"build\",\n env=env,\n sagemaker_execution_role=sagemaker_execution_role,\n code_pipeline_role=code_pipeline_role,\n code_build_role=code_build_role,\n cloudformation_role=cloudformation_role,\n event_role=event_role,\n lambda_role=lambda_role,\n s3_artifact=s3_artifact,\n branch_name=branch_name,\n project_id=project_id.value_as_string,\n project_name=project_name.value_as_string,\n seed_bucket=seed_bucket,\n seed_key=seed_build_key,\n retrain_schedule=retrain_schedule.value_as_string,\n )\n\n if batch_pipeline:\n batch_schedule = core.CfnParameter(\n self,\n \"BatchSchedule\",\n type=\"String\",\n description=\"The expression to batch schedule. Defaults to every day.\",\n default=\"cron(0 12 * * ? 
*)\", # Every day at 12am\n min_length=1,\n )\n BatchPipelineConstruct(\n self,\n \"batch\",\n env=env,\n sagemaker_execution_role=sagemaker_execution_role,\n code_pipeline_role=code_pipeline_role,\n code_build_role=code_build_role,\n cloudformation_role=cloudformation_role,\n event_role=event_role,\n lambda_role=lambda_role,\n s3_artifact=s3_artifact,\n branch_name=branch_name,\n project_id=project_id.value_as_string,\n project_name=project_name.value_as_string,\n seed_bucket=seed_bucket,\n seed_key=seed_batch_key,\n batch_schedule=batch_schedule.value_as_string,\n )\n\n if deply_pipeline:\n DeployPipelineConstruct(\n self,\n \"deploy\",\n sagemaker_execution_role=sagemaker_execution_role,\n code_pipeline_role=code_pipeline_role,\n code_build_role=code_build_role,\n cloudformation_role=cloudformation_role,\n event_role=event_role,\n s3_artifact=s3_artifact,\n branch_name=branch_name,\n project_id=project_id.value_as_string,\n project_name=project_name.value_as_string,\n seed_bucket=seed_bucket,\n seed_key=seed_deploy_key,\n )\n\n def resolve_ssm_parameter(self, key: str):\n parameter_name = self.node.try_get_context(f\"drift:{key}\")\n return core.CfnDynamicReference(\n core.CfnDynamicReferenceService.SSM, parameter_name\n ).to_string()\n\n\nclass BatchPipelineStack(PipelineStack):\n \"\"\"Creates a Pipeline for batch deployment\"\"\"\n\n def __init__(\n self,\n scope: core.Construct,\n construct_id: str,\n **kwargs,\n ) -> None:\n super().__init__(scope, construct_id, True, True, False, **kwargs)\n\n\nclass DeployPipelineStack(PipelineStack):\n \"\"\"Creates a Pipelinfe for real-time deployment\"\"\"\n\n def __init__(\n self,\n scope: core.Construct,\n construct_id: str,\n **kwargs,\n ) -> None:\n super().__init__(scope, construct_id, True, False, True, **kwargs)\n","sub_path":"infra/pipeline_stack.py","file_name":"pipeline_stack.py","file_ext":"py","file_size_in_byte":12580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"373193249","text":"# coding=utf-8\n# Copyright 2014 Foursquare Labs Inc. 
All Rights Reserved.\n\nfrom __future__ import (\n absolute_import,\n division,\n generators,\n nested_scopes,\n print_function,\n unicode_literals,\n with_statement,\n)\n\nfrom functools import total_ordering\nimport logging\nimport re\n\n\nlogger = logging.getLogger(__name__)\n\n\n@total_ordering\nclass MavenVersion(object):\n MAVEN_VERSION_REGEX = re.compile(\n r'(?P<major>\\d+)\\.'\n r'(?P<minor>\\d+)'\n r'(?P<rev>\\.\\d+)?'\n r'(?P<qualifier_or_build>-\\w+)?'\n )\n\n def __init__(self, version_str):\n self._version_str = version_str\n match = self.MAVEN_VERSION_REGEX.match(self._version_str)\n if not match:\n raise Exception('Invalid Maven version string: {}'.format(self._version_str))\n self.major = int(match.group('major'))\n self.minor = int(match.group('minor'))\n rev = match.group('rev')\n if rev:\n self.rev = int(rev[1:])\n else:\n self.rev = 0\n qualifier_or_build = match.group('qualifier_or_build')\n if qualifier_or_build:\n qualifier_or_build = qualifier_or_build[1:]\n if re.match(r'\\d+', qualifier_or_build):\n self.build = int(qualifier_or_build)\n self.qualifier = None\n else:\n self.build = None\n self.qualifier = qualifier_or_build\n else:\n self.build = 0\n self.qualifier = None\n\n def __lt__(self, rhs):\n if (self.major, self.minor, self.rev) != (rhs.major, rhs.minor, rhs.rev):\n return (self.major, self.minor, self.rev) < (rhs.major, rhs.minor, rhs.rev)\n elif self.qualifier is None and rhs.qualifier is not None:\n return True\n elif rhs.qualifier is None and self.qualifier is not None:\n return False\n elif self.qualifier is not None and rhs.qualifier is not None:\n return self.qualifier.lower() < rhs.qualifier.lower()\n else:\n return self.build < rhs.build\n\n def __eq__(self, rhs):\n self_tuple = (self.major, self.minor, self.rev, self.qualifier, self.build)\n rhs_tuple = (rhs.major, rhs.minor, rhs.rev, rhs.qualifier, rhs.build)\n return self_tuple == rhs_tuple\n\n def __repr__(self):\n return 'MavenVersion{}'.format((self.major, self.minor, self.rev, self.qualifier, self.build))\n\n def __str__(self):\n return self._version_str\n\n\nclass MavenVersionRangeRef(object):\n \"\"\"A container and parser for Maven Version Range specs.\n\n See: http://docs.oracle.com/middleware/1212/core/MAVEN/maven_version.htm#MAVEN402\n or http://docs.codehaus.org/display/MAVEN/Dependency+Mediation+and+Conflict+Resolution#DependencyMediationandConflictResolution-DependencyVersionRanges\n or any of the other variously undated or outdated or ignored documentations of this spec.\n \"\"\"\n\n # e.g. '1.0'. \"Suggested\" because someone thought it was a good idea to make '1.0' the\n # \"eh, whatever\" spec and '[1.0]' the \"no, seriously\" spec.\n SUGGESTED_VERSION_REGEX = re.compile(r'^([^\\[\\](),<>=]+)$')\n\n # e.g. '[1.0]'. See above.\n EXACT_RANGE_SPEC_REGEX = re.compile(r'^\\[{}\\]$'.format(SUGGESTED_VERSION_REGEX.pattern))\n\n # Just for code cleanliness.
Matches '[foo]', '(foo)', '[foo)', '(foo]'.\n RANGE_REF_PATTERN = r'[\\[(].*?[\\]|)]'\n\n # A comma delimited list of the above pattern, with some generous whitespace guards.\n RANGES_REGEX = re.compile(\n r'^\\s*{range_ref_pattern}'\n r'(?:\\s*,{range_ref_pattern}\\s*)*$'\n .format(range_ref_pattern=RANGE_REF_PATTERN)\n )\n\n # The same as `RANGE_REF_PATTERN`, but pattern matched into component parts.\n RANGE_REF_REGEX = re.compile(\n r'(?P<begin_range>\\[|\\()'\n r'(?P<range_content>.*?)'\n r'(?P<end_range>\\]|\\))'\n )\n\n # Very liberally whitespace guarded pattern match of \"range_content\" in the above pattern.\n # Matches ',foo', 'foo,', 'foo,bar', etc.\n RANGE_CONTENT_REGEX = re.compile(\n r'^\\s*'\n r'(?P<left_version>.*?)'\n r'\\s*,\\s*'\n r'(?P<right_version>.*?)'\n r'\\s*$'\n )\n\n def __init__(self, ref_str):\n self._ref_str = ref_str.strip()\n self._parse_ref_to_matchers()\n\n def _parse_ref_to_matchers(self):\n # Special case: This is just a version with no extra bells and whistles.\n # Also the most common case.\n match = self.SUGGESTED_VERSION_REGEX.match(self._ref_str)\n if match:\n self._matchers = [lambda candidate: candidate == MavenVersion(match.group(1))]\n return\n\n # The \"exact, no really\" spec. We handle this and the loose \"this one, I guess\" spec\n # identically.\n match = self.EXACT_RANGE_SPEC_REGEX.match(self._ref_str)\n if match:\n self._matchers = [lambda candidate: candidate == MavenVersion(match.group(1))]\n return\n\n # A sequence of range specs\n matches = self.RANGES_REGEX.findall(self._ref_str)\n if not matches:\n raise Exception('Invalid Maven Version Range ref: {0}'.format(self._ref_str))\n\n self._matchers = []\n for matched_substr in matches:\n range_match = self.RANGE_REF_REGEX.match(matched_substr)\n begin_range_token = range_match.group('begin_range')\n end_range_token = range_match.group('end_range')\n range_content_match = self.RANGE_CONTENT_REGEX.match(range_match.group('range_content'))\n\n left_version = range_content_match.group('left_version')\n left_matcher = None\n if left_version:\n left_maven_version = MavenVersion(left_version)\n if begin_range_token == '[':\n left_matcher = lambda candidate, v=left_maven_version: v <= candidate\n else:\n left_matcher = lambda candidate, v=left_maven_version: v < candidate\n else:\n left_matcher = lambda _: True\n\n right_version = range_content_match.group('right_version')\n right_matcher = None\n if right_version:\n right_maven_version = MavenVersion(right_version)\n if end_range_token == ']':\n right_matcher = lambda candidate, v=right_maven_version: candidate <= v\n else:\n right_matcher = lambda candidate, v=right_maven_version: candidate < v\n else:\n right_matcher = lambda _: True\n # bind each range's matchers via default args so every closure keeps its own pair\n self._matchers.append(lambda mv, lm=left_matcher, rm=right_matcher: lm(mv) and rm(mv))\n\n def matches(self, maven_version):\n for matcher in self._matchers:\n if matcher(maven_version):\n return True\n return False\n","sub_path":"src/python/fsqio/pants/pom/maven_version.py","file_name":"maven_version.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"581235759","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Neural Networks\n\n# Yujue Wang wangy66@rpi.edu\n\n# In[550]:\n\n\nimport numpy as np\nimport math\nimport sys\nimport pandas as pd\nfrom random import uniform, randrange, sample\n\n\n# In[569]:\n\n\nTrain = pd.read_csv(sys.argv[1], sep=\",\", header=None)#sys.argv[1]\nTest = pd.read_csv(sys.argv[2], sep=\",\", header=None)#sys.argv[2]\nm = int(sys.argv[3]) #sys.argv[3]\nn = float(sys.argv[4]) #sys.argv[4]\nepochs = 
int(sys.argv[5]) #sys.argv[5], maxtier\n\n\n# Function Module\n\n# In[552]:\n\n\n# function for construct D and class array\ndef initializer(rawData):\n col = rawData.shape[1]\n row = rawData.shape[0]\n dataClass = rawData[col - 1].values.reshape(row, 1)\n D = D = {'X1':rawData[0],'X2': rawData[1],'X3': rawData[2],'X4': rawData[3], 'X5': rawData[4], 'X6': rawData[5], 'X7': rawData[6], 'X8': rawData[7], 'X9': rawData[8]}\n D = pd.DataFrame(D).values\n numClass = np.amax(dataClass) - np.amin(dataClass) + 1 # p\n return D, dataClass, numClass\n\n\n# In[553]:\n\n\n# initialize bias vectors\ndef biasVector(m, p):\n bh = []\n bo = []\n for i in range(m):\n bh.append(uniform(0.01, 1))# possible modified\n for i in range(p):\n bo.append(uniform(0.01, 1))\n return np.array(bh), np.array(bo)\n\n\n# In[554]:\n\n\n# initialize weight matrices\ndef weightMatrice(d, m, p):\n wh = np.zeros((d, m))\n wo = np.zeros((m, p))\n for i in range(d):\n for j in range(m):\n wh[i][j] = uniform(-0.01, 0.01)\n if (wh[i][j] == 0):\n j -= 1\n \n for i in range(m):\n for j in range(p):\n wo[i][j] = uniform(-0.01, 0.01)\n if (wo[i][j] == 0):\n j -= 1\n return wh, wo\n\n\n# In[555]:\n\n\n# generate true response vector\ndef generateY(dataClass, row, numClass):\n y = []\n for i in range(row):\n yi = np.zeros(numClass)\n if (dataClass[i] == 1):\n yi[0] = 1\n elif (dataClass[i] == 2):\n yi[1] = 1\n else:\n yi[2] = 1\n y.append(np.array(yi))\n return np.array(y)\n\n\n# In[556]:\n\n\ndef softmax(x):\n net_sum = 0\n for i in range(x.shape[0]):\n net_sum += np.exp(x[i]) #get denominator of softmax function\n o = np.zeros([x.shape[0],1])\n for j in range(x.shape[0]):\n o[j] = np.exp(x[j])/net_sum #neuron vec given hidden neuron vector z\n return o\n\n\n# In[557]:\n\n\n# Feed-forward phase\ndef feedForwardZ(b, w, xi):\n netk = b + w.T.dot(xi)\n return netk, np.maximum(0, netk)\n\ndef feedForwardO(b, w, zi):\n netj = b + np.dot(w.T,zi) #output layer neuron \n o = softmax(netj) #neuron vec given hidden neuron vector z\n return o\n\n\n# In[558]:\n\n\n# Backpropagation phase\n# output layer, softmax\ndef calNetGradientO(oi, yi):\n res = oi.T - yi\n return res[0]\n\n\n# In[559]:\n\n\n# hidden layer, ReLU activation\ndef calNetGradientH(wo, neto, netk):\n partials = np.zeros_like(netk)\n for i in range(netk.shape[0]):\n if netk[i]<=0:\n partials[i] = 0\n else:\n partials[i] = 1\n return np.multiply(np.dot(wo, neto),partials)[0]\n\n\n# In[560]:\n\n\n# MLP Training\ndef MLPTraining(D,dataClass, m, n, epochs, numClass):\n # implementation\n bh, bo = biasVector(m, numClass)\n wh, wo = weightMatrice(D.shape[1], m, numClass)\n row = D.shape[0]\n y = generateY(dataClass, row, numClass)\n t = 0 #iteration counter\n randomList = sample(range(row), row)\n while (t < epochs):\n for i in randomList:\n # Feed-forward phase\n netk, zi = feedForwardZ(bh, wh, D[i])\n #print(\"zi is\", zi)\n oi = feedForwardO(bo, wo, zi)\n #print(\"oi is \", oi)\n \n # Backpropagation phase\n neto = calNetGradientO(oi, y[i])\n #print(\"neto is \", neto[0])\n neth = calNetGradientH(wo, neto, netk.reshape(m, 1))\n #print(\"neth is \", neth)\n\n # Gradient descent for bias vectors\n graDescentBo = neto\n #print(graDescentBo.shape)\n #print(n)\n bo = bo - n * graDescentBo\n \n graDescentBh = neth\n #print(graDescentBh)\n #print(bh)\n bh = bh - n * graDescentBh\n \n # Gradient descent for weight matrices\n #print(neth.shape)\n graDescentWo = zi.reshape(1, len(zi)) * neto.reshape(len(neto), 1)\n wo = wo - n * graDescentWo.T\n \n graDescentWh = D[i].reshape(1, len(D[i])) * 
neth.reshape(len(neth), 1)\n wh = wh - n * graDescentWh.T\n \n t += 1\n\n return bh, bo, wo, wh\n\n\n# ### Train\n\n# In[561]:\n\n\nD, dataClass, numClass = initializer(Train)\n\n\n# In[562]:\n\n\nbh, bo, wo, wh = MLPTraining(D,dataClass, m, n, epochs, numClass)\n\n\n# In[563]:\n\n\nprint(\"bh is \",bh, \"\\n\")\nprint(\"bo is \",bo, \"\\n\")\nprint(\"wo is \",wo, \"\\n\")\nprint(\"wh is \",wh, \"\\n\")\n\n\n# ### Test\n\n# In[564]:\n\n\nDT, dataClassT, numClassT = initializer(Test)\n\n\n# In[565]:\n\n\npredict = []\nfor i in DT:\n netk, zi = feedForwardZ(bh, wh, i)\n oi = feedForwardO(bo, wo, zi)\n predict.append(oi)\n\n\n# In[566]:\n\n\ndef findIndex(predict):\n res = []\n for i in predict:\n target = np.amax(i)\n for j in range(len(i)):\n if (i[j] == target):\n res.append(j + 1)\n return res\n\n\n# In[567]:\n\n\npredictY = findIndex(predict)\n\n\n# In[568]:\n\n\ncnt = 0\nfor i in range(len(predictY)):\n if (predictY[i] == dataClassT[i]):\n cnt += 1\nprint(\"accuracy is \", cnt / len(predictY))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Neural Networks/assign4.py","file_name":"assign4.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"573702865","text":"from django.core.management.base import BaseCommand, CommandError\r\nfrom quest.models import *\r\nimport os\r\nimport server.settings\r\nimport quest.google_api.api\r\nimport random\r\n\r\nclass Command(BaseCommand):\r\n help = 'Update views count for quests'\r\n\r\n SCOPE = ['https://www.googleapis.com/auth/analytics.readonly']\r\n SERVICE_ACCOUNT_EMAIL = 'service@ageless-granite-156009.iam.gserviceaccount.com'\r\n KEY_FILE_LOCATION = os.path.join(server.settings.BASE_DIR, 'quest', 'google_api', 'Analytics-691f0dc58d05.p12')\r\n\r\n def updateDB(self, results):\r\n if results and isinstance(results, dict) and 'rows' in results:\r\n rows = results['rows']\r\n Quest.objects.all().update(view_count=0)\r\n for path, v in {r[0]: {'views': r[1], 'uviews': r[2]} for r in rows}.items():\r\n url = filter(None, path.rsplit('/'))[-1]\r\n # print url, v['views'], v['uviews']\r\n try:\r\n q = Quest.objects.get(seo_url=url)\r\n q.view_count = int(v['views'])\r\n q.save()\r\n except:\r\n pass\r\n\r\n visitors, created = Settings.objects.get_or_create(name='visitors_count')\r\n min, min_created = Settings.objects.get_or_create(name='visitors_min_count')\r\n max, max_created = Settings.objects.get_or_create(name='visitors_max_count')\r\n dx, dx_created = Settings.objects.get_or_create(name='visitors_dx_count')\r\n\r\n if min_created:\r\n min.value = 7\r\n min.save()\r\n if max_created:\r\n max.value = 38\r\n max.save()\r\n if dx_created:\r\n dx.value = 3\r\n dx.save()\r\n\r\n val = 0\r\n\r\n if created:\r\n val = random.randint(int(min.value), int(max.value))\r\n else:\r\n val = random.randint(int(visitors.value) - int(dx.value), int(visitors.value) + int(dx.value))\r\n\r\n if val > int(max.value):\r\n val = max.value\r\n elif val < int(min.value):\r\n val = min.value\r\n\r\n visitors.value = val\r\n\r\n visitors.save()\r\n\r\n\r\n\r\n\r\n def add_arguments(self, parser):\r\n pass\r\n # parser.add_argument('poll_id', nargs='+', type=int)\r\n\r\n def handle(self, *args, **options):\r\n # Authenticate and construct service.\r\n service = quest.google_api.api.get_service('analytics', 'v3', Command.SCOPE,\r\n Command.KEY_FILE_LOCATION,\r\n Command.SERVICE_ACCOUNT_EMAIL)\r\n profile = quest.google_api.api.get_first_profile_id(service)\r\n results 
= quest.google_api.api.get_pageviews(service, profile)\r\n self.updateDB(results)","sub_path":"quest/management/commands/updateQuestViews.py","file_name":"updateQuestViews.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"323999322","text":"#! /usr/bin/python\n# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n# -*- coding: utf-8 -*-\n\nfrom gi.repository import Gtk\n\nfrom conexion import bd\n\nimport clientes\n\nimport factura\n\nimport impresion\n\nclass main:\n\n def __init__(self):\n\n#declaracion de widgets\n b = Gtk.Builder()\n b.add_from_file(\"taller.glade\")\n self.ventanaPrincipal = b.get_object(\"ventanaPrincipal\")\n self.ventanaNeocli = b.get_object(\"ventanaNeocli\")\n self.ventanaTaller = b.get_object(\"ventanaTaller\")\n self.ventanaVentas = b.get_object(\"ventanaVentas\")\n self.btnSalircli = b.get_object(\"btnSalircli\")\n self.btnNeocli = b.get_object(\"btnNeocli\")\n self.btnGrabcli = b.get_object(\"btnGrabcli\")\n self.btnSalir = b.get_object(\"btnSalir\")\n self.listCliente = b.get_object(\"listCliente\")\n self.dnicli = b.get_object(\"entDni\")\n self.apelcli = b.get_object(\"entApel\")\n self.nomcli = b.get_object(\"entNom\")\n self.dircli = b.get_object(\"entDir\")\n self.loccli = b.get_object(\"entLoc\")\n self.provcli = b.get_object(\"cbProv\")\n self.cpcli = b.get_object(\"entCp\")\n self.movcli = b.get_object(\"entMov\")\n self.telcli = b.get_object(\"entTel\")\n self.mailcli = b.get_object(\"entMail\")\n self.sicli = b.get_object(\"rtbSi\")\n self.nocli = b.get_object(\"rtbNo\")\n self.entMatrifac = b.get_object(\"entMatri\")\n self.entModelfac = b.get_object(\"entModel\")\n self.entFechafac = b.get_object(\"entFecha\")\n self.entMarcafac = b.get_object(\"entMarca\")\n self.lbldnifac = b.get_object(\"lbldnifac\")\n self.lblidfac = b.get_object(\"lblidfac\")\n self.avisodni = b.get_object(\"dlgAvis\")\n self.btnAceptar = b.get_object(\"btnAceptar\")\n self.listCliente = b.get_object(\"listCiente\")\n self.trewCliente = b.get_object(\"trwCliente\")\n self.listFactura = b.get_object(\"listFactura\")\n self.trewFactura = b.get_object(\"trewFactura\") \n self.btnBorrarcli = b.get_object(\"btnBorrarcli\")\n self.btnGrabarfac = b.get_object(\"btnGrabarfac\")\n self.btnVentas = b.get_object(\"btnVentas\")\n self.btnSalirven = b.get_object(\"btnSalirven\")\n self.lblMatriven = b.get_object(\"lblMatriven\")\n self.lblFacturav = b.get_object(\"lblFacturav\")\n self.entConce = b.get_object(\"entConce\")\n self.entPrecio = b.get_object(\"entPrecio\")\n self.trewVentas = b.get_object(\"trewVentas\")\n self.listaVentas = b.get_object(\"listVentas\")\n self.btnImprimir = b.get_object(\"btnImprimir\")\n self.menubar = b.get_object(\"menubar\")\n self.listaProv=b.get_object(\"listaProv\")\n \n self.ventanaPrincipal.maximize()\n self.ventanaNeocli.maximize()\n self.ventanaTaller.maximize()\n self.ventanaVentas.maximize()\n self.ventanaPrincipal.show()\n clientes.mostrar(self.listCliente, self.trewCliente)\n \n\n dic = {\"on_btnNeocli_clicked\": self.on_btnNeocli_clicked,\n \"on_btnSalir_clicked\": self.on_btnSalir_clicked,\n \"on_btnSalircli_clicked\": self.on_btnSalircli_clicked,\n \"on_btnGrabcli_clicked\": self.on_btnGrabcli_clicked,\n \"on_ventanaPrincipal_destroy\": self.on_ventanaPrincipal_destroy,\n \"on_ventanaNeocli_delete_event\": 
self.on_ventanaNeocli_delete_event,\n \"on_entDni_focus_out_event\": self.on_entDni_focus_out_event,\n \"on_btnAceptar_clicked\": self.on_btnAceptar_clicked,\n \"on_rbtNo_toggled\": self.on_rbtNo_toggled,\n \"on_btnBorrarcli_clicked\": self.on_btnBorrarcli_clicked,\n \"on_trwCliente_cursor_changed\": self.on_trwCliente_cursor_changed,\n \"on_trewFactura_cursor_changed\": self.on_trewFactura_cursor_changed,\n \"on_btnTaller_clicked\": self.on_btnTaller_clicked,\n \"on_btnSalirtaller_clicked\": self.on_btnSalirtaller_clicked,\n \"on_btnGrabarfac_clicked\": self.on_btnGrabarfac_clicked,\n \"on_ventanaTaller_destroy\": self.on_ventanaTaller_destroy,\n \"on_ventanaTaller_delete_event\": self.on_ventanaTaller_delete_event,\n \"on_btnVentas_clicked\": self.on_btnVentas_clicked,\n \"on_btnSalirven_clicked\": self.on_btnSalirven_clicked,\n \"on_ventanaVentas_destroy\": self.on_ventanaVentas_destroy,\n \"on_btnSalirven_delete_event\": self.on_btnSalirven_delete_event,\n \"on_btnGrabarven_clicked\": self.on_btnGrabarven_clicked,\n \"on_imagemenuitem5_activate\": self.on_imagemenuitem5_activate,\n \"on_btnImprimir_clicked\": self.on_btnImprimir_clicked\n }\n\n b.connect_signals(dic)\n\n# declaration and implementation of the signal handlers\n def on_btnImprimir_clicked(self, widget):\n impresion.imprimir(self.dataf, self.datam, self.data)\n \n def on_imagemenuitem5_activate(self, widget):\n Gtk.main_quit()\n\n def on_btnGrabarven_clicked(self, widget, Data=None):\n self.Conce = self.entConce.get_text()\n self.Precio = self.entPrecio.get_text()\n if factura.Grabarven(self.dataf, self.Conce, self.Precio) == False:\n self.avisodni.show()\n factura.limpiarven(self.entConce, self.entPrecio)\n factura.mostrarven(self.listaVentas, self.trewVentas, self.dataf)\n \n def on_btnVentas_clicked(self, widget):\n self.lblMatriven.set_text(self.datam)\n self.lblFacturav.set_text(self.dataf)\n factura.mostrarven(self.listaVentas, self.trewVentas, self.dataf)\n self.ventanaVentas.show()\n \n def on_ventanaVentas_destroy(self, widget):\n self.ventanaVentas.hide()\n return True\n \n def on_btnTaller_clicked(self, widget):\n self.lbldnifac.set_text(self.data)\n factura.mostrar(self.listFactura, self.trewFactura, self.data)\n self.ventanaTaller.show()\n \n def on_btnSalirven_clicked(self, widget):\n self.ventanaVentas.hide()\n return True\n \n def on_btnSalirven_delete_event(self, widget):\n self.ventanaVentas.hide()\n return True \n \n def on_ventanaTaller_destroy(self, widget):\n self.ventanaTaller.hide()\n return True\n \n def on_ventanaTaller_delete_event(self, widget, Data=None):\n self.ventanaTaller.hide()\n return True\n \n def on_btnSalirtaller_clicked(self, widget):\n self.ventanaTaller.hide()\n return True\n \n def on_btnGrabarfac_clicked(self, widget):\n self.dnifac = self.data\n self.matrifac = self.entMatrifac.get_text()\n self.matrifac = self.matrifac.upper()\n self.marcafac = self.entMarcafac.get_text()\n self.marcafac = self.marcafac.capitalize()\n self.modelfac = self.entModelfac.get_text()\n self.modelfac = self.modelfac.capitalize()\n self.fechafac = self.entFechafac.get_text()\n if factura.Grabarfac(self.dnifac, self.matrifac, self.marcafac, self.modelfac, self.fechafac) == False:\n self.avisodni.show()\n factura.limpiarfac(self.lbldnifac, self.entMatrifac, self.entMarcafac, self.entModelfac, self.entFechafac, self.lblidfac)\n factura.mostrar(self.listFactura, self.trewFactura, self.data)\n \n def on_trewFactura_cursor_changed(self, widget):\n self.seleccion = self.trewFactura.get_selection()\n model, iter = self.seleccion.get_selected()\n self.dataf = model[iter][0]\n
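 # column 0 of the selected factura row is its id, column 2 the vehicle plate; both are stored as str\n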
self.dataf = str(self.dataf)\n self.datam = model[iter][2]\n self.datam = str(self.datam)\n \n \n def on_trwCliente_cursor_changed(self, widget, Data=None):\n self.seleccion = self.trewCliente.get_selection()\n model, iter = self.seleccion.get_selected()\n self.data = model[iter][0]\n self.data = str(self.data)\n \n def on_btnBorrarcli_clicked(self, widget):\n clientes.Borrarcli(self.data)\n clientes.mostrar(self.listCliente, self.trewCliente) \n \n def on_btnNeocli_clicked(self, widget, data=None):\n self.llenarCombo()\n self.ventanaNeocli.show()\n self.pub = \"no\"\n\n def on_btnSalir_clicked(self, widget):\n Gtk.main_quit()\n\n def on_ventanaPrincipal_destroy(self, widget):\n Gtk.main_quit()\n\n def on_ventanaNeocli_delete_event(self, widget, data=None):\n self.ventanaNeocli.hide()\n return True\n\n def on_btnAceptar_clicked(self, widget):\n self.avisodni.hide()\n return True\n\n def on_entDni_focus_out_event(self, widget, Data=None):\n self.dni = self.dnicli.get_text()\n self.dni = self.dni.upper()\n self.dnicli.set_text(self.dni)\n if (clientes.validoDNI(self.dni) is False and self.dni != \"\"):\n self.avisodni.show()\n self.dnicli.set_text(\"\")\n\n def on_btnSalircli_clicked(self, widget, Data=None):\n self.ventanaNeocli.hide()\n return True\n\n def on_rbtNo_toggled(self, widget, Data=None):\n if widget.get_active():\n self.pub = \"no\"\n else:\n self.pub = \"si\"\n \n def llenarCombo(self):\n cursor = bd.cursor()\n rows=cursor.execute(\"SELECT provincia FROM provincias\")\n for row in rows:\n self.listaProv.append([row[0]])\n \n def on_btnGrabcli_clicked(self, widget):\n self.dni = self.dnicli.get_text()\n self.apel = self.apelcli.get_text()\n self.nom = self.nomcli.get_text()\n self.dir = self.dircli.get_text()\n self.loc = self.loccli.get_text()\n \n tree_iter = self.provcli.get_active_iter()\n if tree_iter != None:\n model = self.provcli.get_model()\n self.prov = model[tree_iter][0]\n \n self.cp = self.cpcli.get_text()\n self.mov = self.movcli.get_text()\n self.tel = self.telcli.get_text()\n self.mail = self.mailcli.get_text()\n \n if clientes.Grabarcli(self.dni, self.apel, self.nom, self.dir, self.loc, self.prov, self.cp, self.mov, self.tel, self.mail, self.pub) == False:\n self.avisodni.show()\n \n clientes.limpiarcli(self.dnicli, self.apelcli, self.nomcli, self.dircli, self.loccli, self.provcli, self.cpcli, self.movcli, self.telcli, self.mailcli)\n clientes.mostrar(self.listCliente, self.trewCliente)\n\nif __name__ == \"__main__\":\n main = main()\n Gtk.main()","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"268764275","text":"\n\n\n\n\nbtag_veto_soft_bjet = '(nBSoftJet == 0 )'\nbtag_one_soft_bjet = '(nBSoftJet == 1 )'\nbtag_one_or_more_soft_bjet = '(nBSoftJet >= 1 )'\nbtag_veto_hard_bjet = '(nBHardJet == 0 )'\nbtag_one_hard_bjet = '(nBHardJet == 1 )'\nbtag_one_or_more_hard_bjet = '(nBHardJet >= 1 )'\nbtag_veto_bjet = '((nBHardJet + nBSoftJet)== 0 )'\nbtag_one_bjet = '((nBHardJet + nBSoftJet)== 1 )'\nbtag_one_or_more_bjet = '((nBHardJet + nBSoftJet)>= 1 )'\nbtag_two_or_more_bjet = '((nBHardJet + nBSoftJet)>= 2 )'\nbtag_sr1_bjet = btag_veto_bjet\nbtag_sr2_bjet = \"( (nBSoftJet>=1) && (nBHardJet==0) )\"\nbtag_cr1_bjet = btag_veto_bjet\nbtag_cr2_bjet = \"( (nBSoftJet>=1) && (nBHardJet==0) )\"\nbtag_crtt1_bjet = \"( (nBSoftJet==0) && (nBHardJet==1) )\"\nbtag_crtt2_bjet = \"( (nBJet>=2) && (nBHardJet>=1) )\"\n\n\n\n\nsf_veto_soft_bjet = 
'(weightSBTag0_SF)'\nsf_one_soft_bjet = '(weightSBTag1_SF)'\nsf_one_or_more_soft_bjet = '(weightSBTag1p_SF)'\nsf_veto_hard_bjet = '(weightHBTag0_SF)'\nsf_one_hard_bjet = '(weightHBTag1_SF)'\nsf_one_or_more_hard_bjet = '(weightHBTag1p_SF)'\nsf_veto_bjet = '(weightBTag0_SF)'\nsf_one_bjet = '(weightBTag1_SF)'\nsf_one_or_more_bjet = '(weightBTag1p_SF)'\nsf_two_or_more_bjet = '(weightBTag2p_SF)'\n\nsf_sr1_bjet = sf_veto_bjet\nsf_sr2_bjet = \"(weightSBTag1p_SF * weightHBTag0_SF)\"\nsf_cr1_bjet = sf_veto_bjet\nsf_cr2_bjet = \"(weightSBTag1p_SF * weightHBTag0_SF)\" #\"( (nBSoftJet>=1) && (nBHardJet==0) )\"\nsf_crtt1_bjet = \"(weightSBTag0_SF * weightHBTag1_SF)\" #\"( (nBSoftJet==0) && (nBHardJet==1) )\"\nsf_crtt2_bjet = \"(weightBTag2p_SF * weightHBTag1p_SF)\"#\"( (nBJet>=2) && (nBHardJet>=1) )\"\n\n\n\nbtag_to_sf = {\n btag_veto_soft_bjet : sf_veto_soft_bjet , \n btag_one_soft_bjet : sf_one_soft_bjet , \n btag_one_or_more_soft_bjet : sf_one_or_more_soft_bjet , \n btag_veto_hard_bjet : sf_veto_hard_bjet , \n btag_one_hard_bjet : sf_one_hard_bjet , \n btag_one_or_more_hard_bjet : sf_one_or_more_hard_bjet , \n btag_veto_bjet : sf_veto_bjet , \n btag_one_bjet : sf_one_bjet , \n btag_one_or_more_bjet : sf_one_or_more_bjet ,\n btag_two_or_more_bjet : sf_two_or_more_bjet , \n\n btag_sr1_bjet : sf_sr1_bjet , \n btag_sr2_bjet : sf_sr2_bjet , \n btag_cr1_bjet : sf_cr1_bjet , \n btag_cr2_bjet : sf_cr2_bjet , \n btag_crtt1_bjet : sf_crtt1_bjet , \n btag_crtt2_bjet : sf_crtt2_bjet , \n }\n \nsf_to_btag = dict( (reversed(item) for item in btag_to_sf.items() ) )\n\n\n\nbtag_to_weight_vars ={\n 'nBJet' : 'weightBTag%s_SF' , \n 'nBSoftJet' : 'weightSBTag%s_SF' , \n 'nBHardJet' : 'weightHBTag%s_SF' , \n }\n\nweight_to_btag_vars = dict( (reversed(item) for item in btag_to_weight_vars.items() ) )\n\n","sub_path":"DegenerateStopAnalysis/python/tools/btag_sf_map.py","file_name":"btag_sf_map.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"471767995","text":"import re\nimport operator\n\nfile_default=\"log.txt\"\n\ntry:\n print(\"Leave empty for default\")\n file=input()\n if not file:\n raise ValueError()\nexcept ValueError:\n file=file_default\n\ndef ips(file):\n pattern=re.compile(r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\")\n ip_dict={}\n for line in open(file, \"r\"):\n ipis=pattern.findall(line)\n for ip in ipis:\n if ip not in ip_dict:\n ip_dict[ip]=1\n else:\n ip_dict[ip]+=1\n sorted_ip = sorted(ip_dict.items(), key=operator.itemgetter(1))\n print(sorted_ip[:-11:-1])\n\nips(file)","sub_path":"task2/part4.py","file_name":"part4.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"465885073","text":"import numpy\nfrom array import array\nfrom random import random\n\n'''\na = numpy.arange(12) # Build and inspect a numpy.ndarray with integers 0 to 11.\nprint(\"a:\", a)\nprint(\"type(a):\", type(a))\nprint(a.shape) # Inspect the dimensions of the array: this is a one-dimensional, 12-element array.\na.shape = 3, 4 # Change the shape of the array, adding one dimension, then inspecting the result.\nprint(\"a:\", a)\nprint(\"a[2]\", a[2]) # Get row at index 2.\nprint(\"a[2, 1]\", a[2, 1]) # Get element at index 2, 1.\nprint\nprint(\"a[:, 1]\",a[:, 1]) # Get column at index 1.\nprint()\nprint(\"a.transpose():\", a.transpose()) # Create a new array by transposing (swapping columns with rows).\n'''\n\nfloats = array('f', 
(random() for i in range(10**7)))\nnumpy_array = numpy.array(floats)\nnumpy.savetxt('floats-10M-lines.txt', numpy_array)\n\nfloats = numpy.loadtxt('floats-10M-lines.txt') # Load 10 million floating-point numbers from a text file.\nprint(\"floats[-3:]\", floats[-3:]) # Use sequence slicing notation to inspect the last three numbers.\n\nfloats *= .5 # Multiply every element in the floats array by .5 and inspect the last three elements again.\nprint(\"floats[-3:]\", floats[-3:])\n\nfrom time import perf_counter as pc # Import the high-resolution performance measurement timer (available since Python 3.3).\nt0 = pc(); floats /= 3; print(\"pc() - t0:\", pc() - t0) # Divide every element by 3; the elapsed time for 10 million floats is less than 40 milliseconds\n\nnumpy.save('floats-10M', floats) # Save the array in a .npy binary file.\nfloats2 = numpy.load('floats-10M.npy', 'r+') # Load the data as a memory-mapped file into another array; this allows efficient\n # processing of slices of the array even if it does not fit entirely in memory.\nfloats2 *= 6\n\nprint(\"floats2[-3:]\", floats2[-3:]) # Inspect the last three elements after multiplying every element by 6.\n","sub_path":"DataStructures/NumPyStuff.py","file_name":"NumPyStuff.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"126480649","text":"import torchvision.models as models\nimport torch.nn as nn\nimport torch\nfrom torchvision.models.inception import Inception3\n\n# class InceptionV3(nn.Module):\n# def __init__(self, num_classes, aux_logits=True, transform_input=False):\n# super(InceptionV3, self).__init__()\n# model = Inception3(num_classes=num_classes, aux_logits=aux_logits,transform_input=transform_input)\n# self.model = model\n#\n# def forward(self, x):\n# x = self.model(x)\n# return x\n\nclass InceptionV3(nn.Module):\n def __init__(self, num_classes, aux_logits=False, transform_input=False):\n super(InceptionV3, self).__init__()\n model = models.inception_v3(pretrained=True)\n num_ftrs = model.AuxLogits.fc.in_features\n model.AuxLogits.fc = nn.Linear(num_ftrs,num_classes)\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs,num_classes)\n# model = Inception3(num_classes=num_classes, aux_logits=aux_logits,transform_input=transform_input)\n self.model = model\n\n def forward(self, x):\n x = self.model(x)\n return x\n","sub_path":"Messidor/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"27780749","text":"import argparse\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom numpy import *\nimport grnular.data.gen_data as gen_data\nfrom grnular.utils.metrics import report_metrics\nimport sys, copy, pickle\nfrom arboreto.algo import grnboost2\nfrom arboreto.utils import load_tf_names\nimport pandas as pd\nimport sklearn \nTRAIN=True\n\nparser = argparse.ArgumentParser(description='Classification of different cell types as well as recovering the gene regulatory network of RNA seq data: using SERGIO simulator for training')\n#****************** general parameters\nparser.add_argument('--K_train', type=int, default=5, #2, #1000,\n help='Num of training examples for a fixed D')\nparser.add_argument('--K_valid', type=int, default=5, #5, #2, #1000,\n help='Number of valid examples for a fixed D ')\nparser.add_argument('--K_test', type=int, default=100, #100, #10,\n 
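 # K_test sets how many simulated test datasets the GRNBoost2 baseline is evaluated on\n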
help='Number of testing examples for a fixed D')\nparser.add_argument('--D', type=int, default=100, #1000,\n help='Number of genes ')\nparser.add_argument('--C', type=int, default=9,\n help='different cell types, target variable')\nparser.add_argument('--sparsity', type=float, default=0.3, #0.2,\n help='sparsity of erdos-renyi graph')\nparser.add_argument('--DATA_METHOD', type=str, default='sim_expt', #'syn_expt2', #'sim_expt1', \n help='expt details in draft: random/syn_same_precision, sim_expt1=DS1 and sim_expt2=Custom, sim_expt3=GRN')\nparser.add_argument('--DATA_TYPE', type=str, default='clean', #'case2', \n help='expt details in draft: sim_exp1 clean/noisy')\n\nparser.add_argument('--USE_TF_NAMES', type=str, default='no',# 'yes' \n help='use transcription factors for grnboost2: will use in general')\n\n# SERGIO simulator parameters\nparser.add_argument('--DATA_NAME', type=str, default='CUSTOM', #'DS1', \n help='expt details in draft: DS1, DS2, DS3, CUSTOM')\nparser.add_argument('--POINTS_PER_CLASS', type=int, default=2000,# NOTE: try 2000\n help='cells per class type')\nparser.add_argument('--SAMPLING_STATE', type=int, default=1, #1,\n help='num of simulations')\nparser.add_argument('--NOISE_PARAMS', type=float, default=0.1, #1,\n help='num of noise params')\nparser.add_argument('--DECAYS', type=float, default=5, #0.8,#0.8,\n help='decay params')\nparser.add_argument('--NOISE_TYPE', type=str, default='dpd', #'dpd', \n help='different noise types: \"dpd\", “sp”, “spd”')\nparser.add_argument('--SHARED_COOP_STATE', type=int, default=2, #1,\n help='shared coop state')\nparser.add_argument('--pcr_low_min', type=float, default=0.2, #1,\n help='production cell rate: low expression range')\nparser.add_argument('--pcr_low_max', type=float, default=0.5, #1,\n help='production cell rate: low expression range')\nparser.add_argument('--pcr_high_min', type=float, default=0.7, #1,\n help='production cell rate: high expression range')\nparser.add_argument('--pcr_high_max', type=float, default=1, #1,\n help='production cell rate: high expression range')\nparser.add_argument('--Kij_min', type=float, default=1, #1,\n help='Interaction strengths Kij min')\nparser.add_argument('--Kij_max', type=float, default=5, #1,\n help='Interaction strengths Kij max')\nparser.add_argument('--ratio_MR', type=float, default=0.1, #1,\n help='number of master regulators ~ ratio_MR * D')\nparser.add_argument('--connect_TF_prob', type=float, default=0.2, #1,\n help='probability of connecting master regulators')\n# SERGIO technical noise parameters\nparser.add_argument('--ADD_TECHNICAL_NOISE', type=str, default='yes',# 'no' \n help='add technical noise on the saved clean data')\nparser.add_argument('--dropout_shape', type=float, default=6.5, #1,\n help='SERGIO dropout param: shape, higher -> less dropout')\nparser.add_argument('--dropout_percentile', type=float, default=82, #1,\n help='SERGIO dropout param: percentile, lower -> less dropout')\n\nargs = parser.parse_args()\n\ndef get_args_str(dict1):\n args_str = ''\n for i, (k, v) in enumerate(dict1.items()):\n# print(k , item)\n if k in ['C', 'GLAD_LOSS', 'MODEL_SELECT', 'SUB_METHOD']:\n args_str = args_str+str(k)+str(v)+'_'\n return args_str\n\nargs_str = get_args_str(vars(args))\n\ndef get_res_filepath(name):\n FILE_NUM = str(np.random.randint(10000))\n savepath = 'simulator/BEELINE-data/my_pred_networks/'\n filepath = savepath +name+'_beeline_pred_tag'+str(FILE_NUM)+'.pickle'\n return filepath\n\n\ndef fit_grnboost2(data, PREDICT_TF=False, BEELINE=False):\n EARLY_BREAK = 
9 \n print('FITTING GRNBOOST2')\n # #############################################################################\n res = []\n typeS = 'mean'\n print('Using ', typeS, ' scaling')\n for i, d in enumerate(data):\n X, y, theta_true, master_regulators = d\n Xc = normalizing_data(X, typeS)\n print('\\n grnboost2: TRAIN data batch : ', i, ' total points = ', X.shape[0])\n if args.USE_TF_NAMES=='yes' and PREDICT_TF:\n res.append(helper_grnboost2(Xc, theta_true, tf_names = master_regulators))\n\n else:\n # NOTE: breaking early as tf=None takes lot of time\n if i > EARLY_BREAK:\n print('Breaking at i = ', i, ' as tf=None case takes a lot of time')\n break\n res.append(helper_grnboost2(Xc, theta_true))\n\n\n res_mean = np.mean(np.array(res).astype(np.float64), 0)\n res_std = np.std(np.array(res).astype(np.float64), 0)\n res_mean = [\"%.3f\" %x for x in res_mean]\n res_std = [\"%.3f\" %x for x in res_std]\n res_dict = {} # name: [mean, std]\n for i, _name in enumerate(['FDR', 'TPR', 'FPR', 'SHD', 'nnz_true', 'nnz_pred', 'precision', 'recall', 'Fb', 'aupr', 'auc']): # dictionary\n res_dict[_name]= [res_mean[i], res_std[i]]#mean std\n if PREDICT_TF:\n print('\\nAvg GRNBOOST2-TF: FDR, ,TPR, ,FPR, ,SHD, ,nnz_true, ,nnz_pred, ,precision, ,recall, ,Fb, ,aupr, ,auc, ')\n else:\n print('\\nAvg GRNBOOST2: FDR, ,TPR, ,FPR, ,SHD, ,nnz_true, ,nnz_pred, ,precision, ,recall, ,Fb, ,aupr, ,auc, ')\n mean_std = [[rm, rs] for rm, rs in zip(res_mean, res_std)]\n flat_list = [item for ms in mean_std for item in ms]\n print('%s' % ', '.join(map(str, flat_list))) \n return\n\n\ndef normalizing_data(X, typeS='log'):\n if typeS == 'mean':\n #print('Centering and scaling the input data...')\n scaledX = X - X.mean(axis=0)\n scaledX = scaledX/X.std(axis=0)\n # NOTE: replacing all nan's by 0, as sometimes in dropout the complete column\n # goes to zero\n scaledX = convert_nans_to_zeros(scaledX)\n elif typeS == 'log':\n scaledX = np.log(X+1)\n else:\n print('Check the valid scaling')\n return scaledX\n\n\ndef convert_nans_to_zeros(X):\n where_are_nans = isnan(X)\n X[where_are_nans] = 0\n return X\n\n\ndef helper_grnboost2(X, theta_true, tf_names=[], BEELINE=False):#_string\n print('Running GRNBoost2 method', X.shape)\n theta_true = theta_true.real\n ex_matrix = pd.DataFrame(X)\n if args.USE_TF_NAMES == 'yes' and len(tf_names)!=0:\n tf_names = ['G'+str(n) for n in tf_names]\n else:\n tf_names = None\n \n gene_names = ['G'+str(c) for c in ex_matrix.columns]\n ex_matrix.columns = gene_names\n network = grnboost2(expression_data=ex_matrix, gene_names=gene_names, tf_names=tf_names)#, verbose=True)\n pred_edges = np.array(network[['TF', 'target', 'importance']])\n G_pred = nx.Graph()\n# G_pred.add_nodes_from(['G'+str(n) for n in range(args.D)])\n G_pred.add_nodes_from(['G'+str(n) for n in range(len(gene_names))])\n G_pred.add_weighted_edges_from(pred_edges)\n# pred_theta = nx.adj_matrix(G_pred).todense() + np.eye(args.D)\n pred_theta = nx.adj_matrix(G_pred).todense() + np.eye(len(gene_names))\n recovery_metrics = report_metrics(np.array(theta_true), np.array(pred_theta))\n print('GRNBOOST2: FDR, TPR, FPR, SHD, nnz_true, nnz_pred, precision, recall, Fb, aupr, auc')\n print('GRNBOOST2: Recovery of true theta: ', *np.around(recovery_metrics, 3))\n \n res = list(recovery_metrics)\n return res\n\n\ndef get_filepath():\n dict1 = vars(args)\n filename = ''\n abbrv_dict = {'K_train': 'KTr', 'K_valid': 'KVa', 'K_test': 'KTe', 'D': 'D', 'C':'C',\n 'sparsity': 'Sp', 'DATA_TYPE':'Dt', 'POINTS_PER_CLASS': 'ppc',\n 'SAMPLING_STATE': 'SS', 
'NOISE_PARAMS': 'NP', 'DECAYS': 'De',\n 'NOISE_TYPE': 'NT', 'SHARED_COOP_STATE': 'SCS', 'pcr_low_min': 'pcrln',\n 'pcr_low_max': 'pcrlx', 'pcr_high_min': 'pcrhn', 'pcr_high_max': 'pcrhx',\n 'Kij_min': 'kmin', 'Kij_max': 'kmax', 'ratio_MR': 'rMR', \n 'connect_TF_prob': 'TFp'}\n for k in abbrv_dict.keys():\n v = dict1[k]\n filename = filename+str(abbrv_dict[k])+str(v)+'_'\n\n SAVEPATH = 'grnular/data/saved_data/'\n FILEPATH = SAVEPATH + filename + '.pickle'\n print('Filepath: ', FILEPATH)\n return FILEPATH\n\n\ndef load_saved_data():\n FILEPATH = get_filepath()\n with open(FILEPATH, 'rb') as handle:\n data = pickle.load(handle)\n return data\n\n\ndef main():\n print(args)\n print('\\nReading the input data: Single cell RNA: M(samples) x D(genes) & corresponding C(targets)')\n train_data, valid_data, test_data = load_saved_data()\n if args.ADD_TECHNICAL_NOISE == 'yes':\n print('adding technical noise')\n# train_data = gen_data.add_technical_noise(args, train_data)\n# valid_data = gen_data.add_technical_noise(args, valid_data)\n test_data = gen_data.add_technical_noise(args, test_data)\n\n # Fitting a grnboost2 \n fit_grnboost2(test_data)\n print('Using TF NAMES')\n fit_grnboost2(test_data, PREDICT_TF=True)\n print('\\nExpt Done')\n return \n\nif __name__==\"__main__\":\n main()\n","sub_path":"baselines/grnboost2/main_grnboost2.py","file_name":"main_grnboost2.py","file_ext":"py","file_size_in_byte":10195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"16909865","text":"import re\n\ndef initDictCounter():\n\tslovar = {};\n\tcount = 0;\n\tfilePath = input(\"Enter result file: \");\n\tfile = open(filePath, 'r');\n\tfor line in file:\n\t\tbytesTuple = re.findall(r'\\d+', line);\n\t\tslovar.update({(bytesTuple[0], bytesTuple[1]): bytesTuple[2]});\n\t\tcount += int(bytesTuple[2]);\n\tfile.close();\n\treturn (slovar, count);\n\ndef splitString():\n\tstrings = [];\n\tfilePath = input(\"Enter file to check: \");\n\tl = input(\"Enter l: \");\n\tfile = open(filePath, 'rb');\n\tstring = file.read();\n\tfor index in range(0,len(string)):\n\t\tstrings.append(string[index:index + int(l)]);\n\tfile.close;\n\treturn strings;\n\ndef p1(x0, table, count):\n count_x0=0;\n for i in range(0, 256):\n count_x0 += int(table[tuple([str(x0), str(i)])]);\n return count_x0 / count;\n\ndef p2(x0,x1, table, count):\n count_x0=0;\n for i in range(0, 256):\n count_x0 += int(table[tuple([str(x0),str(i)])]);\n return int(table[tuple([str(x0), str(i)])]) / count_x0;\n\ndef calculations(table, count, strings):\n\topt = [];\n\tresult = 1;\n\tfor string in strings:\n\t\tfor i in range(len(string)):\n\t\t\tif i == 0:\n\t\t\t\tresult *= p1(string[i], table, count);\n\t\t\telse:\n\t\t\t\tresult *= p2(string[i-1],strings[i], table, count);\n\t\topt.append(result);\n\t\tresult = 1;\n\treturn opt;\n\ndef main():\n\tresultHex = \"\";\n\tresultChar = \"\";\n\tslovar = initDictCounter();\n\ttable = slovar[0];\n\tcount = slovar[1];\n\tstrings = splitString();\n\topt = calculations(table, count, strings);\n\testimate = min(opt);\n\tresultBytes = strings[opt.index(estimate)];\n\tprint(\"The upper estimate: \" + str(estimate));\n\ttemp = list(resultBytes)\n\tfor i in range(len(temp)):\n\t\tif temp[i] != 0:\n\t\t\tresultChar += chr(temp[i]);\n\t\telse:\n\t\t\tresultChar += \".\";\n\tprint(\"String: \" + resultChar);\n\tprint(\"Hex: \" + resultBytes.hex());\n\n\nif __name__ == 
\"__main__\":\n\tmain()","sub_path":"definition_signature.py","file_name":"definition_signature.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"300886277","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.exceptions import ObjectDoesNotExist\n\ndef model_to_dict(obj, exclude=[]):\n '''\n serialize model object to dict with related objects\n\n '''\n tree = {}\n for field_name in obj._meta.get_all_field_names():\n try:\n field = getattr(obj, field_name)\n except (ObjectDoesNotExist, AttributeError):\n continue\n \n if field_name in exclude:\n continue\n \n field = obj._meta.get_field_by_name(field_name)[0]\n if field.__class__.__name__ in exclude:\n continue\n \n value = getattr(obj, field_name)\n \n if value:\n tree[field_name] = value\n return tree\n\n\nfrom django.db.models.loading import get_model\n\ndef copy_model_object(obj, \\\n copy_related=True, copy_self_refs=False, \\\n copy_rels=[],\\\n exclude=None, defaults=None, values=None, \\\n parent_obj=None, parent_field=None):\n \"\"\"\n exclude:\n {\n '_':[], # self fields list\n 'somesubmodel_set':{'_': []}, # named as 'related_set'\n 'anothersubmodel_set':{'_': []}\n }\n \n values: # ovverides existing values\n as exclude\n defaults:\n as values\n \"\"\"\n # Инстанцируем новую модель\n model = get_model(obj._meta.app_label, obj._meta.module_name)\n new_model = model()\n\n # Получаем список полей, исключаем ненужные\n exclude = exclude or { '_': [] }\n\n # Автоматически исключаем поле с ключем\n pk_field = obj._meta.pk.name\n if pk_field not in exclude['_']:\n exclude['_'].append(pk_field)\n all_fields = [f.name for f in model._meta.fields]\n fields = list(set(all_fields) ^ set(exclude['_']))\n \n # Задаем аттрибуты полей и сохраняем\n defaults = defaults or { '_': {} }\n values = values or { '_': {} }\n \n # Принудительное заполнение родительского аттрибута для связанных моделей\n if parent_field and parent_obj:\n values['_'][parent_field] = parent_obj\n \n for field_name in fields:\n v = getattr(obj, field_name)\n \n if field_name in values['_']:\n v = values['_'][field_name]\n elif field_name in defaults['_'] and not v:\n v = defaults['_'][field_name]\n \n setattr(new_model, field_name, v)\n \n new_model.save()\n\n m2m_fields = model._meta.get_m2m_with_model()\n for field, fl in m2m_fields:\n field_name = field.name\n v = getattr(obj, field_name)\n v = v.all()\n if field_name in values['_']:\n v = values['_'][field_name]\n elif field_name in defaults['_'] and not v:\n v = defaults['_'][field_name]\n \n m2m_field = getattr(new_model, field_name)\n m2m_field.clear()\n m2m_field.add(*v)\n \n # Если стоит опция копирования дочерних объектов, запускаем обработку связей\n if copy_related:\n related_sets = model._meta.get_all_related_objects()\n for rel in related_sets:\n \n # пропускаем ссылки на собственную модель, если такая опция выключена в настройках\n if not copy_self_refs and rel.model==model:\n continue\n \n rel_name = rel.get_accessor_name()\n \n # если задан список связанных объектов и имя текущего сэта не находится в нём, то пропускаем\n if copy_rels and rel_name not in copy_rels:\n continue\n \n related_set = getattr(obj, rel_name)\n for related_obj in related_set.all():\n params = {\n 'obj':related_obj,\n 'copy_related':copy_related,\n 'copy_self_refs':copy_self_refs,\n 'exclude':rel_name in exclude and exclude[rel_name] or None,\n 'defaults':rel_name in defaults and defaults[rel_name] or None,\n 'values':rel_name in values 
and values[rel_name] or None,\n 'parent_obj':new_model,\n 'parent_field':rel.field.name\n }\n copy_model_object(**params)\n \n return new_model","sub_path":"apps/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"601125383","text":"# Build the graph first, then solve recursively. Watch the case where an element of queries is not in the dict, e.g. [x, x] should return -1, not 1\n\nfrom typing import List\nimport collections\nclass Solution:\n def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n def dfs(start, end, visited):\n if start == end and start in graph:\n return 1.0\n \n if start in visited:\n return -1.0\n \n visited.add(start)\n for node, val in graph[start]:\n tmp = dfs(node, end, visited)\n if tmp > 0:\n return val * tmp\n \n return -1.0\n \n graph = collections.defaultdict(list)\n for ops, val in zip(equations, values):\n op1, op2 = ops\n graph[op1].append([op2, val])\n graph[op2].append([op1, 1.0 / val])\n \n res = []\n for first, second in queries:\n res.append(dfs(first, second, set()))\n \n return res","sub_path":"leetcode/399-Evaluate-Division.py","file_name":"399-Evaluate-Division.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"528123792","text":"from django import forms\nfrom .models import curso\n\nclass tipocursoForm(forms.Form):\n at = {'class':'form-control', 'placeholder':'ingresar tipo de curso'}\n descripciontipocurso = forms.CharField(label='Descripción tipo curso', max_length=15, widget=forms.TextInput(attrs=at))\n\nclass cursoForm(forms.ModelForm):\n class Meta:\n model = curso\n fields = '__all__'\n dc = {'class':'form-control', 'placeholder':'ingresar descripción de curso'}\n tp = {'class':'form-control'}\n cr = {'class':'form-control', 'placeholder': 'ingrese número de créditos'}\n c = ciclos = (\n ('I', 'I'), ('II', 'II'), ('III', 'III'), ('IV','IV'),\n ('V', 'V'), ('VI', 'VI'), ('VII', 'VII'), ('VIII','VIII'),\n ('IX', 'IX'), ('X', 'X'), ('E', 'E'),\n )\n\n exclude = {'estadocurso'}\n\n labels = {\n 'descripcioncurso' : ('Descripcion del curso'),\n 'tipocurso' : ('Tipo curso'),\n 'creditos' : ('Créditos'),\n }\n widgets = {\n 'descripcioncurso' : forms.TextInput(attrs=dc),\n 'tipocurso' : forms.Select(attrs=tp),\n 'creditos' : forms.NumberInput(attrs=cr),\n 'ciclo' : forms.Select(attrs=tp, choices=c)\n }\n help_texts = {\n 'creditos': ('** No escribir mas de 5 créditos.
'),\n 'ciclo' : ('** Seleccione un ciclo.
'),\n }\n error_messages = {\n 'creditos': {\n 'max_length': (\"Crédito Muy largo.\"),\n },\n }\n","sub_path":"app/sistema/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"25251883","text":"desired = {\n 'deviceName': 'Android Emulator',\n 'automationName': 'appium',\n 'platformName': 'Android',\n 'platformVersion': '6.0.1',\n 'appPackage': 'com.zjhz.erpt',\n 'appActivity': '.logic.launch.LoginActivity',\n 'unid': '127.0.0.1:7555',\n 'noReset': 'True',\n 'unicodeKeyboary':'True',\n 'resetKeyboard':'True'\n}\n\n\nfind_ID = \"3\" #搜索的陌集号\n\nimport yaml\nimport log_config\n\nlogger = log_config.handle()\n\nwith open('devices_config') as f:\n data = yaml.load(f) #读取\n\n logger.info('开始查询。。。。。')\n print(data['appPackage']) #查询\n\n data['unid'] = \"46fe7912\" #修改\n logger.info('结束,,,,,,')\n applictions = {'app':'C:\\\\Users\\\\admin\\\\Documents\\\\app-release(15).apk'}\n yaml.dump(applictions) #数据转化","sub_path":"appium_project/Public/devices_config.py","file_name":"devices_config.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"324012589","text":"import cv2\nfrom os import environ\n\ndetect_face = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nrecognizer = cv2.face.EigenFaceRecognizer_create()\nrecognizer.read('classifierEigen.yml')\n# recognizer.read('classifierEigenYale.yml')\nwidth, height = 220, 220\nfont = cv2.FONT_HERSHEY_COMPLEX_SMALL\n\ncamera = cv2.VideoCapture(environ['URL']) # First webcam available\n\nwhile True:\n conect, image = camera.read()\n grey_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n detected_faces = detect_face.detectMultiScale(\n grey_image, scaleFactor=1.5, minSize=(30, 30))\n\n for (x, y, w, h) in detected_faces:\n face_image = cv2.resize(grey_image[y:y + h, x:x + w], (width, height))\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n id, reliability = recognizer.predict(face_image)\n\n name = ''\n\n if id == 1:\n name = 'Juan'\n elif id == 2:\n name = 'Tamiris'\n else:\n name = 'Desconhecido'\n\n cv2.putText(image, name, (x, y + (h + 30)), font, 2, (0, 0, 255))\n cv2.putText(image, str(reliability),\n (x, y + (h + 50)), font, 1, (0, 0, 255))\n\n cv2.imshow('Face', image)\n if cv2.waitKey(1) == ord('q'):\n break\n\ncamera.release()\ncv2.destroyAllWindows()\n","sub_path":"recognition_eigenfaces.py","file_name":"recognition_eigenfaces.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"245602514","text":"from __future__ import print_function\r\n\r\nimport sys\r\nfrom operator import add\r\nfrom pyspark import SparkContext\r\nfrom csv import reader\r\n\r\nif __name__ == \"__main__\":\r\n\tsc = SparkContext()\r\n\t\r\n\tdef toCSVLine(data):\r\n\t\treturn ','.join(str(d) for d in data)\r\n \r\n\tlines = sc.textFile(sys.argv[1], 1)\r\n\r\n\tlines = lines.mapPartitions(lambda x: reader(x))\r\n\t\r\n\tlines = lines.map(lambda x: (x[1], x[2]))\r\n\t\r\n\theader = lines.first()\r\n\t\r\n\tcounts = lines.filter(lambda x: x != header) \\\r\n\t\t\t.map(lambda x: (x,1)).reduceByKey(add).sortByKey()\r\n\t\r\n\ttotal_offenses_borough = 
counts.map(toCSVLine)\r\n\t\r\n\ttotal_offenses_borough.saveAsTextFile('total_offenses_borough.csv')\t","sub_path":"total_offenses_borough.py","file_name":"total_offenses_borough.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"349196818","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/4/23 19:27\n# @Author : Alan\n# @Email : xiezhengwen2013@163.com\n# @File : model2.py\n# @Software: PyCharm\n\n\nimport tensorflow as tf\nfrom model_utils import *\nfrom tensorflow.contrib.layers import l2_regularizer, xavier_initializer\nfrom tensorflow.contrib.rnn import LSTMCell, GRUCell, DropoutWrapper\n\n\nclass CAM(object):\n def __init__(self, config):\n self.ques_len = config.ques_length\n self.ans_len = config.ans_length\n self.hidden_size = config.hidden_size\n self.output_size = config.output_size\n self.pos_weight = config.pos_weight\n self.learning_rate = config.learning_rate\n self.optimizer = config.optimizer\n self.l2_lambda = config.l2_lambda\n self.clip_value = config.clip_value\n self.embeddings = config.embeddings\n self.window_sizes = config.window_sizes\n self.n_filters = config.n_filters\n self.rnn_size = config.rnn_size\n\n self._placeholder_init_pointwise()\n self.initialize_weights()\n pred = self._build(self.embeddings)\n # 损失和精确度\n self.y_hat, self.total_loss= self._add_loss_op(pred)\n # 训练节点\n self.train_op = self._add_train_op(self.total_loss)\n\n def _placeholder_init_pointwise(self):\n self._ques = tf.placeholder(tf.int32, [None, self.ques_len], name='ques_point')\n self._ans = tf.placeholder(tf.int32, [None, self.ans_len], name='ans_point')\n self._ques_mask = tf.placeholder(tf.int32, [None], 'ques_mask')\n self._ans_mask = tf.placeholder(tf.int32, [None], 'ans_mask')\n self._y = tf.placeholder(tf.int32, [None])\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n self.batch_size, self.list_size = tf.shape(self._ans)[0], tf.shape(self._ans)[1]\n\n def initialize_weights(self):\n \"\"\"Global initialization of weights for the representation layer\n\n \"\"\"\n # preprocessing\n self.W_i = weight_variable('W_i', [self.hidden_size, self.hidden_size])\n self.W_l1 = weight_variable('W_l1', [5 * self.hidden_size, self.hidden_size])\n self.W_l2 = weight_variable('W_l2', [self.hidden_size, 2])\n\n self.b_i = bias_variable('b_i', [self.hidden_size])\n self.b_l1 = bias_variable('b_l1', [self.hidden_size])\n self.b_l2 = bias_variable('b_l2', [2])\n\n\n def proj_layer(self, seq, out_size, name, reuse=None):\n \"\"\"\n 投影层\n \"\"\"\n assert len(seq.get_shape()) == 3\n out1 = self.mlp(seq, out_size, 1,\n tf.nn.sigmoid, name + '_sigmoid', reuse=reuse)\n out2 = self.mlp(seq, out_size, 1,\n tf.nn.tanh, name + '_tanh', reuse=reuse)\n out = out1 * out2\n return out\n\n def biLSTMBlock(self, inputs, num_units, scope, rnn_type, dropout_keep_prob, seq_len=None, isReuse=None):\n with tf.variable_scope(scope, reuse=isReuse):\n if rnn_type == 'LSTM':\n lstmCell = LSTMCell(num_units=num_units)\n elif rnn_type == 'GRU':\n lstmCell = GRUCell(num_units=num_units)\n dropLSTMCell = lambda: DropoutWrapper(lstmCell, output_keep_prob=dropout_keep_prob)\n fwLSTMCell, bwLSTMCell = dropLSTMCell(), dropLSTMCell()\n output = tf.nn.bidirectional_dynamic_rnn(cell_fw=fwLSTMCell,\n cell_bw=bwLSTMCell,\n inputs=inputs,\n sequence_length=seq_len,\n dtype=tf.float32)\n return output\n\n def _preprocess_layer(self, question, answer, out_size):\n # 对应于原文公式(1),得到Q,A\n with 
tf.variable_scope('context_encoding') as scope:\n q_encode = self.proj_layer(question, out_size, 'proj_layer', reuse=None)\n a_encode = self.proj_layer(answer, out_size, 'proj_layer', reuse=True)\n return q_encode, a_encode\n\n def _preprocess_layer2(self, question, answer, out_size, reuse=None):\n # 不共享参数\n out1 = self.mlp(question, out_size, 1,\n tf.nn.relu, 'proj_layer' + 'ques_relu', reuse=reuse)\n out2 = self.mlp(answer, out_size, 1,\n tf.nn.relu, 'proj_layer' + 'ans_relu', reuse=reuse)\n return out1, out2\n\n def _preprocess_layer3(self, question, answer, out_size):\n # 共享参数\n out1 = self.mlp(question, out_size, 1,\n tf.nn.relu, 'proj_layer' + 'relu', reuse=None)\n out2 = self.mlp(answer, out_size, 1,\n tf.nn.relu, 'proj_layer' + 'relu', reuse=True)\n return out1, out2\n\n def _preprocess_layer4(self, question, answer, out_size):\n # 共享参数\n # bilstm\n rnn_outputs_left, final_state_left = self.biLSTMBlock(question, out_size, 'R', 'LSTM',\n self.dropout_keep_prob, self._ques_mask)\n rnn_outputs_right, final_state_right =self.biLSTMBlock(answer, out_size, 'R', 'LSTM',\n self.dropout_keep_prob, self._ans_mask, isReuse=True)\n rnn_q = tf.concat(rnn_outputs_left, axis=2) # (bz*ls, q_len, 2rz)\n rnn_a = tf.concat(rnn_outputs_right, axis=2) # (bz*ls, a_len, 2rz)\n print_shape('rnn_q', rnn_q)\n print_shape('rnn_a', rnn_a)\n return rnn_q, rnn_a\n\n def _attention_layer(self, question, answer, question_mask, answer_mask):\n \"\"\"\n q: [batch_size, q_length, represent_dim]\n a: [batch_size, a_length, represent_dim]\n q_mask : [bz, q_len] -> 3d\n a_mask : [bz, a_len] -> 3d\n \"\"\"\n question = tf.reshape(question, [-1, self.hidden_size])\n question = tf.nn.xw_plus_b(question, self.W_i, self.b_i)\n question = tf.reshape(question, [-1, self.ques_len, self.hidden_size])\n att_inner_product = tf.matmul(question, tf.transpose(answer, (0, 2, 1))) # [batch_size, q_length, a_length]\n question_mask = tf.expand_dims(question_mask, axis=-1)\n answer_mask = tf.expand_dims(answer_mask, axis=1)\n q_softmax = attention_softmax_3d_align(att_inner_product, question_mask, dim=1)\n # we set all items to zero that correspond to zero-padded positions of the answer\n G_zero = tf.multiply(q_softmax, answer_mask)\n output_a = tf.matmul(tf.transpose(G_zero, [0, 2, 1]), question)\n\n a_softmax = attention_softmax_3d_align(att_inner_product, answer_mask, dim=-1)\n G_zero_ = tf.multiply(a_softmax, question_mask)\n output_q = tf.matmul(G_zero_, answer) # [batch_size, q_length, 2hz]\n return output_a, output_q\n\n def _attention_layer2(self, question, answer, question_mask, answer_mask):\n \"\"\"\n q: [batch_size, q_length, represent_dim]\n a: [batch_size, a_length, represent_dim]\n q_mask : [bz, q_len] -> 3d\n a_mask : [bz, a_len] -> 3d\n \"\"\"\n question = tf.reshape(question, [-1, self.hidden_size])\n question = tf.nn.xw_plus_b(question, self.W_i, self.b_i)\n question = tf.reshape(question, [-1, self.ques_len, self.hidden_size])\n question = tf.multiply(question, tf.expand_dims(question_mask, axis=-1))\n answer = tf.multiply(answer, tf.expand_dims(answer_mask, axis=-1))\n\n matrix = tf.matmul(question, tf.transpose(answer, (0, 2, 1))) # [batch_size, q_length, a_length]\n matrix = tf.multiply(matrix, tf.expand_dims(question_mask, -1))\n matrix = tf.multiply(matrix, tf.expand_dims(answer_mask, 1))\n\n q_softmax = tf.nn.softmax(matrix, axis=1)\n a_softmax = tf.nn.softmax(matrix, axis=-1)\n q_softmax = tf.multiply(q_softmax, tf.expand_dims(question_mask, axis=-1))\n a_softmax = tf.multiply(a_softmax, 
tf.expand_dims(answer_mask, axis=1))\n\n a_align = tf.matmul(tf.transpose(q_softmax, [0, 2, 1]), question)\n q_align = tf.matmul(a_softmax, answer)\n return a_align, q_align\n\n def _compare_layer(self, q, q_align, a, a_align, comp_type):\n \"\"\"\n a: [batch_size, a_length, 2hz]\n a_att: [batch_size, a_length, 2hz]\n fuse_A: [batch_size, a_length, 2hz]\n fuse_Q: [batch_size, q_length, 2hz]\n \"\"\"\n size = q.get_shape()[-1]\n if comp_type == 'Gate_fuse':\n fuse_a = tf.concat([a, a_align, a * a_align, a - a_align], axis=2)\n fuse_q = tf.concat([q, q_align, q * q_align, q - q_align], axis=2)\n fuse_a_sigmoid = self.mlp(fuse_a, size, 1, tf.nn.sigmoid, 'fuse_a_sigmoid',\n use_dropout=False, bias=True)\n fuse_q_sigmoid = self.mlp(fuse_q, size, 1, tf.nn.sigmoid, 'fuse_q_sigmoid',\n use_dropout=False, bias=True)\n fuse_a_tanh = self.mlp(fuse_a, size, 1, tf.nn.tanh, 'fuse_a_tanh',\n use_dropout=False, bias=True)\n fuse_q_tanh = self.mlp(fuse_q, size, 1, tf.nn.tanh, 'fuse_q_tanh',\n use_dropout=False, bias=True)\n fuse_A = fuse_a_sigmoid * fuse_a_tanh + a - fuse_a_sigmoid*a\n fuse_Q = fuse_q_sigmoid * fuse_q_tanh + q - fuse_q_sigmoid*q\n elif comp_type == 'simple_fuse':\n fuse_A = tf.concat([a, a_align, a * a_align, a - a_align], axis=2)\n fuse_Q = tf.concat([q, q_align, q * q_align, q - q_align], axis=2)\n elif comp_type == 'mul':\n fuse_A = a * a_align\n fuse_Q = q * q_align\n else:\n raise ValueError('{} method is not implemented!'.format(comp_type))\n return fuse_A, fuse_Q\n\n def _cnn_layer(self, input, mask, name, isreuse=False, dim = -1):\n \"\"\"\n :param\n :return:\n \"\"\"\n # tf.layers.Conv1D(inputs, filters, kernel_size, strides=1)\n # self.n_filters一般指词嵌入的维度\n with tf.variable_scope(name, reuse=isreuse) as scope1:\n all = []\n for i in range(len(self.window_sizes)):\n cnn_out = tf.layers.conv1d(input, self.n_filters, self.window_sizes[i], padding='same',\n activation=tf.nn.relu, name='q_conv_' + str(i))\n all.append(cnn_out)\n cnn_outs = tf.concat(all, axis=-1)\n cnn_outs_padded = tf.multiply(cnn_outs, tf.expand_dims(mask, axis=dim))\n R_max = maxpool(cnn_outs_padded)\n R_men = meanpool(cnn_outs_padded)\n print_shape('R', R_max)\n return R_max, R_men\n\n def _build(self, embeddings, encoder_type = 'sigmoid'):\n self.Embedding = tf.Variable(tf.to_float(embeddings), trainable=False, name='Embedding')\n self.q_embed = tf.nn.dropout(tf.nn.embedding_lookup(self.Embedding, self._ques),keep_prob=self.dropout_keep_prob)\n self.a_embed = tf.nn.dropout(tf.nn.embedding_lookup(self.Embedding, self._ans),keep_prob=self.dropout_keep_prob)\n q_mask = tf.sequence_mask(self._ques_mask, self.ques_len, dtype=tf.float32)\n a_mask = tf.sequence_mask(self._ans_mask, self.ans_len, dtype=tf.float32)\n\n # ~~ Preprocessing\n if encoder_type == 'lstm':\n question, answer = self._preprocess_layer4(self.q_embed, self.a_embed, self.rnn_size) # (bz, len, 2hz)\n elif encoder_type == 'relu_share':\n question, answer = self._preprocess_layer3(self.q_embed, self.a_embed, self.rnn_size) # (bz, len, hz)\n elif encoder_type == 'relu_noshare':\n question, answer = self._preprocess_layer2(self.q_embed, self.a_embed, self.rnn_size) # (bz, len, hz)\n elif encoder_type == 'sigmoid':\n question, answer = self._preprocess_layer(self.q_embed, self.a_embed, self.hidden_size) # (bz, len, hz)\n\n # ~~ Attention and Comparison\n algin_a, algin_q = self._attention_layer2(question, answer, q_mask, a_mask)\n fuse_A, fuse_Q = self._compare_layer(question, algin_q, answer, algin_a, comp_type='mul')\n\n # ~~ Aggregation\n # self, input, 
mask, name, isreuse=False, dim = 1\n R_max_a, R_men_a = self._cnn_layer(fuse_A, a_mask, 'cnn_answer')\n # R_max_q, R_men_q = self._cnn_layer(fuse_Q, q_mask, 'cnn_question')\n\n # ~~ Predict\n # M = tf.concat([R_men_q, R_max_q, R_men_a, R_max_a], axis=-1) # (?, 1, 16*2hz)\n M = R_max_a\n\n fc1 = tf.nn.xw_plus_b(M, self.W_l1, self.b_l1)\n fc1 = tf.nn.tanh(fc1)\n fc2 = tf.nn.xw_plus_b(fc1, self.W_l2, self.b_l2)\n predict = fc2\n print_shape('predict:', predict)\n return predict\n\n def _add_loss_op(self, pred, l2_lambda=0.00001):\n \"\"\"\n 损失节点\n \"\"\"\n y_hat = tf.nn.softmax(pred, dim=-1)\n loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(self._y, pred))\n tf.add_to_collection('total_loss', loss)\n total_loss = tf.add_n(tf.get_collection('total_loss'))\n reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n l2_loss = sum(reg_losses) * l2_lambda\n pointwise_loss = total_loss + l2_loss\n tf.summary.scalar('pointwise_loss', pointwise_loss)\n self.summary_op = tf.summary.merge_all()\n return y_hat, pointwise_loss\n\n def _add_train_op(self, loss):\n \"\"\"\n 训练节点\n \"\"\"\n with tf.name_scope('train_op'):\n # 记录训练步骤\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n opt = tf.train.AdamOptimizer(self.learning_rate)\n # train_op = opt.minimize(loss, self.global_step)\n gradients, v = zip(*opt.compute_gradients(loss))\n clip_gradients = gradients\n if self.clip_value is not None:\n clip_gradients, _ = tf.clip_by_global_norm(gradients, self.clip_value)\n train_op = opt.apply_gradients(zip(clip_gradients, v), global_step=self.global_step)\n return train_op\n\n def mlp(self, bottom, size, layer_num, activation, name, use_dropout=True, reuse=None, bias = True):\n \"\"\"\n bottom: 上层输入\n size: 神经元大小\n layer_num: 神经网络层数\n name: mlp的名称\n reuse: 是否复用层\n initializer: w和b的初始化均采用xavier_initializer()\n \"\"\"\n\n now = bottom\n if use_dropout:\n now = tf.nn.dropout(now, keep_prob=self.dropout_keep_prob)\n for i in range(layer_num):\n now = tf.layers.dense(now, size,\n activation=activation,\n name=name + '_{}'.format(i),\n reuse=reuse,\n use_bias=bias,\n kernel_initializer=xavier_initializer(),\n bias_initializer=xavier_initializer()\n )\n return now","sub_path":"Demo2/model2.py","file_name":"model2.py","file_ext":"py","file_size_in_byte":15058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"61336303","text":"from sklearn.metrics import mean_squared_error\r\nfrom sklearn.pipeline import make_pipeline\r\nimport plotly.express as px\r\nimport plotly.graph_objects as go\r\nfrom plotly.offline import iplot,plot\r\nimport dataSource as ds\r\nimport dataVisualization as dv\r\nfrom copy import deepcopy\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\n\r\n# =============================================================================\r\n# Class Traitement\r\n# =============================================================================\r\n\r\nclass Traitement:\r\n \"\"\"\r\n La classe Traitement permet de construire les données X,y \r\n d'apprentissage et de test.\r\n \"\"\"\r\n \r\n def __init__(self, df, l_attrs_x, labels, freq_train=1000, freq_test=400, preprocessor=None):\r\n #DataFrame\r\n self.df = df\r\n #features\r\n self.l_attrs = l_attrs_x\r\n #targets\r\n self.labels = labels\r\n #preprocessor\r\n self.preprocessor = preprocessor\r\n #fréquences\r\n self.freq_train = freq_train\r\n self.freq_test = freq_test\r\n\r\n #Données 
d'apprentissage/test pour chaque modèle\r\n self.l_Xtrain = []\r\n self.l_Ytrain = []\r\n self.l_Xtest = []\r\n self.l_Ytest = []\r\n \r\n \r\n # Fonction de construction des données de train/test\r\n def set_data_train_test(self, train_size=0.8):\r\n X_train, X_test, y_train, y_test = ds.create_data_xy(self.df, train_size, self.freq_train, self.freq_test)\r\n \r\n #On vide les anciennes données s'il y en a\r\n self.l_Xtrain = []\r\n self.l_Ytrain = []\r\n self.l_Xtest = []\r\n self.l_Ytest = []\r\n \r\n for attrs in self.l_attrs:\r\n self.l_Xtrain.append(X_train[attrs])\r\n self.l_Xtest.append(X_test[attrs])\r\n self.l_Ytrain.append(y_train[self.labels])\r\n self.l_Ytest.append(y_test[self.labels])\r\n\r\n\r\n# =============================================================================\r\n# Class Evaluation \r\n# =============================================================================\r\n\r\nclass Evaluation :\r\n \"\"\"\r\n La classe Evaluation permet d'entrainer des modèles à partir de la\r\n classe Traitement et d'en afficher des résultats.\r\n \"\"\"\r\n \r\n def __init__(self, models, traitement):\r\n self.models = models\r\n self.traitement = traitement\r\n self.preprocessor = self.traitement.preprocessor\r\n\r\n self.l_Xtrain = self.traitement.l_Xtrain\r\n self.l_Ytrain = self.traitement.l_Ytrain\r\n self.l_Xtest = self.traitement.l_Xtest\r\n self.l_Ytest = self.traitement.l_Ytest\r\n self.labels = self.traitement.labels\r\n \r\n #Ajout du preprocessor à la pipeline s'il y en a un\r\n if self.preprocessor is not None :\r\n self.models_pip = [make_pipeline(self.preprocessor[i], self.models[i]) for i in range(len(self.models))]\r\n else: \r\n self.models_pip = self.models\r\n \r\n # for mi in range(len(models)):\r\n # if type(models[mi]).__name__ == 'model_physique1':\r\n # temp = self.l_Xtest[mi].copy()\r\n # temp['index'] = temp.index\r\n # f = temp.groupby(['Trip']).nth(1).reset_index()['index'].values\r\n # self.l_Ytest[mi].drop(f, inplace=True)\r\n \r\n \r\n \r\n def fit(self):\r\n \"\"\"\r\n Fonction qui entraine tous nos modèles.\r\n \"\"\"\r\n for i in range(len(self.models)):\r\n self.models_pip[i].fit(self.l_Xtrain[i], self.l_Ytrain[i])\r\n \r\n def score(self):\r\n \"\"\"\r\n Fonction retournant une liste de scores sur les données de test\r\n pour chaque modèle.\r\n \"\"\"\r\n return [self.models_pip[i].score(self.l_Xtest[i], self.l_Ytest[i]) for i in range(len(self.models))]\r\n \r\n def predict(self, X):\r\n \"\"\"\r\n Fonction retournant une liste de prédiction sur X pour chaque\r\n modèle.\r\n \"\"\"\r\n return [self.models_pip[i].predict(X[i]) for i in range(len(self.models))]\r\n \r\n def getCoef(self):\r\n \"\"\"\r\n Fonction retournant les paramètres appris pour chaque modèle.\r\n \"\"\"\r\n return [self.models[i].coef_ for i in range(len(self.models))]\r\n \r\n def calculMse(self):\r\n ypred = self.predict(self.l_Xtest)\r\n return [mean_squared_error(self.l_Ytest[i],ypred[i]) for i in range(len(self.models))]\r\n \r\n \r\n # ------------------------- Fonctions d'affichage -------------------------\r\n \r\n def afficher_score(self):\r\n \"\"\"\r\n Fonction affichant les scores pour chaque modèle.\r\n \"\"\"\r\n scores = self.score()\r\n for i in range(len(self.models)):\r\n print(f\"Score obtenu pour le modèle {type(self.models[i]).__name__ : <10} : {scores[i]}\")\r\n \r\n def afficher_coef(self):\r\n \"\"\"\r\n Fonction affichant les coefficients pour chaque modèle.\r\n \"\"\"\r\n coefs = self.getCoef()\r\n for i in range(len(self.models)):\r\n 
print(f\"Coefficients obtenu pour le modèle {i : <10} : {coefs[i]}\")\r\n \r\n def afficher_mse(self):\r\n ypred = self.predict(self.l_Xtest)\r\n print(\"MSE sur les données de test:\\n\")\r\n for i in range(len(self.models)):\r\n print(f\"MSE obtenue pour {type(self.models[i]).__name__ : <10} : {mean_squared_error(self.l_Ytest[i],ypred[i])}\")\r\n #print(f\"MSE obtenue pour {type(self.models[i]).__name__ : <10} : {np.mean((self.l_Ytest[i]-ypred[i])**2)}\")\r\n \r\n def afficher_resultats(self):\r\n \"\"\"\r\n Fonction appelant les autres fonctions d'affichage.\r\n \"\"\"\r\n #self.afficher_score()\r\n print()\r\n self.afficher_mse()\r\n print()\r\n #self.afficher_coef()\r\n \r\n #def afficher_pred(self):\r\n \r\n \r\n # ----------------------------- Fonctions MSE -----------------------------\r\n \r\n def tabMSEFreq(self, liste_freq, freq_train,train_size=0.8):\r\n tab_mse = []\r\n models = [deepcopy(m) for m in self.models]\r\n \r\n for freq in liste_freq:\r\n traitement = Traitement(self.traitement.df, self.traitement.l_attrs, self.traitement.labels,\r\n freq_train, freq, self.traitement.preprocessor)\r\n traitement.set_data_train_test(train_size)\r\n \r\n evaluateur = Evaluation(models,traitement)\r\n evaluateur.fit()\r\n \r\n tab_mse.append(evaluateur.calculMse())\r\n \r\n tab_mse = np.array(tab_mse)\r\n \"\"\" \r\n #Affichage MSE pour le premier modèle\r\n plt.figure(figsize=(15,5))\r\n plt.title(\"Erreur MSE en fonction de la fréquence\")\r\n plt.plot(liste_freq, tab_mse[:,0], label=type(models[0]).__name__)\r\n plt.xlabel(\"Temps entre deux points\")\r\n plt.ylabel(\"MSE\")\r\n plt.legend()\r\n plt.show()\r\n \"\"\"\r\n #Affichage des erreurs MSE des modèles en fonction de la fréquence \r\n\r\n \r\n for i in range(len(models)):\r\n plt.figure(figsize=(15,5))\r\n plt.plot(tab_mse[:,i], label=type(models[i]).__name__)\r\n\r\n plt.xticks(np.arange(len(liste_freq)), np.array(liste_freq))\r\n plt.xlabel(\"Fréquences\")\r\n plt.xlabel(\"Temps entre deux points\")\r\n plt.ylabel(\"MSE\")\r\n plt.legend()\r\n plt.show()\r\n\r\n plt.figure(figsize=(10,5))\r\n for i in range(len(models)):\r\n plt.plot(tab_mse[:,i], label=type(models[i]).__name__)\r\n plt.xticks(np.arange(len(liste_freq)), np.array(liste_freq))\r\n plt.xlabel(\"Fréquences\")\r\n plt.xlabel(\"Temps entre deux points\")\r\n plt.ylabel(\"MSE\")\r\n plt.legend()\r\n plt.show()\r\n #Tableau des erreurs MSE en DataFrame\r\n columns = [type(m).__name__ for m in models]\r\n errMSE = pd.DataFrame(tab_mse, columns=columns, index=liste_freq)\r\n \r\n return errMSE\r\n \r\n \r\n def matMSECase(self, freq_train, freq_test, lat_min, long_min, e_x, e_y, min_datapts=20, train_size=0.8, n_interval=10): \r\n #Copie des modèles\r\n models = [deepcopy(m) for m in self.models]\r\n # liste matrices erreurs des cases\r\n l_mat_err= [np.zeros((n_interval, n_interval)) for i in range(len(models))]\r\n \r\n df = self.traitement.df\r\n \r\n #Opérations pour stocker les MSE par effectif et par case\r\n eff = np.unique(df[\"Effectif_case\"])\r\n ind_eff = {eff[i]:i for i in range(len(eff))}\r\n \r\n vit = np.unique(df[\"Vitesse_moy_case\"])\r\n ind_vit = {vit[i]:i for i in range(len(vit))}\r\n \r\n var = np.unique(df[\"Vitesse_var_case\"])\r\n ind_var = {var[i]:i for i in range(len(var))}\r\n \r\n l_mse_eff = [np.zeros(len(eff)) for _ in range(len(models))]\r\n l_mse_vit = [np.zeros(len(vit)) for _ in range(len(models))]\r\n l_mse_var = [np.zeros(len(var)) for _ in range(len(models))]\r\n \r\n eff_count = [np.zeros(len(eff)) for _ in 
range(len(models))]\r\n vit_count = [np.zeros(len(vit)) for _ in range(len(models))]\r\n var_count = [np.zeros(len(var)) for _ in range(len(models))]\r\n \r\n # parcours de toutes les cases\r\n for i in range(n_interval):\r\n for j in range(n_interval):\r\n # récupération des données de la case\r\n case_df=ds.trouve_data_case(df, (i, j), lat_min, long_min, e_x, e_y)\r\n\r\n #On prend les Trips qui ont au moins $min_datapoints$ points\r\n #c'est pas au moins 2 points car tu splits en train et en test, ca aura moins d'un point \r\n ctrips, ccounts = np.unique(case_df[\"Trip\"], return_counts=True)\r\n ctrips = ctrips[ccounts>min_datapts]\r\n case_df = case_df[case_df['Trip'].isin(ctrips)]\r\n\r\n #Cases qui ont au moins 2 trips\r\n if len(pd.unique(case_df[\"Trip\"])) > 1 :\r\n traitement = Traitement(case_df, self.traitement.l_attrs, self.traitement.labels, \r\n freq_train, freq_test, self.traitement.preprocessor)\r\n traitement.set_data_train_test(train_size)\r\n \r\n l_ypred = self.predict(traitement.l_Xtest)\r\n \r\n for mi in range(len(models)):\r\n\r\n mse_ij = mean_squared_error(traitement.l_Ytest[mi],l_ypred[mi])\r\n l_mat_err[mi][n_interval-1-i, j] = mse_ij\r\n \r\n ei = ind_eff[pd.unique(df['Effectif_case'].loc[case_df.index])[0]]\r\n vi = ind_vit[pd.unique(df['Vitesse_moy_case'].loc[case_df.index])[0]]\r\n vi2 = ind_var[pd.unique(df['Vitesse_var_case'].loc[case_df.index])[0]]\r\n l_mse_eff[mi][ei] += mse_ij\r\n l_mse_vit[mi][vi] += mse_ij\r\n l_mse_var[mi][vi2] += mse_ij\r\n eff_count[mi][ei] += 1\r\n vit_count[mi][vi] += 1\r\n var_count[mi][vi2] += 1\r\n \r\n \r\n for mi in range(len(models)):\r\n tmp = np.where(eff_count[mi] != 0)[0]\r\n l_mse_eff[mi][tmp] /= eff_count[mi][tmp]\r\n tmp = np.where(vit_count[mi] != 0)[0]\r\n l_mse_vit[mi][tmp] /= vit_count[mi][tmp]\r\n tmp = np.where(var_count[mi] != 0)[0]\r\n l_mse_var[mi][tmp] /= var_count[mi][tmp]\r\n \r\n fig, ax = plt.subplots(2,2, figsize=(15,13)) \r\n for m in range(len(l_mat_err)):\r\n # fig, ax = plt.subplots(3,2, figsize=(13,13))\r\n #fig.suptitle(f'{type(models[m]).__name__}', fontsize=16)\r\n \r\n ax[m//2][m%2].set_title(f\"Erreur MSE par case : {type(models[m]).__name__}\")\r\n # sns.heatmap(l_mat_err[m], linewidths=.5,annot=True, cmap=\"YlGnBu\", yticklabels=np.arange(n_interval-1, -1, -1), ax=ax[0][0])\r\n sns.heatmap(l_mat_err[m], linewidths=.5,annot=True, cmap=\"YlGnBu\", yticklabels=np.arange(n_interval-1, -1, -1), ax=ax[m//2][m%2])\r\n # ax[0][1].set_title(\"Histogramme des valeurs MSE\")\r\n # val = l_mat_err[m].ravel()[l_mat_err[m].ravel() != 0]\r\n # sns.histplot(val, ax=ax[0][1])\r\n \r\n # ax[1][0].set_title(\"Histplot MSE moy par effectif\")\r\n # h1 = sns.histplot(x=ind_eff.keys(), y=l_mse_eff[m], ax=ax[1][0], cmap=\"RdPu\", cbar=True)\r\n # h1.set(xlabel='Effectif', ylabel='MSE')\r\n \r\n # ax[1][1].set_title(\"Histplot MSE moy par vitesse moy\")\r\n # h2 = sns.histplot(x=ind_vit.keys(), y=l_mse_vit[m], ax=ax[1][1], cmap=\"YlOrRd\", cbar=True)\r\n # h2.set(xlabel='Vitesse_moy', ylabel='MSE')\r\n \r\n # ax[2][0].set_title(\"Histplot MSE moy par variance vitesse\")\r\n # h3 = sns.histplot(x=ind_var.keys(), y=l_mse_var[m], ax=ax[2][0], cmap=\"YlOrRd\", cbar=True)\r\n # h3.set(xlabel='Variance_vit', ylabel='MSE')\r\n \r\n # fig.delaxes(ax[2][1])\r\n \r\n plt.show()\r\n \r\n def scatterPred(self, begin_point, end_point):\r\n models = [deepcopy(m) for m in self.models]\r\n \r\n txt = [f\"Point n°{t}\" for t in range(end_point-begin_point)]\r\n trace_0 = 
go.Scatter(x=self.l_Xtest[0]['Latitude'].iloc[begin_point:end_point], y=self.l_Xtest[0]['Longitude'].iloc[begin_point:end_point], mode=\"lines\",name=\"Xtest\", text=txt)\r\n trace_1 = go.Scatter(x=self.l_Ytest[0].iloc[begin_point:end_point,0], y=self.l_Ytest[0].iloc[begin_point:end_point,1], mode=\"lines+markers\", name=\"Target\", text=txt)\r\n data = [trace_0,trace_1]\r\n l_mse = []\r\n \r\n for mi in range(len(models)):\r\n ypred = models[mi].predict(self.l_Xtest[mi])[begin_point:end_point]\r\n y = self.l_Ytest[mi].iloc[begin_point:end_point].to_numpy()\r\n # mse = (ypred-y)**2\r\n mse = [mean_squared_error(y[i], ypred[i]) for i in range(len(y))]\r\n #txt = [f\"Point n°{i}<br>MSE_Lat = {mse[i,0]}<br>MSE_Long = {mse[i,1]}\" for i in range(len(mse))]\r\n txt = [f\"Point n°{i}<br>
MSE = {mse[i]}\" for i in range(len(mse))]\r\n data.append(go.Scatter(x=ypred[:,0], y=ypred[:,1], mode=\"lines+markers\", name=type(models[mi]).__name__, text=txt))\r\n l_mse.append(np.sum(mse))\r\n \r\n layout = go.Layout(\r\n title='Targets et Predictions',\r\n xaxis = dict(\r\n title='Latitude',\r\n ticklen = 5,\r\n showgrid = True,\r\n zeroline = False\r\n ),\r\n yaxis = dict(\r\n title='Logitude',\r\n ticklen=5,\r\n showgrid=True,\r\n zeroline=False,\r\n )\r\n )\r\n\r\n fig = go.Figure(data=data, layout=layout)\r\n iplot(fig, filename=\"ScatterPred\")\r\n \r\n return l_mse","sub_path":"Eval.py","file_name":"Eval.py","file_ext":"py","file_size_in_byte":15241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"540045336","text":"import asyncio\nimport time\n\nnow = lambda : time.time()\n\nasync def do_some_work(x):\n print('Waiting: ', x)\n return \"Done\"\n\n\n\nstart = now()\n\nloop = asyncio.get_event_loop()\ntasks = [loop.create_task(do_some_work(i)) for i in range(4)]\nloop.run_until_complete(asyncio.wait(tasks))\n\n\n\n\nprint('TIME: ', now() - start)\n","sub_path":"groutine/async_ascompleted_2.py","file_name":"async_ascompleted_2.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"39897701","text":"# x만큼 간격이 있는 n개의 숫자\n# 문제 설명\n# 함수 solution은 정수 x와 자연수 n을 입력 받아, x부터 시작해 x씩 증가하는 숫자를 n개 지니는 리스트를 리턴해야 합니다. 다음 제한 조건을 보고, 조건을 만족하는 함수, solution을 완성해주세요.\n\n# 제한 조건\n# x는 -10000000 이상, 10000000 이하인 정수입니다.\n# n은 1000 이하인 자연수입니다.\n\n\ndef solution(x, n):\n answer = []\n if x == 0 and n == 0:\n return answer\n elif x == 0 and n != 0:\n answer = [0 for i in range(n)]\n elif x > 0:\n for i in range(x, (x*n)+1, x):\n answer.append(i)\n else:\n for i in range(x, (x*n)-1, x):\n answer.append(i)\n return answer\n","sub_path":"programmers/step_by_X_num.py","file_name":"step_by_X_num.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"378426744","text":"from . 
import views\nfrom django.urls import path\n\n\napp_name = 'superheroes'\nurlpatterns = [\n path('', views.index, name='index'),\n path('/', views.detail, name='detail'),\n path('new/', views.create, name='create_new_superhero')\n]\n","sub_path":"superhero_database/superheroes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"281919891","text":"##########################################################\n## configuration for XZZ2l2nu \n##########################################################\n\nimport CMGTools.XZZ2l2nu.fwlite.Config as cfg\nfrom CMGTools.XZZ2l2nu.fwlite.Config import printComps\nfrom CMGTools.XZZ2l2nu.RootTools import *\nfrom PhysicsTools.HeppyCore.framework.heppy_loop import getHeppyOption\n\n\n#Load all common analyzers\nfrom CMGTools.XZZ2l2nu.analyzers.coreXZZ_cff import *\n\n#-------- SAMPLES AND TRIGGERS -----------\nfrom CMGTools.XZZ2l2nu.samples.loadSamples80x import *\nselectedComponents = mcSamples+dataSamples\n\ntriggerFlagsAna.triggerBits ={\n \"ISOMU\":triggers_1mu_iso,\n \"MU\":triggers_1mu_noniso,\n \"MUv2\":triggers_1mu_noniso_v2,\n \"MU50\":triggers_1mu_noniso_M50,\n \"ISOELE\":triggers_1e,\n \"ELE\":triggers_1e_noniso,\n \"ELEv2\":triggers_1e_noniso_v2,\n \"ELE115\":triggers_1e_noniso_E115,\n \"MUMU\": triggers_mumu,\n \"MUMUNOISO\":triggers_mumu_noniso,\n \"ELEL\": triggers_ee,\n \"HT800\":triggers_HT800,\n \"HT900\":triggers_HT900,\n \"JJ\":triggers_dijet_fat,\n \"MET90\":triggers_met90_mht90+triggers_metNoMu90_mhtNoMu90,\n \"MET120\":triggers_metNoMu120_mhtNoMu120,\n \"PHOTONHZZ\": triggers_photon_unbias,\n \"ALLPHOTON\": triggers_all_photons\n}\n\n#-------- Analyzer\nfrom CMGTools.XZZ2l2nu.analyzers.treeXZZ_cff import *\n\nmultiStateAna.processTypes = ['PhotonJets']\nmultiStateAna.selectPhotonJets = (lambda x: x.leg1.pt()>20.0 and x.leg2.pt()>-0.0)\nvvSkimmer.required = ['PhotonJets']\n\nvvTreeProducer.globalVariables = [\n NTupleVariable(\"nVert\", lambda ev: len(ev.goodVertices), int, help=\"Number of good vertices\"), \n NTupleVariable(\"nVertAll\", lambda ev: len(ev.vertices), int, help=\"Number of good vertices\"), \n NTupleVariable(\"rho\", lambda ev: ev.rho , float),\n ]\nvvTreeProducer.globalObjects = { }\n\nvvTreeProducer.collections = {\n\t \"jets\" : NTupleCollection(\"jet\",JetType,100, help=\"all jets in miniaod\"),\n \"selectedPhotons\" : NTupleCollection(\"photon\",photonType,100, help=\"selected photons in miniaod\"),\n \"PhotonJets\" : NTupleCollection(\"gjet\",PhotonJetType ,100, help=\"photon and MET\"),\n }\n\n\n\n\n#-------- SEQUENCE\ncoreSequence = [\n skimAnalyzer,\n genAna,\n jsonAna,\n triggerAna,\n pileUpAna,\n vertexAna,\n #lepAna,\n photonAna, \n jetAna,\n metAna,\n multiStateAna,\n eventFlagsAna,\n triggerFlagsAna,\n]\n \n#sequence = cfg.Sequence(coreSequence)\nsequence = cfg.Sequence(coreSequence+[vvSkimmer,vvTreeProducer])\n#sequence = cfg.Sequence(coreSequence+[vvSkimmer,multtrg,vvTreeProducer])\n#sequence = cfg.Sequence(coreSequence+[vvSkimmer,fullTreeProducer])\n \n\n#-------- HOW TO RUN\ntest = 1\nif test==1:\n # test a single component, using a single thread.\n #selectedComponents = dataSamples\n #selectedComponents = mcSamples\n #selectedComponents = SinglePhoton\n #selectedComponents = [SinglePhoton_Run2016D_PromptReco_v2]\n #selectedComponents = [GJet_Pt_20toInf_DoubleEMEnriched]\n #selectedComponents = [GJet_Pt_20to40_DoubleEMEnriched, GJet_Pt_40toInf_DoubleEMEnriched]\n 
selectedComponents = [GJet_Pt_20toInf_DoubleEMEnriched, GJet_Pt_20to40_DoubleEMEnriched, GJet_Pt_40toInf_DoubleEMEnriched]\n #selectedComponents = [SingleMuon_Run2015D_Promptv4,SingleElectron_Run2015D_Promptv4]\n #selectedComponents = [SingleMuon_Run2015C_25ns_16Dec]\n #selectedComponents = [SingleMuon_Run2016B_PromptReco_v2] \n #selectedComponents = SingleMuon+SingleElectron\n #selectedComponents = [SingleMuon_Run2016B_PromptReco_v2,SingleElectron_Run2016B_PromptReco_v2] \n #selectedComponents = [SingleMuon_Run2016D_PromptReco_v2,SingleElectron_Run2016D_PromptReco_v2] \n #selectedComponents = [MuonEG_Run2015D_16Dec] #MuEG\n #selectedComponents = [RSGravToZZToZZinv_narrow_800]\n #selectedComponents = [DYJetsToLL_M50]\n #selectedComponents = [DYJetsToLL_M50_MGMLM_Ext1]\n #selectedComponents = [BulkGravToZZToZlepZinv_narrow_600] \n #selectedComponents = signalSamples\n #selectedComponents = [TTTo2L2Nu]\n #selectedComponents = [BulkGravToZZ_narrow_800]\n #selectedComponents = [BulkGravToZZToZlepZhad_narrow_800]\n for c in selectedComponents:\n #c.files = c.files[3:10]\n c.splitFactor = (len(c.files)/5 if len(c.files)>5 else 1)\n #c.splitFactor = 7\n #c.triggers=triggers_1mu_noniso\n #c.triggers=triggers_1e_noniso\n\n## output histogram\noutputService=[]\nfrom PhysicsTools.HeppyCore.framework.services.tfile import TFileService\noutput_service = cfg.Service(\n TFileService,\n 'outputfile',\n name=\"outputfile\",\n fname='vvTreeProducer/tree.root',\n option='recreate'\n )\noutputService.append(output_service)\n\nfrom PhysicsTools.HeppyCore.framework.eventsfwlite import Events\nfrom CMGTools.TTHAnalysis.tools.EOSEventsWithDownload import EOSEventsWithDownload\nevent_class = EOSEventsWithDownload\nevent_class = Events\nif getHeppyOption(\"nofetch\"):\n event_class = Events\nconfig = cfg.Config( components = selectedComponents,\n sequence = sequence,\n services = [],\n events_class = event_class)\n\n\n\n\n","sub_path":"XZZ2l2nu/cfg/mc80x/run_xzz2l2nu_80x_cfg_photon_mc.py","file_name":"run_xzz2l2nu_80x_cfg_photon_mc.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"44179572","text":"# -*- coding: utf-8 -*-\n'''\nCreated on Fri Aug 2 11:57:41 2019\nUpdated 20220904 22:42WER\n\n@authors: wrosing, mfitz\n'''\n\n#import json\nimport sys\n#import time\nimport os\nimport pathlib\nimport socket\nimport glob\n\n# This routine here removes all mention of previous configs from the path... for safety and my local computer got clogged with all manner of configs in the path (MTF)\npathRemovals=[]\nfor q in range(len(sys.path)):\n #print (sys.path[q])\n if 'ptr-observatory' in sys.path[q] and 'configs' in sys.path[q]:\n print ('Removing old config path: ' + str(sys.path[q]))\n pathRemovals.append(sys.path[q])\n\nfor remover in pathRemovals:\n sys.path.remove(remover)\n\npathdone=0\n\n## First try to get the hostname from a file in the directory above (..) ptr-observatory\ncwd=str(pathlib.Path().resolve())\nhwd=cwd.replace('ptr-observatory','')\nhostnamefile=glob.glob(hwd+'hostname*')\ntry:\n site_name=hostnamefile[0].split('hostname')[1]\n print(site_name)\n print ('Adding new config path: ' + str(os.path.join(pathlib.Path().resolve(),\"configs\", site_name)))\n sys.path.append(os.path.join(pathlib.Path().resolve(),\"configs\", site_name))\n pathdone=1\nexcept:\n print (\"Could not find a hostname* file in the directory above ptr-observatory e.g. 
hostnamesro\")\n print (\"trying another method\")\n\n\n#try:\n\n\nif pathdone==0:\n print (\"Attempting hostname approach to config file\")\n\n host_site = socket.gethostname()[:3].lower() # NB May be better to split on\n # '-' and use first part of hostname.\n if host_site =='saf':\n host_site == 'aro' # NB NB THIS is a blatant hack.\n print ('Adding new config path: ' + str(os.path.join(pathlib.Path().resolve(),\"configs\", host_site)))\n sys.path.append(os.path.join(pathlib.Path().resolve(),\"configs\", host_site))\n\ntry:\n from site_config import *\n\nexcept:\n\n print (\"Failed the hostname approach to config file\")\n print (str(host_site) + \" isn't a real place or there isn't a config file\\\n that I can find!|n\")\n\n try:\n site_name = input('What site am I running at?\\n')\n sys.path.append(os.path.join(pathlib.Path().resolve(),\"configs\", \\\n site_name))\n try:\n from site_config import *\n except:\n print (str(site_name) + \" isn't a real place, or there isn't a \\\n config file that I can find!\")\n sys.exit()\n except:\n print('You need to supply a correct site name.')\n sys.exit()","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"110191679","text":"import os\nfrom django.shortcuts import render\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template import loader\nfrom django.http import HttpResponse\n\nfrom django_generic_plus.views import GenericModelDetailView\n\nfrom .models import HueBridge, HueLight, MQTTBroker, MQTTSwitch\n\n\ndef index(request):\n # check if a bridge is available yet\n try:\n bridge = HueBridge.objects.get(name=\"bridgeAC\", ip=\"192.168.0.16\")\n except ObjectDoesNotExist:\n bridge = HueBridge.objects.create(name=\"bridgeAC\", ip=\"192.168.0.16\")\n # bridge.save()\n\n if request.method == \"POST\":\n print(request.POST)\n return HttpResponse(\"sucess\")\n lights = []\n for api_id, api_light in bridge.api.get_light_objects('id').items():\n try:\n light = HueLight.objects.get(name=api_light.name, bridge=bridge)\n except:\n light = HueLight.objects.create(name=api_light.name,\n api_id=api_id,\n bridge=bridge)\n # light.save()\n lights.append(light)\n # print( bridge.api.get_light(api_id) )\n print( light.api._get().get(\"modelid\") )\n template = loader.get_template('django_hai_hue/index.html')\n context = {\"bridge\": bridge, \"lights\": lights}\n return HttpResponse(template.render(context, request))\n\ndef mqtt(request):\n port = 1883\n url = \"192.168.0.192\"\n name = \"mosquitto\"\n try:\n # client.connect(, 1883, 60)\n\n broker = MQTTBroker.objects.get(name=name, url=url)\n except ObjectDoesNotExist:\n broker = MQTTBroker.objects.create(name=name, url=url, port = port)\n name = \"arduino\"\n feed = \"/house/light1\"\n try:\n switch = MQTTSwitch.objects.get(name=name, feed=feed)\n except ObjectDoesNotExist:\n switch = MQTTSwitch.objects.create(name=name, feed=feed, broker=broker, state=False)\n template = loader.get_template('django_hai_hue/mqtt.html')\n context = {\"broker\": broker, \"switch\": switch}\n return HttpResponse(template.render(context, request))\n\ndef mqtt_get(request):\n name = \"arduino\"\n feed = \"/house/light1\"\n switch = MQTTSwitch.objects.get(name=name, feed=feed)\n switch.switch_state()\n switch.save()\n return mqtt(request)\n\ndef change_switch_state(request,**kwargs):\n api_info = \"\"\" This route may be used with a POST requests to set the\n hsv value 
of a Hue light.\n Format: {\n light-id: $ID\n }\"\"\"\n api_action = os.path.basename(os.path.normpath(request.path_info))\n\n if request.method == \"POST\":\n print(request.POST)\n try:\n api_id = int(request.POST[\"light-id\"][0])\n except:\n return HttpResponse(\"input format malformed\")\n try:\n l = HueLight.objects.get(api_id=api_id)\n except:\n return HttpResponse(\"light notfound\")\n l.api.on = not l.api.on\n return HttpResponse(\"success\")\n template = loader.get_template('django_hai_hue/api_info.html')\n context = {\"api_info\" : api_info,\"api_action\" : api_action}\n return HttpResponse(template.render(context, request))\n\n\ndef sethsv(request, **kwargs):\n api_info = \"\"\" This route may be used with a POST requests to set the\n hsv value of a Hue light.\n Format: {\n light-id: $ID\n hue: $hue\n saturation: $saturation\n brightness: $brightness\n }\"\"\"\n api_action = os.path.basename(os.path.normpath(request.path_info))\n\n if request.method == \"POST\":\n hue_norm = 65535\n brightness_norm = 255\n saturation_norm = 255\n try:\n api_id = int(request.POST[\"light-id\"][0] )\n hue = int(float(request.POST[\"hue\"]) * hue_norm / 360)\n saturation = int(float(request.POST[\"saturation\"]) * saturation_norm)\n brightness = int(float(request.POST[\"brightness\"]) * brightness_norm)\n except:\n return HttpResponse(\"input format malformed\")\n try:\n l = HueLight.objects.get(api_id=api_id)\n except:\n return HttpResponse(\"light notfound\")\n if not l.api.on:\n return HttpResponse(\"light off\")\n l.api.hue = hue\n l.api.saturation = saturation\n l.api.brightness = brightness\n return HttpResponse(\"success\")\n\n template = loader.get_template('django_hai_hue/api_info.html')\n context = {\"api_info\" : api_info,\"api_action\" : api_action}\n return HttpResponse(template.render(context, request))\n\nclass HueLightView(GenericModelDetailView):\n ''' Base class for Hue Light action views '''\n","sub_path":"django_hai_hue/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"286708","text":"import pickle as pickle\nimport logging\nimport os\n\nfrom freenasUI.system.alert import alertPlugins, Alert, BaseAlert\nfrom lockfile import LockFile, LockTimeout\n\nlog = logging.getLogger('system.alertmods.collectd')\n\nCOLLECTD_FILE = '/tmp/.collectdalert'\n\n\nclass CollectdAlert(BaseAlert):\n\n def run(self):\n alerts = []\n\n if not os.path.exists(COLLECTD_FILE):\n return alerts\n\n lock = LockFile(COLLECTD_FILE)\n\n while not lock.i_am_locking():\n try:\n lock.acquire(timeout=5)\n except LockTimeout:\n return alerts\n\n with open(COLLECTD_FILE, 'rb') as f:\n try:\n data = pickle.loads(f.read())\n except:\n data = {}\n\n lock.release()\n\n for k, v in list(data.items()):\n if v['Severity'] == 'WARNING':\n l = Alert.WARN\n else:\n l = Alert.CRIT\n if k == 'ctl-ha/disk_octets':\n msg = \"CTL HA link is actively used, check initiators connectivity\"\n else:\n msg = k\n alerts.append(Alert(l, msg))\n\n return alerts\n\nalertPlugins.register(CollectdAlert)\n","sub_path":"gui/system/alertmods/collectd.py","file_name":"collectd.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"147378244","text":"'''\n处理《续资治通鉴》电子书\n'''\n\n\nimport os\nimport re\n\ndirname = r'e:\\temp\\续资治通鉴'\n\n\ndef search_index(path):\n all_index = []\n for (thisdir, subs, files) in os.walk(dirname):\n 
for f in files:\n if f.endswith('.htm') and f.startswith('xtj'):\n all_index.append(os.path.join(thisdir, f))\n return all_index\n\n\ndef read_htm(file):\n with open(file, 'r', errors='ignore') as f:\n htm_lines = f.readlines()\n return htm_lines\n\n\ndef proc_line(lines):\n txt = ''\n s_vol = 'style=\"font-size: 28pt\">(.*?)
0:\n txt += result + '\\n'\n result = get_useful(l, s_title)\n if len(result) > 0:\n txt += result + '\\n'\n result = get_useful(l, s_discription)\n if len(result) > 0:\n txt += result + '\\n'\n result = get_useful(l, s_king)\n if len(result) > 0:\n txt += '\\n' + result + '\\n'\n result = get_useful(l, s_year)\n if len(result) > 0:\n txt += result + '\\n'\n result = get_useful(l, s_main)\n if len(result) > 0:\n txt += ' ' + result + '\\n'\n return txt\n\n\ndef get_useful(line, pattern):\n p = re.compile(pattern, re.S)\n m = re.search(p, line)\n if m is None:\n return ''\n else:\n return m.group(1).strip()\n\n\ndef main():\n l_all_txt = search_index(dirname)\n # print(len(l_all_txt), l_all_txt)\n\n # htm_lines = read_htm(r'e:\\temp\\资治通鉴\\zztj_001.htm')\n # print(proc_line(htm_lines))\n # print(extract_txt(htm))\n\n # for l in l_all_txt:\n # htm = read_htm(l)\n # print(extract_txt(htm), l)\n\n out_file = os.path.join(dirname, '续资治通鉴.txt')\n with open(out_file, 'w+') as f:\n for l in l_all_txt:\n htm_lines = read_htm(l)\n f.write(proc_line(htm_lines))\n f.write('\\n' * 2)\n f.write('* ' * 15)\n f.write('\\n' * 2)\n print('[Read]', l)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"xtj.py","file_name":"xtj.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"177992486","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 14 08:07:36 2020\n\nClasses representing different kind of Ordinary Differential Equations (ODEs).\n\n@author: Carollo Andrea - Tomasi Matteo\n\"\"\"\n\nimport numpy as np\nimport pinocchio as pin\nfrom numpy.linalg import norm\n\n\nclass ContactPoint_ODE:\n ''' A point on the robot surface that can make contact with surfaces.\n '''\n def __init__(self, model, data, frame_name):\n self.model = model # robot model\n self.data = data # robot data\n self.frame_name = frame_name # name of reference frame associated to this contact point\n self.frame_id = model.getFrameId(frame_name) # id of the reference frame\n self.active = False # True if this contact point is in contact\n self.p0 = np.zeros(3)\n \n def get_position(self):\n ''' Get the current position of this contact point \n '''\n M = self.data.oMf[self.frame_id]\n return M.translation\n \n def get_velocity(self):\n M = self.data.oMf[self.frame_id]\n R = pin.SE3(M.rotation, 0*M.translation) # same as M but with translation set to zero\n v_local = pin.getFrameVelocity(self.model, self.data, self.frame_id)\n v_world = (R.act(v_local)).linear # convert velocity from local frame to world frame\n return v_world\n \n def get_jacobian(self):\n J6 = pin.getFrameJacobian(self.model, self.data, self.frame_id, pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)\n return J6[:3,:]\n \n \nclass ContactSurface_ODE:\n ''' A visco-elastic planar surface\n '''\n def __init__(self, name, pos, normal, K, B, mu):\n self.name = name # name of this contact surface\n self.x0 = pos # position of a point of the surface\n self.normal = normal # direction of the normal to the surface\n self.K = K # stiffness of the surface material\n self.B = B # damping of the surface material\n self.mu = mu # friction coefficient of the surface\n self.bias = self.x0.dot(self.normal)\n \n def check_collision(self, p):\n ''' Check the collision of the given point\n with this contact surface. 
If the point is not\n inside this surface, then return False.\n '''\n normal_penetration = self.bias - p.dot(self.normal)\n if(normal_penetration < 0.0):\n return False # no penetration\n return True\n\n def compute_force(self, contact_point, q, vq, robot):\n cp = contact_point\n # cp.p0 anchor_point\n \n # get position of the contact point\n H = robot.framePlacement(q, cp.frame_id, False)\n p = H.translation\n\n # get velocity of the contact point\n R = pin.SE3(H.rotation, 0*H.translation) # same as M but with translation set to zero\n v_local = robot.frameVelocity(q, vq, cp.frame_id, False)\n v = (R.act(v_local)).linear \n\n # compute contact force using spring-damper law\n f = self.K.dot(cp.p0 - p) - self.B.dot(v)\n \n # check whether contact force is outside friction cone\n f_N = f.dot(self.normal) # norm of normal force\n f_T = f - f_N*self.normal # tangential force (3d)\n f_T_norm = norm(f_T) # norm of tangential force\n \n if(f_T_norm > self.mu*f_N):\n # contact is slipping \n # f_T_norm = self.mu*f_N\n t_dir = f_T/np.amax(np.absolute(f_T))\n t_dir = t_dir/norm(t_dir)\n \n # saturate force at the friction cone boundary\n f = f_N*self.normal + self.mu*f_N*t_dir\n # update anchor point so that f is inside friction cone\n if (f_T_norm >= 8.9e999):\n f_T_norm = 8.9e999\n delta_p0 = (f_T_norm - self.mu*f_N) / self.K[0,0]\n # print( \"Delta_p0 : \", delta_p0, \" - f_t_norm : \", f_T_norm.T, \" - f_N_norm : \", norm(f_N))\n # print( \"Delta_p0 : \", delta_p0, \" - t_dir : \", t_dir.T)\n cp.p0 -= delta_p0*t_dir\n\n \n return f\n \n\nclass ODE:\n def __init__(self, name):\n self.name = name\n\n def f(self, x, u, t):\n return np.zeros(x.shape)\n\n\nclass ODESin:\n ''' ODE defining a sinusoidal trajectory '''\n\n def __init__(self, name, A, f, phi):\n self.name = name\n self.A = A\n self.two_pi_f = 2*np.pi*f\n self.phi = phi\n\n def f(self, x, u, t):\n return self.two_pi_f*self.A*np.cos(self.two_pi_f*t + self.phi)\n\n\nclass ODELinear:\n ''' A linear ODE: dx = A*x + b\n '''\n\n def __init__(self, name, A, B, b):\n self.name = name\n self.A = A\n self.B = B\n self.b = b\n self.nx = A.shape[0]\n self.nu = B.shape[1]\n\n def f(self, x, u, t, jacobian=False):\n dx = self.A.dot(x) + self.b + self.B.dot(u)\n if(jacobian):\n return (np.copy(dx), np.copy(self.A), np.copy(self.B))\n return np.copy(dx)\n\n\nclass ODEStiffDiehl:\n def f(self, x, u, t):\n return -50.0*(x - np.cos(t))\n\n\nclass ODEPendulum:\n def __init__(self):\n self.g = -9.81\n\n def f(self, x, u, t):\n dx = np.zeros(2)\n dx[0] = x[1]\n dx[1] = self.g*np.sin(x[0])\n return dx\n\n\nclass ODERobot:\n ''' An ordinary differential equation representing a robotic system\n '''\n\n def __init__(self, name, robot):\n ''' robot: instance of RobotWrapper\n '''\n self.name = name\n self.robot = robot\n self.nu = robot.na\n nq, nv = self.robot.nq, self.robot.nv\n self.nx = nq+nv\n self.nu = self.robot.na\n self.Fx = np.zeros((self.nx, self.nx))\n self.Fx[:nv, nv:] = np.identity(nv)\n self.Fu = np.zeros((self.nx, self.nu))\n self.dx = np.zeros(2*nv)\n\n ''' System dynamics '''\n\n def f(self, x, u, t, jacobian=False):\n nq = self.robot.nq\n nv = self.robot.nv\n model = self.robot.model\n data = self.robot.data\n q = x[:nq]\n v = x[nq:]\n\n if(nv == 1):\n # for 1 DoF systems pin.aba does not work (I don't know why)\n pin.computeAllTerms(model, data, q, v)\n ddq = (u-data.nle) / data.M[0]\n else:\n ddq = pin.aba(model, data, q, v, u)\n\n self.dx[:nv] = v\n self.dx[nv:] = ddq\n\n if(jacobian):\n pin.computeABADerivatives(model, data, q, v, u)\n 
self.Fx[:nv, :nv] = 0.0\n self.Fx[:nv, nv:] = np.identity(nv)\n self.Fx[nv:, :nv] = data.ddq_dq\n self.Fx[nv:, nv:] = data.ddq_dv\n self.Fu[nv:, :] = data.Minv\n\n return (np.copy(self.dx), np.copy(self.Fx), np.copy(self.Fu))\n\n return np.copy(self.dx)\n\n def f_x_fin_diff(self, x, u, t, delta=1e-8):\n ''' Partial derivatives of system dynamics w.r.t. x computed via finite differences '''\n f0 = self.f(x, u, t)\n Fx = np.zeros((self.nx, self.nx))\n for i in range(self.nx):\n xp = np.copy(x)\n xp[i] += delta\n fp = self.f(xp, u, t)\n Fx[:, i] = (fp-f0)/delta\n return Fx\n\n def f_u_fin_diff(self, x, u, t, delta=1e-8):\n ''' Partial derivatives of system dynamics w.r.t. u computed via finite differences '''\n f0 = self.f(x, u, t)\n Fu = np.zeros((self.nx, self.nu))\n for i in range(self.nu):\n up = np.copy(u)\n up[i] += delta\n fp = self.f(x, up, t)\n Fu[:, i] = (fp-f0)/delta\n return Fu\n\n\nclass ODERobot_wc:\n ''' An ordinary differential equation representing a robotic system\n '''\n\n def __init__(self, name, robot, contact_points, contact_surfaces):\n ''' robot: instance of RobotWrapper\n '''\n self.name = name\n self.robot = robot\n self.nu = robot.na\n nq, nv = self.robot.nq, self.robot.nv\n self.nx = nq+nv\n self.nu = self.robot.na\n self.Fx = np.zeros((self.nx, self.nx))\n self.Fx[:nv, nv:] = np.identity(nv)\n self.Fu = np.zeros((self.nx, self.nu))\n self.dx = np.zeros(2*nv)\n self.contact_points = contact_points\n self.contact_surfaces = contact_surfaces\n\n nk = 3*len(self.contact_points)*len(self.contact_surfaces) # size of contact force vector\n self.fc = np.zeros(nk) # contact forces\n self.Jc = np.zeros((nk, self.robot.model.nv)) # contact Jacobian\n\n ''' System dynamics '''\n\n def f(self, x, u, t, jacobian=False):\n nq = self.robot.nq\n nv = self.robot.nv\n model = self.robot.model\n data = self.robot.data\n q = x[:nq]\n v = x[nq:]\n\n i = 0\n self.robot.computeAllTerms(q,v)\n for cs in self.contact_surfaces: # for each candidate contact surface\n for cp in self.contact_points: # for each candidate contact point\n # Contact point placement\n H = self.robot.framePlacement(q, cp.frame_id, False)\n p_c = H.translation\n\n if(cs.check_collision(p_c)): # check whether the point is colliding with the surface\n if(cp.active == False): # if the contact was not already active\n cp.active = True\n cp.p0 = np.copy(p_c) # anchor point\n\n # Compute the contact force\n self.fc[i:i+3] = cs.compute_force(cp, q, v, self.robot)\n # compute the jacobian\n self.Jc[i:i+3, :] = cp.get_jacobian()\n i += 3\n\n else: # if the point is not colliding more\n if(cp.active): # if the contact was already active\n cp.active = False\n\n # Contact force equal to 0\n self.fc[i:i+3] = np.zeros(3)\n # jacobian equl to zero\n self.Jc[i:i+3, :] = np.zeros((3, self.robot.model.nv))\n i += 3\n # compute JT*force from contact point\n u_con = u + self.Jc.T.dot(self.fc)\n\n if(nv == 1):\n # for 1 DoF systems pin.aba does not work (I don't know why)\n ddq = (u_con-data.nle) / data.M[0]\n else:\n ddq = pin.aba(model, data, q, v, u_con)\n\n self.dx[:nv] = v\n self.dx[nv:] = ddq\n\n if(jacobian):\n pin.computeABADerivatives(model, data, q, v, u_con)\n self.Fx[:nv, :nv] = 0.0\n self.Fx[:nv, nv:] = np.identity(nv)\n self.Fx[nv:, :nv] = data.ddq_dq\n self.Fx[nv:, nv:] = data.ddq_dv\n self.Fu[nv:, :] = data.Minv\n\n return (np.copy(self.dx), np.copy(self.Fx), np.copy(self.Fu))\n\n return np.copy(self.dx)\n\n def f_x_fin_diff(self, x, u, t, delta=1e-8):\n ''' Partial derivatives of system dynamics w.r.t. 
x computed via finite differences '''\n f0 = self.f(x, u, t)\n Fx = np.zeros((self.nx, self.nx))\n for i in range(self.nx):\n xp = np.copy(x)\n xp[i] += delta\n fp = self.f(xp, u, t)\n Fx[:, i] = (fp-f0)/delta\n return Fx\n\n def f_u_fin_diff(self, x, u, t, delta=1e-8):\n ''' Partial derivatives of system dynamics w.r.t. u computed via finite differences '''\n f0 = self.f(x, u, t)\n Fu = np.zeros((self.nx, self.nu))\n for i in range(self.nu):\n up = np.copy(u)\n up[i] += delta\n fp = self.f(x, up, t)\n Fu[:, i] = (fp-f0)/delta\n return Fu","sub_path":"Course_Proj/Assignment_3/Code/OCP/ode.py","file_name":"ode.py","file_ext":"py","file_size_in_byte":11692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"106917631","text":"\nimport torch \nfrom torch.autograd import Variable \nimport cv2 \nfrom data import BaseTransform, VOC_CLASSES as labelmap\nfrom ssd import build_ssd \nimport imageio \n \n# Detection\ndef detect (image, net, transform):\n h, w = image.shape[:2] \n #perform transformations\n image_t = transform(image)[0] #transform the image and get the first index to get the right dimension and colors\n image_t2 = torch.from_numpy(image_t).permute(2,0,1) #turning numpy array into torch tensor and then convert RBG to GRB since that is how the NN was changed\n image_t3 = Variable(image_t2.unsqueeze(0)) #add fake dimension then turn into torch variable\n y = net(image_t3) #putting the image through the neural network and put it into var y\n detections = y.data # gives the torch tensor of the y (since otherwise we would get gradient and tensor) \n scale = torch.Tensor([w,h,w,h]) # we create a tensor object of dimensions [width, height, width, height] to normalize the position of the image between 0-1\n # elements of detections tensor : detections = [batch, number of classes, number of occurences of class, (score, x0,y0,x1,y1) ]\n # where batch is from the fake dimension, classes are objects (planes, boats, dogs), and the score rates how likely it is that image and effects occurance, 0.6>+1 occurence, <0.6 occurence is not changed and then we get the upper left and bottom right of the detected image\n for i in range(detections.size(1)): #detections.size(1) is number of classes\n j = 0 # We initialize the loop variable j that will correspond to the occurrences of the class\n while detections[0,i,j,0]>=0.6: # We take into account all the occurrences j of the class i that have a matching score larger than 0.6.\n point = (detections[0,i,j,1:]*scale).numpy() #grabs coordinates as torch tensor so convert to numpy\n cv2.rectangle( image, ((point[0]), int(point[1])), (int(point[2]), int(point[3])), (255,0,0), 2) #draw rectangle\n cv2.putText(image, labelmap[i-1], ((point[0]), int(point[1])), cv2.FONT_HERSHEY_SIMPLEX,2,(255,255,255),2, cv2.LINE_AA) #writes text on upper left corner of detected image\n j+=1\n return image\n\n# SSD Neural Network\nnet = build_ssd('test') #train/test phase\nnet.load_state_dict(torch.load('ssd300_mAP_77.43_v2.pth', map_location = lambda storage, loc: storage)) #load pre-trained weights and open it with a torch tensor \n\n# Perform transformations so it is compatible with the NN\ntransform = BaseTransform(net.size, (104/256.0, 117/256.0, 123/256.0)) # We create an object of the BaseTransform class, a class that will do the required transformations so that the image can be the input of the neural network \n #the argumenets above are the target size and the scale under which the net was trained\n\n# Object Detection and 
Video Creation\nmy_video = 'Hockey.mp4'\n\nreader = imageio.get_reader(my_video) #open video\nframesps = reader.get_meta_data()['fps'] #get fps of the frames\nwriter = imageio.get_writer('output_video.mp4', fps=framesps) #create the detected video file with stated fps\nvid = cv2.VideoCapture(my_video)\nlength = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))\nfor i, image in enumerate(reader): \n image = detect(image,net.eval(),transform) #net.eval() targets the detections \n writer.append_data(image) #add frame to video\n print('Rendering Frame ', i, 'of', length) #print frame\nprint('Finished Rendering Video')\nwriter.close() ","sub_path":"player-tracking.py","file_name":"player-tracking.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"281837489","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url, include\nfrom core.ridge.views import *\n\nurlpatterns = patterns('',\n url(r'^$', home, name='home'),\n\n url(r'^tournament/add/$', tournament_add, name='tournament_add'),\n url(r'^tournament/(?P\\d+)/$',tournament_view, name='tournament_view'),\n url(r'^tournament/delete/(?P\\d+)/$',tournament_delete, name='tournament_delete'),\n\n url(r'^set_result$', set_result, name='rate'),\n url(r'^proceed$', proceed, name='proceed'),\n\n url(r'^user/$', user_auth, name='user_auth'),\n url(r'^user/logout$', user_logout, name='user_logout'),\n\n url(r'^chessplayer/add/$', chessplayer_add, name='chessplayer_add'),\n url(r'^test/$', for_test, name='test'),\n)","sub_path":"core/ridge/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"363405895","text":"import random\nnots = (\"ZERO\", \"ONE\", \"TWO\", \"THREE\", \"FOUR\", \"FIVE\", \"SIX\", \"SEVEN\", \"EIGHT\", \"NINE\")\n\ndef isIn(str1, str2):\n i = False\n str2 = list(str2)\n \n c = 0\n for char in str1:\n if char in str2:\n str2.pop(str2.index(char))\n c+=1\n\n if(c == len(str1)):\n i = True\n\n return (i, \"\".join(str2))\n\n\n\n\ntests = int(input(\"\"))\ni = 0\n\nwhile i < tests:\n raw = input(\"\")\n\n ori = raw\n final = []\n loop = 0\n\n while raw:\n loop+=1\n j = random.randint(0,9)\n #print(\"trying \", j)\n\n t = isIn(nots[j], raw)\n if(t[0]):\n #print(\"Found \", j)\n final.append(j)\n raw = t[1]\n #print(raw)\n\n if(loop >= 10):\n final = []\n raw = ori\n loop = 0\n\n print(\"CASE #{}:\".format(i+1),\"\".join([str(x) for x in sorted(final)]))\n\n i+=1\n","sub_path":"codes/CodeJamCrawler/16_2_1/bermuda.ut/digits.py","file_name":"digits.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"185272594","text":"import os\nimport sys\nimport torch\nimport numpy as np\nimport random\nimport time\n\nfrom .utils_for_robust import linf_loss, l0_loss, l1_loss, l2_loss, _auto_name, load, normalize_invert, same_length_str, \\\n tensor_detach\nfrom .models import factory\nfrom .utils import device\nfrom .datasets import Dataset\nimport foolbox\nimport pandas as pd\n\nMOMENTS = {\n 'imagenet': ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n 'cifar10': ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),\n 'cifar100': ([0.5071, 0.4867, 0.4408], [0.2675, 0.2565, 0.2761]),\n}\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--dataset\", choices=[\"cifar10\", 
\"imagenet\"], default=\"imagenet\")\n parser.add_argument(\"-s\", \"--save\", default=\"./saved_results\")\n\n parser.add_argument(\"-a\", \"--attack\", choices=[\"FGSM\"], default=\"FGSM\")\n parser.add_argument(\"-u\", \"--untargeted\", action='store_true')\n parser.add_argument(\"-e\", \"--epsilon\", type=float, default=0.03)\n\n parser.add_argument(\"--num_valid_test_imgs\", type=int, default=10000)\n\n parser.add_argument(\"--attack_batch_size\", type=int, default=1)\n\n parser.add_argument(\"--model_name\", type=str, help='Name of the model.') #\n parser.add_argument(\"--load_name\", default=None, type=str, help='Checkpoint name to load')\n\n parser.add_argument(\"-z\", \"--use_zvalue\", action='store_true')\n parser.add_argument(\"--seed\", type=int, default=1216)\n\n args = vars(parser.parse_args())\n print('args = ', args)\n\n # setup random seed\n random.seed(args['seed'])\n np.random.seed(args['seed'])\n torch.manual_seed(args[\"seed\"])\n print(\"seed = \", args[\"seed\"])\n\n overall_timestart = time.time()\n\n use_log = not args['use_zvalue']\n print(\"use_log = \", use_log)\n\n model_name = args['model_name']\n dataset_name = args['dataset']\n\n model = factory(model_name, dataset_name).to(device)\n model = load(model_name, dataset_name, model, args['load_name'])\n #######################\n normMean, normStd = MOMENTS[dataset_name]\n mean = np.array(normMean, dtype='float').reshape((3, 1, 1))\n std = np.array(normStd, dtype='float').reshape((3, 1, 1))\n # --------------------------------------------------------------------------------------#\n if args['attack'] == \"FGSM\": #\n fmodel = foolbox.models.PyTorchModel(model.eval(), bounds=(0, 1), num_classes=10,\n preprocessing=(mean, std))\n attack = foolbox.attacks.FGSM(fmodel)\n else:\n print(\"Invalid attack name, exit 1\")\n return\n #####################\n attack_batch_size = args['attack_batch_size']\n # >>>>>>>>>>>>>>>>>>>>>\n data = Dataset(args['dataset'], False, attack_batch_size) # >>>>\n\n targeted_flag = not args['untargeted']\n print(\"targeted_flag = \", targeted_flag)\n\n img_no = 0\n img_nan = 0 # #########\n pred_right = 0 # ############\n total_success = 0\n l0_list = []\n l1_list = []\n l2_list = []\n linf_list = []\n time_list = []\n\n verbose_f = open(args['save'] + \"/\" + \"_\".join([args['dataset'], args['attack'], str(targeted_flag),\n str(args['epsilon']), \"verbose.txt\"]), \"w\")\n aggre_f = open(args['save'] + \"/\" + \"_\".join([args['dataset'], args['attack'], str(targeted_flag),\n str(args['epsilon']), \"aggre.txt\"]), \"w\")\n robust_f = open(args['save'] + \"/\" + \"_\".join([args['dataset'], args['attack'], str(targeted_flag),\n str(args['epsilon']), \"robust.txt\"]), \"a\")\n if targeted_flag is True:\n verbose_head_str_raw = ['total', 'pre_rig', 'adv_suc', 'time', 'success', 'pred', 'target',\n 'adv', 'l0_dist', 'l1_dist', 'l2_dist',\n 'linf_distortion']\n else:\n verbose_head_str_raw = ['total', 'pre_rig', 'adv_suc', 'time', 'success', 'pred', 'adv',\n 'l0_distor', 'l1_dist', 'l2_dist', 'linf_dist']\n\n verbose_head_str = same_length_str(verbose_head_str_raw)\n aggre_head_str_raw = ['total_count', 'pred_rate', 'suc_rate_a', 'suc_rate_r',\n 'l0_avg', 'l0_std', 'l1_avg', 'l1_std', 'l2_avg', 'l2_std',\n 'linf_avg', 'linf_std', 'time_avg', 'time_std']\n aggre_head_str = same_length_str(aggre_head_str_raw)\n\n verbose_f.write(verbose_head_str + '\\n')\n aggre_f.write(aggre_head_str + '\\n')\n\n robust_f.write(str(args['load_name']) + '\\n')\n robust_f.write(aggre_head_str + '\\n')\n\n 
sys.stdout.flush()\n\n for i, (images, labels) in enumerate(data):\n for j in range(labels.size()[0]):\n timestart = time.time()\n print(\"=\" * 10, \"i = \", i, \"=\" * 10, \"j=\", j, \"=\" * 10)\n \"\"\"perform the attack\"\"\"\n image = np.array(normalize_invert(images[j].cpu().clone(), normMean, normStd)).clip(0, 1)\n label = int(labels[j].item())\n adv = attack(image, label, unpack=False,epsilons=[float(args['epsilon'])], max_epsilon=0)\n\n timeend = time.time()\n time_used = timeend - timestart\n time_used_per_image = time_used\n\n original_predict = np.squeeze(model(torch.unsqueeze(images[j], 0)))\n #original_predict = np.squeeze(fmodel.forwards(images[j].numpy()))\n original_predict = tensor_detach(original_predict)\n original_class = np.argsort(original_predict)\n\n sys.stdout.flush()\n\n predict_label = np.argmax(original_predict)\n target_label = labels[j].data.cpu().numpy()\n success = False\n ####################################\n if predict_label == target_label:\n pred_right += 1\n\n img_no += 1\n print('img_nan:', img_nan)\n # if the array contains NaN, the solver did not return a solution\n if np.any(pd.isnull(adv)):\n img_nan += 1 # ########################\n print('Attack failed. (solver returned NaN)')\n l0_distortion = l1_distortion = l2_distortion = linf_distortion = np.nan\n else:\n\n l0_distortion = l0_loss(np.array(adv.cpu()), np.array(images[j].cpu()))\n l1_distortion = l1_loss(adv, np.array(images[j].cpu()))\n l2_distortion = l2_loss(adv, np.array(images[j].cpu()))\n linf_distortion = linf_loss(adv, np.array(images[j].cpu()))\n\n adversarial_predict = np.squeeze(model(torch.tensor([adv]).to(device)))\n adversarial_predict = tensor_detach(adversarial_predict)\n\n adversarial_prob = np.sort(adversarial_predict)\n adversarial_class = np.argsort(adversarial_predict)\n attack_label = np.argmax(adversarial_predict)\n\n sys.stdout.flush()\n\n success = False\n if targeted_flag:\n success = np.argsort(adversarial_predict)[-1] == target_label\n\n # dealing with the tie issue in the adversarial_predict vector\n candidates = np.array([i for i in range(len(adversarial_predict) - 1)\n if abs(adversarial_predict[i] - adversarial_prob[-1]) < 0.001])\n if len(candidates) > 1 and target_label in candidates:\n success = True\n\n else:\n success = np.argsort(adversarial_predict)[-1] != target_label\n if success:\n print(\"Attack succeeded.\")\n else:\n print(\"Attack failed.\")\n\n if success:\n total_success += 1\n l0_list.append(l0_distortion)\n l1_list.append(l1_distortion)\n l2_list.append(l2_distortion)\n linf_list.append(linf_distortion)\n time_list.append(time_used_per_image)\n print('total success', total_success)\n suffix = \"seq={0}_prev={1}_adv={2}_res={3}\".format(i, original_class[-1], adversarial_class[-1], success)\n print(\"Saving to\", suffix)\n sys.stdout.flush()\n\n L1_debug_str = \"[STATS][L1] total = {}, seq = {}, time = {:.3f}, success = {}, \" \\\n \"prev_class = {}, new_class = {}, distortion = {:.5f}, success_rate = {:.3f}, \" \\\n \"l2_avg = {:.5f}\".format(img_no, i * attack_batch_size + j,\n time_used_per_image, success, original_class[-1],\n adversarial_class[-1], l2_distortion,\n total_success / float(img_no),\n 0 if total_success == 0 else np.mean(l2_list))\n\n print(L1_debug_str)\n sys.stdout.flush()\n\n if targeted_flag is True:\n verbose_str_raw = [str(img_no), str(pred_right), str(total_success),\n format(time_used_per_image, '<6.3f'), str(success),\n str(original_class[-1]), str(target_label), str(adversarial_class[-1]),\n format(l0_distortion, 
'<9.3f'), format(l1_distortion, '<9.3f'),\n                                   format(l2_distortion, '<9.3f'), format(linf_distortion, '<9.3f')]\n            else:\n                verbose_str_raw = [str(img_no), str(pred_right), str(total_success), str(time_used_per_image),\n                                   str(success),\n                                   str(original_class[-1]), str(adversarial_class[-1]), str(l0_distortion),\n                                   str(l1_distortion),\n                                   str(l2_distortion), str(linf_distortion)]\n            verbose_str = same_length_str(verbose_str_raw)\n\n            verbose_f.write(verbose_str + \"\\n\")\n            verbose_f.flush()\n            print(verbose_head_str)\n            print(verbose_str)\n\n            sys.stdout.flush()\n\n            overall_timeend_sofar = time.time()\n\n            overall_time_used_sofar = overall_timeend_sofar - overall_timestart\n\n            print(\"overall_time_used_sofar = \", overall_time_used_sofar)\n            sys.stdout.flush()\n        if img_no >= args[\"num_valid_test_imgs\"]:\n            break\n    verbose_f.close()\n\n    if img_no == 0:\n        success_rate = 0.0\n    else:\n        # success_rate = total_success / float(img_no)\n        success_rate = total_success / float(pred_right)\n\n    if total_success == 0:\n        # pad to the 14 columns declared in aggre_head_str_raw\n        aggre_str_raw = [str(img_no), str(pred_right / float(img_no) if img_no else 0.0), str(0.0), str(success_rate),\n                         str(0.0), str(0.0), str(0.0), str(0.0), str(0.0), str(0.0),\n                         str(0.0), str(0.0), str(0.0), str(0.0)]\n    else:\n        aggre_str_raw = [str(img_no), format(pred_right / float(img_no), '<6.4f'),\n                         format(total_success / float(img_no), '<6.4f'), format(success_rate, '<6.4f'),\n                         format(np.mean(l0_list), '<6.3f'), format(np.std(l0_list), '<6.3f'),\n                         format(np.mean(l1_list), '<6.3f'), format(np.std(l1_list), '<6.3f'),\n                         format(np.mean(l2_list), '<6.3f'), format(np.std(l2_list), '<6.3f'),\n                         format(np.mean(linf_list), '<6.3f'), format(np.std(linf_list), '<6.3f'),\n                         format(np.mean(time_list), '<6.3f'), format(np.std(time_list), '<6.3f')]\n    aggre_str = same_length_str(aggre_str_raw)\n\n    aggre_f.write(aggre_str + \"\\n\")\n    robust_f.write(aggre_str + '\\n')  # #############\n    print(aggre_head_str)\n    print(aggre_str)\n    sys.stdout.flush()\n    robust_f.close()  # ########\n    aggre_f.close()\n\n    overall_timeend = time.time()\n\n    overall_time_used = overall_timeend - overall_timestart\n\n    print(\"overall_time_used = \", overall_time_used)\n    sys.stdout.flush()\n\n    print(\"ALL DONE!!!\")\n    return\n\n\nif __name__ == \"__main__\":\n    main()\n\n    print(\"Experiment Done!!!\")\n","sub_path":"umbreon/test_robust_new.py","file_name":"test_robust_new.py","file_ext":"py","file_size_in_byte":12246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"415358103","text":"import glob\nimport os\nimport pathlib\nimport stat\nimport time\nfrom collections import OrderedDict, defaultdict\nfrom pathlib import Path\nfrom textwrap import dedent\nfrom typing import Optional\n\nfrom loguru import logger\n\nfrom .action import ActionForBuild\nfrom .uninstall import uninstall\nfrom .util import run_user_script\nfrom ..exceptions import (\n    BinaryArchiveNotFoundException,\n    InternalCommandException,\n    InternalSubprocessException,\n    UserException,\n)\nfrom ..gitutils import lfs\nfrom ..gitutils import get_worktree_root\nfrom ..model.install_metadata import (\n    load_metadata,\n    init_metadata_from_build,\n    save_metadata,\n    save_file_list,\n    is_installed,\n    installed_component_license_path,\n    installed_component_file_list_path,\n    installed_component_metadata_path,\n)\n\n\nclass InstallAction(ActionForBuild):\n    def __init__(\n        self,\n        build,\n        script,\n        config,\n        allow_build=True,\n        allow_binary_archive=True,\n        create_binary_archive=False,\n        no_merge=False,\n        keep_tmproot=False,\n        run_tests=False,\n    ):\n        assert (\n            allow_build or allow_binary_archive\n        ), f\"You 
must allow at least one option between building and installing from binary archives for {build.name}\"\n        sources = []\n        if allow_build:\n            sources.append(\"build\")\n        if allow_binary_archive:\n            sources.append(\"binary archives\")\n        sources_str = \" or \".join(sources)\n\n        super().__init__(f\"install ({sources_str})\", build, script, config)\n        self.allow_build = allow_build\n        self.allow_binary_archive = allow_binary_archive\n        self.create_binary_archive = create_binary_archive\n        self.no_merge = no_merge\n        self.keep_tmproot = keep_tmproot\n        self.run_tests = run_tests\n\n    def _run(self, explicitly_requested=False):\n        tmp_root = self.environment[\"TMP_ROOT\"]\n        orchestra_root = self.environment[\"ORCHESTRA_ROOT\"]\n\n        logger.debug(\"Preparing temporary root directory\")\n        self._prepare_tmproot()\n\n        pre_file_list = self._index_directory(tmp_root + orchestra_root, relative_to=tmp_root + orchestra_root)\n\n        install_start_time = time.time()\n        if self.allow_binary_archive and self.binary_archive_exists():\n            self._install_from_binary_archive()\n            source = \"binary archives\"\n        elif self.allow_build:\n            self._build_and_install()\n            if self.create_binary_archive:\n                self._create_binary_archive()\n            source = \"build\"\n        else:\n            raise UserException(f\"Could not find binary archive nor build: {self.build.qualified_name}\")\n        install_end_time = time.time()\n\n        # Binary archive symlinks always need to be updated, not only when the binary archive is rebuilt\n        self.update_binary_archive_symlink()\n\n        post_file_list = self._index_directory(tmp_root + orchestra_root, relative_to=tmp_root + orchestra_root)\n        post_file_list.append(\n            os.path.relpath(installed_component_file_list_path(self.component.name, self.config), orchestra_root)\n        )\n        post_file_list.append(\n            os.path.relpath(installed_component_metadata_path(self.component.name, self.config), orchestra_root)\n        )\n        new_files = [f for f in post_file_list if f not in pre_file_list]\n\n        if not self.no_merge:\n            if is_installed(self.config, self.build.component.name):\n                logger.debug(\"Uninstalling previously installed build\")\n                uninstall(self.build.component.name, self.config)\n\n            logger.debug(\"Merging installed files into orchestra root directory\")\n            self._merge()\n\n        self._update_metadata(\n            new_files,\n            install_end_time - install_start_time,\n            source,\n            explicitly_requested,\n        )\n\n        if not self.keep_tmproot:\n            logger.debug(\"Cleaning up tmproot\")\n            self._cleanup_tmproot()\n\n    def _update_metadata(self, file_list, install_time, source, set_manually_installed):\n        # Save installed file list (.idx)\n        save_file_list(self.component.name, file_list, self.config)\n\n        # Save metadata\n        metadata = load_metadata(self.component.name, self.config)\n        if metadata is None:\n            metadata = init_metadata_from_build(self.build)\n\n        metadata.recursive_hash = self.component.recursive_hash\n        metadata.source = source\n        metadata.manually_installed = metadata.manually_installed or set_manually_installed\n        metadata.install_time = install_time\n        metadata.binary_archive_path = self.binary_archive_relative_path\n\n        save_metadata(metadata, self.config)\n\n    def _prepare_tmproot(self):\n        script = dedent(\n            \"\"\"\n            rm -rf \"$TMP_ROOT\"\n            mkdir -p \"$TMP_ROOT\"\n            mkdir -p \"${TMP_ROOT}${ORCHESTRA_ROOT}/include\"\n            mkdir -p \"${TMP_ROOT}${ORCHESTRA_ROOT}/lib64\"{,/include,/pkgconfig}\n            test -e \"${TMP_ROOT}${ORCHESTRA_ROOT}/lib\" || ln -s lib64 \"${TMP_ROOT}${ORCHESTRA_ROOT}/lib\"\n            test -L \"${TMP_ROOT}${ORCHESTRA_ROOT}/lib\"\n            mkdir -p \"${TMP_ROOT}${ORCHESTRA_ROOT}/bin\"\n            mkdir -p 
\"${TMP_ROOT}${ORCHESTRA_ROOT}/usr/\"{lib,include}\n mkdir -p \"${TMP_ROOT}${ORCHESTRA_ROOT}/share/\"{info,doc,man,orchestra}\n touch \"${TMP_ROOT}${ORCHESTRA_ROOT}/share/info/dir\"\n mkdir -p \"${TMP_ROOT}${ORCHESTRA_ROOT}/libexec\"\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _install_from_binary_archive(self):\n # TODO: handle nonexisting binary archives\n logger.debug(\"Fetching binary archive\")\n self._fetch_binary_archive()\n logger.debug(\"Extracting binary archive\")\n self._extract_binary_archive()\n\n logger.debug(\"Removing conflicting files\")\n self._remove_conflicting_files()\n\n def _fetch_binary_archive(self):\n binary_archive_path = self.locate_binary_archive()\n assert binary_archive_path is not None\n binary_archive_path = pathlib.Path(binary_archive_path)\n binary_archive_root = get_worktree_root(binary_archive_path)\n binary_archive_relative_path = binary_archive_path.relative_to(binary_archive_root)\n failures = 0\n while True:\n try:\n lfs.fetch(binary_archive_root, include=[binary_archive_relative_path])\n break\n except InternalSubprocessException as e:\n failures += 1\n if failures >= self.config.max_lfs_retries:\n raise e\n\n def _extract_binary_archive(self):\n if not self.binary_archive_exists():\n raise UserException(\"Binary archive not found!\")\n\n archive_filepath = self.locate_binary_archive()\n script = dedent(\n f\"\"\"\n mkdir -p \"$TMP_ROOT$ORCHESTRA_ROOT\"\n cd \"$TMP_ROOT$ORCHESTRA_ROOT\"\n tar xaf \"{archive_filepath}\"\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _implicit_dependencies(self):\n if self.allow_binary_archive and self.binary_archive_exists() or not self.allow_build:\n return set()\n else:\n return {self.build.configure}\n\n def _implicit_dependencies_for_hash(self):\n return {self.build.configure}\n\n def _build_and_install(self):\n env = self.environment\n env[\"RUN_TESTS\"] = \"1\" if self.run_tests else \"0\"\n\n logger.debug(\"Executing install script\")\n run_user_script(self.script, environment=env)\n\n logger.debug(\"Removing conflicting files\")\n self._remove_conflicting_files()\n\n if self.build.component.skip_post_install:\n logger.debug(\"Skipping post install\")\n else:\n self._post_install()\n\n def _post_install(self):\n logger.debug(\"Dropping absolute paths from pkg-config\")\n self._drop_absolute_pkgconfig_paths()\n\n logger.debug(\"Purging libtools' files\")\n self._purge_libtools_files()\n\n # TODO: maybe this should be put into the configuration and not in orchestra itself\n logger.debug(\"Converting hardlinks to symbolic\")\n self._hard_to_symbolic()\n\n # TODO: maybe this should be put into the configuration and not in orchestra itself\n logger.debug(\"Fixing RPATHs\")\n self._fix_rpath()\n\n # TODO: this should be put into the configuration and not in orchestra itself\n logger.debug(\"Replacing NDEBUG preprocessor statements\")\n self._replace_ndebug(self.build.ndebug)\n\n # TODO: this should be put into the configuration and not in orchestra itself\n logger.debug(\"Replacing ASAN preprocessor statements\")\n self._replace_asan(self.build.asan)\n\n if self.build.component.license:\n logger.debug(\"Copying license file\")\n source = self.build.component.license\n destination = installed_component_license_path(self.build.component.name, self.config)\n script = dedent(\n f\"\"\"\n DESTINATION_DIR=\"$TMP_ROOT$(dirname \"{destination}\")\"\n mkdir -p \"$DESTINATION_DIR\"\n for DIR in \"$BUILD_DIR\" \"$SOURCE_DIR\"; do\n if test -e \"$DIR/{source}\"; then\n cp \"$DIR/{source}\" 
\"$TMP_ROOT/{destination}\"\n exit 0\n fi\n done\n echo \"Couldn't find {source}\"\n exit 1\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _remove_conflicting_files(self):\n script = dedent(\n \"\"\"\n if test -d \"$TMP_ROOT/$ORCHESTRA_ROOT/share/info\"; then\n rm -rf \"$TMP_ROOT/$ORCHESTRA_ROOT/share/info\";\n fi\n if test -d \"$TMP_ROOT/$ORCHESTRA_ROOT/share/locale\"; then\n rm -rf \"$TMP_ROOT/$ORCHESTRA_ROOT/share/locale\";\n fi\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _drop_absolute_pkgconfig_paths(self):\n script = dedent(\n \"\"\"\n cd \"${TMP_ROOT}${ORCHESTRA_ROOT}\"\n if [ -e lib/pkgconfig ]; then\n find lib/pkgconfig \\\\\n -name \"*.pc\" \\\\\n -exec sed -i 's|/*'\"$ORCHESTRA_ROOT\"'/*|${pcfiledir}/../..|g' {} ';'\n fi\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _purge_libtools_files(self):\n script = dedent(\n \"\"\"\n find \"${TMP_ROOT}${ORCHESTRA_ROOT}\" -name \"*.la\" -type f -delete\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _hard_to_symbolic(self):\n duplicates = defaultdict(list)\n for root, dirnames, filenames in os.walk(f'{self.environment[\"TMP_ROOT\"]}{self.environment[\"ORCHESTRA_ROOT\"]}'):\n for path in filenames:\n path = os.path.join(root, path)\n info = os.lstat(path)\n inode = info.st_ino\n if inode == 0 or info.st_nlink < 2 or not stat.S_ISREG(info.st_mode):\n continue\n\n duplicates[inode].append(path)\n\n for _, equivalent in duplicates.items():\n base = equivalent.pop()\n for alternative in equivalent:\n os.unlink(alternative)\n os.symlink(os.path.relpath(base, os.path.dirname(alternative)), alternative)\n\n def _fix_rpath(self):\n replace_dynstr = os.path.join(os.path.dirname(__file__), \"..\", \"support\", \"elf-replace-dynstr.py\")\n fix_rpath_script = dedent(\n f\"\"\"\n cd \"$TMP_ROOT$ORCHESTRA_ROOT\"\n # Fix rpath\n find . 
-type f -executable | while read EXECUTABLE; do\n                if head -c 4 \"$EXECUTABLE\" | grep '^.ELF' > /dev/null &&\n                   file \"$EXECUTABLE\" | grep x86-64 | grep -E '(shared|dynamic)' > /dev/null;\n                then\n                    REPLACE='$'ORIGIN/$(realpath --relative-to=\"$(dirname \"$EXECUTABLE\")\" \".\")\n                    echo \"Setting rpath of $EXECUTABLE to $REPLACE\"\n                    \"{replace_dynstr}\" \"$EXECUTABLE\" \"$RPATH_PLACEHOLDER\" \"$REPLACE\" /\n                    \"{replace_dynstr}\" \"$EXECUTABLE\" \"$ORCHESTRA_ROOT\" \"$REPLACE\" /\n                fi\n            done\n            \"\"\"\n        )\n        self._run_internal_script(fix_rpath_script)\n\n    def _replace_ndebug(self, disable_debugging):\n        debug, ndebug = (\"0\", \"1\") if disable_debugging else (\"1\", \"0\")\n        patch_ndebug_script = dedent(\n            rf\"\"\"\n            cd \"$TMP_ROOT$ORCHESTRA_ROOT\"\n            find include/ -name \"*.h\" \\\n                -exec \\\n                    sed -i \\\n                        -e 's|^\\s*#\\s*ifndef\\s\\+NDEBUG|#if {debug}|' \\\n                        -e 's|^\\s*#\\s*ifdef\\s\\+NDEBUG|#if {ndebug}|' \\\n                        -e 's|^\\(\\s*#\\s*if\\s\\+.*\\)!defined(NDEBUG)|\\1{debug}|' \\\n                        -e 's|^\\(\\s*#\\s*if\\s\\+.*\\)defined(NDEBUG)|\\1{ndebug}|' \\\n                        {{}} ';'\n            \"\"\"\n        )\n        self._run_internal_script(patch_ndebug_script)\n\n    def _replace_asan(self, asan_enabled):\n        replace_with = \"1\" if asan_enabled else \"0\"\n        # fmt: off\n        patch_ndebug_script = dedent(rf\"\"\"\n            cd \"$TMP_ROOT$ORCHESTRA_ROOT\"\n            find include/ -name \"*.h\" \\\n                -exec \\\n                    sed -i \\\n                        -e 's|__has_feature\\(address_sanitizer\\)|{replace_with}|' \\\n                        -e 's|defined\\(__SANITIZE_ADDRESS__\\)|{replace_with}|' \\\n                        {{}} ';'\n            \"\"\")\n        # fmt: on\n        self._run_internal_script(patch_ndebug_script)\n\n    def _merge(self):\n        copy_command = f'cp -far --reflink=auto \"$TMP_ROOT/$ORCHESTRA_ROOT/.\" \"$ORCHESTRA_ROOT\"'\n        self._run_internal_script(copy_command)\n\n    def _create_binary_archive(self):\n        if self.binary_archive_exists():\n            logger.debug(f\"Binary archive for {self.component.name} already exists, skipping its creation\")\n            return\n        logger.debug(\"Creating binary archive\")\n        binary_archive_path = self._binary_archive_path()\n        binary_archive_parent_dir = os.path.dirname(binary_archive_path)\n        binary_archive_repo_name = self._binary_archive_repo_name\n        absolute_binary_archive_tmp_path = os.path.join(\n            self.config.binary_archives_local_paths[binary_archive_repo_name],\n            f\"_tmp_{self.binary_archive_filename}\",\n        )\n        script = dedent(\n            f\"\"\"\n            mkdir -p \"$BINARY_ARCHIVES\"\n            cd \"$TMP_ROOT$ORCHESTRA_ROOT\"\n            rm -f '{absolute_binary_archive_tmp_path}'\n            tar cvaf '{absolute_binary_archive_tmp_path}' --owner=0 --group=0 *\n            mkdir -p '{binary_archive_parent_dir}'\n            mv '{absolute_binary_archive_tmp_path}' '{binary_archive_path}'\n            \"\"\"\n        )\n        self._run_internal_script(script)\n        self._save_hash_material()\n\n    def _save_hash_material(self):\n        logger.debug(\"Saving hash material\")\n        hash_material_path = Path(self._hash_material_path())\n        hash_material_path.write_text(self.component.recursive_hash_material())\n\n    def update_binary_archive_symlink(self):\n        \"\"\"Creates/updates convenience symlinks to the binary archives.\n        Symlinks named <component-branch>_<orchestra-config-branch>.tar.xz point to the binary archives built for the\n        corresponding component and orchestra configuration branches.\n        Example: fix-something_master.tar.xz -> abcdef_fedcba.tar.xz would be created if the binary archive\n        for component branch fix-something with orchestra configuration on the `master` branch is available.\n        \"\"\"\n        logger.debug(\"Updating binary archive symlink\")\n\n        binary_archive_repo_name = self._binary_archive_repo_name\n        if binary_archive_repo_name is None:\n            logger.warning(\"No binary archive configured\")\n            
return\n\n try:\n orchestra_config_branch = self._get_script_output('git -C \"$ORCHESTRA_DOTDIR\" rev-parse --abbrev-ref HEAD')\n orchestra_config_branch = orchestra_config_branch.strip().replace(\"/\", \"-\")\n except InternalCommandException:\n logger.warning(\n \"Orchestra configuration is not inside a git repository. Defaulting to `master` as branch name\"\n )\n orchestra_config_branch = \"master\"\n\n archive_dir_path = os.path.dirname(self._binary_archive_path())\n\n def create_symlink(branch, commit):\n branch = branch.replace(\"/\", \"-\")\n target_name = self._binary_archive_filename(commit, self.component.recursive_hash)\n target_absolute_path = os.path.join(archive_dir_path, target_name)\n symlink_absolute_path = os.path.join(archive_dir_path, f\"{branch}_{orchestra_config_branch}.tar.xz\")\n if os.path.exists(target_absolute_path):\n if os.path.exists(symlink_absolute_path):\n os.unlink(symlink_absolute_path)\n os.symlink(target_name, symlink_absolute_path)\n\n if self.component.clone:\n for branch, commit in self.component.clone.heads().items():\n create_symlink(branch, commit)\n else:\n create_symlink(\"none\", \"none\")\n\n @staticmethod\n def _index_directory(root_dir_path, relative_to=None):\n paths = []\n for current_dir_path, child_dir_names, child_file_names in os.walk(root_dir_path):\n for child_filename in child_file_names:\n child_file_path = os.path.join(current_dir_path, child_filename)\n if relative_to:\n child_file_path = os.path.relpath(child_file_path, relative_to)\n paths.append(child_file_path)\n\n for child_dir in child_dir_names:\n child_dir_path = os.path.join(current_dir_path, child_dir)\n if os.path.islink(child_dir_path):\n if relative_to:\n child_dir_path = os.path.relpath(child_dir_path, relative_to)\n paths.append(child_dir_path)\n\n return paths\n\n def _cleanup_tmproot(self):\n self._run_internal_script('rm -rf \"$TMP_ROOT\"')\n\n @property\n def _binary_archive_repo_name(self):\n \"\"\"Returns the name of the binary archives repository where new archives should be created\"\"\"\n if self.component.binary_archives:\n binary_archive_repo_name = self.component.binary_archives\n if binary_archive_repo_name not in self.config.binary_archives_remotes.keys():\n raise UserException(\n f\"Component {self.component.name} wants to push to an unknown binary-archives \"\n f\"repository ({binary_archive_repo_name})\"\n )\n return binary_archive_repo_name\n elif self.config.binary_archives_remotes:\n return list(self.config.binary_archives_remotes.keys())[0]\n else:\n return None\n\n @property\n def binary_archive_relative_path(self) -> str:\n \"\"\"Returns the path to the binary archive, relative to the binary archive repository\"\"\"\n return os.path.join(\n self.binary_archive_relative_dir,\n self.binary_archive_filename,\n )\n\n @property\n def hash_material_relative_path(self) -> str:\n \"\"\"Returns the path to the hash material, relative to the binary archive repository\"\"\"\n return os.path.join(\n self.binary_archive_relative_dir,\n self.hash_material_filename,\n )\n\n @property\n def binary_archive_filename(self) -> str:\n \"\"\"Returns the filename of the binary archive for the target build.\n *Warning*: the filename is the same for all the builds of the same component. 
Use `binary_archive_relative_path`\n to get a path which is unique to a single build\n \"\"\"\n component_commit = self.component.commit() or \"none\"\n return self._binary_archive_filename(component_commit, self.component.recursive_hash)\n\n @property\n def binary_archive_relative_dir(self) -> str:\n \"\"\"Returns the path to the directory containing the binary archives for the associated build, relative to the\n binary archive repository\"\"\"\n return os.path.join(\n self.architecture,\n self.component.name,\n self.build.name,\n )\n\n @property\n def hash_material_filename(self) -> str:\n \"\"\"Returns the filename of the hash material for the target build.\n *Warning*: the filename is the same for all the builds of the same component. Use `hash_material_relative_path`\n to get a path which is unique to a single build\n \"\"\"\n component_commit = self.component.commit() or \"none\"\n return self._hash_material_filename(component_commit, self.component.recursive_hash)\n\n @staticmethod\n def _binary_archive_filename(component_commit, component_recursive_hash) -> str:\n return f\"{component_commit}_{component_recursive_hash}.tar.xz\"\n\n @staticmethod\n def _hash_material_filename(component_commit, component_recursive_hash) -> str:\n return f\"{component_commit}_{component_recursive_hash}.hash-material.yml\"\n\n def _binary_archive_path(self) -> str:\n \"\"\"Returns the absolute path where the binary archive should be created.\n Note: Use `locate_binary_archive` to locate the binary archive to extract when installing.\n \"\"\"\n return os.path.join(\n self.config.binary_archives_local_paths[self._binary_archive_repo_name],\n self.binary_archive_relative_path,\n )\n\n def available_binary_archives(self):\n \"\"\"Returns all available binary archives related to this build\"\"\"\n available_binary_archives = set()\n for binary_archive_repo in self.config.binary_archives_local_paths.values():\n binary_archives_glob = os.path.join(\n binary_archive_repo, f\"{self.build.install.binary_archive_relative_dir}/*.tar.*\"\n )\n for binary_archive in glob.glob(binary_archives_glob):\n binary_archive_path = Path(binary_archive)\n if not binary_archive_path.exists() or binary_archive_path.is_symlink():\n continue\n available_binary_archives.add(binary_archive)\n return available_binary_archives\n\n def _hash_material_path(self) -> str:\n \"\"\"Returns the absolute path where the material used to compute the component hash should be created\"\"\"\n return os.path.join(\n self.config.binary_archives_local_paths[self._binary_archive_repo_name],\n self.hash_material_relative_path,\n )\n\n def locate_binary_archive(self) -> Optional[str]:\n \"\"\"Returns the absolute path to the binary archive that can be extracted to install the target build.\n *Note*: the path may be pointing to a git LFS pointer which needs to be downloaded and checked out (smudged)\"\"\"\n binary_archives_path = self.config.binary_archives_dir\n for name in self.config.binary_archives_remotes:\n relative_path_without_extension = os.path.splitext(self.binary_archive_relative_path)[0]\n extensions = [\".xz\", \".gz\", \"\"]\n for extension in extensions:\n try_path = os.path.join(binary_archives_path, name, relative_path_without_extension + extension)\n if os.path.exists(try_path):\n return try_path\n return None\n\n def binary_archive_exists(self) -> bool:\n \"\"\"Returns True if the binary archive for the target build exists (cached or downloadable)\"\"\"\n return self.locate_binary_archive() is not None\n\n @property\n def 
environment(self) -> \"OrderedDict[str, str]\":\n env = super().environment\n env[\"DESTDIR\"] = self.tmp_root\n return env\n\n @property\n def architecture(self):\n return \"linux-x86-64\"\n\n def is_satisfied(self):\n return is_installed(\n self.config,\n self.build.component.name,\n wanted_build=self.build.name,\n wanted_recursive_hash=self.build.component.recursive_hash,\n )\n\n def assert_prerequisites_are_met(self):\n super().assert_prerequisites_are_met()\n # Verify either sources or ls-remote info are available\n if self.component.clone is not None and self.component.commit() is None:\n raise UserException(f\"HEAD commit for {self.component.name} not available. Run `orc update`.\")\n\n # Verify binary archive is available\n if not self.allow_build and not self.binary_archive_exists():\n raise BinaryArchiveNotFoundException(self)\n","sub_path":"orchestra/actions/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":24858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"125310470","text":"import trading_system.domain.domain as dom\nfrom store.models import BaseItemRule as m_BaseItemRule\n\n\nclass BaseItemRule:\n\tdef __init__(self, model=None):\n\t\tif model != None:\n\t\t\tself._model = model\n\t\t\treturn\n\n\t@property\n\tdef pk(self):\n\t\treturn self._model.pk\n\n\t@property\n\tdef id(self):\n\t\treturn self._model.pk\n\n\t@property\n\tdef type(self):\n\t\treturn self._model.type\n\n\t@property\n\tdef parameter(self):\n\t\treturn self._model.parameter\n\n\tdef check(self, amount):\n\t\tif self.type == 'MAX' and amount > int(self.parameter):\n\t\t\treturn False\n\t\telif self.type == 'MIN' and amount < int(self.parameter):\n\t\t\treturn False\n\t\treturn True\n\n\tdef update(self, item_dict):\n\t\tfor field in self._model._meta.fields:\n\t\t\tif field.attname in item_dict.keys():\n\t\t\t\tsetattr(self._model, field.attname, item_dict[field.attname])\n\n\t\ttry:\n\t\t\tself._model.save()\n\t\texcept Exception:\n\t\t\traise dom.DBFailedExceptionDomainToService(msg='DB Failed')\n\n\tdef delete(self):\n\n\t\ttry:\n\t\t\tself._model.delete()\n\t\texcept Exception:\n\t\t\traise dom.DBFailedExceptionDomainToService(msg='DB Failed')\n\n\t@staticmethod\n\tdef get_b_rule(rule_id):\n\n\t\ttry:\n\t\t\treturn BaseItemRule(model=m_BaseItemRule.objects.get(id=rule_id))\n\t\texcept Exception:\n\t\t\traise dom.DBFailedExceptionDomainToService(msg='DB Failed')\n\n\t@staticmethod\n\tdef get_item_bi_rules(item_id):\n\n\t\ttry:\n\t\t\tcir_models = m_BaseItemRule.objects.filter(item_id=item_id)\n\t\t\treturn list(map(lambda cir_model: BaseItemRule(model=cir_model), list(cir_models)))\n\t\texcept Exception:\n\t\t\traise dom.DBFailedExceptionDomainToService(msg='DB Failed')\n","sub_path":"dev/trading_system/domain/base_item_rule.py","file_name":"base_item_rule.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"443570271","text":"\"\"\"Given a rod of length n and a table of prices\np_i, i = 1, 2, ... 
, n, write an algorithm to find the max revenue r_n\nobtainable by cutting up the rod and selling the pieces.\n\"\"\"\n\ndef naive(values, length):\n if length <= 0:\n return 0\n r_n = -1\n for i in range(length):\n r_n = max(r_n, values[i] + naive(values, length - (i + 1)))\n return r_n\n","sub_path":"interview_prep/python/rod.py","file_name":"rod.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"372716189","text":"import csv\nfrom random import (\n sample,\n randint,\n random,\n choice\n )\nfrom string import (\n ascii_letters,\n digits\n )\n\nimport names\n\ndef create_mockup_txt(filename, size=14):\n school_ids = sample(range(1,8000), size)\n school_class = \"{c}{n:02d}\".format(\n c=choice(ascii_letters),\n n=int(choice(digits))\n )\n with open(filename, 'w') as f:\n f.write('\"Interne ID-Nummer\";\"Nachname\";\"Vorname\";\"Klasse\"\\n')\n for i in school_ids:\n f.write('{};\"{}\";\"{}\";\"{}\"\\n'.format(\n i,\n names.get_last_name(),\n names.get_first_name(),\n school_class\n )\n )\n\ncreate_mockup_txt('Test.TXT', size=10)\n","sub_path":"generate_mockup_txt.py","file_name":"generate_mockup_txt.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"598670824","text":"#\n# Emails configuration and templates used when an task order is created, CUSTOMIZE...\n#\n\nimport os\n\n\nEMAIL_TIMEOUT = 3 # seconds\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = os.getenv('EMAIL_HOST', 'smtp.gmail.com')\nEMAIL_USE_TLS = os.getenv('EMAIL_USE_TLS', 'true') == 'true'\nEMAIL_PORT = os.getenv('EMAIL_PORT', 587)\nEMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER', 'YOUREMAIL@gmail.com')\nEMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD', 'PASS')\n\n\nTASKS_SEND_EMAILS_TO_ASSIGNED = os.getenv('TASKS_SEND_EMAILS_TO_ASSIGNED', 'false') == 'true'\nTASKS_SEND_EMAILS_TO_PARTNERS = os.getenv('TASKS_SEND_EMAILS_TO_PARTNERS', 'false') == 'true'\n\n\n# Enables the Tornado Django Coleman Viewer (it will send emails with the order URL)\n# Check: https://github.com/mrsarm/tornado-dcoleman-mtasks-viewer\nTASKS_VIEWER_ENABLED = os.getenv('TASKS_VIEWER_ENABLED', 'false') == 'true'\nTASKS_VIEWER_HASH_SALT = os.getenv('TASKS_VIEWER_HASH_SALT', '1two3') # REPLACE in production !!!\nTASKS_VIEWER_ENDPOINT = os.getenv('TASKS_VIEWER_ENDPOINT', 'http://localhost:8888/{number}?t={token}')\n\nTASKS_EMAIL_WITHOUT_URL = '''\\\nNew task #{id} created.\n\nTitle:\n{title}\n\nAssigned:\n{user}\n\nDescription:\n{description}\n\nPlease note: Do NOT reply to this email. This email is sent from an unattended mailbox.\nReplies will not be read.\n\n---\n{sign}\n'''\n\n\nTASKS_EMAIL_WITH_URL = '''\\\nNew task #{id} created.\n\nTitle:\n{title}\n\nAssigned:\n{user}\n\nDescription:\n{description}\n\nOrder URL:\n{url}\n\nPlease note: Do NOT reply to this email. 
This email is sent from an unattended mailbox.\nReplies will not be read.\n\n---\n{sign}\n'''","sub_path":"employee_task/settings_emails.py","file_name":"settings_emails.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"2189641","text":"import threading\nfrom flask import jsonify, request, json, abort\nfrom OpinionOrderClassify.model_predict import opinion_order_clasify\nfrom app import log,app\nfrom api.opinion_classify_fun import transcode,save_opinion_result,validate_opinion_classify\nfrom api.opinion_classify_fun import PRE_RESULY_CODE_TEST,PRE_RESULT_TEST\nimport os\nfrom TASK_AND_CODE_GENERATE.TaskAllocation import task_master,logger\n\n\n@app.route('/opinionclassify', methods=['post'])\ndef predition():\n    # validate the input data\n    data = validate_opinion_classify(request)\n    log.info(data)\n    data.pop('tk')  # do not store tk\n    try:\n        model_directory_path = os.path.join('.','OpinionOrderClassify','model_save')\n        pre_result = opinion_order_clasify(data,model_directory_path=model_directory_path)\n        log.info('Successfully ran model')\n    except:\n        log.error(\"Failed to run model:\\n\", exc_info=True)\n        pre_result = PRE_RESULT_TEST\n    try:\n        pre_result_code = transcode(pre_result)  # encode the Chinese results into English output\n        # thrd_savedata = threading.Thread(target=save_opinion_result, kwargs={'req': data, 'resp': pre_result})\n        # thrd_savedata.start()\n        save_opinion_result(req=data,resp=pre_result)\n        log.info('Transcoding success')\n    except:\n        log.error(\"Transcoding failed:\\n\", exc_info=True)\n        pre_result_code = PRE_RESULY_CODE_TEST\n    resp = jsonify(pre_result_code)\n    log.info(pre_result_code)\n    return resp\n\n@app.route('/infoextract', methods=['post'])\ndef extract():\n    data = request.get_json()\n    if not isinstance(data,dict):\n        data = json.loads(data)  # convert to a dict\n    logger.info(data)\n    result = task_master(data)\n    resp = jsonify(result)\n    logger.info(result)\n\n    return resp","sub_path":"api/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"304966293","text":"from file_importer import FileImporter\nfrom intcode_computer import IntcodeComputer\n\ndef get_input(st):\n    return list(map(ord, list(st))) + [10]\n\nprog = list(map(int, FileImporter.get_input(\"/../input/21.txt\").split(\",\")))\ncode = \"\"\"\\\nOR A J\nNOT B T\nAND T J\nNOT C T\nAND T J\nAND D J\nNOT A T\nOR T J\nNOT C T\nAND A T\nAND B T\nAND D T\nOR T J\nRUN\"\"\"\n\ncomputer = IntcodeComputer(prog, get_input(code))\n\nwhile not computer.halted:\n    out = computer.get_output()\n    if out is not None:\n        if out < 0x110000:\n            print(chr(out), end=\"\")\n        else: print(out)","sub_path":"src/_21b.py","file_name":"_21b.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"38763664","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 9 17:53:20 2017\n\n@author: Master Chief\n\"\"\"\n\nimport scipy\nimport scipy.optimize\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as font_manager\n\nfont_path = 'C:\\\Windows\\\Fonts\\\consola.ttf'\nfont_prop = font_manager.FontProperties(fname=font_path, size=10)\n\ntitle_font = {'fontname':'Arial', 'size':'16', 'color':'black', 'weight':'normal',\n              'verticalalignment':'bottom'}\n\nfont = {'fontname':'Comic Sans MS','fontsize':14}\n\ndef f(x):\n    y = x + 2*scipy.cos(x)\n    return y\n    \nraiz = scipy.optimize.newton(f, 
2)\n\nprint(raiz)\n\nx = np.linspace(-5,5)\n\nplt.plot(raiz, 0,'bo', label='Computed root')\nplt.plot(x,f(x), label='$x + 2\\cos(x)$')\nplt.axhline(y=0, lw=0.7, ls='dashed')\nplt.axvline(x=0, lw=0.7, ls='dashed')\nplt.title('Function whose root is to be found', fontproperties=font_prop)\nplt.legend(loc=1)\nplt.show()","sub_path":"Tema 2 - Operaciones matematicas basicas/Codigos python/03 Raices/raices_python_scipy_03.py","file_name":"raices_python_scipy_03.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"233151612","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2021, Gopi and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.website.website_generator import WebsiteGenerator\nfrom frappe.utils import flt, comma_or, nowdate, getdate\n\nclass Payments(WebsiteGenerator):\n\tdef validate(self):\n\t\tfor d in self.get('ref'):\n\t\t\tout = flt(d.total) - flt(self.paid_amount)\n\t\t\td.outstanding = out\n\t\t\td.allocated = self.paid_amount\n\n\tdef on_submit(self):\n\n\t\tfor d in self.get(\"ref\"):\n\t\t\tre = frappe.get_value(\"Orderss\",d.namee,\"paid_amount\")\n\t\t\tfrappe.db.set_value(\"Orderss\",d.namee,{\n\t\t\t\"paid_amount\":flt(re)+flt(self.paid_amount)\n\t\t\t})\n\n\t\t\tfrappe.db.set_value(\"Orderss\",d.namee,{\n\t\t\t\"outstanding\":d.outstanding\n\t\t\t})\n\n\t\t\tif flt(self.paid_amount)100) :\n            continue\n        y=g[y]\n        if(dist[y]==-1 or dist[y]>dist[x]+1) :\n            dist[y]=dist[x]+1\n            q.append(y)\n\nprint(dist[-1])","sub_path":"algorithm_202105/baek16928.py","file_name":"baek16928.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"195878652","text":"#!/usr/bin/env python3\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\",\"--islands\", help=\"Name of the file with CpG island coordinate data\")\nparser.add_argument(\"-p\",\"--promoters\", help=\"Name of the file with promoter coordinates\")\nargs = parser.parse_args()\n\n\nislands=list(open(args.islands))\npromoters=list(open(args.promoters))\nout=open('%s.promoters' % (args.islands),'wt')\n\nfor i in range(0, len(islands)):\n\tstart_island = int(islands[i].split('\\t')[1])\n\tend_island = int(islands[i].split('\\t')[2])\n\tfor k in range(0, len(promoters)):\n\t\tstart_promoter=int(promoters[k].split('\\t')[1])\n\t\tend_promoter=int(promoters[k].split('\\t')[2])\n\t\tif start_island in range(start_promoter, end_promoter) or end_island in range(start_promoter, end_promoter):\n\t\t\tout.write('%s\\t%s\\t%s\\t%s\\t%s\\n' % (islands[i].strip(), promoters[k].split('\\t')[3].split('#')[0], promoters[k].split('\\t')[1], promoters[k].split('\\t')[2], promoters[k].split('\\t')[4]))\n\n\n\n","sub_path":"IslandsinPromoters.py","file_name":"IslandsinPromoters.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"224647199","text":"from xml.etree.ElementTree import parse\nfrom hyper.utils.process_scan import scan_all\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.checks.messages import Critical\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView, View\nfrom .forms import ScanForm, RenameScanForm, CreateAssetGroup, AddAssetForm, 
DeleteAssetForm, AssetScanForm\nfrom hyper.utils.general import *\nfrom .tasks import go_to_sleep\nfrom django.shortcuts import redirect\nfrom django.views.static import serve\nimport os\nfrom django.http import HttpResponse, Http404\nfrom wsgiref.util import FileWrapper\n\n\nUser = get_user_model()\nclass LoginRequiredView(LoginRequiredMixin, TemplateView):\n pass\n\n\nclass DashboardMainView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/main.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n scans = select_scans(self.request.user.id)\n context['scans'] = scans\n return context\n\n\nclass ScanDetailsView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/scan_details.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['slug'] = self.kwargs['slug']\n selected_scan = select_slug(self.kwargs['slug'][5:],self.request.user.id)\n context['scan'] = selected_scan\n if len(selected_scan) >= 1:\n context['data'] = get_scan_data(self.kwargs['slug'][5:], self.request.user.id)\n return context\nclass CveDetailsView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/cve_details.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n scan_id = self.kwargs['slug']\n cve_id = self.kwargs['cveid']\n context['cve'] = get_cve(scan_id[5:],cve_id,self.request.user.id)\n return context\n\nclass ScanView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/scan.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['scan_form'] = ScanForm(self.request.user.id, initial={'ports':'top'})\n context['is_asset_group'] = False\n return context\n def post(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n form = ScanForm(self.request.user.id, request.POST)\n print(form.errors)\n if form.is_valid():\n context['address'] = parse_scan_addresses(form.cleaned_data['address'])[1]\n context['scan_name'] = form.cleaned_data['scan_name']\n all_scan_ips = parse_scan_addresses(form.cleaned_data['address'])[0]\n for group_name in form.cleaned_data['asset_groups']:\n gid = get_group_id(self.request.user.id, group_name)\n group_ips = get_assets(self.request.user.id, gid)\n for ip in group_ips:\n all_scan_ips.append(ip)\n \"\"\"\n Check to see if there are old scan results that have the same addresses\n and delete them, there could be other possible solutions to this\n as this will remove results from older scans\n \"\"\"\n delete_old_addresses(all_scan_ips)\n slug = add_scan(request.user.id, form.cleaned_data['scan_name'],all_scan_ips, generate_scan_display_address(all_scan_ips))\n\n \n \n \"\"\"\n Im not sure how to calculate the percent of the work done so for now we\n print a message after they submit the scan saying its running in the background\n \"\"\"\n #context['task_id'] = convert_scan_to_model(form.cleaned_data['name'], slug[5:])\n scan_all(all_scan_ips,slug[5:],form.cleaned_data['ports'], form.cleaned_data['custom_range'], self.request.user.id)\n context['scan_status'] = \"scanning\"\n \n\n return self.render_to_response(context)\n\nclass ScanManageView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/scan_manage.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['slug'] = self.kwargs['slug']\n context['rename_form'] = RenameScanForm()\n return context\n\n def post(self, 
request, **kwargs):\n context = self.get_context_data(**kwargs)\n form = RenameScanForm(request.POST)\n if form.is_valid():\n context['new_name'] = form.cleaned_data['name']\n scan = select_slug(self.kwargs['slug'][5:], request.user.id)\n scan.update(name=form.cleaned_data['name'])\n \n return redirect(f\"/scan/{self.kwargs['slug']}\")\n if request.POST.get('delete'):\n clear_ports(request.user.id, self.kwargs['slug'][5:])\n clear_scans(request.user.id, self.kwargs['slug'][5:])\n return redirect('/')\n return self.render_to_response(context)\n\n\nclass AddressDashboardView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/address/address_dashboard.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n ip_list = get_ips(user=self.request.user.id)\n context['ip_list'] = []\n for ip in ip_list:\n context['ip_list'].append([ip, ip.replace('.', '-')])\n return context\nclass AddressDetailsView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/address/address_details.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n slug = self.kwargs['slug'].replace('-', '.')\n context['data'] = get_address_data(self.request.user.id, slug)\n return context\nclass AddressCveDetailsView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/cve_details.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n address = self.kwargs['slug'].replace('-', '.')\n cve_id = self.kwargs['cveid']\n context['cve'] = get_address_cve(address, self.request.user.id, cve_id)\n return context\nclass AssetGroupDashboardView(LoginRequiredMixin, TemplateView):\n template_name = 'dashboard/assets/assets.html'\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['data'] = []\n groups = get_asset_groups(user=self.request.user.id)\n for group in groups:\n context['data'].append([group, get_assets(self.request.user.id, group)])\n return context\n\n\nclass CreateAssetGroupView(LoginRequiredMixin, TemplateView):\n template_name =\"dashboard/assets/create.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['create_form'] = CreateAssetGroup(self.request.user.id)\n return context\n def post(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n form = CreateAssetGroup(self.request.user.id, request.POST)\n if form.is_valid():\n gid = create_asset_group(request.user.id, form.cleaned_data['name'])\n addresses = form.cleaned_data['Add_Addresses']\n for ip in addresses:\n add_asset_to_group(ip, self.request.user.id, gid)\n return redirect('/assets/')\n return self.render_to_response(context)\n\nclass ManageAssetGroupView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/assets/manage.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n #TODO give rename its own form\n context['change_name_form'] = RenameScanForm()\n context['add_asset_form'] = AddAssetForm(self.request.user.id)\n context['del_asset_form'] = DeleteAssetForm(self.request.user.id, self.kwargs['groupid'])\n context['gid'] = self.kwargs['groupid']\n return context\n def post(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n if request.POST.get('change_name'):\n form = RenameScanForm(request.POST)\n if form.is_valid():\n groupid = self.kwargs['groupid']\n change_group_name(groupid, form.cleaned_data['name'])\n return 
redirect(\"/assets/\")\n if request.POST.get('add'):\n form = AddAssetForm(request.user.id, request.POST)\n if form.is_valid():\n for x in form.cleaned_data['Add_Addresses']:\n if x != \"None\":\n add_asset_to_group(x, request.user.id, self.kwargs['groupid'])\n return redirect(\"/assets/\")\n if request.POST.get('delete') == \"Submit\":\n form = DeleteAssetForm(request.user.id, self.kwargs['groupid'], request.POST)\n \n if form.is_valid():\n print(form.cleaned_data)\n for x in form.cleaned_data['Remove_Addresses']:\n if x != \"None\":\n del_asset_from_group(request.user.id, self.kwargs['groupid'], x)\n return redirect(\"/assets/\")\n if request.POST.get('remove'):\n delete_asset_group(self.request.user.id, self.kwargs['groupid'])\n return redirect('/')\n return self.render_to_response(context)\n \nclass AssetGroupAddressView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/address/address_dashboard.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n ip_list = get_assets(self.request.user.id, self.kwargs['groupid'])\n context['ip_list'] = []\n for ip in ip_list:\n context['ip_list'].append([ip, ip.replace('.', '-')])\n return context\n\nclass DashboardInfoView(LoginRequiredMixin, TemplateView):\n template_name = 'dashboard/info.html'\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n ips = get_ips(self.request.user.id)\n context['ip_num'] = len(ips)\n context['cve_nums'] = len(num_cves(self.request.user.id))\n context['critical_num'] = len(num_cves(self.request.user.id).filter(score__gte=9))\n context['high_num'] = len(num_cves(self.request.user.id).filter(score__gte=7).filter(score__lt=9))\n context['medium_num'] = len(num_cves(self.request.user.id).filter(score__gte=4).filter(score__lt=7))\n context['top_ten'] = get_top_ten(self.request.user.id)\n context['data'] = num_cves(self.request.user.id)\n return context\n\nclass DashboardScoreView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/scan_details.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['scan'] = 1\n level = self.kwargs['score']\n if level == 'critical':\n context['level'] = 'Critical Level Vulnerabilities'\n context['data'] = num_cves(self.request.user.id).filter(score__gte=9)\n elif level == 'high':\n context['level'] = 'High Level Vulnerabilities'\n context['data'] = num_cves(self.request.user.id).filter(score__gte=7).filter(score__lt=9)\n elif level == 'medium':\n context['level'] = 'Medium Level Vulnerabilities'\n context['data'] = num_cves(self.request.user.id).filter(score__gte=4).filter(score__lt=7)\n return context\n\nclass AssetScanView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/scan.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['scan_form'] = AssetScanForm(initial={'ports':'top'})\n context['is_asset_group'] = True\n context['groupid'] = self.kwargs['groupid']\n return context\n def post(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n form = AssetScanForm(request.POST)\n \n if form.is_valid():\n #context['name'] = form.cleaned_data['name']\n context['scan_name'] = form.cleaned_data['scan_name']\n \"\"\"\n Check to see if there are old scan results that have the same addresses\n and delete them, there could be other possible solutions to this\n as this will remove results from older scans\n \"\"\"\n addresses = 
get_assets(self.request.user.id, self.kwargs['groupid'])\n delete_old_addresses(addresses)\n slug = add_scan(request.user.id, form.cleaned_data['scan_name'],addresses)\n\n \"\"\"\n Im not sure how to calculate the percent of the work done so for now we\n print a message after they submit the scan saying its running in the background\n \"\"\"\n #context['task_id'] = convert_scan_to_model(form.cleaned_data['name'], slug[5:])\n \n scan_all(addresses,slug[5:],form.cleaned_data['ports'], form.cleaned_data['custom_range'], self.request.user.id)\n \n\n return self.render_to_response(context)\n\nclass TempDownloadView(LoginRequiredMixin, View):\n template_name = 'dashboard/download/download.html'\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['download_type'] = self.kwargs['downloadtype']\n file_path = '/tmp/test.csv'\n \n \n return context\n\nclass FileDownloadView(View):\n # Set FILE_STORAGE_PATH value in settings.py\n \n # Here set the name of the file with extension\n file_name = '/tmp/report.csv'\n # Set the content type value\n content_type_value = 'text/csv'\n\n def get(self, request, downloadtype, downloadvalue):\n if downloadtype == 'address':\n data = num_cves(request.user.id).filter(ip=downloadvalue.replace('-', '.'))\n write_data_to_csv([data])\n elif downloadtype == 'scan':\n data = get_scan_data(downloadvalue[5:], request.user.id)\n write_data_to_csv([data])\n elif downloadtype == 'group':\n members = get_assets(self.request.user.id, downloadvalue)\n data = get_cve_for_multiple_address(request.user.id, members)\n write_data_to_csv(data)\n\n file_path = self.file_name\n if os.path.exists(file_path):\n with open(file_path, 'rb') as fh:\n response = HttpResponse(\n fh.read(),\n content_type=\"text/csv\"\n )\n response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path)\n return response\n else:\n raise Http404\n\nclass DownloadView(FileDownloadView):\n file_name = '/tmp/report.csv'\n\ndashboard_info_view = DashboardInfoView.as_view()\ndashboard_manage_scan_view = ScanManageView.as_view()\ndashboard_scan_view = ScanView.as_view()\ndashboard_cve_details = CveDetailsView.as_view()\ndashboard_main_view = DashboardMainView.as_view()\ndashboard_scan_details = ScanDetailsView.as_view()\ndashboard_address_view = AddressDashboardView.as_view()\naddress_details_view = AddressDetailsView.as_view()\naddress_cve_details_view = AddressCveDetailsView.as_view()\nasset_group_view = AssetGroupDashboardView.as_view()\nasset_group_create_view = CreateAssetGroupView.as_view()\nasset_group_manage_view = ManageAssetGroupView.as_view()\nasset_group_address_view = AssetGroupAddressView.as_view()\ndashboard_score_view = DashboardScoreView.as_view()\nasset_group_scan_view = AssetScanView.as_view()\ndownload_view = FileDownloadView.as_view()","sub_path":"hyper/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"89208054","text":"def validate_email(email: str) -> bool:\n from re import search\n # regex = r\"^ (?=[A-Z0-9][A-Z0-9@._ % +-]{5, 253}$)[A-Z0-9._ % +-]{1, 64}@(?: (?=[A-Z0-9-]{1, 63}\\.)[A-Z0-9]+(?: -[A-Z0-9]+) *\\.){1, 8}[A-Z]{2, 63}$\"\n regex = r\"[^@]+@[^@]+\\.[^@]+\"\n return True if search(regex, email) else False\n\n\ndef validate_text(text: str, sizemin=0, sizemax=80, alnum=False) -> bool:\n return True if type(text) == str and sizemin <= len(text) <= sizemax and (not alnum or 
\ndef check_valid(field: str, val) -> bool:\n    return validators[field](val)\n\n\nvalidators = {\n    \"Name\": validate_text,\n    \"First_name\": validate_text,\n    \"Family_name\": validate_text,\n    \"Email_id\": validate_email,\n    \"Password\": lambda val: validate_text(val, sizemin=8, sizemax=15, alnum=True),\n    \"Mobile\": lambda x: len(x) == 10,\n    \"Sex\": lambda x: x in {'M', 'F', 'O'},\n    \"Address\": validate_text,\n    \"Degree\": validate_text,\n    \"Roll_no\": lambda x: isinstance(x, int) and len(str(x)) == 10,\n    \"Batch\": lambda val: validate_text(val, sizemax=10),\n    \"cgpa\": lambda x: 0 <= x <= 10,\n    \"Team_name\": validate_text,\n    \"Course_name\": lambda x: validate_text(x, sizemax=30),\n    \"Details\": lambda x: validate_text(x, sizemax=100),\n    \"Textbook\": lambda x: validate_text(x, sizemax=30),\n    \"Channel_name\": validate_text,\n    \"Time\": lambda x: True,\n    \"Quiz_no\": lambda x: isinstance(x, int) and x < 1e9,\n    \"No_of_qn\": lambda x: isinstance(x, int) and x < 1e9,\n    \"Qn_text\": lambda x: validate_text(x, sizemax=1000),\n    \"Q_id\": lambda x: isinstance(x, int) and x < 1e9,\n    \"Answer\": lambda x: validate_text(x, sizemax=1000),\n    \"Marks\": lambda x: 0 <= x <= 10,\n    \"Percentage_marks\": lambda x: 0 <= x <= 100,\n}\n\nvalidators[\"SRoll_no\"] = validators[\"Roll_no\"]\nvalidators[\"Sup_id\"] = validators[\"Email_id\"]\nvalidators[\"Org_id\"] = validators[\"Email_id\"]\nvalidators[\"Member_id\"] = validators[\"Email_id\"]\nvalidators[\"Admin_id\"] = validators[\"Email_id\"]\nvalidators[\"Inst_Email_id\"] = validators[\"Email_id\"]\nvalidators[\"Start_time\"] = validators[\"Time\"]\nvalidators[\"End_time\"] = validators[\"Time\"]\n","sub_path":"code/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"491234608","text":"from plugin import load\nimport pickle\n\nclass Agent:\n    def __init__(self):\n        pass\n\n    def echo(self, sock):\n        while 1:\n            data = sock.recv(1024)\n            if data.strip() == 'break': break\n            sock.send(data)\n        sock.close()\n\n    def agent(self, sock):\n        while 1:\n            data = sock.recv(1024)\n            if data.strip() == 'break': break\n            sock.send(\"agent: \" + data)\n        sock.close()\n\n    def load(self, sock):\n        sock.send(pickle.dumps(load.get_data(), 2))\n        sock.close()\n","sub_path":"old/connect/old/server/chandler.py","file_name":"chandler.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"116102505","text":"import sys\nsys.path.append(\"..\")\n\nfrom __main__ import settings, database\nimport discord, asyncio, json\nfrom discord.ext import commands\n\n# import mixer api and mixer chatbot from bots.mixer module\nfrom bots.mixer import api as api\nfrom bots.mixer import chat as mixer_chat\nfrom bots.mixer import channel as channel\n\n# initialize discord bot\nbot = commands.Bot(command_prefix = '>')\n\n@bot.command()\nasync def link(ctx):\n    await ctx.send(\"https://mixer.com/\" + channel.token)\n\n@bot.command()\nasync def leaderboard(ctx):\n    leaderboard = channel.get_leaderboard('sparks-weekly')\n    message = \"\"\n    for i in range(len(leaderboard)):\n        leader = leaderboard[i]\n        user_id = leader[\"userId\"]\n        username = leader[\"username\"]\n        sparks = leader[\"statValue\"]\n        place = i + 1\n        mixcord_user = database.user_from_mixer(user_id)\n        if mixcord_user is not None:\n            member = bot.get_user(mixcord_user[\"discord_id\"])\n            username = member.mention\n        else:\n            username = \"**{}**\".format(username)\n        message += \"{} is in {} place w/ {} sparks\\n\".format(username, place, sparks)\n    await ctx.send(message)\n\n@bot.command()\nasync def uptime(ctx):\n\n    # get uptime and check if online\n    uptime = channel.get_uptime()\n    if uptime is None:\n        await ctx.send(channel.token + \" is not currently online.\")\n        return\n\n    # return formatted uptime\n    await ctx.send(channel.token + \" has been live for: \" + str(uptime))\n\n# triggered when the discord bot is connected + authenticated\n@bot.event\nasync def on_ready():\n    print('discord logged in:', bot.user)\n
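\n# Summary comment added for clarity: the !mixcord flow below fetches an OAuth\n# shortcode from the Mixer API, asks the user to approve it, polls until the\n# code is approved (or denied/expired), then exchanges it for tokens and\n# stores the linked accounts.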
\n# triggered when !mixcord command is executed in discord\n@bot.command()\nasync def mixcord(ctx):\n\n    # make sure discord id isn't already in database\n    discord_id = ctx.author.id\n    if database.user_from_discord(discord_id) is not None:\n        await ctx.author.send(\"You've already linked your Mixer account via mixcord.\")\n        return\n\n    # get shortcode stuff from mixer\n    shortcode = api.get_shortcode()\n    code = shortcode[\"code\"]\n    handle = shortcode[\"handle\"]\n\n    # tell the user what to do to link their mixer account\n    await ctx.author.send(\"Visit the following page to link your Mixer: {}\".format(code))\n\n    # poll shortcode checking endpoint with handle until we can move on with authorization_code\n    while True:\n        await asyncio.sleep(10)\n        response = api.check_shortcode(handle)\n        status_code = response.status_code\n        if status_code == 200:\n            authorization_code = response.json()[\"code\"]\n            break\n        elif status_code == 403:\n            await ctx.author.send(\"Failed: user denied permissions.\")\n            return\n        elif status_code == 404:\n            await ctx.author.send(\"Failed: verification timed out.\")\n            return\n\n    tokens = api.get_token(authorization_code)\n    token_data = api.check_token(tokens[\"access_token\"])\n    user = api.get_user(token_data[\"sub\"])\n\n    database.insert_user(user.id, user.channel.id, discord_id)\n    database.update_tokens(discord_id, tokens[\"access_token\"], tokens[\"refresh_token\"], token_data[\"exp\"])\n\n    await ctx.author.send(\"Your Mixer account has been linked: \" + user.username)\n    await mixer_chat.send_message(\"@{} has linked their discord account: {}\".format(user.username, ctx.author))\n\nasync def send_announcement(message):\n    guild = bot.get_guild(settings[\"discord\"][\"guild\"])\n    channel = discord.utils.get(guild.text_channels, name = \"announcements\")\n    await channel.send(\"@everyone \" + message)\n","sub_path":"mixcord/bots/discord.py","file_name":"discord.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"82924131","text":"#import logger as log\nimport global_var\nimport logging\nimport os\n\n\nclass createTestReport(object):\n\n    def __init__(self):\n        self.html = global_var.log_path + '/Test_Results.html'\n\n    def create_test_report_header(self):\n\n        logpath = global_var.log_path\n        logpath = os.path.expanduser(logpath)\n        self.html = logpath + '/Test_Results.html'\n        print('Setting up Result File at %s' % self.html)\n        if not os.path.isfile(self.html):\n            self.testreport = open(self.html, 'w+')\n            # The HTML markup in this file was stripped during extraction;\n            # the tags used below are reconstructed from the surrounding code.\n            title = '<html>\\n<body>\\n'\n            self.testreport.write(title)\n            self.testreport.flush()\n        else:\n            self.testreport = open(self.html, 'a+')\n\n    def create_test_report_table(self):\n        table_header = '<table border=\"1\">'\n        table_header = table_header + '<caption>suite: %s</caption>' % global_var.suite_name\n        table_header = table_header + '''<tr>\n            <th>Test</th>\n            <th>Test Case Summary</th>\n            <th>Result</th>\n            <th>Failure Reason</th>\n            </tr>\\n'''\n        self.testreport.write(table_header)\n        self.testreport.flush()\n        return True
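\n\n    # Expected shape of 'data' (inferred from the usage below and noted here\n    # for clarity): {'name': str, 'summary': str (may be multi-line),\n    #               'result': 'PASS' | 'FAIL' (or falsy), 'comment': str}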
\n    def write_to_test_report(self, data):\n        self.testreport.write('<tr>\\n')\n        col_val = '<td>%s</td>\\n' % data['name']\n        self.testreport.write(col_val)\n        col_val = '<td>'\n        for summary in data['summary'].split('\\n'):\n            col_val += '%s<br>' % summary\n        col_val += '</td>\\n'\n        self.testreport.write(col_val)\n        if not data['result'] or data['result'] == 'FAIL':\n            col_val = '<td bgcolor=\"red\">FAIL</td>\\n'\n            self.testreport.write(col_val)\n        else:\n            col_val = '<td bgcolor=\"green\">PASS</td>\\n'\n            self.testreport.write(col_val)\n        if data['comment'] != '':\n            col_val = '<td>'\n            for comment in data['comment'].split('\\n'):\n                col_val += '%s<br>' % comment\n            col_val += '</td>\\n'\n            self.testreport.write(col_val)\n        else:\n            col_val = '<td>NA</td>\\n'\n            self.testreport.write(col_val)\n        self.testreport.write('</tr>\\n')\n        self.testreport.flush()\n\n    def close_test_table_tag(self):\n        self.testreport.write('</table>\\n')\n        self.testreport.flush()\n        return True\n\n    def close_test_report(self):\n        footer = '''</body>\\n</html>'''\n        self.testreport.write(footer)\n        self.testreport.close()\n        return True\n","sub_path":"testsetup/create_test_report.py","file_name":"create_test_report.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"551560371","text":"import subprocess\nimport sys\nimport csv\nfrom urllib.parse import urlparse\nimport urllib.request\nimport os\n\n#database name\ncustomCsv = \"results/phantomas.csv\"\n#phantomas command\ncommand = os.environ.get(\"PHANTOMAS_PATH\", \"phantomas\")\n\ninteresting_metrics = [\n    'Domain',\n    'requests',\n    'httpsRequests',\n    'timeToFirstByte',\n    'timeToLastByte',\n    'httpTrafficCompleted',\n    'domContentLoaded',\n    'domComplete',\n    'timeBackend',\n    'timeFrontend',\n]\n\ndef scan(site):\n    #choose the protocol for the connection\n    protocol = \"\"\n    if not(site.startswith(\"https://\") or site.startswith(\"http://\")):\n        try:\n            urllib.request.urlopen(\"https://\" + site, None)\n            protocol = \"https://\"\n        except:\n            urllib.request.urlopen(\"http://\" + site, None)\n            protocol = \"http://\"\n    #create a subprocess that executes phantomas\n    site = protocol + site\n    print(site)\n    proc = subprocess.Popen([command, site], stdout=subprocess.PIPE, shell=True)\n    #get the output of phantomas\n    out = proc.stdout.read()\n    #split the output to keep only the wanted data\n    out = str(out).split(\"\\\\n\\\\n\")[1][1:].split(\"\\\\n*\")\n    #get the domain name\n    domain = urlparse(site).hostname\n    if domain.startswith(\"www\"):\n        domain = domain[4:]\n    #create the full dictionary\n    data = {'Domain': domain}\n    for line in out:\n        parts = line.lstrip().split(\":\")\n        data[parts[0]] = parts[1].lstrip()\n    #create a smaller dictionary with only the necessary parts\n    newData = {}\n    for metric in interesting_metrics:\n        newData[metric] = data[metric]\n    return newData\n\ndef multiple_scan(csvFile):\n    domains = \"\"\n    with open(csvFile, 'r') as file:\n        domains = file.read().split(\"\\n\")[:-1]\n    temp_data = []\n    for domain in domains:\n        temp_data.append(scan(domain.split(\",\")[0]))\n    #write data to a csv file\n    with open(customCsv, 'w') as output_file:\n        writer = csv.writer(output_file, lineterminator=\"\\n\")\n        writer.writerow(interesting_metrics)\n        for data in temp_data:\n            if data != \"\":\n                writer.writerow(data.values())\n\ndef single_scan(url):\n    temp_data = scan(url)\n    with open(customCsv, 'w') as output_file:\n        writer = csv.writer(output_file, lineterminator=\"\\n\")\n        writer.writerow(interesting_metrics)\n        writer.writerow(temp_data.values())\n\n\nif __name__ == \"__main__\":\n    arg = sys.argv[1]\n    if str(arg).endswith(\".csv\"):\n        multiple_scan(arg)\n    else:\n        single_scan(arg)\n    print(\"Ended scan\")","sub_path":"phantomas.py","file_name":"phantomas.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"227706676","text":"from asyncio import FIRST_COMPLETED, IncompleteReadError, ensure_future, sleep, wait\nfrom asyncio.subprocess import DEVNULL, PIPE, create_subprocess_exec\nfrom contextlib import suppress\nfrom datetime import datetime\nfrom os import sep\nfrom pathlib import Path\nfrom shlex import quote\nfrom sys import stderr\nfrom textwrap import dedent\nfrom typing import Sequence\n\nfrom .consts import BIN, NUL, TIME_FMT\nfrom .copy import copy\nfrom .logging import log\nfrom .shared import join, kill_children\n\n
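\n# Summary comment added for clarity: _tunneling_prog() below builds the shell\n# snippet that launches the csshd helper, preferring a \"$HOME\"-relative\n# invocation when the binary lives under the user's home directory.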
\ndef _tunneling_prog() -> str:\n    canonical = BIN / \"csshd\"\n\n    try:\n        rel_path = canonical.relative_to(Path.home())\n    except ValueError:\n        return quote(str(canonical))\n    else:\n        return 'exec \"$HOME\"' + quote(str(Path(sep) / rel_path))\n\n\ndef _tunnel_cmd(name: str, args: Sequence[str]) -> Sequence[str]:\n    sh = _tunneling_prog()\n    if name == \"cssh\":\n        return (\"ssh\", \"-T\", *args, sh)\n    elif name == \"cdocker\":\n        return (\"docker\", \"exec\", *args, \"sh\", \"-c\", sh)\n    else:\n        assert False\n\n\nasync def _daemon(local: bool, name: str, args: Sequence[str]) -> int:\n    cmds = _tunnel_cmd(name, args=args)\n    proc = await create_subprocess_exec(\n        *cmds, start_new_session=True, stdin=DEVNULL, stdout=PIPE\n    )\n    p_done = ensure_future(proc.wait())\n    time = datetime.now().strftime(TIME_FMT)\n\n    msg = f\"\"\"\n    {time} | Establishing link via:\n    {join(cmds)}\n    \"\"\"\n    log.info(\"%s\", dedent(msg))\n\n    try:\n        assert proc.stdout\n        while True:\n            p_data = ensure_future(proc.stdout.readuntil(NUL))\n            await wait((p_done, p_data), return_when=FIRST_COMPLETED)\n\n            if p_data.done():\n                with suppress(IncompleteReadError):\n                    data = await p_data\n                    await copy(local, args=args, data=data[:-1])\n\n                    time = datetime.now().strftime(TIME_FMT)\n                    msg = f\"\"\"\n                    -- RECV --\n                    {time}\n                    \"\"\"\n                    log.info(\"%s\", dedent(msg))\n\n            if p_done.done():\n                return await proc.wait()\n\n    finally:\n        with suppress(ProcessLookupError):\n            kill_children(proc.pid)\n            await proc.wait()\n\n\nasync def l_daemon(local: bool, name: str, args: Sequence[str]) -> int:\n    while True:\n        code = await _daemon(local, name=name, args=args)\n        log.warning(\"%s\", f\"Exited - $? {code}\")\n        print(\"\\a\", end=\"\", file=stderr, flush=True)\n        await sleep(1)\n","sub_path":"iso_cp/local_daemon.py","file_name":"local_daemon.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"96919996","text":"#helper function\r\nfrom django.contrib.gis.geoip2 import GeoIP2\r\nfrom geopy.geocoders import Nominatim\r\nfrom ipware import get_client_ip\r\n\r\ndef get_geo(ip):\r\n    g = GeoIP2()\r\n    country = g.country(ip)\r\n    city = g.city(ip)\r\n    lat, lon = g.lat_lon(ip)\r\n    return country, city, lat, lon\r\n\r\ndef get_destination(request):\r\n\r\n    ip, is_routable = get_client_ip(request)\r\n    if ip is None:\r\n        ip_ = '2401:4900:36bf:e76b:b5bf:e254:883:ccac'\r\n    else:\r\n        if is_routable:\r\n            ip_ = ip\r\n        else:\r\n            ip_ = '2401:4900:36bf:e76b:b5bf:e254:883:ccac'\r\n    country, city, lat1, lon1 = get_geo(ip_)\r\n    lat1 = int(lat1)\r\n    lon1 = int(lon1)\r\n    #print( city )\r\n    for key, value in city.items():\r\n        if key == \"postal_code\":\r\n            try:\r\n                postal_code = value\r\n                break\r\n            except:\r\n                break\r\n\r\n    #print(postal_code)\r\n    try:\r\n        geolocator = Nominatim(user_agent='app')\r\n\r\n        destination = geolocator.geocode(postal_code)\r\n        #print(destination)\r\n    except:\r\n        destination = \"Not Found\"\r\n\r\n    return destination, lat1, lon1\r\n\r\n\r\ndef random_string_generator(size=10, chars=None):\r\n    # NOTE: this helper is called by unique_key_generator() below but was\r\n    # never defined in this file; the implementation here is the usual one\r\n    # from the tutorial code this module appears to be based on (assumption).\r\n    import string\r\n    chars = chars or (string.ascii_lowercase + string.digits)\r\n    return ''.join(random.choice(chars) for _ in range(size))\r\n\r\n\r\ndef unique_key_generator(instance):\r\n\r\n    size = random.randint(30, 45)\r\n    key = random_string_generator(size=size)\r\n\r\n    Klass = instance.__class__\r\n    qs_exists = Klass.objects.filter(key=key).exists()\r\n    if qs_exists:\r\n        return unique_key_generator(instance)\r\n    return key\r\n\r\n\r\ndef unique_otp_generator(instance):\r\n\r\n    key = random.randint(1, 999999)\r\n    print(key)\r\n\r\n    Klass = instance.__class__\r\n    qs_exists = Klass.objects.filter(key=key).exists()\r\n    if qs_exists:\r\n        return unique_otp_generator(instance)\r\n    return 
key\r\n\r\nimport re\r\nimport random\r\n\r\ndef phone_validator(phone_number):\r\n \"\"\"\r\n Returns true if phone number is correct else false\r\n \"\"\"\r\n regix = r'^\\+?1?\\d{10}$'\r\n com = re.compile(regix)\r\n find = len(com.findall(phone_number))\r\n if find == 1:\r\n return True\r\n else:\r\n return False\r\n\r\ndef otp_generator():\r\n otp = random.randint(999, 9999)\r\n return otp\r\n","sub_path":"nearbyshops/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"44199389","text":"\"\"\"--------------------------------------------------------------------------------------------------------------------------------------\nMODULE\n RerunConfirmationsTask\n\nDESCRIPTION\n This module contains an AEL main script used for re-running confirmation\n processing for any eligible entities updated on or after a specified time.\n\n This task is not intended for normal use and exists only as a support\n tool to be used to resolve any missed processing without replaying AMB\n messages.\n\n Examples of situations in which this tool may prove useful are:\n\n - Recovery after missed processing caused by the Confirmation ATS\n not being restarted after the deployment of a new confirmation hook.\n\n - Recovery after failed processing caused by a coding error in a\n confirmation hook.\n\n-----------------------------------------------------------------------------------------------------------------------------------------\nHISTORY\n=========================================================================================================================================\nDate Change no Developer Requester Description\n-----------------------------------------------------------------------------------------------------------------------------------------\n2019-06-18 FAOPS-536 Stuart Wilson Kgomotso Gumbo Initial implementation.\n2020-06-11 FAOPS-814 Cuen Edwards Kgomotso Gumbo Improvements to prevent unnecessary event generation.\n2020-09-06 FAOPS-920 Cuen Edwards Kgomotso Gumbo Improvements to allow for specifying the to time, event\n tables to examine, and the option to exclude touching\n entities already updated by the current user.\n-----------------------------------------------------------------------------------------------------------------------------------------\n\"\"\"\n\nimport datetime\n\nimport acm\nimport FConfirmationMain\nfrom FConfirmationEventFactory import FConfirmationEventFactory\nfrom FConfirmationProcess import GetConfirmationGeneratingObjects\n\nfrom at_ael_variables import AelVariableHandler\nfrom at_logging import getLogger\nimport FValidation_settings\nimport SessionFunctions\n\n\nLOGGER = getLogger(__name__)\n\n\ndef _create_ael_variable_handler():\n \"\"\"\n Create an AelVariableHandler for this script.\n \"\"\"\n ael_variable_handler = AelVariableHandler()\n # From Time.\n ael_variable_handler.add(\n name='from_time',\n label='From Time',\n cls='string',\n default=acm.Time.DateToday() + ' 00:00:00',\n mandatory=True,\n multiple=False,\n alt=\"The time from which to rerun confirmations processing. 
Any eligible \" +\n \"entity updated on or after this time will be touched in order to \" +\n \"trigger confirmations processing.\"\n )\n # To Time.\n ael_variable_handler.add(\n name='to_time',\n label='To Time',\n cls='string',\n default=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n mandatory=True,\n multiple=False,\n alt=\"The time up until which to rerun confirmations processing. Any eligible \" +\n \"entity updated on or before this time will be touched in order to \" +\n \"trigger confirmations processing.\"\n )\n # Event Tables.\n ael_variable_handler.add(\n name='event_table_names',\n label='Event Tables',\n cls='string',\n collection=FConfirmationMain.dbTables,\n default=','.join(FConfirmationMain.dbTables),\n mandatory=True,\n multiple=True,\n alt=\"The confirmation event tables to examine for updates between the from and \" +\n \"to times. This option allows one to avoid touching unnecessary entities \" +\n \"when it is known which types of entity need to be touched.\"\n )\n # Exclude Updates By Current User.\n ael_variable_handler.add_bool(\n name='exclude_updates_by_current_user',\n label='Exclude Updates by Current User',\n default=True,\n mandatory=True,\n multiple=False,\n alt=\"Prevent touching entities already updated by the current user. This option \" +\n \"allows one to avoid touching the same entities multiple times in the event \" +\n \"of multiple executions of this tool.\"\n )\n return ael_variable_handler\n\n\nael_variables = _create_ael_variable_handler()\n\nael_gui_parameters = {\n 'windowCaption': 'Rerun Confirmations',\n 'runButtonLabel': '&&Rerun',\n 'runButtonTooltip': 'Rerun Confirmations',\n 'hideExtraControls': True,\n 'closeWhenFinished': False\n}\n\n\ndef ael_main(ael_parameters):\n \"\"\"\n AEL Main Function.\n \"\"\"\n try:\n start_date_time = datetime.datetime.today()\n LOGGER.info('Starting at {start_date_time}'.format(start_date_time=start_date_time))\n from_time = ael_parameters['from_time']\n to_time = ael_parameters['to_time']\n event_table_names = ael_parameters['event_table_names']\n exclude_updates_by_current_user = ael_parameters['exclude_updates_by_current_user']\n _validate_from_and_to_time(from_time, to_time)\n _validate_running_as_fvalidation_exempt_user()\n _trigger_confirmation_processing(from_time, to_time, event_table_names, exclude_updates_by_current_user)\n end_date_time = datetime.datetime.today()\n LOGGER.info('Completed successfully at {end_date_time}'.format(end_date_time=end_date_time))\n duration = end_date_time - start_date_time\n LOGGER.info('Duration: {duration}'.format(duration=duration))\n except Exception as exception:\n if SessionFunctions.is_prime():\n _show_error_dialog(exception)\n LOGGER.exception(exception)\n else:\n raise\n\n\ndef _validate_from_and_to_time(from_time, to_time):\n \"\"\"\n Validate the from_time and to_time AEL parameters.\n \"\"\"\n # Validate From Time.\n from_datetime = datetime.datetime.strptime(from_time, '%Y-%m-%d %H:%M:%S')\n datetime_today = datetime.datetime.today()\n from_datetime_limit = datetime_today - datetime.timedelta(days=7)\n if from_datetime < from_datetime_limit:\n raise ValueError(\"The from time may not be earlier than '{from_datetime_limit}'.\".format(\n from_datetime_limit=from_datetime_limit\n ))\n if from_datetime > datetime_today:\n raise ValueError(\"The from time may not be in the future.\")\n # Validate To Time.\n to_datetime = datetime.datetime.strptime(to_time, '%Y-%m-%d %H:%M:%S')\n if to_datetime > datetime_today:\n raise ValueError(\"The to time may 
not be in the future.\")\n if to_datetime <= from_datetime:\n raise ValueError(\"The to time must after the from time.\")\n\n\ndef _validate_running_as_fvalidation_exempt_user():\n \"\"\"\n Validate that the current user is exempt from FValidation.\n \"\"\"\n if acm.UserName() not in FValidation_settings.SUPERUSERS:\n # Ensure that tool is run as a user exempt from FValidation\n # in order to avoid GUI pop-ups when touching entities.\n raise ValueError(\"This tool must be run by a user that is exempt from FValidation.\")\n\n\ndef _trigger_confirmation_processing(from_time, to_time, event_table_names, exclude_updates_by_current_user):\n \"\"\"\n Trigger confirmation processing for any eligible objects updated\n between the specified from time and to time.\n \"\"\"\n entities = _get_entities_updated_between_times(from_time, to_time, event_table_names)\n for entity in entities:\n if exclude_updates_by_current_user and entity.UpdateUser() == acm.User():\n info_message = \"{entity_class} {entity_oid}, already updated by the current user \"\n info_message += \"at '{update_time}', skipping...\"\n LOGGER.info(info_message.format(\n entity_class=entity.ClassName(),\n entity_oid=entity.Oid(),\n update_time=acm.Time.DateTimeFromTime(entity.UpdateTime())\n ))\n continue\n if not _entity_triggers_confirmation_processing(entity):\n info_message = \"{entity_class} {entity_oid}, updated '{update_time}', would \"\n info_message += \"not trigger confirmation processing, skipping...\"\n LOGGER.info(info_message.format(\n entity_class=entity.ClassName(),\n entity_oid=entity.Oid(),\n update_time=acm.Time.DateTimeFromTime(entity.UpdateTime())\n ))\n continue\n info_message = \"{entity_class} {entity_oid}, updated '{update_time}', would \"\n info_message += \"trigger confirmation processing, touching...\"\n LOGGER.info(info_message.format(\n entity_class=entity.ClassName(),\n entity_oid=entity.Oid(),\n update_time=acm.Time.DateTimeFromTime(entity.UpdateTime())\n ))\n try:\n entity.Touch()\n entity.Commit()\n except Exception as exception:\n LOGGER.exception(exception)\n\n\ndef _get_entities_updated_between_times(from_time, to_time, table_names):\n \"\"\"\n Get any ACM entities updated between the specified from time and\n to time.\n \"\"\"\n LOGGER.info(\"Finding entities updated between '{from_time}' and '{to_time}'...\".format(\n from_time=from_time,\n to_time=to_time\n ))\n select_expression = \"updat_time >= '{from_time}' and updat_time <= '{to_time}'\".format(\n from_time=acm.Time.LocalToUtc(from_time),\n to_time=acm.Time.LocalToUtc(to_time)\n )\n updated_entities = acm.FArray()\n for table_name in table_names:\n table = acm.FTable['ADM.{table_name}'.format(\n table_name=table_name\n )]\n entities = table.Select(select_expression).AsArray()\n LOGGER.info(\"Found {number_of_entities} {table_name} entities updated since '{from_time}'.\".format(\n number_of_entities=len(entities),\n table_name=table_name.lower(),\n from_time=from_time\n ))\n updated_entities.AddAll(entities)\n updated_entities.SortByProperty('UpdateTime')\n LOGGER.info(\"Found {number_of_entities} total entities updated since '{from_time}'.\".format(\n number_of_entities=len(updated_entities),\n from_time=from_time\n ))\n return updated_entities\n\n\ndef _entity_triggers_confirmation_processing(entity):\n \"\"\"\n Determine whether or not an entity would trigger any confirmation\n processing.\n \"\"\"\n if entity.IsKindOf(acm.FOperationsDocument):\n return True\n if entity.IsKindOf(acm.FParty):\n return True\n trades = 
GetConfirmationGeneratingObjects(entity)\n confirmation_events = FConfirmationEventFactory.GetConfirmationEvents()\n for trade in trades:\n for confirmation_event in confirmation_events:\n if confirmation_event.baseRule.IsSatisfiedBy(trade):\n return True\n return False\n\n\ndef _show_error_dialog(exception):\n \"\"\"\n Display an error dialog to the user.\n \"\"\"\n message_box = acm.GetFunction('msgBox', 3)\n ok_button = 0\n error_icon = 16\n message_box('Error', str(exception), ok_button | error_icon)\n","sub_path":"Extensions/ABSA Documentation/FPythonCode/RerunConfirmationsTask.py","file_name":"RerunConfirmationsTask.py","file_ext":"py","file_size_in_byte":11347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"393591444","text":"#!/usr/bin/python3\n\"\"\" Create .tgz file based in content of web_static \"\"\"\nfrom fabric.api import *\nfrom datetime import datetime\nimport os\n\nenv.hosts = ['35.231.235.242', '35.175.190.73']\n\n\ndef do_pack():\n try:\n local(\"mkdir -p versions/\")\n date = datetime.now()\n actual_date = date.strftime(\"%Y%m%d%H%M%S\")\n local(\"tar -cvzf versions/web_static_{}.tgz web_static/\".\n format(actual_date))\n return(\"versions/web_static_{}.tgz\".format(actual_date))\n except:\n return (None)\n\n\ndef do_deploy(archive_path):\n if not(os.path.exists(archive_path)):\n return False\n try:\n put(archive_path, \"/tmp/\")\n base_name = os.path.basename(archive_path)\n file_name = os.path.splitext(base_name)[0]\n run(\"mkdir -p /data/web_static/releases/{}\".format(file_name))\n run(\"tar -xzf /tmp/{} -C /data/web_static/releases/{}\".\n format(base_name, file_name))\n run(\"rm /tmp/{}\".format(base_name))\n dir_rel = \"/data/web_static/releases/\"\n run(\"mv {}{}/web_static/* {}{}\".\n format(dir_rel, file_name, dir_rel, file_name))\n run(\"rm -rf /data/web_static/releases/{}/web_static\".format(file_name))\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s /data/web_static/releases/{} /data/web_static/current\"\n .format(file_name))\n print(\"New version deployed!\")\n return(True)\n except:\n return(False)\n","sub_path":"2-do_deploy_web_static.py","file_name":"2-do_deploy_web_static.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"323170640","text":"######################################################################\n#\n# File: b2sdk/bucket.py\n#\n# Copyright 2019 Backblaze Inc. 
All Rights Reserved.\n#\n# License https://www.backblaze.com/using_b2_code.html\n#\n######################################################################\n\nimport logging\n\nfrom .exception import FileNotPresent, FileOrBucketNotFound, UnrecognizedBucketType\nfrom .file_version import FileVersionInfo, FileVersionInfoFactory\nfrom .progress import DoNothingProgressListener\nfrom .transfer.emerge.executor import AUTO_CONTENT_TYPE\nfrom .transfer.emerge.write_intent import WriteIntent\nfrom .transfer.outbound.copy_source import CopySource\nfrom .transfer.outbound.upload_source import UploadSourceBytes, UploadSourceLocalFile\nfrom .utils import B2TraceMeta, disable_trace, limit_trace_arguments\nfrom .utils import b2_url_encode, validate_b2_file_name\n\nlogger = logging.getLogger(__name__)\n\n\nclass Bucket(metaclass=B2TraceMeta):\n \"\"\"\n Provide access to a bucket in B2: listing files, uploading and downloading.\n \"\"\"\n\n DEFAULT_CONTENT_TYPE = AUTO_CONTENT_TYPE\n\n def __init__(\n self,\n api,\n id_,\n name=None,\n type_=None,\n bucket_info=None,\n cors_rules=None,\n lifecycle_rules=None,\n revision=None,\n bucket_dict=None,\n options_set=None,\n ):\n \"\"\"\n :param b2sdk.v1.B2Api api: an API object\n :param str id_: a bucket id\n :param str name: a bucket name\n :param str type_: a bucket type\n :param dict bucket_info: an info to store with a bucket\n :param dict cors_rules: CORS rules to store with a bucket\n :param dict lifecycle_rules: lifecycle rules to store with a bucket\n :param int revision: a bucket revision number\n :param dict bucket_dict: a dictionary which contains bucket parameters\n :param set options_set: set of bucket options strings\n \"\"\"\n self.api = api\n self.id_ = id_\n self.name = name\n self.type_ = type_\n self.bucket_info = bucket_info or {}\n self.cors_rules = cors_rules or []\n self.lifecycle_rules = lifecycle_rules or []\n self.revision = revision\n self.bucket_dict = bucket_dict or {}\n self.options_set = options_set or set()\n\n def get_id(self):\n \"\"\"\n Return bucket ID.\n\n :rtype: str\n \"\"\"\n return self.id_\n\n def set_info(self, new_bucket_info, if_revision_is=None):\n \"\"\"\n Update bucket info.\n\n :param dict new_bucket_info: new bucket info dictionary\n :param int if_revision_is: revision number, update the info **only if** *revision* equals to *if_revision_is*\n \"\"\"\n return self.update(bucket_info=new_bucket_info, if_revision_is=if_revision_is)\n\n def set_type(self, bucket_type):\n \"\"\"\n Update bucket type.\n\n :param str bucket_type: a bucket type (\"allPublic\" or \"allPrivate\")\n \"\"\"\n return self.update(bucket_type=bucket_type)\n\n def update(\n self,\n bucket_type=None,\n bucket_info=None,\n cors_rules=None,\n lifecycle_rules=None,\n if_revision_is=None,\n ):\n \"\"\"\n Update various bucket parameters.\n\n :param str bucket_type: a bucket type\n :param dict bucket_info: an info to store with a bucket\n :param dict cors_rules: CORS rules to store with a bucket\n :param dict lifecycle_rules: lifecycle rules to store with a bucket\n :param int if_revision_is: revision number, update the info **only if** *revision* equals to *if_revision_is*\n \"\"\"\n account_id = self.api.account_info.get_account_id()\n return self.api.session.update_bucket(\n account_id,\n self.id_,\n bucket_type=bucket_type,\n bucket_info=bucket_info,\n cors_rules=cors_rules,\n lifecycle_rules=lifecycle_rules,\n if_revision_is=if_revision_is\n )\n\n def cancel_large_file(self, file_id):\n \"\"\"\n Cancel a large file transfer.\n\n :param str 
file_id: a file ID\n        \"\"\"\n        return self.api.cancel_large_file(file_id)\n\n    def download_file_by_id(self, file_id, download_dest, progress_listener=None, range_=None):\n        \"\"\"\n        Download a file by ID.\n\n        .. note::\n          download_file_by_id actually belongs in :py:class:`b2sdk.v1.B2Api`, not in :py:class:`b2sdk.v1.Bucket`; we just provide a convenient redirect here\n\n        :param str file_id: a file ID\n        :param download_dest: an instance of one of the following classes: \\\n        :class:`~b2sdk.v1.DownloadDestLocalFile`,\\\n        :class:`~b2sdk.v1.DownloadDestBytes`,\\\n        :class:`~b2sdk.v1.DownloadDestProgressWrapper`,\\\n        :class:`~b2sdk.v1.PreSeekedDownloadDest`,\\\n        or any sub class of :class:`~b2sdk.v1.AbstractDownloadDestination`\n        :param b2sdk.v1.AbstractProgressListener, None progress_listener: a progress listener object to use, or ``None`` to not report progress\n        :param tuple[int, int] range_: two integer values, start and end offsets\n        \"\"\"\n        return self.api.download_file_by_id(\n            file_id, download_dest, progress_listener, range_=range_\n        )\n\n    def download_file_by_name(self, file_name, download_dest, progress_listener=None, range_=None):\n        \"\"\"\n        Download a file by name.\n\n        .. seealso::\n\n            :ref:`Synchronizer `, a *high-performance* utility that synchronizes a local folder with a Bucket.\n\n        :param str file_name: a file name\n        :param download_dest: an instance of one of the following classes: \\\n        :class:`~b2sdk.v1.DownloadDestLocalFile`,\\\n        :class:`~b2sdk.v1.DownloadDestBytes`,\\\n        :class:`~b2sdk.v1.DownloadDestProgressWrapper`,\\\n        :class:`~b2sdk.v1.PreSeekedDownloadDest`,\\\n        or any sub class of :class:`~b2sdk.v1.AbstractDownloadDestination`\n        :param b2sdk.v1.AbstractProgressListener, None progress_listener: a progress listener object to use, or ``None`` to not track progress\n        :param tuple[int, int] range_: two integer values, start and end offsets\n        \"\"\"\n        url = self.api.session.get_download_url_by_name(self.name, file_name)\n        return self.api.services.download_manager.download_file_from_url(\n            url, download_dest, progress_listener, range_\n        )\n\n    def get_file_info_by_id(self, file_id: str) -> FileVersionInfo:\n        \"\"\"\n        Gets a file version's info by ID.\n\n        :param str file_id: the id of the file whose info will be retrieved.\n        :rtype: b2sdk.v1.FileVersionInfo\n        \"\"\"\n        return FileVersionInfoFactory.from_api_response(self.api.get_file_info(file_id))\n\n    def get_file_info_by_name(self, file_name: str) -> FileVersionInfo:\n        \"\"\"\n        Gets a file version's info by its name.\n\n        :param str file_name: the name of the file whose info will be retrieved.\n        :rtype: b2sdk.v1.FileVersionInfo\n        \"\"\"\n        try:\n            return FileVersionInfoFactory.from_response_headers(\n                self.api.session.get_file_info_by_name(self.name, file_name)\n            )\n        except FileOrBucketNotFound:\n            raise FileNotPresent(bucket_name=self.name, file_id_or_name=file_name)\n\n    def get_download_authorization(self, file_name_prefix, valid_duration_in_seconds):\n        \"\"\"\n        Return an authorization token that is valid only for downloading\n        files from the given bucket.\n\n        :param str file_name_prefix: a file name prefix; only files that match it can be downloaded\n        :param int valid_duration_in_seconds: the token is valid only for this number of seconds\n        \"\"\"\n        response = self.api.session.get_download_authorization(\n            self.id_, file_name_prefix, valid_duration_in_seconds\n        )\n        return response['authorizationToken']
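\n\n    # Pagination note (comment added for clarity; not in the original source):\n    # the listing helpers below page through the B2 API by re-issuing the\n    # request from the 'nextFileName'/'nextFileId' markers in each response,\n    # stopping once the marker comes back as None.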
\n\n    def list_parts(self, file_id, start_part_number=None, batch_size=None):\n        \"\"\"\n        Get a list of all parts that have been uploaded for a given file.\n\n        :param str file_id: a file ID\n        :param int start_part_number: the first part number to return; defaults to the first part\n        :param int batch_size: the number of parts to fetch at a time from the server\n        \"\"\"\n        return self.api.list_parts(file_id, start_part_number, batch_size)\n\n    def list_file_versions(self, file_name, fetch_count=None):\n        \"\"\"\n        Lists all of the versions for a single file.\n\n        :param str file_name: the name of the file to list.\n        :param int,None fetch_count: how many entries to list per API call or ``None`` to use the default. Acceptable values: 1 - 10000\n        :rtype: generator[b2sdk.v1.FileVersionInfo]\n        \"\"\"\n        if fetch_count is not None and fetch_count <= 0:\n            # fetch_count equal to 0 means \"use API default\", which we don't want to support here\n            raise ValueError(\"unsupported fetch_count value\")\n        start_file_name = file_name\n        start_file_id = None\n        session = self.api.session\n        while 1:\n            response = session.list_file_versions(\n                self.id_, start_file_name, start_file_id, fetch_count, file_name\n            )\n\n            for entry in response['files']:\n                file_version_info = FileVersionInfoFactory.from_api_response(entry)\n                if file_version_info.file_name != file_name:\n                    # All versions for the requested file name have been listed.\n                    return\n                yield file_version_info\n            start_file_name = response['nextFileName']\n            start_file_id = response['nextFileId']\n            if start_file_name is None:\n                return\n\n    def ls(self, folder_to_list='', show_versions=False, recursive=False, fetch_count=10000):\n        \"\"\"\n        Pretend that folders exist and yield information about the files in a folder.\n\n        B2 has a flat namespace for the files in a bucket, but there is a convention\n        of using \"/\" as if there were folders. This method searches through the\n        flat namespace to find the files and \"folders\" that live within a given\n        folder.\n\n        When the `recursive` flag is set, lists all of the files in the given\n        folder, and all of its sub-folders.\n\n        :param str folder_to_list: the name of the folder to list; must not start with \"/\".\n                                   Empty string means top-level folder\n        :param bool show_versions: when ``True`` returns info about all versions of a file,\n                                   when ``False``, just returns info about the most recent versions\n        :param bool recursive: if ``True``, list folders recursively\n        :param int,None fetch_count: how many entries to return or ``None`` to use the default. Acceptable values: 1 - 10000\n        :rtype: generator[tuple[b2sdk.v1.FileVersionInfo, str]]\n        :returns: generator of (file_version_info, folder_name) tuples\n\n        .. note::\n            In case of `recursive=True`, folder_name is returned only for first file in the folder.\n        \"\"\"\n        # Every file returned must have a name that starts with the\n        # folder name and a \"/\".\n        prefix = folder_to_list\n        if prefix != '' and not prefix.endswith('/'):\n            prefix += '/'\n\n        # Loop until all files in the named directory have been listed.\n        # The starting point of the first list_file_names request is the\n        # prefix we're looking for. The prefix ends with '/', which is\n        # not allowed for file names, so no file name will match exactly,\n        # but the first one after that point is the first file in that\n        # \"folder\". 
If the first search doesn't produce enough results,\n # then we keep calling list_file_names until we get all of the\n # names in this \"folder\".\n current_dir = None\n start_file_name = prefix\n start_file_id = None\n session = self.api.session\n while True:\n if show_versions:\n response = session.list_file_versions(\n self.id_, start_file_name, start_file_id, fetch_count, prefix\n )\n else:\n response = session.list_file_names(self.id_, start_file_name, fetch_count, prefix)\n for entry in response['files']:\n file_version_info = FileVersionInfoFactory.from_api_response(entry)\n if not file_version_info.file_name.startswith(prefix):\n # We're past the files we care about\n return\n after_prefix = file_version_info.file_name[len(prefix):]\n if '/' not in after_prefix or recursive:\n # This is not a folder, so we'll print it out and\n # continue on.\n yield file_version_info, None\n current_dir = None\n else:\n # This is a folder. If it's different than the folder\n # we're already in, then we can print it. This check\n # is needed, because all of the files in the folder\n # will be in the list.\n folder_with_slash = after_prefix.split('/')[0] + '/'\n if folder_with_slash != current_dir:\n folder_name = prefix + folder_with_slash\n yield file_version_info, folder_name\n current_dir = folder_with_slash\n if response['nextFileName'] is None:\n # The response says there are no more files in the bucket,\n # so we can stop.\n return\n\n # Now we need to set up the next search. The response from\n # B2 has the starting point to continue with the next file,\n # but if we're in the middle of a \"folder\", we can skip ahead\n # to the end of the folder. The character after '/' is '0',\n # so we'll replace the '/' with a '0' and start there.\n #\n # When recursive is True, current_dir is always None.\n if current_dir is None:\n start_file_name = response.get('nextFileName')\n start_file_id = response.get('nextFileId')\n else:\n start_file_name = max(\n response['nextFileName'],\n prefix + current_dir[:-1] + '0',\n )\n\n def list_unfinished_large_files(self, start_file_id=None, batch_size=None, prefix=None):\n \"\"\"\n A generator that yields an :py:class:`b2sdk.v1.UnfinishedLargeFile` for each\n unfinished large file in the bucket, starting at the given file, filtering by prefix.\n\n :param str,None start_file_id: a file ID to start from or None to start from the beginning\n :param int,None batch_size: max file count\n :param str,None prefix: file name prefix filter\n :rtype: generator[b2sdk.v1.UnfinishedLargeFile]\n \"\"\"\n return self.api.services.large_file.list_unfinished_large_files(\n self.id_,\n start_file_id=start_file_id,\n batch_size=batch_size,\n prefix=prefix,\n )\n\n def start_large_file(self, file_name, content_type=None, file_info=None):\n \"\"\"\n Start a large file transfer.\n\n :param str file_name: a file name\n :param str,None content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name\n :param dict,None file_info: a file info to store with the file or ``None`` to not store anything\n \"\"\"\n validate_b2_file_name(file_name)\n return self.api.services.large_file.start_large_file(\n self.id_, file_name, content_type=content_type, file_info=file_info\n )\n\n @limit_trace_arguments(skip=('data_bytes',))\n def upload_bytes(\n self,\n data_bytes,\n file_name,\n content_type=None,\n file_infos=None,\n progress_listener=None,\n ):\n \"\"\"\n Upload bytes in memory to a B2 file.\n\n :param bytes data_bytes: a byte array to upload\n :param 
str file_name: a file name to upload bytes to\n :param str,None content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name\n :param dict,None file_infos: a file info to store with the file or ``None`` to not store anything\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use, or ``None`` to not track progress\n :rtype: generator[b2sdk.v1.FileVersion]\n \"\"\"\n upload_source = UploadSourceBytes(data_bytes)\n return self.upload(\n upload_source,\n file_name,\n content_type=content_type,\n file_info=file_infos,\n progress_listener=progress_listener,\n )\n\n def upload_local_file(\n self,\n local_file,\n file_name,\n content_type=None,\n file_infos=None,\n sha1_sum=None,\n min_part_size=None,\n progress_listener=None,\n ):\n \"\"\"\n Upload a file on local disk to a B2 file.\n\n .. seealso::\n\n :ref:`Synchronizer `, a *high-performance* utility that synchronizes a local folder with a :term:`bucket`.\n\n :param str local_file: a path to a file on local disk\n :param str file_name: a file name of the new B2 file\n :param str,None content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name\n :param dict,None file_infos: a file info to store with the file or ``None`` to not store anything\n :param str,None sha1_sum: file SHA1 hash or ``None`` to compute it automatically\n :param int min_part_size: a minimum size of a part\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use, or ``None`` to not report progress\n :rtype: generator[b2sdk.v1.FileVersion]\n \"\"\"\n upload_source = UploadSourceLocalFile(local_path=local_file, content_sha1=sha1_sum)\n return self.upload(\n upload_source,\n file_name,\n content_type=content_type,\n file_info=file_infos,\n min_part_size=min_part_size,\n progress_listener=progress_listener,\n )\n\n def upload(\n self,\n upload_source,\n file_name,\n content_type=None,\n file_info=None,\n min_part_size=None,\n progress_listener=None\n ):\n \"\"\"\n Upload a file to B2, retrying as needed.\n\n The source of the upload is an UploadSource object that can be used to\n open (and re-open) the file. 
The result of opening should be a binary\n file whose read() method returns bytes.\n\n :param b2sdk.v1.UploadSource upload_source: an object that opens the source of the upload\n :param str file_name: the file name of the new B2 file\n :param str,None content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name\n :param dict,None file_info: a file info to store with the file or ``None`` to not store anything\n :param int,None min_part_size: the smallest part size to use or ``None`` to determine automatically\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use, or ``None`` to not report progress\n :rtype: generator[b2sdk.v1.FileVersion]\n\n The function `opener` should return a file-like object, and it\n must be possible to call it more than once in case the upload\n is retried.\n \"\"\"\n return self.create_file(\n [WriteIntent(upload_source)],\n file_name,\n content_type=content_type,\n file_info=file_info,\n progress_listener=progress_listener,\n # FIXME: Bucket.upload documents wrong logic\n recommended_upload_part_size=min_part_size,\n )\n\n def create_file(\n self,\n write_intents,\n file_name,\n content_type=None,\n file_info=None,\n progress_listener=None,\n recommended_upload_part_size=None,\n continue_large_file_id=None,\n ):\n \"\"\"\n Creates a new file in this bucket using an iterable (list, tuple etc) of remote or local sources.\n\n Source ranges can overlap and remote sources will be prioritized over local sources (when possible).\n For more information and usage examples please see :ref:`Advanced usage patterns `.\n\n :param list[b2sdk.v1.WriteIntent] write_intents: list of write intents (remote or local sources)\n :param str new_file_name: file name of the new file\n :param str,None content_type: content_type for the new file, if ``None`` content_type would be\n automatically determined or it may be copied if it resolves\n as single part remote source copy\n :param dict,None file_info: file_info for the new file, if ``None`` it will be set to empty dict\n or it may be copied if it resolves as single part remote source copy\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use,\n or ``None`` to not report progress\n :param int,None recommended_upload_part_size: the recommended part size to use for uploading local sources\n or ``None`` to determine automatically, but remote sources would be copied with\n maximum possible part size\n :param str,None continue_large_file_id: large file id that should be selected to resume file creation\n for multipart upload/copy, ``None`` for automatic search for this id\n \"\"\"\n return self._create_file(\n self.api.services.emerger.emerge,\n write_intents,\n file_name,\n content_type=content_type,\n file_info=file_info,\n progress_listener=progress_listener,\n continue_large_file_id=continue_large_file_id,\n recommended_upload_part_size=recommended_upload_part_size,\n )\n\n def create_file_stream(\n self,\n write_intents_iterator,\n file_name,\n content_type=None,\n file_info=None,\n progress_listener=None,\n recommended_upload_part_size=None,\n continue_large_file_id=None,\n ):\n \"\"\"\n Creates a new file in this bucket using a stream of multiple remote or local sources.\n\n Source ranges can overlap and remote sources will be prioritized over local sources (when possible).\n For more information and usage examples please see :ref:`Advanced usage patterns `.\n\n :param iterator[b2sdk.v1.WriteIntent] 
write_intents_iterator: iterator of write intents which\n are sorted ascending by ``destination_offset``\n :param str new_file_name: file name of the new file\n :param str,None content_type: content_type for the new file, if ``None`` content_type would be\n automatically determined or it may be copied if it resolves\n as single part remote source copy\n :param dict,None file_info: file_info for the new file, if ``None`` it will be set to empty dict\n or it may be copied if it resolves as single part remote source copy\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use,\n or ``None`` to not report progress\n :param int,None recommended_upload_part_size: the recommended part size to use for uploading local sources\n or ``None`` to determine automatically, but remote sources would be copied with\n maximum possible part size\n :param str,None continue_large_file_id: large file id that should be selected to resume file creation\n for multipart upload/copy, if ``None`` in multipart case it would always start a new\n large file\n \"\"\"\n return self._create_file(\n self.api.services.emerger.emerge_stream,\n write_intents_iterator,\n file_name,\n content_type=content_type,\n file_info=file_info,\n progress_listener=progress_listener,\n continue_large_file_id=continue_large_file_id,\n recommended_upload_part_size=recommended_upload_part_size,\n )\n\n def _create_file(\n self,\n emerger_method,\n write_intents_iterable,\n file_name,\n content_type=None,\n file_info=None,\n progress_listener=None,\n recommended_upload_part_size=None,\n continue_large_file_id=None,\n ):\n validate_b2_file_name(file_name)\n progress_listener = progress_listener or DoNothingProgressListener()\n\n return emerger_method(\n self.id_,\n write_intents_iterable,\n file_name,\n content_type,\n file_info,\n progress_listener,\n recommended_upload_part_size=recommended_upload_part_size,\n continue_large_file_id=continue_large_file_id,\n )\n\n def concatenate(\n self,\n outbound_sources,\n file_name,\n content_type=None,\n file_info=None,\n progress_listener=None,\n recommended_upload_part_size=None,\n continue_large_file_id=None,\n ):\n \"\"\"\n Creates a new file in this bucket by concatenating multiple remote or local sources.\n\n :param list[b2sdk.v1.OutboundTransferSource] outbound_sources: list of outbound sources (remote or local)\n :param str new_file_name: file name of the new file\n :param str,None content_type: content_type for the new file, if ``None`` content_type would be\n automatically determined from file name or it may be copied if it resolves\n as single part remote source copy\n :param dict,None file_info: file_info for the new file, if ``None`` it will be set to empty dict\n or it may be copied if it resolves as single part remote source copy\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use,\n or ``None`` to not report progress\n :param int,None recommended_upload_part_size: the recommended part size to use for uploading local sources\n or ``None`` to determine automatically, but remote sources would be copied with\n maximum possible part size\n :param str,None continue_large_file_id: large file id that should be selected to resume file creation\n for multipart upload/copy, ``None`` for automatic search for this id\n \"\"\"\n return self.create_file(\n WriteIntent.wrap_sources_iterator(outbound_sources),\n file_name,\n content_type=content_type,\n file_info=file_info,\n progress_listener=progress_listener,\n 
recommended_upload_part_size=recommended_upload_part_size,\n            continue_large_file_id=continue_large_file_id,\n        )\n\n    def concatenate_stream(\n        self,\n        outbound_sources_iterator,\n        file_name,\n        content_type=None,\n        file_info=None,\n        progress_listener=None,\n        recommended_upload_part_size=None,\n        continue_large_file_id=None,\n    ):\n        \"\"\"\n        Creates a new file in this bucket by concatenating a stream of multiple remote or local sources.\n\n        :param iterator[b2sdk.v1.OutboundTransferSource] outbound_sources_iterator: iterator of outbound sources\n        :param str file_name: file name of the new file\n        :param str,None content_type: content_type for the new file, if ``None`` content_type would be\n                        automatically determined or it may be copied if it resolves\n                        as single part remote source copy\n        :param dict,None file_info: file_info for the new file, if ``None`` it will be set to empty dict\n                        or it may be copied if it resolves as single part remote source copy\n        :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use,\n                        or ``None`` to not report progress\n        :param int,None recommended_upload_part_size: the recommended part size to use for uploading local sources\n                        or ``None`` to determine automatically, but remote sources would be copied with\n                        maximum possible part size\n        :param str,None continue_large_file_id: large file id that should be selected to resume file creation\n                        for multipart upload/copy, if ``None`` in multipart case it would always start a new\n                        large file\n        \"\"\"\n        return self.create_file_stream(\n            WriteIntent.wrap_sources_iterator(outbound_sources_iterator),\n            file_name,\n            content_type=content_type,\n            file_info=file_info,\n            progress_listener=progress_listener,\n            recommended_upload_part_size=recommended_upload_part_size,\n            continue_large_file_id=continue_large_file_id,\n        )\n\n    def get_download_url(self, filename):\n        \"\"\"\n        Get file download URL.\n\n        :param str filename: a file name\n        :rtype: str\n        \"\"\"\n        return \"%s/file/%s/%s\" % (\n            self.api.account_info.get_download_url(),\n            b2_url_encode(self.name),\n            b2_url_encode(filename),\n        )\n\n    def hide_file(self, file_name):\n        \"\"\"\n        Hide a file.\n\n        :param str file_name: a file name\n        :rtype: b2sdk.v1.FileVersionInfo\n        \"\"\"\n        response = self.api.session.hide_file(self.id_, file_name)\n        return FileVersionInfoFactory.from_api_response(response)
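\n\n    # Note added for clarity (not in the original source): copy() below takes a\n    # single-call b2_copy_file path when no length is given, and otherwise goes\n    # through the multipart create_file() machinery with a CopySource intent.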
\n\n    def copy(\n        self,\n        file_id,\n        new_file_name,\n        content_type=None,\n        file_info=None,\n        offset=0,\n        length=None,\n        progress_listener=None\n    ):\n        \"\"\"\n        Creates a new file in this bucket by (server-side) copying from an existing file.\n\n        :param str file_id: file ID of the existing file to copy from\n        :param str new_file_name: file name of the new file\n        :param str,None content_type: content_type for the new file; if ``None`` and ``b2_copy_file`` will be used,\n                        content_type will be copied from the source file - otherwise content_type would be\n                        automatically determined\n        :param dict,None file_info: file_info for the new file; if ``None`` and ``b2_copy_file`` will be used,\n                        file_info will be copied from the source file - otherwise it will be set to empty dict\n        :param int offset: offset in the existing file that the copy should start from\n        :param int,None length: number of bytes to copy; if ``None`` then ``offset`` has to be ``0`` and it will\n                        use ``b2_copy_file`` without the ``range`` parameter, so it may fail if the file is too large.\n                        For large files, length has to be specified so that ``b2_copy_part`` is used instead.\n        :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use\n                        for multipart copy, or ``None`` to not report progress\n        \"\"\"\n\n        copy_source = CopySource(file_id, offset=offset, length=length)\n        if not length:\n            # TODO: it feels like this should be checked on lower level - eg. RawApi\n            validate_b2_file_name(new_file_name)\n            progress_listener = progress_listener or DoNothingProgressListener()\n            return self.api.services.copy_manager.copy_file(\n                copy_source,\n                new_file_name,\n                content_type=content_type,\n                file_info=file_info,\n                destination_bucket_id=self.id_,\n                progress_listener=progress_listener,\n            ).result()\n        else:\n            return self.create_file(\n                [WriteIntent(copy_source)],\n                new_file_name,\n                content_type=content_type,\n                file_info=file_info,\n                progress_listener=progress_listener,\n            )\n\n    # FIXME: this should be deprecated\n    def copy_file(\n        self,\n        file_id,\n        new_file_name,\n        bytes_range=None,\n        metadata_directive=None,\n        content_type=None,\n        file_info=None,\n    ):\n        \"\"\"\n        Creates a new file in this bucket by (server-side) copying from an existing file.\n\n        :param str file_id: file ID of existing file\n        :param str new_file_name: file name of the new file\n        :param tuple[int,int],None bytes_range: start and end offsets (**inclusive!**), default is the entire file\n        :param b2sdk.v1.MetadataDirectiveMode,None metadata_directive: default is :py:attr:`b2sdk.v1.MetadataDirectiveMode.COPY`\n        :param str,None content_type: content_type for the new file if metadata_directive is set to :py:attr:`b2sdk.v1.MetadataDirectiveMode.REPLACE`, default will copy the content_type of old file\n        :param dict,None file_info: file_info for the new file if metadata_directive is set to :py:attr:`b2sdk.v1.MetadataDirectiveMode.REPLACE`, default will copy the file_info of old file\n        \"\"\"\n        return self.api.session.copy_file(\n            file_id,\n            new_file_name,\n            bytes_range,\n            metadata_directive,\n            content_type,\n            file_info,\n            self.id_,\n        )\n\n    def delete_file_version(self, file_id, file_name):\n        \"\"\"\n        Delete a file version.\n\n        :param str file_id: a file ID\n        :param str file_name: a file name\n        \"\"\"\n        # filename argument is not first, because one day it may become optional\n        return self.api.delete_file_version(file_id, file_name)\n\n    @disable_trace\n    def as_dict(self):\n        \"\"\"\n        Return bucket representation as a dictionary.\n\n        :rtype: dict\n        \"\"\"\n        result = {\n            'accountId': self.api.account_info.get_account_id(),\n            'bucketId': self.id_,\n        }\n        if self.name is not None:\n            result['bucketName'] = self.name\n        if self.type_ is not None:\n            result['bucketType'] = self.type_\n        result['bucketInfo'] = self.bucket_info\n        result['corsRules'] = self.cors_rules\n        result['lifecycleRules'] = self.lifecycle_rules\n        result['revision'] = self.revision\n        result['options'] = self.options_set\n        return result\n\n    def __repr__(self):\n        return 'Bucket<%s,%s,%s>' % (self.id_, self.name, self.type_)\n\n\nclass BucketFactory(object):\n    \"\"\"\n    This is a factory for creating bucket objects from different kinds of objects.\n    \"\"\"\n    BUCKET_CLASS = staticmethod(Bucket)\n\n    @classmethod\n    def from_api_response(cls, api, response):\n        \"\"\"\n        Create a Bucket object from API response.\n\n        :param b2sdk.v1.B2Api api: API object\n        :param requests.Response response: response object\n        :rtype: b2sdk.v1.Bucket\n        \"\"\"\n        return [cls.from_api_bucket_dict(api, bucket_dict) for bucket_dict in response['buckets']]
code-block:: python\n\n            {\n                \"bucketType\": \"allPrivate\",\n                \"bucketId\": \"a4ba6a39d8b6b5fd561f0010\",\n                \"bucketName\": \"zsdfrtsazsdfafr\",\n                \"accountId\": \"4aa9865d6f00\",\n                \"bucketInfo\": {},\n                \"options\": [],\n                \"revision\": 1\n            }\n\n        into a Bucket object.\n\n        :param b2sdk.v1.B2Api api: API client\n        :param dict bucket_dict: a dictionary with bucket properties\n        :rtype: b2sdk.v1.Bucket\n\n        \"\"\"\n        bucket_name = bucket_dict['bucketName']\n        bucket_id = bucket_dict['bucketId']\n        type_ = bucket_dict['bucketType']\n        bucket_info = bucket_dict['bucketInfo']\n        cors_rules = bucket_dict['corsRules']\n        lifecycle_rules = bucket_dict['lifecycleRules']\n        revision = bucket_dict['revision']\n        options = set(bucket_dict['options'])\n        if type_ is None:\n            raise UnrecognizedBucketType(bucket_dict['bucketType'])\n        return cls.BUCKET_CLASS(\n            api, bucket_id, bucket_name, type_, bucket_info, cors_rules, lifecycle_rules, revision,\n            bucket_dict, options\n        )\n","sub_path":"b2sdk/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":37006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"78743259","text":"from models.original_models import madry_model\nfrom tools.art.model import Model\n\n\nclass MadryModel(Model):\n    def __init__(self, checkpoint_path, image_width=28, image_height=28, n_channels=1, n_classes=10):\n        super().__init__(image_height=image_height, image_width=image_width, n_channels=n_channels, n_classes=n_classes,\n                         checkpoint_path=checkpoint_path)\n\n    def calculate_logits(self, inputs):\n        model = madry_model.MadryModel(n_classes=self.n_classes)\n        output = model.fprop(inputs)\n        return output['logits']\n","sub_path":"tools/art/madry_model.py","file_name":"madry_model.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"326096273","text":"from sklearn.datasets import load_iris\r\nfrom sciope.models import label_propagation\r\nimport numpy as np\r\nimport pytest\r\n\r\n\r\n@pytest.fixture\r\ndef iris_data():\r\n    data = load_iris()\r\n    idx = np.random.randint(0, len(data.target), 75)\r\n    data.new_target = np.copy(data.target)\r\n    data.new_target[idx] = -1  # unlabeled data points\r\n    data.idx = idx\r\n    return data\r\n\r\n\r\ndef test_lpmodel(iris_data):\r\n    model = label_propagation.LPModel()\r\n    model.train(iris_data.data, iris_data.new_target)\r\n    print(model.gamma)\r\n","sub_path":"sciope/tests/test_lpmodel.py","file_name":"test_lpmodel.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"600869135","text":"# Copyright 2017-present Open Networking Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom xos.exceptions import *\nfrom tenantwithcontainer_decl import *\n\n\nclass TenantWithContainer(TenantWithContainer_decl):\n    class Meta:\n        proxy = True\n\n    def __init__(self, *args, **kwargs):\n        super(TenantWithContainer, 
self).__init__(*args, **kwargs)\n\n        # vSG service relies on knowing when instance id has changed\n        self.orig_instance_id = self.get_attribute(\"instance_id\")\n\n    # vSG service relies on instance_id attribute\n    def get_attribute(self, name, default=None):\n        if name == \"instance_id\":\n            if self.instance:\n                return self.instance.id\n            else:\n                return None\n        else:\n            return super(TenantWithContainer, self).get_attribute(name, default)\n\n    # Services may wish to override the image() function to return different\n    # images based on criteria in the tenant object. For example,\n    #    if (self.has_feature_A):\n    #        return Instance.object.get(name=\"image_with_feature_a\")\n    #    elif (self.has_feature_B):\n    #        return Instance.object.get(name=\"image_with_feature_b\")\n    #    else:\n    #        return super(MyTenantClass,self).image()\n\n    @property\n    def image(self):\n        from core.models import Image\n\n        # Implement the logic here to pick the image that should be used when\n        # instantiating the VM that will hold the container.\n\n        slice = self.provider_service.slices.all()\n        if not slice:\n            raise XOSProgrammingError(\"provider service has no slice\")\n        slice = slice[0]\n\n        # If slice has default_image set then use it\n        if slice.default_image:\n            return slice.default_image\n\n        raise XOSProgrammingError(\"Please set a default image for %s\" % slice.name)\n\n    def save(self, *args, **kwargs):\n        if (not self.creator) and (hasattr(self, \"caller\")) and (self.caller):\n            self.creator = self.caller\n\n        super(TenantWithContainer, self).save(*args, **kwargs)\n","sub_path":"xos/core/models/tenantwithcontainer.py","file_name":"tenantwithcontainer.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"242535695","text":"# -*- coding: utf-8 -*-\n\nimport json\n\nfrom flask import Blueprint, request, abort\nfrom jinja2 import Template\n\nfrom app import app\nfrom app.agents.models import Bot\nfrom app.commons import build_response\nfrom app.endpoint.utils import SilentUndefined\nfrom app.endpoint.utils import call_api\nfrom app.endpoint.utils import get_synonyms\nfrom app.endpoint.utils import split_sentence\nfrom app.intents.models import Intent\nfrom app.nlu.classifiers.starspace_intent_classifier import \\\n    EmbeddingIntentClassifier\nfrom app.nlu.entity_extractor import EntityExtractor\nfrom app.nlu.tasks import model_updated_signal\n\nendpoint = Blueprint('api', __name__, url_prefix='/api')\n\nsentence_classifier = None\nsynonyms = None\nentity_extraction = None\n\n\n# Request Handler\n@endpoint.route('/v1', methods=['POST'])\ndef api():\n    \"\"\"\n    Endpoint to converse with chatbot.\n    Chat context is maintained by exchanging the payload between client and bot.\n\n    sample input/output payload =>\n\n    {\n        \"currentNode\": \"\",\n        \"complete\": false,\n        \"parameters\": [],\n        \"extractedParameters\": {},\n        \"missingParameters\": [],\n        \"intent\": {\n        },\n        \"context\": {},\n        \"input\": \"hello\",\n        \"speechResponse\": [\n        ]\n    }\n\n    :param json:\n    :return json:\n    \"\"\"\n    request_json = request.get_json(silent=True)\n    result_json = request_json\n\n    if request_json:\n\n        context = {\"context\": request_json[\"context\"]}\n\n        if app.config[\"DEFAULT_WELCOME_INTENT_NAME\"] in request_json.get(\n                \"input\"):\n            intent = Intent.objects(\n                intentId=app.config[\"DEFAULT_WELCOME_INTENT_NAME\"]).first()\n            result_json[\"complete\"] = True\n            result_json[\"intent\"][\"object_id\"] = str(intent.id)\n            result_json[\"intent\"][\"id\"] = str(intent.intentId)\n            
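# NOTE: intent.speechResponse is rendered as a Jinja2 template below; SilentUndefined\n            # (from app.endpoint.utils) appears to be a jinja2 Undefined subclass that swallows\n            # errors from missing context keys, e.g. \"Hi {{context.user}}\" degrades to \"Hi \"\n            # rather than raising when \"user\" is absent from the context.\n            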
result_json[\"input\"] = request_json.get(\"input\")\n template = Template(\n intent.speechResponse,\n undefined=SilentUndefined)\n result_json[\"speechResponse\"] = split_sentence(template.render(**context))\n\n app.logger.info(request_json.get(\"input\"), extra=result_json)\n return build_response.build_json(result_json)\n\n intent_id, confidence, suggestions = predict(request_json.get(\"input\"))\n app.logger.info(\"intent_id => %s\" % intent_id)\n intent = Intent.objects.get(intentId=intent_id)\n\n if intent.parameters:\n parameters = intent.parameters\n else:\n parameters = []\n\n if ((request_json.get(\"complete\") is None) or (\n request_json.get(\"complete\") is True)):\n result_json[\"intent\"] = {\n \"object_id\": str(intent.id),\n \"confidence\": confidence,\n \"id\": str(intent.intentId.encode('utf8'))\n }\n\n if parameters:\n # Extract NER entities\n extracted_parameters = entity_extraction.predict(\n intent_id, request_json.get(\"input\"))\n\n missing_parameters = []\n result_json[\"missingParameters\"] = []\n result_json[\"extractedParameters\"] = {}\n result_json[\"parameters\"] = []\n for parameter in parameters:\n result_json[\"parameters\"].append({\n \"name\": parameter.name,\n \"type\": parameter.type,\n \"required\": parameter.required\n })\n\n if parameter.required:\n if parameter.name not in extracted_parameters.keys():\n result_json[\"missingParameters\"].append(\n parameter.name)\n missing_parameters.append(parameter)\n\n result_json[\"extractedParameters\"] = extracted_parameters\n\n if missing_parameters:\n result_json[\"complete\"] = False\n current_node = missing_parameters[0]\n result_json[\"currentNode\"] = current_node[\"name\"]\n result_json[\"speechResponse\"] = split_sentence(current_node[\"prompt\"])\n else:\n result_json[\"complete\"] = True\n context[\"parameters\"] = extracted_parameters\n else:\n result_json[\"complete\"] = True\n\n elif request_json.get(\"complete\") is False:\n if \"cancel\" not in intent.name:\n intent_id = request_json[\"intent\"][\"id\"]\n intent = Intent.objects.get(intentId=intent_id)\n\n extracted_parameter = entity_extraction.replace_synonyms({\n request_json.get(\"currentNode\"): request_json.get(\"input\")\n })\n\n # replace synonyms for entity values\n result_json[\"extractedParameters\"].update(extracted_parameter)\n\n result_json[\"missingParameters\"].remove(\n request_json.get(\"currentNode\"))\n\n if len(result_json[\"missingParameters\"]) == 0:\n result_json[\"complete\"] = True\n context = {\"parameters\": result_json[\"extractedParameters\"],\n \"context\": request_json[\"context\"]}\n else:\n missing_parameter = result_json[\"missingParameters\"][0]\n result_json[\"complete\"] = False\n current_node = [\n node for node in intent.parameters if missing_parameter in node.name][0]\n result_json[\"currentNode\"] = current_node.name\n result_json[\"speechResponse\"] = split_sentence(current_node.prompt)\n else:\n result_json[\"currentNode\"] = None\n result_json[\"missingParameters\"] = []\n result_json[\"parameters\"] = {}\n result_json[\"intent\"] = {}\n result_json[\"complete\"] = True\n\n if result_json[\"complete\"]:\n if intent.apiTrigger:\n isJson = False\n parameters = result_json[\"extractedParameters\"]\n headers = intent.apiDetails.get_headers()\n app.logger.info(\"headers %s\" % headers)\n url_template = Template(\n intent.apiDetails.url, undefined=SilentUndefined)\n rendered_url = url_template.render(**context)\n if intent.apiDetails.isJson:\n isJson = True\n request_template = Template(\n 
intent.apiDetails.jsonData, undefined=SilentUndefined)\n parameters = json.loads(request_template.render(**context))\n\n try:\n result = call_api(rendered_url,\n intent.apiDetails.requestType, headers,\n parameters, isJson)\n except Exception as e:\n app.logger.warn(\"API call failed\", e)\n result_json[\"speechResponse\"] = [\"Service is not available. Please try again later.\"]\n else:\n context[\"result\"] = result\n template = Template(\n intent.speechResponse, undefined=SilentUndefined)\n result_json[\"speechResponse\"] = split_sentence(template.render(**context))\n else:\n context[\"result\"] = {}\n template = Template(intent.speechResponse,\n undefined=SilentUndefined)\n result_json[\"speechResponse\"] = split_sentence(template.render(**context))\n app.logger.info(request_json.get(\"input\"), extra=result_json)\n return build_response.build_json(result_json)\n else:\n return abort(400)\n\n\ndef update_model(app, message, **extra):\n \"\"\"\n Signal hook to be called after training is completed.\n Reloads ml models and synonyms.\n :param app:\n :param message:\n :param extra:\n :return:\n \"\"\"\n global sentence_classifier\n\n sentence_classifier = EmbeddingIntentClassifier.load(\n app.config[\"MODELS_DIR\"], app.config[\"USE_WORD_VECTORS\"])\n\n synonyms = get_synonyms()\n\n global entity_extraction\n\n entity_extraction = EntityExtractor(synonyms)\n\n app.logger.info(\"Intent Model updated\")\n\n\nwith app.app_context():\n update_model(app, \"Models updated\")\n\nmodel_updated_signal.connect(update_model, app)\n\n\ndef predict(sentence):\n \"\"\"\n Predict Intent using Intent classifier\n :param sentence:\n :return:\n \"\"\"\n bot = Bot.objects.get(name=\"default\")\n predicted, intents = sentence_classifier.process(sentence)\n app.logger.info(\"predicted intent %s\", predicted)\n if predicted[\"confidence\"] < bot.config.get(\"confidence_threshold\", .90):\n intents = Intent.objects(intentId=app.config[\"DEFAULT_FALLBACK_INTENT_NAME\"])\n intents = intents.first().intentId\n return intents, 1.0, []\n else:\n return predicted[\"intent\"], predicted[\"confidence\"], intents[1:]\n","sub_path":"app/endpoint/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":9092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"305964357","text":"\"\"\"Script to analyze how many dams found and how many pre-known.\"\"\"\nimport logging\nimport os\nimport pathlib\nimport sqlite3\nimport sys\n\nfrom osgeo import gdal\nfrom osgeo import osr\nfrom osgeo import ogr\nimport rtree\nimport shapely.wkb\n\nTARGET_VECTOR_PATH = r\"C:\\Users\\richp\\Downloads\\known_dams.gpkg\"\nBASE_DAMS_DB_PATH = r\"C:\\Users\\richp\\Documents\\annotated_dams\\natgeo_dams_database_2020_07_01.db\"\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.DEBUG,\n format=(\n '%(asctime)s (%(relativeCreated)d) %(processName)s %(levelname)s '\n '%(name)s [%(funcName)s:%(lineno)d] %(message)s'))\nLOGGER = logging.getLogger(__name__)\n\ndef _execute_sqlite(\n sqlite_command, database_path, argument_list=None,\n mode='read_only', execute='execute', fetch=None):\n \"\"\"Execute SQLite command and attempt retries on a failure.\n\n Parameters:\n sqlite_command (str): a well formatted SQLite command.\n database_path (str): path to the SQLite database to operate on.\n argument_list (list): `execute == 'execute` then this list is passed to\n the internal sqlite3 `execute` call.\n mode (str): must be either 'read_only' or 'modify'.\n execute (str): must 
be either 'execute', 'many', or 'script'.\n fetch (str): if not `None` can be either 'all' or 'one'.\n If not None the result of a fetch will be returned by this\n function.\n\n Returns:\n result of fetch if `fetch` is not None.\n\n \"\"\"\n cursor = None\n connection = None\n try:\n if mode == 'read_only':\n ro_uri = r'%s?mode=ro' % pathlib.Path(\n os.path.abspath(database_path)).as_uri()\n LOGGER.debug(\n '%s exists: %s', ro_uri, os.path.exists(os.path.abspath(\n database_path)))\n connection = sqlite3.connect(ro_uri, uri=True)\n elif mode == 'modify':\n connection = sqlite3.connect(database_path)\n else:\n raise ValueError('Unknown mode: %s' % mode)\n\n if execute == 'execute':\n cursor = connection.execute(sqlite_command, argument_list)\n elif execute == 'many':\n cursor = connection.executemany(sqlite_command, argument_list)\n elif execute == 'script':\n cursor = connection.executescript(sqlite_command)\n else:\n raise ValueError('Unknown execute mode: %s' % execute)\n\n result = None\n payload = None\n if fetch == 'all':\n payload = (cursor.fetchall())\n elif fetch == 'one':\n payload = (cursor.fetchone())\n elif fetch is not None:\n raise ValueError('Unknown fetch mode: %s' % fetch)\n if payload is not None:\n result = list(payload)\n cursor.close()\n connection.commit()\n connection.close()\n return result\n except Exception:\n LOGGER.exception('Exception on _execute_sqlite: %s', sqlite_command)\n if cursor is not None:\n cursor.close()\n if connection is not None:\n connection.commit()\n connection.close()\n raise\n\n\ndef main():\n \"\"\"Entry point.\"\"\"\n for table_id, field in [\n #('detected_dams', 'probability'),\n ('work_status', 'processed')]:\n LOGGER.info(f'processing {table_id}')\n bounding_box_list = _execute_sqlite(\n f'''\n SELECT lng_min, lat_min, lng_max, lat_max, grid_id, processed\n FROM {table_id}\n GROUP BY lng_min, lat_min, lng_max, lat_max\n ''', BASE_DAMS_DB_PATH, fetch='all', argument_list=[])\n gpkg_driver = ogr.GetDriverByName('GPKG')\n vector = gpkg_driver.CreateDataSource(f'{table_id}.gpkg')\n wgs84_srs = osr.SpatialReference()\n wgs84_srs.ImportFromEPSG(4326)\n layer = vector.CreateLayer(\n 'known_dams', wgs84_srs, geom_type=ogr.wkbPolygon)\n layer.CreateField(ogr.FieldDefn('grid_id', ogr.OFTString))\n layer.CreateField(ogr.FieldDefn('processed', ogr.OFTString))\n\n LOGGER.info(f'starting transaction')\n layer.StartTransaction()\n for lng_min, lat_min, lng_max, lat_max, grid_id, processed in (\n bounding_box_list):\n box = shapely.geometry.box(lng_min, lat_min, lng_max, lat_max)\n feature = ogr.Feature(layer.GetLayerDefn())\n feature.SetField('grid_id', grid_id)\n feature.SetField('processed', processed)\n feature.SetGeometry(ogr.CreateGeometryFromWkb(box.wkb))\n layer.CreateFeature(feature)\n\n LOGGER.info(f'commiting transaction')\n layer.CommitTransaction()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"utils/make_shapefile_for_lisa.py","file_name":"make_shapefile_for_lisa.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"262502053","text":"import logging\nfrom typing import Generator\n\nimport pytest\nfrom pymongo import MongoClient\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture(scope=\"session\")\ndef mongo_example_db() -> Generator:\n \"\"\"Return a connection to the MongoDB example DB\"\"\"\n uri = \"mongodb://mongo_user:mongo_pass@mongodb_example/mongo_test\"\n\n client = MongoClient(uri, serverSelectionTimeoutMS=5000)\n 
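# serverSelectionTimeoutMS=5000 makes the fixture fail fast when the mongodb_example\n    # container is unreachable: e.g. client.admin.command(\"ping\") raises\n    # pymongo.errors.ServerSelectionTimeoutError after ~5s instead of blocking forever.\n    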
logger.debug(f\"Connecting to MongoDB example database at: {uri}\")\n # Setup above...\n yield client\n # Teardown below...\n client.close()\n\n\n@pytest.mark.integration\ndef test_mongo_example_data(mongo_example_db):\n \"\"\"Confirm that the example database is populated with simulated data\"\"\"\n db = mongo_example_db[\"mongo_test\"]\n assert set(db.collection_names()) == {\n \"payment_card\",\n \"orders\",\n \"customer\",\n \"employee\",\n \"product\",\n \"reports\",\n \"customer_details\",\n }\n assert db.customer.count() == 3\n assert db.payment_card.count() == 2\n assert db.orders.count() == 4\n assert db.employee.count() == 2\n assert db.product.count() == 3\n assert db.reports.count() == 4\n","sub_path":"tests/integration_tests/test_integration_mongodb_example.py","file_name":"test_integration_mongodb_example.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"352750986","text":"# coding:utf-8\n__author__ = 'Administrator'\nfrom Tkinter import * # 导入 Tkinter 库\n\nroot = Tk() # 创建窗口对象的背景色\n\nroot.title('Label')\nroot.geometry('900x600')\nroot.resizable(width=False, height=True)\n\nm = Menu(root)\nm2=Menu(m)\nfor item in ['python', 'perl', 'php', 'ruby']:\n m2.add_command(label=item)\n\nm2.add_separator()\n\nfor item in ['java', 'c++', 'c']:\n m2.add_command(label=item)\nm.add_cascade(label='lan', menu=m2)\nroot['menu'] =m\n\nroot.mainloop()\n\n","sub_path":"GUI/Tkinter/ok/Menu/separator.py","file_name":"separator.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"647360046","text":"import array, time\nfrom machine import Pin\nimport rp2\n\n@rp2.asm_pio(sideset_init=rp2.PIO.OUT_LOW, out_shiftdir=rp2.PIO.SHIFT_LEFT, autopull=True, pull_thresh=24)\ndef ws2812():\n T1 = 2\n T2 = 5\n T3 = 3\n wrap_target()\n label(\"bitloop\")\n out(x, 1) .side(0) [T3 - 1]\n jmp(not_x, \"do_zero\") .side(1) [T1 - 1]\n jmp(\"bitloop\") .side(1) [T2 - 1]\n label(\"do_zero\")\n nop() .side(0) [T2 - 1]\n wrap()\n \nclass ws2812b:\n def __init__(self, num_leds, state_machine, pin):\n self.pixels = array.array(\"I\", [0 for _ in range(num_leds)])\n self.sm = rp2.StateMachine(state_machine, ws2812, freq=8000000, sideset_base=Pin(pin))\n self.sm.active(1)\n self.num_leds = num_leds\n\n def set_pixel(self, pixel_num, red, green, blue):\n self.pixels[pixel_num] = blue | red << 8 | green << 16\n \n def show(self):\n for i in range(self.num_leds):\n self.sm.put(self.pixels[i],8)\n \n def fill(self, red, green, blue):\n for i in range(self.num_leds):\n self.set_pixel(i, red, green, blue)\n \n ","sub_path":"ws2812b.py","file_name":"ws2812b.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"409136932","text":"# -*- coding: utf-8 -*-\n# vim: set fileencoding=utf-8:noet:tabstop=4:softtabstop=4:shiftwidth=8:expandtab\n\n\"\"\" python3 method \"\"\"\n\n# Copyright (c) 2010 - 2020, © Badassops LLC / Luc Suryo\n# All rights reserved.\n# BSD 3-Clause License : http://www.freebsd.org/copyright/freebsd-license.html\n\nimport sys\nimport threading\nfrom time import sleep\n\nclass SpinCursor(threading.Thread):\n \"\"\" class and function so display a wait spinner (dots or wheel)\n \"\"\"\n\n def __init__(self, msg=None, maxspin=0, minspin=10, speed=5, mode=None):\n # Count of a spin\n self.count = 0\n self.out = sys.stdout\n self.flag = 
False\n        self.max = maxspin\n        self.min = minspin\n        # Any message to print first ?\n        self.msg = msg\n        # Complete printed string\n        self.string = None\n        # Speed is given as number of spins a second\n        # Use it to calculate spin wait time\n        self.waittime = 1.0/float(speed*10)\n        if mode == 'dots':\n            self.spinchars = (u'◦ ', u'○ ', u'◎ ', u'◉ ')\n        if mode == 'count-down':\n            self.spinchars = (u'9 ', u'8 ', u'7 ', u'6 ', u'5 ', u'4 ', u'3 ', u'2 ', u'1 ', u'0 ')\n        if mode == 'wheel':\n            self.spinchars = (u'-', u'\\\\ ', u'| ', u'/ ')\n        threading.Thread.__init__(self, None, None, \"Spin Thread\")\n\n    def spin(self):\n        \"\"\" perform a single spin \"\"\"\n        for spinchar in self.spinchars:\n            if self.msg:\n                self.string = self.msg + '...\\t' + spinchar + '\\r'\n            else:\n                self.string = '...\\t' + spinchar + '\\r'\n            #self.string = self.msg + '...\\t' + spinchar + '\\r'\n            self.out.write(self.string)\n            self.out.flush()\n            sleep(self.waittime)\n\n    def run(self):\n        \"\"\" run spinning \"\"\"\n        while (not self.flag) and ((self.count < self.min) or (self.count < self.max)):\n            self.spin()\n            self.count += 1\n        # Clean up display...\n        #self.out.write(' '*(len(self.string) + len('...\\t')))\n        self.out.write('\\033[2K')\n\n    def stop(self):\n        \"\"\" stop spinning \"\"\"\n        self.flag = True\n\ndef spin_message(message=None, seconds=None):\n    \"\"\" print the given message and wait for the given seconds \"\"\"\n    spin = SpinCursor(msg=message, minspin=seconds, speed=1, mode='wheel')\n    spin.start()\n    sleep(seconds)\n    spin.stop()\n\ndef dot_message(message=None, seconds=None):\n    \"\"\" print dots and wait for the given seconds \"\"\"\n    spin = SpinCursor(msg=message, minspin=seconds, speed=1, mode='dots')\n    spin.start()\n    sleep(seconds)\n    spin.stop()\n\ndef count_down_message(message=None, seconds=None):\n    \"\"\" print the given message and wait for the given seconds \"\"\"\n    spin = SpinCursor(msg=message, minspin=seconds, speed=1, mode='count-down')\n    spin.start()\n    sleep(seconds)\n    spin.stop()\n","sub_path":"v2/awsbuild/misc/spinner.py","file_name":"spinner.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"12788103","text":"'''\nPython module with utils used to load Google's NYT annotations (downloadable at https://github.com/dmorr-google/nyt-salience).\nJust invoke get_google_dataset method.\n'''\n\nimport os\nimport logging\nimport csv\n\nfrom dataset_utils import NYT_DIR\n\n\nlogger = logging.getLogger('Google')\n\n\nclass GoogleDataset:\n    def __init__(self):\n        self.train = None\n        self.eval = None\n\n\nclass GoogleEntity:\n    def __init__(self):\n        self.index = None  # int\n        self.salience = None  # string\n        self.mention_counts = None  # int\n        self.text = None  # string\n        self.start_byte_offset = None  # int\n        self.end_byte_offset = None  # int\n        self.freebase_mid = None  # string\n\n\nclass GoogleDocument:\n    def __init__(self):\n        self.id = None  # int\n        self.title = None  # string\n        self.entities = []\n\n\ndef fill_with_first_row(nyt_doc, row):\n    nyt_doc.id = int(row[0])\n    nyt_doc.title = row[1]\n\n\ndef row_google_entity(row):\n    google_entity = GoogleEntity()\n    google_entity.index = int(row[0])\n    google_entity.salience = row[1]\n    google_entity.mention_counts = int(row[2])\n    google_entity.start_byte_offset = int(row[4])\n    google_entity.end_byte_offset = int(row[5])\n    google_entity.freebase_mid = row[6]\n\n    return google_entity\n\n\ndef get_google_documents(path):\n    logger.info('Loading {0} 
file...'.format(path))\n google_docs = []\n\n with open(path, 'r') as f:\n\n r = csv.reader(f, delimiter='\\t')\n first_line = True\n google_doc = GoogleDocument()\n for row in r:\n\n if len(row):\n if first_line:\n\n fill_with_first_row(google_doc, row)\n first_line = False\n\n else:\n\n google_entity = row_google_entity(row)\n google_doc.entities.append(google_entity)\n else:\n\n google_docs.append(google_doc)\n google_doc = GoogleDocument()\n first_line = True\n\n logger.info('{0} file loaded.'.format(path))\n return google_docs\n\n\ndef get_google_dataset():\n logger.info('Loading Google annotations')\n\n google_dataset = GoogleDataset()\n google_dataset.train = get_google_documents(os.path.join(NYT_DIR, 'nyt-train'))\n google_dataset.eval = get_google_documents(os.path.join(NYT_DIR, 'nyt-eval'))\n\n logger.info('Google annotations loaded.')\n\n return google_dataset","sub_path":"src/main/python/dataset/nyt/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"277499093","text":"#\n# Copyright 2021 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n#\n\"\"\"DID Resolver module.\"\"\"\n# Copyright 2018 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n\nimport logging\nfrom typing import Optional\n\nfrom enforce_typing import enforce_types\nfrom ocean_lib.assets.asset import Asset\nfrom ocean_lib.common.aquarius.aquarius_provider import AquariusProvider\nfrom ocean_lib.models.data_token import DataToken\nfrom web3.main import Web3\n\nlogger = logging.getLogger(\"keeper\")\n\n\n@enforce_types\ndef resolve_asset(\n did: str,\n metadata_cache_uri: Optional[str] = None,\n web3: Optional[Web3] = None,\n token_address: Optional[str] = None,\n) -> Asset:\n \"\"\"Resolve a DID to an URL/DDO or later an internal/external DID.\n\n :param did: the asset id to resolve, this is part of the ocean\n DID did:op:<32 byte value>\n :param metadata_cache_uri: str the url of the metadata store\n :param web3: Web3 instance\n :param token_address: str the address of the DataToken smart contract\n\n :return Asset: the resolved DID\n \"\"\"\n assert metadata_cache_uri or (\n web3 and token_address\n ), \"Either metadata_cache_uri or (web3 and token_address) is required.\"\n\n if not metadata_cache_uri:\n metadata_cache_uri = DataToken(web3, token_address).get_metadata_url()\n\n logger.debug(f\"found did {did} -> url={metadata_cache_uri}\")\n ddo = AquariusProvider.get_aquarius(metadata_cache_uri).get_asset_ddo(did)\n\n if ddo:\n return Asset(dictionary=ddo.as_dictionary())\n","sub_path":"ocean_lib/assets/asset_resolver.py","file_name":"asset_resolver.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"141198474","text":"import tensorflow as tf\nimport os\n\n# Reset TensorFlow Graph\ntf.reset_default_graph()\n\n# TensorFlow model variable\nlearning_rate = 0.1\nepochs = 500\n\n# Coefficient of quadratic equation\na = 1\nb = -2\nc = 2\n\nx = tf.Variable(0.,name='x_variable')\nstep_x = tf.Variable(0, trainable=False)\nloss = a * x * x - b * x + c\noptimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=step_x)\n\n# TensorBoard\nsummary_op = tf.summary.scalar('x',x)\nfile_writer = tf.summary.FileWriter('./log',graph=tf.get_default_graph())\n# tensorboard --logdir=log (run in terminal to launch TensorBoard)\n\nwith tf.Session() as sess:\n 
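# Sanity check: the loss a*x^2 - b*x + c is minimized where 2*a*x - b = 0,\n    # i.e. x = b / (2*a); with a=1, b=-2 that is x = -1 (loss = 1), which the\n    # Adam steps below should approach over the 500 epochs.\n    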
sess.run(tf.global_variables_initializer())\n\n for epoch in range(epochs):\n _, step ,result, summary = sess.run([optimizer, step_x, x, summary_op])\n print('Step {}: Computed Result = {}'.format(step,result))\n\n file_writer.add_summary(summary,step)\n file_writer.flush()\n\n print('Final Result of x is {}'.format(result))\n","sub_path":"tensorflow-quad.py","file_name":"tensorflow-quad.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"58982655","text":"\"\"\"\nClass to represent data in a table with headers\n\nHeader:\n header (str} - header ID\n attribute (dict} -\n \"Type\": type of element in cell\n \"CastFunction\": function to cast cell to element type\n \"Label\": label header for the column\n \"Alignment\": alignment for column header label\n \"Width\": width for the column header\n\n [\n \"jobID\",\n {\n \"Type\": \"int\",\n \"CastFunction\": int,\n \"Label\": \"Job ID\",\n \"Alignment\": \"right\",\n \"Width\": 80,\n },\n ],\n\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nimport itertools\n\nclass HeaderAttributeKey:\n\n Alignment = \"Alignment\"\n CastFunction = \"CastFunction\"\n Label = \"Label\"\n Type = \"Type\"\n Width = \"Width\"\n\n\nclass DataKey:\n\n Cell = 0\n ToolTip = 1\n Obj = 2\n\n\nclass HeaderInfo:\n \"\"\"\n Header information\n \"\"\"\n\n header = None\n attribute = None\n headerList = None\n toolTip = None\n\n\nclass DataItem:\n \"\"\"\n Data item information\n \"\"\"\n\n cell = None\n toolTip = None\n obj = None\n\n\nclass Index:\n \"\"\"\n Dummy QModelIndex\n\n Returns:\n Index: Dummy QModelIndex\n \"\"\"\n\n def __init__(self, row, column):\n\n self._row = row\n self._column = column\n\n def row(self):\n return self._row\n\n def column(self):\n return self._column\n\n def isValid(self):\n return True\n\n\nclass TableData:\n \"\"\"\n Class to represent data in a table with columns headers\n\n data[x][y] = DataItem\n\n A header is a list of the form:\n [str, dict]\n str = string representing the column name\n dict = dictionary representing different attributes\n [\n \"Column Name\",\n {\n \"Alignment\": \"center\",\n \"CastFunction\": str,\n \"Label\": \"Column Label\",\n \"ToolTip\": \"Tool Tip string\"\n \"Type\": \"str\",\n \"Width\": 220,\n },\n ]\n\n Raises:\n IndexError: index is out of range\n TypeError: invalid index type\n\n Returns:\n str -- tableData[index] column header\n list - tableData[index,] data row\n object - tableData[row, col] element at position row,col on table\n \"\"\"\n\n def __init__(self, headerList=None, dataList=None):\n\n self.data = [] # 2 dimensional dataset\n self.headers = [] # list of HeaderInfo objects\n self.headerName = [] # list of header/columns names HeaderInfo.header\n\n if headerList is not None:\n for h in headerList:\n self.addHeader(h)\n\n if dataList is not None:\n if len(dataList) == 1:\n self.insertRow(0, dataList)\n else:\n for position, data in enumerate(dataList):\n self.insertRow(position, data)\n\n def __getitem__(self, index):\n\n if isinstance(index, (int, slice)):\n if (index < 0) or (index > len(self.headers) - 1):\n raise IndexError(\"list index [{}] out of range\".format(index))\n\n return self.headers[index].attribute[\"Label\"]\n\n if isinstance(index, tuple):\n col = None\n\n if len(index) == 1:\n row = index[0]\n elif len(index) == 2:\n row, col = index\n else:\n raise IndexError(\"Bad index format: {}\".format(index))\n\n if col is None:\n returnRow = []\n currentRow = self.data[row]\n\n 
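# A one-element tuple index, e.g. tableData[row,], reaches this branch and\n                # returns the whole row as plain cell values rather than DataItem objects.\n                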
for r in currentRow:\n returnRow.append(r.cell)\n\n return returnRow\n\n return self.data[row][col].cell\n\n raise TypeError(\"Invalid index type\")\n\n def __setitem__(self, index, value):\n\n if isinstance(index, int):\n self.headers[index].attribute[\"Label\"] = value\n\n elif isinstance(index, tuple):\n # Only update members of the data table no headers\n if len(index) == 2:\n row, col = index\n else:\n raise IndexError(\"Bad index format: {}\".format(index))\n\n index = Index(row, col) # Simulate index\n self.setData(index, value)\n else:\n raise TypeError(\"Invalid index type\")\n\n def __len__(self):\n return len(self.data)\n\n def addHeader(self, header=None):\n \"\"\"\n Add header information\n\n Keyword Arguments:\n header (list) -- list containing the a header (default: {None})\n \"\"\"\n\n if header is not None:\n oHeader = HeaderInfo()\n oHeader.header = header[0]\n oHeader.attribute = header[1]\n oHeader.headerList = header\n self.headerName.append(header[0])\n self.headers.append(oHeader)\n\n def setData(self, index, value):\n \"\"\"\n Insert row at the end of the data table\n\n Keyword Arguments:\n dataItem (list) -- list containing a data row (default: {None})\n \"\"\"\n\n if (value is not None) and index.isValid():\n # Use self.insertRow() so only one method add data\n # better for logging purposes\n row = index.row()\n column = index.column()\n\n if isinstance(value, DataItem):\n self.data[row][column].cell = value.cell\n self.data[row][column].toolTip = value.toolTip\n self.data[row][column].obj = value.obj\n else:\n self.data[row][column].cell = value\n\n return True\n\n return False\n\n def setToolTip(self, index, value):\n \"\"\"\n Insert row at the end of the data table\n\n Keyword Arguments:\n dataItem (list) -- list containing a data row (default: {None})\n \"\"\"\n\n if (value is not None) and index.isValid():\n # Use self.insertRow() so only one method add data\n # better for logging purposes\n row = index.row()\n column = index.column()\n self.data[row][column].toolTip = value\n\n return True\n\n return False\n\n def insertRow(self, position, row=None):\n \"\"\"\n Insert a data row\n\n Arguments:\n position (int) -- row number where to insert the data\n row (list) -- list with row data\n \"\"\"\n\n if row is not None:\n totalColumns = len(self.headerName)\n emptyRow = []\n for _ in itertools.repeat(None, totalColumns):\n rowItem = DataItem()\n emptyRow.append(rowItem)\n\n self.data.insert(position, emptyRow)\n\n for column, value in enumerate(row):\n if isinstance(value, list):\n newItem = DataItem()\n newItem.cell = value[DataKey.Cell]\n newItem.toolTip = value[DataKey.ToolTip]\n newItem.obj = value[DataKey.Obj]\n index = Index(position, column)\n self.setData(index, newItem)\n else:\n if value is not None:\n raise ValueError(\"Item at index {} is invalid\".format(column))\n else:\n totalColumns = len(self.headerName)\n emptyRow = []\n for _ in itertools.repeat(None, totalColumns):\n emptyRow.append(\"\")\n self.data.insert(position, emptyRow)\n\n def removeRow(self, index):\n \"\"\"\n Delete a data row\n\n Arguments:\n index (int) -- row number to delete 0 based\n\n Returns:\n list -- row deleted\n \"\"\"\n element = self.data.pop(index)\n\n return element\n\n def insertColumn(self, position=0, columnHeader=None, columnData=None):\n \"\"\"\n Insert a data column\n\n Arguments:\n position (int) -- column number where to insert the data\n columnHeader (str) -- header for column to be inserted\n columnData (list) -- data to insert in column cells\n \"\"\"\n\n if 
columnHeader is None:\n            self.headers.insert(position, HeaderInfo())\n            self.headerName.insert(position, \"\")\n        else:\n            if isinstance(columnHeader, list):\n                oHeader = HeaderInfo()\n                oHeader.header = columnHeader[0]\n                oHeader.attribute = columnHeader[1]\n                oHeader.headerList = columnHeader\n                self.headers.insert(position, oHeader)\n                self.headerName.insert(position, oHeader.header)\n            elif isinstance(columnHeader, HeaderInfo):\n                self.headers.insert(position, columnHeader)\n                self.headerName.insert(position, columnHeader.header)\n            else:\n                raise TypeError(\"Invalid column header type.\")\n\n        if columnData is None:\n            for r in self.data:\n                r.insert(position, DataItem())\n        else:\n            for _, r in enumerate(self.data):\n                element = DataItem()\n                element.cell = columnData[0]\n                element.toolTip = columnData[1]\n                r.insert(position, element)\n\n    def deleteColumn(self, index):\n        \"\"\"\n        Delete a column from the table\n\n        Arguments:\n            index (int) -- column to delete\n\n        Returns:\n            list -- list containing the header ID, header attributes and data rows deleted\n        \"\"\"\n\n        deletedInfo = []\n        deletedRows = []\n        deletedInfo.append(self.headers.pop(index))\n        deletedInfo.append(self.headerName.pop(index))\n\n        for row in self.data:\n            deletedRows.append(row.pop(index))\n\n        deletedInfo.append(deletedRows)\n\n        return deletedInfo\n","sub_path":"MKVBatchMultiplex/dataset/TableData.py","file_name":"TableData.py","file_ext":"py","file_size_in_byte":9748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"188554724","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 25 21:11:38 2018\n\n@author: Administrator\n\"\"\"\nimport requests\n\nurl = r'http://www.sse.com.cn/disclosure/listedinfo/announcement/c/2018-03-06/600066_20180306_2.pdf'\ntarget_file_name = '600066.pdf'\n\nr = requests.get(url)  # create HTTP response object\nprint(r.content)\nwith open(target_file_name,'wb') as f:\n    f.write(r.content)\n    ","sub_path":"webscrap/pdf/pdf_download.py","file_name":"pdf_download.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"345034775","text":"from django.shortcuts import render\n\n# Create your views here.\n\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nimport requests\nimport json\nfrom django.shortcuts import render\nfrom django.template import loader\n\n\ndef home(request):\n    template = loader.get_template('home.html')\n\n    response = requests.get(\"http://10.200.24.125:8000/api/worker\")\n\n    context = {\"workers\": json.loads(response.text)[\"results\"]}\n\n    return HttpResponse(template.render(context, request))\n\ndef posterHome(request):\n    template = loader.get_template('posterhome.html')\n\n    context = {}\n\n    return HttpResponse(template.render(context, request))\n\n\ndef mePoster(request):\n    template = loader.get_template('mePoster.html')\n\n    response = requests.get(\"http://10.200.26.0:8000/api/poster/3\")\n\n    context = json.loads(response.text)\n    \n    return HttpResponse(template.render(context, request))\n\ndef posterInbox(request, posterID):\n    template = loader.get_template('posterinbox.html')\n\n    response = requests.get(\"http://10.200.26.0:8000/api/messages/\")\n    context = json.loads(response.text)\n    messages = context[\"results\"]\n    context = {\"messages\":[]}\n\n    for message in messages:\n        print(message)\n        d = {\"task_id\":message[\"task_id\"],\"message\":message[\"message_content\"]}\n        workerResponse = requests.get(\"http://10.200.26.0:8000/api/worker/\" + 
str(message[\"worker_id\"]))\n worker = json.loads(workerResponse.text)\n d[\"worker\"] = worker\n\n taskResponse = requests.get(\"http://10.200.26.0:8000/api/tasks/\" + str(message[\"task_id\"]))\n task = json.loads(taskResponse.text)\n d[\"task\"] = task \n context['messages'].append(d)\n\n return HttpResponse(template.render(context, request))\n\ndef showTask(request, ID):\n template = loader.get_template('showtask.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/tasks/\" + str(ID))\n\n context = {\"task\":json.loads(response.text)}\n\n return HttpResponse(template.render(context, request))\n\n\ndef workerHome(request):\n template = loader.get_template('workerhome.html')\n\n response = requests.get(\"http://10.200.24.125:8000/api/worker\")\n\n context = {\"workers\": json.loads(response.text)[\"results\"]}\n\n return HttpResponse(template.render(context, request))\n\n\ndef workerProfile(request, profile_id):\n template = loader.get_template('workerProfile.html')\n\n response = requests.get(\n \"http://10.200.26.0:8000/api/worker/\" + str(profile_id))\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\n\ndef createJob(request):\n if request.method==\"POST\":\n url = \"http://10.200.26.0:8000/api/tasks/\"\n job = request.POST\n requests.post(url, data = job)\n\n template = loader.get_template('createjob.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/tasks/\")\n\n context = json.loads(response.text)\n tasks = context[\"results\"]\n print(tasks)\n c = {\"tasks\": tasks}\n\n return HttpResponse(template.render(c, request))\n\n\ndef housekeeping(request):\n template = loader.get_template('housekeeping.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/tasks/\")\n\n context = json.loads(response.text)[\"results\"]\n\n c = {\"tasks\":[]}\n\n for task in context:\n if task[\"category\"] == \"HK\":\n c[\"tasks\"].append(task)\n\n return HttpResponse(template.render(c, request))\n\ndef education(request):\n template = loader.get_template('education.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\ndef technology(request):\n template = loader.get_template('technology.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\ndef animals(request):\n template = loader.get_template('animals.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\ndef repairs(request):\n template = loader.get_template('repairs.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\ndef delivery(request):\n template = loader.get_template('delivery.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\ndef entertainment(request):\n template = loader.get_template('entertainment.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, 
request))\n","sub_path":"lifetasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87356594","text":"#!/bin/usr/env python\n# -*- coding: utf-8 -*-\nfrom IO import get_parse\nfrom relink import relink_simple\n\ndef main():\n args = get_parse()\n\n if True:\n relink_simple(**args)\n\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"446514678","text":"from itertools import chain\nfrom collections import defaultdict\nfrom string import ascii_lowercase, ascii_uppercase\n\nimport qanta\nfrom qanta.datasets.quiz_bowl import QuestionDatabase\nfrom qanta.wikipedia.cached_wikipedia import CachedWikipedia\nfrom qanta import qlogging\nfrom ingestion.page_assigner import PageAssigner\n\nfrom fuzzywuzzy import process\nfrom fuzzywuzzy.fuzz import UWRatio\n\nlog = qlogging.get(__name__)\n\n\ndef scorer(left, right):\n if right.startswith(\"list of\") or \\\n right.endswith(\" topics\") or \\\n right.startswith(\"wikiproject\"):\n val = 0\n else:\n val = UWRatio(left, right)\n return val\n\n\ndef reasonable_case(page):\n \"\"\"\n Checks that a wikipedia page doesn't have crazy capitalization\n (which often leads to bad matches.\n \"\"\"\n\n return len(page) > 2 and page[0] in ascii_uppercase and \\\n all(x in ascii_lowercase for x in page[1:])\n\n\nclass TitleFinder:\n def __init__(self, index, wiki, known_pages,\n normalize=lambda x: x, prune=1500):\n import gzip\n\n self.normalize = normalize\n self._index = defaultdict(set)\n self._wiki = wiki\n self._prune = set()\n self._known = known_pages\n\n # map single words to the relevant wikipedia titles\n with gzip.open(index) as f:\n line = 0\n for title in f:\n line += 1\n if line == 1:\n continue\n\n converted_title = None\n for word in [normalize(x) for x in\n title.decode('utf-8').split(\"_\") if len(x) > 2]:\n if converted_title is None:\n converted_title = title.decode('utf-8').strip()\n if len(word) > 2:\n self._index[word].add(converted_title)\n if line % 5000000 == 0:\n log.info(\"%i %s: %s -> %s\" % (line, title, word, list(self._index[word])[:3]))\n self.prune(prune)\n self.prune(prune)\n\n # Take another pass just to add exact titles\n with gzip.open(index) as f:\n for ii in f:\n title = ii.decode('utf-8').strip()\n if \"(\" not in title and reasonable_case(title):\n norm = normalize(title.replace(\"_\", \"_\"))\n self._index[norm].add(title)\n\n def prune(self, prune):\n self._prune |= set(x for x in self._index\n if len(self._index[x]) > prune)\n log.info(\"Pruning %s\" % str(list(self._prune)[:50]))\n for ii in self._prune:\n if ii in self._index:\n del self._index[ii]\n\n def query(self, text):\n norm = self.normalize(text)\n tokens = norm.split()\n candidates = set(chain.from_iterable(self._index[x] for x in tokens\n if x in self._index))\n\n # try looking for plurals\n if tokens[-1].endswith(\"s\") and tokens[-1][:-1] in self._index:\n candidates |= self._index[tokens[-1][:-1]]\n\n # try looking for exact match\n if norm in self._index:\n candidates |= self._index[norm]\n\n candidates = dict((self.normalize(x.replace(\"_\", \" \")), x) for x in\n candidates)\n\n return candidates\n\n def score(self, text, score_function=scorer):\n candidates = self.query(text)\n\n candidates = process.extract(text, candidates, 
limit=len(candidates),\n scorer=scorer)\n\n collapsed = defaultdict(int)\n for wiki, val, norm in candidates:\n page = self._wiki.redirect(wiki)\n if scorer(self.normalize(text),\n self.normalize(page.replace(\"_\", \" \"))) != 0:\n collapsed[page] += val\n\n # Give bonus to exact matches\n if self.normalize(page) == self.normalize(text):\n collapsed[page] += 250\n\n if page in self._known:\n collapsed[page] += 250\n\n return collapsed\n\n def best_guess(self, unassigned, min_val=50, delta=5):\n results = {}\n guess_num = 0\n for ii in [x for x in unassigned if len(x) > 2]:\n v = self.score(ii)\n if len(v) >= 2:\n scores = sorted(v, key=v.get, reverse=True)\n top = v[scores[0]]\n second = v[scores[1]]\n\n if top - second >= delta and top > min_val:\n results[ii] = scores[0]\n guess_num += 1\n\n if guess_num % 1000 == 0:\n log.info(\"Matching %s -> %s\" % (ii, results.get(ii, None)))\n elif len(v) == 1:\n if max(v.values()) > min_val:\n results[ii] = max(v.keys())\n\n return results\n\n\nif __name__ == \"__main__\":\n import argparse\n from glob import glob\n parser = argparse.ArgumentParser(description='Import questions')\n parser.add_argument('--direct_path', type=str,\n default='data/internal/page_assignment/direct/')\n parser.add_argument('--ambiguous_path', type=str,\n default='data/internal/page_assignment/ambiguous/')\n parser.add_argument('--unambiguous_path', type=str,\n default='data/internal/page_assignment/unambiguous/')\n flags = parser.parse_args()\n\n pa = PageAssigner(QuestionDatabase.normalize_answer)\n for ii in glob(\"%s/*\" % flags.ambiguous_path):\n pa.load_ambiguous(ii)\n for ii in glob(\"%s/*\" % flags.unambiguous_path):\n pa.load_unambiguous(ii)\n for ii in glob(\"%s/*\" % flags.direct_path):\n pa.load_direct(ii)\n\n cw = CachedWikipedia()\n tf = TitleFinder(\"data/enwiki-latest-all-titles-in-ns0.gz\", cw,\n pa.known_pages(),\n normalize=QuestionDatabase.normalize_answer)\n\n\n\n for ii in ['die leiden des jungen werthers', '99 Luftballons', 'saint nicholas of myra', 'édouard roche', 'the mahdi or mohammad ahmed', 'the first vatican council', 'antietam national battlefield', 'cia', 'samuel f b morse', 'the passion according to st matthew or st matthew’s passion or matthäuspassion', 'another world', 'rolling in the deep', 'tony gwynn', 'opal', 'tylenol', 'queues', 'dachau', 'lipoproteins', 'haiku', 'japan', 'zoroastrianism']:\n A = tf.score(ii)\n print(\"--------\")\n num = 0\n for ii in sorted(A, key=A.get, reverse=True):\n num += 1\n print(\"\\t%s\\t%i\" % (ii, A[ii]))\n\n if num > 10:\n break\n","sub_path":"ingestion/title_finder.py","file_name":"title_finder.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"100916065","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/nodeconductor_sugarcrm/migrations/0010_crm_instance_url.py\n# Compiled at: 2016-09-28 11:51:43\nfrom __future__ import unicode_literals\nfrom django.db import models, migrations\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('nodeconductor_sugarcrm', '0009_remove_crm_size_field')]\n operations = [\n migrations.AddField(model_name=b'crm', name=b'instance_url', field=models.URLField(help_text=b'CRMs OpenStack instance URL in NC.', blank=True), preserve_default=True),\n migrations.AlterField(model_name=b'crm', name=b'api_url', 
field=models.CharField(help_text=b'CRMs OpenStack instance access URL.', max_length=127), preserve_default=True)]","sub_path":"pycfiles/nodeconductor_sugarcrm-0.5.0-py2.7/0010_crm_instance_url.py","file_name":"0010_crm_instance_url.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"513868817","text":"import os\n\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\n\n\ndef get_images_upload_path(instance, filename):\n    return os.path.join(\n        'media',\n        'images',\n        instance.__class__.__name__.lower(),\n        filename\n    )\n\n\ndef send_email_template(request, subject, template, recipients, data=None):\n    \"\"\"\n    This function sends an email using a selected template.\n\n    Arguments:\n        subject: the subject of the email\n        template: the template to be used for the email\n        recipients: a list of recipients the email will be sent to\n        data: a dictionary to be added as context variables in the email\n    \"\"\"\n    context = {\n        'current_site': Site.objects.get_current(),\n        'protocol': 'https' if request.is_secure() else 'http'\n    }\n    context.update(data or {})\n\n    html_content = render_to_string(template, context)\n    text_content = strip_tags(html_content)\n\n    send_mail(\n        subject=f'[{Site.objects.get_current().name}] {subject}',\n        message=text_content,\n        from_email=settings.DEFAULT_FROM_EMAIL,\n        recipient_list=recipients,\n        fail_silently=False,\n        html_message=html_content\n    )\n","sub_path":"apps/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"197463178","text":"# coding: utf-8\nimport requests\nfrom requests.exceptions import HTTPError\nfrom pygbif import species\nfrom bims.models import (\n    Taxon,\n    TaxonomyField\n)\nfrom bims.models.taxonomy import Taxonomy\nfrom bims.models.vernacular_name import VernacularName\nfrom bims.enums import TaxonomicRank, TaxonomicStatus\n\n\ndef update_taxa():\n    \"\"\"Get all taxa, then update the data based on the gbif id.\"\"\"\n    taxa = Taxon.objects.all()\n    if not taxa:\n        print('No taxon found')\n    for taxon in taxa:\n        print('Update taxon for %s with gbif id %s' % (\n            taxon.common_name, taxon.gbif_id))\n        try:\n            response = species.name_usage(key=taxon.gbif_id)\n            if response:\n                update_taxonomy_fields(taxon, response)\n                print('Taxon updated')\n        except HTTPError as e:\n            print('Taxon not updated')\n            print(e)\n\n\ndef get_species(gbif_id):\n    \"\"\"\n    Get species by gbif id\n    :param gbif_id: gbif id\n    :return: species dictionary\n    \"\"\"\n    api_url = 'http://api.gbif.org/v1/species/' + str(gbif_id)\n    try:\n        response = requests.get(api_url)\n        json_result = response.json()\n        return json_result\n    except (HTTPError, KeyError) as e:\n        print(e)\n        return None\n\n\ndef get_vernacular_names(species_id):\n    \"\"\"\n    Get vernacular names from species id\n    :param species_id: taxonomy id\n    :return: array of vernacular name\n    \"\"\"\n    api_url = 'http://api.gbif.org/v1/species/%s/vernacularNames' % (\n        str(species_id)\n    )\n    try:\n        response = requests.get(api_url)\n        json_result = response.json()\n        return json_result\n    except (HTTPError, KeyError) as e:\n        print(e)\n        return None\n\n\ndef get_children(key):\n    \"\"\"\n    Lists all direct child usages for a name usage\n    :return: list of species\n    \"\"\"\n    api_url = 
'http://api.gbif.org/v1/species/{key}/children'.format(\n key=key\n )\n try:\n response = requests.get(api_url)\n json_response = response.json()\n if json_response['results']:\n return json_response['results']\n return None\n except (HTTPError, KeyError) as e:\n print(e)\n return None\n\n\ndef find_species(original_species_name, rank=None):\n \"\"\"\n Find species from gbif with lookup query.\n :param original_species_name: the name of species we want to find\n :param rank: taxonomy rank\n :return: List of species\n \"\"\"\n print('Find species : %s' % original_species_name)\n try:\n response = species.name_lookup(\n q=original_species_name,\n limit=10,\n rank=rank\n )\n if 'results' in response:\n results = response['results']\n for result in results:\n rank = result.get('rank', '')\n rank_key = rank.lower() + 'Key'\n key_found = (\n 'nubKey' in result or rank_key in result)\n if key_found and 'taxonomicStatus' in result:\n if result['taxonomicStatus'] == 'ACCEPTED' or \\\n result['taxonomicStatus'] == 'SYNONYM':\n return result\n except HTTPError:\n print('Species not found')\n\n return None\n\n\ndef search_exact_match(species_name):\n \"\"\"\n Search species detail\n :param species_name: species name\n :return: species detail if found\n \"\"\"\n api_url = 'http://api.gbif.org/v1/species/match?name=' + str(species_name)\n try:\n response = requests.get(api_url)\n json_result = response.json()\n if json_result and 'usageKey' in json_result:\n key = json_result['usageKey']\n return key\n return None\n except (HTTPError, KeyError) as e:\n print(e)\n return None\n\n\ndef update_collection_record(collection):\n \"\"\"\n Update taxon for a collection.\n :param collection: Biological collection record model\n \"\"\"\n\n taxonomy = Taxonomy.objects.filter(\n scientific_name__contains=collection.original_species_name\n )\n if taxonomy:\n print('%s exists in Taxonomy' % collection.original_species_name)\n collection.taxonomy = taxonomy[0]\n collection.save()\n return\n\n result = find_species(collection.original_species_name)\n\n if not result:\n return\n\n if 'nubKey' in result:\n taxon_key = result['nubKey']\n elif 'speciesKey' in result:\n taxon_key = result['speciesKey']\n else:\n return\n\n taxonomy = process_taxon_identifier(taxon_key)\n collection.taxonomy = taxonomy\n collection.save()\n\n\ndef update_taxonomy_fields(taxon, response):\n \"\"\"Helper to update taxonomy field of taxon from a response dictionary.\n\n :param taxon: The Taxon object.\n :type taxon: Taxon\n\n :param response: A dictionary contains of Taxonomy value.\n :type response: dict\n \"\"\"\n # Iterate through all fields and update the one which is a\n # field from Taxonomy\n taxon_fields = Taxon._meta.get_fields()\n for field in taxon_fields:\n if isinstance(field, TaxonomyField):\n if field.taxonomy_key in response:\n setattr(\n taxon,\n field.get_attname(),\n response[field.taxonomy_key])\n continue\n\n # Set vernacular names\n try:\n if field.get_attname() == 'vernacular_names':\n vernacular_names = []\n for vernacular_name in response['vernacularNames']:\n if 'vernacularName' in vernacular_name:\n vernacular_names.append(\n vernacular_name['vernacularName']\n )\n taxon.vernacular_names = vernacular_names\n except (AttributeError, KeyError) as e:\n print(e)\n continue\n\n taxon.save()\n\n\ndef process_taxon_identifier(key, fetch_parent=True):\n \"\"\"\n Get taxon detail\n :param key: gbif key\n :param fetch_parent: whether need to fetch parent, default to True\n :return:\n \"\"\"\n # Get taxon\n print('Get taxon 
identifier for key : %s' % key)\n\n try:\n taxon_identifier = Taxonomy.objects.get(\n gbif_key=key,\n scientific_name__isnull=False\n )\n if taxon_identifier.parent or taxon_identifier.rank == 'KINGDOM':\n return taxon_identifier\n except Taxonomy.DoesNotExist:\n pass\n\n detail = get_species(key)\n taxon_identifier = None\n\n try:\n print('Found detail for %s' % detail['scientificName'])\n taxon_identifier, status = Taxonomy.objects.get_or_create(\n gbif_key=detail['key'],\n scientific_name=detail['scientificName'],\n canonical_name=detail['canonicalName'],\n taxonomic_status=TaxonomicStatus[\n detail['taxonomicStatus']].name,\n rank=TaxonomicRank[\n detail['rank']].name,\n )\n # Get vernacular names\n vernacular_names = get_vernacular_names(detail['key'])\n if vernacular_names:\n print('Found %s vernacular names' % len(\n vernacular_names['results']))\n for result in vernacular_names['results']:\n fields = {}\n if 'source' in result:\n fields['source'] = result['source']\n if 'language' in result:\n fields['language'] = result['language']\n if 'taxonKey' in result:\n fields['taxon_key'] = int(result['taxonKey'])\n vernacular_name, status = VernacularName.objects.get_or_create(\n name=result['vernacularName'],\n **fields\n )\n taxon_identifier.vernacular_names.add(vernacular_name)\n taxon_identifier.save()\n\n if 'parentKey' in detail and fetch_parent:\n print('Found parent')\n taxon_identifier.parent = process_taxon_identifier(\n detail['parentKey']\n )\n taxon_identifier.save()\n except (KeyError, TypeError) as e:\n print(e)\n pass\n\n return taxon_identifier\n\n\ndef search_taxon_identifier(search_query, fetch_parent=True):\n \"\"\"\n Search from gbif api, then create taxon identifier\n :param search_query: string query\n :param fetch_parent: whether need to fetch parent, default to True\n :return:\n \"\"\"\n print('Search for %s' % search_query)\n species_detail = None\n key = search_exact_match(search_query)\n\n if not key:\n species_detail = find_species(search_query)\n rank = species_detail.get('rank', '')\n rank_key = rank.lower() + 'Key'\n\n if rank_key in species_detail:\n key = species_detail[rank_key]\n elif 'nubKey' in species_detail:\n key = species_detail['nubKey']\n\n if key:\n species_detail = process_taxon_identifier(key, fetch_parent)\n\n return species_detail\n","sub_path":"bims/utils/gbif.py","file_name":"gbif.py","file_ext":"py","file_size_in_byte":8987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"559332620","text":"import snap\nf = open ('musae_git_edges.csv','r')\nlinea = f.readline()\nG = snap.TNGraph.New()\nfor x in range(0,50000):\n G.AddNode(x)\nind=[]\nfor x in range(0,50000):\n ind.append(0)\n \nwhile ( 1 ):\n linea = f.readline()\n if linea == \"\": break\n x,y =map(int,linea.split(\",\"))\n print(x,\" \",y)\n G.AddEdge(x,y)\n ind[x]+=1\n\ndef BCF( x ):\n n = G.GetNodes()\n v = []\n for j in range(0,n):\n v.append(0)\n val = 100\n for j in range( 0, n):\n if G.IsEdge(j+1,x) and (ind[j+1]>1):\n G.DelEdge(j+1,x)\n PRankH2 = snap.TIntFltH()\n snap.GetPageRank(G, PRankH2)\n v[j]=PRankH2[x]\n val = min( val,v[j])\n G.AddEdge(j+1,x)\n else:\n v[j]=100\n for j in range(0,n):\n if abs(val-v[j])<1e-8:\n return j+1\n \nprint(BCF(100))\n\n\n","sub_path":"better friend/bf.py","file_name":"bf.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"530078440","text":"from kubernetes.client import CustomObjectsApi, 
ApiClient, CoreV1Api, Configuration, BatchV1Api,V1PodStatus\nfrom log import logger\nimport collections\nfrom utils import parse_resource, ONE_GIBI, ONE_MEBI\nfrom configparser import ConfigParser\nimport urllib3\nimport datetime\nurllib3.disable_warnings()\n\npod_metric_fields = [\n 'ns',\n 'pod',\n 'status',\n 'cpu',\n 'cpu_requests',\n 'cpu_limits',\n 'memory',\n 'memory_requests',\n 'memory_limits',\n]\nPodMetric = collections.namedtuple('PodMetric', pod_metric_fields)\nnode_metric_fields = [\n 'node', 'cpu', 'memory'\n]\nNodeMetric = collections.namedtuple('NodeMetric', node_metric_fields)\n\n\nclass K8sClient:\n def __init__(self):\n config = ConfigParser()\n config.read(\"config.ini\")\n self.token = config.get(\"kubernetes\", \"token\")\n self.host = \"https://{}:6443\".format(config.get(\"kubernetes\", \"api_host\"))\n self.configuration = Configuration()\n self.configuration.api_key['authorization'] = self.token\n self.configuration.api_key_prefix['authorization'] = 'Bearer'\n self.configuration.host = self.host\n self.configuration.verify_ssl = False\n self.api_client = ApiClient(self.configuration)\n self.api_client.configuration.debug = True\n self.node_list = [i.strip() for i in config.get(\"kubernetes\", \"node\").split(\",\")]\n\n def get_metric(self):\n api_instance = CoreV1Api(self.api_client)\n pods = api_instance.list_pod_for_all_namespaces().items\n node_usages = [self.top_node(node) for node in self.node_list]\n pods_usages = sorted([self.top_pod(pod) for pod in pods], key=lambda x: x.memory, reverse=True)\n return {\"nodes\": node_usages, \"pods\": pods_usages}\n\n @logger.catch\n def top_node(self, node):\n custom = CustomObjectsApi(self.api_client)\n data = custom.get_cluster_custom_object(\"metrics.k8s.io\", \"v1beta1\", \"nodes\", node)\n node = data['metadata']['name']\n cpu = parse_resource(data['usage']['cpu'])\n memory = parse_resource(data['usage']['memory'])\n return NodeMetric(node=node, cpu=cpu, memory=memory / ONE_GIBI)\n\n @logger.catch\n def top_pods(self):\n custom = CustomObjectsApi(self.api_client)\n data = custom.list_cluster_custom_object(\"metrics.k8s.io\", \"v1beta1\", \"pods\")\n usage_by_pod = collections.defaultdict(list)\n for pod_data in data['items']:\n pod_name = pod_data['metadata']['name']\n for container_data in pod_data['containers']:\n usage_by_pod[pod_name].append(\n {\n 'pod': container_data['name'],\n 'cpu': parse_resource(container_data['usage']['cpu']),\n 'memory': parse_resource(container_data['usage']['memory']) / ONE_MEBI,\n }\n )\n return usage_by_pod\n\n @staticmethod\n def aggregate_container_resource(pod):\n values = {\n 'memory_limits': 0,\n 'cpu_limits': 0,\n 'memory_requests': 0,\n 'cpu_requests': 0,\n }\n for container in pod.spec.containers:\n limits = getattr(container.resources, 'limits', None)\n if limits:\n values['memory_limits'] += round(parse_resource(limits.get('memory')) / ONE_GIBI, 1)\n values['cpu_limits'] += parse_resource(limits.get('cpu'))\n requests = getattr(container.resources, 'requests', None)\n if requests:\n values['memory_requests'] += round(parse_resource(requests.get('memory')) / ONE_GIBI, 1)\n values['cpu_requests'] += parse_resource(requests.get('cpu'))\n return values\n\n @logger.catch\n def top_pod(self, pod):\n ns = pod.metadata.namespace\n status = pod.status.phase\n data = self.top_pods().get(pod.metadata.name) or []\n cpu = round(sum(pod_data['cpu'] for pod_data in data), 3)\n memory = round(sum(pod_data['memory'] for pod_data in data))\n return PodMetric(ns=ns, pod=pod.metadata.name, 
status=status, cpu=cpu, memory=memory,\n **self.aggregate_container_resource(pod))\n\n def get_job(self):\n api_instance = BatchV1Api(self.api_client)\n jobs = api_instance.list_job_for_all_namespaces()\n jobs_status = []\n for i in jobs.items:\n name = i.metadata.name\n ns = i.metadata.namespace\n start = i.status.start_time\n if i.status.succeeded == 1:\n status = \"success\"\n elif i.status.failed == 1:\n status = \"failed\"\n else:\n status = \"active\"\n jobs_status.append({\"ns\": ns, \"name\": name, \"start\": start, \"status\": status})\n return {\"desc\": \"jobs\", \"result\": jobs_status}\n\n def get_core(self):\n api_instance = CoreV1Api(self.api_client)\n component = api_instance.list_component_status()\n component_list = []\n for i in component.items:\n status = \"Ready\" if i.conditions[0].status else \"NotReady\"\n component_list.append({\"status\": status, \"name\": i.metadata.name})\n return {\"desc\": \"component\", \"result\": component_list}\n\n def get_readiness(self):\n api_instance = CoreV1Api(self.api_client)\n node = api_instance.list_pod_for_all_namespaces()\n for i in node.items:\n for x in i.spec.containers:\n if x.readiness_probe is not None:\n print(i.metadata.name, x.readiness_probe.http_get)\n\n def get_node(self):\n api_instance = CoreV1Api(self.api_client)\n nodes = api_instance.list_node()\n result = []\n for i in nodes.items:\n node = dict()\n node['name'] = i.metadata.name\n for s in i.status.conditions:\n if s.type == \"Ready\":\n node['status'] = \"Ready\" if s.status else \"NotReady\"\n node['kernel'] = i.status.node_info.kernel_version\n node['container_runtime'] = i.status.node_info.container_runtime_version\n node['cpu'] = i.status.capacity['cpu']\n node['memory'] = round(parse_resource(i.status.capacity['memory']) / ONE_GIBI)\n result.append(node)\n return {\"desc\": \"node\", \"result\": result}\n\n def get_pod(self):\n api_instance = CoreV1Api(self.api_client)\n pods = api_instance.list_pod_for_all_namespaces()\n result = []\n for i in pods.items:\n pod = dict()\n pod['name'] = i.metadata.name\n pod['ns'] = i.metadata.namespace\n pod['status'] = i.status.phase\n if i.status.container_statuses is not None:\n pod['restart'] = max([x.restart_count for x in i.status.container_statuses])\n else:\n pod['restart'] = None\n pod['start_time'] = i.status.start_time\n pod['ip'] = i.status.pod_ip\n pod['host'] = i.status.host_ip\n result.append(pod)\n return {\"desc\": \"pod\", \"result\": result}\n\n\n\n\n\n","sub_path":"k8s.py","file_name":"k8s.py","file_ext":"py","file_size_in_byte":7108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"5533761","text":"import asyncio\n\nfrom typing import Any, Dict, List, Tuple\n\nclass EditAtom:\n '''\n A simple utility class to track all the changes for adding a node or setting a property before committing them all\n at once.\n '''\n def __init__(self, allbldgbuids):\n '''\n Args:\n allbldgbuids (Dict[bytes, Node]): a dict that should be shared among all instances of this class for a\n particular cortex.\n '''\n self.mybldgbuids = {} # buid -> node\n self.otherbldgbuids = set()\n self.doneevent = asyncio.Event()\n self.sops: List[Tuple[str, Tuple[bytes, str, str, Dict[str, Any]]]] = []\n self.allbldgbuids = allbldgbuids # buid -> (Node, Event)\n self.notified = False\n self.npvs = [] # List of tuple(Node, prop, val)\n\n def __enter__(self):\n '''\n Implement the context manager convention\n '''\n return self\n\n def getNodeBeingMade(self, buid):\n '''\n Return a 
node if it is currently being made, mark as a dependency, else None if none found\n '''\n nodeevnt = self.allbldgbuids.get(buid)\n if nodeevnt is None:\n return None\n if buid not in self.mybldgbuids:\n self.otherbldgbuids.add(buid)\n return nodeevnt[0]\n\n def addNode(self, node):\n '''\n Update the shared map with my in-construction node\n '''\n self.mybldgbuids[node.buid] = node\n self.allbldgbuids[node.buid] = (node, self.doneevent)\n\n async def rendevous(self):\n '''\n Wait until all my adjacent editatoms are also at this point\n '''\n self._notifyDone()\n await self._wait()\n\n def _notifyDone(self):\n '''\n Allow any other editatoms waiting on me to complete to resume\n '''\n if self.notified:\n return\n\n self.doneevent.set()\n\n for buid in self.mybldgbuids:\n del self.allbldgbuids[buid]\n\n self.notified = True\n\n async def _wait(self):\n '''\n Wait on the other editatoms who are constructing nodes my new nodes refer to\n '''\n for buid in self.otherbldgbuids:\n nodeevnt = self.allbldgbuids.get(buid)\n if nodeevnt is None:\n continue\n await nodeevnt[1].wait()\n\n def __exit__(self, exc, cls, tb):\n '''\n Regardless of success, wake up any waiters and clean myself up from shared dict\n '''\n self._notifyDone()\n\n async def commit(self, snap):\n '''\n Push the recorded changes to disk, notify all the listeners\n '''\n if not self.npvs: # nothing to do\n return\n\n for node, prop, _, valu in self.npvs:\n node.props[prop.name] = valu\n node.proplayr[prop.name] = snap.wlyr\n\n splices = [snap.splice('node:add', ndef=node.ndef) for node in self.mybldgbuids.values()]\n for node, prop, oldv, valu in self.npvs:\n info = {'ndef': node.ndef, 'prop': prop.name, 'valu': valu}\n if oldv is not None:\n info['oldv'] = oldv\n splices.append(snap.splice('prop:set', **info))\n\n await snap.stor(self.sops, splices)\n\n for node in self.mybldgbuids.values():\n snap.core.pokeFormCount(node.form.name, 1)\n snap.buidcache.append(node)\n snap.livenodes[node.buid] = node\n\n await self.rendevous()\n\n for node in self.mybldgbuids.values():\n await node.form.wasAdded(node)\n\n # fire all his prop sets\n for node, prop, oldv, valu in self.npvs:\n await prop.wasSet(node, oldv)\n\n if prop.univ:\n univ = snap.model.prop(prop.univ)\n await univ.wasSet(node, oldv)\n\n # Finally, fire all the triggers\n for node, prop, oldv, _ in self.npvs:\n await snap.core.triggers.runPropSet(node, prop, oldv)\n","sub_path":"synapse/lib/editatom.py","file_name":"editatom.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"350803905","text":"#\n# Hello World client in Python\n# Connects REQ socket to tcp://localhost:5555\n# Sends \"Hello\" to server, expects \"World\" back\n#\n\nimport zmq\nimport winsound\nimport time\n\ndef main():\n \"\"\" main method \"\"\"\n \n # Prepare our context and publisher\n context = zmq.Context()\n subscriber = context.socket(zmq.SUB)\n subscriber.connect(\"tcp://127.0.0.1:50004\")\n subscriber.setsockopt(zmq.SUBSCRIBE, '')\n\n while True:\n # Read envelope with address\n message = subscriber.recv_string()\n print(message)\n comand,filename,currentTime,time,x,y,z = message.split(';')\n print(filename)\n winsound.PlaySound(filename, winsound.SND_FILENAME)\n \n\n\n # We never get here but clean up anyhow\n subscriber.close()\n context.term()\n\nif __name__ == \"__main__\":\n 
main()","sub_path":"pythonSoundClient.py","file_name":"pythonSoundClient.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"98041606","text":"from log import add_log\nfrom get_connection import connect\n\n\ndef id_question_whith_answer():\n conn = connect()\n c = conn.cursor()\n log = add_log()\n\n log.info('Sprawdzenie listy id pytań z odpowiedziami')\n\n query_one = \"\"\"\n SELECT id_question FROM \"answers\" GROUP BY id_question;\n \"\"\"\n c.execute(query_one)\n list_of_id_question = c.fetchall()\n # print(list_of_id_question)\n\n return list_of_id_question\n\n\ndef answers_of_question(id_question):\n conn = connect()\n c = conn.cursor()\n log = add_log()\n\n query_two = \"\"\"\n SELECT id_question, answer, question FROM \"answers\" WHERE id_question = ?;\"\"\"\n # INNER JOIN \"questions\" ON answers.id_question = questions.id WHERE id_question = ?;\"\"\"\n\n c.execute(query_two, (id_question,))\n answers_in_data = c.fetchall()\n # print(answers_in_data)\n\n log.info(f'Pobranie odpowiedzi do pytania: {id_question}')\n\n return answers_in_data\n\n\ndef count_answers(answers_y_or_no):\n log = add_log()\n answer_yes, answer_no = 0, 0\n for id_question, answer, question in answers_y_or_no:\n if answer == 'T':\n answer_yes += 1\n if answer == 'N':\n answer_no += 1\n question = question\n every_answer = answer_yes + answer_no\n result_y_n = {'id_question': id_question, 'question': question, 'answer_yes': answer_yes,\n 'answer_no': answer_no, 'every_answer': every_answer}\n # print(result_y_n)\n\n log.info(f'Policzenie odpowiedzi dla {result_y_n}')\n\n return result_y_n\n\n\ndef percentage_share(i):\n log = add_log()\n sum_of_question = i['answer_yes'] + i['answer_no']\n answer_yes = (i['answer_yes'] / sum_of_question) * 100\n answer_yes = f'{answer_yes:.2f} %'\n answer_yes = answer_yes.replace('.', ',')\n id_question = i['id_question']\n answer_no = (i['answer_no'] / sum_of_question) * 100\n answer_no = f'{answer_no:.2f} %'\n answer_no = answer_no.replace('.', ',')\n question = i['question']\n every_answer = i['every_answer']\n result_percent = {'id_pytania': id_question, 'question': question, 'answer_yes': answer_yes,\n 'answer_no': answer_no, 'every_answer': every_answer}\n\n log.info(f'Dokonuję obliczeń procentowych: {result_percent}')\n\n return result_percent\n\n\ndef verify_number_of_every_questions():\n conn = connect()\n c = conn.cursor()\n log = add_log()\n query = \"\"\"\n SELECT id, question FROM \"questions\";\n \"\"\"\n c.execute(query)\n list_of_id = c.fetchall()\n\n log.info('Sprawdznie ilości wszystkich pytań')\n\n return list_of_id\n\n\ndef verify_questions_without_answer(list_of_answers=id_question_whith_answer(),\n list_of_every_questions=verify_number_of_every_questions()):\n log = add_log()\n log.warning('sprawdzenie pytań bez odpowiedzi')\n\n for i in list_of_answers:\n # print(i)\n # print(list_of_answers)\n for element in list_of_every_questions:\n # print(element)\n if i[0] == element[0]:\n list_of_every_questions.remove(element)\n # print('lista do dodania: ', list_of_every_questions)\n\n return list_of_every_questions\n\n\ndef add_to_results_questions_without_answer(list_without_answers=verify_questions_without_answer()):\n results = []\n log = add_log()\n for question in list_without_answers:\n id_question = question[0]\n question = question[1]\n result_no_answer = {'id_question': id_question, 'question': question, 'answer_yes': '0,00 %',\n 'answer_no': '0,00 %'}\n 
results.append(result_no_answer)\n\n log.warning(f'Dodanie do wyników pytania bez odpowiedzi: {result_no_answer}')\n # print(results)\n return results\n\n\ndef prepare_data_with_every_answers():\n id_question_whith_answer()\n\n group_by_list_of_answers = []\n for id in id_question_whith_answer():\n id = id[0]\n answers = answers_of_question(id)\n result = count_answers(answers)\n group_by_list_of_answers.append(result)\n\n results_in_prep = []\n for i in group_by_list_of_answers:\n result = percentage_share(i)\n results_in_prep.append(result)\n\n verify_questions_without_answer()\n\n no_answers_results = add_to_results_questions_without_answer()\n for element in no_answers_results:\n results_in_prep.append(element)\n # print('add: ', results)\n return results_in_prep\n","sub_path":"Python - advanced/The_Form_Project/definitions_of_results.py","file_name":"definitions_of_results.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"624421930","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom django.utils.translation import ugettext_lazy as _, ungettext\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.contrib.sitemaps import Sitemap\nfrom django.template import TemplateDoesNotExist\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom haystack.query import SearchQuerySet\n\nfrom froide.foirequest.models import FoiRequest\nfrom froide.helper.utils import render_400, render_403\nfrom froide.helper.cache import cache_anonymous_page\n\nfrom .models import (PublicBody,\n PublicBodyTag, FoiLaw, Jurisdiction)\nfrom .csv_import import CSVImporter\n\n\ndef index(request, jurisdiction=None, topic=None):\n if jurisdiction is not None:\n jurisdiction = get_object_or_404(Jurisdiction, slug=jurisdiction)\n\n if topic is not None:\n topic = get_object_or_404(PublicBodyTag, slug=topic)\n\n query = request.GET.get('q', '')\n if query:\n publicbodies = SearchQuerySet().models(PublicBody).auto_query(query)\n else:\n publicbodies = PublicBody.objects.get_list()\n\n if topic:\n publicbodies = publicbodies.filter(tags=topic.name if query else topic)\n if jurisdiction:\n publicbodies = publicbodies.filter(\n jurisdiction=jurisdiction.name if query else jurisdiction)\n\n page = request.GET.get('page')\n paginator = Paginator(publicbodies, 50)\n try:\n publicbodies = paginator.page(page)\n except PageNotAnInteger:\n publicbodies = paginator.page(1)\n except EmptyPage:\n publicbodies = paginator.page(paginator.num_pages)\n\n return render(request, 'publicbody/list.html', {\n 'object_list': publicbodies,\n 'jurisdictions': Jurisdiction.objects.get_list(),\n 'jurisdiction': jurisdiction,\n 'topic': topic,\n 'topics': PublicBodyTag.objects.get_topic_list(),\n 'query': query,\n })\n\n\n@cache_anonymous_page(15 * 60)\ndef show_jurisdiction(request, slug):\n jurisdiction = get_object_or_404(Jurisdiction, slug=slug)\n context = {\n \"object\": jurisdiction,\n \"pb_count\": PublicBody.objects.filter(jurisdiction=jurisdiction).count(),\n \"laws\": FoiLaw.objects.filter(meta=False,\n jurisdiction=jurisdiction).order_by('priority'),\n \"foirequests\": FoiRequest.published.filter(jurisdiction=jurisdiction)[:5]\n }\n try:\n return render(request,\n 'publicbody/jurisdiction/%s.html' % jurisdiction.slug, context)\n except TemplateDoesNotExist:\n return render(request,\n 
'publicbody/jurisdiction.html', context)\n\n\ndef show_foilaw(request, slug):\n law = get_object_or_404(FoiLaw, slug=slug)\n context = {\"object\": law}\n return render(request, 'publicbody/show_foilaw.html', context)\n\n\ndef show_publicbody(request, slug):\n obj = get_object_or_404(PublicBody, slug=slug)\n context = {\n 'object': obj,\n 'foirequests': FoiRequest.published.filter(\n public_body=obj).order_by('-last_message')[:10],\n 'resolutions': FoiRequest.published.get_resolution_count_by_public_body(obj),\n 'foirequest_count': FoiRequest.published.filter(public_body=obj).count()\n }\n return render(request, 'publicbody/show.html', context)\n\n\n@require_POST\ndef confirm(request):\n if not request.user.is_authenticated:\n return render_403(request)\n if not request.user.is_staff and not request.user.is_superuser:\n return render_403(request)\n try:\n pb = get_object_or_404(PublicBody, pk=int(request.POST.get('public_body', '')))\n except ValueError:\n return render_400(request)\n result = pb.confirm()\n if result is None:\n messages.add_message(request, messages.ERROR,\n _('This request was already confirmed.'))\n else:\n messages.add_message(request, messages.ERROR,\n ungettext('%(count)d message was sent.',\n '%(count)d messages were sent', result\n ) % {\"count\": result})\n return redirect('admin:publicbody_publicbody_change', pb.id)\n\n\n@require_POST\ndef import_csv(request):\n if not request.user.is_authenticated:\n return render_403(request)\n if not request.user.is_staff and not request.user.is_superuser:\n return render_403(request)\n if not request.method == 'POST':\n return render_403(request)\n importer = CSVImporter()\n url = request.POST.get('url')\n try:\n if not url:\n raise ValueError(_('You need to provide a url.'))\n importer.import_from_url(url)\n except Exception as e:\n messages.add_message(request, messages.ERROR, str(e))\n else:\n messages.add_message(request, messages.SUCCESS,\n _('Public Bodies were imported.'))\n return redirect('admin:publicbody_publicbody_changelist')\n\n\nSITEMAP_PROTOCOL = 'https' if settings.SITE_URL.startswith('https') else 'http'\n\n\nclass PublicBodySitemap(Sitemap):\n protocol = SITEMAP_PROTOCOL\n changefreq = \"monthly\"\n priority = 0.6\n\n def items(self):\n return PublicBody.objects.all()\n\n def lastmod(self, obj):\n return obj.updated_at\n\n\nclass JurisdictionSitemap(Sitemap):\n protocol = SITEMAP_PROTOCOL\n changefreq = \"yearly\"\n priority = 0.8\n\n def items(self):\n return Jurisdiction.objects.all()\n\n\nclass FoiLawSitemap(Sitemap):\n protocol = SITEMAP_PROTOCOL\n changefreq = \"yearly\"\n priority = 0.3\n\n def items(self):\n return FoiLaw.objects.all()\n\n def lastmod(self, obj):\n return obj.updated\n","sub_path":"froide/publicbody/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"83264907","text":"# CS4102 Fall 2019 -- Homework 8\r\n#################################\r\n# Collaboration Policy: You are encouraged to collaborate with up to 4 other\r\n# students, but all work submitted must be your own independently written\r\n# solution. List the computing ids of all of your collaborators in the comment\r\n# at the top of your java or python file. Do not seek published or online\r\n# solutions for any assignments. If you use any published or online resources\r\n# (which may not include solutions) when completing this assignment, be sure to\r\n# cite them. 
Do not submit a solution that you are unable to explain orally to a\r\n# member of the course staff.\r\n#################################\r\n# Your Computing ID: er6qt\r\n# Collaborators: zh2yn, zz9ek\r\n# Sources: Introduction to Algorithms, Cormen\r\n#################################\r\nfrom collections import defaultdict\r\n\r\nclass Marriage:\r\n lukePath = []\r\n lorelaiPath = []\r\n\r\n def __init__(self):\r\n return\r\n\r\n def getLukePath(self):\r\n return self.lukePath\r\n\r\n def getLorelaiPath(self):\r\n return self.lorelaiPath\r\n\r\n # This is the method that should set off the computation\r\n # of marriage. It takes as input a list lines of input\r\n # as strings. You should parse that input and then compute\r\n # the shortest paths that both Luke and Lorelai should take.\r\n # The class fields of lukePath and lorelaiPath should be filled\r\n # with their respective paths. The getters above will be called\r\n # by the grader script.\r\n #\r\n # @return the length of the shortest paths (in rooms)\r\n def compute(self, file_data):\r\n # lukePath = []\r\n # lorelaiPath = []\r\n t_nodes = file_data[0] # total nodes\r\n begin_luke, end_luke = file_data[1].split() #get first and last node of Luke from file (1st list)\r\n begin_lorelai, end_lorelai = file_data[2].split() #get first and last node of Lorelai from file (2nd list)\r\n\r\n if (begin_luke == end_luke) and (begin_lorelai == end_lorelai): # check if beginning and end nodes are the same for both people\r\n done = True\r\n self.lukePath.append(end_luke)\r\n print(len(self.lukePath))\r\n print(\"[\", end_luke, \"]\")\r\n print(\"[\", end_lorelai, \"]\")\r\n\r\n # Add starting nodes to their respective paths\r\n self.lukePath.append(int(begin_luke))\r\n self.lorelaiPath.append(int(begin_lorelai))\r\n self.lukePath.append(int(end_luke))\r\n self.lorelaiPath.append(int(end_lorelai))\r\n\r\n #create an Object Graph1 with a list that holds the edges in the graph\r\n g = Graph1()\r\n edges = []\r\n\r\n #gets all adjacency lists(adjacent nodes) given in the input file\r\n for i in file_data[3:]:\r\n edges.append(i.strip('\\n'))\r\n\r\n # form adjacency list\r\n adj_list = []\r\n for i in edges:\r\n if (i != \" \"):\r\n adj_list.append(i.split(\" \"))\r\n\r\n # go through number of vertices\r\n for vertex in range(0, int(t_nodes)):\r\n #each line\r\n for node in adj_list[vertex]:\r\n g.addEdge(vertex, int(node))\r\n g.addEdge(vertex, vertex)\r\n\r\n # call breath first search on graph to get respective paths\r\n self.lukePath, self.lorelaiPath = g.bfs(self.lukePath[0], self.lukePath[1], self.lorelaiPath[0], self.lorelaiPath[1]) #Change\r\n return len(self.lukePath) # getting \"None\" output, so just replacing it with a blank space\r\n\r\nclass Graph1:\r\n def __init__(self): # graph containing list\r\n self.graph = defaultdict(list)\r\n\r\n def addEdge(self, frm, to): #graph can append nodes/edges\r\n self.graph[frm].append(to)\r\n\r\n def printList(self): #printing graph\r\n return dict.__repr__(self.graph)\r\n\r\n def adjacent(self, frm, to): # checking to see if graph has an edge\r\n if to in self.graph[frm]:\r\n return True\r\n else:\r\n return False\r\n\r\n # main function where bfs algorithm is implemented to get shortest paths\r\n def bfs(self, stLuke, enLuke, stLore, enLore):\r\n Luke_Queue = [[stLuke]]\r\n Lorelai_Queue = [[stLore]]\r\n\r\n LukePath = list()\r\n LorelaiPath = list()\r\n\r\n done = False #set to false to check if we have gotten to our ending nodes\r\n\r\n # if (stLuke == enLuke) and (stLore == enLore):\r\n # 
done = True\r\n # LukePath.append(enLuke)\r\n # print(len(LukePath))\r\n # print(\"[\", enLuke, \"]\")\r\n # print(\"[\", enLore, \"]\")\r\n\r\n while not done: # as long as we have not reached end nodes\r\n while Luke_Queue: # while queue for Luke is not empty\r\n Luke_st = Luke_Queue.pop(0) #add first node to path by popping it off\r\n nodeL = Luke_st[-1]\r\n adj_nodes = self.graph[nodeL] #get the neighboring(adjacent) nodes\r\n\r\n for adj in adj_nodes: #add all the adjacent nodes to a new list\r\n path = list(Luke_st)\r\n path.append(adj)\r\n Luke_Queue.append(path)\r\n if adj == enLuke:\r\n current_val = True\r\n LukePath.append(path)\r\n break\r\n\r\n while Lorelai_Queue: # while queue for Lorelai is not empty, do relatively same thing as done for Luke \r\n Lorelai_st = Lorelai_Queue.pop(0)\r\n nodeLo = Lorelai_st[-1]\r\n adj_nodes_Lorel = self.graph[nodeLo]\r\n\r\n for adj in adj_nodes_Lorel:\r\n path = list(Lorelai_st)\r\n path.append(adj)\r\n Lorelai_Queue.append(path)\r\n if adj == enLore:\r\n current_val = True\r\n LorelaiPath.append(path)\r\n break\r\n\r\n #conditions that check/ensure that neither is in the other’s line-of-sight\r\n\r\n if len(LukePath) > 0 and len(LorelaiPath) > 0 and current_val:\r\n current_val = False\r\n\r\n for Luke_vx in LukePath: # for each vertex in Luke's path\r\n for Lorelai_vx in LorelaiPath: # and for each vertex in Lorelai's path\r\n if len(Luke_vx) == len(Lorelai_vx): # check if they are the same\r\n shortest_path = True # True means we have found the shortest length path\r\n for node in range(0, len(Luke_vx)):\r\n if (self.adjacent(Luke_vx[node], Lorelai_vx[node])):\r\n shortest_path = False\r\n break\r\n elif (Luke_vx[node] == Lorelai_vx[node]):\r\n shortest_path = False\r\n break\r\n\r\n if shortest_path: # if shortest path is found, done is changed to True, and the function is completed\r\n done = True\r\n (len(Luke_vx))\r\n return([*Luke_vx], [*Lorelai_vx]) #Change\r\n\r\n\r\n # if there is a case where Luke's path is longer than Lorelai's, append last node in Lorelai's\r\n # path to have both paths be the same length\r\n elif len(Luke_vx) > len(Lorelai_vx):\r\n LorelaiPath.pop(LorelaiPath.index(Lorelai_vx))\r\n break\r\n\r\n # if there is a case where Lorelai's path is longer than Luke's, append last node in Luke's\r\n # path to have both paths be the same length\r\n elif len(Luke_vx) < len(Lorelai_vx):\r\n LukePath.pop(LukePath.index(Luke_vx))\r\n break","sub_path":"marriage.py","file_name":"marriage.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"233934678","text":"\"\"\"Setup file for packaging swigibpy\"\"\"\n\nimport os\nfrom distutils.command.build_ext import build_ext\nfrom distutils.core import setup, Extension\nfrom distutils.util import get_platform\n\n###\n\nIB_DIR = 'IB_967'\nVERSION = '0.4'\n\nlibraries = []\nif(get_platform().startswith('win')):\n libraries.append('ws2_32')\n\nib_module = Extension('_swigibpy',\n sources=[IB_DIR +\n '/PosixSocketClient/EClientSocketBase.cpp',\n IB_DIR +\n '/PosixSocketClient/EPosixClientSocket.cpp',\n IB_DIR + '/swig_wrap.cpp'],\n include_dirs=[IB_DIR,\n IB_DIR + '/PosixSocketClient',\n IB_DIR + '/Shared'],\n define_macros=[('IB_USE_STD_STRING', '1')],\n libraries=libraries\n )\n\n\nclass swigibpy_build_ext(build_ext):\n def build_extensions(self):\n compiler = self.compiler.compiler_type\n if compiler == 'msvc':\n extra = ('/D_CRT_SECURE_NO_DEPRECATE',\n '/EHsc', '/wd4355', '/wd4800')\n 
else:\n extra = ('-Wno-switch',)\n for ext in self.extensions:\n ext.extra_compile_args += extra\n build_ext.build_extensions(self)\n\n\nreadme = os.path.join(os.path.dirname(__file__), 'README.rst')\nsetup(version=VERSION,\n name='swigibpy',\n author=\"Kieran O'Mahony\",\n author_email=\"kieranom@gmail.com\",\n url=\"https://github.com/Komnomnomnom/swigibpy/\",\n license='New BSD License',\n description=\"\"\"Third party Python API for Interactive Brokers\"\"\",\n long_description=open(readme).read(),\n keywords=[\"interactive brokers\", \"tws\"],\n ext_modules=[ib_module],\n py_modules=[\"swigibpy\"],\n cmdclass={'build_ext': swigibpy_build_ext},\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Development Status :: 4 - Beta\",\n \"Environment :: Other Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Office/Business :: Financial\",\n ],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"652454704","text":"from django.db.models.signals import post_save\r\nfrom django.dispatch import receiver\r\nfrom django.core.files import File\r\nfrom .models import Papermail\r\n\r\nfrom django.conf import settings\r\n\r\nimport magic\r\nfrom wand.image import Image\r\nfrom os import remove\r\n\r\nfrom uuid import uuid4\r\n\r\nmedia_root = getattr(settings, 'MEDIA_ROOT')\r\n\r\n@receiver(post_save, sender=Papermail)\r\ndef generate_thumbnail(sender,instance, **kwargs):\r\n \"\"\"\r\n generate a thumbnail of the file to display in views\r\n only jpeg png or pdf is supported\r\n thumbnail name is generate with uuid module\r\n \"\"\"\r\n \r\n mime = magic.Magic(mime=True)\r\n type_fichier = mime.from_file(instance.paper_file.path)\r\n nom_thumbnail = media_root + uuid4().hex + '_thumb.jpeg'\r\n\r\n if type_fichier == 'image/png' or type_fichier == 'image/jpeg':\r\n\r\n with Image(filename=instance.paper_file.path) as img:\r\n with img.clone() as converted:\r\n converted.format = 'jpeg'\r\n converted.resize(300,400)\r\n converted.save(filename= nom_thumbnail)\r\n fich = File(open(nom_thumbnail,'rb'))\r\n post_save.disconnect(generate_thumbnail, sender=Papermail)\r\n instance.thumbnail.save(name = uuid4().hex + '_thumb.jpeg', content = fich)\r\n post_save.connect(generate_thumbnail, sender=Papermail)\r\n remove(nom_thumbnail)\r\n \r\n elif type_fichier == 'application/pdf':\r\n\r\n with Image(filename=instance.paper_file.path + '[0]') as img:\r\n with img.clone() as converted:\r\n converted.format = 'jpeg'\r\n converted.save(filename= nom_thumbnail)\r\n fich = File(open(nom_thumbnail,'rb'))\r\n post_save.disconnect(generate_thumbnail, sender=Papermail)\r\n instance.thumbnail.save(name = uuid4().hex + '_thumb.jpeg', content = fich)\r\n post_save.connect(generate_thumbnail, sender=Papermail)\r\n remove(nom_thumbnail)","sub_path":"paperworks/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"182061640","text":"# 打印图形\r\ntamp = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\r\nlst = []\r\ndic = {}\r\nfor i in range(1, 8):\r\n if i == 1:\r\n dic[i] = [7]\r\n else:\r\n temp = []\r\n for x in 
dic[i-1]:\r\n temp.append(x-1)\r\n temp.append(x+1)\r\n dic[i] = sorted(list(set(temp)))\r\nfor h in dic.keys():\r\n for j in dic[h]:\r\n tamp[j-1] = '*'\r\n lst.append(''.join(tamp))\r\nfor k in lst:\r\n print(k)\r\n","sub_path":"day2_7.py","file_name":"day2_7.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"446510549","text":"\"\"\" Bob (localhost) \"\"\"\nimport socket, pickle, random\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization, asymmetric\nfrom lib.MyCryptoLibrary import MyCryptoLibrary\n\n# Key generation\nbob_private_key = asymmetric.rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend())\n\n# Assuming that Alice has bob's PK, thus saving it as PEM format to Alice's PC.\nbob_key_pem = bob_private_key.public_key().public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo)\n\nwith open(\"PK_bob.pem\", \"wb\") as key:\n key.write(bob_key_pem)\n\n\ndef retrieve_alice_pk():\n with open(\"PK_alice.pem\", \"rb\") as pem_file:\n PK = serialization.load_pem_public_key(\n pem_file.read(),\n backend=default_backend())\n return PK\n\n\ndef decrypt_and_verify(data, PK):\n decrypted_message = MyCryptoLibrary.decrypt_message(data[0], bob_private_key)\n MyCryptoLibrary.verify_message(decrypted_message, data[1], PK)\n return decrypted_message\n\n\ndef send_encrypted_signed_message(msg, PK):\n cipher_text = MyCryptoLibrary.encrypt_message(msg, PK)\n signature_alice = MyCryptoLibrary.sign_message(msg, bob_private_key)\n data = (cipher_text, signature_alice)\n data_string = pickle.dumps(data)\n client_socket.send(data_string)\n\n\ndef compute_dice_throw(b, a):\n dice_throw = bin(int(b) ^ int(a))\n converted_dice_throw = (int(dice_throw, 2) % 6) + 1\n print(\"Bob computes throw to be \", converted_dice_throw)\n return converted_dice_throw\n\n\n# TCP socket with ipv4\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nhost = \"127.0.0.1\"; port = 6677\naddress = (host, port)\nserver.bind(address)\n\n# Handle connections\nserver.listen(2048)\nrunning = True\nprint(f\"[Server started at {host} on port {port}]\")\n\n# Creating the message to send\n\nwhile running:\n # Accept connection from client\n client_socket, address = server.accept()\n print(f\"Connection from {address} has been established...\")\n\n # Get prerequisites\n PK_alice = retrieve_alice_pk()\n\n print(\"********* Alice's dice throw *********\")\n\n # [b1] Message Com(a,r) received from alice\n received_data = pickle.loads(client_socket.recv(2048))\n print(\"Bob received Com(a,r) from Alice and tries to verify\")\n decrypted_hashed_c_from_alice = decrypt_and_verify(received_data, PK_alice)\n\n # [b2] Bob sends random bit b to Alice\n b1 = bytes(format(random.getrandbits(4), \"b\"), encoding=\"utf-8\")\n send_encrypted_signed_message(b1, PK_alice)\n\n # [b3] Receive second message (a,r) from Alice\n received_data2 = pickle.loads((client_socket.recv(2048)))\n print(\"Bob received (a,r) from Alice and tries to verify\")\n decrypted_a_r = decrypt_and_verify(received_data2, PK_alice)\n decoded_split_a_r = decrypted_a_r.decode(\"utf-8\").split(\",\")\n alice_a1 = decoded_split_a_r[0]\n opened_commitment = bytes(decoded_split_a_r[0] + decoded_split_a_r[1], \"utf-8\")\n\n # [b4] Bob is hashing a + r for checking and computing dice throw\n opened_commitment_hashed = 
MyCryptoLibrary.hash_message(opened_commitment)\n\n if decrypted_hashed_c_from_alice == opened_commitment_hashed:\n print(\"Bob is checking if the hashes match\")\n print(\"[Success] No changes we made to the message\")\n alice_a = decoded_split_a_r[0]\n bob_b = b1.decode(\"utf-8\")\n compute_dice_throw(bob_b, alice_a)\n else:\n print(\"[WARNING] Alice changed her message\")\n\n print()\n print(\"********* Bob's dice throw *********\")\n\n # [b1] Bob samples random bit b and random 128 bit string and sends Com(a,r)\n b2 = format(random.getrandbits(4), \"b\")\n r2 = format(random.getrandbits(128), \"b\")\n c2 = bytes(b2 + r2, encoding=\"utf-8\")\n c_hashed2 = MyCryptoLibrary.hash_message(c2)\n send_encrypted_signed_message(c_hashed2, PK_alice)\n print(\"Sending encrypted Com(a,r) to Alice\")\n\n # [b2] Message a received\n received_data3 = pickle.loads(client_socket.recv(2048))\n print(\"Alice received b from Bob and tries to verify\")\n a2 = decrypt_and_verify(received_data3, PK_alice)\n\n # [b3] Bob sends (a,r) to Alice\n b_r = bytes(b2 + \",\" + r2, encoding=\"utf-8\")\n send_encrypted_signed_message(b_r, PK_alice)\n\n # [b4] Compute output B XOR a under mod 6\n compute_dice_throw(b2, a2)\n\n running = False\n\nclient_socket.close()\n\n\n","sub_path":"bob_local.py","file_name":"bob_local.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"490622903","text":"#\n# @contactrika\n#\n# Wrappers for custom envs.\n#\nfrom torchbeast import atari_wrappers\nfrom coinrun import coinrunenv\n\n\nclass CoinRunOneEnv(coinrunenv.CoinRunVecEnv):\n\n def seed(self, seed):\n print('CoinRun ignores seed()')\n\n def step(self, actions):\n self.step_async(actions)\n res = self.step_wait()\n return res\n\n\ndef create_env(env_name, flags):\n if env_name.startswith('Coin'):\n from coinrun import coinrunenv\n from coinrun import setup_utils as coinrun_setup_utils\n coinrun_setup_utils.setup_and_load(\n use_cmd_line_args=False,\n set_statics=flags.set_statics,\n set_dynamics=flags.set_dynamics,\n num_levels=flags.num_levels,\n any_custom_game=flags.any_custom_game,\n use_pytorch=True, paint_vel_info=0,\n is_high_res=flags.is_high_res,\n default_zoom=flags.default_zoom,\n float_obs=False) # torchbeast divides by 255\n return CoinRunOneEnv('platform', 1,\n default_zoom=flags.default_zoom, float_obs=False)\n else:\n return atari_wrappers.wrap_pytorch(\n atari_wrappers.wrap_deepmind(\n atari_wrappers.make_atari(env_name),\n clip_rewards=False,\n frame_stack=True,\n scale=False))\n","sub_path":"torchbeast/env_wrappers.py","file_name":"env_wrappers.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"121470240","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport os\n\n\ntry:\n\n chrome_options = Options()\n chrome_options.add_argument('--start-maximized')\n browser = webdriver.Chrome(options=chrome_options)\n browser.get('https://olx.ua')\n wait = WebDriverWait(browser, 10)\n element = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#headerSearch')))\n element.send_keys('Автомобиль')\n element = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#submit-searchmain')))\n 
element.click()\n\n\n #сделаем скриншот не хедера, а самих объявлений\n element = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#offers_table')))\n screen_elem = element.screenshot_as_png\n with open(os.path.join(os.path.dirname(__file__), 'scr.png'), 'wb') as f:\n f.write(screen_elem)\n\n\nexcept Exception as error_exp:\n print(error_exp)\n \nfinally:\n browser.quit()","sub_path":"HT_10/selenium_olx.py","file_name":"selenium_olx.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"226340549","text":"import allure\nfrom selenium.webdriver.common.by import By\nfrom Web_services.URL import PaidLessonPage\nfrom Web_services.SetUp import StartInterneturokClassMethod\n\n\n@allure.feature(\"Страница урока Тригонометрические функции y = sin t, y = cos t (Алгебра 11 класс)\")\n@allure.story(\"Проверка наличия элементов в Body во вкладке Видеоурок для не авторизованного пользователя\")\nclass ChecksAllElementsInLessonPageTheBodyTabVideoUserNotAuth(StartInterneturokClassMethod):\n @allure.step(\"Перейти на страницу Алгебра 8 класс\")\n def test_000_open_page(self):\n StartInterneturokClassMethod = self.driver\n go_page = PaidLessonPage(StartInterneturokClassMethod)\n go_page.go_lesson_page()\n\n @allure.step(\"На странице урока отображается название урока (Основные понятия)\")\n def test_lesson_title(self):\n self.assertEqual(\"АверНик.Тригонометрические функции y = sin t, y = cos t\",\n self.driver.find_element_by_css_selector(\"h1.lesson-title\").text)\n\n @allure.step(\"На странице урока отображается кнопка перейти на предыдущий урок (Кнопка влево)\")\n def test_lesson_arrow_left(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-arrow_left\"))\n\n @allure.step(\"На странице урока отображается кнопка перейти на следующий урок (Кнопка вправо)\")\n def test_lesson_arrow_right(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"a.lesson-arrow_right\"))\n\n @allure.step(\"На страни��е урока отображается Вкладки урока (Видеоурок, Текстовый урок и т.д)\")\n def test_lesson_controls_body(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"ul.lesson-controls__body\"))\n\n @allure.step(\"На странице урока отображается вкладка (Видеоурок)\")\n def test_button_video(self):\n self.assertEqual(\"Видеоурок\", self.driver.find_element_by_css_selector(\"li.lc-video\").text)\n with allure.step(\"Во вкладке Видеоурок отображается иконка\"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-controls__icon_vid\"))\n\n @allure.step(\"На странице урока отображается вкладка (Тестовый урок)\")\n def test_button_text_lesson(self):\n self.assertEqual(\"Текстовый урок\", self.driver.find_element_by_css_selector(\"li.lc-txt\").text)\n with allure.step(\"Во вкладке Текстовый урок отображается иконка\"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-controls__icon_vid\"))\n\n @allure.step(\"На странице урока отображается вкладка (Тренажеры)\")\n def test_button_training(self):\n self.assertEqual(\"Тренажеры\", self.driver.find_element_by_css_selector(\n \".lesson-controls__body:nth-child(1) .lesson-controls__wrap:nth-child(3)\").text)\n with allure.step(\"Во вкладке Тренажеры отображается иконка\"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-controls__icon_training\"))\n\n @allure.step(\"На странице урока отображается вкладка (Тесты)\")\n def test_button_test(self):\n 
self.assertEqual(\"Тесты\", self.driver.find_element_by_css_selector(\"li.lc-test\").text)\n with allure.step(\"Во вкладке Тесты отображается иконка\"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-controls__icon_test\"))\n\n @allure.step(\"На странице урока отображается вкладка (Вопросы к уроку)\")\n def test_button_questions(self):\n self.assertEqual(\"Вопросы к уроку\", self.driver.find_element_by_css_selector(\"li.lc-questions\").text)\n with allure.step(\"Во вкладке Вопросы к уроку отображается иконка\"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-controls__icon_questions\"))\n\n @allure.step(\"На странице урока отображается кнопка (Заметки)\")\n def test_button_note(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.lesson-note-widget\"))\n\n @allure.step(\"В видеоуроке отображается (Превью видеоурока)\")\n def test_displayed_preview_video(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.player__block\"))\n\n @allure.step(\"В видеоуроке отображается кнопка (Плей)\")\n def test_displayed_button_play(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.player__block-play\"))\n\n @allure.step(\"В видеоуроке отображается заглушка (Этот видеоурок доступен по абонементу)\")\n def test_video_blocker(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.video-blocker\"))\n with allure.step(\"В загулшке в левом углу отображается звезда (платный урок)\"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.video-blocker__body-corner\"))\n with allure.step(\"В заглушка присутствует текст (Этот видеоурок доступен по абонементу)\"):\n self.assertEqual(\"Этот видеоурок доступен по абонементу\",\n self.driver.find_element_by_css_selector(\"h5.video-blocker__content_text-title\").text)\n with allure.step(\"В заглушка присутствует ссылка (Подробнее об абонементе, платных и бесплатных уроках)\"):\n self.assertEqual(\"Подробнее об абонементе, платных и бесплатных уроках\",\n self.driver.find_element_by_css_selector(\"a.video-blocker__content_text-link.link\").text)\n with allure.step(\"В заглушка присутствует текст (У вас уже есть абонемент?)\"):\n self.assertEqual(\"У вас уже есть абонемент? Войти\",\n self.driver.find_element_by_css_selector(\"p.has-abonement\").text)\n with allure.step(\"В заглушка отображается ссылка (Войти)\"):\n self.assertEqual(\"Войти\",\n self.driver.find_element_by_xpath(\"//div[2]/div/div[1]/p/a\").text)\n with allure.step(\"В заглушка отображается кнопка (Оплатить абонемент от 150 руб. в месяц)\"):\n self.assertEqual(\"Оплатить абонемент\\nот 150 руб. в месяц\",\n self.driver.find_element_by_css_selector(\"a.abonement__buy\").text)\n\n @allure.step(\"В конспекте присутствуют ссылки с таймлайнами (Определение и примеры алгебраических дробей)\")\n def test_displayed_lesson_subtitle(self):\n self.assertEqual(\"1. 
Определение функции\",\n self.driver.find_element_by_xpath(\"//h2[1]/a\").text)\n\n @allure.step(\n \"В конспекте присутствуют рекламный баннер ДШ (Решите домашниее задание и получите оценку в Домашней школе InternetUrok)\")\n def test_lesson_footer_error(self):\n self.assertEqual(\"https://files.interneturok.ru/public/undertext_ver1_1.jpg\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__error:nth-child(1) .lesson-footer__error-img:nth-child(1)\").get_attribute(\n \"src\"))\n\n @allure.step(\"В уроке в конце конспекта отображается ссылка (Информация об уроке)\")\n def test_displayed_lesson_footer_button_info(self):\n self.assertEqual(\"Информация об уроке\",\n self.driver.find_element_by_id(\"info-link\").text)\n with allure.step(\"В кнопке Информация об уроке присутствует иконка \"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.icon-lesson-info\"))\n\n @allure.step(\"В уроке в конце конспекта отображается ссылка (Комментарии (11))\")\n def test_displayed_lesson_footer_button_comment(self):\n self.assertEqual(\"Комментарии (11)\",\n self.driver.find_element_by_id(\"comments-link\").text)\n with allure.step(\"В кнопке Комментарии (8) присутствует иконка \"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.icon-lesson-comments\"))\n\n @allure.step(\"В уроке в конце конспекта отображается ссылка (Поделиться)\")\n def test_displayed_lesson_footer_button_share(self):\n self.assertEqual(\"Поделиться\",\n self.driver.find_element_by_id(\"share-link\").text)\n with allure.step(\"В кнопке Поделиться присутствует иконка \"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.icon-lesson-share\"))\n\n @allure.step(\"В уроке в конце конспекта отображается ссылка (В избранное)\")\n def test_displayed_lesson_footer_button_lesson_add(self):\n self.assertEqual(\"В избранное\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer:nth-child(4) .ember-view:nth-child(4) .lesson-icons__group\").text)\n with allure.step(\"В кнопке В избранное присутствует иконка \"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.icon-lesson-add\"))\n\n @allure.step(\"В уроке в конце конспекта отображается ссылка (Нашли ошибку?)\")\n def test_displayed_lesson_footer_button_lesson_report(self):\n self.assertEqual(\"Нашли ошибку?\",\n self.driver.find_element_by_css_selector(\"a.lesson-icons__group.ember-view\").text)\n with allure.step(\"В кнопке Нашли ошибку? 
присутствует иконка \"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.icon-lesson-report\"))\n\n @allure.step(\"В уроке в конце конспекта отображается текст (Оценить урок:)\")\n def test_displayed_lesson_footer_button_rating(self):\n self.assertEqual(\"Оценить урок:\",\n self.driver.find_element_by_css_selector(\"div.rating\").text)\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (5 звезда)\"):\n self.assertEqual(\"star5\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__row div >label:nth-child(3)\").get_attribute(\"for\"))\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (4 звезда)\"):\n self.assertEqual(\"star4\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__row div >label:nth-child(5)\").get_attribute(\"for\"))\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (3 звезда)\"):\n self.assertEqual(\"star3\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__row div >label:nth-child(7)\").get_attribute(\"for\"))\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (3 звезда)\"):\n self.assertEqual(\"star3\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__row div >label:nth-child(7)\").get_attribute(\"for\"))\n\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (2 звезда)\"):\n self.assertEqual(\"star2\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__row div >label:nth-child(9)\").get_attribute(\"for\"))\n\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (1 звезда)\"):\n self.assertEqual(\"star1\",\n self.driver.find_element_by_css_selector(\n\n \".lesson-footer__row div >label:nth-child(11)\").get_attribute(\"for\"))\n\n @allure.step(\n \"В уроке в конце конспекта отображается ссылка Хлебные крошки (Главная > Алгебра, 11 класс > Тригонометрические функции y = sin t, y = cos t)\")\n def test_displayed_link_main(self):\n self.assertEqual(\"Библиотека InternetUrok.ru Алгебра, 11 класс АверНик.Тригонометрические функции y = sin t, y = cos t\",\n self.driver.find_element_by_css_selector(\"ol.breadcrumbs.overflow-h\").text)\n\n @allure.step(\"В уроке в конце конспекта отображается блок оценки урока (Вконтакте:)\")\n def test_displayed_lesson_footer_button_social_vk(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.social__vk\"))\n with allure.step(\"В кнопке Вконтаке отображается кнопка Лайкнуть\"):\n self.assertTrue(self.is_element_present(By.ID, \"vk_like\"))\n\n @allure.step(\"В уроке в конце конспекта отображается блок оценки урока (Facebook)\")\n def test_displayed_lesson_footer_button_social_facebook(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.social__fb\"))\n\n @allure.step(\"В уроке в конце конспекта отображается блок оценки урока (Одноклассники)\")\n def test_displayed_lesson_footer_button_social_od(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.social__ok\"))\n\n @allure.step(\"Проверка наличия кнопки (Подготовка к ЕГЭ) в 11 классе Алгебра\")\n def test_displayed_button_preparation_the_EGE(self):\n self.assertEqual(u\"Подготовка к ЕГЭ\", self.driver.find_element_by_css_selector(\n \".ember-view > div > ul > li.lesson-controls__wrap.lc-ege > 
a\").text)\n","sub_path":"Web_services/Paid_lesson_page/Сheck_all_elements_on_the_lesson_page/test_пользователь_не_авторизован_проверка_Body_вкладка_Видеоурок.py","file_name":"test_пользователь_не_авторизован_проверка_Body_вкладка_Видеоурок.py","file_ext":"py","file_size_in_byte":15895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"359191109","text":"#修改图片的size小于iphone5的分辨率\nfrom PIL import Image\nimport os\npath = 'E:/PythonProject/Python3/005/pythonimage'\n#size \nsize = 1136,640\n\ndef walkfiles():\n for i in os.listdir(path):\n #过滤文件和目录如果需要判断文件是不是图片的话就需要检测后缀\n if(os.path.isfile(os.path.join(path,i))):\n #print('Path是:' + os.path.join(path,i) + ' name是:' + i)\n thumbnailimage(os.path.join(path,i),i)\n\ndef thumbnailimage(imgpath,name):\n im = Image.open(imgpath)\n #当给定的size大于图片本身的size 则不会生成缩略图\n im.thumbnail(size,Image.ANTIALIAS)\n #这里重命名文件名 用join也能修改格式 哪种方式都一样\n # print(path+'/'+name.replace('.jpg','_thum.jpg'))\n im.save(path+'/'+name.replace('.jpg','_thum.jpg'),'JPEG')\n\nwalkfiles()","sub_path":"005/changeSize.py","file_name":"changeSize.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"109303367","text":"import numpy as np\n\nclass randomff_ext:\n def __init__(self, D1, D2, p, lamda = 0, sigma1 = 1, sigma2 = 1):\n self.D1 = D1\n self.D2 = D2\n self.lamda = lamda\n\n mean = np.zeros(p)\n cov = sigma1 * np.identity(p)\n self.w1 = np.random.multivariate_normal(mean, cov, D1)\n self.bias1 = np.random.uniform(0,2*np.pi,D1)\n\n mean = np.zeros(2*D1)\n cov = sigma2 * np.identity(2*D1)\n self.w2 = np.random.multivariate_normal(mean,cov,D2)\n self.bias2 = np.random.uniform(0,2*np.pi,D2)\n \n def train(self, xdata, ydata):\n print(\"\\nStarting to train the model.\\n\")\n lamda = self.lamda\n z = self.embed_all(xdata)\n self.beta_hat = np.linalg.solve(np.dot(z.T,z) + lamda * np.identity( 2*self.D2 ),\n np.dot( z.T, ydata))\n print(\"\\nThe model has been trained successfully!\\n\")\n\n def predict(self, xdata):\n z = self.embed_all(xdata)\n y_hat = np.dot(z, self.beta_hat)\n return y_hat\n\n def embed(self, xdata, layer = 1):\n n = xdata.shape[0]\n if layer == 1:\n w = self.w1\n D = self.D1\n b = self.bias1\n else:\n w = self.w2\n D = self.D2\n b = self.bias2\n biases = np.array([b for i in range(n)])\n z = np.dot( xdata, w.T ) + biases\n z_cos = np.cos( z ) / np.sqrt( D )\n z_sin = np.sin( z ) / np.sqrt( D )\n z = np.hstack( ( z_cos, z_sin ) )\n return z\n\n def embed_all(self, xdata):\n mu_x = np.zeros(self.D1 * 2)\n for X in xdata:\n z = self.embed(X,layer = 1)\n mean_embedding = np.mean(z, axis=0)\n mu_x = np.vstack((mu_x, mean_embedding))\n mu_x = mu_x[1:,]\n z = self.embed(mu_x, layer = 2)\n return z\n\n def test(self, xdata, ydata):\n y_hat = self.predict(xdata)\n RMSE = np.linalg.norm(y_hat-ydata)/np.sqrt(len(y_hat))\n return RMSE\n","sub_path":"final_files/src/randff_ext.py","file_name":"randff_ext.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"521308909","text":"from threading import Thread\nfrom logger import MyLogger\nfrom time import sleep, time\nfrom numpy import log, polyfit, sqrt, std, subtract\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport sqlalchemy\nimport numpy as np\nimport math\nimport json\n\nfrom xcoin.bitmexAPI import BitMEXWebsocket\n\n'''\n 고팍스 코인 시세 데이터 저장\n 
https://www.bitmex.com/app/trade/XBTUSD\n\n 해당 거래소에서 발급받은 key, secret 코드로 secrets.json 파일을 아래와 같이 생성해야 함\n {\n \"key\": \"value\",\n \"secret\": \"value\"\n }\n\n 20180415 by Daesony\n'''\nclass BitmexDataCollector(Thread):\n def __init__(self):\n Thread.__init__(self)\n self.logger = MyLogger.instance().logger()\n self.logger.debug('CoinDataCollector Start!!!!')\n\n self.starttime = time()\n\n with open(\"secrets.json\") as secrets_file:\n self.secrets = json.load(secrets_file)\n secrets_file.close()\n\n self.api = BitMEXWebsocket(endpoint=\"https://www.bitmex.com/api/v1\", symbol=\"XBTUSD\",\n symbolSubs=[\"trade\"], genericSubs=[\"wallet\"],\n api_key=self.secrets['bitmexkey'], api_secret=self.secrets['bitmexsecret'])\n\n def run(self):\n\n try:\n self.api.start()\n\n except Exception as e:\n print('ERROR : ' + str(e) + str(e.message) + str(e.args))\n\n\n\n\n def test(self):\n try:\n\n self.api.get_instrument()\n\n '''\n rst = self.api.getTrades('ETH-KRW')\n print(rst)\n df = pd.DataFrame(rst)\n #df.index = df['id']\n #return df['price']\n df['krtime'] = pd.to_datetime(df['date'], unit='s').dt.tz_localize('UTC').dt.tz_convert('Asia/Seoul')\n\n #data = df[['id', 'price', 'date', 'krtime']]\n data = df[['id', 'price', 'date']]\n data.index = data['id']\n data = data.drop('id', axis=1)\n '''\n\n return 0\n\n\n except Exception as c:\n print('ERROR getMarketHistory:' + str(c))\n sleep(1)\n '''\n def insertData(self, data):\n engine = create_engine('mysql+pymysql://root:Da2$ony2016%@localhost/data', encoding='utf8')\n data.to_sql('ETH_KRW', engine, if_exists='append')\n '''\n\n\nif __name__ == '__main__':\n try:\n BitmexDataCollector().start()\n\n except Exception as e :\n print('ERROR : ' + str(e))\n","sub_path":"xcoin/bitmexDataCollector.py","file_name":"bitmexDataCollector.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"525368168","text":"def post(fposts,insieme):\n ID = set()\n doc = open(fposts)\n text = doc.read()\n #il testo va in rifinitura con l'eliminazione del POST e dello spazio\n stringa =' '.join(text.split())\n lst = stringa.split('')\n for i in lst:\n strpost = i.split()\n for stringa in strpost:\n parola = list(stringa)\n #elimino i caratteri fastidiosi\n for char in parola:\n if char == '!' 
or char == '*' or char=='[' or char==']':\n                    parola.remove(char)\n                # remove everything that is not alphabetic\n                elif not char.isalpha():\n                    parola.remove(char)\n            clean = ''.join(parola) \n            minclean = clean.lower()\n            # compare lowercase so uppercase and lowercase count as equivalent\n            for x in insieme:\n                nuova= x.lower()\n                if minclean == nuova:\n                    ID.add(strpost[0])\n    return ID","sub_path":"students/1792156/homework02/program01.py","file_name":"program01.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"291268860","text":"# this module takes in the input parameters, trains a\n# polynomial regression algorithm on the dataset,\n# and produces predictions on the test set and provides the error metrics\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import metrics\nfrom sklearn.linear_model import LinearRegression\n\nclass poly_engine:\n    def __init__(self, filename, col_list, target_y, test_size = 0.2, degree = 2):\n        self.filename = filename\n        self.col_list = col_list\n        self.target_y = target_y\n        self.test_size = test_size\n        self.degree = degree\n        self.dataset = self.load_data()\n        self.actual_values = []\n        self.predicted_values = []\n        self.score = 0.0\n\n    def load_data(self):\n        data = pd.read_csv(self.filename)\n        return data.iloc[:2372, : ]\n\n    def run_engine(self):\n        y = self.dataset[self.target_y][1:].values\n        X = self.dataset[self.col_list][:-1].values\n\n        #split the entire set into training and test sets\n        \n        train_length = round(X.shape[0] * (1 - self.test_size))\n        X_train = np.array(X[:train_length])\n        y_train = np.array(y[:train_length])\n        X_test = np.array(X[train_length:])\n        y_test = np.array(y[train_length:])\n        \n        self.actual_values = y_test\n        \n        #fit the X parameters of the training set into a polynomial of degree d, best case d = 2\n        poly_reg = PolynomialFeatures(degree = self.degree)\n        X_poly = poly_reg.fit_transform(X_train)\n\n        #create the model\n        lin_reg2 = LinearRegression()\n\n        #fit the prediction model\n        lin_reg2.fit(X_poly, y_train)\n        \n        X_transform = poly_reg.fit_transform(X_test)\n        self.score = lin_reg2.score(X_transform, self.actual_values)\n\n        #make predictions\n        self.predict(lin_reg2, X_transform)\n        return lin_reg2\n\n    def predict(self, lin_reg2, X):\n        predictions = lin_reg2.predict(X)\n        self.predicted_values = predictions\n    \n    def get_score(self):\n        return self.score\n    \n    def get_root_mean_squared_error(self):\n        rmse = np.sqrt(metrics.mean_squared_error(self.actual_values, self.predicted_values))\n        return rmse\n    \n    def get_mean_absolute_error(self):\n        mae = metrics.mean_absolute_error(self.actual_values, self.predicted_values)\n        return mae\n    \n    def get_root_mean_squared_log_error(self):\n        rmsle = np.sqrt(metrics.mean_squared_log_error(self.actual_values, self.predicted_values))\n        return rmsle\n    \n    def get_error(self):\n        return self.predicted_values - self.actual_values\n\n    def get_prediction_accuracy(self):\n        #calculate errors for each prediction\n        y_error = self.predicted_values - self.actual_values\n        #variable count stores errors in prediction where error < 20%\n        count = []\n        for i in range(len(self.actual_values)):\n            if(abs(y_error[i]) < 0.20 * self.actual_values[i]):\n                count.append(abs(y_error[i]))\n        return len(count) / len(y_error) * 
100\n","sub_path":"code/model_evaluation_code/polynomial_reg/poly_engine.py","file_name":"poly_engine.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"275311087","text":"items_in_cart = {}\nclass ShoppinCart:\n def __init__(self,customer_id):\n self.customer_id = customer_id\n def add_item(self,product,price):\n if product in items_in_cart:\n print(\"already exists\")\n else:\n items_in_cart[product] = price\n print(product + \" added\")\n def remove_item(self,product):\n if product in items_in_cart:\n del items_in_cart[product]\n else:\n print(\"product not found\")\n # def remove_item(self,product):\nmy_shopping = ShoppinCart(\"id\")\n# my_shopping.add_item(\"oil\",250)\n# my_shopping.add_item(\"oil\",250)\nmy_shopping.add_item(\"fruits\",250)\nprint(items_in_cart)\nmy_shopping.remove_item(\"fruits\")\n\n\n\nprint(items_in_cart)\n\n\n","sub_path":"submissions/sp_014_santhi-sri/week_13/day_3/coding_1/cart_class.py","file_name":"cart_class.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"49602633","text":"import os\nimport pickle\n\n# Reads the User tweets (specified in seedusers files) from the folder /user_tweets_preprocess\n# returns array of preprocessed, tokenized user tweets\n\ndef read_user_stance_tweets_tokenized_pickles(seedusers_file):\n print('started reading user files for ' + seedusers_file)\n \n #Get the seedusers screen names\n seedusers_screen_names = []\n file = open(seedusers_file, 'r') \n for line in file:\n seedusers_screen_names.append(line.strip())\n \n user_tweets = []\n\n for screen_name in seedusers_screen_names:\n #check if file is in path\n filename = screen_name+'.pkl'\n \n if os.path.isfile('user_tweets_preprocess/'+filename):\n try:\n tweets_user_from_file = pickle.load(open(\"user_tweets_preprocess/\"+filename, \"rb\" ) )\n user_tweets.append(tweets_user_from_file)\n except Exception as e:\n print('exception occurred while trying to read user file: {}'.format(filename))\n print(str(e))\n pass\n \n else:\n print('Tweet File not found for user: ' + screen_name)\n \n print('reading of user files finished for seedusers file ' + seedusers_file)\n return user_tweets","sub_path":"Software/Crawling/Workspace/Crawler/DataManipulator/helper_functions/read_user_stance_tweets_tokenized_pickles.py","file_name":"read_user_stance_tweets_tokenized_pickles.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"243990042","text":"from Acquisition import aq_inner\nfrom Acquisition import aq_parent\nfrom opengever.base.interfaces import IReferenceNumber\nfrom opengever.base.interfaces import IReferenceNumberPrefix\nfrom opengever.bundle.sections.constructor import IDontIssueDossierReferenceNumber\nfrom opengever.dossier.behaviors.dossier import IDossier\nfrom opengever.dossier.indexers import TYPES_WITH_CONTAINING_SUBDOSSIER_INDEX\nfrom opengever.globalindex.handlers.task import sync_task\nfrom opengever.globalindex.handlers.task import TaskSqlSyncer\nfrom opengever.meeting.handlers import ProposalSqlSyncer\nfrom opengever.task.task import ITask\nfrom plone import api\nfrom plone.app.workflow.interfaces import ILocalrolesModifiedEvent\nfrom zope.component import getAdapter\nfrom zope.container.interfaces import IContainerModifiedEvent\nfrom zope.lifecycleevent import 
IObjectRemovedEvent\n\n\ndef set_former_reference_before_moving(obj, event):\n    \"\"\"Temporarily store current reference number before\n    moving the dossier.\n    \"\"\"\n    # make sure obj wasn't just created or deleted\n    if not event.oldParent or not event.newParent:\n        return\n\n    dossier_repr = IDossier(obj)\n    ref_no = getAdapter(obj, IReferenceNumber).get_number()\n    IDossier['temporary_former_reference_number'].set(dossier_repr, ref_no)\n\n\ndef set_former_reference_after_moving(obj, event):\n    \"\"\"Use the (hopefully) stored former reference number\n    as the real new former reference number. This has to\n    be done after the dossier was moved.\n\n    \"\"\"\n    # make sure obj wasn't just created or deleted\n    if not event.oldParent or not event.newParent:\n        return\n\n    dossier_repr = IDossier(obj)\n    former_ref_no = dossier_repr.temporary_former_reference_number\n    IDossier['former_reference_number'].set(dossier_repr, unicode(former_ref_no))\n    # reset temporary former reference number\n    IDossier['temporary_former_reference_number'].set(dossier_repr, u'')\n\n    # setting the new number\n    parent = aq_parent(aq_inner(obj))\n    prefix_adapter = IReferenceNumberPrefix(parent)\n    prefix_adapter.set_number(obj)\n\n    obj.reindexObject(idxs=['reference'])\n\n\n# Update reference number when adding / moving content\n# (IObjectAddedEvent inherits from IObjectMovedEvent)\ndef save_reference_number_prefix(obj, event):\n    if IDontIssueDossierReferenceNumber.providedBy(obj.REQUEST):\n        return\n\n    if IObjectRemovedEvent.providedBy(event):\n        return\n\n    parent = aq_parent(aq_inner(obj))\n    prefix_adapter = IReferenceNumberPrefix(parent)\n    if not prefix_adapter.get_number(obj):\n        prefix_adapter.set_number(obj)\n\n    # because we can't control the order of event handlers we have to sync\n    # all containing tasks manually\n    catalog = api.portal.get_tool('portal_catalog')\n    tasks = catalog({\n        'path': '/'.join(obj.getPhysicalPath()),\n        'object_provides': 'opengever.task.task.ITask',\n        'depth': -1})\n    for task in tasks:\n        TaskSqlSyncer(task.getObject(), None).sync()\n\n    # And also proposals\n    proposals = catalog({\n        'path': '/'.join(obj.getPhysicalPath()),\n        'object_provides': ['opengever.meeting.proposal.IBaseProposal'],\n        'depth': -1})\n    for proposal in proposals:\n        ProposalSqlSyncer(proposal.getObject(), None).sync()\n\n    obj.reindexObject(idxs=['reference'])\n\n\ndef reindex_containing_subdossier_for_contained_objects(dossier, event):\n    \"\"\"When a subdossier is modified, we update the ``containing_subdossier``\n    index of all contained objects (documents, mails and tasks) so they don't\n    show an outdated title in the ``subdossier`` column\n    \"\"\"\n    catalog = api.portal.get_tool('portal_catalog')\n    objects = catalog(path='/'.join(dossier.getPhysicalPath()),\n                      portal_type=TYPES_WITH_CONTAINING_SUBDOSSIER_INDEX)\n\n    for obj in objects:\n        obj.getObject().reindexObject(idxs=['containing_subdossier'])\n\n\ndef reindex_containing_dossier_for_contained_objects(dossier, event):\n    \"\"\"Reindex the containing_dossier index for all the contained objects.\n    \"\"\"\n    for brain in dossier.portal_catalog(path='/'.join(dossier.getPhysicalPath())):\n        obj = brain.getObject()\n        obj.reindexObject(idxs=['containing_dossier'])\n\n        if ITask.providedBy(obj):\n            sync_task(brain.getObject(), event)\n\n\ndef reindex_contained_objects(dossier, event):\n    \"\"\"When a dossier is modified, if the title has changed we reindex\n    the corresponding index in all contained objects (containing_dossier or\n    containing_subdossier)\n    \"\"\"\n    if 
ILocalrolesModifiedEvent.providedBy(event) or \\\n IContainerModifiedEvent.providedBy(event):\n return\n\n attrs = tuple(\n attr\n for descr in event.descriptions\n for attr in descr.attributes\n )\n if 'IOpenGeverBase.title' not in attrs:\n return\n\n if dossier.is_subdossier():\n reindex_containing_subdossier_for_contained_objects(dossier, event)\n else:\n reindex_containing_dossier_for_contained_objects(dossier, event)\n\n\ndef reindex_blocked_local_roles(dossier, event):\n \"\"\"Reindex blocked_local_roles upon the acquisition blockedness changing.\"\"\"\n dossier.reindexObject(idxs=['blocked_local_roles'])\n\n\ndef purge_reference_number_mappings(copied_dossier, event):\n \"\"\"Reset the reference number mapping when copying (or actually pasting)\n dossiers.\n \"\"\"\n prefix_adapter = IReferenceNumberPrefix(copied_dossier)\n prefix_adapter.purge_mappings()\n","sub_path":"opengever/dossier/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"488104704","text":"import numpy as np\r\nimport glob as gb\r\nimport cv2\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport keras.backend as K\r\nimport itertools\r\nfrom imgaug import augmenters as iaa\r\nfrom spatial_transformer import SpatialTransformer\r\nfrom keras.layers.core import Dense, Flatten, Dropout\r\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\r\nfrom keras.utils import np_utils\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras.models import Model\r\nfrom keras.layers import Input\r\nfrom keras.layers.merge import concatenate\r\n\r\n\r\ntrainings = []\r\nlabels = []\r\nDIM = 60\r\nnp.random.seed(1337)\r\nbatch_size = 128\r\nnb_classes = 84\r\nnb_epoch = 12\r\nclass_names = []\r\n\r\n\r\n\r\ndef plot_confusion_matrix(cm, classes,normalize=False,title='Confusion matrix',cmap=plt.cm.Blues):\r\n \"\"\"\r\n Plotting the confusion matrix.\r\n \"\"\"\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n\r\ndef listShuffling(trainings, labels):\r\n \"\"\"\r\n Shuffling the training and label lists \r\n \"\"\"\r\n c = list(zip(trainings, labels))\r\n random.shuffle(c)\r\n a, b = zip(*c)\r\n \r\n return a, b\r\n\r\n\r\ndef readImage(picture):\r\n \"\"\"\r\n Reading each image with opencv \r\n \"\"\" \r\n return cv2.imread(picture, 0)\r\n\r\n\r\ndef viewImage(image):\r\n \"\"\"\r\n View Image \r\n \"\"\"\r\n cv2.imshow('image',image)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n\r\ndef formulation(image, directory):\r\n \r\n container = np.array([[]], dtype='float32')\r\n \r\n container = image.ravel() # Reshaping image in 3600\r\n t = container.astype(np.float32) / 255.0\r\n trainings.append(t) 
# Creating image training list\r\n labels.append(directory) # Creating image label list\r\n \r\n \r\n\r\n\r\ndef prepareTrainingSet():\r\n \"\"\"\r\n Preparing the image training list and image label list \r\n \"\"\"\r\n \r\n r = random.randint(1,6)\r\n\r\n seq = iaa.Sequential([\r\n iaa.Affine(translate_px={\"x\": (-r, r), \"y\": (-r, r)}),\r\n iaa.Affine(rotate=(-10, 10)),\r\n iaa.Affine(scale={\"x\": (r, r), \"y\": (r, r)}),\r\n iaa.AdditiveGaussianNoise(scale=(0, 0.02 * 255))\r\n ])\r\n\r\n \r\n for directory in range(0, 84):\r\n print(\"---------------------\", directory)\r\n class_names.append(directory)\r\n for picture in gb.glob(\"./cluttered/\"+str(directory)+\"/*.png\"):\r\n\r\n img = readImage(picture) # Reading image of 60 x 60 pixels\r\n formulation(img, directory)\r\n \r\n ## Augmentation\r\n img = img[:, :, np.newaxis] # Adding image channel 1\r\n img_aug = seq.augment_image(img) # Applying augmentation\r\n img_transposed = img_aug.transpose(2,0,1) \r\n img_reshaped = img_transposed.reshape(-1, img_aug.shape[1]) # Reshaping in 60 x 60\r\n formulation(img_reshaped, directory)\r\n \r\n \r\n return listShuffling(trainings, labels); # Shuffling lists\r\n\r\n \r\n# Initializing image training list and image label list \r\nX, Y = prepareTrainingSet()\r\nclass_names = np.array(class_names)\r\ntrainings = []\r\nlabels = []\r\n\r\n# Data Split\r\nsamples = len(Y)\r\n\r\ntrain_samples = int(samples * 0.8)\r\nX_train = np.array(X[ : train_samples])\r\ny_train = np.asarray(Y[ : train_samples], dtype=np.int64)\r\n\r\nvalid_samples = int(samples * 0.05)\r\nX_valid = np.array(X[train_samples : train_samples + valid_samples])\r\ny_valid = np.asarray(Y[train_samples : train_samples + valid_samples], dtype=np.int64)\r\n\r\ntest_samples = int(samples * 0.15)\r\nX_test = np.array(X[-test_samples : ])\r\ny_test = np.asarray(Y[-test_samples : ], dtype=np.int64)\r\n\r\n\r\nX = []\r\nY = []\r\n\r\n\r\n# Reshape for convolutions\r\nX_train = X_train.reshape((X_train.shape[0], DIM, DIM, 1))\r\nX_valid = X_valid.reshape((X_valid.shape[0], DIM, DIM, 1))\r\nX_test = X_test.reshape((X_test.shape[0], DIM, DIM, 1))\r\n\r\n\r\ny_train = np_utils.to_categorical(y_train, nb_classes)\r\ny_valid = np_utils.to_categorical(y_valid, nb_classes)\r\ny_test = np_utils.to_categorical(y_test, nb_classes)\r\n\r\ninput_shape = (60, 60, 1)\r\n\r\n# initial weights\r\nb = np.zeros((2, 3), dtype='float32')\r\nb[0, 0] = 1\r\nb[1, 1] = 1\r\nW = np.zeros((50, 6), dtype='float32')\r\nweights = [W, b.flatten()]\r\n\r\n\r\n#Localization\r\n\r\nvisible = Input(shape=input_shape)\r\n\r\n\r\nlocnet = Flatten()(visible)\r\nlocnet = Dense(50)(locnet)\r\nlocnet = Dense(units=50, activation='relu')(locnet)\r\nlocnet = Dense(6, weights=weights)(locnet)\r\nlocnet = Model(input=visible, output=locnet)\r\n\r\n#STN\r\nstn = SpatialTransformer(localization_net=locnet, output_size=(28,28), input_shape=input_shape)(visible)\r\n\r\n\r\n#Convolution\r\nconv1 = Convolution2D(32, kernel_size=(5, 5), activation='relu', padding='same')(stn)\r\nconv2 = Convolution2D(32, kernel_size=(5, 5), activation='relu', padding='same')(conv1)\r\n#Pooling\r\npool1 = MaxPooling2D(pool_size=(2, 2))(conv2)\r\n\r\n#Dropout\r\ndropout1 = Dropout(.25)(pool1)\r\n\r\n#Convolution\r\nconv6 = Convolution2D(64, kernel_size=(5, 5), activation='relu', padding='same')(dropout1)\r\nconv7 = Convolution2D(64, kernel_size=(3, 3), activation='relu', padding='same')(conv6)\r\n\r\nconv8 = Convolution2D(64, kernel_size=(5, 5), activation='relu', padding='same')(dropout1)\r\nconv9 = 
Convolution2D(64, kernel_size=(3, 3), activation='relu', padding='same')(conv8)\r\n\r\n#Merge\r\nmerge = concatenate([conv7, conv9])\r\n\r\n#Pooling\r\npool2 = MaxPooling2D(pool_size=(2, 2))(merge)\r\n\r\n#Dropout\r\ndropout2 = Dropout(.25)(pool2)\r\n\r\n#Flatten\r\nflatten = Flatten()(dropout2)\r\n\r\n#Dense\r\nfully = Dense(units=1280, activation='relu')(flatten)\r\n\r\n#Final\r\noutput = Dense(units=nb_classes, activation='softmax')(fully)\r\n\r\n#Training\r\nmodel = Model(inputs=visible, outputs=output)\r\n\r\nprint(model.summary())\r\n\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\nXX = model.input\r\nYY = model.layers[0].output\r\nF = K.function([XX], [YY])\r\n\r\nnb_epochs = 100\r\nbatch_size = 86\r\nfig = plt.figure()\r\n\r\n\r\ncheckpoint = ModelCheckpoint('stn.h5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')\r\nhistory = model.fit( X_train, y_train, validation_data = (X_test, y_test), epochs = nb_epochs, batch_size = batch_size, callbacks=[checkpoint] )\r\n\r\n#model.load_weights('borno.h5')\r\n#model.fit( X_train, y_train, validation_data = (X_test, y_test), epochs = nb_epochs, batch_size = batch_size)\r\n\r\n# Plot training & validation accuracy values\r\nplt.plot(history.history['acc'])\r\nplt.plot(history.history['val_acc'])\r\nplt.title('Model accuracy')\r\nplt.ylabel('Accuracy')\r\nplt.xlabel('Epoch')\r\nplt.legend(['Train', 'Test'], loc='upper left')\r\nplt.show()\r\n\r\n# Plot training & validation loss values\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.title('Model loss')\r\nplt.ylabel('Loss')\r\nplt.xlabel('Epoch')\r\nplt.legend(['Train', 'Test'], loc='upper left')\r\nplt.show()\r\n\r\n# Plotting Confusion Matrix\r\ny_pred = model.predict(X_test)\r\ny_pred = (y_pred > 0.5)\r\n\r\ncm = confusion_matrix(y_test.argmax(axis=1), y_pred.argmax(axis=1))\r\nnp.set_printoptions(precision=2)\r\n\r\n\r\n# Plot non-normalized confusion matrix\r\nplt.figure()\r\nplot_confusion_matrix(cm, classes=class_names, title='Non-Normalized')\r\n\r\n# Plot normalized confusion matrix\r\nplt.figure()\r\nplot_confusion_matrix(cm, classes=class_names, normalize=True, title='Normalized')\r\nplt.show()\r\n","sub_path":"version_11.py","file_name":"version_11.py","file_ext":"py","file_size_in_byte":8033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"60019077","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport webapp2\nimport os\n\n#import sys to define the path to your tweepy library\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"lib/tweepy\"))\nimport tweepy\n\n#we use the sessions module provided directly by webapp2. 
In production code make sure to store your secret key in a secure place\n\nfrom webapp2_extras import sessions\n\nconfig = {}\nconfig['webapp2_extras.sessions'] = {\n    'secret_key': 'yoursecretkey',\n} \n\nclass Handler(webapp2.RequestHandler):\n    #just a method to write shorter code\n    def write(self, *a, **kw):\n        self.response.out.write(*a, **kw)\n\n\n    def render(self, template, **kw):\n        self.write(self.render_str(template, **kw))\n    \n\n    #again it would be safer to put our consumer key and consumer secret in a secure place \n    def twitterconnect(self):\n        consumer_key=\"yourconsumerkey\"\n        consumer_secret=\"yourconsumersecret\"\n        \n        \n        return tweepy.OAuthHandler(consumer_key, consumer_secret)\n    \n    #webapp2 session method\n    def dispatch(self):\n        # Get a session store for this request.\n        self.session_store = sessions.get_store(request=self.request)\n\n        try:\n            # Dispatch the request.\n            webapp2.RequestHandler.dispatch(self)\n        finally:\n            # Save all sessions.\n            self.session_store.save_sessions(self.response)\n\n    @webapp2.cached_property\n    def session(self):\n        # Returns a session using the default cookie key.\n        return self.session_store.get_session()\n\n    \n\n\n\nclass OauthHandler(Handler):\n    def get(self):\n\n        \n\n        auth = self.twitterconnect()\n        #this line makes sure tweepy connects with ssl\n        auth.secure = True\n\n        try: \n            #we specify signin with twitter so twitter doesn't ask for permissions every time\n            redirect_url = auth.get_authorization_url(signin_with_twitter=True)\n            #we store the request token in a session because we will need it on the callback\n            self.session['request_token'] = (auth.request_token.key,auth.request_token.secret)\n            self.redirect(redirect_url)\n        except tweepy.TweepError:\n            self.write('Error! Failed to get request token.')\n            return\n\n#the callback URL where the user is directed after the Twitter log in\nclass CallBackHandler(Handler):\n    def get(self):\n        \n        \n        \n        \n        \n        token = self.session.get('request_token')\n        \n        \n        \n        \n        #twitter is sending us the oauth verifier as a get parameter\n        verifier = self.request.get('oauth_verifier') \n        auth = self.twitterconnect()\n        #again make sure to use ssl or it will fail\n        auth.secure = True\n        if token is not None:\n            auth.set_request_token(token[0], token[1])\n        else:\n            self.write(\"no token found\")\n        try:\n            auth.get_access_token(verifier)\n        except tweepy.TweepError:\n            self.write(\"error\")\n            return\n        \n        api = tweepy.API(auth)\n        api.verify_credentials()\n        if not api:\n            self.write(\"connection failed\")\n            return\n        \n        self.session['username']=api.me().screen_name\n        \n        self.write(\"Welcome \" + self.session['username'])\n        \n        \n        \n        \n        \n        \n        \n\n\n\n\n\n\napp = webapp2.WSGIApplication([ ('/tweepyconnection', OauthHandler),('/tweepyconnection/callback.*', CallBackHandler)], debug=True,config=config)","sub_path":"tweepylogin.py","file_name":"tweepylogin.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148596106","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'arkadiy'\nfrom CGRtools.RDFread import RDFread\nfrom rdf_parser import rdf_parser\nfrom reaction_treatment import Add_Reaction\nfrom reaction_treatment import Add_Conditions\nfrom pgtools.models import PGdb\n\nclass Reactions():\n    def __init__(self, args):\n        inputdata = RDFread(args.input)\n        stand = rdf_parser(args.dictionary)\n        reactions = Add_Reaction(args.configuration_file)\n        conditions = Add_Conditions(args.fields)\n        for num, data in 
enumerate(inputdata.readdata()):\n            if num % 1000==0:\n                print(\"reaction: %d\" % (num + 1))\n            fixed_data = stand.get_standardized(data, args.fields)\n            structure_id = reactions.add_Structure(fixed_data)\n            if conditions.addConditions(fixed_data, structure_id):\n                print(\"Conditions have been added successfully!\")\n            else:\n                print(\"There is an error in conditions addition!\")\n","sub_path":"Reactions.py","file_name":"Reactions.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"420813523","text":"#pylint: disable=R0903\nimport factory\n\nimport pyrob.schema.stargus\n\n\nclass CmtsHourCpuStats(factory.alchemy.SQLAlchemyModelFactory):\n    class Meta:\n        model = pyrob.schema.stargus.CmtsHourCpuStats\n        sqlalchemy_session = pyrob.db.SessionS\n\n    market = None\n    hub = None\n    cmts_name = None\n    hour = None\n    resets = None\n    cpu = None\n    updated = None\n","sub_path":"pyrob/schema/factory/stargus/cmts_hour_cpu_stats.py","file_name":"cmts_hour_cpu_stats.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"321930922","text":"from flask import Flask, request\nfrom flask_restplus import Resource, Api\n\nimport errors\nimport corpus\n\napp = Flask(__name__)\napi = Api(app)\ncorpus_ns = api.namespace('corpus', description='corpus operations')\nstopwords_ns = api.namespace('stopwords', description='stop words operations')\nsubstwords_ns = api.namespace('substwords', description='word substitutions operations')\n\nCORPUS_DOC_LIMIT = 100  # assumed default page size for doc listings; the constant was otherwise undefined\n\n@corpus_ns.route('/')\nclass CorpusListing(Resource):\n\n    @corpus_ns.doc('list corpora')\n    def get(self):\n        try:\n            status, result = corpus.get_corpora()\n            return result, status\n        except errors.ApiException as e:\n            return e.response()\n\n@corpus_ns.route('/<name>')\nclass Corpus(Resource):\n\n    @corpus_ns.doc('gets a corpus')\n    def get(self, name):\n        try:\n            status, result = corpus.get_corpus(name)\n            return result, status\n        except errors.ApiException as e:\n            return e.response()\n\n    @corpus_ns.doc('creates a corpus')\n    def post(self, name):\n        try:\n            description = api.payload.get('description', '')\n            status, result = corpus.save_corpus(name, description, updating=False)\n            return result, status\n        except errors.ApiException as e:\n            return e.response()\n\n    @corpus_ns.doc('updates a corpus')\n    def put(self, name):\n        try:\n            description = api.payload.get('description', '')\n            status, result = corpus.save_corpus(name, description, updating=True)\n            return result, status\n        except errors.ApiException as e:\n            return e.response()\n\n    @corpus_ns.doc('deletes a corpus')\n    def delete(self, name):\n        try:\n            status, result = corpus.delete_corpus(name)\n            return result, status\n        except errors.ApiException as e:\n            return e.response()\n\n@corpus_ns.route('/<name>/docs')\nclass CorpusDocListing(Resource):\n\n    @corpus_ns.doc('lists docs in a corpus')\n    def get(self, name):\n        try:\n            limit = int(request.args.get('limit', CORPUS_DOC_LIMIT))\n            if limit < 1:\n                raise errors.ApiException(400, 'invalid limit')\n            after_id = request.args.get(\"after_id\", \"\") \n            status, result = corpus.list_docs(name, limit, after_id)\n            return result, status\n        except errors.ApiException as e:\n            return e.response()\n\n@corpus_ns.route('/<name>/docs/<doc_id>')\nclass CorpusDoc(Resource):\n\n    @corpus_ns.doc('creates a document')\n    def post(self, name, doc_id):\n        try:\n            status, result = corpus.save_doc(name, doc_id, api.payload, updating=False)\n            return result, status\n        except errors.ApiException 
as e:\n            return e.response()\n\n    @corpus_ns.doc('deletes a document')\n    def delete(self, name, doc_id):\n        try:\n            status, result = corpus.delete_doc(name, doc_id)\n            return result, status\n        except errors.ApiException as e:\n            return e.response()\n\n    @corpus_ns.doc('updates a document')\n    def put(self, name, doc_id):\n        try:\n            status, result = corpus.save_doc(name, doc_id, api.payload, updating=True)\n            return result, status\n        except errors.ApiException as e:\n            return e.response()\n\n@stopwords_ns.route('/')\nclass StopWords(Resource):\n\n    @stopwords_ns.doc('list stop words files')\n    def get(self):\n        try:\n            raise errors.ApiException(400, 'not implemented')\n        except errors.ApiException as e:\n            return e.response()\n\n@substwords_ns.route('/')\nclass SubstWords(Resource):\n\n    @substwords_ns.doc('list word substitution files')\n    def get(self):\n        try:\n            raise errors.ApiException(400, 'not implemented')\n        except errors.ApiException as e:\n            return e.response()\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"8154185","text":"# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport proto  # type: ignore\n\nfrom google.cloud.gaming_v1.types import common\nfrom google.protobuf import field_mask_pb2  # type: ignore\nfrom google.protobuf import timestamp_pb2  # type: ignore\n\n\n__protobuf__ = proto.module(\n    package='google.cloud.gaming.v1',\n    manifest={\n        'GameServerClusterView',\n        'ListGameServerClustersRequest',\n        'ListGameServerClustersResponse',\n        'GetGameServerClusterRequest',\n        'CreateGameServerClusterRequest',\n        'PreviewCreateGameServerClusterRequest',\n        'PreviewCreateGameServerClusterResponse',\n        'DeleteGameServerClusterRequest',\n        'PreviewDeleteGameServerClusterRequest',\n        'PreviewDeleteGameServerClusterResponse',\n        'UpdateGameServerClusterRequest',\n        'PreviewUpdateGameServerClusterRequest',\n        'PreviewUpdateGameServerClusterResponse',\n        'GameServerClusterConnectionInfo',\n        'GkeClusterReference',\n        'GameServerCluster',\n        'KubernetesClusterState',\n    },\n)\n\n\nclass GameServerClusterView(proto.Enum):\n    r\"\"\"A view for GameServerCluster objects.\"\"\"\n    GAME_SERVER_CLUSTER_VIEW_UNSPECIFIED = 0\n    BASIC = 1\n    FULL = 2\n\n\nclass ListGameServerClustersRequest(proto.Message):\n    r\"\"\"Request message for\n    GameServerClustersService.ListGameServerClusters.\n\n    Attributes:\n        parent (str):\n            Required. The parent resource name, in the\n            following form:\n            \"projects/{project}/locations/{location}/realms/{realm}\".\n        page_size (int):\n            Optional. The maximum number of items to return. If\n            unspecified, the server will pick an appropriate default.\n            The server may return fewer items than requested. 
A caller\n should only rely on response's\n [next_page_token][google.cloud.gaming.v1.ListGameServerClustersResponse.next_page_token]\n to determine if there are more GameServerClusters left to be\n queried.\n page_token (str):\n Optional. The next_page_token value returned from a previous\n List request, if any.\n filter (str):\n Optional. The filter to apply to list\n results.\n order_by (str):\n Optional. Specifies the ordering of results following syntax\n at\n https://cloud.google.com/apis/design/design_patterns#sorting_order.\n view (google.cloud.gaming_v1.types.GameServerClusterView):\n Optional. View for the returned GameServerCluster objects.\n When ``FULL`` is specified, the ``cluster_state`` field is\n also returned in the GameServerCluster object, which\n includes the state of the referenced Kubernetes cluster such\n as versions and provider info. The default/unset value is\n GAME_SERVER_CLUSTER_VIEW_UNSPECIFIED, same as BASIC, which\n does not return the ``cluster_state`` field.\n \"\"\"\n\n parent = proto.Field(\n proto.STRING,\n number=1,\n )\n page_size = proto.Field(\n proto.INT32,\n number=2,\n )\n page_token = proto.Field(\n proto.STRING,\n number=3,\n )\n filter = proto.Field(\n proto.STRING,\n number=4,\n )\n order_by = proto.Field(\n proto.STRING,\n number=5,\n )\n view = proto.Field(\n proto.ENUM,\n number=6,\n enum='GameServerClusterView',\n )\n\n\nclass ListGameServerClustersResponse(proto.Message):\n r\"\"\"Response message for\n GameServerClustersService.ListGameServerClusters.\n\n Attributes:\n game_server_clusters (Sequence[google.cloud.gaming_v1.types.GameServerCluster]):\n The list of game server clusters.\n next_page_token (str):\n Token to retrieve the next page of results,\n or empty if there are no more results in the\n list.\n unreachable (Sequence[str]):\n List of locations that could not be reached.\n \"\"\"\n\n @property\n def raw_page(self):\n return self\n\n game_server_clusters = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message='GameServerCluster',\n )\n next_page_token = proto.Field(\n proto.STRING,\n number=2,\n )\n unreachable = proto.RepeatedField(\n proto.STRING,\n number=4,\n )\n\n\nclass GetGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.GetGameServerCluster.\n\n Attributes:\n name (str):\n Required. The name of the game server cluster to retrieve,\n in the following form:\n ``projects/{project}/locations/{location}/realms/{realm-id}/gameServerClusters/{cluster}``.\n view (google.cloud.gaming_v1.types.GameServerClusterView):\n Optional. View for the returned GameServerCluster objects.\n When ``FULL`` is specified, the ``cluster_state`` field is\n also returned in the GameServerCluster object, which\n includes the state of the referenced Kubernetes cluster such\n as versions and provider info. The default/unset value is\n GAME_SERVER_CLUSTER_VIEW_UNSPECIFIED, same as BASIC, which\n does not return the ``cluster_state`` field.\n \"\"\"\n\n name = proto.Field(\n proto.STRING,\n number=1,\n )\n view = proto.Field(\n proto.ENUM,\n number=6,\n enum='GameServerClusterView',\n )\n\n\nclass CreateGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.CreateGameServerCluster.\n\n Attributes:\n parent (str):\n Required. The parent resource name, in the following form:\n ``projects/{project}/locations/{location}/realms/{realm-id}``.\n game_server_cluster_id (str):\n Required. 
The ID of the game server cluster\n resource to be created.\n game_server_cluster (google.cloud.gaming_v1.types.GameServerCluster):\n Required. The game server cluster resource to\n be created.\n \"\"\"\n\n parent = proto.Field(\n proto.STRING,\n number=1,\n )\n game_server_cluster_id = proto.Field(\n proto.STRING,\n number=2,\n )\n game_server_cluster = proto.Field(\n proto.MESSAGE,\n number=3,\n message='GameServerCluster',\n )\n\n\nclass PreviewCreateGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.PreviewCreateGameServerCluster.\n\n Attributes:\n parent (str):\n Required. The parent resource name, in the following form:\n ``projects/{project}/locations/{location}/realms/{realm}``.\n game_server_cluster_id (str):\n Required. The ID of the game server cluster\n resource to be created.\n game_server_cluster (google.cloud.gaming_v1.types.GameServerCluster):\n Required. The game server cluster resource to\n be created.\n preview_time (google.protobuf.timestamp_pb2.Timestamp):\n Optional. The target timestamp to compute the\n preview.\n view (google.cloud.gaming_v1.types.GameServerClusterView):\n Optional. This field is deprecated, preview\n will always return KubernetesClusterState.\n \"\"\"\n\n parent = proto.Field(\n proto.STRING,\n number=1,\n )\n game_server_cluster_id = proto.Field(\n proto.STRING,\n number=2,\n )\n game_server_cluster = proto.Field(\n proto.MESSAGE,\n number=3,\n message='GameServerCluster',\n )\n preview_time = proto.Field(\n proto.MESSAGE,\n number=4,\n message=timestamp_pb2.Timestamp,\n )\n view = proto.Field(\n proto.ENUM,\n number=6,\n enum='GameServerClusterView',\n )\n\n\nclass PreviewCreateGameServerClusterResponse(proto.Message):\n r\"\"\"Response message for\n GameServerClustersService.PreviewCreateGameServerCluster.\n\n Attributes:\n etag (str):\n The ETag of the game server cluster.\n target_state (google.cloud.gaming_v1.types.TargetState):\n The target state.\n cluster_state (google.cloud.gaming_v1.types.KubernetesClusterState):\n Output only. The state of the Kubernetes cluster in preview,\n this will be available if 'view' is set to ``FULL`` in the\n relevant List/Get/Preview request.\n \"\"\"\n\n etag = proto.Field(\n proto.STRING,\n number=2,\n )\n target_state = proto.Field(\n proto.MESSAGE,\n number=3,\n message=common.TargetState,\n )\n cluster_state = proto.Field(\n proto.MESSAGE,\n number=4,\n message='KubernetesClusterState',\n )\n\n\nclass DeleteGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.DeleteGameServerCluster.\n\n Attributes:\n name (str):\n Required. The name of the game server cluster to delete, in\n the following form:\n ``projects/{project}/locations/{location}/gameServerClusters/{cluster}``.\n \"\"\"\n\n name = proto.Field(\n proto.STRING,\n number=1,\n )\n\n\nclass PreviewDeleteGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.PreviewDeleteGameServerCluster.\n\n Attributes:\n name (str):\n Required. The name of the game server cluster to delete, in\n the following form:\n ``projects/{project}/locations/{location}/gameServerClusters/{cluster}``.\n preview_time (google.protobuf.timestamp_pb2.Timestamp):\n Optional. 
The target timestamp to compute the\n preview.\n \"\"\"\n\n name = proto.Field(\n proto.STRING,\n number=1,\n )\n preview_time = proto.Field(\n proto.MESSAGE,\n number=2,\n message=timestamp_pb2.Timestamp,\n )\n\n\nclass PreviewDeleteGameServerClusterResponse(proto.Message):\n r\"\"\"Response message for\n GameServerClustersService.PreviewDeleteGameServerCluster.\n\n Attributes:\n etag (str):\n The ETag of the game server cluster.\n target_state (google.cloud.gaming_v1.types.TargetState):\n The target state.\n \"\"\"\n\n etag = proto.Field(\n proto.STRING,\n number=2,\n )\n target_state = proto.Field(\n proto.MESSAGE,\n number=3,\n message=common.TargetState,\n )\n\n\nclass UpdateGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.UpdateGameServerCluster.\n\n Attributes:\n game_server_cluster (google.cloud.gaming_v1.types.GameServerCluster):\n Required. The game server cluster to be updated. Only fields\n specified in update_mask are updated.\n update_mask (google.protobuf.field_mask_pb2.FieldMask):\n Required. Mask of fields to update. At least one path must\n be supplied in this field. For the ``FieldMask`` definition,\n see\n https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask\n \"\"\"\n\n game_server_cluster = proto.Field(\n proto.MESSAGE,\n number=1,\n message='GameServerCluster',\n )\n update_mask = proto.Field(\n proto.MESSAGE,\n number=2,\n message=field_mask_pb2.FieldMask,\n )\n\n\nclass PreviewUpdateGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.UpdateGameServerCluster.\n\n Attributes:\n game_server_cluster (google.cloud.gaming_v1.types.GameServerCluster):\n Required. The game server cluster to be updated. Only fields\n specified in update_mask are updated.\n update_mask (google.protobuf.field_mask_pb2.FieldMask):\n Required. Mask of fields to update. At least one path must\n be supplied in this field. For the ``FieldMask`` definition,\n see\n https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask\n preview_time (google.protobuf.timestamp_pb2.Timestamp):\n Optional. The target timestamp to compute the\n preview.\n \"\"\"\n\n game_server_cluster = proto.Field(\n proto.MESSAGE,\n number=1,\n message='GameServerCluster',\n )\n update_mask = proto.Field(\n proto.MESSAGE,\n number=2,\n message=field_mask_pb2.FieldMask,\n )\n preview_time = proto.Field(\n proto.MESSAGE,\n number=3,\n message=timestamp_pb2.Timestamp,\n )\n\n\nclass PreviewUpdateGameServerClusterResponse(proto.Message):\n r\"\"\"Response message for\n GameServerClustersService.PreviewUpdateGameServerCluster\n\n Attributes:\n etag (str):\n The ETag of the game server cluster.\n target_state (google.cloud.gaming_v1.types.TargetState):\n The target state.\n \"\"\"\n\n etag = proto.Field(\n proto.STRING,\n number=2,\n )\n target_state = proto.Field(\n proto.MESSAGE,\n number=3,\n message=common.TargetState,\n )\n\n\nclass GameServerClusterConnectionInfo(proto.Message):\n r\"\"\"The game server cluster connection information.\n Attributes:\n gke_cluster_reference (google.cloud.gaming_v1.types.GkeClusterReference):\n Reference to the GKE cluster where the game\n servers are installed.\n namespace (str):\n Namespace designated on the game server\n cluster where the Agones game server instances\n will be created. 
Existence of the namespace will\n be validated during creation.\n \"\"\"\n\n gke_cluster_reference = proto.Field(\n proto.MESSAGE,\n number=7,\n oneof='cluster_reference',\n message='GkeClusterReference',\n )\n namespace = proto.Field(\n proto.STRING,\n number=5,\n )\n\n\nclass GkeClusterReference(proto.Message):\n r\"\"\"A reference to a GKE cluster.\n Attributes:\n cluster (str):\n The full or partial name of a GKE cluster, using one of the\n following forms:\n\n - ``projects/{project}/locations/{location}/clusters/{cluster}``\n - ``locations/{location}/clusters/{cluster}``\n - ``{cluster}`` If project and location are not specified,\n the project and location of the GameServerCluster\n resource are used to generate the full name of the GKE\n cluster.\n \"\"\"\n\n cluster = proto.Field(\n proto.STRING,\n number=1,\n )\n\n\nclass GameServerCluster(proto.Message):\n r\"\"\"A game server cluster resource.\n Attributes:\n name (str):\n Required. The resource name of the game server cluster, in\n the following form:\n ``projects/{project}/locations/{location}/realms/{realm}/gameServerClusters/{cluster}``.\n For example,\n ``projects/my-project/locations/{location}/realms/zanzibar/gameServerClusters/my-onprem-cluster``.\n create_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The creation time.\n update_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The last-modified time.\n labels (Sequence[google.cloud.gaming_v1.types.GameServerCluster.LabelsEntry]):\n The labels associated with this game server\n cluster. Each label is a key-value pair.\n connection_info (google.cloud.gaming_v1.types.GameServerClusterConnectionInfo):\n The game server cluster connection\n information. This information is used to manage\n game server clusters.\n etag (str):\n ETag of the resource.\n description (str):\n Human readable description of the cluster.\n cluster_state (google.cloud.gaming_v1.types.KubernetesClusterState):\n Output only. The state of the Kubernetes cluster, this will\n be available if 'view' is set to ``FULL`` in the relevant\n List/Get/Preview request.\n \"\"\"\n\n name = proto.Field(\n proto.STRING,\n number=1,\n )\n create_time = proto.Field(\n proto.MESSAGE,\n number=2,\n message=timestamp_pb2.Timestamp,\n )\n update_time = proto.Field(\n proto.MESSAGE,\n number=3,\n message=timestamp_pb2.Timestamp,\n )\n labels = proto.MapField(\n proto.STRING,\n proto.STRING,\n number=4,\n )\n connection_info = proto.Field(\n proto.MESSAGE,\n number=5,\n message='GameServerClusterConnectionInfo',\n )\n etag = proto.Field(\n proto.STRING,\n number=6,\n )\n description = proto.Field(\n proto.STRING,\n number=7,\n )\n cluster_state = proto.Field(\n proto.MESSAGE,\n number=11,\n message='KubernetesClusterState',\n )\n\n\nclass KubernetesClusterState(proto.Message):\n r\"\"\"The state of the Kubernetes cluster.\n Attributes:\n agones_version_installed (str):\n Output only. The version of Agones currently\n installed in the registered Kubernetes cluster.\n kubernetes_version_installed (str):\n Output only. The version of Kubernetes that\n is currently used in the registered Kubernetes\n cluster (as detected by the Cloud Game Servers\n service).\n installation_state (google.cloud.gaming_v1.types.KubernetesClusterState.InstallationState):\n Output only. The state for the installed\n versions of Agones/Kubernetes.\n version_installed_error_message (str):\n Output only. The detailed error message for\n the installed versions of Agones/Kubernetes.\n provider (str):\n Output only. 
The cloud provider type reported\n by the first node's providerID in the list of\n nodes on the Kubernetes endpoint. On Kubernetes\n platforms that support zero-node clusters (like\n GKE-on-GCP), the provider type will be empty.\n agones_version_targeted (str):\n Output only. The version of Agones that is\n targeted to be installed in the cluster.\n \"\"\"\n class InstallationState(proto.Enum):\n r\"\"\"The state of the installed versions of Agones/Kubernetes. See\n also https://cloud.google.com/game-servers/docs/versions-and-\n upgrades.\n \"\"\"\n INSTALLATION_STATE_UNSPECIFIED = 0\n AGONES_KUBERNETES_VERSION_SUPPORTED = 1\n AGONES_VERSION_UNSUPPORTED = 2\n AGONES_KUBERNETES_VERSION_UNSUPPORTED = 3\n AGONES_VERSION_UNRECOGNIZED = 4\n KUBERNETES_VERSION_UNRECOGNIZED = 5\n VERSION_VERIFICATION_FAILED = 6\n AGONES_NOT_INSTALLED = 7\n\n agones_version_installed = proto.Field(\n proto.STRING,\n number=1,\n )\n kubernetes_version_installed = proto.Field(\n proto.STRING,\n number=2,\n )\n installation_state = proto.Field(\n proto.ENUM,\n number=3,\n enum=InstallationState,\n )\n version_installed_error_message = proto.Field(\n proto.STRING,\n number=4,\n )\n provider = proto.Field(\n proto.STRING,\n number=5,\n )\n agones_version_targeted = proto.Field(\n proto.STRING,\n number=6,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"google/cloud/gaming/v1/gaming-v1-py/google/cloud/gaming_v1/types/game_server_clusters.py","file_name":"game_server_clusters.py","file_ext":"py","file_size_in_byte":19938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"607897662","text":"from flask import Flask, render_template\n\napp=Flask(__name__)\n\n@app.route('/')\ndef home():\n\treturn render_template(\"home.html\")\n\n@app.route('/plot/')\ndef plot():\n\tfrom pandas_datareader import data\n\timport datetime\n\tfrom dateutil.relativedelta import relativedelta\n\tfrom bokeh.plotting import figure, show, output_file\n\tfrom bokeh.embed import components\n\tfrom bokeh.resources import CDN\n\n\tstart = datetime.date.today() - relativedelta(months=1)\n\tend = datetime.date.today()\n\n\tdf = data.DataReader(name='AMZN', data_source = 'yahoo', start=start, end=end)\n\n\tdef inc_dec(c, o):\n\t if c > o:\n\t value = 1\n\t elif c <= o:\n\t value = 0\n\t return value\n\n\tdf['status']=[inc_dec(c,o) for c,o in zip(df.Close,df.Open)]\n\tdf['mid']=(df.Open+df.Close)/2\n\tdf['height']=abs(df.Open-df.Close)\n\n\tp=figure(x_axis_type='datetime', width=1000, height=300,\n\t\tresponsive=True, toolbar_location = 'above')\n\tp.title.text='Candlestick Chart: AMZN from %s to %s' % (start,end)\n\tp.grid.grid_line_alpha=0.3\n\n\thour_12 = 12*60*60*1000\n\n\tp.segment(df.index, df.Low, df.index, df.High, line_color='black')\n\n\tp.rect(df.index[df.status==1], df.mid[df.status==1], hour_12, df.height[df.status==1],\n\t\tfill_color='silver', line_color='black')\n\tp.rect(df.index[df.status==0], df.mid[df.status==0], hour_12, df.height[df.status==0],\n\t\tfill_color='tomato', line_color='black')\n\n\tscript, div = components(p)\n\tcdn_js = CDN.js_files[0]\n\tcdn_css = CDN.css_files[0]\n\n\treturn render_template(\"plot.html\", script=script, div=div,\n\t\tcdn_js=cdn_js,cdn_css=cdn_css)\n\n@app.route('/about/')\ndef about():\n\treturn render_template(\"about.html\")\n\nif 
__name__==\"__main__\":\n\tapp.run(debug=True)","sub_path":"flask_practice.py","file_name":"flask_practice.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"64834580","text":"import sys\r\n\r\n\r\nclass stack_function:\r\n\r\n def __init__(self):\r\n\r\n self.stack = []\r\n\r\n def push(self,X):\r\n\r\n self.stack.append(X)\r\n\r\n def pop(self):\r\n\r\n if len(self.stack) == 0:\r\n\r\n return -1\r\n\r\n else:\r\n\r\n return self.stack.pop()\r\n\r\n def size(self):\r\n\r\n return len(self.stack)\r\n\r\n def empty(self):\r\n\r\n if len(self.stack) == 0:\r\n\r\n return 1\r\n\r\n else:\r\n\r\n return 0\r\n\r\n def top(self):\r\n\r\n if len(self.stack) == 0:\r\n\r\n return -1\r\n\r\n else:\r\n\r\n return self.stack[-1]\r\n\r\n\r\nT = int(sys.stdin.readline())\r\n\r\na = stack_function()\r\n\r\nbox = []\r\n\r\nfor i in range(T):\r\n\r\n func = sys.stdin.readline().split()\r\n\r\n order = func[0]\r\n\r\n if order ==\"push\":\r\n\r\n value = func[1]\r\n\r\n a.push(value)\r\n\r\n elif order ==\"pop\":\r\n\r\n box.append(a.pop())\r\n\r\n elif order ==\"top\":\r\n\r\n box.append(a.top())\r\n\r\n elif order ==\"size\":\r\n\r\n box.append(a.size())\r\n\r\n elif order ==\"empty\":\r\n\r\n box.append(a.empty())\r\n\r\n\r\nfor i in box:\r\n\r\n print(i)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"백준_10828_스택_S4.py","file_name":"백준_10828_스택_S4.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"488668256","text":"import numpy as np\nimport random\n\n\nNUM_TO_TOKEN = {\n 1: 'x',\n -1: 'o',\n 0: ' '\n}\nTOKEN_TO_NUM = {token: num for num, token in NUM_TO_TOKEN.items()}\n\n\nclass Environment(object):\n def __init__(self):\n self.board = np.zeros((3, 3))\n self.winner = 0\n \n def is_empty(self, i, j):\n return self.board[i][j] == 0\n \n def play(self, i, j, num):\n self.board[i][j] = num\n \n def try_play(self, i, j, num):\n self.board[i][j] = num\n h = self.state_hash()\n self.board[i][j] = 0\n return h\n \n def state_hash(self):\n h = 0.\n for i in range(3):\n for j in range(3):\n bit = i * 3 + j\n h += (self.board[i][j] + 1) * (3 ** bit)\n return int(h)\n \n def get_winner(self, force_recalculation=False):\n if self.winner and not force_recalculation:\n return self.winner\n \n self.winner = 0\n \n for i in range(3):\n s = np.sum(self.board[i])\n if s == -3 or s == 3:\n self.winner = s / 3\n \n for j in range(3):\n s = np.sum(self.board[:,j])\n if s == -3 or s == 3:\n self.winner = s / 3\n \n s = np.trace(self.board)\n if s == -3 or s == 3:\n self.winner = s / 3\n \n s = np.trace(np.fliplr(self.board))\n if s == -3 or s == 3:\n self.winner = s / 3\n \n return self.winner\n \n def game_over(self):\n return self.get_winner() or 0 not in self.board\n \n def draw(self):\n for i in range(3):\n print('---------')\n print(' ' \n + ' '.join([NUM_TO_TOKEN[num] \n for num in self.board[i]])\n + ' ')\n print('---------')\n \n def reset(self):\n self.board = np.zeros((3, 3))\n self.winner = 0\n \n def find_winning_positions(self, i=0, j=0, winners=None):\n if winners is None:\n winners = np.zeros(3**9)\n if i == 3:\n winners[self.state_hash()] = self.get_winner(force_recalculation=True)\n return winners\n for fill in range(-1, 2):\n self.board[i][j] = fill\n if j == 2:\n self.find_winning_positions(i+1, 0, winners)\n else:\n self.find_winning_positions(i, j+1, winners)\n return winners\n \n \nclass 
Agent(object):\n def __init__(self, token):\n self.num = TOKEN_TO_NUM[token]\n self._initialize_values()\n self.history = []\n self.last_state_hash = None\n \n def _initialize_values(self):\n winning_positions = Environment().find_winning_positions()\n agent_wins = np.where(winning_positions == self.num, 1, 0)\n agent_loses = np.where(winning_positions == -self.num, -1, 0)\n wins_or_loses = agent_wins + agent_loses\n self.values = np.where(wins_or_loses == 0, 0.0, 0) + wins_or_loses\n \n def play(self, env, epsilon, verbose=False):\n starting_state_hash = env.state_hash()\n if self.last_state_hash is not None:\n self.history.append((\n self.last_state_hash,\n starting_state_hash\n ))\n r = np.random.random()\n possible_plays = []\n for i in range(3):\n for j in range(3):\n if env.is_empty(i, j):\n possible_plays.append((i, j))\n if r < epsilon:\n if verbose:\n print('AI is making random move, cause why the fuck not')\n i, j = random.choice(possible_plays)\n else:\n play_values = []\n best_play = None\n best_play_value = -float('inf')\n for i, j in possible_plays:\n play_hash = env.try_play(i, j, self.num)\n play_value = self.values[play_hash]\n play_values.append((i, j, play_hash, round(play_value, 3)))\n if play_value > best_play_value:\n best_play = i, j\n best_play_value = play_value\n if verbose:\n print('AI is using the following values: ' + str(play_values))\n i, j = best_play\n env.play(i, j, self.num)\n final_state_hash = env.state_hash()\n self.history.append((\n starting_state_hash,\n final_state_hash\n ))\n self.last_state_hash = final_state_hash\n \n def update(self, env, learning_rate):\n final_state_hash = env.state_hash()\n winner = env.get_winner()\n reward = winner / self.num\n reward = reward if reward == 1 else 0\n self.values[final_state_hash] = reward\n if final_state_hash != self.last_state_hash:\n self.history.append((\n self.last_state_hash,\n final_state_hash\n ))\n for start, final in reversed(self.history):\n self.values[start] = self.values[start] + learning_rate * (self.values[final] - self.values[start])\n self.history = []\n \nclass Human(object):\n \n def __init__(self, token):\n self.num = TOKEN_TO_NUM[token]\n \n def play(self, env, *args, **kwargs):\n legal_move = False\n while not legal_move:\n move = input(\"Enter your move in coordinates i,j: \")\n i, j = move.split(',')\n i = int(i.strip())\n j = int(j.strip())\n legal_move = env.is_empty(i, j)\n env.play(i, j, self.num)\n \n def update(self, *args):\n pass\n \n \ndef play_game(env, player1, player2, learning_rate=0.01, epsilon=0.01, verbose=True):\n env.reset()\n current_player = player1\n while not env.game_over():\n if verbose: \n env.draw()\n current_player.play(env, epsilon, verbose=verbose)\n if current_player is player1:\n current_player = player2\n else:\n current_player = player1\n if verbose: \n env.draw()\n print('GAME OVER!')\n player1.update(env, learning_rate)\n player2.update(env, learning_rate)\n \n\nif __name__ == '__main__':\n env = Environment()\n agent1 = Agent('x')\n agent2 = Agent('o')\n for i in range(10000):\n if i and i % 100 == 0:\n print(i)\n play_game(env, agent1, agent2, epsilon=0.2, learning_rate=0.5, verbose=False)\n \"\"\"\n agent3 = Agent('o')\n for i in range(10000):\n if i and i % 100 == 0:\n print(i)\n play_game(env, agent1, agent3, epsilon=0.2, learning_rate=0.5, verbose=False)\n for i in range(10000):\n if i and i % 100 == 0:\n print(i)\n play_game(env, agent1, agent2, epsilon=0.2, learning_rate=0.5, verbose=False)\n \"\"\"\n \n human = Human('o')\n play_another = 
True\n while play_another:\n play_game(env, agent1, human)\n decision = input('Would you like to play another game? Y/N: ').strip()\n if decision == 'N':\n play_another = False","sub_path":"Reinforcement_Learning_Intro/tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"14852898","text":"\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nsns.set()\n\n# Load data set and split to train/test\n\nwith np.load('mnist-6k.npz', allow_pickle=False) as npz_file:\n print(npz_file.keys())\n\nwith np.load('mnist-6k.npz', allow_pickle=False) as npz_file:\n X = npz_file['data']\n y = npz_file['labels']\n\nX_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y, test_size=1/6, random_state=0)\nprint(X_tr.shape, X_te.shape, y_tr.shape, y_te.shape)\n\n\n# Dummy classifier\ndummy = DummyClassifier(strategy='most_frequent')\ndummy.fit(X_tr, y_tr)\n\n# Accuracy on test set\naccuracy = dummy.score(X_te, y_te)\nprint('Baseline accuracy: {:.3f}'.format(accuracy))\n\n# k-NN classifier\n\nscaler = StandardScaler()\n\n# grid search for optimal k:\nk_values = np.arange(1, 50, 5)\n\ntest_curve = []\n\nfor k in k_values:\n pipe = Pipeline([\n ('scaler', None), # no scaling of data\n ('knn', KNeighborsClassifier(n_neighbors=k))\n ])\n\n pipe.fit(X_tr, y_tr)\n test_acc = pipe.score(X_te, y_te)\n test_curve.append(test_acc)\n\nplt.plot(k_values, test_curve, label='test')\nplt.legend()\nplt.show()\n\n# Print k with maximum accuracy\nmax_accuracy = max(test_curve)\nbest_k = np.argmax(test_curve) * 5 + 1\nprint('Maximum accuracy: {:.3f} with k={}'.format(max_accuracy, best_k))\n","sub_path":"mnist_knn.py","file_name":"mnist_knn.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"81301921","text":"#!/usr/bin/env python\n# license removed for brevity\nimport numpy as np\nimport rospy\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Float32MultiArray \nfrom std_msgs.msg import Int32\nimport time\n\nimport Tkinter as tk\n\nfrom rospy.numpy_msg import numpy_msg\nfrom rospy_tutorials.msg import Floats\nposture_data=[0,0,0]\ndepth_data = 0\nfc = 0\ntc = 0\n\ndef B_onclick():\n global eList\n for i in range(3):\n posture_data[i] = float(eList[i].get())\n depth = float(eList[3].get())\n fc = int(eList[4].get())\n tc = int(eList[5].get())\n vol = float(eList[6].get())\n but = int(eList[7].get())\n t = float(eList[8].get())\n trigger = int(eList[9].get())\n pos=np.array(posture_data, dtype = np.float32)\n pub1.publish(pos)\n pub2.publish(depth)\n pub3.publish(fc)\n pub4.publish(tc)\n pub5.publish(vol)\n pub6.publish(but)\n pub7.publish(t)\n pub8.publish(trigger)\n\nwin = tk.Tk()\nwin.title('Dummy motor')\n\neList = []\ntext = ['row', 'pitch', 'yaw', 'depth', 'forward command', 'turn command', 'voltage', 'button', 'sumi_t', 'trigger']\nfor i in range(len(text)):\n L = tk.Label(win, text = text[i]).grid(row=i, column=0)\n e = tk.Entry(win)\n eList.append(e)\n e.grid(row=i, column=1)\n e.insert('insert', 0)\nb = tk.Button(win, text = 'publish', command = B_onclick).grid(row = len(text), 
column=0)\n\nrospy.init_node('dummy',anonymous=True)\npub1 = rospy.Publisher('/posture',numpy_msg(Floats),queue_size=10)\npub2 = rospy.Publisher('/depth',Float32,queue_size=10)\npub3 = rospy.Publisher('/forward_command',Int32,queue_size=10)\npub4 = rospy.Publisher('/turn_command',Int32,queue_size=10)\npub5 = rospy.Publisher('/voltage',Float32,queue_size=10)\npub6 = rospy.Publisher('/button',Int32,queue_size=10)\npub7 = rospy.Publisher('/sumi_t',Float32,queue_size=10)\npub8 = rospy.Publisher('/trigger_command',Int32,queue_size=10)\n\nwin.mainloop()\n","sub_path":"src/dummy_sensor.py","file_name":"dummy_sensor.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"609025613","text":"import tensorflow as tf\nimport time\nimport numpy as np\n\nimport helpers.helper_funcs as helpers\n#import helpers.cifar_models as models\n\ndef main():\n print('Loading data...')\n x_train, y_train, x_test, y_test = helpers.get_cifar10_data()\n y_test = tf.squeeze(y_test)\n\n #train_and_save_models(x_train, y_train)\n\n print(\"Loading models...\")\n l1_model = tf.keras.models.load_model('models/cifar/l1_model')\n l2_model = tf.keras.models.load_model('models/cifar/l2_model')\n l3_model = tf.keras.models.load_model('models/cifar/l3_model')\n l4_model = tf.keras.models.load_model('models/cifar/l4_model')\n l5_model = tf.keras.models.load_model('models/cifar/l5_model')\n l6_model = tf.keras.models.load_model('models/cifar/l6_model')\n l7_model = tf.keras.models.load_model('models/cifar/l7_model')\n l8_model = tf.keras.models.load_model('models/cifar/l8_model')\n l9_model = tf.keras.models.load_model('models/cifar/l9_model')\n l10_model = tf.keras.models.load_model('models/cifar/l10_model')\n\n\n # Get dictionary of counts of each class in y_test\n y_test_np = y_test.numpy()\n unique, counts = np.unique(y_test.numpy(), return_counts=True)\n count_dict = dict(zip(unique, counts))\n\n # Set up accuracy grid\n accuracies = np.zeros((10, 10))\n\n # Iterate over all models and get their predicted outputs\n models = [l1_model, l2_model, l3_model, l4_model, l5_model, l6_model, l7_model, l8_model, l9_model, l10_model]\n models_preds = []\n for i in range(10):\n model = models[i]\n\n model_probs = model.predict(x_test)\n model_preds = np.argmax(model_probs, axis=1)\n models_preds.append(model_preds)\n\n\n for i in range(10):\n model1 = models_preds[i]\n\n for j in range(10):\n model2 = models_preds[j]\n\n # Compute the number of times where the two models match predictions\n model_count = np.count_nonzero(model1 == model2)\n accuracies[i][j] = model_count / 10000\n\n\n print(accuracies)\n\n\nif __name__ == '__main__':\n main()","sub_path":"DiffNumModelCombinations/model-analysis/model_prediction_matches.py","file_name":"model_prediction_matches.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"336450096","text":"import tkinter\r\nfrom tkinter import filedialog\r\n\r\ndef openfile():\r\n\tf = filedialog.askopenfile()\r\n\tfilepath.set(f.name)\r\n\r\n\tf.close()\r\n\r\nmywind = tkinter.Tk()\r\nframe = tkinter.Frame(mywind,width = \"500\", height = \"400\", bg = \"green\")\r\nframe.pack()\r\n\r\nfilepath = tkinter.StringVar()\r\nfilepath.set(\"filepath\")\r\n\r\nbutton = tkinter.Button(frame, text = \"file open\", command = openfile)\r\nbutton.grid(row = 0, column = 0)\r\n\r\nlabel_path = tkinter.Label(frame, textvariable = 
filepath)\r\nlabel_path.grid(row = 1, column = 0)\r\n\r\nmywind.mainloop()","sub_path":"courses/w04_py/source/s05/gui/filedialog1.py","file_name":"filedialog1.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"408333933","text":"from PyQt5.QtWidgets import QApplication, QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QSizePolicy, QLabel, \\\n QDoubleSpinBox, QSpacerItem\nfrom PyQt5.QtCore import Qt, pyqtSlot, QObject\nfrom PyQt5.QtGui import QResizeEvent\n\n\nclass _ScalableImageViewerContext(QObject):\n def __init__(self, label, x_offset_spinbox, y_offset_spinbox, scale_spinbox, reset_button, parent, relative=True):\n super(_ScalableImageViewerContext, self).__init__(parent)\n self.label = label\n self.x_offset_spinbox = x_offset_spinbox\n self.y_offset_spinbox = y_offset_spinbox\n self.scale_spinbox = scale_spinbox\n self.reset_button = reset_button\n x_offset_spinbox.valueChanged.connect(self._on_x_offset_changed)\n y_offset_spinbox.valueChanged.connect(self._on_y_offset_changed)\n scale_spinbox.valueChanged.connect(self._on_scale_changed)\n reset_button.clicked.connect(self._on_reset_button_clicked)\n label._image_viewer_context = self\n self.render = None\n\n self._default_scale = 1.\n self._default_x_offset = 0.\n self._default_y_offset = 0.\n\n self._scale = self._default_scale\n self._x_offset = self._default_x_offset\n self._y_offset = self._default_y_offset\n\n self.relative = relative\n\n def set_image(self, image):\n from Viewer.canvas.align_corner.simple_painter import SimplePainter\n self.render = SimplePainter.create_from_tf_image(image)\n self.update()\n\n def set_painter(self, painter):\n self.render = painter\n self.update()\n\n def update(self):\n if self.render is None:\n self.label.clear()\n return\n if self.relative:\n canvas_w, canvas_h = self.render.get_canvas_size()\n base_scale = min((self.label.width() - 1) / (canvas_w - 1), (self.label.height() - 1) / (canvas_h - 1))\n scale = self._scale * base_scale\n x_offset = self._x_offset * base_scale\n y_offset = self._y_offset * base_scale\n else:\n scale = self._scale\n x_offset = self._x_offset\n y_offset = self._y_offset\n image = self.render.render((self.label.width(), self.label.height()), (scale, scale), translation_target_center=(x_offset, y_offset), with_qpixmap=True)\n self.label.setPixmap(image)\n\n @pyqtSlot(float)\n def _on_scale_changed(self, scale):\n self._scale = scale\n self.update()\n\n @pyqtSlot(float)\n def _on_x_offset_changed(self, x_offset):\n self._x_offset = x_offset\n self.update()\n\n @pyqtSlot(float)\n def _on_y_offset_changed(self, y_offset):\n self._y_offset = y_offset\n self.update()\n\n @pyqtSlot(bool)\n def _on_reset_button_clicked(self, _):\n self.reset()\n\n def set_scale(self, value):\n self.scale_spinbox.setValue(value)\n\n def set_x_offset(self, value):\n self.x_offset_spinbox.setValue(value)\n\n def set_y_offset(self, value):\n self.y_offset_spinbox.setValue(value)\n\n def set(self, scale, x_offset, y_offset):\n self._scale = scale\n self._x_offset = x_offset\n self._y_offset = y_offset\n self.update()\n self.x_offset_spinbox.blockSignals(True)\n self.x_offset_spinbox.setValue(x_offset)\n self.x_offset_spinbox.blockSignals(False)\n self.y_offset_spinbox.blockSignals(True)\n self.y_offset_spinbox.setValue(y_offset)\n self.y_offset_spinbox.blockSignals(False)\n self.scale_spinbox.blockSignals(True)\n self.scale_spinbox.setValue(scale)\n self.scale_spinbox.blockSignals(False)\n 
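# setValue() above did not emit valueChanged while signals were blocked, so redraw once explicitly\n        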
self.update()\n\n def set_default(self, scale, x_offset, y_offset):\n self._default_scale = scale\n self._default_x_offset = x_offset\n self._default_y_offset = y_offset\n\n def set_default_scale(self, scale):\n self._default_scale = scale\n\n def set_default_x_offset(self, x_offset):\n self._default_x_offset = x_offset\n\n def set_default_y_offset(self, y_offset):\n self._default_y_offset = y_offset\n\n def reset(self):\n self.set(self._default_scale, self._default_x_offset, self._default_y_offset)\n\n\nclass _CanvasLabel(QLabel):\n def __init__(self, *args):\n super(_CanvasLabel, self).__init__(*args)\n self._image_viewer_context = None\n\n def resizeEvent(self, qResizeEvent: QResizeEvent):\n super().resizeEvent(qResizeEvent)\n if self._image_viewer_context is not None:\n self._image_viewer_context.update()\n\n\ndef construct_simple_image_viewer_on_qt_layout(layout):\n image_label = _CanvasLabel()\n image_label.setMinimumSize(1, 1)\n image_label.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n\n x_offset_spinbox = QDoubleSpinBox()\n y_offset_spinbox = QDoubleSpinBox()\n\n x_offset_spinbox.setValue(0)\n y_offset_spinbox.setValue(0)\n\n x_offset_spinbox.setMinimum(-65536)\n x_offset_spinbox.setMaximum(65536)\n\n y_offset_spinbox.setMinimum(-65536)\n y_offset_spinbox.setMaximum(65536)\n\n scale_spinbox = QDoubleSpinBox()\n scale_spinbox.setValue(1.)\n scale_spinbox.setSingleStep(0.1)\n\n reset_button = QPushButton()\n reset_button.setText('Reset')\n\n vlayout = QVBoxLayout()\n actor_layout = QHBoxLayout()\n\n spacer = QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Minimum)\n actor_layout.addSpacerItem(spacer)\n\n x_offset_label = QLabel()\n x_offset_label.setText('x:')\n actor_layout.addWidget(x_offset_label)\n actor_layout.addWidget(x_offset_spinbox)\n y_offset_label = QLabel()\n y_offset_label.setText('y:')\n actor_layout.addWidget(y_offset_label)\n actor_layout.addWidget(y_offset_spinbox)\n scale_label = QLabel()\n scale_label.setText('scale:')\n actor_layout.addWidget(scale_label)\n actor_layout.addWidget(scale_spinbox)\n\n actor_layout.addWidget(reset_button)\n\n vlayout.addWidget(image_label)\n vlayout.addLayout(actor_layout)\n\n layout.addLayout(vlayout)\n\n return _ScalableImageViewerContext(image_label, x_offset_spinbox, y_offset_spinbox, scale_spinbox, reset_button, layout)\n\n\nclass SimpleViewer:\n def __init__(self, argv=[]):\n app = QApplication(argv)\n\n window = QDialog()\n #window.setWindowState(Qt.WindowMaximized)\n window.setWindowFlags(Qt.WindowMinimizeButtonHint | Qt.WindowMaximizeButtonHint | Qt.WindowCloseButtonHint)\n window.setWindowTitle('Viewer')\n\n layout = QVBoxLayout()\n window.setLayout(layout)\n self.main_layout = layout\n self.app = app\n self.window = window\n\n def addImage(self):\n from data.operator.image.tf.decoder import tf_decode_image\n from Viewer.canvas.align_corner.simple_painter import SimplePainter\n image_viewer_widget = construct_simple_image_viewer_on_qt_layout(self.main_layout)\n image = tf_decode_image(\"K:\\\\dataset\\\\coco\\\\images\\\\train2014\\\\COCO_train2014_000000000009.jpg\")\n painter = SimplePainter.create_from_tf_image(image)\n h, w, c = image.shape\n painter.draw_bounding_box([0, 0, w - 1, h - 1])\n painter.draw_bounding_box_with_label([5,5,10,10], 'a')\n painter.draw_bounding_box([0,0,3,1])\n painter.draw_bounding_box([0,0,2,1])\n painter.draw_bounding_box([0, 0, 1, 1])\n #painter.draw_bounding_box_with_label([2,2,3,3], 'a')\n image_viewer_widget.set_painter(painter)\n\n def 
setWindowTitle(self, title: str):\n self.window.setWindowTitle(title)\n\n def runEventLoop(self):\n self.window.show()\n return self.app.exec_()\n\n def close(self):\n self.window.close()\n\nif __name__ == '__main__':\n v=SimpleViewer()\n v.addImage()\n v.runEventLoop()\n","sub_path":"Viewer/canvas/align_corner/simple_image_viewer.py","file_name":"simple_image_viewer.py","file_ext":"py","file_size_in_byte":7751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"20142274","text":"from src.common.database import Database\nfrom src.models.alerts.alert import Alert\nDatabase.initialize()\nalerts = Alert.find_needing_update()\n\n\n\nfor alert in alerts:\n alert.load_item_price()\n alert.send_email_if_price_reached()","sub_path":"src/models/alerts/alert_updater.py","file_name":"alert_updater.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"204964977","text":"import socket\nimport asyncio\nimport time\nimport random\nimport json\nimport re\nimport requests\nfrom elasticsearch import Elasticsearch\n\nfrom ioc_finder import find_iocs\nfrom walkoff_app_sdk.app_base import AppBase\n\nclass Tools(AppBase):\n \"\"\"\n An example of a Walkoff App.\n Inherit from the AppBase class to have Redis, logging, and console logging set up behind the scenes.\n \"\"\"\n __version__ = \"1.0.0\"\n app_name = \"Shuffle Tools\" # this needs to match \"name\" in api.yaml for WALKOFF to work\n\n def __init__(self, redis, logger, console_logger=None):\n \"\"\"\n Each app should have this __init__ to set up Redis and logging.\n :param redis:\n :param logger:\n :param console_logger:\n \"\"\"\n super().__init__(redis, logger, console_logger)\n\n # https://github.com/fhightower/ioc-finder\n async def parse_ioc(self, input_string, input_type=\"all\"):\n if input_type == \"\":\n input_type = \"all\"\n\n iocs = find_iocs(input_string)\n newarray = []\n for key, value in iocs.items():\n if input_type != \"all\":\n if key != input_type:\n continue\n\n if len(value) > 0:\n for item in value:\n # If in here: attack techniques. 
Shouldn't be 3 levels so no\n                    # recursion necessary\n                    if isinstance(value, dict):\n                        for subkey, subvalue in value.items():\n                            if len(subvalue) > 0:\n                                for subitem in subvalue:\n                                    data = {\"data\": subitem, \"data_type\": \"%s_%s\" % (key[:-1], subkey)}\n                                    if data not in newarray:\n                                        newarray.append(data)\n                    else:\n                        data = {\"data\": item, \"data_type\": key[:-1]}\n                        if data not in newarray:\n                            newarray.append(data)\n\n\n        # Reformatting IP\n        for item in newarray:\n            if \"ip\" in item[\"data_type\"]:\n                item[\"data_type\"] = \"ip\"\n\n        try:\n            newarray = json.dumps(newarray)\n        except json.decoder.JSONDecodeError as e:\n            return \"Failed to parse IOC's: %s\" % e\n\n        return newarray\n\n    async def parse_list(self, items, splitter=\"\\n\"):\n        if splitter == \"\":\n            splitter = \"\\n\"\n\n        splititems = items.split(splitter)\n\n        return str(splititems)\n\n    async def get_length(self, item):\n        if item.startswith(\"[\") and item.endswith(\"]\"):\n            try:\n                item = item.replace(\"\\'\", \"\\\"\", -1)\n                item = json.loads(item)\n            except json.decoder.JSONDecodeError as e:\n                print(\"Parse error: %s\" % e) \n                pass\n\n        return str(len(item))\n\n    async def translate_value(self, input_data, translate_from, translate_to):\n        splitdata = [translate_from]\n        splitvalue = \"\"\n        if \", \" in translate_from:\n            splitdata = translate_from.split(\", \")\n        elif \",\" in translate_from:\n            splitdata = translate_from.split(\",\")\n\n        for item in splitdata:\n            input_data = input_data.replace(item, translate_to)\n\n        return input_data\n\n    async def execute_python(self, code, shuffle_input):\n        print(\"Run with shuffle_data %s\" % shuffle_input)\n        print(\"And python code %s\" % code)\n        # Write the code to a file, then jdjd\n        exec(code)\n\n        # May be necessary\n        #compile()\n\n        return \"Some return: %s\" % shuffle_input\n\n    async def parse_json(self, str_input):\n        str_input = re.sub('[^A-Za-z0-9\".]', '', str_input)\n        list_json = str_input.split(\"\\\"\\\"\")\n        #json_object = json.loads(json_strings)\n        #return (str_input.split('\"src\": \"')[1]).split('\", \"http_user_agent\":')[0]\n\n        #return list_json[list_json.index(\"src\")+1]\n        json_object = {\"src\":list_json[list_json.index(\"src\")+1], \"time\":list_json[list_json.index(\"reqtime\")+1]}\n        return json_object\n\n    async def save_results(self, result, ref_time, analyzer_name):\n        es = Elasticsearch([{'host':'10.88.200.105','port':9200}])\n        res = es.index(index='results', doc_type='result', id=ref_time+analyzer_name, body=eval(result))\n\n        return res\n\nif __name__ == \"__main__\":\n    asyncio.run(Tools.run(), debug=True)\n","sub_path":"tools/1.0.0/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"229322602","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport json\r\nimport deal_file as ssfile\r\nimport time\r\nfrom datetime import datetime\r\nimport math\r\nimport socket\r\n\r\ndf = ssfile.deal_files()\r\nend_dict = df.deals()\r\n\r\n# Organize the contents of each file and merge the overlapping parts in the calculation\r\n# Process each file separately first, then process several files together\r\nclass format_json:\r\n    def __init__(self):\r\n        self.same_sip_lists = []\r\n\r\n    def format_single_file(self,files):\r\n\r\n        # Put dicts with a different Saddress into list_tmp first, then into a dict keyed by the current Saddress\r\n        # If the Saddress is the same, store it in the list_tmp list of the dict with that key\r\n        same_sip_list = []\r\n        same_sip_dict = []\r\n        for td in files:\r\n            list_tmp = []\r\n            list_tmp2 = []\r\n            list_dict = {}\r\n            try:\r\n                ip = td['Saddress']\r\n            except:\r\n                continue\r\n\r\n            if ip not in same_sip_list:\r\n                list_tmp.append(td)\r\n                
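# first record for this source address: create a new bucket keyed by the Saddress\r\n                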
list_dict.setdefault(ip,list_tmp)\r\n                same_sip_list.append(ip)\r\n                same_sip_dict.append(list_dict)\r\n            else:\r\n                for i in same_sip_dict:\r\n                    if ip in i.keys():\r\n                        list_tmp2 = i[ip]\r\n                        list_tmp2.append(td)\r\n                        i[ip] = list_tmp2\r\n                    else:\r\n                        pass\r\n            pass\r\n        self.same_sip_lists = same_sip_list\r\n        return same_sip_dict\r\n\r\n    def get_avge(self,same_sip_dict):\r\n        # print(len(same_sip_dict))\r\n        lis = []\r\n        for tmp_dict in same_sip_dict:\r\n            rtt = 0\r\n            cwnd = 0\r\n            Retrans = 0\r\n            send = 0\r\n            dic = {}\r\n            tmp_list = list(tmp_dict.values())[0]\r\n            # print(tmp_list)\r\n            # print(len(tmp_list))\r\n            for i in range(len(tmp_list)):\r\n                rtt = tmp_list[i]['rtt'] + rtt\r\n                cwnd = tmp_list[i]['cwnd'] + cwnd\r\n                # print(cwnd)\r\n                Retrans = float(tmp_list[i]['Retrans']) + Retrans\r\n                send = float(tmp_list[i]['send']) + send\r\n                # print(tmp_list[i]['Retrans'])\r\n            # print(len(tmp_list))\r\n            rtt = round(float(rtt/len(tmp_list)),2)\r\n            cwnd = round(float(cwnd/len(tmp_list)),2)\r\n            Retrans = math.ceil(float(Retrans/len(tmp_list)))\r\n            # if Retrans == 0:\r\n            #     pass\r\n            # else:\r\n            #     print(Retrans)\r\n            #     pass\r\n            send = round(float(send/len(tmp_list)),2)\r\n            # print(Retrans,send)\r\n            timestamp = datetime.now().strftime(\"%d/%b/%Y:%X +0800\")\r\n            total = len(tmp_list)\r\n            hostname = socket.gethostname()\r\n            # Convert everything to strings\r\n            # rtt = str(rtt)\r\n            # cwnd = str(cwnd)\r\n            # Retrans = str(Retrans)\r\n            # send = str(send)\r\n\r\n            # print(rtt)\r\n            dic.setdefault(\"Daddress\",tmp_list[0]['Daddress'])\r\n            dic.setdefault(\"Saddress\",tmp_list[0]['Saddress'].split(\":\")[0])\r\n            dic.setdefault(\"SPort\",tmp_list[0]['Saddress'].split(\":\")[-1])\r\n            # print(tmp_list[0])\r\n            dic.setdefault(\"rtt\",rtt)\r\n            dic.setdefault(\"cwnd\",cwnd)\r\n            dic.setdefault(\"Retrans\",Retrans)\r\n            dic.setdefault(\"send\",send)\r\n            dic.setdefault(\"total\",total)\r\n            # print(len(tmp_list))\r\n            dic.setdefault(\"line_state\",tmp_list[0]['line_state'])\r\n            dic.setdefault(\"type\",tmp_list[0]['type'])\r\n            dic.setdefault(\"node_type\",\"network-attack\")\r\n            dic.setdefault(\"timestamp\",timestamp)\r\n            dic.setdefault(\"hostname\",hostname)\r\n            lis.append(dic)\r\n            # print(Retrans)\r\n        return lis\r\n\r\n\r\n\r\n    def get_dict(self):\r\n        newfiles = os.listdir(os.path.join(os.path.dirname (os.path.abspath(__file__)),\"tmpfiles/\"))\r\n        list_iterator = []\r\n        for i in newfiles:\r\n            list_iterator = list_iterator + end_dict[i]\r\n            # print(i)\r\n        same_sip_dict = self.format_single_file(list_iterator)\r\n        lis = self.get_avge(same_sip_dict)\r\n        return lis\r\n\r\n    def update_json(self):\r\n        _str = \"\"\r\n        lis = self.get_dict()\r\n        for i in lis:\r\n            _str_tmp = \"\\n\"+json.dumps(i)\r\n            _str = _str+_str_tmp\r\n        print(\"'\"+_str[1:]+\"'\")\r\nif __name__ == \"__main__\":\r\n    fj = format_json()\r\n    # fj.get_dict()\r\n    fj.update_json()\r\n","sub_path":"getss_work/getss/general_json.py","file_name":"general_json.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"516419296","text":"# Q9 - Solving using adaptive step-size control with the fourth-order Runge-Kutta method\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt \r\n\r\ndef f(x,y):\r\n    return (y**2+y)/x\r\n\r\nxtemp=1\r\nx1=1\r\nytemp=-2\r\ny1=-2\r\nY=[-2]\r\nX=[1]\r\nh=0.1\r\nh1=0.9\r\nh0=0.09\r\n\r\nfor i in range(50):\r\n    while x1<3:\r\n        if h<h0:\r\n            h=h0\r\n        if h>h1:\r\n            h=h1\r\n        else:\r\n            pass\r\n        for i in range(1):\r\n            \r\n            k1=f(xtemp,ytemp) \r\n            k2=f(xtemp+h/2,ytemp+h*k1/2)\r\n            k3=f(xtemp+h/2,ytemp+k2*h/2)\r\n            
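# k1-k4 are the classical fourth-order Runge-Kutta slope estimates; k4 samples the full step\r\n            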
k4=f(xtemp+h,ytemp+k3*h)\r\n            y_s=ytemp+(k1+2*(k2+k3)+k4)*h/6\r\n            xtemp=xtemp+h\r\n            ytemp=y_s\r\n            \r\n            k11=f(x1,y1) \r\n            k21=f(x1+h,y1+h*k11)\r\n            k31=f(x1+h,y1+k21*h)\r\n            k41=f(x1+2*h,y1+k31*h*2)\r\n            y_d=y1+(k11+2*(k21+k31)+k41)*h/6\r\n            x1=x1+h\r\n            y1=y_d\r\n        X.append(xtemp)\r\n        Y.append(y_s)\r\n        tol=abs(y_s-y_d)\r\n        if tol>1e-4:\r\n            h=h/2\r\n        if tol<0.5e-4:\r\n            h=2*h\r\n        else:\r\n            pass\r\nx=np.array(X)\r\ny=np.array(Y)\r\nplt.plot(x,y,'r',label=\"Numerical Solution\")\r\nplt.legend()\r\nplt.xlabel(\"x\")\r\nplt.ylabel(\"y\")\r\nplt.show() \r\n","sub_path":"Q9.py","file_name":"Q9.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"427366483","text":"#!/usr/bin/env python3\nimport datetime\nimport json\nimport os\nimport sys\nimport time\n\nfrom flask import Flask, render_template, request\n\nfrom classes import Graph\n\napp = Flask('c3nav-wificollect')\n\n\nif 'C3NAVCONF' in os.environ:\n    filename = os.environ['C3NAVCONF']\nelif len(sys.argv) > 1:\n    filename = sys.argv[1]\nelse:\n    print('Please specify filename: run.py <filename> or environment variable C3NAVCONF')\n    sys.exit(1)\n\nstarttime = time.time()\n\n\n@app.route('/')\ndef map():\n    f = open(filename)\n    graph = Graph(json.load(f), auto_connect=False)\n    return render_template('wificollect.html', graph=graph)\n\n\n@app.route('/add', methods=['POST'])\ndef addroom():\n    data = json.load(open(filename))\n    position = [int(i) for i in request.form.get('position').split('.')]\n    stations = json.loads(request.form.get('stations'))\n    data['wifidata'].append({\n        'level': position[0],\n        'x': position[1],\n        'y': position[2],\n        'time': str(datetime.datetime.now()),\n        'stations': stations\n    })\n    json.dump(data, open(filename, 'w'), indent=4, sort_keys=True)\n    return 'ok'\n\n\n@app.route('/locate', methods=['POST'])\ndef locate():\n    f = open(filename)\n    graph = Graph(json.load(f), auto_connect=False)\n    result = graph.wifi.locate(json.loads(request.form.get('stations')))\n    return json.dumps(result)\n\napp.run(threaded=True, debug=True)\n","sub_path":"src/wificollect.py","file_name":"wificollect.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"413496163","text":"\"\"\"This module contains classes and functions for high level interaction\nwith AOVs.\n\n\"\"\"\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\n# Python Imports\nimport glob\nimport json\nimport os\n\n# Houdini Toolbox Imports\nfrom ht.sohohooks.aovs.aov import AOV, AOVGroup, IntrinsicAOVGroup\n\n# Houdini Imports\nimport hou\n\n# =============================================================================\n# CLASSES\n# =============================================================================\n\nclass AOVManager(object):\n    \"\"\"This class is for managing and applying AOVs at render time.\"\"\"\n\n    def __init__(self):\n        self._aovs = {}\n        self._groups = {}\n        self._interface = None\n\n        self._initFromFiles()\n\n    # =========================================================================\n    # SPECIAL METHODS\n    # =========================================================================\n\n    def __repr__(self):\n        return \"<AOVManager AOVs:{} groups:{}>\".format(\n            len(self.aovs),\n            len(self.groups),\n        )\n\n    # =========================================================================\n    # NON-PUBLIC METHODS\n    # 
=========================================================================\n\n def _buildIntrinsicGroups(self):\n \"\"\"Build intrinsic groups.\"\"\"\n # Process any AOVs that we have to look for any intrinsic groups.\n for aov in self.aovs.itervalues():\n for intrinsic_name in aov.intrinsics:\n # Intrinsic groups are prefixed with \"i:\".\n name = \"i:\" + intrinsic_name\n\n # Group exists so use it.\n if name in self.groups:\n group = self.groups[name]\n\n # Create the group and add it to our list.\n else:\n group = IntrinsicAOVGroup(name)\n self.addGroup(group)\n\n # Add this AOV to the group.\n group.aovs.append(aov)\n\n def _initFromFiles(self):\n \"\"\"Initialize the manager from files on disk.\"\"\"\n file_paths = _findAOVFiles()\n\n readers = [AOVFile(file_path) for file_path in file_paths]\n\n self._mergeReaders(readers)\n\n self._buildIntrinsicGroups()\n\n def _initGroupMembers(self, group):\n \"\"\"Populate the AOV lists of each group based on available AOVs.\"\"\"\n # Process each of the group's includes.\n for include in group.includes:\n # If the AOV name is available, add it to the group.\n if include in self.aovs:\n group.aovs.append(self.aovs[include])\n\n def _mergeReaders(self, readers):\n \"\"\"Merge the data of multiple AOVFile objects.\"\"\"\n # We need to handle AOVs first since AOVs in other files may overwrite\n # AOVs in group definition files.\n for reader in readers:\n for aov in reader.aovs:\n variable_name = aov.variable\n\n # Check if this AOV has already been seen.\n if variable_name in self.aovs:\n # If this AOV has a higher priority, replace the previous\n # one.\n if aov.priority > self.aovs[variable_name].priority:\n self.addAOV(aov)\n\n # Hasn't been seen, so add it.\n else:\n self.addAOV(aov)\n\n # Now that AOVs have been made available, add them to groups.\n for reader in readers:\n for group in reader.groups:\n self._initGroupMembers(group)\n\n group_name = group.name\n\n # Check if this group has already been seen.\n if group_name in self.groups:\n # If this group has a higher priority, replace the previous\n # one.\n if group.priority > self.groups[group_name].priority:\n self.addGroup(group)\n\n # Hasn't been seen, so add it.\n else:\n self.addGroup(group)\n\n # =========================================================================\n # PROPERTIES\n # =========================================================================\n\n @property\n def interface(self):\n \"\"\"Any AOVViewerInterface assigned to the manager.\"\"\"\n return self._interface\n\n @property\n def aovs(self):\n \"\"\"Dictionary containing all available AOVs.\"\"\"\n return self._aovs\n\n @property\n def groups(self):\n \"\"\"Dictionary containing all available AOVGroups.\"\"\"\n return self._groups\n\n # =========================================================================\n # STATIC METHODS\n # =========================================================================\n\n @staticmethod\n def addAOVsToIfd(wrangler, cam, now):\n \"\"\"Add auto_aovs to the ifd.\"\"\"\n import IFDapi\n import IFDsettings\n import soho\n\n # The parameter that defines which automatic aovs to add.\n parms = {\n \"enable\": soho.SohoParm(\n \"enable_auto_aovs\",\n \"int\",\n [1],\n skipdefault=False\n ),\n \"auto_aovs\": soho.SohoParm(\n \"auto_aovs\",\n \"str\",\n [\"\"],\n skipdefault=False\n ),\n }\n\n # Attempt to evaluate the parameter.\n plist = cam.wrangle(wrangler, parms, now)\n\n if plist:\n # Adding is disabled so bail out.\n if plist[\"enable_auto_aovs\"].Value[0] == 0:\n return\n\n 
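# space or comma separated string of AOV names; group names are prefixed with '@'\n            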
aov_str = plist[\"auto_aovs\"].Value[0]\n\n            # Construct a manager.\n            manager = findOrCreateSessionAOVManager()\n\n            # Parse the string to get any aovs/groups.\n            aovs = manager.getAOVsFromString(aov_str)\n\n            # Write any found items to the ifd.\n            for aov in aovs:\n                aov.writeToIfd(wrangler, cam, now)\n\n            # If we are generating the \"Op_Id\" plane we will need to tell SOHO\n            # to generate these properties when outputting object. Look for\n            # the \"Op_Id\" variable being exported and if so enable operator id\n            # generation\n            for aov in flattenedList(aovs):\n                if aov.variable == \"Op_Id\":\n                    IFDapi.ray_comment(\"Forcing object id generation\")\n                    IFDsettings._GenerateOpId = True\n\n                    break\n\n    # =========================================================================\n    # METHODS\n    # =========================================================================\n\n    def addAOV(self, aov):\n        \"\"\"Add an AOV to the manager.\"\"\"\n        self._aovs[aov.variable] = aov\n\n        if self.interface is not None:\n            self.interface.aovAddedSignal.emit(aov)\n\n    def addGroup(self, group):\n        \"\"\"Add an AOVGroup to the manager.\"\"\"\n        self.groups[group.name] = group\n\n        if self.interface is not None:\n            self.interface.groupAddedSignal.emit(group)\n\n    def clear(self):\n        \"\"\"Clear all definitions.\"\"\"\n        self._aovs = {}\n        self._groups = {}\n\n    def getAOVsFromString(self, aov_str):\n        \"\"\"Get a list of AOVs and AOVGroups from a string.\"\"\"\n        aovs = []\n\n        aov_str = aov_str.replace(',', ' ')\n\n        for name in aov_str.split():\n            if name.startswith('@'):\n                name = name[1:]\n\n                if name in self.groups:\n                    aovs.append(self.groups[name])\n\n            else:\n                if name in self._aovs:\n                    aovs.append(self._aovs[name])\n\n        return aovs\n\n    def initInterface(self):\n        \"\"\"Initialize an AOVViewerInterface for this manager.\"\"\"\n        from ht.ui.aovs.utils import AOVViewerInterface\n\n        self._interface = AOVViewerInterface()\n\n    def load(self, path):\n        \"\"\"Load a file.\"\"\"\n        readers = [AOVFile(path)]\n\n        self._mergeReaders(readers)\n\n    def reload(self):\n        \"\"\"Reload all definitions.\"\"\"\n        self.clear()\n        self._initFromFiles()\n\n    def removeAOV(self, aov):\n        \"\"\"Remove the specified AOV from the manager.\"\"\"\n        if aov.variable in self.aovs:\n            self.aovs.pop(aov.variable)\n\n            if self.interface is not None:\n                self.interface.aovRemovedSignal.emit(aov)\n\n    def removeGroup(self, group):\n        \"\"\"Remove the specified group from the manager.\"\"\"\n        if group.name in self.groups:\n            self.groups.pop(group.name)\n\n            if self.interface is not None:\n                self.interface.groupRemovedSignal.emit(group)\n\n# =============================================================================\n\nclass AOVFile(object):\n    \"\"\"Class to handle reading and writing AOV .json files.\"\"\"\n\n    def __init__(self, path):\n        self._path = path\n\n        self._aovs = []\n        self._data = {}\n        self._groups = []\n\n        if self.exists:\n            self._initFromFile()\n\n    # =========================================================================\n    # NON-PUBLIC METHODS\n    # =========================================================================\n\n    def _initFromFile(self):\n        \"\"\"Read data from the file and create the appropriate entities.\"\"\"\n        with open(self.path) as handle:\n            data = json.load(handle)\n\n        if \"definitions\" in data:\n            self._createAOVs(data[\"definitions\"])\n\n        if \"groups\" in data:\n            self._createGroups(data[\"groups\"])\n\n    # =========================================================================\n\n    def _createAOVs(self, definitions):\n        \"\"\"Create AOVs based on definitions.\"\"\"\n        
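# each definition is a dict parsed from the file's \"definitions\" list\n        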
for definition in definitions:\n # Insert this file path into the data.\n definition[\"path\"] = self.path\n\n # Construct a new AOV and add it to our list.\n aov = AOV(definition)\n self.aovs.append(aov)\n\n # =========================================================================\n\n def _createGroups(self, definitions):\n \"\"\"Create AOVGroups based on definitions.\"\"\"\n for name, group_data in definitions.iteritems():\n # Create a new AOVGroup.\n group = AOVGroup(name)\n\n # Process its list of AOVs to include.\n if \"include\" in group_data:\n group.includes.extend(group_data[\"include\"])\n\n # Set any comment.\n if \"comment\" in group_data:\n group.comment = group_data[\"comment\"]\n\n if \"priority\" in group_data:\n group.priority = group_data[\"priority\"]\n\n # Set any icon.\n if \"icon\" in group_data:\n group.icon = os.path.expandvars(group_data[\"icon\"])\n\n # Set the path to this file.\n group.path = self.path\n\n # Add the group to the list.\n self.groups.append(group)\n\n # =========================================================================\n # PROPERTIES\n # =========================================================================\n\n @property\n def aovs(self):\n \"\"\"List containing AOVs defined in this file.\"\"\"\n return self._aovs\n\n # =========================================================================\n\n @property\n def groups(self):\n \"\"\"List containing AOVGroups defined in this file.\"\"\"\n return self._groups\n\n # =========================================================================\n\n @property\n def path(self):\n \"\"\"File path on disk.\"\"\"\n return self._path\n\n # =========================================================================\n\n @property\n def exists(self):\n \"\"\"Check if the file actually exists.\"\"\"\n return os.path.isfile(self.path)\n\n # =========================================================================\n # METHODS\n # =========================================================================\n\n def addAOV(self, aov):\n \"\"\"Add an AOV for writing.\"\"\"\n self.aovs.append(aov)\n\n def addGroup(self, group):\n \"\"\"Add An AOVGroup for writing.\"\"\"\n self.groups.append(group)\n\n def containsAOV(self, aov):\n \"\"\"Check if this file contains an AOV with the same variable name.\"\"\"\n return aov in self.aovs\n\n def containsGroup(self, group):\n \"\"\"Check if this file contains a group with the same name.\"\"\"\n return group in self.groups\n\n def removeAOV(self, aov):\n \"\"\"Remove an AOV from the file.\"\"\"\n idx = self.aovs.index(aov)\n\n del self.aovs[idx]\n\n def removeGroup(self, group):\n \"\"\"Remove a group from the file.\"\"\"\n idx = self.groups.index(group)\n\n del self.groups[idx]\n\n def replaceAOV(self, aov):\n \"\"\"Replace an AOV in the file.\"\"\"\n idx = self.aovs.index(aov)\n\n self.aovs[idx] = aov\n\n def replaceGroup(self, group):\n \"\"\"Replace a group in the file.\"\"\"\n idx = self.groups.index(group)\n\n self.groups[idx] = group\n\n def writeToFile(self, path=None):\n \"\"\"Write data to file.\"\"\"\n data = {}\n\n for group in self.groups:\n groups = data.setdefault(\"groups\", {})\n\n groups.update(group.getData())\n\n for aov in self.aovs:\n aovs = data.setdefault(\"definitions\", [])\n\n aovs.append(aov.getData())\n\n if path is None:\n path = self.path\n\n with open(path, 'w') as handle:\n json.dump(data, handle, indent=4)\n\n# =============================================================================\n# NON-PUBLIC FUNCTIONS\n# 
=============================================================================\n\ndef _findAOVFiles():\n    \"\"\"Find any .json files that should be read.\"\"\"\n    # Look for the specific AOV search path.\n    if \"HT_AOV_PATH\" in os.environ:\n        # Get the search path.\n        search_path = os.environ[\"HT_AOV_PATH\"]\n\n        # If '&' is in the path then following Houdini path conventions we'll\n        # search through the HOUDINI_PATH as well.\n        if '&' in search_path:\n            # Find any config/aovs folders in HOUDINI_PATH.\n            hpath_dirs = _findHoudiniPathAOVFolders()\n\n            # If there are any then we replace the '&' with those paths.\n            if hpath_dirs:\n                search_path = search_path.replace('&', ':'.join(hpath_dirs))\n\n        directories = search_path.split(\":\")\n\n    else:\n        directories = _findHoudiniPathAOVFolders()\n\n    all_files = []\n\n    for directory in directories:\n        all_files.extend(glob.glob(os.path.join(directory, \"*.json\")))\n\n    return all_files\n\n\ndef _findHoudiniPathAOVFolders():\n    \"\"\"Look for any config/aovs folders in the HOUDINI_PATH.\"\"\"\n    # Try to find HOUDINI_PATH directories.\n    try:\n        directories = hou.findDirectories(\"config/aovs\")\n\n    except hou.OperationFailed:\n        directories = ()\n\n    return directories\n\n# =============================================================================\n# FUNCTIONS\n# =============================================================================\n\ndef buildMenuScript():\n    \"\"\"Build a menu script for choosing AOVs and groups.\"\"\"\n    manager = findOrCreateSessionAOVManager()\n\n    menu = []\n\n    if manager.groups:\n        for group in sorted(manager.groups.keys()):\n            menu.extend([\"@{}\".format(group), group])\n\n        menu.extend([\"_separator_\", \"---------\"])\n\n    for aov in sorted(manager.aovs):\n        menu.extend([aov, aov])\n\n    return menu\n\n\ndef createSessionAOVManager():\n    \"\"\"Create an AOVManager stored in hou.session.\"\"\"\n    manager = AOVManager()\n    hou.session.aov_manager = manager\n\n    return manager\n\n\ndef findOrCreateSessionAOVManager(rebuild=False):\n    \"\"\"Find or create an AOVManager from hou.session.\"\"\"\n    manager = None\n\n    if hasattr(hou.session, \"aov_manager\") and not rebuild:\n        manager = hou.session.aov_manager\n\n    else:\n        manager = createSessionAOVManager()\n\n    return manager\n\n\ndef flattenedList(items):\n    \"\"\"Flatten a list that contains AOVs and groups into a list of all AOVs.\"\"\"\n    aovs = []\n\n    for item in items:\n        if isinstance(item, AOVGroup):\n            aovs.extend(item.aovs)\n\n        else:\n            aovs.append(item)\n\n    return aovs\n\n\ndef loadJsonFiles():\n    \"\"\"Load .json files into the manager.\"\"\"\n    result = hou.ui.selectFile(\n        pattern=\"*.json\",\n        chooser_mode=hou.fileChooserMode.Read,\n        multiple_select=True,\n    )\n\n    paths = result.split(\" ; \")\n\n    for path in paths:\n        path = os.path.expandvars(path)\n\n        if os.path.exists(path):\n            MANAGER.load(path)\n\n# =============================================================================\n\nMANAGER = findOrCreateSessionAOVManager(rebuild=True)\n\n","sub_path":"python/ht/sohohooks/aovs/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":16972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"641637173","text":"def add_elipsis(lst,elnum=4):\n    for i in lst:\n        print('{}{}'.format(i[:elnum],'...'))\ncomments = (\n    \"Implementation note\",\n    \"Changed\",\n    \"ABC for generator\",\n)\nadd_elipsis(comments,elnum=6)\n\n# Written this way, the logic works too. Oh right - 
remember to sort the directories","sub_path":"P17075-上海-宁明强/homework/add_ellipsis.py","file_name":"add_ellipsis.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"613870306","text":"def merge_lists(list1, list2):\n    new_list = []\n    for num in list1:\n        if not num in new_list:\n            new_list.append(num)\n    for num in list2:\n        if not num in new_list:\n            new_list.append(num)\n    new_list.sort()\n    return new_list\n\nprint(\"Merging [1,2,3],[4,3,2] :\", merge_lists([1,2,3],[4,3,2]))\nprint(\"Merging [3,2,1],[2,6,4,10,4] :\", merge_lists([3,2,1],[2,6,4,10,4]))\nprint(\"Merging [3,1,1],[] :\", merge_lists([3,1,1],[]))\nprint(\"Merging [],[9,7,2,7] :\", merge_lists([],[9,7,2,7]))\n","sub_path":"lab5/lab5_5.py","file_name":"lab5_5.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"218264053","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.models import User\nfrom .models import *\n\nadmin.site.register([Estado,Cidade])\n\nclass PerfilInline(admin.StackedInline):\n    model = Perfil\n    can_delete = True\n    verbose_name_plural = 'Imagens'\n    fk_name = 'usuario'\n\nclass ImagemInline(admin.StackedInline):\n    model = Imagem\n    can_delete = True\n    verbose_name_plural = 'Imagens'\n    fk_name = 'usuario'\n\nclass CustomUserAdmin(UserAdmin):\n    inlines = (PerfilInline, ImagemInline)\n    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')\n    list_select_related = ('perfil', )\n\n    def get_inline_instances(self, request, obj=None):\n        if not obj:\n            return list()\n        return super(CustomUserAdmin, self).get_inline_instances(request, obj)\n\nadmin.site.unregister(User)\nadmin.site.register(User, CustomUserAdmin)","sub_path":"perfil/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"350530765","text":"#!/usr/bin/env python\nu\"\"\"\ncompute_tidal_currents.py\nWritten by Tyler Sutterley (08/2023)\nCalculates zonal and meridional tidal currents for an input file\n\nUses OTIS format tidal solutions provided by Ohio State University and ESR\n    http://volkov.oce.orst.edu/tides/region.html\n    https://www.esr.org/research/polar-tide-models/list-of-polar-tide-models/\n    ftp://ftp.esr.org/pub/datasets/tmd/\nor Finite Element Solution (FES) models provided by AVISO\n\nINPUTS:\n    csv file with columns for spatial and temporal coordinates\n    HDF5 file with variables for spatial and temporal coordinates\n    netCDF4 file with variables for spatial and temporal coordinates\n    geotiff file with bands in spatial coordinates\n\nCOMMAND LINE OPTIONS:\n    -D X, --directory X: Working data directory\n    -T X, --tide X: Tide model to use in calculating currents\n    --atlas-format X: ATLAS tide model format (OTIS, netcdf)\n    --gzip, -G: Tide model files are gzip compressed\n    --definition-file X: Model definition file for use in calculating currents\n    --format X: input and output data format\n        csv (default)\n        netCDF4\n        HDF5\n        geotiff\n    --variables X: variable names of data in csv, HDF5 or netCDF4 file\n        for csv files: the order of the columns within the file\n        for HDF5 and netCDF4 files: time, y, x and data variable names\n    -H X, --header X: number of header lines for csv files\n    --delimiter X: Delimiter for csv or ascii files\n    -t X, --type X: input data type\n        drift: drift buoys or 
satellite/airborne altimetry (time per data point)\n grid: spatial grids or images (single time for all data points)\n -e X, --epoch X: Reference epoch of input time (default Modified Julian Day)\n days since 1858-11-17T00:00:00\n -d X, --deltatime X: Input delta time for files without date information\n can be set to 0 to use exact calendar date from epoch\n -s X, --standard X: Input time standard for delta times or input time type\n UTC: Coordinate Universal Time\n GPS: GPS Time\n LORAN: Long Range Navigator Time\n TAI: International Atomic Time\n datetime: formatted datetime string in UTC\n -P X, --projection X: spatial projection as EPSG code or PROJ4 string\n 4326: latitude and longitude coordinates on WGS84 reference ellipsoid\n -I X, --interpolate X: Interpolation method\n spline\n linear\n nearest\n bilinear\n -E X, --extrapolate X: Extrapolate with nearest-neighbors\n -c X, --cutoff X: Extrapolation cutoff in kilometers\n set to inf to extrapolate for all points\n -V, --verbose: Verbose output of processing run\n -M X, --mode X: Permission mode of output file\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n https://numpy.org/doc/stable/user/numpy-for-matlab-users.html\n scipy: Scientific Tools for Python\n https://docs.scipy.org/doc/\n h5py: Python interface for Hierarchal Data Format 5 (HDF5)\n https://www.h5py.org/\n netCDF4: Python interface to the netCDF C library\n https://unidata.github.io/netcdf4-python/netCDF4/index.html\n gdal: Pythonic interface to the Geospatial Data Abstraction Library (GDAL)\n https://pypi.python.org/pypi/GDAL\n dateutil: powerful extensions to datetime\n https://dateutil.readthedocs.io/en/stable/\n pyproj: Python interface to PROJ library\n https://pypi.org/project/pyproj/\n\nPROGRAM DEPENDENCIES:\n time.py: utilities for calculating time operations\n spatial: utilities for reading, writing and operating on spatial data\n utilities.py: download and management utilities for syncing files\n arguments.py: load the nodal corrections for tidal constituents\n astro.py: computes the basic astronomical mean longitudes\n convert_crs.py: convert points to and from Coordinates Reference Systems\n load_constituent.py: loads parameters for a given tidal constituent\n io/model.py: retrieves tide model parameters for named tide models\n io/OTIS.py: extract tidal harmonic constants from OTIS tide models\n io/ATLAS.py: extract tidal harmonic constants from netcdf models\n io/FES.py: extract tidal harmonic constants from FES tide models\n interpolate.py: interpolation routines for spatial data\n predict.py: predict tidal values using harmonic constants\n\nUPDATE HISTORY:\n Updated 08/2023: changed ESR netCDF4 format to TMD3 format\n Updated 05/2023: use timescale class for time conversion operations\n Updated 04/2023: using pathlib to define and expand paths\n using long_name and description attributes from model class\n Updated 02/2023: added functionality for time series type\n Updated 01/2023: added default field mapping for reading from netCDF4/HDF5\n added data type keyword for netCDF4 output\n Updated 12/2022: single implicit import of pyTMD tools\n Updated 11/2022: place some imports within try/except statements\n use f-strings for formatting verbose or ascii output\n Updated 10/2022: added delimiter option and datetime parsing for ascii files\n Updated 05/2022: added ESR netCDF4 formats to list of model types\n updated keyword arguments to read tide model programs\n Updated 04/2022: use argparse descriptions within 
documentation\n Updated 03/2022: using static decorators to define available models\n Updated 02/2022: added Arctic 2km model (Arc2kmTM) to list of models\n Updated 01/2022: added option for changing the time standard\n Updated 12/2021: added TPXO9-atlas-v5 to list of available tide models\n Updated 10/2021: using python logging for handling verbose output\n Updated 09/2021: refactor to use model class for files and attributes\n Updated 07/2021: added tide model reference to output attributes\n can use prefix files to define command line arguments\n Updated 06/2021: added new Gr1km-v2 1km Greenland model from ESR\n Updated 05/2021: added option for extrapolation cutoff in kilometers\n Updated 03/2021: added TPXO9-atlas-v4 in binary OTIS format\n simplified netcdf inputs to be similar to binary OTIS read program\n Updated 02/2021: replaced numpy bool to prevent deprecation warning\n Updated 12/2020: added valid data extrapolation with nearest_extrap\n Updated 11/2020: added options to read from and write to geotiff image files\n Updated 10/2020: using argparse to set command line parameters\n Forked 09/2020 from compute_tidal_elevations.py\n Updated 09/2020: can use HDF5 and netCDF4 as inputs and outputs\n Updated 08/2020: using builtin time operations\n Updated 07/2020: added FES2014 and FES2014_load. use merged delta times\n Updated 06/2020: added version 2 of TPXO9-atlas (TPXO9-atlas-v2)\n Updated 02/2020: changed CATS2008 grid to match version on U.S. Antarctic\n Program Data Center http://www.usap-dc.org/view/dataset/601235\n Updated 11/2019: added AOTIM-5-2018 tide model (2018 update to 2004 model)\n Updated 09/2019: added TPXO9_atlas reading from netcdf4 tide files\n Updated 07/2018: added GSFC Global Ocean Tides (GOT) models\n Written 10/2017 for public release\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\nimport logging\nimport pathlib\nimport argparse\nimport numpy as np\nimport pyTMD\n\n# attempt imports\ntry:\n import pyproj\nexcept (ImportError, ModuleNotFoundError) as exc:\n logging.critical(\"pyproj not available\")\n\n# PURPOSE: try to get the projection information for the input file\ndef get_projection(attributes, PROJECTION):\n # coordinate reference system string from file\n try:\n crs = pyproj.CRS.from_string(attributes['projection'])\n except (ValueError,KeyError,pyproj.exceptions.CRSError):\n pass\n else:\n return crs\n # EPSG projection code\n try:\n crs = pyproj.CRS.from_epsg(int(PROJECTION))\n except (ValueError,pyproj.exceptions.CRSError):\n pass\n else:\n return crs\n # coordinate reference system string\n try:\n crs = pyproj.CRS.from_string(PROJECTION)\n except (ValueError,pyproj.exceptions.CRSError):\n pass\n else:\n return crs\n # no projection can be made\n raise pyproj.exceptions.CRSError\n\n# PURPOSE: read csv, netCDF or HDF5 data\n# compute tides at points and times using tidal model driver algorithms\ndef compute_tidal_currents(tide_dir, input_file, output_file,\n TIDE_MODEL=None,\n ATLAS_FORMAT='netcdf',\n GZIP=True,\n DEFINITION_FILE=None,\n FORMAT='csv',\n VARIABLES=[],\n HEADER=0,\n DELIMITER=',',\n TYPE='drift',\n TIME_UNITS='days since 1858-11-17T00:00:00',\n TIME=None,\n TIME_STANDARD='UTC',\n PROJECTION='4326',\n METHOD='spline',\n EXTRAPOLATE=False,\n CUTOFF=None,\n VERBOSE=False,\n MODE=0o775):\n\n # create logger for verbosity level\n loglevel = logging.INFO if VERBOSE else logging.CRITICAL\n logging.basicConfig(level=loglevel)\n\n # get parameters for tide model\n if DEFINITION_FILE is not None:\n model = 
pyTMD.io.model(tide_dir).from_file(DEFINITION_FILE)\n else:\n model = pyTMD.io.model(tide_dir, format=ATLAS_FORMAT,\n compressed=GZIP).current(TIDE_MODEL)\n\n # invalid value\n fill_value = -9999.0\n # output netCDF4 and HDF5 file attributes\n # will be added to YAML header in csv files\n attrib = {}\n # latitude\n attrib['lat'] = {}\n attrib['lat']['long_name'] = 'Latitude'\n attrib['lat']['units'] = 'Degrees_North'\n # longitude\n attrib['lon'] = {}\n attrib['lon']['long_name'] = 'Longitude'\n attrib['lon']['units'] = 'Degrees_East'\n # zonal tidal currents\n attrib['u'] = {}\n attrib['u']['description'] = model.description['u']\n attrib['u']['reference'] = model.reference\n attrib['u']['model'] = model.name\n attrib['u']['units'] = 'cm/s'\n attrib['u']['long_name'] = model.long_name['u']\n attrib['u']['_FillValue'] = fill_value\n # meridional tidal currents\n attrib['v'] = {}\n attrib['v']['description'] = model.description['v']\n attrib['v']['reference'] = model.reference\n attrib['v']['model'] = model.name\n attrib['v']['units'] = 'cm/s'\n attrib['v']['long_name'] = model.long_name['v']\n attrib['v']['_FillValue'] = fill_value\n # time\n attrib['time'] = {}\n attrib['time']['long_name'] = 'Time'\n attrib['time']['units'] = 'days since 1992-01-01T00:00:00'\n attrib['time']['calendar'] = 'standard'\n\n # read input file to extract time, spatial coordinates and data\n if (FORMAT == 'csv'):\n parse_dates = (TIME_STANDARD.lower() == 'datetime')\n dinput = pyTMD.spatial.from_ascii(input_file, columns=VARIABLES,\n delimiter=DELIMITER, header=HEADER, parse_dates=parse_dates)\n elif (FORMAT == 'netCDF4'):\n field_mapping = pyTMD.spatial.default_field_mapping(VARIABLES)\n dinput = pyTMD.spatial.from_netCDF4(input_file,\n field_mapping=field_mapping)\n elif (FORMAT == 'HDF5'):\n field_mapping = pyTMD.spatial.default_field_mapping(VARIABLES)\n dinput = pyTMD.spatial.from_HDF5(input_file,\n field_mapping=field_mapping)\n elif (FORMAT == 'geotiff'):\n dinput = pyTMD.spatial.from_geotiff(input_file)\n # copy global geotiff attributes for projection and grid parameters\n for att_name in ['projection','wkt','spacing','extent']:\n attrib[att_name] = dinput['attributes'][att_name]\n # update time variable if entered as argument\n if TIME is not None:\n dinput['time'] = np.copy(TIME)\n\n # converting x,y from projection to latitude/longitude\n crs1 = get_projection(dinput['attributes'], PROJECTION)\n crs2 = pyproj.CRS.from_epsg(4326)\n transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n if (TYPE == 'grid'):\n ny, nx = (len(dinput['y']), len(dinput['x']))\n gridx, gridy = np.meshgrid(dinput['x'], dinput['y'])\n lon, lat = transformer.transform(gridx, gridy)\n elif (TYPE == 'drift'):\n lon, lat = transformer.transform(dinput['x'], dinput['y'])\n elif (TYPE == 'time series'):\n nstation = len(dinput['y'])\n lon, lat = transformer.transform(dinput['x'], dinput['y'])\n\n # extract time units from netCDF4 and HDF5 attributes or from TIME_UNITS\n try:\n time_string = dinput['attributes']['time']['units']\n epoch1, to_secs = pyTMD.time.parse_date_string(time_string)\n except (TypeError, KeyError, ValueError):\n epoch1, to_secs = pyTMD.time.parse_date_string(TIME_UNITS)\n\n # convert delta times or datetimes objects to timescale\n if (TIME_STANDARD.lower() == 'datetime'):\n timescale = pyTMD.time.timescale().from_datetime(\n dinput['time'].flatten())\n else:\n # convert time to seconds\n delta_time = to_secs*dinput['time'].flatten()\n timescale = 
pyTMD.time.timescale().from_deltatime(delta_time,\n epoch=epoch1, standard=TIME_STANDARD)\n # number of time points\n nt = len(timescale)\n\n # python dictionary with output data\n output = {'time':timescale.tide, 'lon':lon, 'lat':lat}\n # iterate over u and v currents\n for t in model.type:\n # read tidal constants and interpolate to grid points\n if model.format in ('OTIS','ATLAS','TMD3'):\n amp,ph,D,c = pyTMD.io.OTIS.extract_constants(lon.flatten(), lat.flatten(),\n model.grid_file, model.model_file['u'], model.projection,\n type=t, method=METHOD, extrapolate=EXTRAPOLATE, cutoff=CUTOFF,\n grid=model.format)\n deltat = np.zeros((nt))\n elif (model.format == 'netcdf'):\n amp,ph,D,c = pyTMD.io.ATLAS.extract_constants(lon.flatten(), lat.flatten(),\n model.grid_file, model.model_file[t], type=t, method=METHOD,\n extrapolate=EXTRAPOLATE, cutoff=CUTOFF, scale=model.scale,\n compressed=model.compressed)\n deltat = np.zeros((nt))\n elif (model.format == 'FES'):\n amp,ph = pyTMD.io.FES.extract_constants(lon.flatten(), lat.flatten(),\n model.model_file[t], type=t, version=model.version,\n method=METHOD, extrapolate=EXTRAPOLATE, cutoff=CUTOFF,\n scale=model.scale, compressed=model.compressed)\n # available model constituents\n c = model.constituents\n # delta time (TT - UT1)\n deltat = timescale.tt_ut1\n\n # calculate complex phase in radians for Euler's\n cph = -1j*ph*np.pi/180.0\n # calculate constituent oscillation\n hc = amp*np.exp(cph)\n\n # predict tidal currents at time and infer minor corrections\n if (TYPE == 'grid'):\n output[t] = np.ma.zeros((ny,nx,nt),fill_value=fill_value)\n output[t].mask = np.zeros((ny,nx,nt),dtype=bool)\n for i in range(nt):\n TIDE = pyTMD.predict.map(timescale.tide[i], hc, c,\n deltat=deltat[i], corrections=model.format)\n MINOR = pyTMD.predict.infer_minor(timescale.tide[i], hc, c,\n deltat=deltat[i], corrections=model.format)\n # add major and minor components and reform grid\n output[t][:,:,i] = np.reshape((TIDE+MINOR), (ny,nx))\n output[t].mask[:,:,i] = np.reshape((TIDE.mask | MINOR.mask),\n (ny,nx))\n elif (TYPE == 'drift'):\n output[t] = np.ma.zeros((nt), fill_value=fill_value)\n output[t].mask = np.any(hc.mask,axis=1)\n output[t].data[:] = pyTMD.predict.drift(timescale.tide, hc, c,\n deltat=deltat, corrections=model.format)\n minor = pyTMD.predict.infer_minor(timescale.tide, hc, c,\n deltat=deltat, corrections=model.format)\n output[t].data[:] += minor.data[:]\n elif (TYPE == 'time series'):\n output[t] = np.ma.zeros((nstation,nt),fill_value=fill_value)\n output[t].mask = np.zeros((nstation,nt),dtype=bool)\n for s in range(nstation):\n # calculate constituent oscillation for station\n TIDE = pyTMD.predict.time_series(timescale.tide, hc[s,None,:], c,\n deltat=deltat, corrections=model.format)\n MINOR = pyTMD.predict.infer_minor(timescale.tide, hc[s,None,:], c,\n deltat=deltat, corrections=model.format)\n output[t].data[s,:] = TIDE.data[:] + MINOR.data[:]\n output[t].mask[s,:] = (TIDE.mask | MINOR.mask)\n # replace invalid values with fill value\n output[t].data[output[t].mask] = output[t].fill_value\n\n # output to file\n if (FORMAT == 'csv'):\n pyTMD.spatial.to_ascii(output, attrib, output_file,\n delimiter=DELIMITER, header=False,\n columns=['time','lat','lon','u','v'])\n elif (FORMAT == 'netCDF4'):\n pyTMD.spatial.to_netCDF4(output, attrib, output_file, data_type=TYPE)\n elif (FORMAT == 'HDF5'):\n pyTMD.spatial.to_HDF5(output, attrib, output_file)\n elif (FORMAT == 'geotiff'):\n # merge current variables into a single variable\n output['data'] = 
np.concatenate((output['u'],output['v']),axis=-1)\n attrib['data'] = {'_FillValue':fill_value}\n pyTMD.spatial.to_geotiff(output, attrib, output_file,\n varname='data')\n # change the permissions level to MODE\n output_file.chmod(mode=MODE)\n\n# PURPOSE: create argument parser\ndef arguments():\n parser = argparse.ArgumentParser(\n description=\"\"\"Calculates zonal and meridional tidal currents for\n an input file\n \"\"\",\n fromfile_prefix_chars=\"@\"\n )\n parser.convert_arg_line_to_args = pyTMD.utilities.convert_arg_line_to_args\n group = parser.add_mutually_exclusive_group(required=True)\n # command line options\n # input and output file\n parser.add_argument('infile',\n type=pathlib.Path, nargs='?',\n help='Input file to run')\n parser.add_argument('outfile',\n type=pathlib.Path, nargs='?',\n help='Computed output file')\n # set data directory containing the tidal data\n parser.add_argument('--directory','-D',\n type=pathlib.Path,\n help='Working data directory')\n # tide model to use\n choices = sorted(pyTMD.io.model.ocean_current())\n group.add_argument('--tide','-T',\n type=str, choices=choices,\n help='Tide model to use in calculating currents')\n parser.add_argument('--atlas-format',\n type=str, choices=('OTIS','netcdf'), default='netcdf',\n help='ATLAS tide model format')\n parser.add_argument('--gzip','-G',\n default=False, action='store_true',\n help='Tide model files are gzip compressed')\n # tide model definition file to set an undefined model\n group.add_argument('--definition-file',\n type=pathlib.Path,\n help='Tide model definition file')\n # input and output data format\n parser.add_argument('--format','-F',\n type=str, default='csv', choices=('csv','netCDF4','HDF5','geotiff'),\n help='Input and output data format')\n # variable names (for csv names of columns)\n parser.add_argument('--variables','-v',\n type=str, nargs='+', default=['time','lat','lon','data'],\n help='Variable names of data in input file')\n # number of header lines for csv files\n parser.add_argument('--header','-H',\n type=int, default=0,\n help='Number of header lines for csv files')\n # delimiter for csv or ascii files\n parser.add_argument('--delimiter',\n type=str, default=',',\n help='Delimiter for csv or ascii files')\n # input data type\n # drift: drift buoys or satellite/airborne altimetry (time per data point)\n # grid: spatial grids or images (single time for all data points)\n # time series: station locations with multiple time values\n parser.add_argument('--type','-t',\n type=str, default='drift',\n choices=('drift','grid','time series'),\n help='Input data type')\n # time epoch (default Modified Julian Days)\n # in form \"time-units since yyyy-mm-dd hh:mm:ss\"\n parser.add_argument('--epoch','-e',\n type=str, default='days since 1858-11-17T00:00:00',\n help='Reference epoch of input time')\n # input delta time for files without date information\n parser.add_argument('--deltatime','-d',\n type=float, nargs='+',\n help='Input delta time for files without date variables')\n # input time standard definition\n parser.add_argument('--standard','-s',\n type=str, choices=('UTC','GPS','TAI','LORAN','datetime'), default='UTC',\n help='Input time standard definition')\n # spatial projection (EPSG code or PROJ4 string)\n parser.add_argument('--projection','-P',\n type=str, default='4326',\n help='Spatial projection as EPSG code or PROJ4 string')\n # interpolation method\n parser.add_argument('--interpolate','-I',\n metavar='METHOD', type=str, default='spline',\n 
choices=('spline','linear','nearest','bilinear'),\n        help='Spatial interpolation method')\n    # extrapolate with nearest-neighbors\n    parser.add_argument('--extrapolate','-E',\n        default=False, action='store_true',\n        help='Extrapolate with nearest-neighbors')\n    # extrapolation cutoff in kilometers\n    # set to inf to extrapolate over all points\n    parser.add_argument('--cutoff','-c',\n        type=np.float64, default=10.0,\n        help='Extrapolation cutoff in kilometers')\n    # verbose output of processing run\n    # print information about each input and output file\n    parser.add_argument('--verbose','-V',\n        default=False, action='store_true',\n        help='Verbose output of run')\n    # permissions mode of the local files (number in octal)\n    parser.add_argument('--mode','-M',\n        type=lambda x: int(x,base=8), default=0o775,\n        help='Permission mode of output file')\n    # return the parser\n    return parser\n\n# This is the main part of the program that calls the individual functions\ndef main():\n    # Read the system arguments listed after the program\n    parser = arguments()\n    args,_ = parser.parse_known_args()\n\n    # set output file from input filename if not entered\n    if not args.outfile:\n        vars = (args.infile.stem,args.tide,'_currents',args.infile.suffix)\n        args.outfile = args.infile.with_name('{0}_{1}{2}{3}'.format(*vars))\n\n    # run tidal current program for input file\n    compute_tidal_currents(args.directory, args.infile, args.outfile,\n        TIDE_MODEL=args.tide,\n        ATLAS_FORMAT=args.atlas_format,\n        GZIP=args.gzip,\n        DEFINITION_FILE=args.definition_file,\n        FORMAT=args.format,\n        VARIABLES=args.variables,\n        HEADER=args.header,\n        DELIMITER=args.delimiter,\n        TYPE=args.type,\n        TIME_UNITS=args.epoch,\n        TIME=args.deltatime,\n        TIME_STANDARD=args.standard,\n        PROJECTION=args.projection,\n        METHOD=args.interpolate,\n        EXTRAPOLATE=args.extrapolate,\n        CUTOFF=args.cutoff,\n        VERBOSE=args.verbose,\n        MODE=args.mode)\n\n# run main program\nif __name__ == '__main__':\n    main()\n","sub_path":"scripts/compute_tidal_currents.py","file_name":"compute_tidal_currents.py","file_ext":"py","file_size_in_byte":23140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"67893358","text":"import random\nwinning_num = random.randint(1,100)\nguess = 1\nnum = int(input(\"guess a number between 1 and 100: \"))\ngame_over = False\n\nwhile not game_over:\n    if num == winning_num:\n        print(f\"YOU WIN !!!
. You guessed it in {guess} tries!\")\n        break\n    else:\n        if num > winning_num:\n            print(\"too high\")\n            guess += 1\n            num = int(input(\"guess again :\"))\n        else:\n            print(\"too low\")\n            guess += 1\n            num = int(input(\"guess again :\"))\n\n\n","sub_path":"new_python/begin_1.py","file_name":"begin_1.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"600410853","text":"import os\nimport utilities\nimport gmplot\nfrom entities import Instance, Solution\nfrom algorithm import Algorithm\n\n\nif __name__ == \"__main__\":\n    # Create random generator and set seed\n    r = utilities.RandGenerator()\n    r.set_seed(22)\n\n    # Read Instance\n    df, instance = utilities.read_instance_csv(\"data_small.csv\")\n    #instance = utilities.read_instance(\"instance_small.txt\")  # Deprecated\n    print(\"Instance has been read\")\n\n    # Compute distances\n    instance.compute_dist(\"default\")\n    print(\"Distances have been calculated\")\n\n    # Create a random solution\n    solution = Solution(instance)\n    algorithm = Algorithm(6, solution)\n    algorithm.random_sol()\n    print(\"Solution has been created\")\n\n    # Get objective functions\n    print(solution.get_objvalue(\"sumAllToCenter\"))\n    print(solution.get_objvalue(\"sumAllToAll\"))\n    print(solution.get_objvalue(\"loadRange\"))\n    print(\"Objective function has been calculated\")\n\n    # Print clusters\n    map = utilities.MapVisualiser()\n    # Set Google API key\n    # Read the documentation of the Google API to get your own key\n    #map.set_gmapkey(\"1AIzaSyBrcChgM41NgYRy7FL4oXoxkz6KJbrKyJY\")\n    map.draw_cluster(solution)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Districting/src/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"494362467","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport pickle\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split, RandomizedSearchCV\nfrom sklearn.metrics import accuracy_score, f1_score\n\n\nclass RandomForestTrainer():\n    def __init__(self, data_path, target_name):\n        self.data = pd.read_csv(data_path)\n        print(self.data)\n        self.X, self.y = self.data.drop([target_name], axis = 1), self.data[target_name]\n        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.33, random_state=42, stratify = self.y)\n\n    def train(self):\n        n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n        max_features = [\"auto\", \"sqrt\"]\n        max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]\n        max_depth.append(None)\n        min_samples_split = [2,5,10]\n        min_samples_leaf = [1,2,4]\n        bootstrap = [True, False]\n\n        random_grid = {\"n_estimators\":n_estimators,\n                       \"max_features\":max_features,\n                       \"max_depth\":max_depth,\n                       \"min_samples_split\":min_samples_split,\n                       \"min_samples_leaf\":min_samples_leaf,\n                       \"bootstrap\":bootstrap\n                       }\n\n        rf = RandomForestClassifier()\n        rf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, n_iter = 100, cv = 3, verbose = 2, random_state = 42, n_jobs = -1)\n\n        rf_random.fit(self.X_train, self.y_train)\n        # rebuild the classifier with the best hyperparameters found by the search\n        self.model = RandomForestClassifier(**rf_random.best_params_)\n        self.model.fit(self.X_train, self.y_train)\n\n    def test(self):\n        print(\"Accuracy =\", accuracy_score(self.model.predict(self.X_test), self.y_test))\n        print(\"F1 Score =\", f1_score
(self.model.predict(self.X_test), self.y_test))\n\n    def save(self, model_path):\n        pickle.dump(self.model, open(model_path, \"wb\"))\n\n    def predict(self, model_path):\n        \"\"\"\n        :param model_path:\n        :return:\n        \"\"\"\n        return None\n\ndef main():\n    data_path = sys.argv[1]\n    target_name = sys.argv[2]\n    model_path = sys.argv[3]\n    model = RandomForestTrainer(data_path, target_name)\n    model.train()\n    model.test()\n    model.save(model_path)\n\n\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"133599880","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Author : LEITENG\n@Version : \n------------------------------------\n@File : p39_VAE_mnist.py\n@Description : \n@CreateTime : 2020/6/23 10:15\n------------------------------------\n@ModifyTime : handwritten digit generation\n\nconditional VAE: CVAE\n\n\"\"\"\nimport p43_framework_muti_gpus as myf\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist.input_data import read_data_sets\nimport numpy as np\nimport cv2\nimport os\n\n\nclass MyConfig(myf.Config):\n    def __init__(self):\n        super(MyConfig, self).__init__()\n        # path separators differ between macOS and Windows; the expression below is portable\n        # self.sample_path = '..{sep}deeplearning_tensorflow_p{sep}MNIST_data'.format(sep=os.sep)\n        self.sample_path = './MNIST_data'\n        self.vector_size = 4\n        # momentum coefficient\n        self.momentum = 0.99\n        self.cols = 20\n        self.img_path = './imgs/{name}/test.jpg'.format(name=self.get_name())\n        self.batch_size = 500\n        self.epoches = 5\n\n    def get_name(self):\n        return 'p44'\n\n    def get_sub_tensors(self, gpu_idx):\n        return MySubTensors(self)\n\n    def get_tensors(self):\n        return MyTensors(self)\n\n\nclass MyTensors(myf.Tensors):\n    def get_loss_for_summary(self, loss):\n        return tf.sqrt(loss)\n\n\nclass MySubTensors:\n    def __init__(self, config: MyConfig):\n        self.config = config\n        with tf.device('/gpu:0'):\n            # use GPU 0 as numbered by the framework, not necessarily the OS's GPU 0\n            x = tf.placeholder(tf.float32, [None, 784], 'x')\n            label = tf.placeholder(tf.int32, [None], 'label')\n            self.inputs = [x, label]\n\n            x = tf.reshape(x, [-1, 28, 28, 1])\n            # [-1, 10, 4]\n            self.vec = self.encode(x, config.vector_size)  # [-1, 4]\n\n            self.process_normal(self.vec)  # mind the order!\n            self.y = self.decode(self.vec, label)  # [-1, 28, 28, 1]\n\n            self.losses = [tf.reduce_mean(tf.square(self.y - x))]\n            # loss = tf.reduce_mean(tf.square(self.y - x))\n            # opt = tf.train.AdamOptimizer(lr)\n            # with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n            #     # loss and assign are defined above train_op\n            #     # train_op depends on loss and assign\n            #     self.train_op = opt.minimize(loss)\n            # self.summary = tf.summary.scalar('loss', tf.sqrt(loss))\n            self.y = tf.reshape(self.y, [-1, 28, 28])\n\n    def process_normal(self, vec):\n        '''\n        compute running means (momentum method)\n        and store them as variables in the graph (via assign)\n        :param vec: [-1, vec_size]\n        :return:\n        '''\n        mean = tf.reduce_mean(vec, axis=0)  # [vec_size]\n        # mean square difference\n        msd = tf.reduce_mean(tf.square(vec), axis=0)\n\n        vector_size = vec.shape[1].value\n        # [10, 4]\n        self.final_mean = tf.get_variable('mean', [vector_size], tf.float32, tf.initializers.zeros, trainable=False)\n        self.final_msd = tf.get_variable('msd', [vector_size], tf.float32, tf.initializers.zeros, trainable=False)\n\n        mom = self.config.momentum\n        # final_mean = final_mean * mom + mean * (1 - mom)  # wrong approach: the variable would never be updated\n        assign = tf.assign(self.final_mean, self.final_mean * mom + mean * (1 - mom))\n        # create a control dependency between assign and train_op: the forward pass also evaluates assign, while backprop does not follow control dependencies\n        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign)
\n\n        assign = tf.assign(self.final_msd, self.final_msd*mom + msd*(1-mom))\n        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign)\n\n    def encode(self, x, vec_size):\n        '''\n        encode the x to vector which size is vec_size\n        :param x: input tensor, shape is [-1, 28, 28, 1]\n        :param vec_size:\n        :return: the semantics vectors which shape is [-1, vec_size]\n        '''\n        filters = 16\n        x = tf.layers.conv2d(x, filters, 3, 1, 'same', activation=tf.nn.relu, name='conv1')  # [-1, 28, 28, 16]\n        for i in range(2):\n            filters *= 2\n            # [-1, 28, 28, 32]  [-1, 14, 14, 64]\n            x = tf.layers.conv2d(x, filters, 3, 1, 'same', activation=tf.nn.relu, name='conv2_%d' % i)\n            # pooling creates no trainable parameters, so there is nothing extra to train\n            # [-1, 14, 14, 32]  [-1, 7, 7, 64]\n            x = tf.layers.max_pooling2d(x, 2, 2, 'valid')\n        # x: [-1, 7, 7, 64]\n        # use a convolution here (a fully-connected layer would also work)\n        # [-1, 1, 1, vec_size]\n        x = tf.layers.conv2d(x, vec_size, 7, 1, 'valid', name='conv3')\n        return tf.reshape(x, [-1, vec_size])\n\n    def decode(self, vec, label):\n        '''\n        use transposed convolution (upsampling); it restores only the spatial size, not the original values\n        the semantics vector\n        :param vec: [-1, vec_size]\n        :param label: [-1]\n        :return: [-1, 28, 28, 1]\n        '''\n        y = tf.layers.dense(vec, 7 * 7 * 64, activation=tf.nn.relu, name='dens_1')  # [-1 ,4] -> [-1, 7*7*64]\n        label = tf.one_hot(label, 10)\n        l = tf.layers.dense(label, 7 * 7 * 64, name='dense1')\n        y += l\n        # [-1, 7*7*64] -> [-1, 7, 7, 64]\n        y = tf.reshape(y, [-1, 7, 7, 64])\n        filters = 64\n        size = 7\n        for i in range(2):\n            filters //= 2\n            size *= 2\n            # two transposed convolutions: [-1, 14, 14, 32]  [-1, 28, 28, 16]\n            y = tf.layers.conv2d_transpose(y, filters, 3, 2, 'same', activation=tf.nn.relu, name='deconv1_%d' % i)\n            l = tf.layers.dense(label, size * size * filters, name='deconv_l_1_%d' % i)\n            l = tf.reshape(l, [-1, size, size, filters])\n            y += l\n        # [-1, 28, 28, 16]\n        y = tf.layers.conv2d_transpose(y, 1, 3, 1, 'same', name='deconv2')  # [-1, 28, 28, 1]\n        return y\n\n\nclass MyDS:\n    def __init__(self, ds, config):\n        self.ds = ds\n        self.num_examples = ds.num_examples\n\n    def next_batch(self, batch_size):\n        xs, labels = self.ds.next_batch(batch_size)\n        return xs, labels\n\n\nclass App:\n    def __init__(self):\n        pass\n\n\ndef predict(app, samples, path, cols):\n    mean = app.session.run(app.ts.sub_ts[0].final_mean)\n    print(mean)\n    msd = app.session.run(app.ts.sub_ts[0].final_msd)  # second raw moment\n    std = np.sqrt(msd - mean ** 2)\n    print(std)\n\n    vec = np.random.normal(mean, std, [samples, len(std)])\n    label = [e % 10 for e in range(samples)]\n    # in feed_dict, any tensor can be used as a key\n    # imgs = app.session.run(app.ts.y, {app.ts.vec: vec, app.ts.inputs[-1]: label})  # [-1, 28, 28]\n    imgs = app.session.run(app.ts.sub_ts[0].y,\n                           {app.ts.sub_ts[0].vec: vec, app.ts.sub_ts[0].inputs[-1]: label})  # [-1, 28, 28]\n\n    # method 1:\n    imgs = np.reshape(imgs, [-1, cols, 28, 28])\n    imgs = np.transpose(imgs, [0, 2, 1, 3])  # [-1, 28, 20, 28]\n    # imgs = np.reshape(imgs, [-1, 28, cols*28])\n    # imgs = np.transpose(imgs, [1, 0, 2])  # [28, -1, 20*28]\n    imgs = np.reshape(imgs, [-1, cols*28])\n    # method 2:\n    # imgs = np.transpose(imgs, [1, 0, 2])\n    # imgs = np.reshape(imgs, [-1, 28, cols * 28])\n    # imgs = np.transpose(imgs, [1, 0, 2])\n    # imgs = np.reshape(imgs, [-1, cols * 28])\n\n    myf.make_dirs(path)\n    cv2.imwrite(path, imgs*255)\n    print('write image into', path)\n\n\nif __name__ == '__main__':\n    cfg = MyConfig()\n    cfg.from_cmd()\n    print('_'*20)\n    print(cfg)\n\n    dss = read_data_sets(cfg.sample_path)\n    app = myf.App(cfg)\n    with app:\n        # app.train(MyDS(dss.train, cfg), MyDS(dss.validation, cfg))\n        predict(app, cfg.batch_size, cfg.img_path, cfg.cols)
\n","sub_path":"deeplearning_tensorflow_p/p44_CVAE_mutigpus.py","file_name":"p44_CVAE_mutigpus.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"338917107","text":"# Import the required module for text-to-speech conversion \r\n\r\nfrom gtts import gTTS \r\n\r\n \r\n# to play the converted audio \r\nimport os \r\n\r\n# The text that you want to convert to audio \r\nmytext = 'your text is converted to speech!'\r\n\r\n# Language in which you want to convert \r\nlanguage = 'en'\r\n\r\n# Passing the text and language to the engine \r\n\r\nmyobj = gTTS(text=mytext, lang=language, slow=False) \r\n\r\n# Saving the converted audio in an mp3 file named my.mp3 \r\n\r\nmyobj.save(\"my.mp3\") \r\n\r\n# Playing the converted file \r\nos.system(\"mpg321 my.mp3\") \r\n","sub_path":"text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"324466825","text":"\"\"\"\nStack to queue converter.\n\"\"\"\n\nfrom arraystack import ArrayStack\nfrom arrayqueue import ArrayQueue\nimport copy\n\n\ndef stack_to_queue(stack):\n    \"\"\"\n    Convert a stack to a queue\n    \"\"\"\n    input_stack = copy.deepcopy(stack)\n    output_queue = ArrayQueue()\n    while True:\n        try:\n            output_queue.add(input_stack.pop())\n        except KeyError:\n            break\n    return output_queue\n","sub_path":"stack_to_queue.py","file_name":"stack_to_queue.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"352877140","text":"from faker.providers import BaseProvider\nimport random\nimport time\n\n\nclass MetricProvider(BaseProvider):\n    def hostname(self):\n        validIds = [\n            \"doc\",\n            \"grumpy\",\n            \"sleepy\",\n            \"bashful\",\n            \"happy\",\n            \"sneezy\",\n            \"dopey\",\n        ]\n        return validIds[random.randint(0, len(validIds) - 1)]\n\n    def cpu_id(self):\n        validIds = [\"cpu1\", \"cpu2\", \"cpu3\", \"cpu4\", \"cpu5\"]\n        return validIds[random.randint(0, len(validIds) - 1)]\n\n    def usage(self):\n        return random.random() * 30 + 70\n\n    def produce_msg(self):\n        hostname = self.hostname()\n        ts = time.time()\n        message = {\n            \"hostname\": hostname,\n            \"cpu\": self.cpu_id(),\n            \"usage\": self.usage(),\n            \"occurred_at\": int(ts * 1000),\n        }\n        key = {\"hostname\": hostname}\n        return message, key\n","sub_path":"metricproducer.py","file_name":"metricproducer.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"56756678","text":"from app import app\n\nfrom flask import render_template,request,jsonify\n\n\nfrom forms import GetQuestion\n\n\nfrom random import randint\n\n\"\"\"@app.route('/lotto/')\ndef new_numbers():\n    game_nums = []\n    while len(game_nums) < 6:\n        n = randint(1,47)\n        if not n in game_nums:\n            game_nums.append(n)\n    game_nums.sort()\n    return jsonify(result1=game_nums)\n    \"\"\" \n \n@app.route('/8ball/')\ndef get_answer():\n    quest = request.args.get('question')\n    choice = randint(1,20)\n    #https://en.wikipedia.org/wiki/Magic_8-Ball\n    if choice ==1:\n        response = \"It is certain\"\n    elif choice ==2:\n        response = \"It is decidedly so\"\n    elif choice ==3:\n        response = \"Without a doubt\"\n    elif choice ==4:\n        response = \"Yes, definitely\"\n    elif choice ==5:\n        response = \"You may rely on it\"\n    elif choice ==6:\n        response = \"As I see it, yes\"\n    elif choice ==7:\n        response = \"Most 
likely\"\n elif choice ==8:\n response = \"Outlook good\"\n elif choice ==9:\n response = \"Yes\"\n elif choice ==10:\n response = \"Signs point to yes\"\n elif choice ==11:\n response = \"Reply hazy try again\"\n elif choice ==12:\n response = \"Ask again later\"\n elif choice ==13:\n response = \"Better not tell you now\"\n elif choice ==14:\n response = \"Cannot predict now\"\n elif choice ==15:\n response = \"Concentrate and ask again\"\n elif choice ==16:\n response = \"Don't count on it\"\n elif choice ==17:\n response = \"My reply is no\"\n elif choice ==18:\n response = \"My sources say no\"\n elif choice ==19:\n response = \"Outlook not so good\"\n elif choice ==20:\n response = \"Very doubtful\"\n return jsonify(result=response)\n \n@app.route('/')\ndef index():\n game_nums = []\n while len(game_nums) < 6:\n n = randint(1,47)\n if not n in game_nums:\n game_nums.append(n)\n game_nums.sort()\n return render_template('index.html',game_nums = game_nums)\n\n@app.route('/die/')\ndef get_die():\n a = randint(1,6)\n return jsonify(result2 = a) ","sub_path":"project-class/app/.~c9_invoke_cZ5PYS.py","file_name":".~c9_invoke_cZ5PYS.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"10984096","text":"train_sent = open('train_sentiments.txt','r')\ntest_sent = open('test_sentiments.txt','r')\n\nY = []\nfor line in train_sent:\n Y += [line[0:-1]]\n\nZ = []\nfor line in test_sent:\n Z += [line[0:-1]]\n\ntrain_sent.close()\ntest_sent.close()\n\n\n\nimport numpy\nimport codecs\nfrom sklearn.svm import LinearSVC\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nword_vectorizer = CountVectorizer(analyzer='word')\nX = word_vectorizer.fit_transform(codecs.open('train_tweets.txt','r','utf8'))\n\nclf = LinearSVC()\n\nparams_space = { 'C': numpy.logspace(-6,0,11), 'class_weight':[None,'auto']}\ngscv = GridSearchCV(clf,params_space,cv=3)\n\ngscv.fit(X, Y)\nprint(gscv.best_estimator_, gscv.best_params_, gscv.best_score_)\n\ntestset = word_vectorizer.transform(codecs.open('test_tweets.txt','r','utf8'))\nresults = gscv.predict(testset)\n#print(results[0::])\n\ncorrect = wrong = total = 0\nwl = []\n\nfor i in results:\n total += 1\n if i == Z[total - 1]:\n correct += 1\n else:\n wrong += 1\n wl += [Z[total - 1]]\n\npercent_right = correct/total * 100 \nprint(str(correct) + ' correct \\n' + str(wrong) + ' wrong \\n' + str(total) + ' total \\n' + str(percent_right) + '% correct')\n\ns = a = n = 0\n\nfor i in wl:\n if i == 'skeptical':\n s += 1\n if i == 'neutral':\n n += 1\n if i == 'activist':\n a += 1\n\nprint('\\nWrong breakdown:\\n' + str(s) + ' skeptical\\n' + str(n) + ' neutral\\n' + str(a) + ' activist')\n\n\n\n\n\n\n\n\n'''\n######## helper function ##########\n## determines if the tweet has already been processed\n## will return true if it has been\ndef already_in(twt):\n a = False\n check_file = open('tweets_sentiment3.txt','r')\n for aline in check_file:\n if twt in aline:\n if 'redundant' in aline:\n break\n else:\n a = True\n break\n check_file.close()\n return a\n\n\n\n\n###### main program #########\nclimate_file = open('climate_19_Jul_2015.txt','r')\n#climate_file = codecs.open('climate_19_Jul_2015.txt','r','utf8')\n\nfor line in climate_file:\n sentiment = ''\n tweet = line.split('\":::text: \"')[1].split('\":::id: ')[0]\n if already_in(tweet):\n print('Already have that tweet')\n continue\n \n print('\\n' + tweet + '\\n')\n ans = 
input('1:skeptical, 2:neutral, 3:activist, 4:redundant --- type \"end\" to quit \\n')\n while (ans != 'end') and (ans != '1') and (ans != '2') and (ans != '3') and (ans != '4'):\n ans = input('1:skeptical, 2:neutral, 3:activist, 4;redundant --- type \"end\" to quit \\n')\n if ans == 'end':\n break\n if ans == '1':\n sentiment = 'skeptical'\n if ans == '2':\n sentiment = 'neutral'\n if ans == '3':\n sentiment = 'activist'\n if ans == '4':\n sentiment = 'redundant'\n tweets_file = open('tweets_sentiment2.txt','w')\n tweets_file.write(tweet)\n tweets_file.close()\n \n tweets_file3 = open('tweets_sentiment3.txt','a')\n tweets_file3.write(sentiment + ' ::---:: ' + tweet + '\\n')\n tweets_file3.close() \n \n test = word_vectorizer.transform(codecs.open('tweets_sentiment2.txt','r','utf8'))\n results = gscv.predict(test)\n print('\\n' + results[0] + '\\n')\n \n \n \n rw_file = open('right_wrong.txt','a') \n \n if results[0] == sentiment:\n print('correct')\n rw_file.write('correct\\n')\n else:\n print('wrong \\n guess: ' + results[0] + '\\n actual: ' + sentiment)\n rw_file.write('wrong\\n')\n \n answer = input('Would you like to add this tweet to the training set? (y or n)\\n')\n while answer != 'y' and answer != 'n':\n answer = input('Would you like to add this tweet to the training set? (y or n)\\n') \n \n if answer == 'y':\n add = open('tweets_sentiment_refined.txt','a')\n add.write(sentiment + ' ::---:: ' + tweet)\n add.close()\n \n rw_file.close()\n \nrw_file2 = open('right_wrong.txt','r')\nr = w = 0\n\nfor line in rw_file2:\n if 'correct' in line:\n r += 1\n if 'wrong' in line:\n w += 1\nprint(r)\nprint(w)\nper = (r / (r + w)) * 100 \nprint(str(r) + ' right\\n' + str(w) + ' wrong\\n' + str(per) + ' % correct') \n\nrw_file2.close()\nclimate_file.close()\n'''","sub_path":"Vince_Talbot_Code_Training_Set/work again/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637751031","text":"#!/usr/bin/python\n\n\"\"\"\nWindows user information\n\"\"\"\n\n# >>> win32net.NetUserGetInfo(None,\"rchateau\",3)\n# {'comment': u'', 'workstations': u'', 'country_code': 0L, 'last_logon': 1480721751L, 'password_expired': 0L, 'full_name': u'', 'parm\n# s': u'', 'code_page': 0L, 'priv': 2L, 'auth_flags': 0L, 'logon_server': u'\\\\\\\\*', 'home_dir': u'', 'home_dir_drive': u'', 'usr_comme\n# nt': u'', 'profile': u'', 'acct_expires': 4294967295L, 'primary_group_id': 513L, 'bad_pw_count': 0L, 'user_id': 1001L, 'logon_hours'\n# : '\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff', 'password': None, 'units_per_week': 168L,\n# 'last_logoff': 0L, 'name': u'rchateau', 'max_storage': 4294967295L, 'num_logons': 15896L, 'password_age': 45314825L, 'flags': 66081L\n# , 'script_path': u''}\n\nimport sys\nimport lib_util\nimport lib_common\nfrom lib_properties import pc\n\nimport win32net\n\nfrom sources_types import Win32_UserAccount as survol_Win32_UserAccount\n\nUsable = lib_util.UsableWindows\n\nCanProcessRemote = True\n\ndef Main():\n\tcgiEnv = lib_common.CgiEnv(can_process_remote = True)\n\n\ttry:\n\t\t# Exception if local machine.\n\t\thostName = cgiEnv.m_entity_id_dict[\"Domain\"]\n\texcept KeyError:\n\t\thostName = None\n\n\tif not hostName or lib_util.IsLocalAddress( hostName ):\n\t\tserverBox = lib_common.gUriGen\n\t\tserverNode = lib_common.nodeMachine\n\t\tservName_or_None = None\n\telse:\n\t\tserverBox = 
lib_common.RemoteBox(hostName)\n\t\tserverNode = lib_common.gUriGen.HostnameUri(hostName)\n\t\tservName_or_None = hostName\n\n\t\t# hostname = \"Titi\" for example\n\t\t# lib_win32.WNetAddConnect(hostName)\n\n\n\n\n\n\tuserName = cgiEnv.m_entity_id_dict[\"Name\"]\n\n\tgrph = cgiEnv.GetGraph()\n\n\tnodeUser = survol_Win32_UserAccount.MakeUri( userName, hostName )\n\n\ttry:\n\t\tinfoList = win32net.NetUserGetInfo(servName_or_None, userName, 2)\n\texcept:\n\t\tlib_common.ErrorMessageHtml(\"Error:\"+str(sys.exc_info()))\n\n\tfor infoKey in infoList:\n\n\t\ttry:\n\t\t\tinfoVal = infoList[infoKey]\n\t\t\tgrph.add( ( nodeUser, lib_common.MakeProp(infoKey), lib_common.NodeLiteral(infoVal) ) )\n\t\texcept:\n\t\t\ttxtDisp = str( sys.exc_info()[1] )\n\t\t\tgrph.add( ( nodeUser, lib_common.MakeProp(infoKey), lib_common.NodeLiteral(txtDisp) ) )\n\n\n\n\tcgiEnv.OutCgiRdf()\n\nif __name__ == '__main__':\n\tMain()\n\n\n","sub_path":"survol/sources_types/Win32_UserAccount/Win32_NetUserGetInfo.py","file_name":"Win32_NetUserGetInfo.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"462407876","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os.path\n\nfrom OpenGL.GL import *\n\nimport pyassimp as assimp\nfrom mesh import Mesh\n\n\nclass Model(object):\n\n def __init__(self, path, gamma=False):\n self.gammaCorrection = gamma\n self.meshes = []\n self.textures_loaded = []\n self.directory = ''\n\n self.loadModel(path)\n\n def draw(self, shader):\n for mesh in self.meshes:\n mesh.draw(shader)\n\n def loadModel(self, path):\n scene = assimp.load(path, processing=(assimp.postprocess.aiProcess_Triangulate |\n assimp.postprocess.aiProcess_FlipUVs |\n assimp.postprocess.aiProcess_CalcTangentSpace))\n if not scene:\n raise Exception(\"ASSIMP can't load model\")\n\n self.directory = os.path.dirname(path)\n\n for mesh in scene.meshes:\n self.meshes.append(Mesh(mesh, self.directory))\n\n assimp.release(scene)\n\n # self.__processNode(scene)\n #\n # def __processNode(self, scene):\n # for mesh in scene.meshes:\n # self.meshes.append(Mesh(mesh))\n #\n # assimp.release(scene)","sub_path":"pysrc/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"447328804","text":"import cv2\nimport time\nimport numpy as np\nfrom pyardrone import ARDrone, at\nimport threading\n\nimport move\n\n\ndef nothing(x):\n pass\n\n\ndef init():\n uav = ARDrone()\n print(\"Initiating - waiting..\")\n uav.navdata_ready.wait()\n print(\"NavData Ready\")\n uav.send(at.CONFIG('general:navdata_demo', True))\n time.sleep(0.1)\n uav.send(at.CONFIG(\"video:video_channel\", 1))\n time.sleep(0.1)\n print(\"Battery = \", uav.navdata.demo.vbat_flying_percentage)\n\n if uav.state.vbat_low:\n print(\"Battery to low, please replace before flying\")\n exit()\n if uav.state.video_mask == 0:\n print(\"Video Disabled\")\n if uav.state.vision_mask == 0:\n print(\"Vision Disabled\")\n if uav.state.altitude_mask == 0:\n print(\"Altitude control inactive\")\n if uav.state.camera_mask == 0:\n print(\"Camera not ready\")\n if uav.state.travelling_mask == 0:\n print(\"Travelling mask disabled\")\n if uav.state.usb_mask == 0:\n print(\"USB key not ready\")\n if uav.state.navdata_demo_mask == 0:\n print(\"navdata demo not activated\")\n if uav.state.navdata_bootstrap:\n print(\"no navdata options send\")\n if uav.state.motors_mask:\n 
print(\"Motors problem\")\n if uav.state.com_lost_mask:\n print(\"Communication problem\")\n if uav.state.software_fault:\n print(\"Software fault detected\")\n if uav.state.magneto_needs_calib:\n print(\"Magneto calibration needed\")\n if uav.state.angles_out_of_range:\n print(\"angles_out_of_range\")\n if uav.state.wind_mask:\n print(\"Too much wind\")\n if uav.state.ultrasound_mask:\n print(\"Ultrasonic sensor deaf\")\n if uav.state.cutout_mask:\n print(\"Cutout system detected\")\n\n while uav.state.emergency_mask:\n print(\"Emergency\")\n uav.send(at.REF(0b0100000000))\n time.sleep(1)\n\n # Create windows and sliders\n cv2.namedWindow(\"Image\", cv2.WINDOW_AUTOSIZE)\n\n # cv2.namedWindow('slider', cv2.WINDOW_AUTOSIZE)\n # cv2.moveWindow('slider', 640, 0)\n # cv2.resizeWindow('slider', 560, 400)\n # cv2.createTrackbar('B', 'slider', 0, 255, nothing)\n # cv2.createTrackbar('G', 'slider', 0, 255, nothing)\n # cv2.createTrackbar('R', 'slider', 0, 255, nothing)\n #\n # cv2.createTrackbar('B1', 'slider', 0, 255, nothing)\n # cv2.createTrackbar('G1', 'slider', 0, 255, nothing)\n # cv2.createTrackbar('R1', 'slider', 0, 255, nothing)\n #\n # cv2.createTrackbar('kernel', 'slider', 1, 20, nothing)\n # cv2.setTrackbarPos('kernel', 'slider', 2)\n\n return uav\n\n\ndef filter_image(img, lower_mask, upper_mask):\n # For calibrating for different backgrounds\n # set sliders to start values\n\n # cv2.setTrackbarPos('B', 'slider', lower_mask[0])\n # cv2.setTrackbarPos('G', 'slider', lower_mask[1])\n # cv2.setTrackbarPos('R', 'slider', lower_mask[2])\n # cv2.setTrackbarPos('B1', 'slider', upper_mask[0])\n # cv2.setTrackbarPos('G1', 'slider', upper_mask[1])\n # cv2.setTrackbarPos('R1', 'slider', upper_mask[2])\n\n # # wait a bit to update\n # cv2.waitKey(5)\n\n # # Read slider positions\n # b = cv2.getTrackbarPos('B', 'slider')\n # g = cv2.getTrackbarPos('G', 'slider')\n # r = cv2.getTrackbarPos('R', 'slider')\n # b1 = cv2.getTrackbarPos('B1', 'slider')\n # g1 = cv2.getTrackbarPos('G1', 'slider')\n # r1 = cv2.getTrackbarPos('R1', 'slider')\n kernelsize = 2\n # kernelsize = cv2.getTrackbarPos('kernel', 'slider')\n kernel = np.ones((kernelsize, kernelsize), np.uint8)\n #\n # # Build mask array from sliders\n # lower_unit = np.array([b, g, r])\n # upper_unit = np.array([b1, g1, r1])\n\n lower_unit = lower_mask\n upper_unit = upper_mask\n # Convert image to HSV\n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n\n # Filter colors\n mask = cv2.inRange(hsv, lower_unit, upper_unit)\n res = cv2.bitwise_and(img, img, mask=mask)\n # cv2.imshow(\"res\", res)\n\n # Convert to grayscale\n gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\n\n # Make binary image\n ret, thres = cv2.threshold(gray, 20, 255, 0)\n\n # Close some holes\n thres = cv2.morphologyEx(thres, cv2.MORPH_CLOSE, kernel)\n\n # Return binary image and slider data, so program remembers their position\n return thres\n\n\ndef takeoff(drone):\n print(\"Take-off..\")\n while not drone.state.fly_mask:\n drone.takeoff()\n\n print(\"Hovering\")\n timeout = time.time() + 6\n while True:\n drone.hover()\n if time.time() > timeout:\n break\n\n print(\"Going up\")\n altitude = drone.navdata.demo.altitude\n while altitude < 1800:\n drone.move(up=0.2)\n altitude = drone.navdata.demo.altitude\n drone.move(up=0)\n\n print(\"Hovering\")\n timeout = time.time() + 3\n while True:\n drone.hover()\n\n if time.time() > timeout:\n break\n\n\nclass MoveData:\n def __init__(self, marker, dir_x, speed_x, dir_y, speed_y):\n self.speed_y = speed_y\n self.dir_y = dir_y\n self.speed_x = 
speed_x\n        self.dir_x = dir_x\n        self.marker = marker\n\n\nprint(\"\\tStarting Program\")\ncam = cv2.VideoCapture('tcp://192.168.1.1:5555')\nprint(\"\\tVideoCapture ready\")\ndrone = init()\n\n# Data for debugging\nlower_mask = np.array([0, 4, 148])\nupper_mask = np.array([255, 255, 255])\ni = 1\nret = True\n\nnextMarker = 1  # first marker\nmaxMarkers = 4  # number of markers +1\nfirstMarker = True\nspeed = 0.1  # speed of drone\n\nlookForNextMarker = False\nMarkerFound = False\n\n# Create things for thread that moves the drone\nmoveData = MoveData(False, 0, speed, 0, speed)\nmovethread = threading.Thread(target=move.droneMove, args=(moveData, drone))\n\n# takeoff(drone)\n\nwhile True:\n    # img = cv2.imread(\"drone/img\" + str(i) + \".jpg\")  # for testing with images\n    tijd = time.time()\n    ret, img = cam.read()  # Get picture from video feed\n    if ret:  # If a picture was captured\n\n        # For setting color filtering settings, useful for different backgrounds\n        # thres, b, g, r, b1, g1, r1 = filter_image(img, lower_mask, upper_mask)\n        # lower_mask = [b, g, r]\n        # upper_mask = [b1, g1, r1]\n        thres = filter_image(img, lower_mask, upper_mask)\n        hist = cv2.calcHist([thres], [0], None, [256], [0, 256])\n\n        cv2.imshow(\"thres\", thres)\n        print(\"hist\", hist[255])\n        if hist[255] < 18200:\n            try:\n                im2, contours, hierarchy = cv2.findContours(thres, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n                hierarchy = hierarchy[0]\n                # print(\"hierarchy: \", hierarchy)\n\n            except TypeError:\n                # when findContours doesn't find anything\n                print(\" Well, that was another little error\")\n\n            else:  # If contours found\n                currentMarker = 0\n                # area = np.array([[180, 0], [180, 360], [540, 360], [540, 0]])\n                # cv2.drawContours(img, [area], 0, (255, 0, 0), 2)\n\n                for component in zip(contours, hierarchy):\n                    print(\"contour loop\")\n                    currentContour = component[0]\n                    currentHierarchy = component[1]\n\n                    if 500 < cv2.contourArea(currentContour) < 50000:\n                        print(\"area loop\")\n                        # print(\"currentHierarchy: \", currentHierarchy)\n                        if currentHierarchy[2] >= 0 and currentHierarchy[3] >= 0:  # if contour has a child and a parent\n                            # Draw box around contours\n                            # rect = cv2.minAreaRect(currentContour)\n                            # box = cv2.boxPoints(rect)\n                            # box = np.int0(box)\n\n                            # currentContour should be a black square; check that its parent doesn't have a parent and its child\n                            # doesn't have a child\n                            if hierarchy[currentHierarchy[2]][2] < 0 and hierarchy[currentHierarchy[3]][3] < 0:\n                                print(\"found contour with child and parent\")\n                                # Probably found a marker, yay!\n                                MarkerContourOutside = contours[currentHierarchy[3]]\n                                MarkerContourInside = currentContour\n                                # print(\"Contour area ratio: \",\n                                #       cv2.contourArea(MarkerContourInside) / cv2.contourArea(MarkerContourOutside))\n\n                                # Draw box around contours\n                                # rect2 = cv2.minAreaRect(MarkerContourOutside)\n                                # box2 = cv2.boxPoints(rect2)\n                                # box2 = np.int0(box2)\n\n                                # Found and printed marker contours above.
 Now check for circles in it.\n                                circleContour = contours[currentHierarchy[2]]\n                                circleHierarchy = hierarchy[currentHierarchy[2]]\n                                currentMarker = 0\n                                breakNext = False\n\n                                if circleHierarchy[0] == -1:\n                                    breakNext = True\n\n                                while True:\n                                    print(\"counting circles\")\n                                    print(circleHierarchy)\n                                    # rect3 = cv2.minAreaRect(circleContour)\n                                    # box3 = cv2.boxPoints(rect3)\n                                    # box3 = np.int0(box3)\n                                    # cv2.drawContours(img, [box3], 0, (255, 0, 0), 2)\n                                    currentMarker += 1\n                                    circleHierarchy = hierarchy[circleHierarchy[0]]\n                                    circleContour = contours[circleHierarchy[0]]\n\n                                    if breakNext:\n                                        break\n\n                                    if circleHierarchy[0] == -1:\n                                        breakNext = True\n\n                                # If the marker found is the marker we're looking for, calculate its distance to the center\n                                # of the image\n                                if currentMarker == nextMarker:\n                                    lookForNextMarker = False\n                                    moveData.marker = True\n                                    firstMarker = False\n                                    moments = cv2.moments(MarkerContourOutside)\n\n                                    cx = int(moments['m10'] / moments['m00'])\n                                    cy = int(moments['m01'] / moments['m00'])\n\n                                    dx = cx - 320\n                                    dy = cy - 180\n                                    distanceToCenter = np.sqrt(dx * dx + dy * dy)\n                                    print(\"calculated line to center\")\n                                    cv2.line(img, (cx, cy), (320, 180), (0, 255, 0), thickness=4)\n                                    # print(\"D: \", distanceToCenter)\n                                    if distanceToCenter < 40:  # If close to center, we are above the marker!\n\n                                        nextMarker = currentMarker + 1\n                                        print(\"TAKE PICTURE OF WALL\")\n                                        # move.takePicture(drone, currentMarker, 1, cam)\n                                        timeout = time.time() + 3\n                                        drone.send(at.CONFIG(\"video:video_channel\", 0))\n                                        while time.time() < timeout:\n                                            drone.hover()\n\n                                        ret, img = cam.read()\n                                        cv2.imshow(\"Image\", img)\n\n                                        ret, muur = cam.read()\n\n                                        if ret:\n                                            string = str(time.ctime()) + \"_\" + str(currentMarker) + \"_1\" + \".jpg\"\n                                            # str.replace returns a new string, so the result must be assigned back\n                                            string = string.replace(\" \", \"_\")\n                                            string = string.replace(\":\", \"_\")\n                                            cv2.imwrite(string, muur)\n                                            cv2.imwrite(\"img\" + str(currentMarker) + \".jpg\", muur)\n                                            cv2.imshow(string, muur)\n                                            cv2.waitKey(1)\n                                            print(\"saved image\")\n                                            time.sleep(0.1)\n\n                                        drone.send(at.CONFIG(\"video:video_channel\", 1))\n                                        timeout = time.time() + 3\n                                        while time.time() < timeout:\n                                            drone.hover()\n                                        lookForNextMarker = True\n                                        if nextMarker == maxMarkers:\n                                            nextMarker = 1\n                                            print(\"Landing\")\n                                            while drone.state.fly_mask:\n                                                drone.land()\n                                            exit()\n\n                                    if dx > 0:\n                                        print(\"move right\")\n                                        # move right\n                                        cv2.putText(img, \"Move: Right\", (10, 80), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255))\n                                        moveData.dir_y = 0\n\n                                    else:\n                                        print(\"move left\")\n                                        # move left\n                                        cv2.putText(img, \"Move: Left\", (10, 80), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255))\n                                        moveData.dir_y = 1\n                                    if dy > 0:\n                                        print(\"move back\")\n\n                                        # move back\n                                        cv2.putText(img, \"Move: back\", (10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255))\n                                        moveData.dir_x = 0\n                                    else:\n                                        print(\"move forward\")\n\n                                        # move forward\n                                        cv2.putText(img, \"Move: forward\", (10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255))\n                                        moveData.dir_x = 1\n\n                                    # Print more!\n\n                                    cv2.putText(img, str(currentMarker), (10, 50), cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 255))\n                                    print(\"marker = \", currentMarker)\n\n                                # cv2.drawContours(img, [box2], 0, (0, 255, 255), 2)\n                                # cv2.drawContours(img, [box], 0, (255, 255, 255), 2)\n                        else:\n                            # cv2.drawContours(img, [box2], 0, (0, 0, 0), 2)\n                            # cv2.drawContours(img, [box], 0, (0, 0, 0), 2)\n                            pass\n\n        cv2.imshow(\"Image\", img)\n        cv2.waitKey(1)\n        tijd2 = time.time()\n        if not movethread.is_alive() and moveData.marker:\n            print(\"start move thread marker = True\")\n            movethread = threading.Thread(target=move.droneMove, args=(moveData, drone))\n            movethread.start()\n            moveData.marker = False\n\n        if not movethread.is_alive() and not moveData.marker and not firstMarker:\n            print(\"start move thread marker = false\")\n            movethread = threading.Thread(target=move.droneMove, args=(moveData, drone))\n            movethread.start()\n    # time.sleep(3)\n    # print(\"Alles: \", time.time() - tijd)\n    # print(\"Threads: \",time.time() - tijd2)\n","sub_path":"Nextcloud/1. School/UAV/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":16129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"593988326","text":"@pytest.mark.parametrize('dtype', [None, object])\ndef test_raise_when_saving_timezones(self, engine, ext, dtype, tz_aware_fixture):\n    tz = tz_aware_fixture\n    data = pd.Timestamp('2019', tz=tz)\n    df = DataFrame([data], dtype=dtype)\n    with pytest.raises(ValueError, match='Excel does not support'):\n        df.to_excel(self.path)\n    data = data.to_pydatetime()\n    df = DataFrame([data], dtype=dtype)\n    with pytest.raises(ValueError, match='Excel does not support'):\n        df.to_excel(self.path)","sub_path":"Data Set/bug-fixing-4/06a6b496a4608bdcc54c8e0ad85197437257d9dc--bug.py","file_name":"06a6b496a4608bdcc54c8e0ad85197437257d9dc--bug.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"428718239","text":"import pandas as pd\nimport numpy as np\nfrom dask.distributed import Client\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport time\nimport sys\nsys.path.append('/home/sandeep/Desktop/BankBuddy/Reco-usecases/new_reco/preprocessing_pipeline/')\nfrom DataPreprocess import *\n\nstart_time = time.time()\n\nloan_file = './data/unprocessed/loan_up_sell.csv'\n\nloan_obj = DataPreprocess(loan_file)\nloan_df = loan_obj.get_df()\n\n\"\"\"Setup Dask Client\"\"\"\nworkers = 1\nthr_per_worker = 4\nprocess = False\nmemory = '2GB'\nclient = Client(n_workers=workers, threads_per_worker=thr_per_worker, processes=process, memory_limit=memory)\n\n\"\"\"Format Column Names\"\"\"\nloan_df.columns = loan_obj.format_column_name(loan_df)\n\n\"\"\"Check dtypes\"\"\"\nloan_df_dtypes = loan_obj.check_dtype(loan_df)\nprint(loan_df_dtypes)\n\n\"\"\" Drop gender column\"\"\"\nloan_df = loan_obj.drop_column(loan_df,\"gender\")\n\n\"\"\"Get list of obj/non-obj columns\"\"\"\nloan_obj_cols = loan_obj.get_obj_col_list(loan_df)\nloan_nonobj_cols = loan_obj.get_nonobj_col_list(loan_df)\n\n\"\"\"Convert string (object datatypes) to lowercase\"\"\"\nloan_df = loan_obj.convert_data_to_lowercase(loan_df, loan_obj_cols)\n\n\"\"\"Get df with number of unique values column-wise\"\"\"\nloan_unique_col_values = loan_obj.get_unique_col_values(loan_df)\nprint(loan_unique_col_values)\n\"\"\"unnecessary_cols stores columns with number of unique values either equal to 1 or len(df)\"\"\"\nloan_unnecessary_cols = loan_unique_col_values[loan_unique_col_values['num_unique_values'].isin([1, len(loan_df)])]\nprint(\"columns to be dropped - \", loan_unnecessary_cols.column.unique())\n\ntarget_df = pd.DataFrame()\ntarget_df['loan_availed2'] = loan_df['loan_availed2']\nloan_df.drop('loan_availed2', axis=1, inplace=True)\n\n\"\"\"Update list of obj/non-obj columns\"\"\"\nloan_obj_cols = loan_obj.get_obj_col_list(loan_df)\nloan_nonobj_cols = loan_obj.get_nonobj_col_list(loan_df)\n\n\"\"\"CALCULATING DERIVED DATA\"\"\"\n\n\"\"\"Age from DoB\"\"\"\nloan_df = loan_obj.get_age_col(loan_df, 'dob', '/', 1, 0, 2)\nprint(\"age added\")\n\n\"\"\"Customer Since\"\"\"\nloan_df = loan_obj.get_customer_since(loan_df,
'customer_to_bank', '/', 1, 0, 2)\n\n\"\"\"Customer group - new/old\"\"\"\nloan_df = loan_obj.group_customer(loan_df, 'customer_since_months', 'customer_rel_dur_segment')\nprint(\"grouped\")\n\n\"\"\"\"Zip code retrieval and distance mapping\"\"\"\ndict_add={}\n\naddress = list(loan_df['address'].unique())\n\nfor add in address:\n dict_add[add]=loan_obj.zipcode_distance_retrieval(add,\"IN\")\n\n\nloan_df = loan_obj.address_distance(loan_df,dict_add,\"address\")\n\n\n\n\"\"\"Since we have added some new columns - we have to update our cols_list\"\"\"\nloan_obj_cols = loan_obj.get_obj_col_list(loan_df)\nloan_nonobj_cols = loan_obj.get_nonobj_col_list(loan_df)\n\nloan_temp_df = loan_df.copy()\nloan_temp_df.to_csv('./data/unprocessed/consolidated_for_comparison.csv', index=False)\n\nprint(loan_df.head())\n\n\"\"\"Handling Missing Data\"\"\"\nloan_missing_df = loan_obj.get_missing_df(loan_df)\nloan_missing_df = loan_missing_df[loan_missing_df.percent_missing > 0]\nprint(loan_missing_df)\n\n\"\"\"Handling Categorical Variables\"\"\"\nloan_cat_col_uniques_dict = loan_obj.get_cat_cols_unique_val_dict(loan_df, loan_obj_cols)\nprint(loan_cat_col_uniques_dict)\n\nloan_binary_cols_list = loan_cat_col_uniques_dict.get(2).split(\",\")\nloan_onehot_col_list = loan_cat_col_uniques_dict[4].split(\",\") + loan_cat_col_uniques_dict[6].split(\",\")\n\nloan_df = loan_obj.convert_cat_cols_to_binary(loan_df, loan_binary_cols_list)\nloan_df = loan_obj.convert_cat_cols_to_onehot(loan_df, loan_onehot_col_list)\n# # customer_df = customer_obj.group_less_occurring_cat_vars(customer_df, customer_obj_cols)\nprint(loan_df.head())\nprint(\"Categorical data handling complete for Customer data\")\n\n\"\"\"OUTLIER DETECTION\"\"\"\nloan_outlier_df = loan_obj.outlier_detection(loan_df[loan_nonobj_cols])\nloan_outlier_df = loan_obj.outlier_details(loan_outlier_df)\nprint(\"Outliers\")\nprint(loan_outlier_df)\n\n\"\"\"\" log scaling \"\"\"\nlist_cols_scaled = [\"applicant_income\",\"coapplicant_income\",\"loan_amt\"]\nloan_df = loan_obj.log_scaling(loan_df,list_cols_scaled)\n\n\n\"\"\"CORRELATION ANALYSIS\"\"\"\n\n\"\"\"Top Absolute Correlation\"\"\"\nprint(\"Top Absolute Correlation\")\n#print(loan_obj.get_top_abs_correlations(loan_df.iloc[:, 1:], 10))\n\n\"\"\"Highly correlated columns -- exceeding 0.75\"\"\"\nprint(\"Suggested Highly Correlated Columns to be dropped\")\nprint(loan_obj.drop_highly_corr_var(loan_df, 0.75))\n\nloan_df['loan_availed2'] = target_df['loan_availed2']\nprint(loan_df.head())\nloan_df.to_csv(\"./data/processed/loan_upsell_processed.csv\", index=False)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n","sub_path":"loan/upsell/loan_upsell_preprocess.py","file_name":"loan_upsell_preprocess.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"512859860","text":"from framework.interface_drivers.http.HttpLib import HttpLib\nfrom framework.support.Log import log_info\nfrom test_project.api_call_builders.SettingsFiltersApi import SettingsFiltersApi\nfrom test_project.configurations.status_codes import status_code_200, status_code_204\nfrom test_project.models.SettingsFiltersModel import SettingsFiltersModel\n\n\ndef create_filters_model(criteria_label):\n \"\"\"\n Create random model filter\n :param criteria_label: Created label with method label.create\n :return: model\n \"\"\"\n model = SettingsFiltersModel().get_randomly_model(criteria_label)\n log_info(\"Create random model 
settingsFilters:\\nModel:\\n{model}\".format(model=model))\n return model\n\n\ndef create_filters(user_id, model_filters):\n \"\"\"\n Creates a filter filter\n :param user_id: User's email address. The special value \"me\" can be used to indicate the authenticated user.\n :param model_filters:\n :type: SettingsFiltersModel\n :returns: model \n \"\"\"\n response, model = SettingsFiltersApi().filters_create(user_id, model_filters)\n response_status_code = HttpLib.get_response_status_code(response)\n assert response_status_code == status_code_200, \\\n \"Create filter failed: Status code isn't '200 OK'.\" \\\n \"\\nStatus code = {status_code}\".format(status_code=response_status_code)\n return model\n\n\ndef get_filters(user_id, filter_id):\n \"\"\"\n Gets a filter step\n :param user_id: User's email address. The special value \"me\" can be used to indicate the authenticated user.\n :param filter_id: The server assigned ID of the filter\n :return: model \n \"\"\"\n response, model = SettingsFiltersApi().filters_get(user_id, filter_id)\n response_status_code = HttpLib.get_response_status_code(response)\n assert response_status_code == status_code_200, \\\n \"Get filter failed: Status code isn't '200 OK'.\" \\\n \"\\nStatus code = {status_code}\".format(status_code=response_status_code)\n return model\n\n\ndef list_filters(user_id):\n \"\"\"\n Lists the message filters of a Gmail user step.\n :param user_id: User's email address. The special value \"me\" can be used to indicate the authenticated user.\n :return: list\n \"\"\"\n response, model_list = SettingsFiltersApi().filters_list(user_id)\n response_status_code = HttpLib.get_response_status_code(response)\n assert response_status_code == status_code_200, \\\n \"List filter failed: Status code isn't '200 OK'.\" \\\n \"\\nStatus code = {status_code}\".format(status_code=response_status_code)\n return model_list\n\n\ndef delete_filters(user_id, filter_id):\n \"\"\"\n Deletes a filter step\n :param user_id: User's email address. The special value \"me\" can be used to indicate the authenticated user.\n :param filter_id: The server assigned ID of the filter\n :return: response\n \"\"\"\n response = SettingsFiltersApi().filters_delete(user_id, filter_id)\n response_status_code = HttpLib.get_response_status_code(response)\n assert response_status_code == status_code_204, \\\n \"Delete filter failed: Status code isn't '204 OK'.\" \\\n \"\\nStatus code = {status_code}\".format(status_code=response_status_code)\n\n\ndef check_model(expected_model, actual_model):\n \"\"\"\n Compare two models.\n :param expected_model\n :type: SettingsFiltersModel\n :param actual_model\n :type: SettingsFiltersModel\n \"\"\"\n assert (expected_model == actual_model), \"Not Compare model: Expected model:\\n {0}\\nActual model:\\n {1}\". 
\\\n format(expected_model, actual_model)\n\n\ndef check_model_is_the_list_models(model_list, insert_model):\n \"\"\"\n Checking contains model in the list models\n :param model_list:\n :param insert_model:\n \"\"\"\n for model in model_list:\n if model == insert_model:\n return\n assert False, \"Not Contains model in the list:\\nModel_list:\\n{model_list}\\nInsert model:\\n{insert_model}\" \\\n .format(model_list='\\n'.join(str(item.__dict__) for item in model_list),\n insert_model=str(insert_model.__dict__))\n\n\ndef check_model_is_not_the_list_models(model_list, insert_model):\n \"\"\"\n Checking contains model in the list models\n :param model_list:\n :param insert_model:\n \"\"\"\n for model in model_list:\n if model == insert_model:\n assert False, \"Contains model in the list:\\nModel_list:\\n{model_list}\\nInsert model:\\n{insert_model}\" \\\n .format(model_list='\\n'.join(str(item.__dict__) for item in model_list),\n insert_model=str(insert_model.__dict__))\n","sub_path":"test_project/steps/SettingsFiltersSteps.py","file_name":"SettingsFiltersSteps.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"214977264","text":"import numpy as np\r\nimport torch\r\nimport nibabel as nib\r\n\r\nfrom torchio.data.image import Image\r\nimport torchio\r\n\r\nclass IXI_H5DSImage(Image):\r\n def __init__(self, h5DS=None, lazypatch=True, imtype=torchio.INTENSITY, **kwargs):\r\n kwargs['path'] = ''\r\n kwargs['type'] = imtype\r\n super().__init__(**kwargs)\r\n self.h5DS = h5DS\r\n self.lazypatch = lazypatch\r\n\r\n if not self.lazypatch:\r\n self.load()\r\n\r\n def load(self) -> None:\r\n if self._loaded:\r\n return\r\n if self.lazypatch:\r\n tensor, affine = self.h5DS, np.eye(4)\r\n else:\r\n tensor, affine = self.read_and_check_h5(self.h5DS)\r\n self[torchio.DATA] = tensor\r\n self[torchio.AFFINE] = affine\r\n self._loaded = True\r\n\r\n @property\r\n def spatial_shape(self):\r\n if self.lazypatch:\r\n return self.shape\r\n else:\r\n return self.shape[1:]\r\n\r\n def crop(self, index_ini, index_fin):\r\n new_origin = nib.affines.apply_affine(self.affine, index_ini)\r\n new_affine = self.affine.copy()\r\n new_affine[:3, 3] = new_origin\r\n i0, j0, k0 = index_ini\r\n i1, j1, k1 = index_fin\r\n if len(self.data.shape) == 4:\r\n patch = self.data[:, i0:i1, j0:j1, k0:k1]\r\n else:\r\n patch = np.expand_dims(self.data[i0:i1, j0:j1, k0:k1], 0)\r\n if not isinstance(self.data, torch.Tensor):\r\n patch = torch.from_numpy(patch)\r\n kwargs = dict(\r\n tensor=patch,\r\n affine=new_affine,\r\n type=self.type,\r\n path=self.path,\r\n h5DS=self.h5DS\r\n )\r\n for key, value in self.items():\r\n if key in torchio.data.image.PROTECTED_KEYS: continue\r\n kwargs[key] = value \r\n return self.__class__(**kwargs)\r\n\r\n def read_and_check_h5(self, h5DS):\r\n tensor, affine = torch.from_numpy(h5DS[()]).unsqueeze(0), np.eye(4)\r\n tensor = super().parse_tensor_shape(tensor)\r\n if self.channels_last:\r\n tensor = tensor.permute(3, 0, 1, 2)\r\n if self.check_nans and torch.isnan(tensor).any():\r\n warnings.warn(f'NaNs found in file \"{path}\"')\r\n return tensor, affine","sub_path":"datasets/ixi_torchiowrap.py","file_name":"ixi_torchiowrap.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"145610833","text":"from django.shortcuts import render, get_object_or_404\n\nfrom ticketsub.models import Ticket, TicketForm\n\ndef 
submit_ticket(request, *args):\n if request.method == 'POST':\n form = TicketForm(request.POST)\n if form.is_valid():\n form.save()\n return render(request, 'success.html')\n\n else:\n if args:\n ticket = get_object_or_404(Ticket, \n pk=args[0]\n )\n form = TicketForm(instance=ticket)\n else:\n form = TicketForm()\n\n return render(request, 'form.html', {\n 'form' : form,\n })\n\ndef ticket_list(request):\n t = Ticket.objects.order_by('id')\n return render(request, 'list.html', {\n 'ticketlist': t,\n })\n","sub_path":"utk_prog_team_2015_04_09/djtest/ticketsub/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"501406655","text":"import logging\nimport random\nfrom struct import pack\nfrom zlib import crc32\n\nfrom pox.core import core\nfrom pox.lib.util import dpidToStr\nimport pox.openflow.libopenflow_01 as of\nfrom pox.lib.revent import EventMixin\nfrom pox.lib.packet.ipv4 import ipv4\nfrom pox.lib.packet.udp import udp\nfrom pox.lib.packet.tcp import tcp\nfrom pox.lib.addresses import IPAddr, EthAddr\nfrom pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST\nfrom pox.lib.packet.arp import arp\nfrom pox.lib.util import str_to_bool, dpid_to_str, str_to_dpid\nfrom ripllib.mn import topos\n\nfrom util import buildTopo, getRouting\nimport time\nimport random\n\nlog = core.getLogger(\"HederaController\")\n#log.setLevel(logging.WARNING)\n\n# Number of bytes to send for packet_ins\nMISS_SEND_LEN = 2000\n\nIDLE_TIMEOUT = 10\nCAPACITY = 1\n\nFLOW_IDLE_TIMEOUT = 10\nFLOW_MEMORY_TIMEOUT = 60 * 5\n\n# Borrowed from pox/forwarding/l2_multi\nclass Switch(object):\n def __init__(self):\n self.connection = None\n self.ports = None\n self.dpid = None\n self._listeners = None\n\n def __repr__(self):\n return dpidToStr(self.dpid)\n\n def disconnect(self):\n if self.connection is not None:\n log.debug(\"Disconnect %s\" % (self.connection,))\n self.connection.removeListeners(self._listeners)\n self.connection = None\n self._listeners = None\n\n def connect(self, connection):\n if self.dpid is None:\n self.dpid = connection.dpid\n assert self.dpid == connection.dpid\n if self.ports is None:\n self.ports = connection.features.ports\n self.disconnect()\n log.debug(\"Connect %s\" % (connection,))\n self.connection = connection\n self._listeners = connection.addListeners(self)\n\n def send_packet_data(self, outport, data=None):\n msg = of.ofp_packet_out(in_port=of.OFPP_NONE, data=data)\n msg.actions.append(of.ofp_action_output(port=outport))\n self.connection.send(msg)\n\n def send_packet_data2(self, outport, server_src, mac_src, data=None):\n msg = of.ofp_packet_out(in_port=of.OFPP_NONE, data=data)\n msg.actions.append(of.ofp_action_nw_addr.set_src(server_src))\n msg.actions.append(of.ofp_action_dl_addr.set_src(mac_src))\n msg.actions.append(of.ofp_action_output(port=outport))\n self.connection.send(msg)\n\n def send_packet_data3(self, outport, server_dst, mac_dst, data=None):\n msg = of.ofp_packet_out(in_port=of.OFPP_NONE, data=data)\n msg.actions.append(of.ofp_action_nw_addr.set_dst(server_dst))\n msg.actions.append(of.ofp_action_dl_addr.set_dst(mac_dst))\n msg.actions.append(of.ofp_action_output(port=outport))\n self.connection.send(msg)\n\n def send_packet_bufid(self, outport, buffer_id=None):\n msg = of.ofp_packet_out(in_port=of.OFPP_NONE)\n msg.actions.append(of.ofp_action_output(port=outport))\n msg.buffer_id = buffer_id\n self.connection.send(msg)\n\n def install(self, port, 
match, buf=None, idle_timeout=0, hard_timeout=0,\n priority=of.OFP_DEFAULT_PRIORITY):\n msg = of.ofp_flow_mod()\n msg.match = match\n msg.idle_timeout = idle_timeout\n msg.hard_timeout = hard_timeout\n msg.priority = priority\n msg.actions.append(of.ofp_action_output(port=port))\n msg.buffer_id = buf\n self.connection.send(msg)\n\n def install2(self, port, server_src, mac_src, match, buf=None, idle_timeout=0, hard_timeout=0,\n priority=of.OFP_DEFAULT_PRIORITY):\n msg = of.ofp_flow_mod()\n msg.match = match\n msg.idle_timeout = idle_timeout\n msg.hard_timeout = hard_timeout\n msg.priority = priority\n msg.actions.append(of.ofp_action_nw_addr.set_src(server_src))\n msg.actions.append(of.ofp_action_dl_addr.set_src(mac_src))\n msg.actions.append(of.ofp_action_output(port=port))\n msg.buffer_id = buf\n self.connection.send(msg)\n\n def install3(self, port, server_dst, mac_dst, match, buf=None, idle_timeout=0, hard_timeout=0,\n priority=of.OFP_DEFAULT_PRIORITY):\n msg = of.ofp_flow_mod()\n msg.match = match\n msg.idle_timeout = idle_timeout\n msg.hard_timeout = hard_timeout\n msg.priority = priority\n msg.actions.append(of.ofp_action_nw_addr.set_dst(server_dst))\n msg.actions.append(of.ofp_action_dl_addr.set_dst(mac_dst))\n msg.actions.append(of.ofp_action_output(port=port))\n msg.buffer_id = buf\n self.connection.send(msg)\n\n def install_multiple(self, actions, match, buf=None, idle_timeout=0,\n hard_timeout=0, priority=of.OFP_DEFAULT_PRIORITY):\n msg = of.ofp_flow_mod()\n msg.match = match\n msg.idle_timeout = idle_timeout\n msg.hard_timeout = hard_timeout\n msg.priority = priority\n for a in actions:\n msg.actions.append(a)\n msg.buffer_id = buf\n self.connection.send(msg)\n\n def _handle_ConnectionDown(self, event):\n self.disconnect()\n pass\n\n\ndef sep():\n log.info(\"************************************************\")\n\nclass MemoryEntry(object):\n \"\"\"\n Record for flows we are balancing\n Table entries in the switch \"remember\" flows for a period of time, but\n rather than set their expirations to some long value (potentially leading\n to lots of rules for dead connections), we let them expire from the\n switch relatively quickly and remember them here in the controller for\n longer.\n Another tactic would be to increase the timeouts on the switch and use\n the Nicira extension which can match packets with FIN set to remove them\n when the connection closes.\n \"\"\"\n\n def __init__(self, server, first_packet, client_port):\n self.server = server\n self.first_packet = first_packet\n self.client_port = client_port\n self.refresh()\n\n def refresh(self):\n self.timeout = time.time() + FLOW_MEMORY_TIMEOUT\n\n @property\n def is_expired(self):\n return time.time() > self.timeout\n\n @property\n def key1(self):\n ethp = self.first_packet\n ipp = ethp.find('ipv4')\n tcpp = ethp.find('tcp')\n\n return ipp.srcip, ipp.dstip, tcpp.srcport, tcpp.dstport\n\n @property\n def key2(self):\n ethp = self.first_packet\n ipp = ethp.find('ipv4')\n tcpp = ethp.find('tcp')\n\n return self.server, ipp.srcip, tcpp.dstport, tcpp.srcport\n\n @property\n def from_client_to_server(self):\n ethp = self.first_packet\n ipp = ethp.find('ipv4')\n tcpp = ethp.find('tcp')\n\n return ipp.srcip, ipp.dstip, tcpp.srcport, tcpp.dstport\n\n @property\n def from_server_to_client(self):\n ethp = self.first_packet\n ipp = ethp.find('ipv4')\n tcpp = ethp.find('tcp')\n\n return self.server, ipp.srcip, tcpp.dstport, tcpp.srcport\n\nclass HederaController(object):\n\n def __init__(self, t, r, service_ip, servers=[]):\n 
self.switches = {} # Switches seen: [dpid] -> Switch\n self.t = t # Master Topo object, passed in and never modified.\n self.r = r # Master Routing object, passed in and reused.\n self.macTable = {} # [mac] -> (dpid, port)\n self.macTable2 = {}\n self.paths = {}\n self.flows = {}\n self.link_usage = {}\n self.last_server = 0\n\n self.service_ip = IPAddr(service_ip)\n self.servers = [IPAddr(a) for a in servers]\n self.live_servers = {} # IP -> MAC,port\n self.selected_server = None\n try:\n self.log = log.getChild(dpid_to_str(self.con.dpid))\n except:\n # Be nice to Python 2.6 (ugh)\n self.log = log\n\n self.total_connection = {} # IP -> total connection\n for ip in servers:\n self.total_connection[ip] = 0\n self.memory = {} # (srcip,dstip,srcport,dstport) -> MemoryEntry\n\n self.outstanding_probes = {} # IP -> expire_time\n # How quickly do we probe?\n self.probe_cycle_time = 5\n\n # How long do we wait for an ARP reply before we consider a server dead?\n self.arp_timeout = 3\n\n\n # TODO: generalize all_switches_up to a more general state machine.\n self.all_switches_up = False # Sequences event handling.\n core.openflow.addListeners(self, priority=0)\n\n\n def _raw_dpids(self, arr):\n \"Convert a list of name strings (from Topo object) to numbers.\"\n return [self.t.id_gen(name=a).dpid for a in arr]\n\n def _flow_key(self, src_ip, dst_ip):\n return str(src_ip) + \"::\" + str(dst_ip)\n\n def _path_key(self, src_sw_name, dst_sw_name):\n return src_sw_name + \"::\" + dst_sw_name\n\n def _link_key(self, sw1_name, sw2_name):\n return sw1_name + \"::\" + sw2_name\n\n def _do_expire(self):\n \"\"\"\n Expire probes and \"memorized\" flows\n Each of these should only have a limited lifetime.\n \"\"\"\n t = time.time()\n\n # Expire probes\n for ip, expire_at in self.outstanding_probes.items():\n if t > expire_at:\n self.outstanding_probes.pop(ip, None)\n if ip in self.live_servers:\n self.log.warn(\"Server %s down\", ip)\n del self.live_servers[ip]\n\n # Expire flow\n c = len(self.memory)\n self.memory = {k: v for k, v in self.memory.items()\n if not v.is_expired}\n if len(self.memory) != c:\n self.log.debug(\"Expired %i flows\", c - len(self.memory))\n\n def _do_probe(self):\n \"\"\"\n Send an ARP to a server to see if it's still up\n \"\"\"\n self._do_expire()\n\n server = self.servers.pop(0)\n self.servers.append(server)\n\n r = arp()\n r.hwtype = r.HW_TYPE_ETHERNET\n r.prototype = r.PROTO_TYPE_IP\n r.opcode = r.REQUEST\n r.hwdst = ETHER_BROADCAST\n r.protodst = server\n r.hwsrc = self.mac\n r.protosrc = self.service_ip\n e = ethernet(type=ethernet.ARP_TYPE, src=self.mac,\n dst=ETHER_BROADCAST)\n e.set_payload(r)\n self.log.debug(\"ARPing for %s\", server)\n msg = of.ofp_packet_out()\n msg.data = e.pack()\n msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))\n msg.in_port = of.OFPP_NONE\n self.con.send(msg)\n\n self.outstanding_probes[server] = time.time() + self.arp_timeout\n core.callDelayed(self._probe_wait_time, self._do_probe)\n\n @property\n def _probe_wait_time(self):\n \"\"\"\n Time to wait between probes\n \"\"\"\n r = self.probe_cycle_time / float(len(self.servers))\n r = max(.25, r) # Cap it at four per second\n return r\n\n def _ecmp_hash(self, packet):\n \"Return an ECMP-style 5-tuple hash for TCP/IP packets, otherwise 0.\"\n hash_input = [0] * 5\n if isinstance(packet.next, ipv4):\n ip = packet.next\n hash_input[0] = ip.srcip.toUnsigned()\n if ip.dstip == self.service_ip:\n hash_input[1] = self.selected_server.toUnsigned()\n else:\n hash_input[1] = ip.dstip.toUnsigned()\n 
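# The remaining 5-tuple fields (protocol and, for TCP/UDP, the L4 ports)\n            # are filled in below; non-IPv4 packets fall through and hash to 0.\n            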
hash_input[2] = ip.protocol\n if isinstance(ip.next, tcp) or isinstance(ip.next, udp):\n l4 = ip.next\n hash_input[3] = l4.srcport\n hash_input[4] = l4.dstport\n return crc32(pack('LLHHH', *hash_input))\n return 0\n\n\n def _install_reactive_path(self, event, out_dpid, final_out_port, packet):\n \"Install entries on route between two switches.\"\n inport = event.port\n mac, port_s = self.live_servers[self.selected_server]\n ip = packet.next\n in_name = self.t.id_gen(dpid=event.dpid).name_str()\n out_name = self.t.id_gen(dpid=out_dpid).name_str()\n hash_ = self._ecmp_hash(packet)\n route = self.r.get_route(in_name, out_name, hash_, False)\n log.info(\"route: %s\" % route)\n # match = of.ofp_match.from_packet(packet)\n for i, node in enumerate(route):\n node_dpid = self.t.id_gen(name=node).dpid\n if i < len(route) - 1:\n next_node = route[i + 1]\n out_port, next_in_port = self.t.port(node, next_node)\n if i == 0:\n in_port = inport\n else:\n prev_node = route[i - 1]\n in_port, next_in_port2 = self.t.port(node, prev_node)\n else:\n prev_node = route[i - 1]\n in_port, next_in_port2 = self.t.port(node, prev_node)\n out_port = final_out_port\n if ip.dstip == self.service_ip:\n log.info(\"path to %s , to %s server\" % (node_dpid, mac))\n match = of.ofp_match.from_packet(packet, in_port)\n if i == len(route) - 1:\n self.switches[out_dpid].install3(out_port, self.selected_server, mac, match,\n idle_timeout=IDLE_TIMEOUT)\n else:\n self.switches[node_dpid].install(out_port, match, idle_timeout=IDLE_TIMEOUT)\n else:\n match = of.ofp_match.from_packet(packet, in_port)\n if i == len(route) - 1:\n self.switches[out_dpid].install2(out_port, self.service_ip, self.mac, match,\n idle_timeout=IDLE_TIMEOUT)\n else:\n self.switches[node_dpid].install(out_port, match, idle_timeout=IDLE_TIMEOUT)\n\n def _eth_to_int(self, eth):\n return sum(([ord(x) * 2 ** ((5 - i) * 8) for i, x in enumerate(eth.raw)]))\n\n def _int_to_eth(self, inteth):\n return EthAddr(\"%012x\" % (inteth,))\n\n def _src_dst_str(self, src_dpid, dst_dpid):\n \"Return a hash based on src and dst dpids.\"\n return crc32(pack('QQ', src_dpid, dst_dpid))\n\n def _flood(self, event):\n packet = event.parsed\n dpid = event.dpid\n in_port = event.port\n # log.info(\"flood PacketIn to: %s\" % packet)\n\n t = self.t\n\n # Broadcast to every output port except the input on the input switch.\n # Hub behavior, baby!\n for sw in self._raw_dpids(t.layer_nodes(t.LAYER_EDGE)):\n # log.info(\"considering sw %s\" % sw)\n ports = []\n sw_name = t.id_gen(dpid=sw).name_str()\n for host in t.down_nodes(sw_name):\n sw_port, host_port = t.port(sw_name, host)\n if sw != dpid or (sw == dpid and in_port != sw_port):\n ports.append(sw_port)\n # Send packet out each non-input host port\n # TODO: send one packet only.\n for port in ports:\n # log.info(\"sending to port %s on switch %s\" % (port, sw))\n # buffer_id = event.ofp.buffer_id\n # if sw == dpid:\n # self.switches[sw].send_packet_bufid(port, event.ofp.buffer_id)\n # else:\n self.switches[sw].send_packet_data(port, event.data)\n # buffer_id = None\n\n def _flood2(self, event, server, mac, dpid, in_port):\n\n # log.info(\"flood PacketIn to: %s\" % packet)\n\n t = self.t\n\n # Broadcast to every output port except the input on the input switch.\n # Hub behavior, baby!\n for sw in self._raw_dpids(t.layer_nodes(t.LAYER_EDGE)):\n # log.info(\"considering sw %s\" % sw)\n ports = []\n sw_name = t.id_gen(dpid=sw).name_str()\n for host in t.down_nodes(sw_name):\n sw_port, host_port = t.port(sw_name, host)\n if sw != dpid or (sw 
== dpid and in_port != sw_port):\n                    ports.append(sw_port)\n            # Send packet out each non-input host port\n            # TODO: send one packet only.\n            for port in ports:\n                log.info(\"sending to port %s on switch %s\" % (port, sw))\n                # buffer_id = event.ofp.buffer_id\n                # if sw == dpid:\n                #     self.switches[sw].send_packet_bufid(port, event.ofp.buffer_id)\n                # else:\n                self.switches[sw].send_packet_data3(port, server, mac, event.data)\n                # buffer_id = None\n\n    def _pick_server(self, key, in_port):\n        \"\"\"\n        Pick a server for a (hopefully) new connection, round-robin over\n        the live servers.\n        \"\"\"\n        self.last_server = (self.last_server + 1) % len(self.live_servers)\n        return self.live_servers.keys()[self.last_server]\n\n\n    def _handle_packet_reactive(self, event):\n        global server\n        packet = event.parsed\n        dpid = event.dpid\n        # log.info(\"PacketIn: %s\" % packet)\n        in_port = event.port\n        t = self.t\n\n        def drop():\n            if event.ofp.buffer_id is not None:\n                # Kill the buffer\n                msg = of.ofp_packet_out(data=event.ofp)\n                self.con.send(msg)\n            return None\n        #log.info(\"mactable: %s\" % self.macTable)\n        # self.macTable[packet.src] = (dpid, in_port)\n        tcpp = packet.find('tcp')\n        if not tcpp:\n            arpp = packet.find('arp')\n            if arpp:\n                # Handle replies to our server-liveness probes\n                if arpp.opcode == arpp.REPLY:\n                    # log.info(\"packetin arp : %s\" %packet)\n                    if arpp.protosrc in self.outstanding_probes:\n                        # A server is (still?) up; cool.\n                        del self.outstanding_probes[arpp.protosrc]\n                        if (self.live_servers.get(arpp.protosrc, (None, None))\n                                == (arpp.hwsrc, in_port)):\n                            pass\n                        else:\n                            # Ooh, new server.\n                            self.live_servers[arpp.protosrc] = arpp.hwsrc, in_port\n                            self.log.info(\"Server %s port %s up\" % (arpp.hwsrc, in_port))\n\n                return\n            # Not TCP and not ARP. Don't know what to do with this. 
Drop it.\n return\n ipp = packet.find('ipv4')\n # Learn MAC address of the sender on every packet-in.\n # log.info(\"reacPacketIn: %s\" % packet)\n self.macTable[packet.src] = (dpid, in_port)\n\n if ipp.srcip in self.servers:\n key = ipp.srcip, ipp.dstip, tcpp.srcport, tcpp.dstport\n entry = self.memory.get(key)\n if entry is None:\n # We either didn't install it, or we forgot about it.\n self.log.debug(\"No client for %s\", key)\n return drop()\n\n # Refresh time timeout and reinstall.\n entry.refresh()\n\n log.info(\"packetin dri server :%s\" % packet)\n if packet.dst in self.macTable2:\n out_dpid, out_port = self.macTable2[packet.dst]\n log.info(\"instal path S: %s %s\" % (out_dpid, out_port))\n self._install_reactive_path(event, out_dpid, out_port, packet)\n\n log.info(\"sending to S entry in mactable: %s %s\" % (out_dpid, out_port))\n self.switches[out_dpid].send_packet_data2(out_port, self.service_ip, self.mac, event.data)\n elif packet.dst in self.macTable:\n out_dpid, out_port = self.macTable[packet.dst]\n log.info(\"instal path S: %s %s\" % (out_dpid, out_port))\n self._install_reactive_path(event, out_dpid, out_port, packet)\n\n log.info(\"sending to S entry in mactable: %s %s\" % (out_dpid, out_port))\n self.switches[out_dpid].send_packet_data2(out_port, self.service_ip, self.mac, event.data)\n\n elif ipp.dstip == self.service_ip:\n log.info(\"packetin dri client :%s\" % packet)\n # Ah, it's for our service IP and needs to be load balanced\n\n # Do we already know this flow?\n key = ipp.srcip, ipp.dstip, tcpp.srcport, tcpp.dstport\n entry = self.memory.get(key)\n if entry is None or entry.server not in self.live_servers:\n # Don't know it (hopefully it's new!)\n if len(self.live_servers) == 0:\n self.log.warn(\"No servers!\")\n return drop()\n # Pick a server for this flow\n server = self._pick_server(key, in_port)\n self.log.debug(\"Directing traffic to %s\", server)\n entry = MemoryEntry(server, packet, in_port)\n self.memory[entry.key1] = entry\n self.memory[entry.key2] = entry\n self.selected_server = server\n\n # Update timestamp\n entry.refresh()\n\n # Set up table entry towards selected server\n mac, port = self.live_servers[entry.server]\n dpid_mac = self._eth_to_int(mac)\n # Insert flow, deliver packet directly to destination.\n if mac in self.macTable:\n out_dpid, out_port = self.macTable[mac]\n log.info(\"sending to entry gff: %s %s\" % (out_dpid, out_port))\n\n self._install_reactive_path(event, out_dpid, out_port, packet)\n\n log.info(\"sending to entry in mactable: %s %s\" % (out_dpid, out_port))\n self.switches[out_dpid].send_packet_data3(out_port, server, mac, event.data)\n else:\n self._flood2(event, server, mac, dpid_mac, port)\n else:\n self._flood(event)\n\n # Get host index.\n def dpid_port_to_host_index(self, dpid, port):\n node = self.t.id_gen(dpid=dpid)\n return node.pod * ((self.t.k ** 2) / 4) + node.sw * (self.t.k / 2) + ((port - 2) / 2)\n\n def _handle_PacketIn(self, event):\n # log.info(\"Parsing PacketIn.\")\n\n packet = event.parsed\n if not self.all_switches_up:\n log.info(\"Saw PacketIn before all switches were up - ignoring.\")\n #log.info(\"PacketIn: %s\" % packet)\n return\n else:\n self._handle_packet_reactive(event)\n\n def _get_links_from_path(self, path):\n path_len = len(path)\n for i in range(0, path_len - 1):\n link_key = self._link_key(path[i], path[i + 1])\n reverse_link_key = self._link_key(path[i + 1], path[i])\n # if link_key is not in self.link_usage and reverse_link_key is not in self.link_usage:\n self.link_usage[link_key] = 0\n 
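# initialize the reverse direction too: usage is tracked per directed link\n            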
self.link_usage[reverse_link_key] = 0\n\n def _get_equal_cost_routes(self, src, dst):\n src_host_name = self.t.id_gen(dpid=src).name_str()\n src_sw = self.t.up_nodes(src_host_name)\n assert len(src_sw) == 1\n src_sw_name = src_sw[0]\n dst_host_name = self.t.id_gen(dpid=dst).name_str()\n dst_sw = self.t.up_nodes(dst_host_name)\n assert len(dst_sw) == 1\n dst_sw_name = dst_sw[0]\n all_paths = self.r.get_route(src_sw_name, dst_sw_name, None, True)\n for path in all_paths:\n self._get_links_from_path(path)\n self.paths[self._path_key(src_sw_name, dst_sw_name)] = all_paths\n\n def _get_all_paths(self):\n t = self.t\n # Install L2 src/dst flow for every possible pair of hosts.\n for src in sorted(self._raw_dpids(t.layer_nodes(t.LAYER_HOST))):\n for dst in sorted(self._raw_dpids(t.layer_nodes(t.LAYER_HOST))):\n self._get_equal_cost_routes(src, dst)\n\n def _handle_ConnectionUp(self, event):\n sw = self.switches.get(event.dpid)\n sw_str = dpidToStr(event.dpid)\n self.con = event.connection\n self.mac = self.con.eth_addr\n log.info(\"Saw switch come up: %s\", sw_str)\n name_str = self.t.id_gen(dpid=event.dpid).name_str()\n if name_str not in self.t.switches():\n log.warn(\"Ignoring unknown switch %s\" % sw_str)\n return\n if sw is None:\n log.info(\"Added fresh switch %s\" % sw_str)\n sw = Switch()\n self.switches[event.dpid] = sw\n sw.connect(event.connection)\n else:\n log.info(\"Odd - already saw switch %s come up\" % sw_str)\n sw.connect(event.connection)\n sw.connection.send(of.ofp_set_config(miss_send_len=MISS_SEND_LEN))\n\n if len(self.switches) == len(self.t.switches()):\n log.info(\"Woo! All switches up\")\n self.all_switches_up = True\n self._get_all_paths()\n if self.all_switches_up == True:\n self._do_probe()\n\ndef launch(topo, ip, servers):\n \"\"\"\n Launch Hedera Controller\n\n topo is in format toponame,arg1,arg2,...\n \"\"\"\n # Boot up ARP Responder\n from proto.arp_responder import launch as arp_launch\n arp_launch(eat_packets=False, **{str(ip): True})\n import logging\n logging.getLogger(\"proto.arp_responder\").setLevel(logging.WARN)\n\n # Instantiate a topo object from the passed-in file.\n if not topo:\n raise Exception(\"please specify topo and args on cmd line\")\n else:\n t = buildTopo(topo, topos)\n r = getRouting('hashed', t)\n servers = servers.replace(\",\", \" \").split()\n servers = [IPAddr(x) for x in servers]\n ip = IPAddr(ip)\n log.info(\"Load Balancer Ready.\")\n core.registerNew(HederaController, t, r, IPAddr(ip), servers)\n\n log.info(\"Hedera running with topo=%s.\" % topo)\n","sub_path":"jogress2.py","file_name":"jogress2.py","file_ext":"py","file_size_in_byte":25447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"93141226","text":"from django.db.models import Field\nfrom django.db.models.expressions import Col\n\nfrom .constants import CPK_SEP\n\n\nclass CompositeCol(Col):\n def __init__(self, alias, target, output_field=None):\n super().__init__(alias, target, output_field)\n self.children = [Col(alias, key, output_field) for key in target.keys]\n\n def as_sql(self, compiler, connection):\n sqls = []\n for child in self.children:\n sql, _ = child.as_sql(compiler, connection)\n sqls.append(sql)\n return \"(%s)\" % \",\".join(sqls), []\n\n\nclass CompositeKey(Field):\n def __init__(self, keys, primary=False):\n names = tuple((f.name for f in keys))\n join_name = CPK_SEP.join(names)\n db_columns = tuple((f.db_column if f.db_column else f.name for f in keys))\n db_join_column = \"(\" + 
\",\".join(db_columns) + \")\"\n super().__init__(\n name=join_name, \n primary_key=primary,\n unique=True,\n )\n self.keys = keys\n self.attname = join_name\n self.column = join_name\n self.names = names\n self.model = keys[0].model\n\n def get_col(self, alias, output_field=None):\n return CompositeCol(alias, self, output_field)\n","sub_path":"compositepk-model/cpkmodel/compositekey.py","file_name":"compositekey.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"443017947","text":"from flask import Flask\nfrom flask.templating import render_template\n\napp = Flask(__name__)\n\n\n@app.route('/test')\ndef test():\n data = {\n \"Status\": 200,\n \"Message\": \"Up and Running\"\n }\n return data\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"Day 56/portfolio-website/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"151033166","text":"import datetime\nimport os\nimport tempfile\n\nfrom django.conf import settings as djangoSettings\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template\n\nfrom integrations.google.analytics.utils.charts.pageviews_users import \\\n pageviews_users\nfrom integrations.google.analytics.utils.charts.world_map import world_map\nfrom integrations.google.analytics.utils.create_csv import create_csv\nfrom integrations.mailchimp.utils.list_campaigns import list_campaigns\nfrom integrations.pdfreactor.utils.helper_function import html_file_2_pdf\nfrom issuer.models import Issuer\n\n\ndef traffic_report(request, issuer_pk):\n \"\"\"Generate an external report.\"\"\"\n\n template_file = 'issuer/reports/traffic.html'\n\n issuer_obj = Issuer.objects.get(pk=issuer_pk)\n\n campaigns = list_campaigns(filter=issuer_obj.internal_identifier)\n\n df = create_csv(issuer_obj)\n\n # Replace all Jinja2 variables with relevant data\n context = {\n 'environment': djangoSettings.ENVIRONMENT_MODE,\n 'style': {\n 'top_color': '#37474F'\n },\n 'content': {\n 'top_header': 'traffic report',\n 'report_date': datetime.datetime.now().strftime('%Y-%m-%d'),\n 'report_title': 'Traffic report for ' + issuer_obj.legal_name,\n 'issuer': {\n 'data': issuer_obj,\n },\n 'campaigns': campaigns,\n 'charts': {\n 'world_map': world_map(df, return_base64=True),\n 'pageviews_users': pageviews_users(df, return_base64=True),\n }\n }\n }\n\n # Return formatted as html\n rendered_html = get_template(template_file).render(context)\n\n f = tempfile.NamedTemporaryFile(mode='w+t',\n suffix='.html')\n f.write(rendered_html)\n f.read()\n\n # Generate a unique file name\n target_file = 'Web traffic report for {}, extracted on {} at {}.pdf'.\\\n format(\n issuer_obj.legal_name,\n datetime.datetime.today().strftime('%Y-%m-%d'),\n datetime.datetime.today().strftime('%H%M'), )\n html_file_2_pdf(f.name, target_file)\n\n from document_export.util import compress_pdf\n # Make pdf file much smaller\n compress_pdf(target_file)\n\n response = HttpResponse(content=open(target_file, 'rb'))\n response['Content-Type'] = 'application/pdf'\n response['Content-Disposition'] = 'inline; filename=\"%s\"' % target_file\n\n os.unlink(target_file)\n\n return 
response\n","sub_path":"ncr_website/document_export/reports/traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"470966055","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2014, 2015 Adam.Dybbroe\n\n# Author(s):\n\n# Adam.Dybbroe \n# Panu Lahtinen \n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n\"\"\"Conversion between radiances and brightness temperatures for the IR bands of\nvarious satellite sensors\n\"\"\"\n\nimport numpy as np\nfrom pyspectral.blackbody import blackbody, blackbody_wn\nfrom pyspectral.utils import BANDNAMES\n\nimport logging\nLOG = logging.getLogger(__name__)\n\nWAVE_LENGTH = 'wavelength'\nWAVE_NUMBER = 'wavenumber'\n\nEPSILON = 0.01\nTB_MIN = 150.\nTB_MAX = 360.\n\n# Meteosat SEVIRI regression parameters according to documentation\n# (PDF_EFFECT_RAD_TO_BRIGHTNESS.pdf).\n#\n# Tb = C2 * νc/{α * log[C1*νc**3 / L + 1]} - β/α\n#\n# L = C1 * νc**3 / (exp (C2 νc / [αTb + β]) − 1)\n#\n# C1 = 2 * h * c**2 and C2 = hc/k\n#\n# Units are cm-1 for the channel/band central wavenumber, K for the beta\n# parameter, and the alpha parameter is dimensionless:\n#\nSEVIRI = {'IR3.9': {'Meteosat-8': [2567.330, 0.9956, 3.410],\n 'Meteosat-9': [2568.832, 0.9954, 3.438],\n 'Meteosat-10': [],\n },\n 'WV6.2': {'Meteosat-8': [1598.103, 0.9962, 2.218],\n 'Meteosat-9': [1600.548, 0.9963, 2.185],\n },\n 'WV7.3': {'Meteosat-8': [1362.081, 0.9991, 0.478],\n 'Meteosat-9': [1360.330, 0.9991, 0.470],\n },\n 'IR8.7': {'Meteosat-8': [1149.069, 0.9996, 0.179],\n 'Meteosat-9': [1148.620, 0.9996, 0.179],\n },\n 'IR9.7': {'Meteosat-8': [1034.343, 0.9999, 0.060],\n 'Meteosat-9': [1035.289, 0.9999, 0.056],\n },\n 'IR10.8': {'Meteosat-8': [930.647, 0.9983, 0.625],\n 'Meteosat-9': [931.700, 0.9983, 0.640],\n },\n 'IR12.0': {'Meteosat-8': [839.660, 0.9988, 0.397],\n 'Meteosat-9': [836.445, 0.9988, 0.408],\n },\n 'IR13.4': {'Meteosat-8': [752.387, 0.9981, 0.578],\n 'Meteosat-9': [751.792, 0.9981, 0.561],\n },\n }\n\n\nclass RadTbConverter(object):\n\n \"\"\"A radiance to brightness temperature calculator\n\n It can do the conversion either based on direct use of the band relative\n spectral response function, or on officially (by satellite agencies)\n tabulated standard values using non-linear regression methods.\n Methods:\n 1: Spectral response function\n 2: non-linear approximation using tabulated coefficients \n \"\"\"\n\n def __init__(self, platform_name, instrument, bandname, method=1,\n **options):\n \"\"\"E.g.:\n platform_name = 'Meteosat-9'\n instrument = 'seviri'\n \"\"\"\n self.platform_name = platform_name\n self.instrument = instrument\n self.rsr = None\n self.bandname = BANDNAMES.get(bandname, bandname)\n\n if 'detector' in options:\n self.detector = options['detector']\n else:\n self.detector = 'det-1'\n\n if 'wavespace' in options:\n if options['wavespace'] not in [WAVE_LENGTH, 
WAVE_NUMBER]:\n raise AttributeError('Wave space not %s or %s!' % (WAVE_LENGTH,\n WAVE_NUMBER))\n self.wavespace = options['wavespace']\n else:\n self.wavespace = WAVE_LENGTH\n\n self._wave_unit = ''\n self._wave_si_scale = 1.0\n\n if 'tb_resolution' in options:\n self.tb_resolution = options['tb_resolution']\n else:\n self.tb_resolution = 0.1\n self.tb_scale = 1. / self.tb_resolution\n\n if method == 1:\n self.get_rsr()\n\n def get_rsr(self):\n \"\"\"Get all spectral responses for the sensor\"\"\"\n from pyspectral.utils import convert2wavenumber\n from pyspectral.rsr_reader import RelativeSpectralResponse\n\n sensor = RelativeSpectralResponse(self.platform_name,\n self.instrument)\n LOG.debug(\"Wavenumber? %s\", str(self.wavespace))\n if self.wavespace == WAVE_NUMBER:\n LOG.debug(\"Converting to wavenumber...\")\n self.rsr, info = convert2wavenumber(sensor.rsr)\n else:\n self.rsr = sensor.rsr\n info = {'unit': sensor.unit, 'si_scale': sensor.si_scale}\n\n self._wave_unit = info['unit']\n self._wave_si_scale = info['si_scale']\n\n def _getsatname(self):\n \"\"\"Get the satellite name used in the rsr-reader, from the platform\n and number\n \"\"\"\n if self.platform_name.startswith(\"Meteosat\"):\n return self.platform_name\n else:\n raise NotImplementedError('Platform %s not yet supported...' %\n str(self.platform_name))\n\n def tb2radiance(self, tb_, bandname, lut=None):\n \"\"\"Get the radiance from the brightness temperature (Tb) given the\n band name. \n \"\"\"\n from scipy import integrate\n\n if self.wavespace == WAVE_NUMBER:\n unit = 'W/m^2 sr^-1 (m^-1)^-1'\n scale = 1.0\n else:\n unit = 'W/m^2 sr^-1 m^-1'\n scale = 1.0\n\n if not bandname and not np.any(lut):\n raise SyntaxError('Either a band name or a lut needs '\n 'to be provided as input to the function call!')\n\n if lut:\n ntb = (tb_ * self.tb_scale).astype('int16')\n start = int(lut['tb'][0] * self.tb_scale)\n retv = {}\n bounds = 0, lut['radiance'].shape[0] - 1\n index = np.clip(ntb - start, bounds[0], bounds[1])\n retv['radiance'] = lut['radiance'][index]\n retv['unit'] = unit\n retv['scale'] = scale\n return retv\n\n if self.wavespace == WAVE_LENGTH:\n wv_ = (self.rsr[bandname][self.detector]['wavelength'] *\n self._wave_si_scale)\n resp = self.rsr[bandname][self.detector]['response']\n planck = blackbody(wv_, tb_) * resp\n elif self.wavespace == WAVE_NUMBER:\n wv_ = (self.rsr[bandname][self.detector]['wavenumber'] *\n self._wave_si_scale)\n resp = self.rsr[bandname][self.detector]['response']\n planck = blackbody_wn(wv_, tb_) * resp\n else:\n raise NotImplementedError('%s representation of '\n 'rsr data not supported!' %\n str(self.wavespace))\n\n radiance = integrate.trapz(planck, wv_) / np.trapz(resp, wv_)\n\n return {'radiance': radiance,\n 'unit': unit,\n 'scale': scale}\n\n def make_tb2rad_lut(self, bandname, filepath):\n \"\"\"Generate a Tb to radiance look-up table\"\"\"\n tb_ = np.arange(TB_MIN, TB_MAX, self.tb_resolution)\n retv = self.tb2radiance(tb_, bandname)\n rad = retv['radiance']\n np.savez(filepath, tb=tb_, radiance=rad.compressed())\n\n def read_tb2rad_lut(self, filepath):\n \"\"\"Read the Tb to radiance look-up table\"\"\"\n retv = np.load(filepath, 'r')\n return retv\n\n def tb2radiance_simple(self, tb_, bandname):\n \"\"\"Get the radiance from the Tb using the simple non-linear regression\n method. 
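For example, for Meteosat-9 IR10.8 the SEVIRI table above gives\n        vc = 931.700 cm-1 (scaled by 100 to m-1 below), alpha = 0.9983 and\n        beta = 0.640. 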
SI units of course!\n \"\"\"\n # L = C1 * νc**3 / (exp (C2 νc / [αTb + β]) − 1)\n #\n # C1 = 2 * h * c**2 and C2 = hc/k\n #\n from pyspectral.blackbody import (H_PLANCK, K_BOLTZMANN, C_SPEED)\n\n c_1 = 2 * H_PLANCK * C_SPEED ** 2\n c_2 = H_PLANCK * C_SPEED / K_BOLTZMANN\n\n vc_ = SEVIRI[bandname][self.platform_name][0]\n # Multiply by 100 to get SI units!\n vc_ *= 100.0\n alpha = SEVIRI[bandname][self.platform_name][1]\n beta = SEVIRI[bandname][self.platform_name][2]\n\n radiance = c_1 * vc_ ** 3 / \\\n (np.exp(c_2 * vc_ / (alpha * tb_ + beta)) - 1)\n\n unit = 'W/m^2 sr^-1 (m^-1)^-1'\n scale = 1.0\n #unit = 'mW/m^2 sr^-1 (cm^-1)^-1'\n #scale = 10.0\n return {'radiance': radiance,\n 'unit': unit,\n 'scale': scale}\n\n def radiance2tb_simple(self, rad, bandname):\n \"\"\"Get the Tb from the radiance using the simple non-linear regression\n method. \n rad: Radiance in units = 'mW/m^2 sr^-1 (cm^-1)^-1'\n \"\"\"\n #\n # Tb = C2 * νc/{α * log[C1*νc**3 / L + 1]} - β/α\n #\n # C1 = 2 * h * c**2 and C2 = hc/k\n #\n from pyspectral.blackbody import (H_PLANCK, K_BOLTZMANN, C_SPEED)\n\n c_1 = 2 * H_PLANCK * C_SPEED ** 2\n c_2 = H_PLANCK * C_SPEED / K_BOLTZMANN\n\n vc_ = SEVIRI[bandname][self.platform_name][0]\n # Multiply by 100 to get SI units!\n vc_ *= 100.0\n alpha = SEVIRI[bandname][self.platform_name][1]\n beta = SEVIRI[bandname][self.platform_name][2]\n\n tb_ = c_2 * vc_ / \\\n (alpha * np.log(c_1 * vc_ ** 3 / rad + 1)) - beta / alpha\n\n return tb_\n","sub_path":"pyspectral/radiance_tb_conversion.py","file_name":"radiance_tb_conversion.py","file_ext":"py","file_size_in_byte":9748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"577028704","text":"\"\"\"\nSQL Storage for Policies.\n\"\"\"\n\nimport json\nimport logging\n\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import FlushError\n\nfrom .model import PolicyModel, PolicyActionModel, PolicyResourceModel, PolicySubjectModel\nfrom ..abc import Storage\nfrom ...checker import StringExactChecker, StringFuzzyChecker, RegexChecker, RulesChecker\nfrom ...exceptions import PolicyExistsError, UnknownCheckerType\nfrom ...policy import TYPE_STRING_BASED, TYPE_RULE_BASED\n\nlog = logging.getLogger(__name__)\n\n\nclass SQLStorage(Storage):\n \"\"\"Stores all policies in SQL Database\"\"\"\n\n def __init__(self, scoped_session):\n \"\"\"\n Initialize SQL Storage\n\n :param scoped_session: SQL Alchemy scoped session\n \"\"\"\n self.session = scoped_session\n\n def add(self, policy):\n try:\n policy_model = PolicyModel.from_policy(policy)\n self.session.add(policy_model)\n self.session.commit()\n except IntegrityError:\n self.session.rollback()\n log.error('Error trying to create already existing policy with UID=%s.', policy.uid)\n raise PolicyExistsError(policy.uid)\n # todo - figure out why FlushError is raised instead of IntegrityError on PyPy tests\n except FlushError as e:\n if 'conflicts with persistent instance' in str(e):\n self.session.rollback()\n log.error('Error trying to create already existing policy with UID=%s.', policy.uid)\n raise PolicyExistsError(policy.uid)\n log.info('Added Policy: %s', policy)\n\n def get(self, uid):\n policy_model = self.session.query(PolicyModel).get(uid)\n if not policy_model:\n return None\n return policy_model.to_policy()\n\n def get_all(self, limit, offset):\n self._check_limit_and_offset(limit, offset)\n cur = self.session.query(PolicyModel).slice(offset, offset + limit)\n for policy_model in cur:\n yield policy_model.to_policy()\n\n 
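# find_for_inquiry narrows the scan via _get_filtered_cursor below, so\n    # checker-specific filtering happens in SQL rather than in Python.\n    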
def find_for_inquiry(self, inquiry, checker=None):\n cur = self._get_filtered_cursor(inquiry, checker)\n for policy_model in cur:\n yield policy_model.to_policy()\n\n def update(self, policy):\n try:\n policy_model = self.session.query(PolicyModel).get(policy.uid)\n if not policy_model:\n return\n policy_model.update(policy)\n self.session.commit()\n except IntegrityError:\n self.session.rollback()\n raise\n log.info('Updated Policy with UID=%s. New value is: %s', policy.uid, policy)\n\n def delete(self, uid):\n self.session.query(PolicyModel).filter(PolicyModel.uid == uid).delete()\n log.info('Deleted Policy with UID=%s.', uid)\n\n def _get_filtered_cursor(self, inquiry, checker):\n \"\"\"\n Returns cursor with proper query-filter based on the checker type.\n \"\"\"\n cur = self.session.query(PolicyModel)\n if isinstance(checker, StringFuzzyChecker):\n return cur.filter(\n PolicyModel.type == TYPE_STRING_BASED,\n PolicyModel.subjects.any(PolicySubjectModel.subject.like(\"%{}%\".format(inquiry.subject))),\n PolicyModel.resources.any(PolicyResourceModel.resource.like(\"%{}%\".format(inquiry.resource))),\n PolicyModel.actions.any(PolicyActionModel.action.like(\"%{}%\".format(inquiry.action))))\n elif isinstance(checker, StringExactChecker):\n # A string is converted to a JSON string before inserting\n return cur.filter(\n PolicyModel.type == TYPE_STRING_BASED,\n PolicyModel.subjects.any(PolicySubjectModel.subject == json.dumps(inquiry.subject)),\n PolicyModel.resources.any(PolicyResourceModel.resource == json.dumps(inquiry.resource)),\n PolicyModel.actions.any(PolicyActionModel.action == json.dumps(inquiry.action)))\n elif isinstance(checker, RegexChecker):\n return cur.filter(\n PolicyModel.type == TYPE_STRING_BASED)\n elif isinstance(checker, RulesChecker):\n return cur.filter(\n PolicyModel.type == TYPE_RULE_BASED)\n elif not checker:\n return cur\n else:\n log.error('Provided Checker type is not supported.')\n raise UnknownCheckerType(checker)\n","sub_path":"vakt/storage/sql/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"236240122","text":"#!/usr/bin/python\r\nimport os\r\nimport time\r\nimport sys\r\nimport json \r\nimport argparse\r\nimport csv\r\nimport DTIAtlasBuilder_Preprocess\r\nimport DTIAtlasBuilder_AtlasBuilding\r\nimport DTIAtlasBuilder_Utilities\r\nimport shutil\r\nimport threading\r\nimport traceback\r\nfrom copy import deepcopy\r\n\r\n### load configutation json\r\n\r\ndef unique(list1): \r\n unique_list = [] \r\n for x in list1: \r\n # check if exists in unique_list or not \r\n if x not in unique_list: \r\n unique_list.append(x) \r\n return unique_list\r\n\r\ndef isComponent(seq,name):\r\n comp=list(filter(lambda x : x['name']==name,seq))\r\n if len(comp)>0 :\r\n return comp[0] \r\n else:\r\n return False \r\n\r\ndef find_config_by_nodename(build_sequence,nodename):\r\n for cfg in build_sequence:\r\n if cfg[\"m_NodeName\"]==nodename:\r\n return cfg \r\n\r\n\r\ndef generate_deformation_track(seq,node=\"target\"): #input : initialSequence to generate deformation field tracking information (to concatenate them)\r\n component=isComponent(seq,node)\r\n outseq=[]\r\n\r\n if component != False:\r\n for c in component[\"dataset_ids\"]:\r\n tmpseq=generate_deformation_track(seq,c)\r\n for t in tmpseq:\r\n outseq.append(node+\"/\"+t)\r\n else:\r\n outseq.append(node)\r\n return outseq \r\n return outseq\r\n\r\ndef 
invert_deformation_track(deformation_seq):\r\n seq=deepcopy(deformation_seq)\r\n outseq=[]\r\n for s in seq:\r\n elm=s\r\n strvec=s['id'].split(\"/\")\r\n strvec.reverse()\r\n elm['id']='/'.join(strvec)\r\n #elm['original_dti_id']=strvec[-1]\r\n arr=[]\r\n for e in s['filelist']:\r\n basedir=os.path.dirname(e)\r\n name=\"_\".join(os.path.basename(e).split('_')[:-1])+\"_InverseGlobalDisplacementField.nrrd\"\r\n inverted_deform_path=os.path.join(basedir,name)\r\n arr.append(inverted_deform_path)\r\n arr.reverse()\r\n elm['filelist']=arr \r\n\r\n output_dir=os.path.dirname(s['output_path'])\r\n output_name=\"_\".join(os.path.basename(s['output_path']).split('_')[:-2])+\"_InverseGlobalDisplacementField_Concatenated.nrrd\"\r\n output_path=os.path.join(output_dir,output_name)\r\n elm['output_path']=output_path\r\n outseq.append(elm)\r\n return outseq \r\n\r\n \r\ndef furnish_deformation_track(seq,project_path,build_sequence,inverse=False): #input deformSequence \r\n res=[]\r\n for d in seq:\r\n tmp={}\r\n tmp['id']=d\r\n compseq=d.split('/')\r\n cfg=find_config_by_nodename(build_sequence,compseq[-2])\r\n originalDTIId=compseq[-1]\r\n originalDTIPath=None\r\n for idx,case in enumerate(zip(cfg[\"m_CasesIDs\"],cfg[\"m_CasesPath\"])):\r\n caseID,casePath=case \r\n if originalDTIId==caseID: \r\n originalDTIPath=casePath\r\n break\r\n\r\n entry=[]\r\n for idx,c in enumerate(compseq[0:-1]):\r\n fpath=\"atlases/\" + c + \"/5_Final_Atlas/FinalDeformationFields/\" + compseq[idx+1] + \"_GlobalDisplacementField.nrrd\"\r\n fpath=os.path.join(project_path,fpath)\r\n entry.append(fpath)\r\n tmp['filelist']=entry\r\n tmp['original_dti_path']=originalDTIPath \r\n tmp['original_dti_id']=originalDTIId\r\n tmp['scalar_measurement']=cfg[\"m_ScalarMeasurement\"]\r\n tmp['nb_loops']=cfg['m_nbLoops']\r\n tmp['nb_loops_dtireg']=cfg['m_nbLoopsDTIReg']\r\n tmp['project_path']=cfg['m_OutputPath']\r\n tmp['need_to_be_cropped']=cfg['m_NeedToBeCropped']\r\n outputDir=os.path.join(project_path,\"displacement_fields\")\r\n hpairList=tmp[\"id\"].split(\"/\")\r\n outFilename=\"_\".join(hpairList) + \"_GlobalDisplacementField_Concatenated.nrrd\"\r\n outFilename=os.path.join(outputDir,outFilename)\r\n tmp['output_path']=outFilename\r\n res.append(tmp)\r\n return res \r\n\r\n\r\n\r\n\r\ndef parse_hbuild(hb,root_path,root_node=\"target\"): #hbuild parser to generate build sequence\r\n if root_node is None:\r\n root_node=hb['project']['target_node']\r\n root=hb['build'][root_node]\r\n seq=[]\r\n nodeFiles=[] ## sub node's final atlases\r\n # scalar=hb['config']['m_ScalarMeasurement']\r\n if root[\"type\"]==\"node\": \r\n for c in root[\"components\"]:\r\n seq+=parse_hbuild(hb, root_path=root_path, root_node=c)\r\n nodeAtlasPath=os.path.join(root_path,\"atlases/\"+c+\"/5_Final_Atlas/FinalAtlasDTI.nrrd\")\r\n nodeFiles.append(nodeAtlasPath)\r\n elif root[\"type\"]==\"end_node\":\r\n if root[\"filetype\"]==\"dataset\":\r\n rows=[]\r\n rows_id=[]\r\n with open(str(root['datasetfiles']),'r') as f:\r\n csvreader=csv.reader(f)\r\n next(csvreader,None)\r\n for r in csvreader:\r\n fpath=str(r[1])\r\n fid=os.path.splitext(os.path.basename(fpath))[0]\r\n rows.append(fpath)\r\n rows_id.append(str(fid))\r\n\r\n return [{\"name\" : str(root_node),\r\n \"dataset_files\" : rows,\r\n \"dataset_ids\" : rows_id,\r\n \"project_path\" : str(os.path.join(root_path,\"atlases/\"+root_node))\r\n }]\r\n else:\r\n flist=list(map(str,root[\"datasetfiles\"]))\r\n fids=[]\r\n for e in flist:\r\n fid=os.path.splitext(os.path.basename(e))[0]\r\n 
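# the case ID is the file basename without its extension\r\n                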
fids.append(fid)\r\n\r\n            return [{\"name\" : str(root_node),\r\n                \"dataset_files\" : flist,\r\n                \"dataset_ids\" : fids,\r\n                \"project_path\" : str(os.path.join(root_path,\"atlases/\"+root_node))\r\n                }]\r\n\r\n    # node type file reading\r\n\r\n    seq+=[{\"name\" : str(root_node),\r\n        \"dataset_files\" : list(map(str,nodeFiles)),\r\n        \"dataset_ids\" : list(map(str,root[\"components\"])),\r\n        \"project_path\" : str(os.path.join(root_path,\"atlases/\"+root_node))\r\n\r\n        }]\r\n    seq=unique(seq)\r\n\r\n    ## generate final buildsequence furnished with configuration\r\n\r\n\r\n    return seq\r\n\r\ndef furnish_sequence(hb,seq):\r\n    bs=[]\r\n    for s in seq:\r\n        conf=hb[\"config\"].copy()\r\n        conf[\"m_OutputPath\"]=s['project_path']\r\n        conf[\"m_CasesPath\"]=s['dataset_files']\r\n        conf[\"m_CasesIDs\"]=s['dataset_ids']\r\n        conf[\"m_NodeInfo\"]=hb[\"build\"][s['name']]\r\n        conf[\"m_NodeName\"]=s[\"name\"]\r\n        bs.append(conf)\r\n\r\n    return bs\r\n\r\ndef generate_directories(project_path,sequence): ## from build sequence, generate directories\r\n    atlasesPath=os.path.join(project_path,\"atlases\")\r\n    finalAtlasPath=os.path.join(project_path,\"final_atlas\")\r\n    if not os.path.isdir(atlasesPath):\r\n        print(\"\\n=> Creation of the atlas directory = \" + atlasesPath)\r\n        os.mkdir(atlasesPath)\r\n    if not os.path.isdir(finalAtlasPath):\r\n        print(\"\\n=> Creation of the atlas directory = \" + finalAtlasPath)\r\n        os.mkdir(finalAtlasPath)\r\n    for s in sequence:\r\n        apath=os.path.join(s[\"m_OutputPath\"])\r\n        if not os.path.isdir(apath):\r\n            print(\"\\n=> Creation of the atlas directory = \" + apath)\r\n            os.mkdir(apath)\r\n    print(\"Initial directories are generated\")\r\n\r\n\r\ndef dependency_satisfied(hb,node_name,completed_atlases):\r\n    if hb[\"build\"][node_name][\"type\"]==\"end_node\":\r\n        return True\r\n    else:\r\n        comps=hb[\"build\"][node_name][\"components\"]\r\n        for c in comps:\r\n            if c not in completed_atlases: return False\r\n        return True\r\n\r\n\r\n\r\ndef generate_results_csv_from_deformation_track(deformation_track,project_path): # generate final result file with deformation track file\r\n\r\n    dt=deformation_track\r\n    outpath=os.path.join(project_path,\"DTIAtlasBuilderResults.csv\")\r\n    \r\n    m_ScalarMeasurement=dt[0][\"scalar_measurement\"]\r\n    m_NeedToBeCropped=dt[0][\"need_to_be_cropped\"]\r\n    header=[\"id\", \"Original DTI Image\"]\r\n    if m_NeedToBeCropped==1: header += [\"Cropped DTI\"]\r\n    tmp=[m_ScalarMeasurement+ \" from original\",\r\n        \"Affine transform\", \"Affine Registered DTI\",\r\n        \"Affine Registered \"+m_ScalarMeasurement,\r\n        \"Diffeomorphic Deformed \" + m_ScalarMeasurement,\r\n        \"Diffeomorphic Deformation field to Affine space\",\r\n        \"Diffeomorphic Inverse Deformation field to Affine space\",\r\n        \"Diffeomorphic DTI\",\r\n        \"Diffeomorphic Deformation field to Original space\",\r\n        \"DTI-Reg Final DTI\"\r\n    ]\r\n    header+=tmp\r\n    with open(outpath,\"w\") as f:\r\n        csvwriter=csv.writer(f,delimiter=',')\r\n        csvwriter.writerow(header)\r\n        for idx,case in enumerate(dt):\r\n            caseID,casePath = case[\"original_dti_id\"],case[\"original_dti_path\"]\r\n            m_OutputPath=case[\"project_path\"]\r\n            m_nbLoops=case[\"nb_loops\"]\r\n            m_nbLoopsDTIReg=case[\"nb_loops_dtireg\"]\r\n            row=[\r\n                idx+1,\r\n                casePath]\r\n            if m_NeedToBeCropped==1: row+=[m_OutputPath+\"/1_Affine_Registration/\" + caseID+\"_croppedDTI.nrrd\"]\r\n            concatenated_displacement_path=case[\"output_path\"]\r\n            row+=[\r\n                m_OutputPath+\"/1_Affine_Registration/\" + caseID + \"_\" + m_ScalarMeasurement + \".nrrd\",\r\n                
m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_LinearTrans.txt\",\r\n m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_LinearTrans_DTI.nrrd\",\r\n m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_Final\" + m_ScalarMeasurement +\".nrrd\",\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_NonLinearTrans_\" + m_ScalarMeasurement + \".mhd\",\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_HField.mhd\" ,\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_InverseHField.mhd\" ,\r\n m_OutputPath+\"/3_Diffeomorphic_Atlas/\" + caseID + \"_DiffeomorphicDTI.nrrd\",\r\n concatenated_displacement_path,\r\n m_OutputPath+\"/4_Final_Resampling/FinalTensors/\" + caseID + \"_FinalDeformedDTI.nrrd\"\r\n ]\r\n csvwriter.writerow(row)\r\n\r\ndef generate_results_csv(cfg):\r\n\r\n outpath=os.path.join(cfg[\"m_OutputPath\"],\"DTIAtlasBuilderResults.csv\")\r\n m_OutputPath=cfg[\"m_OutputPath\"]\r\n m_ScalarMeasurement=cfg[\"m_ScalarMeasurement\"]\r\n m_nbLoops=cfg[\"m_nbLoops\"]\r\n m_nbLoopsDTIReg=cfg[\"m_nbLoopsDTIReg\"]\r\n m_NeedToBeCropped=cfg[\"m_NeedToBeCropped\"]\r\n header=[\"id\", \"Original DTI Image\"]\r\n if m_NeedToBeCropped==1: header + [\"Cropped DTI\"]\r\n tmp=[cfg[\"m_ScalarMeasurement\"]+ \" from original\",\r\n \"Affine transform\", \"Affine Registered DTI\", \r\n \"Affine Registered \"+cfg[\"m_ScalarMeasurement\"],\r\n \"Diffeomorphic Deformed \" + cfg[\"m_ScalarMeasurement\"],\r\n \"Diffeomorphic Deformation field to Affine space\",\r\n \"Diffeomorphic Deformation field to Affine space\",\r\n \"Diffeomorphic DTI\",\r\n \"Diffeomorphic Deformation field to Original space\",\r\n \"DTI-Reg Final DTI\"\r\n ]\r\n header+=tmp\r\n with open(outpath,\"w\") as f:\r\n csvwriter=csv.writer(f,delimiter=',')\r\n csvwriter.writerow(header)\r\n for idx,case in enumerate(zip(cfg[\"m_CasesIDs\"],cfg[\"m_CasesPath\"])):\r\n caseID,casePath = case\r\n row=[\r\n idx+1,\r\n casePath]\r\n if m_NeedToBeCropped==1: row+=[m_OutputPath+\"/1_Affine_Registration/\" + caseID+\"_croppedDTI.nrrd\"]\r\n row+=[\r\n m_OutputPath+\"/1_Affine_Registration/\" + caseID + \"_\" + m_ScalarMeasurement + \".nrrd\",\r\n m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_LinearTrans.txt\",\r\n m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_LinearTrans_DTI.nrrd\",\r\n m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_Final\" + m_ScalarMeasurement +\".nrrd\",\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_NonLinearTrans_\" + m_ScalarMeasurement + \".mhd\",\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_HField.mhd\" ,\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_InverseHField.mhd\" ,\r\n m_OutputPath+\"/3_Diffeomorphic_Atlas/\" + caseID + \"_DiffeomorphicDTI.nrrd\",\r\n m_OutputPath+\"/4_Final_Resampling/FinalDeformationFields/\" + caseID + \"_GlobalDisplacementField.nrrd\",\r\n m_OutputPath+\"/4_Final_Resampling/FinalTensors/\" + caseID + \"_FinalDeformedDTI.nrrd\"\r\n ]\r\n csvwriter.writerow(row)\r\n\r\n\r\n\r\n\r\ndef main(args):\r\n projectPath=os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"../\"))\r\n scriptPath=os.path.join(projectPath,\"scripts\")\r\n 
commonPath=os.path.join(projectPath,'common')\r\n configPath=os.path.join(commonPath,\"config.json\")\r\n hbuildPath=os.path.join(commonPath,\"h-build.json\")\r\n \r\n ### generate build sequence\r\n buildSequence=[]\r\n hbuild={}\r\n deformSequence=[]\r\n numThreads=1\r\n if args.buildsequence is None:\r\n hbuild={}\r\n with open(hbuildPath,'r') as f:\r\n hbuild=json.load(f)\r\n config={}\r\n with open(configPath,'r') as f:\r\n config=json.load(f)\r\n numThreads=max(1,int(config[\"m_NbThreadsString\"]))\r\n hbuild[\"config\"]=config\r\n hbuild['config']['m_GreedyAtlasParametersTemplatePath']=str(os.path.join(commonPath,'GreedyAtlasParameters.xml'))\r\n initSequence=parse_hbuild(hbuild,root_path=projectPath,root_node=args.node)\r\n buildSequence=furnish_sequence(hbuild,initSequence)\r\n\r\n # for s in buildSequence:\r\n # print(s)\r\n #save sequence \r\n with open(os.path.join(commonPath,'build_sequence.json'),'w') as f:\r\n json.dump(buildSequence,f,indent=4,sort_keys=True)\r\n\r\n # generate scaffolding directories \r\n generate_directories(projectPath,buildSequence)\r\n else:\r\n with open(args.buildsequence,'r') as f:\r\n buildSequence=json.load(f)\r\n numThreads=max(int(buildSequence[0][\"m_NbThreadsString\"]),1)\r\n\r\n with open(os.path.join(commonPath,'initial_sequence.json'),'w') as f:\r\n json.dump(initSequence,f,indent=4)\r\n\r\n\r\n\r\n ## generate deformation field map\r\n deformInitSequence=generate_deformation_track(initSequence,node=hbuild['project']['target_node'])\r\n deformSequence=furnish_deformation_track(deformInitSequence,projectPath,buildSequence)\r\n inverseDeformSequence=invert_deformation_track(deformSequence)\r\n\r\n with open(os.path.join(commonPath,'deformation_track.json'),'w') as f:\r\n json.dump(deformSequence,f,indent=4)\r\n with open(os.path.join(commonPath,'deformation_track_inverted.json'),'w') as f:\r\n json.dump(inverseDeformSequence,f,indent=4)\r\n\r\n\r\n\r\n\r\n\r\n ### atlas build begins (to be multiprocessed)\r\n print(\"\\nThe current date and time are:\")\r\n print( time.strftime('%x %X %Z') )\r\n print(\"\\n=============== Main Script ================\")\r\n time1=time.time()\r\n\r\n\r\n ## threading\r\n completedAtlases=[] #entry should be the node name \r\n runningAtlases=[] # should have length less or equal than numTheads, entry is the node name\r\n\r\n\r\n def buildAtlas(conf,rt,ct): # rt : list of running threads, ct : list of completed threads, nt : number of thread (numThreads)\r\n prjName=conf[\"m_NodeName\"]\r\n rt.append(prjName)\r\n try:\r\n DTIAtlasBuilder_Preprocess.run(conf)\r\n except Exception as e:\r\n raise Exception(\"Error occurred in DTIAtlasBuilder_Preprocess : \" + str(e))\r\n\r\n try:\r\n DTIAtlasBuilder_AtlasBuilding.run(conf)\r\n except Exception as e:\r\n raise Exception(\"Error occurred in DTIAtlasBuilding_DTIAtlasBuilder : \" + str(e)) \r\n rt.remove(prjName)\r\n ct.append(prjName)\r\n\r\n numNodes=len(buildSequence)\r\n while len(completedAtlases) < numNodes:\r\n if len(runningAtlases) < numThreads and len(buildSequence)>0:\r\n if dependency_satisfied(hbuild,buildSequence[0][\"m_NodeName\"],completedAtlases):\r\n cfg=buildSequence.pop(0)\r\n generate_results_csv(cfg)\r\n threading.Thread(target=buildAtlas,args=(cfg,runningAtlases,completedAtlases)).start()\r\n\r\n # print(\"Completed : \" + str(completedAtlases))\r\n # print(\"Running : \" + str(runningAtlases))\r\n # print(\"Pending : \" + str([x[\"m_NodeName\"] for x in buildSequence]))\r\n time.sleep(1.0)\r\n\r\n # print(\"Completed : \" + 
str(completedAtlases))\r\n # print(\"Running : \" + str(runningAtlases))\r\n # print(\"Pending : \" + str([x[\"m_NodeName\"] for x in buildSequence]))\r\n\r\n ### copy final atals to 'final_atlas' directory\r\n try:\r\n if args.node is None:\r\n src=os.path.join(projectPath,\"atlases/\"+hbuild['project']['target_node'])\r\n else:\r\n src=os.path.join(projectPath,\"atlases/\"+args.node)\r\n dst=os.path.join(projectPath,\"final_atlas\")\r\n print(\"Copying filed from %s to %s\" %(src,dst))\r\n shutil.rmtree(dst)\r\n shutil.copytree(src,dst)\r\n\r\n except Exception as e:\r\n raise Exception(\"Error occurred in copying final atlas directory : \" +str(e))\r\n\r\n print(\"Final atlas copied into %s \"% dst)\r\n\r\n\r\n ### Concatenate the displacement fields\r\n print(\"\\nConcatenating deformation fields\")\r\n try:\r\n DTIAtlasBuilder_Utilities.ITKTransformTools_Concatenate(config,deformSequence)\r\n DTIAtlasBuilder_Utilities.ITKTransformTools_Concatenate_Inverse(config,inverseDeformSequence)\r\n generate_results_csv_from_deformation_track(deformSequence,projectPath)\r\n\r\n except Exception as e:\r\n raise Exception(\"Error occurred in concatenating deformation fields : \" + str(e))\r\n\r\n # Display execution time\r\n time2=time.time()\r\n timeTot=time2-time1\r\n if timeTot<60 : print(\"| Execution time = \" + str(int(timeTot)) + \"s\")\r\n elif timeTot<3600 : print(\"| Execution time = \" + str(int(timeTot)) + \"s = \" + str(int(timeTot/60)) + \"m \" + str( int(timeTot) - (int(timeTot/60)*60) ) + \"s\")\r\n else : print(\"| Execution time = \" + str(int(timeTot)) + \"s = \" + str(int(timeTot/3600)) + \"h \" + str( int( (int(timeTot) - int(timeTot/3600)*3600) /60) ) + \"m \" + str( int(timeTot) - (int(timeTot/60)*60) ) + \"s\")\r\n\r\n\r\n \r\n\r\nif __name__==\"__main__\":\r\n parser=argparse.ArgumentParser(description=\"Argument Parser\")\r\n parser.add_argument('--node',help=\"node to build\",type=str)\r\n parser.add_argument('--buildsequence',help='build sequence file, if this option is inputted then build sequence process will be skipped',type=str)\r\n args=parser.parse_args()\r\n\r\n\r\n try:\r\n main(args)\r\n sys.exit(0)\r\n except Exception as e:\r\n print(str(e))\r\n traceback.print_exc(file=sys.stdout)\r\n sys.exit(1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Scripts/template_main.py","file_name":"template_main.py","file_ext":"py","file_size_in_byte":19068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"557343019","text":"from selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nfrom Config import config\n\nconfig = config\n\n\nclass Driver:\n\n @staticmethod\n def get_driver():\n options = Options()\n options.headless = config.headless\n return webdriver.Firefox(options=options, service_log_path='../Logs/geckodriver.log')\n\n @staticmethod\n def close_driver(driver):\n driver.quit()\n","sub_path":"SeleniumFramework/Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"327205963","text":"batch_size = 100\nnum_classes = 10\nepochs = 100\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Conv2D, SeparableConv2D, DepthwiseConv2D\nfrom keras.layers import MaxPooling2D, AveragePooling2D, GlobalMaxPooling2D, GlobalAveragePooling2D\nfrom keras import 
regularizers\nfrom keras import metrics\nfrom keras import callbacks\nfrom keras.layers import BatchNormalization\nfrom apscheduler.schedulers.background import BackgroundScheduler\nimport pickle\nimport psutil\n\nprocess = psutil.Process()\n\nmcpu = 0\nmmem = 0\n\ndef get_info():\n    global mmem\n    mem = process.memory_info().rss\n    if mmem < mem:\n        mmem = mem\n\nscheduler = BackgroundScheduler()\nscheduler.add_job(get_info, 'interval', seconds=1)\n\n\ndata_augmentation = True\n\n#load saved data\npkl_file = open('/exports/home/j_liu21/projects/genetic_algorithms/x_train.pkl', 'rb')\nx_train = pickle.load(pkl_file, encoding='latin1')\npkl_file.close()\n\n############################################################\nx_data_len = len(x_train)\n#end = int(.2*x_data_len)\n#x_train_train = x_train[0:end]\n#x_train_valid = x_train[end:]\nprint(x_data_len)\nx_train_train = x_train[0:40000]\nx_train_valid = x_train[40000:50000]\n############################################################\n\npkl_file = open('/exports/home/j_liu21/projects/genetic_algorithms/y_train.pkl', 'rb')\ny_train = pickle.load(pkl_file, encoding='latin1')\npkl_file.close()\n\n############################################################\n#y_train_train = y_train[0:end]\n#y_train_valid = y_train[end:]\ny_train_train = y_train[0:40000]\ny_train_valid = y_train[40000:50000]\n############################################################\n\npkl_file = open('/exports/home/j_liu21/projects/genetic_algorithms/x_test.pkl', 'rb')\nx_test = pickle.load(pkl_file, encoding='latin1')\npkl_file.close()\n\npkl_file = open('/exports/home/j_liu21/projects/genetic_algorithms/y_test.pkl', 'rb')\ny_test = pickle.load(pkl_file, encoding='latin1')\npkl_file.close()\n\n# Convert class vectors to binary class matrices.\ny_train_train = keras.utils.to_categorical(y_train_train, num_classes)\ny_train_valid = keras.utils.to_categorical(y_train_valid, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n\ndef identity_block(model):\n    # append a conv + batch-norm stage to the model (helper is not called below)\n    model.add(Conv2D(20, (3, 3), padding='same', activation='relu'))\n    model.add(BatchNormalization())\n    return model\n\n\n\nmodel = Sequential()\n\n\n#print(model.summary())\n\nopt = keras.optimizers.rmsprop(lr=0.01, decay=1e-6)\n#opt = keras.optimizers.SGD(learning_rate = .01, decay=1e-6)\n\nes = keras.callbacks.EarlyStopping(monitor = 'val_loss', patience = 10, verbose = 1)\n\nscheduler.start()\n\nmodel.compile(loss = 'categorical_crossentropy',\n              optimizer = opt,\n              metrics = ['accuracy'])\n\nx_train_train = x_train_train.astype('float32')\nx_train_valid = x_train_valid.astype('float32')\nx_test = x_test.astype('float32')\nx_train_train /= 255\nx_train_valid /= 255\nx_test /= 255\n\nif not data_augmentation:\n    print('Not using data augmentation.')\n    model.fit(x_train_train, y_train_train,\n              batch_size = batch_size,\n              epochs = epochs,\n              validation_data = (x_train_valid, y_train_valid),\n              shuffle = True)\nelse:\n    print('Using real-time data augmentation.')\n    # This will do preprocessing and realtime data augmentation:\n    datagen = ImageDataGenerator(\n        featurewise_center = False, # set input mean to 0 over the dataset\n        samplewise_center = False, # set each sample mean to 0\n        featurewise_std_normalization = False, # divide inputs by std of the dataset\n        samplewise_std_normalization = False, # divide each input by its std\n        zca_whitening = False, # apply ZCA whitening\n        zca_epsilon = 1e-06, # epsilon for ZCA whitening\n        rotation_range = 0, # randomly rotate images in the range (degrees, 0 to 180)\n        width_shift_range = 0.1, # randomly shift images 
horizontally (fraction of total width)\n height_shift_range = 0.1, # randomly shift images vertically (fraction of total height)\n shear_range = 0., # set range for random shear\n zoom_range = 0., # set range for random zoom\n channel_shift_range = 0., # set range for random channel shifts\n fill_mode = 'nearest', # set mode for filling points outside the input boundaries\n cval = 0., # value used for fill_mode = \"constant\"\n horizontal_flip = True, # randomly flip images\n vertical_flip = False, # randomly flip images\n rescale = None, # set rescaling factor (applied before any other transformation)\n preprocessing_function = None, # set function that will be applied on each input\n data_format = None, # image data format, either \"channels_first\" or \"channels_last\"\n validation_split = 0.0 ) # fraction of images reserved for validation (strictly between 0 and 1)\n\n\n # Compute quantities required for feature-wise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n\n datagen.fit(x_train_train)\n\n # Fit the model on the batches generated by datagen.flow().\n\n model.fit_generator(datagen.flow(x_train_train, y_train_train, batch_size = batch_size),\n steps_per_epoch = 100,\n epochs = epochs,\n validation_data = (x_train_valid, y_train_valid),\n workers = 8,\n callbacks = [es] )\n\n #model.fit_generator(datagen.flow(x_train_train, y_train_train, batch_size = batch_size),\n # steps_per_epoch = 100,\n # epochs = epochs,\n # validation_data = (x_train_valid, y_train_valid),\n # workers = 8 )\n\n\n# Score trained model.\nscores_train_train = model.evaluate(x_train_train, y_train_train, verbose = 0)\nscores_test = model.evaluate(x_test, y_test, verbose = 0)\nscheduler.shutdown()\ncpu_time = process.cpu_times().user\n\nprint('Training_loss: {} Test_accuracy: {} Mem: {} CPU: {}'.format(scores_train_train[0], scores_test[1], mmem, cpu_time) )\n\n","sub_path":"sample_networks/ResNet/ResNet.py","file_name":"ResNet.py","file_ext":"py","file_size_in_byte":6582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"13737313","text":"import numpy\nimport pandas\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom sklearn.model_selection import train_test_split\nfrom torch.autograd import Variable\nfrom sklearn.utils import shuffle\n\nMAX_EPOCH = 1000\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n self.l1 = nn.Linear(7,77)\n self.l2 = nn.Linear(77,77)\n self.l3 = nn.Linear(77,77)\n self.l4 = nn.Linear(77,77)\n self.l5 = nn.Linear(77,77)\n self.l6 = nn.Linear(77,77)\n self.l7 = nn.Linear(77,77)\n self.l8 = nn.Linear(77,2)\n self.l9 = nn.Linear(2,1)\n\t\t\n def forward(self, x):\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = F.relu(self.l3(x))\n x = F.relu(self.l4(x))\n x = F.relu(self.l5(x))\n x = F.relu(self.l6(x))\n x = F.relu(self.l7(x))\n x = F.relu(self.l8(x))\n x = F.relu(self.l9(x))\n return x\n\ndef percentage_correct(pred, labels, threshold = 0.5):\n\tcorrect = 0\n\ttotal = 0\n\tconverted_pred = []\n\tfor p in pred:\n\t\tif (p.data[0] > threshold):\n\t\t\tconverted_pred.append(1)\n\t\telse:\n\t\t\tconverted_pred.append(0)\n \n\tif (len(converted_pred) == len(labels)):\n\t\tfor i in range(len(converted_pred)):\n\t\t\tif (converted_pred[i] == labels[i].data[0]):\n\t\t\t\tcorrect += 1\n\t\t\ttotal += 1\n\treturn correct/total\n\t\t\n## Importing Data ##\ndataset = pandas.read_csv('kickstarter_data_full.csv', low_memory = False)\ndata 
{"seq_id":"13737313","text":"import numpy\nimport pandas\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom sklearn.model_selection import train_test_split\nfrom torch.autograd import Variable\nfrom sklearn.utils import shuffle\n\nMAX_EPOCH = 1000\n\nclass Classifier(nn.Module):\n    def __init__(self):\n        super(Classifier, self).__init__()\n        self.l1 = nn.Linear(7,77)\n        self.l2 = nn.Linear(77,77)\n        self.l3 = nn.Linear(77,77)\n        self.l4 = nn.Linear(77,77)\n        self.l5 = nn.Linear(77,77)\n        self.l6 = nn.Linear(77,77)\n        self.l7 = nn.Linear(77,77)\n        self.l8 = nn.Linear(77,2)\n        self.l9 = nn.Linear(2,1)\n\n    def forward(self, x):\n        x = F.relu(self.l1(x))\n        x = F.relu(self.l2(x))\n        x = F.relu(self.l3(x))\n        x = F.relu(self.l4(x))\n        x = F.relu(self.l5(x))\n        x = F.relu(self.l6(x))\n        x = F.relu(self.l7(x))\n        x = F.relu(self.l8(x))\n        # return the raw logit: BCEWithLogitsLoss applies the sigmoid itself,\n        # and a ReLU here would clamp every logit to >= 0 (i.e. p >= 0.5)\n        x = self.l9(x)\n        return x\n\ndef percentage_correct(pred, labels, threshold = 0.5):\n    correct = 0\n    total = 0\n    converted_pred = []\n    for p in pred:\n        # pred holds logits, so squash through a sigmoid before thresholding\n        if torch.sigmoid(p).item() > threshold:\n            converted_pred.append(1)\n        else:\n            converted_pred.append(0)\n\n    if (len(converted_pred) == len(labels)):\n        for i in range(len(converted_pred)):\n            if (converted_pred[i] == labels[i].item()):\n                correct += 1\n            total += 1\n    return correct/total\n\n## Importing Data ##\ndataset = pandas.read_csv('kickstarter_data_full.csv', low_memory = False)\ndata = dataset[[\n    'disable_communication',\n    'country',\n    'currency',\n    'staff_pick',\n    'static_usd_rate',\n    'category',\n    'spotlight',\n    'SuccessfulBool'\n]].dropna().reset_index(drop = True)\n\n## Converting Categorical Columns to Integers and Bools to 0/1 ##\ndata['disable_communication'] = (data['disable_communication']).astype(int)\ndata['staff_pick'] = (data['staff_pick']).astype(int)\ndata['spotlight'] = (data['spotlight']).astype(int)\ndata['country'] = (data['country']).astype('category').cat.codes\ndata['currency'] = (data['currency']).astype('category').cat.codes\ndata['category'] = (data['category']).astype('category').cat.codes\n\n## Initializing Testing and Training Data ##\nY = data.iloc[0:int(data.size / 8), 7].values\nX = data.iloc[0:int(data.size / 8), data.columns != 'SuccessfulBool'].values\n\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 42)\n\n# shuffle X and y\nX_train, y_train = shuffle(X_train, y_train, random_state=0)\n\nX_train = torch.Tensor(X_train)\nX_test = torch.Tensor(X_test)\ny_train = torch.Tensor(y_train)\ny_test = torch.Tensor(y_test)\n\ntrain_data = Variable(X_train)\ntrain_labels= Variable(y_train)\ntest_data = Variable(X_test)\ntest_labels = Variable(y_test)\n\n## Training the Model ##\nmodel = Classifier()\n# Adam has no momentum argument (that belongs to SGD), so only pass lr here\noptimizer = torch.optim.Adam(model.parameters(), lr=0.03)\nloss = nn.BCEWithLogitsLoss()\n\naccuracies = []\nb_size = 100 # batch size\n\nfor epoch in range(MAX_EPOCH):\n    model.train()\n    for batch in range(0,train_data.size(0),b_size):\n        d = train_data[batch:batch+b_size]\n        l = train_labels[batch:batch+b_size]\n        optimizer.zero_grad()\n        pred = model(d).view(len(l))\n        error = loss(pred, l)\n        error.backward()\n        optimizer.step()\n    print ('epoch {} -- percentage correct: {}, error: {}'.format(epoch, percentage_correct(pred,l), error.item()))\n\n\n## Testing the Model ## \nmodel.eval()\npred = model(test_data).view(len(test_labels))\nerror = loss(pred, test_labels)\nprint(\"===================================\")\nprint(\"Final Accuracy\")\nprint(percentage_correct(pred, test_labels))\nprint(\"===================================\")","sub_path":"neural_networks/old_nns/nn2.py","file_name":"nn2.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
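nn2.py pairs BCEWithLogitsLoss with a network that, after the fix above, emits raw logits. Since sigmoid(z) > 0.5 exactly when z > 0, class predictions can also be taken directly from the logit sign. A small sketch on made-up toy tensors:

import torch

logits = torch.tensor([-1.2, 0.3, 2.5, -0.1])   # hypothetical model outputs
labels = torch.tensor([0., 1., 1., 0.])
preds = (logits > 0).float()                    # sigmoid(z) > 0.5  <=>  z > 0
accuracy = (preds == labels).float().mean().item()
print(accuracy)  # 1.0 on this toy data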
\"{0.scheme}://{0.netloc}/\".format(urlsplit(image_url))\n print(base_url)\n xpath = \"//*[@id=\\\"page\\\"]/section[3]/div/ul/li[1]/a/div/div[2]/img\"\n results = browser.find_by_xpath(xpath)\n img = results[0]\n img.click()\n html_image = browser.html\n soup = bs(html_image, \"html.parser\")\n url_image = soup.find(\"img\", class_=\"fancybox-image\")[\"src\"]\n featured_image_url = base_url + url_image\n print(featured_image_url)\n mars[\"featured_image\"]=featured_image_url\n \n # scrape the weather data\n \n weather_url=\"https://twitter.com/marswxreport?lang=en\"\n browser.visit(weather_url)\n html_weather= browser.html\n soup = bs(html_weather, \"html.parser\")\n mars_weather = soup.find(\"p\", class_=\"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\").text\n mars[\"weather\"]=mars_weather\n \n # scrape the fact data\n \n fact_url=\"https://space-facts.com/mars/\"\n browser.visit(fact_url)\n mars_table = pd.read_html(fact_url)\n mars_table[0]\n mars_df=mars_table[0]\n mars_df.columns=[\"Parameter\",\"Mars\",\"Earth\"]\n mars_df.set_index([\"Parameter\"], inplace=True)\n\n mars_df\n mars_fact=mars_df.to_html(index=True,header=True)\n mars_fact=mars_fact.replace(\"\\n\",\"\")\n mars[\"fact\"]=mars_fact\n\n # mars hemisphere\n hemis_url=\"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n browser.visit(hemis_url)\n #Getting the base url\n base_url = \"{0.scheme}://{0.netloc}/\".format(urlsplit(hemis_url))\n print(base_url)\n\n # create a empty for these hemisphere url\n all_urls=[]\n \n hemisphere_html = browser.html\n soup = bs(hemisphere_html, 'html.parser')\n image_list = soup.find_all('div', class_='item')\n # Loop through list of hemispheres \n for image in image_list:\n hemisphere_dict = {}\n # Find link\n href = image.find('a', class_='itemLink product-item')\n link = base_url + href['href']\n # Visit the link\n browser.visit(link)\n # Parse the html of the new page\n\n hemisphere_html2 = browser.html\n soup2 = bs(hemisphere_html2, 'html.parser')\n \n # Find the title\n img_title = soup2.find('div', class_='content').find('h2', class_='title').text\n # Append to dict\n hemisphere_dict['Title'] = img_title\n # Find image url\n img_url = soup2.find('div', class_='downloads').find('a')['href']\n # Append to dict\n hemisphere_dict['URL_IMG'] = img_url\n\n # Append dict to list\n\n all_urls.append(hemisphere_dict)\n\n mars[\"hemisphere_img_url\"] = all_urls\n\n\n return mars\n\n\n\n\n \n \n","sub_path":"Missions_to_Mars/.ipynb_checkpoints/scrape_mars-checkpoint.py","file_name":"scrape_mars-checkpoint.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"217420123","text":"# -- coding = 'utf-8' -- \n# Author Kylin\n# Python Version 3.7.3\n# OS macOS\n\"\"\"\nNo.334 递增的三元子序列\n需求:\n 给你一个整数数组nums,判断这个数组中是否存在长度为3的递增子序列\n 如果存在这样的三元组下标 (i, j, k) 且满足 i < j < k ,使得 nums[i] < nums[j] < nums[k] ,返回 true ;否则,返回 false 。\n\n\"\"\"\n\n\ndef increasingTriplet_onePointer(nums):\n \"\"\"\n 利用枚举+单指针,可惜超时了\n 时间复杂度:O(n^2)\n 空间复杂度:O(1)\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n n = len(nums)\n # 固定中间元素\n for i in range(1, n-1):\n left, right = 0, n-1\n flag1, flag2 = False, False\n\n while left < i:\n if nums[left] < nums[i]:\n flag1 = True\n break\n left += 1\n while right > i:\n if nums[right] > nums[i]:\n flag2 = True\n break\n right -= 1\n\n if flag1 and flag2:\n return True\n\n return False\n\ndef increasingTriplet_pointers(nums):\n \"\"\"\n 
{"seq_id":"217420123","text":"# -- coding = 'utf-8' -- \n# Author Kylin\n# Python Version 3.7.3\n# OS macOS\n\"\"\"\nNo.334 Increasing Triplet Subsequence\nTask:\n    Given an integer array nums, determine whether it contains an increasing subsequence of length 3.\n    Return true if there exist indices (i, j, k) with i < j < k such that nums[i] < nums[j] < nums[k]; otherwise return false.\n\n\"\"\"\n\n\ndef increasingTriplet_onePointer(nums):\n    \"\"\"\n    Enumeration with a single pointer; unfortunately it exceeds the time limit.\n    Time complexity: O(n^2)\n    Space complexity: O(1)\n    :type nums: List[int]\n    :rtype: bool\n    \"\"\"\n    n = len(nums)\n    # fix the middle element\n    for i in range(1, n-1):\n        left, right = 0, n-1\n        flag1, flag2 = False, False\n\n        while left < i:\n            if nums[left] < nums[i]:\n                flag1 = True\n                break\n            left += 1\n        while right > i:\n            if nums[right] > nums[i]:\n                flag2 = True\n                break\n            right -= 1\n\n        if flag1 and flag2:\n            return True\n\n    return False\n\ndef increasingTriplet_pointers(nums):\n    \"\"\"\n    Greedy, with two running candidates.\n    Keep a as small as possible subject to a < b, and b as small as possible subject to b < c.\n    Time complexity: O(n)\n    Space complexity: O(1)\n    :param nums:\n    :return:\n    \"\"\"\n    a = float('inf')\n    b = float('inf')\n\n    for num in nums:\n        if num <= a:\n            # the element is no larger than a, so it becomes the new a\n            a = num\n        elif num <= b:\n            # larger than a but no larger than b, so it becomes the new b\n            b = num\n        else:\n            # otherwise the element exceeds both a and b: an increasing triplet exists\n            return True\n    return False\n\nif __name__ == \"__main__\":\n    nums = [20,100,10,12,5,13]\n    flag = increasingTriplet_pointers(nums)\n    print(flag)","sub_path":"LeetCode/src/search07/increasing_triplet.py","file_name":"increasing_triplet.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"373265798","text":"import numpy as np\nfrom Game import Game as Game\nclass CambiaGame(Game):\n    \"\"\"\n    This class specifies the base Game class. To define your own game, subclass\n    this class and implement the functions below. This works when the game is\n    two-player, adversarial and turn-based.\n\n    Use 1 for player1 and -1 for player2.\n\n    See othello/OthelloGame.py for an example implementation.\n    \"\"\"\n    def getInitBoard(self):\n        \"\"\"\n        Returns:\n            startBoard: a representation of the board (ideally this is the form\n                        that will be the input to your neural network)\n        \"\"\"\n        # the last 4 moves\n        # 10 card slots: 4 held cards for each player, plus one slot per player for the card they draw\n        # 54 card channels: one-hot over a 52-card deck plus 2 jokers\n        # 56 channels here because we convert to 55 channels before predicting - the last 2 channels hold whether the card is known by player 1 and player 2\n        # use 2x2 convolutions to get history\n        newBoard = np.zeros(shape=(4, 10, 56))\n        for i in range(4):\n            # add p1 cards\n            # set starting cards\n            newDraw = np.random.randint(low=0, high=54)\n            newBoard[0][i] = np.zeros(shape=(56))\n            newBoard[0][i][newDraw] = 1.\n            if i < 2:\n                # give vision\n                newBoard[0][i][54] = 1.\n\n            # add p2 cards\n            newDraw = np.random.randint(low=0, high=54)\n            newBoard[0][i + 5] = np.zeros(shape=(56))\n            newBoard[0][i + 5][newDraw] = 1.\n            if i < 2:\n                # give vision\n                newBoard[0][i + 5][55] = 1.\n\n        # p1 starts with draw\n        newDraw = np.random.randint(low=0, high=54)\n        newBoard[0][4] = np.zeros(shape=(56))\n        newBoard[0][4][newDraw] = 1.\n        # give vision\n        newBoard[0][4][54] = 1.\n\n        return newBoard\n\n    def getBoardSize(self):\n        \"\"\"\n        Returns:\n            (x,y): a tuple of board dimensions\n        \"\"\"\n        return (4, 10)\n\n    def getActionSize(self):\n        \"\"\"\n        Returns:\n            actionSize: number of all possible actions\n        \"\"\"\n        # You can only play one of 4 cards, or the card you draw = 5 actions\n        return 5\n\n    def getNextState(self, board, player, action):\n        \"\"\"\n        Input:\n            board: current board\n            player: current player (1 or -1)\n            action: action taken by current player\n\n        Returns:\n            nextBoard: board after applying action\n            nextPlayer: player who plays in the next turn (should be -player)\n        \"\"\"\n        startIndex = 0\n        # If player1\n        if player == 1:\n            startIndex = 0\n        else:\n            # player2\n            startIndex = 5\n        # delete the last move in history\n        newBoard = np.delete(board, 3, axis=0)\n        # insert a new move that's identical to previous move\n        # handle playing own card\n        a = np.copy(newBoard[0])\n        newBoard = np.insert(newBoard, 0, a, axis=0)\n        cardPlayed=0\n        if action < 4:\n            # play own card and swap with drawn card\n\n            # get own card\n            a = np.where(np.isin(newBoard[0][action + startIndex], [1.]))[0]\n            if a.size > 0:\n                cardToPlayIndex = a[0]\n                # get unknown status - index 54 is player1, 55 is player2\n                isCardKnown = newBoard[0][action + 
startIndex][54]\n if player == -1:\n isCardKnown = newBoard[0][action + startIndex][55]\n\n isCardKnownOpponent = newBoard[0][action + startIndex][54]\n if player == -1:\n isCardKnownOpponent = newBoard[0][action + startIndex][55]\n\n # our card\n # if card is known play it, otherwise get a random card to play\n # we assume a deck composed of infinite decks so we can uniformly draw\n if isCardKnown == 1.:\n cardPlayed = cardToPlayIndex\n else:\n cardPlayed = np.random.randint(low=0, high=54)\n\n # replace the current card with the drawn card and remove the drawn card from the board\n # replace played card slot with the drawn card\n newBoard[0][action + startIndex] = np.copy(newBoard[0][4 + startIndex])\n # remove card from drawn card slot\n newBoard[0][4 + startIndex] = np.zeros(shape=(56))\n # remove opponent's vision of new card\n temp = 0\n if player == -1:\n temp = 1\n newBoard[0][action + startIndex][54 + temp] = 0.\n \n if action == 4:\n # play drawn card\n s = np.where(np.isin(newBoard[0][4 + startIndex], [1.]))[0]\n if s.size > 0:\n cardPlayed = s[0]\n # set drawn card slot to nothing\n newBoard[0][4 + startIndex] = np.zeros(shape=(56))\n\n\n temp = 0\n if player == -1:\n temp = 1\n # regardless of action, play out the effects of the played card\n redKing = False\n if cardPlayed == 38 or cardPlayed == 51:\n redKing = True\n cardPlayed += 1\n cardPlayed %= 13\n if cardPlayed == 7 or cardPlayed == 8:\n # look at one of your own unknown cards\n toLookAtRandom = []\n for i in range(4):\n if newBoard[0][i + startIndex][54 + temp] == 0.:\n toLookAtRandom.append(i)\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, high=len(toLookAtRandom))\n newBoard[0][toLookAtRandom[rand]][54 + temp] = 1.\n if cardPlayed == 9 or cardPlayed == 10:\n # look at one of opponent's cards that we don't know\n toLookAtRandom = []\n if player == 1:\n s = 5\n else:\n # player2\n s = 0\n for i in range(4):\n if newBoard[0][i + s][54 + temp] == 0.:\n toLookAtRandom.append(i)\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, high=len(toLookAtRandom))\n newBoard[0][toLookAtRandom[rand]][54 + temp] = 1.\n if cardPlayed == 11 or cardPlayed == 12:\n # blind swap with opponent's cards - choose one of our unknown and one of opponent's unknown\n myUnknown = 0\n toLookAtRandom = []\n for i in range(4):\n if newBoard[0][i + startIndex][54 + temp] == 0.:\n toLookAtRandom.append(i)\n # if there's no unknown, randomly append one\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, high=len(toLookAtRandom))\n myUnknown = toLookAtRandom[rand]\n\n oppUnknown = 5\n toLookAtRandom = []\n if player == 1:\n s = 5\n else:\n # player2\n s = 0\n for i in range(4):\n if newBoard[0][i + s][54 + temp] == 0.:\n toLookAtRandom.append(i)\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, high=len(toLookAtRandom))\n oppUnknown = toLookAtRandom[rand]\n\n # swap cards\n oppCard = np.copy(newBoard[0][oppUnknown])\n newBoard[0][oppUnknown] = np.copy(newBoard[0][myUnknown])\n newBoard[0][myUnknown] = np.copy(oppCard)\n\n if cardPlayed == 13:\n # King; check if it's red, if not then we look at an opponent's card and swap\n if not redKing:\n # swap one of our unknown with lowest or random opponent's card\n # blind swap with opponent's cards - choose one of our unknown and 
one of opponent's unknown\n myUnknown = 0\n toLookAtRandom = []\n for i in range(4):\n if newBoard[0][i + startIndex][54 + temp] == 0.:\n toLookAtRandom.append(i)\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, high=len(toLookAtRandom))\n myUnknown = toLookAtRandom[rand]\n\n oppUnknown = 5\n toLookAtRandom = []\n if player == 1:\n s = 5\n else:\n # player2\n s = 0\n for i in range(4):\n if newBoard[0][i + s][54 + temp] == 0.:\n toLookAtRandom.append(i)\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, high=len(toLookAtRandom))\n oppUnknown = toLookAtRandom[rand]\n\n # swap cards\n oppCard = np.copy(newBoard[0][oppUnknown])\n oppCard[54 + temp] = 1.\n newBoard[0][oppUnknown] = np.copy(newBoard[0][myUnknown])\n newBoard[0][myUnknown] = np.copy(oppCard)\n\n\n # play all identical cards\n for j in range(10):\n # loop through and look for cards that are similar\n for index in np.where(np.isin(newBoard[0][j], [1.]))[0]:\n # don't play red kings or jokers\n if index != 38 and index != 51 and index < 52:\n # if the card is similar to current card, play it\n c = index + 1\n c %= 13\n if c == cardPlayed:\n newBoard[0][j] = np.zeros(shape=(56))\n\n # then draw a card for the opponent and switch turns\n # swap startIndex and temp\n if player == 1:\n startIndex = 5\n else:\n # player2\n startIndex = 0\n\n temp = 1\n if player == -1:\n temp = 0\n \n # set opponent's draw\n newDraw = np.random.randint(low=0, high=54)\n newBoard[0][4 + startIndex] = np.zeros(shape=(56))\n newBoard[0][4 + startIndex][newDraw] = 1.\n # give vision to opponent only\n newBoard[0][4 + startIndex][54 + temp] = 1.\n\n # update turn count\n newBoard[2][0][0] = newBoard[3][0][0] + 1\n return newBoard, player * -1\n\n\n def getValidMoves(self, board, player):\n \"\"\"\n Input:\n board: current board\n player: current player\n\n Returns:\n validMoves: a binary vector of length self.getActionSize(), 1 for\n moves that are valid from the current board and player,\n 0 for invalid moves\n \"\"\"\n outArr = np.zeros(shape=(5), dtype='int')\n if player == 1:\n startIndex = 0\n else:\n # player2\n startIndex = 5\n for i in range(4):\n if np.where(np.isin(board[0][i + startIndex], [1.]))[0].size > 0 and np.where(np.isin(board[0][i + startIndex], [1.]))[0][0] < 54:\n outArr[i] = 1\n outArr[4] = 1\n #print(outArr)\n return outArr\n\n def getGameEnded(self, board, player):\n \"\"\"\n Input:\n board: current board\n player: current player (1 or -1)\n\n Returns:\n r: 0 if game has not ended. 
1 if player won, -1 if player lost,\n small non-zero value for draw.\n \n \"\"\"\n # if player1 or player2 is 1 or lower, game is over\n player1, player2 = self.computeScore(board)\n if player1 <= 1:\n return 1\n if player2 <= 1:\n return -1\n if board[2][0][0] > 52:\n # else, game ends after ~54 turns\n if player1 > player2:\n return -1\n else:\n return 1\n return 0\n\n \n def computeScore(self, board):\n \"\"\"\n Input:\n board: current board\n Returns:\n score1: player1's score\n score2: player2's score\n \"\"\"\n player1 = 0\n player2 = 0\n\n startIndex = 0\n for i in range(4):\n s = np.where(np.isin(board[0][i + startIndex], [1.]))[0]\n if s.size > 0:\n player1 += self.getCardScore(s[0])\n\n startIndex = 5\n for i in range(4):\n s = np.where(np.isin(board[0][i + startIndex], [1.]))[0]\n if s.size > 0:\n player2 += self.getCardScore(s[0])\n \n return player1, player2\n\n def getCardScore(self, num):\n \"\"\"\n Input:\n num: an index of a card from 0-53\n Returns:\n value: the card's value\n \"\"\"\n if num == 38 or num == 51:\n return -1\n if num == 52 or num == 53:\n return 0\n num += 1\n num %= 13\n return num\n\n\n def getCanonicalForm(self, board, player):\n \"\"\"\n Input:\n board: current board\n player: current player (1 or -1)\n\n Returns:\n canonicalBoard: returns canonical form of board. The canonical form\n should be independent of player. For e.g. in chess,\n the canonical form can be chosen to be from the pov\n of white. When the player is white, we can return\n board as is. When the player is black, we can invert\n the colors and return the board.\n \"\"\"\n newArr = np.zeros(shape=(4, 10, 56))\n # swap 0-4 with 5-9\n for i in range(4):\n for j in range(5):\n newArr[i][j + 5] = board[i][j]\n for i in range(4):\n for j in range(5):\n newArr[i][j] = board[i][j + 5]\n\n # swap vision columns\n for i in range(4):\n for j in range(10):\n temp = newArr[i][j][54]\n newArr[i][j][54] = newArr[i][j][55]\n newArr[i][j][55] = temp\n \n return newArr\n\n def unknownize(self, board, player):\n \"\"\"\n Input:\n board: current full board\n player: current player (1 or -1)\n Returns:\n unknownizedBoard: returns board with unknown spots blanked. (4x12x55) shape\n \"\"\"\n index = 55\n if player == -1:\n index = 54\n newBoard = np.delete(board, index, axis=2)\n #print(newBoard)\n #print(newBoard.shape)\n for i in range(newBoard.shape[0]):\n for j in range(newBoard[i].shape[0]):\n #print(newBoard.size)\n s = np.where(np.isin(board[i][j], [1.]))\n known = False\n #print(s[0])\n for indice in range(s[0].size):\n #print(indice)\n if indice == 54:\n # if it's known, don't black card out\n known = True\n if not known and s[0].size > 0:\n #print(s[0])\n if s[0][0] > 54:\n s[0][0] = 54\n newBoard[i][j][s[0][0]] = 0.\n return newBoard\n\n\n def getSymmetries(self, board, pi):\n \"\"\"\n Input:\n board: current board\n pi: policy vector of size self.getActionSize()\n\n Returns:\n symmForms: a list of [(board,pi)] where each tuple is a symmetrical\n form of the board and the corresponding pi vector. 
This\n                       is used when training the neural network from examples.\n        \"\"\"\n        # We don't need to input symmetries because not having them doesn't prevent convergence\n        # See AlphaZero Reddit AMA\n        return [(board, pi)]\n\n    def stringRepresentation(self, board):\n        \"\"\"\n        Input:\n            board: current board\n\n        Returns:\n            boardString: a quick conversion of board to a string format.\n                         Required by MCTS for hashing.\n        \"\"\"\n        return np.array_str(board)\n","sub_path":"CambiaGame.py","file_name":"CambiaGame.py","file_ext":"py","file_size_in_byte":16236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
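The CambiaGame record above encodes each of its 10 card slots as a 56-length vector: indices 0-53 one-hot the card identity (a 52-card deck plus 2 jokers) and indices 54/55 flag whether player 1 / player 2 can see the card. A hedged, toy-valued sketch of that slot encoding:

import numpy as np

slot = np.zeros(56)
card = np.random.randint(0, 54)   # one of the 54 card identities
slot[card] = 1.0                  # one-hot the card identity
slot[54] = 1.0                    # mark the card as known to player 1
print(card, slot.sum())           # identity index, and 2.0 (one card bit + one vision bit)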
{"seq_id":"118555101","text":"from PIL import Image\nimport cv2\nimport pytesseract\nfrom .handwriting_extract.src.main import extract\nfrom .field.data.field_data import FieldData, FieldType\nfrom skimage.segmentation import clear_border\nfrom imutils import contours\nimport imutils\nimport os\nimport numpy as np\n\n\"\"\"\nMethod extracts data from given image using Py-Tesseract\n\n@return text the extracted text\n\"\"\"\n\n\ndef extract_data_pytesseract(image):\n    filename = \"{}.png\".format(\"temp\")\n    cv2.imwrite(filename, image)\n    text = pytesseract.image_to_string(Image.open(filename), config='--psm 7')\n\n    return text\n\n\"\"\"\nMethod extracts data from given image using handwriting_extract library\n\n@return the text that was extracted\n\"\"\"\n\n\ndef extract_data_handwriting(image):\n    return extract(image)\n\n\n\"\"\"\nPerforms account and routing extraction from the provided image. Checks the given\npair parameter's field_type field to see if it wants the routing or the account number, and then\nsets the pair's extracted_data field accordingly, and returns the pair. If not successful, blank\nor garbage information is returned; otherwise the extracted_data for both routing and account is a\nsingle string of digits.\n\n@param img: image to extract the data from - this is a cropped version of full image, containing only the bottom 3rd\n@param pair: the value that contains the type of the field that is requested, and the extracted_data itself to be returned\n\n@return pair regardless of if extraction was successful; difference is only in the accuracy of pair.extracted_data\n\"\"\"\n\n\ndef account_routing_extraction(img, pair: FieldData):\n    print(\"Account/Routing extraction\")\n    if img is not None:\n        filedir = os.path.abspath(os.path.dirname(__file__))\n        ref_image_file = os.path.join(\n            filedir, '../../resources/images/micr_e13b_reference.png')\n\n        # init list of reference character names, in same order as they appear in reference\n        # image where the digits, their names and:\n        # T = Transit (delimit bank branch routing transit #)\n        # U = On-us (delimit customer account number)\n        # A = Amount (delimit transaction amount)\n        # D = Dash (delimit parts of numbers, such as routing or account)\n        charNames = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\",\n                     \"7\", \"8\", \"9\", \"0\", \"T\", \"U\", \"A\", \"D\"]\n\n        # load ref MICR image, convert to grayscale and threshold it\n        # this will cause digits to appear white on black background\n        ref = cv2.imread(ref_image_file)\n        ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)\n        ref = imutils.resize(ref, width=400)\n        ref = cv2.threshold(\n            ref, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n\n        # find contours in the MICR image and sort them left to right\n        refCnts = cv2.findContours(\n            ref.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        refCnts = imutils.grab_contours(refCnts)\n        refCnts = contours.sort_contours(refCnts, method=\"left-to-right\")[0]\n\n        # extract digits and symbols from list of contours\n        refROIs = extract_digits_and_symbols(ref, refCnts, minW=10, minH=20)[0]\n        chars = {}\n\n        # loop over reference ROIs\n        for (name, roi) in zip(charNames, refROIs):\n            # resize the ROI to a fixed size, then update the chars dict,\n            # mapping char name to ROI\n            roi = cv2.resize(roi, (36, 36))\n            chars[name] = roi\n\n        # init rectangular kernel along w/an empty list to store output of OCR\n        rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 7))\n        output = []\n\n        # load the input image, grab its dimensions, and apply array slicing\n        # to keep only the bottom 40% of the image (that's where the account/routing info is)\n        (h, w) = img.shape[:2]\n        delta = int(h - (h * 0.65))\n        height_max = int(h*0.85)\n        bottom = img[delta:height_max, 0:w]\n\n        # convert bottom image to grayscale, apply blackhat morphological operator\n        # to find dark regions against a light background (the routing/account #s)\n        # gray = cv2.cvtColor(bottom, cv2.COLOR_BGR2GRAY)\n        blackhat = cv2.morphologyEx(bottom, cv2.MORPH_BLACKHAT, rectKernel)\n\n        # compute the Scharr gradient of the blackhat image, then scale\n        # the result back into the range [0, 255]\n        gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)\n        gradX = np.absolute(gradX)\n        (minVal, maxVal) = (np.min(gradX), np.max(gradX))\n        gradX = (255 * ((gradX - minVal) / (maxVal - minVal)))\n        gradX = gradX.astype(\"uint8\")\n\n        # apply a closing operation using rectangular kernel to close gaps\n        # between digits, then apply Otsu's thresholding method to binarize image\n        gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)\n        thresh = cv2.threshold(\n            gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\n        # remove any pixels that are touching borders of image (helps us in next\n        # step when pruning contours)\n        thresh = clear_border(thresh)\n\n        # find contours in thresholded image, init list of group locations\n        groupCnts = cv2.findContours(\n            thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        groupCnts = imutils.grab_contours(groupCnts)\n        groupLocs = []\n\n        # loop over group contours\n        for (i, c) in enumerate(groupCnts):\n            # compute bounding box of contour\n            (x, y, w, h) = cv2.boundingRect(c)\n\n            # only accept contour region as a grouping of chars if ROI sufficiently large\n            if w > 50 and h > 15:\n                groupLocs.append((x, y, w, h))\n\n        # sort the digit locs from left to right\n        groupLocs = sorted(groupLocs, key=lambda x: x[0])\n\n        # loop over group locations\n        for (gX, gY, gW, gH) in groupLocs:\n            # init the group output of chars\n            groupOutput = []\n\n            # extract group ROI of chars from the grayscale image\n            # then apply thresholding to segment the digits from background\n            group = bottom[gY - 5: gY + gH + 5, gX - 5: gX + gW + 5]\n            group = cv2.threshold(\n                group, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n\n            # find char contours in the group, then sort from left to right\n            charCnts = cv2.findContours(\n                group.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n            charCnts = imutils.grab_contours(charCnts)\n            charCnts = contours.sort_contours(charCnts, method=\"left-to-right\")[0]\n\n            # find chars and symbols in the group\n            (rois, locs) = extract_digits_and_symbols(group, charCnts)\n\n            # loop over ROIs from group\n            for roi in rois:\n                # init list of template matching scores and resize ROI to fixed size\n                scores = []\n                roi = cv2.resize(roi, (36, 36))\n\n                # loop over ref char name and corresponding ROI\n                for charName in 
charNames:\n # apply correlation-based template matching, take score, update scores list\n result = cv2.matchTemplate(roi, chars[charName], cv2.TM_CCOEFF)\n (_, score, _, _) = cv2.minMaxLoc(result)\n scores.append(score)\n\n # the classification for char ROI will be ref char name w/largest template matching score\n groupOutput.append(charNames[np.argmax(scores)])\n\n # add group output to overall check OCR output\n output.append(\"\".join(groupOutput))\n\n # display output check OCR info to screen\n print(\"Check OCR: {}\".format(\" \".join(output)))\n\n if pair.field_type == FieldType.FIELD_TYPE_ROUTING:\n print('routing ' + output[0].translate({ord(c): None for c in 'TUAD'}))\n pair.extracted_data = output[0].translate({ord(c): None for c in 'TUAD'})\n elif pair.field_type == FieldType.FIELD_TYPE_ACCOUNT:\n print('account ' + output[1].translate({ord(c): None for c in 'TUAD'}))\n pair.extracted_data = output[1].translate({ord(c): None for c in 'TUAD'})\n return pair\n\n\n\"\"\"\nThis function extracts each digit and symbol from the given image. If it is successful, it returns a tuple containing a\nlist of the roi (regions of interest, regions containing the chars to extract) and a list of locs (the actual locations\nof those rois)\n\n@param image: image to extract the data from - cropped version of full image, containing only an image of group of chars \n@param charCnts: list of character contours (what is used to determine each characters' location and identity)\n@param minW: minimum width of a char for it to count as a character\n@param minH: minimum height of a char for it to count as a character\n\n@return tuple containing a list of rois and a list of locs\n\"\"\"\n\n\ndef extract_digits_and_symbols(image, charCnts, minW=5, minH=15):\n # get Python iterator for character contours, and init ROI and location lists\n charIter = charCnts.__iter__()\n rois = []\n locs = []\n\n # loop over char contours until end of list\n while True:\n try:\n # get next char contour, compute bounding box, init ROI\n c = next(charIter)\n (cX, cY, cW, cH) = cv2.boundingRect(c)\n roi = None\n\n # check width/height if large enough, meaning we found a digit\n if cW >= minW and cH >= minH:\n # extract ROI\n roi = image[cY:cY + cH, cX: cX + cW]\n rois.append(roi)\n locs.append((cX, cY, cX + cW, cY + cH))\n else: # otherwise it is a special symbol\n # MICR special symbols include 3 parts, so\n # need to get next 2 from iterator, then\n # init bounding box coordinates for symbol\n parts = [c, next(charIter), next(charIter)]\n # init to positive and negative infinities\n (sXA, sYA, sXB, sYB) = (np.inf, np.inf, -np.inf, -np.inf)\n\n # loop over parts\n for p in parts:\n # calc bounding box for each part, update bookkeeping variables\n (pX, pY, pW, pH) = cv2.boundingRect(p)\n sXA = min(sXA, pX)\n sYA = min(sYA, pY)\n sXB = max(sXB, pX + pW)\n sYB = max(sYB, pY + pH)\n\n # extract ROI\n roi = image[sYA:sYB, sXA:sXB]\n rois.append(roi)\n locs.append((sXA, sYA, sXB, sYB))\n except StopIteration: # reached end of iterator, break from loop\n break\n\n # return tuple of ROIS and locations\n return rois, locs\n","sub_path":"src/main/backend/data_extraction/extract_methods.py","file_name":"extract_methods.py","file_ext":"py","file_size_in_byte":10806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"74326145","text":"from WindPy import *\nimport sys\n\nfrom WindPy import w as WindPyGateway\nfrom datetime import datetime\nfrom CommonUtilityFunc import *\n\n\ndef 
getFirstTradeDayOfMonth(year_n, month_n):\n date_str = '%d-%d-%d'%(year_n, month_n, 1)\n# date_str_next = '%d-%d-%d'%(year_n, month_n, 2)\n if w.tdayscount(date_str, date_str).Data[0][0]==1:\n return date_str \n tradeDay = WindPyGateway.tdaysoffset(1, date_str, \"\")\n return tradeDay.Data[0][0].strftime('%Y-%m-%d')\n\ndef getNearestTradeDay(date):\n date_str = '%d-%d-%d'%(date.year, date.month, date.day)\n tradeDay = WindPyGateway.tdaysoffset(0, date_str, \"\")\n return tradeDay.Data[0][0]\n\ndef getNextTradeDayStr(date_str):\n date = datetime.strptime(date_str,'%Y-%m-%d')\n date_str = '%d-%d-%d'%(date.year, date.month, date.day)\n tradeDay = WindPyGateway.tdaysoffset(1, date_str, \"\")\n return tradeDay.Data[0][0].strftime('%Y-%m-%d')\n \ndef getLastTradeDay(date_str):\n tradeDay = WindPyGateway.tdaysoffset(-1, date_str, \"\")\n return tradeDay.Data[0][0].strftime('%Y-%m-%d')\n \nif __name__=='__main__':\n WindPyGateway.start()\n date_str = '2017-2-19'\n date = datetime.strptime(date_str,'%Y-%m-%d')\n print(getNearestTradeDay(date))\n \n print(getLastTwoTradeDaysOfStock('SH600004', '2017-2-18'))\n print(getNextTradeDayStr('2017-2-18'))\n# print(data, str(data[2], encoding='gbk'))\n print(getLastTradeDayOfStock('IC1703', '2017-2-18'))\n# print(getFirstTradeDayOfMonth(2017, 1))","sub_path":"src/bakup_file/TradeDateCompute.py","file_name":"TradeDateCompute.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"438856764","text":"\"\"\"inition migrate\n\nRevision ID: c06e10d89e95\nRevises: a958748b0a72\nCreate Date: 2018-08-23 14:39:24.230841\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c06e10d89e95'\ndown_revision = 'a958748b0a72'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('role', schema=None) as batch_op:\n batch_op.add_column(sa.Column('role_name', sa.String(), nullable=True))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    with op.batch_alter_table('role', schema=None) as batch_op:\n        batch_op.drop_column('role_name')\n\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/c06e10d89e95_inition_migrate.py","file_name":"c06e10d89e95_inition_migrate.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"500511495","text":"#!/usr/bin/env python\n# -*- encoding:utf-8 -*-\nimport json\nfrom web import models\nfrom django.conf import settings\nfrom web.views import Paging\nfrom django.views import View\nfrom bs4 import BeautifulSoup\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect, HttpResponse\n\n\ndef article_list(request):\n    return render(request, 'manager/article_list.html')\n\n\ndef article_list_table(request):\n    if request.method == 'GET':\n        row = int(request.GET.get('rows'))\n        page = int(request.GET.get('page'))\n        title = request.GET.get('title', None)\n        if title:\n            article_obj = models.Article.objects.filter(\n                title__contains=title).order_by('-id')\n            total_count = article_obj.count()\n        else:\n            article_all = models.Article.objects.all()\n            article_obj = article_all.order_by('-id')[(row * page) -\n                                                      row:row * page]\n            total_count = article_all.count()\n        rows = list()\n        for obj in article_obj:\n            tag = str()\n            result = {\n                'id': obj.id,\n                'title': obj.title,\n                'author': obj.user.username,\n                'category': obj.category.title,\n                'is_recommend': obj.is_recommend,\n                'date_publish': obj.date_publish.strftime('%Y-%m-%d %H:%M:%S')\n            }\n            for i in obj.tag.all():\n                tag += i.title + '、'\n            result.update({'tag': tag.rstrip('、')})\n            rows.append(result)\n        return JsonResponse({'total': total_count, 'rows': rows}, safe=False)\n\n\ndef is_recommend(request):\n    if request.method == 'POST':\n        article_id = request.POST.get('id')\n        recommend = request.POST.get('recommend')\n        is_data = True if recommend.lower() == 'true' else False\n        models.Article.objects.filter(id=article_id).update(\n            is_recommend=is_data)\n        return JsonResponse({'status': 'success', 'msg': '数据变更成功!'})\n\n\nclass Article_edit(View):\n    def get(self, request):\n        article_id = request.GET.get('article')\n        Category = models.Category.objects.all()\n        tag_obj = models.Tag.objects.all()\n        article_obj = models.Article.objects.filter(id=article_id).first()\n        return render(request, 'manager/article_edit.html', locals())\n\n    def post(self, request):\n        article_id = request.POST.get('id')\n        content = request.POST.get(\"content\")\n        title = request.POST.get(\"title\")\n        soup = BeautifulSoup(content, \"html.parser\")\n        for i in soup.find_all(\"script\"):\n            i.decompose()\n\n        with transaction.atomic():\n            category = models.Category.objects.filter(\n                title=request.POST.get(\"category\")).first()\n            tag = json.loads(request.POST.get(\"tag\"))\n            tag_list = []\n            for i in tag:\n                # .first() returns None for a missing tag, so guard before\n                # touching .id and create the tag when it does not exist yet\n                tag_obj = models.Tag.objects.filter(title=i).first()\n                if tag_obj is None:\n                    tag_obj = models.Tag.objects.create(title=i)\n                tag_list.append(tag_obj.id)\n            models.Article.objects.filter(id=article_id).update(\n                title=title,\n                desc=soup.text[0:150],\n                category=category,\n                content=content,\n            )\n            report_obj = models.Article.objects.filter(id=article_id).first()\n            report_obj.tag.set(tag_list)\n        return JsonResponse(\"ok\", safe=False)\n\n\ndef article_del(request):\n    if request.method == \"POST\":\n        article_id = json.loads(request.POST.get('id'))\n        models.Article.objects.filter(id__in=article_id).delete()\n        return 
JsonResponse({\n 'status': 'success',\n 'msg': '删除成功!'\n })\n\n\ndef link_list(request):\n if request.method == 'GET':\n return render(request, 'manager/link_list.html')\n\n\ndef link_table(request):\n if request.method == 'GET':\n link_all = models.Links.objects.all()\n result = list()\n for obj in link_all:\n rdict = {\n 'id': obj.id,\n 'title': obj.title,\n 'description': obj.description,\n 'callback_url': obj.callback_url,\n 'date_publish': obj.date_publish.strftime('%Y-%m-%d %H:%M:%S')\n }\n result.append(rdict)\n return JsonResponse({\n 'total': link_all.count(),\n 'rows': result\n }, safe=False)\n\n\ndef link_new(request):\n if request.method == 'POST':\n title = request.POST.get('title')\n url = request.POST.get('url')\n desc = request.POST.get('desc')\n models.Links.objects.create(\n title=title, description=desc, callback_url=url)\n return JsonResponse({\n 'status': 'success',\n 'msg': '创建成功!',\n })\n\n\ndef link_del(request):\n if request.method == 'POST':\n link_id = json.loads(request.POST.get('id'))\n links = models.Links.objects.filter(id__in=link_id).delete()\n return JsonResponse({\n 'status': 'success',\n 'msg': '删除成功!',\n })\n\nclass Link_edit(View):\n def get(self, request):\n link_id = request.GET.get('link')\n link_obj = models.Links.objects.filter(id=link_id).first()\n res = {\n 'id': link_obj.id,\n 'title': link_obj.title,\n 'url': link_obj.callback_url,\n 'desc': link_obj.description\n }\n return JsonResponse(res, safe=False)\n\n def post(self, request):\n id = request.GET.get('link')\n title = request.POST.get('title')\n url = request.POST.get('url')\n desc = request.POST.get('desc')\n models.Links.objects.filter(id=id).update(\n title=title, callback_url=url, description=desc)\n return redirect('/manager/link_list/')\n\n\ndef menu_list(request):\n if request.method == \"GET\":\n return render(request, 'manager/menu_list.html')\n\n\ndef menu_table(request):\n if request.method == 'GET':\n menu_all = models.Menu.objects.all().order_by('-index')\n menu_obj = menu_all.values(\n 'id',\n 'title',\n 'icon',\n 'index',\n )\n return JsonResponse({\n 'total': menu_all.count(),\n 'rows': list(menu_obj)\n }, safe=False)\n\n\ndef api_list(request):\n if request.method == \"GET\":\n return render(request, 'manager/api_list.html')\n\n\ndef api_table(request):\n if request.method == 'GET':\n api_all = models.Menu_List.objects.all().order_by('-index')\n api_obj = api_all.values(\n 'id',\n 'title',\n 'url',\n 'menu__title',\n )\n return JsonResponse({\n 'total': api_all.count(),\n 'rows': list(api_obj)\n }, safe=False)\n\n\ndef menu_new(request):\n return render(request, 'manager/menu_new.html', locals())\n\n\ndef profile(request):\n return render(request, 'manager/profile.html', locals())\n\n\ndef gallery_table(request):\n if request.method == 'GET':\n gallery_all = models.Gallery.objects.all().order_by('-id')\n gallery_obj = gallery_all.values(\n 'id',\n 'title',\n 'desc',\n )\n return JsonResponse({\n 'total': gallery_all.count(),\n 'rows': list(gallery_obj)\n }, safe=False)\n\nclass Gallery(View):\n def get(self, request):\n return render(request, 'manager/gallery_list.html')\n\n def post(self, request):\n result = dict()\n try:\n id = request.POST.get('id')\n title = request.POST.get('title')\n desc = request.POST.get('desc')\n if title:\n models.Gallery.objects.filter(id=id).update(title=title)\n elif desc:\n models.Gallery.objects.filter(id=id).update(desc=desc)\n result.update({'status': 'success', 'msg': '数据变更成功!'})\n except:\n result.update({'status': 'error', 'msg': 
'数据变更失败!'})\n return JsonResponse(result)\n\n\nclass Image_new(View):\n def get(self, request):\n obj = models.Gallery.objects.all()\n return render(request, 'manager/image_new.html', {'obj': obj})\n\n def post(self, request):\n title = request.POST.get('title')\n filedata = request.FILES.getlist('filedata')\n try:\n if title:\n gallery_obj = models.Gallery.objects.filter(\n title=title).first()\n for item in filedata:\n models.Image.objects.create(\n parent=gallery_obj,\n image=item,\n )\n except:\n result = {'status': 'error', 'msg': \"上传失败\"}\n else:\n result = {'status': 'success', 'msg': \"上传成功\"}\n return JsonResponse(result)\n\n\nclass Gallery_new(View):\n \"\"\"\n 添加相册\n \"\"\"\n\n def get(self, request):\n pass\n\n def post(self, request):\n title = request.POST.get('title')\n desc = request.POST.get('desc')\n obj = models.Gallery.objects.filter(title=title).exists()\n result = dict()\n if obj:\n result.update({'status': 'success', 'msg': '相册已存在!'})\n else:\n models.Gallery.objects.create(title=title, desc=desc)\n result.update({'status': 'success', 'msg': '相册创建成功!'})\n return JsonResponse(result)\n\n\nclass Comment(View):\n def get(self, request):\n return render(request, 'manager/comment_list.html')\n\n\ndef comment_table(request):\n if request.method == 'GET':\n comment_all = models.Comment.objects.all().order_by('-id')\n result = list()\n for obj in comment_all:\n rdict = {\n 'id': obj.id,\n 'username': obj.username,\n 'email': obj.email,\n 'article__title': obj.article.title,\n 'content': obj.content\n }\n result.append(rdict)\n return JsonResponse({\n 'total': comment_all.count(),\n 'rows': result\n }, safe=False)\n\n\ndef user_list(request):\n if request.method == 'GET':\n return render(request, 'manager/user_list.html')\n\n\ndef user_table(request):\n if request.method == 'GET':\n user_all = models.Userinfo.objects.all().order_by('-id')\n user_obj = user_all.values(\n 'id',\n 'username',\n 'desc',\n 'qq',\n 'email'\n )\n return JsonResponse({\n 'total': user_all.count(),\n 'rows': list(user_obj)\n }, safe=False)\n\n\ndef Category(request):\n return render(request, 'manager/category.html')\n\n\nclass Category_list(View):\n def get(self, request):\n rows = int(request.GET.get('rows'))\n page = int(request.GET.get('page'))\n title = request.GET.get('title', None)\n if title:\n obj = models.Category.objects.filter(title__contains=title)\n rows = obj.order_by('-id')[(rows * page) -\n rows:rows * page].values(\n 'id', 'title', 'pid__title')\n else:\n obj = models.Category.objects.all()\n rows = obj.order_by('-id')[(rows * page) -\n rows:rows * page].values(\n 'id', 'title', 'pid__title')\n return JsonResponse({\n 'total': obj.count(),\n 'rows': list(rows)\n },\n safe=False)\n\n def post(self, request):\n pass\n\n\ndef category_edit(request):\n if request.method == 'POST':\n ids = request.POST.get('id', None)\n value = request.POST.get('value', None)\n field = request.POST.get('field', None)\n result = dict()\n if field == 'pid__title':\n obj = models.Category.objects.filter(title=value)\n if obj.exists():\n if obj.first().pid:\n result.update({'status': 'error', 'msg': '所选父类不能拥有父类!'})\n else:\n models.Category.objects.filter(id=ids).update(\n pid=obj.first())\n result.update({'status': 'success', 'msg': '编辑成功!'})\n else:\n result.update({'status': 'error', 'msg': '没有指定的父分类!'})\n elif field == 'title':\n models.Category.objects.filter(id=ids).update(title=value)\n result.update({'status': 'success', 'msg': '编辑成功!'})\n print(result)\n return JsonResponse(result)\n\n\ndef 
category_new(request):\n    if request.method == \"POST\":\n        title = request.POST.get('title')\n        pid_title = request.POST.get('pid_title')\n        result = dict()\n        obj = models.Category.objects.filter(title=title)\n        if pid_title:\n            ptitle = models.Category.objects.filter(title=pid_title)\n            # exists is a method: call it, or the check is always truthy\n            if ptitle.exists() and not ptitle.first().pid:\n                if obj.exists():\n                    result.update({'status': 'error', 'msg': '分类已存在!'})\n                else:\n                    models.Category.objects.create(\n                        title=title, pid=ptitle.first())\n                    result.update({'status': 'success', 'msg': '新增成功!'})\n            else:\n                result.update({'status': 'error', 'msg': '指定父分类不存在或着不是一级菜单!'})\n        else:\n            if obj.exists():\n                result.update({'status': 'error', 'msg': '分类已存在!'})\n            else:\n                models.Category.objects.create(title=title)\n                result.update({'status': 'success', 'msg': '新增成功!'})\n        return JsonResponse(result)\n\n\ndef category_del(request):\n    if request.method == 'POST':\n        ids = json.loads(request.POST.get('id'))\n        title = json.loads(request.POST.get('title'))\n        result = dict()\n        if ids:\n            # combine Q objects with &; `and` would silently keep only the second condition\n            obj = models.Category.objects.filter(Q(id__in=ids) & Q(title__in=title))\n            for i in obj:\n                if i.article_set.all().exists():\n                    result.update({\n                        'status': 'error',\n                        'msg': '数据删除失败,所选分类存在文章!'\n                    })\n                    break\n                elif i.category_set.all().exists():\n                    result.update({\n                        'status': 'error',\n                        'msg': '数据删除失败,所选分类存在子分类!'\n                    })\n                    break\n                else:\n                    i.delete()\n                    result.update({'status': 'success', 'msg': '数据删除成功!'})\n        else:\n            result.update({'status': 'error', 'msg': '没有找到指定的数据!'})\n        return JsonResponse(result)\n\n\ndef tag_list(request):\n    if request.method == 'GET':\n        return render(request, 'manager/tag_list.html')\n\n\ndef tag_new(request):\n    if request.method == 'POST':\n        title = request.POST.get('title')\n        obj = models.Tag.objects.filter(title=title).exists()\n        result = dict()\n        if obj:\n            result.update({'status': 'error', 'msg': '标签已存在!'})\n        else:\n            models.Tag.objects.create(title=title)\n            result.update({'status': 'success', 'msg': '标签创建成功!'})\n        return JsonResponse(result)\n\n\ndef tag_del(request):\n    if request.method == 'POST':\n        id_list = json.loads(request.POST.get('id'))\n        title_list = json.loads(request.POST.get('title'))\n        # use the id_list parsed above (tag_list is a view, not a variable here)\n        # and combine the Q objects with & rather than `and`\n        obj = models.Tag.objects.filter(Q(id__in=id_list) & Q(title__in=title_list))\n        result = dict()\n        if obj.exists():\n            obj.delete()\n            result.update({'status': 'success', 'msg': '标签删除成功!'})\n        else:\n            result.update({'status': 'error', 'msg': '没有指定的数据!'})\n        return JsonResponse(result)\n\n\ndef tag_table(request):\n    if request.method == 'GET':\n        row = int(request.GET.get('rows'))\n        page = int(request.GET.get('page'))\n        title = request.GET.get('title', None)\n        if title:\n            tag_obj = models.Tag.objects.filter(title__contains=title).order_by('-id').values('id', 'title')\n            total_count = tag_obj.count()\n        else:\n            tag_all = models.Tag.objects.all()\n            tag_obj = tag_all.order_by('-id')[(row * page) - row:row * page].values('id', 'title')\n            total_count = tag_all.count()\n        return JsonResponse({'total': total_count, 'rows': list(tag_obj)}, safe=False)\n","sub_path":"web/views/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":16916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
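Two of the fixes in the record above involve the same Django pitfall: Q objects must be combined with the bitwise operators (& for AND, | for OR). Python's `and` returns its second operand, so Q(a) and Q(b) silently drops the first condition. A hedged sketch (MyModel is hypothetical; building Q objects needs only Django installed, not a configured project):

from django.db.models import Q

cond = Q(id__in=[1, 2]) & Q(title__in=['a', 'b'])     # correct: both conditions apply
wrong = Q(id__in=[1, 2]) and Q(title__in=['a', 'b'])  # evaluates to just the second Q
print(cond)
print(wrong)
# MyModel.objects.filter(cond)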
{"seq_id":"553551041","text":"from Share import List_Dict_Basis_Alg, List_Sheffer_Sets\n\n\ndef basis_construct(basis_len: int, full_set: list):\n    \"\"\"\n    Generator that constructs bases of length basis_len by extending each basis of length basis_len - 1.\n\n    :param basis_len: length of the bases to construct\n    :param full_set: list of operations from which the bases are constructed\n    :rtype: list\n    :return: yields each new basis as a sorted list of operations, or the string \"NO BASIS FOR EXTEND\" when no basis could be extended\n\n    \"\"\"\n    List_Dict_Basis_Alg.append({})\n\n    count_extended_basis = 0\n\n    for basis, alg in List_Dict_Basis_Alg[basis_len - 1].items():\n\n        remaining_operations = set(full_set) - set(alg)\n\n        if remaining_operations:\n\n            remaining_operations = list(remaining_operations)\n\n            remaining_operations.sort()\n\n            for remaining_operation in remaining_operations:\n\n                is_sheffer_basis = False\n\n                if basis_len == 2:\n\n                    new_basis = sorted(list((basis,) + (remaining_operation,)))\n\n                else:\n\n                    new_basis = sorted(list(basis + (remaining_operation,)))\n\n                if len(List_Sheffer_Sets) > 0 and any(map(set(new_basis).issuperset, List_Sheffer_Sets)):\n                    is_sheffer_basis = True\n\n                if (is_sheffer_basis is False) and (tuple(new_basis) not in List_Dict_Basis_Alg[basis_len]):\n                    count_extended_basis += 1\n\n                    yield new_basis\n                else:\n\n                    continue\n\n    if count_extended_basis == 0:\n        yield \"NO BASIS FOR EXTEND\"\n","sub_path":"Basis_Construct.py","file_name":"Basis_Construct.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"271119924","text":"# Kornpob Bhirombhakdi 20200331\n\nimport numpy as np\n\ndef make_SIP(coef,x,y,startx=True):\n    \"\"\"\n    Simple imaging polynomial (SIP) is a conventional method to describe non-linear variation in an image. Ref: https://fits.gsfc.nasa.gov/registry/sip/shupeADASS.pdf.\n    ##########\n    Assume a SIP model of order 2, i.e., Z = a0 + a1*X + a2*X**2.\n    Typically, X is relative to the SIP reference system whose origin corresponds to (xref,yref) in the original image. Therefore, X = x - xref where (x,y) is an image pixel.\n    Z is a quantity of interest. In aXe grism reduction, Z can be Y (as y = Y + yref) for trace, or wavelength.\n    SIP coefficients are 2D with a given polynomial order. Assume the order is 3. Therefore, ai = ai0 + ai1*X' + ai2*Y' + ai3*X'**2 + ai4*X'*Y' + ai5*Y'**2 + ... + ai9*Y'**3.\n    Note X is the leading term (this is specified by startx=True in make_SIP). Set startx=False otherwise.\n    Note that X and X' might be different. For aXe reduction, (X',Y') = (xd,yd) as the source location from direct image.\n    \"\"\"\n    if startx:\n        xref,yref = x,y\n    else:\n        xref,yref = y,x\n    n = len(coef)\n    d = []\n    px,py = 0,0\n    a = [(px,py)]\n    b = [(xref,yref)]\n    p = 0\n    q = True\n    while(q):\n        if px==0:\n            p+=1\n            px=p\n            py=0\n        else:\n            px-=1\n            py+=1\n        a.append((px,py))\n        b.append((xref,yref))\n        if len(a)>=len(coef):\n            q = False\n    a,b = np.array(a),np.array(b)\n    c = b**a\n    c = np.sum(c[:,0]*c[:,1]*coef)\n    d.append(c)\n    d = np.array(d)\n    return d \n","sub_path":"axehelper/make_sip.py","file_name":"make_sip.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
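make_SIP above evaluates Z by building exponent pairs and summing coefficient-weighted powers. For intuition, here is a hedged worked example of the order-2, one-variable case named in its docstring, Z = a0 + a1*X + a2*X**2 at X = x - xref, with made-up coefficients:

import numpy as np

coef = np.array([1.0, 0.5, 0.01])       # hypothetical a0, a1, a2
x, xref = 510.0, 500.0                  # image pixel and SIP reference pixel
X = x - xref
Z = np.sum(coef * X ** np.arange(coef.size))
print(Z)  # 1.0 + 0.5*10 + 0.01*100 = 7.0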