diff --git "a/4803.jsonl" "b/4803.jsonl" new file mode 100644--- /dev/null +++ "b/4803.jsonl" @@ -0,0 +1,604 @@ +{"seq_id":"9608258","text":"def get_data(file):\n with open(file,\"r\") as file:\n data = []\n while True:\n line = file.readline().replace(\"\\n\",\"\")\n if not line:\n break\n data.append(line.split(\",\"))\n return data\n\ndef sum_vectors(file):\n vectors = get_data(file)\n vector_sum = []\n positions = []\n for i in range(len(vectors[0])):\n sum = 0\n for j in range(len(vectors)):\n sum += int(vectors[j][i])\n if sum != 0:\n vector_sum.append(sum)\n positions.append(i)\n vector_sum.append(len(vectors[1]))\n positions.append(\"len\")\n return positions,vector_sum\n\ndef sparce_dict(file):\n positions,vector_sum = sum_vectors(file)\n return dict(zip(positions,vector_sum))\n\n\npath = \"/home/pedro/Documentos/Python/Exames/Exame Normal 2015/Ex_5/data.txt\"\nprint(sparce_dict(path))\n","sub_path":"Others/Python/Exames/Exame Normal 2015/Ex_5/Ex_5.py","file_name":"Ex_5.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"77001424","text":"import praw\nfrom prawcore import NotFound\nimport csv\nimport datetime as dt\n\nc_id = \"###\"\nc_secret = \"###\"\nu_a = \"###\"\nusrnm = \"###\"\npasswd = \"###\"\n\ndate = dt.datetime.now().strftime(\"%m-%d-%Y\")\n\ncategories = [\"Hot\",\"New\",\"Controversial\",\"Top\",\"Rising\",\"Search\"]\n\nmax_comments = 5\n\nline_mul = 52\npath = \"./output/\"\n\ndef existence(reddit,sub_list):\n found = []\n not_found = []\n\n for sub in sub_list:\n try:\n reddit.subreddits.search_by_name(sub, exact = True)\n found.append(sub)\n except NotFound:\n not_found.append(sub)\n\n return found,not_found\n\ndef title():\n print(\"=\"*line_mul)\n print(\"\\n\\tReddit Scraper\\n\")\n print(\"=\"*line_mul)\n\ndef confirm_subs(reddit,sub_list):\n print(\"\\nChecking if subreddit(s) exist...\\n\")\n found,not_found = existence(reddit,sub_list)\n if found:\n print(\"\\nThe following subreddits were found:\")\n print(\"-\"*line_mul)\n print(*not_found, sep = \"\\n\")\n print(\"-\"*line_mul)\n \n if not_found:\n print(\"\\nThe following subreddits were not found and will be skipped:\")\n print(\"-\"*line_mul)\n print(*not_found, sep = \"\\n\")\n print(\"-\"*line_mul)\n \n subs = [sub for sub in found]\n return subs\n\ndef connect():\n try:\n reddit = praw.Reddit(client_id = c_id ,\n client_secret = c_secret ,\n user_agent = u_a ,\n username = usrnm ,\n password = passwd)\n return reddit\n \n except praw.exceptions.APIException as e:\n print(\"\\nThere was a server-side error.\")\n print(e.error_type)\n print(e.message)\n print(e.field)\n\n except praw.exceptions.ClientException:\n print(\"\\nThere was a client-side error.\")\n \ndef getSubreddits(reddit):\n while True:\n try:\n search_for = str(input(\" Enter a list of subreddits to search separated by a space or a .txt file:\\n\"))\n if(\".txt\" in search_for.strip()):\n f = open(search_for,\"r\")\n search_for = \" \".join([ i.strip() for i in f.readlines()])\n f.close()\n if not search_for :\n raise ValueError\n sub = [ s.strip() for s in search_for.split(\" \") ]\n sub_list = [subreddit for subreddit in search_for.split(\" \")]\n found,not_found = existence(reddit,sub_list)\n if found:\n print(\"\\nThe following subreddits were found and will be scraped:\")\n print(\"-\"*line_mul)\n print(*found, sep = \"\\n\")\n if not_found:\n print(\"\\nThe following subreddits were not found and will be skipped:\")\n 
print(\"-\"*line_mul)\n print(*not_found, sep = \"\\n\")\n confirm = str(input(\"\\nConfirm selection? [Y/N] \")).strip()\n if confirm.lower():\n return found\n else:\n raise ValueError\n except Exception:\n print(\"Wrong input.\")\n\ndef createDict(subs):\n return dict((sub,[]) for sub in subs)\n\ndef getSettings(subs,master):\n while True:\n try:\n search_for = str(input(\"\\nWhat to search for or a .txt file:\\n\")).strip()\n if not search_for:\n raise ValueError\n else:\n if ( \".txt\" in search_for.strip()):\n f = open(search_for,\"r\")\n search_for = \" \".join([ i.strip()\n for i in f.readlines()])\n f.close()\n for sub in subs:\n for sub_n,values in master.items():\n if sub_n == sub:\n for search in search_for.split(\" \"):\n settings = [5,search]\n master[sub].append(settings)\n break\n except Exception:\n print(\"Wrong input! Try again.\")\n\ndef printSettings(master):\n print(\"\\n------------------Current settings for each subreddit-------------------\")\n print(\"\\n{:<25}{:<17}{:<30}\".format(\"Subreddit\",\"Category\",\"Number of results / Keyword(s)\"))\n print(\"-\"*line_mul)\n for sub,settings in master.items():\n for each in settings:\n cat_i = each[0]\n specific = each[1]\n print(\"\\n{:<25}{:<17}{:<30}\".format(sub,categories[cat_i],specific))\n confirm = input(\"\\nConfirm options? [Y/N] \").strip()\n if confirm.lower() == \"y\":\n return True\n else:\n return False\n\ndef getPosts(reddit,sub,cat_i,search_for):\n print(\"\\nGetting posts for r/%s...\" % sub)\n subreddit = reddit.subreddit(sub)\n return subreddit.search(\"%s\" % search_for)\n\ndef getTopComments(post):\n return \" , \".join([ comment.body for comment in post.comments[:max_comments]])\n\ndef sortPosts(collected):\n print(\"Sorting posts...\")\n overview = {\"Title\" : [] , \"Text\" : [] , \"Comments\" : []}\n\n for post in collected:\n overview[\"Title\"].append(post.title)\n overview[\"Text\"].append(post.selftext)\n overview[\"Comments\"].append(getTopComments(post))\n\n return overview\n\ndef writeCSV(sub,overview,x):\n fname = str((\"%s-%s-%s-%s.csv\") % ( sub , \"Search\" , str(x) , date ))\n results = open(path+fname,\"w\")\n writer = csv.writer(results, delimiter = \",\")\n writer.writerow(overview.keys())\n writer.writerows(zip(*overview.values()))\n print(\"CSV file %s for r/%s created.\" % (fname , sub))\n results.close()\n \ndef get_sort_write(reddit,master):\n for sub,settings in master.items():\n x = 1\n for each in settings:\n cat_i = each[0]\n search_for = each[1]\n collected = getPosts(reddit,sub,cat_i,search_for)\n overview = sortPosts(collected)\n writeCSV(sub,overview,x)\n x=x+1\ndef main():\n title()\n reddit = connect()\n if reddit:\n subs = getSubreddits(reddit)\n master = createDict(subs)\n getSettings(subs,master)\n if(printSettings(master)):\n get_sort_write(reddit,master)\n else:\n print(\"\\nClosing\\n\")\n else:\n print(\"\\ncould not connect\\n\")\n\nmain()\n","sub_path":"simplescraper.py","file_name":"simplescraper.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"230718686","text":"# Done\r\nimport cv2\r\nimport random\r\nimport numpy as np\r\nimport time\r\n\r\nstart = time.time()\r\nimg = cv2.imread('./data/cars.jpg', cv2.IMREAD_GRAYSCALE)\r\nimg = cv2.resize(img, (800, 400))\r\nimgFloat = np.float32(img)\r\nfourier = cv2.dft(imgFloat, flags=cv2.DFT_COMPLEX_OUTPUT)\r\nfourierShifted = np.fft.fftshift(fourier)\r\nend = time.time()\r\n\r\nprint(\"Ellapsed in \" + str(end - 
start) + \"ms\")\r\n\r\nstandartDiv = random.normalvariate(1.6555, 0.12444)\r\nmagnitude = standartDiv * \\\r\n cv2.log(cv2.magnitude(fourierShifted[:, :, 0], fourierShifted[:, :, 1]))\r\nfourier = cv2.idft(fourierShifted)\r\n\r\ncv2.imshow(\"Original\", img)\r\n# Squaring magnitude to increase contrast\r\ncv2.imshow(\"Magnitude\", np.int8(magnitude) ** 2)\r\ncv2.imshow(\"Fourier\", np.int8(fourier[:, :, 0]) ** 2)\r\n\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()\r\n","sub_path":"task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"617449854","text":"'''\n@author:lvming\n@time:2021/6/29\n'''\n# 运用到pom思想 页面元素分离出来\n# 页面元素经常变化 页面元素可提取出来\nfrom time import sleep\nfrom selenium.webdriver.common.by import By\nfrom class12.app09over.common.design import appium_yaml_conf\nfrom class12.app09over.BaseView.base_view import BaseView\n\nclass Common(BaseView):\n\n # 同意的元素\n agree1=(By.XPATH,'//*[@text=\"同意并开启以上权限\"]')\n into=(By.XPATH,'//*[@text=\"进入地图\"]')\n allow1 = (By.XPATH, '//*[@text=\"允许\"]')\n allow2=(By.XPATH,'//*[@text=\"始终允许\"]')\n allow3=(By.XPATH,'//*[@text=\"允许\"]')\n\n def agree(self):\n try:\n self.on_click(self.agree1)\n self.on_click(self.into)\n self.on_click(self.allow1)\n self.on_click(self.allow2)\n self.on_click(self.allow3)\n except Exception as e:\n print('没有同意页面')\n # 获得窗口大小\n def get_size(self):\n x=self.window_size()['x']\n y=self.window_size()['y']\n return x,y\n # 滑动\n def swipe_left(self):\n le=self.get_size()\n x1 = le[0]*0.9\n y1 = le[1]*0.5\n x2 = le[2]*0.2\n self.swipe(x1,y1,x2,y1,1000)\n\nif __name__ == '__main__':\n # 先启动程序\n driver = appium_yaml_conf()\n # 点击跳过\n com = Common(driver)\n com.agree()\n\n# 登录的操作\n# 登录页面 支付页面 下单页面 购物页面\n# pom\n","sub_path":"class12/app09over/common/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"79667958","text":"class Solution:\n def largestTriangleArea(self, points: List[List[int]]) -> float:\n maxArea = 0\n for i in range(len(points)):\n for j in range(i, len(points)):\n for k in range(j, len(points)):\n curArea = self.Shoelace(points, i, j, k)\n if curArea > maxArea:\n maxArea = curArea\n return maxArea\n \n def Shoelace(self, points, i, j, k):\n curArea = 0.5* abs((points[i][0]*points[j][1]\n +points[j][0]*points[k][1]\n +points[k][0]*points[i][1]) -\n (points[i][1]*points[j][0]\n +points[j][1]*points[k][0]\n +points[k][1]*points[i][0]))\n print(curArea)\n return curArea","sub_path":"LeetCode/0812.py","file_name":"0812.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"433978242","text":"from biocrnpyler import *\n\nkb, ku, ktx, ktl, kdeg, kdil = 100, 10, 3, 2, 1, .1\nparameters = {\"kb\":kb, \"ku\":ku, \"ktx\":ktx, \"ktl\":ktl, \"kdeg\": kdeg, \"kdil\":kdil}\n\n#Creates a global mechanism that acts on all species generated except for\n# those with the type or attribute \"genome\"\ndilution_mechanism = Dilution(filter_dict = {\"genome\":False})\n\n#Add this mechanism to a dictionary which is passed into the Mixture txtl.BasicExtract\nglobal_mechanisms = {\"dilution\":dilution_mechanism}\nmyMixture = BasicExtract(name = \"txtl\", parameters = parameters, global_mechanisms = global_mechanisms)\n\n#Creates a dna assembly. 
This assembly is type \"dna\" so it will be degraded\nA_dna = DNAassembly(name = \"G1\", promoter = \"pBest\",\n rbs = \"BCD2\")\n\n#Create another dna assembly but set its internal specie's type to \"genome\" so it will not be degraded\n#Note: this only protects the dna_G2 species encoded by this assembly. Protein and mRNA products will\n#still be degraded by dilution. This could be overcome by creating custom transcript and protein species\n#with some attribute that is passed into the filter_dict.\nA_genome = DNAassembly(name = \"G2\", promoter = \"pBest\",\n rbs = \"BCD2\")\nA_genome.dna.type = \"genome\" #Note: that the code A_genome.dna.attributes.append(\"genome\") also works here.\n\nmyMixture.add_components(A_dna)\nmyMixture.add_components(A_genome)\nmyCRN = myMixture.compile_crn()\nprint(repr(myCRN))","sub_path":"Tests/dilution_with_global_mechanisms.py","file_name":"dilution_with_global_mechanisms.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"325213953","text":"\"\"\"\nDocker compose support\n======================\n\nAllows to spin up services configured via :code:`docker-compose.yml`.\n\"\"\"\n\nimport subprocess\n\nimport blindspin\nimport requests\n\nfrom testcontainers.core.waiting_utils import wait_container_is_ready\nfrom testcontainers.core.exceptions import NoSuchPortExposed\n\n\nclass DockerCompose(object):\n \"\"\"\n Docker compose containers.\n\n Example\n -------\n ::\n\n with DockerCompose(\"/home/project\", pull=True) as compose:\n host = compose.get_service_host(\"hub\", 4444)\n port = compose.get_service_port(\"hub\", 4444)\n driver = webdriver.Remote(\n command_executor=(\"http://{}:{}/wd/hub\".format(host,port)),\n desired_capabilities=CHROME,\n )\n driver.get(\"http://automation-remarks.com\")\n\n\n .. 
code-block:: yaml\n\n hub:\n image: selenium/hub\n ports:\n - \"4444:4444\"\n firefox:\n image: selenium/node-firefox\n links:\n - hub\n expose:\n - \"5555\"\n chrome:\n image: selenium/node-chrome\n links:\n - hub\n expose:\n - \"5555\"\n \"\"\"\n def __init__(\n self,\n filepath,\n compose_file_name=\"docker-compose.yml\",\n pull=False):\n self.filepath = filepath\n self.compose_file_name = compose_file_name\n self.pull = pull\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stop()\n\n def start(self):\n with blindspin.spinner():\n if self.pull:\n subprocess.call([\"docker-compose\", \"-f\", self.compose_file_name, \"pull\"],\n cwd=self.filepath)\n subprocess.call([\"docker-compose\", \"-f\", self.compose_file_name, \"up\", \"-d\"],\n cwd=self.filepath)\n\n def stop(self):\n with blindspin.spinner():\n subprocess.call([\"docker-compose\", \"-f\", self.compose_file_name, \"down\", \"-v\"],\n cwd=self.filepath)\n\n def get_service_port(self, service_name, port):\n return self._get_service_info(service_name, port)[1]\n\n def get_service_host(self, service_name, port):\n return self._get_service_info(service_name, port)[0]\n\n def _get_service_info(self, service, port):\n cmd_as_list = [\"docker-compose\", \"-f\", self.compose_file_name, \"port\", service, str(port)]\n output = subprocess.check_output(cmd_as_list,\n cwd=self.filepath).decode(\"utf-8\")\n result = str(output).rstrip().split(\":\")\n if len(result) == 1:\n raise NoSuchPortExposed(\"Port {} was not exposed for service {}\"\n .format(port, service))\n return result\n\n @wait_container_is_ready()\n def wait_for(self, url):\n requests.get(url)\n return self\n","sub_path":"testcontainers/compose.py","file_name":"compose.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"190395648","text":"from autobahn.twisted.util import sleep\nfrom autobahn.twisted.wamp import ApplicationSession, ApplicationRunner\nfrom autobahn.wamp.exception import ApplicationError\n\n\nclass ComponentInitializer(object):\n\tdef __init__(self, urls, realms, factory_func):\n\t\tzipped = list(zip(urls, realms, factory_func))\n\t\tif len(zipped) > 0:\n\t\t\tself.components = [{'url': x, 'realm': y, 'factory': z} for x,y,z in zipped]\n\t\telse:\n\t\t\traise ValueError(\"unequal number of arguments\")\n\n\tdef run(self):\n\t\tfor comp in self.components:\n\t\t\trunner = ApplicationRunner(url=comp.get('url'), realm=comp.get('realm'))\n\t\t\trunner.run(comp.get('factory'))","sub_path":"wamp/components/componentinit.py","file_name":"componentinit.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"247501746","text":"__author__ = 'kaihami'\n#!/usr/bin/python\n# encoding: utf-8\n\nfrom scriptLattes import *\nfrom scriptLattes.geradorDePaginasWeb import *\nfrom scriptLattes.util import compararCadeias\n\nclass ArtigoEmPeriodico:\n\titem = None # dado bruto\n\tidMembro = None\n\tqualis = None\n\tqualissimilar = None\n\n\tdoi = None\n\trelevante = None\n\tautores = None\n\ttitulo = None\n\trevista = None\n\tvolume = None\n\tpaginas = None\n\tnumero = None\n\tano = None\n\tresto = None\n\tchave = None\n\tissn = None\n\n\tdef __init__(self, idMembro, partesDoItem='', doi='', relevante='', complemento=''):\n\t\tself.idMembro = set([])\n\t\tself.idMembro.add(idMembro)\n\n\t\tself.doi = ''\n\t\tself.relevante = ''\n\t\tself.autores = ''\n\t\tself.titulo 
= ''\n\t\tself.revista = ''\n\t\tself.volume = ''\n\t\tself.paginas = ''\n\t\tself.numero = ''\n\t\tself.ano = ''\n\t\tself.issn = ''\n\n\t\tif not partesDoItem=='':\n\t\t\t# partesDoItem[0]: Numero (NAO USADO)\n\t\t\t# partesDoItem[1]: Descricao do artigo (DADO BRUTO)\n\t\t\tself.item = partesDoItem[1]\n\t\t\tself.doi = doi\n\t\t\tself.relevante = relevante\n\n\t\t\t# Dividir o item na suas partes constituintes (autores e o resto)\n\t\t\tpartes = self.item.partition(\" . \")\n\n\t\t\t# Verificar quando há um numero de autores > que 25\n\t\t\tif partes[1]=='': # muitos autores (mais de 25) e o lattes insere etal. termina lista com ;\n\t\t\t\tpartes = self.item.partition(\" ; \")\n\t\t\t\ta = partes[0].partition(\", et al.\") # remocao do et al.\n\t\t\t\ta = a[0] + a[2] # estes autores nao estao bem separados pois falta ';'\n\t\t\t\tb = a.replace(', ','*')\n\t\t\t\tc = b.replace(' ',' ; ')\n\t\t\t\tself.autores = c.replace('*',', ')\n\t\t\telse:\n\t\t\t\tself.autores = partes[0].strip()\n\n\t\t\t# Processando o resto (tudo menos autores)\n\t\t\tpartes = partes[2].rpartition(\", \")\n\t\t\tself.ano = partes[2].strip().rstrip(\".\")\n\n\t\t\tpartes = partes[0].rpartition(\"p. \")\n\t\t\tif partes[1]=='': # se nao existe paginas\n\t\t\t\tself.paginas = ''\n\t\t\t\tpartes = partes[2]\n\t\t\telse:\n\t\t\t\tself.paginas = partes[2].strip()\n\t\t\t\tpartes = partes[0]\n\n\t\t\tpartes = partes.rpartition(\", n.\")\n\t\t\tif partes[1]=='': # se nao existe numero\n\t\t\t\tself.numero = ''\n\t\t\t\tpartes = partes[2]\n\t\t\telse:\n\t\t\t\tself.numero = partes[2].strip().rstrip(\",\")\n\t\t\t\tpartes = partes[0]\n\n\t\t\tpartes = partes.rpartition(\", v. \")\n\t\t\tif partes[1]=='': # se nao existe volume\n\t\t\t\tself.volume = ''\n\t\t\t\tpartes = partes[2]\n\t\t\telse:\n\t\t\t\tself.volume = partes[2].strip().rstrip(\",\")\n\t\t\t\tpartes = partes[0]\n\n\t\t\tp1 = partes.partition(\". \")\n\t\t\tp2 = partes.rpartition(\". \")\n\t\t\tif len(p1[0])>len(p2[2]):\n\t\t\t\tself.titulo = p2[0].strip()\n\t\t\t\tself.revista = p2[2].strip()\n\t\t\telse:\n\t\t\t\tself.titulo = p1[0].strip()\n\t\t\t\tself.revista = p1[2].strip()\n\n\t\t\tself.chave = self.autores # chave de comparação entre os objetos\n\n\n\t\t# usando os dados complementares (obtidos do div/cvuri)\n\t\tnomePeriodicoParte = complemento.split(\"nomePeriodico=\")\n\n\t\tif (len(nomePeriodicoParte)==2):\n\t\t\tself.revista = nomePeriodicoParte[1].strip()\n\n\t\tcomplementoPartes = complemento.split(\"&\")\n\t\tfor parametro in complementoPartes:\n\t\t\tpartes = parametro.split(\"=\")\n\t\t\tif len(partes)==2:\n\t\t\t\tparametroNome = partes[0].strip()\n\t\t\t\tparametroValor = partes[1].strip()\n\t\t\t\tif parametroNome==\"issn\" : self.issn = parametroValor\n\t\t\t\tif parametroNome==\"volume\" : self.volume = parametroValor\n\t\t\t\tif parametroNome==\"titulo\" : self.titulo = parametroValor\n\t\t\t\t#if parametroNome==\"nomePeriodico\": self.revista = parametroValor\n\n\n\tdef html(self, listaDeMembros):\n\t\ts = self.autores + '. ' + self.titulo + '. ' + self.revista + '. '\n\t\ts+= 'v. ' + self.volume + ', ' if not self.volume=='' else ''\n\t\ts+= 'n. ' + self.numero + ', ' if not self.numero== '' else ''\n\t\ts+= 'p. ' + self.paginas + ', ' if not self.paginas=='' else ''\n\t\ts+= 'issn: ' + self.issn + ', ' if not self.issn=='' else ''\n\t\ts+= str(self.ano) + '.' 
if str(self.ano).isdigit() else '.'\n\n\t\tif not self.doi=='':\n\t\t\ts+= ' '\n\n\t\ts+= menuHTMLdeBuscaPB(self.titulo)\n\t\ts+= formataQualis(self.qualis, self.qualissimilar)\n\t\treturn s\n\n\n\tdef ris(self):\n\t\tpaginas = self.paginas.split('-')\n\t\tif len(paginas)<2:\n\t\t\tp1 = self.paginas\n\t\t\tp2 = ''\n\t\telse:\n\t\t\tp1 = paginas[0]\n\t\t\tp2 = paginas[1]\n\t\ts = '\\n'\n\t\ts+= '\\nTY - JOUR'\n\t\ts+= '\\nAU - '+self.autores\n\t\ts+= '\\nTI - '+self.titulo\n\t\ts+= '\\nJO - '+self.revista\n\t\ts+= '\\nVL - '+self.volume\n\t\ts+= '\\nIS - '+self.numero\n\t\ts+= '\\nSP - '+p1\n\t\ts+= '\\nEP - '+p2\n\t\ts+= '\\nPY - '+str(self.ano)\n\t\ts+= '\\nL2 - '+self.doi\n\t\ts+= '\\nL3 - '+self.issn\n\t\ts+= '\\nER - '\n\t\treturn s\n\n\tdef csv(self, nomeCompleto=\"\"):\n\t\tif self.qualis==None:\n\t\t\tself.qualis=''\n\t\tif self.qualissimilar==None:\n\t\t\tself.qualissimilar=''\n\t\ts = \"artigoEmPeriodico\\t\"\n\t\tif nomeCompleto==\"\": # tratamento grupal\n\t\t\ts += str(self.ano) +\"\\t\"+ self.doi +\"\\t\"+ self.titulo +\"\\t\"+ self.revista +\"\\t\"+ self.autores +\"\\t\"+ self.qualis +\"\\t\"+ self.qualissimilar\n\t\telse: # tratamento individual\n\t\t\ts += nomeCompleto +\"\\t\"+ str(self.ano) +\"\\t\" + self.doi +\"\\t\"+ self.titulo +\"\\t\"+ self.revista +\"\\t\"+ self.autores +\"\\t\"+ self.qualis +\"\\t\"+ self.qualissimilar\n\t\treturn s\n\n\n\t# ------------------------------------------------------------------------ #\n\tdef __str__(self):\n\t\ts = \"\\n[ARTIGO EM PERIODICO] \\n\"\n\t\ts += \"+ID-MEMBRO : \" + str(self.idMembro) + \"\\n\"\n\t\ts += \"+RELEVANTE : \" + str(self.relevante) + \"\\n\"\n\t\ts += \"+DOI : \" + self.doi.encode('utf8','replace') + \"\\n\"\n\t\ts += \"+AUTORES : \" + self.autores.encode('utf8','replace') + \"\\n\"\n\t\ts += \"+TITULO : \" + self.titulo.encode('utf8','replace') + \"\\n\"\n\t\ts += \"+REVISTA : \" + self.revista.encode('utf8','replace') + \"\\n\"\n\t\ts += \"+PAGINAS : \" + self.paginas.encode('utf8','replace') + \"\\n\"\n\t\ts += \"+VOLUME : \" + self.volume.encode('utf8','replace') + \"\\n\"\n\t\ts += \"+NUMERO : \" + self.numero.encode('utf8','replace') + \"\\n\"\n\t\ts += \"+ANO : \" + str(self.ano) + \"\\n\"\n\t\ts += \"+ISSN : \" + str(self.issn) + \"\\n\"\n\t\ts += \"+item : \" + self.item.encode('utf8','replace') + \"\\n\"\n\t\treturn s","sub_path":"Get_Publications.py","file_name":"Get_Publications.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"582923525","text":"__author__ = 'jmasramon'\n\nfrom utilityFunctions import *\n\n\nclass Rover:\n position = {}\n orientation = ''\n orders = ''\n navigator = None\n ORDER_DICT = {'fN': go_north, 'fS': go_south, 'fE': go_east, 'fW': go_west,\n 'bN': go_south, 'bS': go_north, 'bE': go_west, 'bW': go_east,\n 'lN': turn_west, 'lS': turn_east, 'lE': turn_north, 'lW': turn_south,\n 'rN': turn_east, 'rS': turn_west, 'rE': turn_south, 'rW': turn_north}\n\n def __init__(self, initial_position, initial_orientation, navigator=''):\n self.position = initial_position\n self.orientation = initial_orientation\n self.navigator = navigator\n\n def run_orders(self):\n for order in self.orders:\n if order in ('f', 'b'):\n self._move(order)\n elif order in ('l', 'r'):\n self._reorient(order)\n\n def _move(self, order):\n if not self.navigator:\n self.position = self._get_order_from_dict(order)(self.position)\n else:\n self.position = self._get_order_from_navigator(order)(self.position)\n\n def 
_get_order_from_dict(self, order):\n return self.ORDER_DICT[order + self.orientation]\n\n def _get_order_from_navigator(self, order):\n return getattr(self.navigator, (self._get_order_from_dict(order)).__name__)\n\n def _reorient(self, order):\n self.orientation = self._get_order_from_dict(order)()\n","sub_path":"Rover.py","file_name":"Rover.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"398356841","text":"\n\nfrom xai.brain.wordbase.nouns._libation import _LIBATION\n\n#class header\nclass _LIBATIONS(_LIBATION, ):\n\tdef __init__(self,): \n\t\t_LIBATION.__init__(self)\n\t\tself.name = \"LIBATIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"libation\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_libations.py","file_name":"_libations.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"96316832","text":"import dynet\n\nfrom gated_intra_attention import GatedIntraAttention\n\n\nclass CompositionLayer(object):\n def __init__(self):\n self._gated_attention = GatedIntraAttention()\n\n def forward(self, inputs):\n\n inputs_value = inputs[0]\n v_average = self._average_pooling(inputs_value)\n v_max = dynet.max_dim(dynet.concatenate(inputs_value, d=1), 1)\n v_gated = self._gated_attention.forward(inputs)\n return dynet.concatenate([v_average, v_max, v_gated])\n\n @staticmethod\n def _average_pooling(inputs):\n sum = dynet.sum_dim(dynet.concatenate(inputs, d=1), d=[1])\n return sum / len(inputs)\n\n\n\n","sub_path":"composition_layer.py","file_name":"composition_layer.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"432600711","text":"\"\"\"\n Consume the xiaozijia_build queue, send the requests, and write into the building collection xiaozijia_build_fast\n Takes roughly 10 hours\n\"\"\"\n\nfrom lib.log import LogHandler\nfrom lib.mongo import Mongo\nfrom lib.rabbitmq import Rabbit\nimport requests\nimport json\nimport yaml\nfrom xiaozijia.user_headers import get_headers\n\nlog = LogHandler('小资家_build')\n\nsetting = yaml.load(open('config.yaml'))\n\n# mongo\nm = Mongo(setting['xiaozijia']['mongo']['host'], setting['xiaozijia']['mongo']['port'],\n user_name=setting['xiaozijia']['mongo']['user_name'], password=setting['xiaozijia']['mongo']['password'])\ncoll_build = m.connect[setting['xiaozijia']['mongo']['db']][setting['xiaozijia']['mongo']['build_coll']]\n\n# rabbit\nr = Rabbit(setting['xiaozijia']['rabbit']['host'], setting['xiaozijia']['rabbit']['port'])\nchannel = r.get_channel()\nbuild_queue = setting['xiaozijia']['rabbit']['queue']['xiaozijia_build']\nhouse_queue = setting['xiaozijia']['rabbit']['queue']['xiaozijia_house']\nchannel.queue_declare(queue=build_queue)\nchannel.queue_declare(queue=house_queue)\n\n\nclass Build(object):\n def __init__(self, username):\n self.headers = get_headers(username)\n self.user_name = username\n\n def get_build_info(self, ch, method, properties, body):\n \"\"\"\n Consume the xiaozijia_build queue, send the request, write into the complex collection, and publish the house-number pages\n :param ch:\n :param method:\n :param properties:\n :param body:\n :return:\n \"\"\"\n body_json = json.loads(body.decode())\n ConstructionPhaseId = body_json['ConstructionPhaseId']\n ConstructionName = body_json['ConstructionName']\n ConstructionId = body_json['ConstructionId']\n build_url = 'http://www.xiaozijia.cn/HousesForJson/' + ConstructionPhaseId + '/2'\n try:\n response = requests.get(build_url, headers=self.headers, 
timeout=20)\n html_json = response.json()\n if not html_json:\n log.info('no buildings in this complex, url={}'.format(build_url))\n for i in html_json:\n i['ConstructionName'] = ConstructionName\n i['ConstructionId'] = ConstructionId\n channel.basic_publish(exchange='',\n routing_key=house_queue,\n body=json.dumps(i))\n coll_build.insert_one(i)\n log.info(i)\n\n except Exception as e:\n self.headers = get_headers(self.user_name)\n log.error('request error,url=\"{}\",ConstructionPhaseId=\"{}\",ConstructionName=\"{}\",ConstructionId=\"{}\",e=\"{}\"'\n .format(build_url, ConstructionPhaseId, ConstructionName, ConstructionId, e))\n channel.basic_publish(exchange='',\n routing_key=build_queue,\n body=body)\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n def consume_queue(self):\n channel.basic_qos(prefetch_count=1)\n channel.basic_consume(consumer_callback=self.get_build_info, queue=build_queue)\n channel.start_consuming()\n\n\nif __name__ == '__main__':\n # supply a username that get_headers recognizes\n Build('<username>').consume_queue()\n","sub_path":"hilder_friends/backup/xiaozijia/get_build_rabbit.py","file_name":"get_build_rabbit.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"446118915","text":"import math\r\nfrom time import sleep\r\n'''Exercise 018: Read any angle\r\nand show the value of its sine, cosine and tangent'''\r\n\r\nx = float(input('Enter an angle: '))\r\ncos = math.cos(math.radians(x)) #takes x and converts it to radians\r\nsen = math.sin(math.radians(x))\r\ntg = math.tan(math.radians(x))\r\n\r\nprint('''Sine: {:.3f}\\nCosine: {:.3f}\\nTangent: {:.3f}'''.format(sen, cos, tg))\r\nsleep(3)","sub_path":"018.py","file_name":"018.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"535026237","text":"\"\"\" Starter code for simple logistic regression model for MNIST\nwith tf.data module\nMNIST dataset: yann.lecun.com/exdb/mnist/\nCreated by Chip Huyen (chiphuyen@cs.stanford.edu)\nCS20: \"TensorFlow for Deep Learning Research\"\ncs20.stanford.edu\nLecture 03\n\"\"\"\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\nimport numpy as np\nimport tensorflow as tf\nimport time\n\nimport utils\n\n# Define parameters for the model\nlearning_rate = 0.01\nbatch_size = 128\nn_epochs = 100\nn_train = 60000\nn_test = 10000\n\n# Step 1: Read in data\nmnist_folder = 'data/mnist'\nutils.download_mnist(mnist_folder)\ntrain, val, test = utils.read_mnist(mnist_folder, flatten=True)\n\n# Step 2: Create datasets and iterator\n# create training Dataset and batch it\ntrain_data = tf.data.Dataset.from_tensor_slices(train)\ntrain_data = train_data.shuffle(10000) # if you want to shuffle your data\ntrain_data = train_data.batch(batch_size)\n\n# create testing Dataset and batch it\ntest_data = tf.data.Dataset.from_tensor_slices(test)\ntest_data = test_data.shuffle(10000) # if you want to shuffle your data\ntest_data = test_data.batch(batch_size)\n\n\n# create one iterator and initialize it with different datasets\niterator = tf.data.Iterator.from_structure(train_data.output_types, \n train_data.output_shapes)\nimg, label = iterator.get_next()\n\ntrain_init = iterator.make_initializer(train_data)\t# initializer for train_data\ntest_init = iterator.make_initializer(test_data)\t# initializer for test_data\n\n# Step 3: create weights and bias\nlayer1_size = 200\nlayer2_size = 100\nlayer3_size = 60\nlayer4_size = 30\n\nw1 = tf.get_variable(name='weights_1', shape=[784, layer1_size], 
initializer=tf.random_normal_initializer(mean=0, stddev=0.1))\nb1 = tf.get_variable(name='bias_1', shape=[1, layer1_size], initializer=tf.zeros_initializer())\n\nw2 = tf.get_variable(name='weights_2', shape=[layer1_size, layer2_size], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))\nb2 = tf.get_variable(name='bias_2', shape=[1, layer2_size], initializer=tf.zeros_initializer())\n\nw3 = tf.get_variable(name='weights_3', shape=[layer2_size, layer3_size], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))\nb3 = tf.get_variable(name='bias_3', shape=[1, layer3_size], initializer=tf.zeros_initializer())\n\nw4 = tf.get_variable(name='weights_4', shape=[layer3_size, layer4_size], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))\nb4 = tf.get_variable(name='bias_4', shape=[1, layer4_size], initializer=tf.zeros_initializer())\n\nw5 = tf.get_variable(name='weights_5', shape=[layer4_size, 10], initializer=tf.random_normal_initializer(mean=0, stddev=0.01))\nb5 = tf.get_variable(name='bias_5', shape=[1, 10], initializer=tf.zeros_initializer())\n\n# Step 4: build model\n'''\nout1 = tf.nn.sigmoid(tf.matmul(img, w1) + b1)\nout2 = tf.nn.sigmoid(tf.matmul(out1, w2) + b2)\nout3 = tf.nn.sigmoid(tf.matmul(out2, w3) + b3)\nout4 = tf.nn.sigmoid(tf.matmul(out3, w4) + b4)\nlogits = tf.matmul(out4, w5) + b5\n'''\n\n'''\nout1 = tf.matmul(img, w1) + b1\nout2 = tf.matmul(out1, w2) + b2\nout3 = tf.matmul(out2, w3) + b3\nout4 = tf.matmul(out3, w4) + b4\nlogits = tf.matmul(out4, w5) + b5\n'''\n\nout1 = tf.nn.relu(tf.matmul(img, w1) + b1)\nout2 = tf.nn.relu(tf.matmul(out1, w2) + b2)\nout3 = tf.nn.relu(tf.matmul(out2, w3) + b3)\nout4 = tf.nn.relu(tf.matmul(out3, w4) + b4)\nlogits = tf.matmul(out4, w5) + b5\n\n\n# Step 5: define loss function\n# use cross entropy of softmax of logits as the loss function\nprint(label)\nentropy = tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits)\nloss = tf.reduce_mean(entropy)\n\n\n# Step 6: define optimizer\n# using Adamn Optimizer with pre-defined learning rate to minimize loss\nlr_start = 0.001\nlr = 0.001\noptimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)\n\n\n# Step 7: calculate accuracy with test set\npreds = tf.nn.softmax(logits)\ncorrect_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(label, 1))\naccuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))\n\n#writer = tf.summary.FileWriter('./graphs/logreg', tf.get_default_graph())\nwith tf.Session() as sess:\n \n start_time = time.time()\n sess.run(tf.global_variables_initializer())\n\n # train the model n_epochs times\n for i in range(n_epochs):\n sess.run(train_init)\t# drawing samples from train_data\n total_loss = 0\n n_batches = 0\n lr = lr_start\n try:\n while True:\n #lr = lr / 2\n #lr = max(lr, 0.001)\n #print(\"lr: \" + str(lr))\n _, l = sess.run([optimizer, loss])\n total_loss += l\n n_batches += 1\n\n except tf.errors.OutOfRangeError:\n pass\n print('Average loss epoch {0}: {1}'.format(i, total_loss/n_batches))\n print('Total time: {0} seconds'.format(time.time() - start_time))\n\n # test the model\n sess.run(test_init)\t\t\t# drawing samples from test_data\n total_correct_preds = 0\n try:\n while True:\n accuracy_batch = sess.run(accuracy)\n total_correct_preds += accuracy_batch\n except tf.errors.OutOfRangeError:\n pass\n\n print('Accuracy 
{0}'.format(total_correct_preds/n_test))\n#writer.close()","sub_path":"hw1/03_logreg_nn.py","file_name":"03_logreg_nn.py","file_ext":"py","file_size_in_byte":5198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"76539378","text":"# adapted after https://github.com/yxlao/pytorch-reverse-gan/blob/master/dcgan_reverse.py\n\nimport argparse\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\n\ndef reverse_z(netG, x, nz=128, z_distribution=\"normal\", cuda=False, clip='disabled', lr=0.001, niter=1000, loss_type='L2', apply_transform=None):\n \"\"\"\n Estimate z_approx given G and G(z).\n Args:\n netG: nn.Module, generator network.\n g_z: Variable, G(z).\n opt: argparse.Namespace, network and training options.\n z: Variable, the ground truth z, ref only here, not used in recovery.\n clip: Although clip could come from of `opt.clip`, here we keep it\n to be more explicit.\n Returns:\n Variable, z_approx, the estimated z value.\n \"\"\"\n # sanity check\n assert clip in ['disabled', 'standard', 'stochastic']\n\n xt = torch.from_numpy(x).float()\n if apply_transform is not None:\n xt = apply_transform(xt)\n xv = Variable(xt)\n xv = xv.detach()\n\n # loss metrics\n mse_loss = nn.MSELoss()\n l1_loss = nn.L1Loss()\n\n # init tensor\n if z_distribution == 'uniform':\n z_approx = torch.FloatTensor(1, nz, 1, 1).uniform_(-1, 1)\n elif z_distribution == 'normal':\n z_approx = torch.FloatTensor(1, nz, 1, 1).normal_(0, 1)\n else:\n raise ValueError()\n\n # transfer to gpu\n if cuda:\n mse_loss.cuda()\n l1_loss.cuda()\n z_approx = z_approx.cuda()\n xv = xv.cuda()\n\n # convert to variable\n z_approx = Variable(z_approx)\n z_approx.requires_grad = True\n\n # optimizer\n optimizer_approx = optim.Adam([z_approx], lr=lr, betas=(0.5, 0.999))\n\n # train\n loss_g_z_hist = []\n loss_min = 1000\n z_approx_min = z_approx\n iter_disp = niter / 10\n for i in range(niter):\n g_z_approx = netG(z_approx)\n mse_g_z = mse_loss(g_z_approx, xv)\n l1_g_z = l1_loss(g_z_approx, xv)\n if loss_type == 'L2':\n loss_g_z = mse_g_z\n elif loss_type == 'L1':\n loss_g_z = l1_g_z\n else:\n loss_g_z = mse_g_z + l1_g_z\n\n if i % iter_disp == 0:\n print(\"[Iter {}/{}] loss_g_z: {}\"\n .format(i, niter, mse_g_z.data[0]))\n loss_g_z_hist += [loss_g_z.data[0]]\n if mse_g_z.data[0] < loss_min:\n loss_min = loss_g_z.data[0]\n z_approx_min = z_approx\n\n # bprop\n optimizer_approx.zero_grad()\n loss_g_z.backward()\n optimizer_approx.step()\n\n # clipping\n if clip == 'standard':\n z_approx.data[z_approx.data > 1] = 1\n z_approx.data[z_approx.data < -1] = -1\n if clip == 'stochastic':\n z_approx.data[z_approx.data > 1] = random.uniform(-1, 1)\n z_approx.data[z_approx.data < -1] = random.uniform(-1, 1)\n\n return z_approx_min, loss_g_z_hist","sub_path":"latent_space.py","file_name":"latent_space.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"159431810","text":"# -*- coding: utf-8 -*-\nclass Stack:\n def __init__(self):\n self.s = list()\n def push(self, n):\n self.s.append(n)\n def pop(self):\n if len(self.s) > 0:\n self.s.pop()\n def top(self):\n if len(self.s) > 0:\n return self.s[len(self.s) - 1]\n def empty(self):\n return len(self.s) == 0\n \nclass CheckValidation():\n def check(self, s):\n st = 
Stack()\n for i in range(0, len(s)):\n if s[i] == \"(\" or s[i] == \"[\" or s[i] == \"{\":\n st.push(s[i])\n else:\n if not st.empty():\n c = st.top()\n if (c == \"(\" and s[i] == \")\") or (c == \"[\" and s[i] == \"]\") or (c == \"{\" and s[i] == \"}\"):\n st.pop()\n else:\n return False\n else:\n return False\n return st.empty() \n\nprint(\"Enter a bracket sequence to check for validity: \")\ns=str(input()) \nprint(\"Is the sequence valid?\")\nc=CheckValidation()\nprint(c.check(s))","sub_path":"pack_3/3_1.py","file_name":"3_1.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"442920491","text":"__author__ = 'Sergey Tomin'\n\nfrom ocelot import *\nfrom ocelot.gui import *\nfrom ocelot.adaptors import *\n\nbeam = Beam()\nbeam.E = 148.3148e-3 #in GeV ?!\nbeam.beta_x = 14.8821\nbeam.beta_y = 18.8146\nbeam.alpha_x = -0.61309\nbeam.alpha_y = -0.54569\nbeam.emit_xn = 1.5e-6\nbeam.emit_yn = 1.5e-6\nbeam.emit_x = beam.emit_xn / (beam.E / m_e_GeV)\nbeam.emit_y = beam.emit_yn / (beam.E / m_e_GeV)\n\ntw0 = Twiss(beam)\nfrom desy.demos.ebeam.flash.lattice_FLASH_S2E import *\n#exec(open('lattice.inp'))\nmethod = MethodTM()\nmethod.global_method = SecondTM\nlat = MagneticLattice(lattice, method=method)\n\ntws=twiss(lat, tw0, nPoints=None)\nplot_opt_func(lat, tws, top_plot=[\"E\"])\np_array, charge_array = astraBeam2particleArray(filename='elegant_files/flash_out_200000.ast')\n\n#p_array.particles[4::6] = sc.smooth_z(p_array.particles[4::6], mslice=10000)\n\n# plot current\nbins_start, hist_start = get_current(p_array, charge=charge_array[0], num_bins=200)\n\n\n\n\n\nfrom ocelot.cpbd.sc import *\n\n\n#csr = SCRProcess()\n#csr.step = 2\nsc = SpaceCharge()\n\n\np_array.q_array = charge_array\n#p_array.list2array(p_list)\n\nnavi = Navigator(lat)\n\n#navi.add_physics_proc(sc, lat.sequence[0], lat.sequence[-1])\n#navi.add_physics_proc(csr, d1, d2)\nnavi.unit_step = 1.\n\ntws_track, p_array = track(lat, p_array, navi)\n\n\n\n\n\n\n\n\n\n\"\"\"\ndz = 1.\n#order = 2\nSC = True\ndebug = False\n\nZ = np.linspace(0, lat.totalLen, num=int(lat.totalLen/dz))\n\ntwsi=twiss(lat, tw0, nPoints=len(Z) )\ntw0 = get_envelope(p_array, tws_i = twsi[0])\ntws_track = [tw0]\n\nif debug:\n f=plt.figure()\n plt.ion()\n plt.hold(False)\n\nnavi = Navigator(lattice=lat)\nfor i, zi in enumerate(Z[1:]):\n print (zi)\n dz = zi - Z[i]\n tracking_step(lat=lat, particle_list=p_array, dz=dz, navi=navi)\n #p_array.particles[4::6] = sc.smooth_z(p_array.particles[4::6], mslice=10000)\n if SC:\n sc_apply(p_array, q_array=charge_array, zstep=dz, nmesh_xyz=[63, 63, 63], low_order_kick=True)\n tw = get_envelope(p_array,tws_i=twsi[i+1])\n #print \"emit_x = \", tw.emit_y, beam.emit_y\n tw.s = navi.z0\n tws_track.append(tw)\n if debug:\n f.add_subplot(211)\n plt.plot(p_array.particles[::6], p_array.particles[2::6], '.')\n f.add_subplot(212)\n plt.plot(p_array.particles[4::6],p_array.particles[5::6],'.')\n plt.draw()\n plt.pause(0.1)\nplt.ioff()\n\"\"\"\n\n# plot current at the beginning of accelerator\nplt.figure(1)\nplt.title(\"current: start\")\nplt.plot(bins_start, hist_start)\nplt.xlabel(\"s, m\")\nplt.ylabel(\"I, A\")\nplt.grid(True)\n\n# plot current at the end of accelerator\nbins, hist = get_current(p_array, charge=charge_array[0], num_bins=200)\nplt.figure(2)\nplt.title(\"current: end\")\nplt.plot(bins, hist)\nplt.xlabel(\"s, m\")\nplt.ylabel(\"I, A\")\nplt.grid(True)\n\n\neleg_opt = 
np.genfromtxt('elegant_files/elegant_beam_optics_2ndOrder.txt')\ns_b=eleg_opt[:, 0]\nbetax_b=eleg_opt[:, 8]\nbetay_b=eleg_opt[:, 11]\n\n\nplt.figure(3)\nplt.title(r\"$\\beta_y - functions$\")\nplt.plot([p.s for p in tws_track], [p.beta_y for p in tws_track], \"ro-\", label = \"ocelot\")\nplt.plot(s_b, betay_b, \"bo-\", label = \"elegant\")\nplt.legend()\nplt.grid(True)\n\nplt.figure(4)\nplt.title(r\"$\\beta_x - functions$\")\nplt.plot([p.s for p in tws_track], [p.beta_x for p in tws_track], \"ro-\", label = \"ocelot\")\nplt.plot(s_b, betax_b, \"bo-\", label = \"elegant\")\nplt.legend()\nplt.grid(True)\n\nplt.show()","sub_path":"demos/ebeam/flash/sc_flash_new.py","file_name":"sc_flash_new.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"599374620","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.utils.timezone import utc\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('define_trip', '0019_remove_train_time'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='table',\n name='date',\n field=models.DateField(default=datetime.datetime(2015, 8, 1, 6, 37, 53, 160409, tzinfo=utc)),\n preserve_default=False,\n ),\n ]\n","sub_path":"SepasIran_m/define_trip/migrations/0020_table_date.py","file_name":"0020_table_date.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"592187202","text":"# -*- coding: utf-8 -*-\r\n\r\nimport socket, cv2\r\nfrom vidgear.gears import NetGear\r\nfrom pickle import dumps, loads\r\nimport threading\r\n\r\nclass Main():\r\n ids = {}\r\n clients = {}\r\n \r\n def __init__(self, port = 12351):\r\n self.sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\r\n self.port = port\r\n \r\n def start(self, n_cli = 5):\r\n self.sock.bind(('',self.port))\r\n self.sock.listen(n_cli)\r\n self.ids['recv_sig'] = threading.Thread(target=self.start_sig)\r\n self.ids['recv_sig'].start()\r\n \r\n def send_sig(self, c_sock, sig):\r\n c_sock.send(sig.encode())\r\n \r\n def recv_sig(self):\r\n sig = self.sock.recv(1024).decode()\r\n \r\n def start_sig(self):\r\n while True :\r\n c_sock, addr = self.sock.accept()\r\n name = c_sock.recv(1024).decode()\r\n self.clients[name] = [c_sock, addr]\r\n \r\n def recv_video(self, port = 54321):\r\n def inner(port):\r\n client = NetGear(port = str(port), protocol=\"tcp\", receive_mode=True)\r\n while True:\r\n frame = client.recv()\r\n if frame is None:\r\n break\r\n cv2.imshow(\"Output Frame\", frame)\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord(\"q\"):\r\n break\r\n cv2.destroyAllWindows()\r\n client.close()\r\n self.ids['recv_video'] = threading.Thread(target=inner, args=(port,))\r\n self.ids['recv_video'].start()\r\n \r\n def deny_ip(self, c_sock, website = None):\r\n self.send_sig(c_sock, f'DENY {website}')\r\n \r\n def allow_ip(self, c_sock,website = None):\r\n self.send_sig(c_sock, f'ALLOW {website}')\r\n \r\ndef printmsg(msgtype, msg, hdr = 'Undefined'):\r\n if msgtype == 'log':\r\n print(f'[ {hdr} ] > {msg}')\r\n else:\r\n print(msg)\r\n \r\n\r\nif __name__ == '__main__':\r\n\r\n help_str = \"\"\"--------------->\r\nlist - List the Clients Connected to the Lan Monitor\r\npeep - Peep into Clients Screen\r\ndeny_ip - Block an ip address or website in clients computer\r\nallow_ip - Allow an ip address or website in clients 
computer\r\nexit - Exit the Program\r\nhelp - To list commands\r\n--------------->\"\"\"\r\n\r\n main = Main()\r\n main.start()\r\n \r\n print(help_str)\r\n while True:\r\n temp1 = input('[ LAN MONITOR ] > ')\r\n if temp1 == 'list':\r\n if main.clients == {}:\r\n printmsg(None, 'No Clients Connected !')\r\n else:\r\n for clients in main.clients:\r\n printmsg(None, clients)\r\n \r\n elif temp1 == 'peep':\r\n temp2 = input('[ Client Name ] > ')\r\n if temp2 in main.clients.keys():\r\n main.send_sig(main.clients[temp2][0],'VID') \r\n main.recv_video()\r\n else:\r\n printmsg('log', 'Invalid', f'No Client named {temp2} !')\r\n \r\n elif temp1 == 'deny_ip':\r\n temp2 = input('[ Client Name ] > ')\r\n temp3 = input('[ Website ] > ')\r\n if temp2 in main.clients.keys():\r\n main.deny_ip(main.clients[temp2][0], temp3)\r\n else:\r\n printmsg('log', 'Invalid', f'No Client named {temp2} !')\r\n \r\n elif temp1 == 'allow_ip':\r\n temp2 = input('[ Client Name ] > ')\r\n temp3 = input('[ Website ] > ')\r\n if temp2 in main.clients.keys():\r\n main.allow_ip(main.clients[temp2][0], temp3)\r\n else:\r\n printmsg('log', 'Invalid', f'No Client named {temp2} !')\r\n \r\n elif temp1 == 'help':\r\n print(help_str)\r\n \r\n elif temp1 == 'exit':\r\n break\r\n else:\r\n printmsg('log', f'No command named {temp1} !, type help for more details.', 'Invalid')\r\n ","sub_path":"Separate Functions/shyam.py","file_name":"shyam.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"388496609","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\nGet id and temperature from all ds18s20 devices connected\nto Raspberry Pi\ncode adapted from www.netzmafia.de\n\"\"\"\n\ndef getTemp():\n data = dict()\n # 1-Wire Slave-Liste lesen\n file = open('/sys/devices/w1_bus_master1/w1_master_slaves')\n w1_slaves = file.readlines()\n file.close()\n\n # Fuer jeden 1-Wire Slave aktuelle Temperatur ausgeben\n for line in w1_slaves:\n # 1-wire Slave extrahieren\n w1_slave = line.split(\"\\n\")[0]\n # 1-wire Slave Datei lesen\n file = open('/sys/bus/w1/devices/' + str(w1_slave) + '/w1_slave')\n filecontent = file.read()\n file.close()\n\n # Temperaturwerte auslesen und konvertieren\n stringvalue = filecontent.split(\"\\n\")[1].split(\" \")[9]\n temperature = float(stringvalue[2:]) / 1000\n data.update({w1_slave : temperature})\n\n return data\n\nif __name__ == \"__main__\":\n import time\n while True:\n result = getTemp()\n for dev, temp in result.items():\n print(str(dev) + ': %6.2f °C' % temp)\n time.sleep(1)\n\n\n","sub_path":"onewire.py","file_name":"onewire.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"226387495","text":"import pymysql\r\nimport pass_word\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n#ccypairs = [\"eurusd\",\"usdjpy\",\"gbpusd\"]\r\n#tenors = [\"1w\",\"1m\",\"3m\"]\r\n\r\nccypairs = [\"gbpusd\"]\r\ntenors = [\"3m\"]\r\n\r\nfor ccypair in ccypairs:\r\n for tenor in tenors:\r\n\r\n midtobid = 0\r\n fwdptsmult = 0\r\n\r\n if(ccypair==\"eurusd\"):\r\n cols = [\"Date\",\"EURv1w\",\"EURf1w\",\"EURv1m\",\"EURf1m\",\"EURv3m\",\"EURf3m\"]\r\n fwdptsmult=10000\r\n key = \"EUR\"\r\n #1w b/o\t0.55 // 0.80\r\n #1m b/o\t0.20 // 0.30\r\n #3m b/o\t0.20 // 0.30\r\n if(tenor==\"1w\"):\r\n midtobid = 0.40\r\n elif((tenor==\"1m\")or(tenor==\"3m\")):\r\n midtobid = 0.15\r\n\r\n elif(ccypair==\"usdjpy\"):\r\n cols = 
[\"Date\",\"JPYv1w\",\"JPYf1w\",\"JPYv1m\",\"JPYf1m\",\"JPYv3m\",\"JPYf3m\"]\r\n fwdptsmult=100\r\n key = \"JPY\"\r\n #1w b/o\t0.75 // 0.80\r\n #1m b/o\t0.30 // 0.30\r\n #3m b/o\t0.25 // 0.30\r\n if(tenor==\"1w\"):\r\n midtobid = 0.40\r\n elif((tenor==\"1m\")or(tenor==\"3m\")):\r\n midtobid = 0.15\r\n\r\n elif(ccypair==\"gbpusd\"):\r\n cols = [\"Date\",\"GBPv1w\",\"GBPf1w\",\"GBPv1m\",\"GBPf1m\",\"GBPv3m\",\"GBPf3m\"]\r\n fwdptsmult=10000\r\n key = \"GBP\"\r\n if(tenor==\"1w\"):\r\n midtobid = 0.40\r\n elif((tenor==\"1m\")or(tenor==\"3m\")):\r\n midtobid = 0.15\r\n\r\n volfwdpts_df = pd.read_csv(\"%s_vol_fwdpts.txt\" % (ccypair), parse_dates=True,skiprows=1,infer_datetime_format=True,sep=\",\",names=cols,index_col=\"Date\", na_values=\"N/A\")\r\n\r\n conn = pymysql.connect(host='127.0.0.1', user='valery', passwd=pass_word.var1, db=pass_word.var2)\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"SELECT cycle_id, cycle_start from %s_%s_cycles where (vol_bid IS NULL or fwd_pts is NULL) order by cycle_id;\" % (ccypair, tenor))\r\n results=cur.fetchall()\r\n\r\n for result in results:\r\n (cycle_id, cycle_start) = result\r\n if(str(cycle_start) in volfwdpts_df.index):\r\n if((not np.isnan(volfwdpts_df.ix[cycle_start,key+\"v\"+tenor])) and (not np.isnan(volfwdpts_df.ix[cycle_start,key+\"f\"+tenor]))):\r\n volbid = volfwdpts_df.ix[cycle_start,key+\"v\"+tenor]-midtobid\r\n fwdpts = volfwdpts_df.ix[cycle_start,key+\"f\"+tenor]/fwdptsmult\r\n print(cycle_id,\" vol = \",volbid, \" fwdpts = \",fwdpts)\r\n query = r\"UPDATE %s_%s_cycles set vol_bid=%s, fwd_pts=%s where cycle_id=%s;\" % (ccypair,tenor,volbid,fwdpts,cycle_id)\r\n print(query)\r\n cur.execute(query)\r\n\r\n conn.commit()\r\n\r\n cur.execute(\"SELECT a.cycle_id, a.cycle_start, a.fwd_pts, b.m_minrowid from %s_%s_cycles as a, %s_date_mapping as b where a.cycle_start=b.m_date and a.starting_spot IS NULL and (not a.fwd_pts is null) order by a.cycle_id;\" % (ccypair, tenor, ccypair))\r\n results=cur.fetchall()\r\n\r\n for result in results:\r\n (cycle_id, cycle_start, fwd_pts, minrowid) = result\r\n sql_query = \"SELECT quotebid, quoteoffer from %s_new1 where quotedate='%s' and quotetime>='10:00:00' and rowid>=%s order by rowid limit 1;\" % (ccypair,str(cycle_start),minrowid)\r\n\r\n print(sql_query)\r\n\r\n cur.execute(sql_query)\r\n results2 = cur.fetchone();\r\n spot = round((results2[0]+results2[1])*0.5,6)\r\n strike = round(spot+float(fwd_pts),6)\r\n\r\n sql_query = \"UPDATE %s_%s_cycles set starting_spot=%s, strike=%s where cycle_id=%s;\" % (ccypair,tenor,spot,strike,cycle_id)\r\n print(sql_query)\r\n cur.execute(sql_query)\r\n\r\n conn.commit()\r\n conn.close()\r\n","sub_path":"py_files/04.fill_cycles_table.py","file_name":"04.fill_cycles_table.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"204124873","text":"\"\"\"Code to interact with ABF files. 
https://github.com/swharden/pyABF/ \"\"\"\n\nimport sys\nimport numpy as np # OKAY INSIDE ABF CLASS\nimport matplotlib.pyplot as plt # HAVEN'T DECIDED IF WILL BE IN ABF CLASS\n\nfrom header import ABFheader\n\nclass ABF:\n def __init__(self,abf):\n \"\"\"The ABF class provides easy pythonic access to header and signal data in ABF2 files.\n \n * Although it is typically instantiated with a path (string), you can also use an ABF or ABFheader.\n \n Quick start:\n >>> abf = ABF(\"/path/to/file.abf\")\n >>> abf.setSweep(0) # load data from the first sweep\n >>> print(abf.dataY) # signal data\n >>> print(abf.dataX) # timestamps\n >>> print(abf.dataC) # command waveform\n\n See all the properties available to you:\n >>> abf.help()\n \n Developers can access the ABFheader class features:\n >>> abf._abfHeader.saveHTML()\n \n \"\"\"\n \n # get our abfHeader in order depending on what type of object we were given\n if type(abf) is str:\n self._abfHeader = ABFheader(abf)\n elif str(type(abf)).endswith(\".ABF'>\"):\n self._abfHeader = abf._abfHeader\n elif str(type(abf)).endswith(\".ABFheader'>\"):\n self._abfHeader = abf\n else:\n raise ValueError('abf must be a file path (str), ABF object, or ABFheader object.')\n \n ### Populate meaningful ABF attributes. Think about how you will use them: abf.something\n self.ID = self._abfHeader.header['abfID']\n self.filename = self._abfHeader.header['abfFilename']\n self.datetime = self._abfHeader.header['abfDatetime']\n self.pointDurSec = self._abfHeader.header['timeSecPerPoint']\n self.pointDurMS = self._abfHeader.header['timeSecPerPoint']*1000.0\n self.pointsPerSweep = self._abfHeader.header['sweepPointCount']\n self.pointsPerSec = self._abfHeader.header['rate']\n self.sweepCount = self._abfHeader.header['sweepCount']\n self.sweepList = np.arange(self.sweepCount)\n self.sweepLengthSec = self._abfHeader.header['sweepLengthSec']\n self.sweepPointCount = self._abfHeader.header['sweepPointCount']\n self.mode = self._abfHeader.header['mode']\n self.units = self._abfHeader.header['units']\n self.unitsLong = \"Membrane Potential (mV)\" if self.units is 'mV' else \"Membrane Current (pA)\"\n self.unitsCommand = self._abfHeader.header['unitsCommand']\n self.unitsCommandLong = \"Clamp Potential (mV)\" if self.unitsCommand is 'mV' else \"Clamp Current (pA)\"\n self.commandHoldingByDAC = self._abfHeader.header['commandHoldingByDAC']\n self.commandHold = self.commandHoldingByDAC[0]\n self.experimentLengthSec = self.sweepLengthSec*self.sweepCount\n self.unitsTime = \"seconds\"\n self.unitsTimeLong = \"Signal Time (seconds)\"\n \n ### Add information about the epochs / command waveform\n self.epochCount = len(self._abfHeader.header['nEpochType'])\n self.epochType = self._abfHeader.header['nEpochType']\n self.epochCommand = self._abfHeader.header['fEpochInitLevel']\n self.epochCommandDelta = self._abfHeader.header['fEpochLevelInc']\n self.epochDuration = self._abfHeader.header['lEpochInitDuration']\n self.epochDurationDelta = self._abfHeader.header['lEpochDurationInc']\n self.epochPulsePeriod = self._abfHeader.header['lEpochPulsePeriod']\n self.epochPulseWidth = self._abfHeader.header['lEpochPulseWidth']\n self.epochDigOut = self._abfHeader.header['nEpochDigitalOutput']\n \n ### Preload signal and time data (totalling ~10MB of memory per minute of 20kHz recording)\n self.signalData = self._abfHeader.data\n self.signalTimes = np.arange(len(self.signalData),dtype='float32')*self.pointDurSec\n \n def setSweep(self,sweepNumber=0,absoluteTime=False):\n \"\"\"set all the self.data 
variables to contain data for a certain sweep\"\"\"\n self.dataSweepSelected = sweepNumber\n self.sweepSelected = sweepNumber\n pointStart=sweepNumber*self.pointsPerSweep\n pointEnd=pointStart+self.pointsPerSweep\n self.dataY = self.signalData[pointStart:pointEnd]\n if absoluteTime:\n self.dataX = self.signalTimes[pointStart:pointEnd]\n else:\n self.dataX = self.signalTimes[0:self.pointsPerSweep]\n self.updateCommandWaveform()\n \n def updateCommandWaveform(self):\n \"\"\"Read the epochs and figure out how to fill self.dataC with the command signal.\"\"\"\n self.dataC = np.empty(self.dataX.size) # start as random data\n position=0 # start at zero here for clarity\n position+=int(self.pointsPerSweep/64) # the first 1/64th is pre-epoch (why???)\n self.dataC[:position]=self.commandHold # fill the pre-epoch with the command holding\n for epochNumber in range(self.epochCount):\n pointCount=self.epochDuration[epochNumber]\n deltaCommand=self.epochCommandDelta[epochNumber]*self.sweepSelected\n self.dataC[position:position+pointCount]=self.epochCommand[epochNumber]+deltaCommand\n position+=pointCount\n self.dataC[position:]=self.commandHold # set the post-epoch to the command holding\n \n def help(self):\n \"\"\"Show information about the ABF class which may be useful.\"\"\"\n print(\"\\n### ATTRIBUTES ###\")\n for thing in [x for x in sorted(dir(self)) if not x.startswith(\"_\")]:\n if not \"bound method\" in str(getattr(self,thing)):\n print(\"abf.%s = %s\"%(thing,str(getattr(self,thing)))) \n print(\"\\n### FUNCTIONS ###\")\n for thing in [x for x in sorted(dir(self)) if not x.startswith(\"_\")]:\n if \"bound method\" in str(getattr(self,thing)):\n print(\"abf.%s()\"%(thing))\n \nif __name__==\"__main__\": \n abf=ABF(R\"../../../../data/17o05028_ic_steps.abf\")\n #abf=ABF(R\"../../../../data/17o05024_vc_steps.abf\")\n #demo_trace_and_protocol(abf)\n #abf.setSweep(0)\n print(\"DONE\")\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"docs/advanced/abf-file-format/old/2017-10-13 api concepts/abf.py","file_name":"abf.py","file_ext":"py","file_size_in_byte":6321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"27816745","text":"import os\nimport asyncio\n\nfrom google.cloud import storage\nfrom google.oauth2 import service_account\nimport google.api_core\nimport hailjwt as hj\n\nfrom .google_storage import upload_private_gs_file_from_string, download_gs_file_as_string\nfrom .google_storage import delete_gs_file\n\n\nbatch_gsa_key = os.environ.get('BATCH_GSA_KEY', '/batch-gsa-key/privateKeyData')\ncredentials = service_account.Credentials.from_service_account_file(batch_gsa_key)\ngcs_client = storage.Client(credentials=credentials)\n\nbatch_jwt = os.environ.get('BATCH_JWT', '/batch-jwt/jwt')\nwith open(batch_jwt, 'r') as f:\n batch_bucket_name = hj.JWTClient.unsafe_decode(f.read())['bucket_name']\n\n\ndef _gs_log_path(instance_id, job_id, task_name):\n return f'{instance_id}/{job_id}/{task_name}/job.log'\n\n\nasync def write_gs_log_file(thread_pool, instance_id, job_id, task_name, log):\n path = _gs_log_path(instance_id, job_id, task_name)\n await blocking_to_async(thread_pool, upload_private_gs_file_from_string, gcs_client, batch_bucket_name, path, log)\n return f'gs://{batch_bucket_name}/{path}'\n\n\nasync def read_gs_log_file(thread_pool, uri):\n if uri is not None:\n assert uri.startswith('gs://')\n uri = uri.lstrip('gs://').split('/')\n bucket_name = uri[0]\n path = 
'/'.join(uri[1:])\n try:\n return await blocking_to_async(thread_pool, download_gs_file_as_string, gcs_client, bucket_name, path)\n except google.api_core.exceptions.NotFound:\n return None\n return None\n\n\nasync def delete_gs_log_file(thread_pool, instance_id, job_id, task_name):\n path = _gs_log_path(instance_id, job_id, task_name)\n try:\n await blocking_to_async(thread_pool, delete_gs_file, gcs_client, batch_bucket_name, path)\n except google.api_core.exceptions.NotFound:\n pass\n\n\nasync def blocking_to_async(thread_pool, f, *args, **kwargs):\n return await asyncio.get_event_loop().run_in_executor(\n thread_pool, lambda: f(*args, **kwargs))\n","sub_path":"batch/batch/server/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"179756889","text":"from datetime import datetime\n\nfrom flask import Flask, jsonify, request, make_response\n\nfrom Model.department import Department\nfrom Model.patient import Patient\nfrom Model.doctor import Doctor\n\napp = Flask(__name__)\ndepartment = Department(name=\"Surgery\")\ndepartment.save()\ndepartment.update_entities()\n\n\n@app.route(\"/department/\", methods=[\"POST\"])\ndef add_person(person_type):\n data = request.json\n _dateObj = data[\"date_of_birth\"]\n _strObj = datetime.strptime(_dateObj, \"%d-%b-%Y\")\n if not data:\n return make_response(\"No JSON. Check headers and JSON format.\", 400)\n print(data[\"date_of_birth\"])\n person = department.get_person_by_id(data['id'])\n if person:\n return make_response(\"Invalid record! Person ID already exists.\", 404)\n\n try:\n if data['id'][0:1] == 'P' and person_type == 'Patient':\n person = Patient(firstName=data[\"first_name\"], lastName=data[\"last_name\"],\n date_of_birth=_strObj, address=data[\"address\"], person_id=data[\"id\"],\n is_released=data[\"is_released\"], room_num=data[\"room_num\"], bill=data[\"bill\"])\n department.add_person(person)\n\n elif data['id'][0:1] == 'D' and person_type == 'Doctor':\n person = Doctor(firstName=data[\"first_name\"], lastName=data[\"last_name\"],\n date_of_birth=_strObj, address=data[\"address\"], person_id=data[\"id\"],\n is_released=data[\"is_released\"], office_num=data[\"office_num\"], income=data[\"income\"])\n department.add_person(person)\n else:\n return make_response(f\"Invalid ID {data['id']} for {person_type} type\", 404)\n return make_response(f\"Person record (ID: {str(data['id'])}) has been added successfully.\", 200)\n except Exception as err:\n return make_response(\"Cannot add a person record to the department due to: \" + str(err), 400)\n\n\n# it works\n@app.route(\"/department/person/all\", methods=[\"GET\"])\ndef list_persons():\n department.update_entities()\n return make_response(jsonify(department.to_dict()), 200)\n\n\n# it works\n@app.route(\"/department/persons/all/\", methods=[\"GET\"])\ndef get_person_by_type(person_type):\n person = department.get_person_by_type(person_type)\n if not person:\n return make_response(\"Person Not Found.\", 400)\n\n return make_response(jsonify(department.get_person_by_type(person_type)), 200)\n\n# it works\n@app.route(\"/department/person/\", methods=[\"GET\"])\ndef get_person(person_id):\n person = department.get_person_by_id(person_id)\n if not person:\n return make_response(\"Person not found.\", 404)\n try:\n return make_response(jsonify(department.get_person_by_id(person_id).to_dict()), 200)\n except AttributeError as err:\n return make_response(f\"Cannot get 
a person (ID: {str(person_id)}) record in the department due to: \" + str(err), 404)\n\n# it works\n@app.route(\"/department/persons/stats\", methods=[\"GET\"])\ndef get_stats():\n return make_response(jsonify(department.get_statistics().to_dict()), 200)\n\n# it works\n@app.route(\"/department/person//\", methods=[\"PUT\"])\ndef update_person(person_id, person_type):\n data = request.json\n if not data:\n return make_response(\"No JSON. Check headers and JSON format.\", 404)\n\n update_key = [\"first_name\", \"last_name\", \"office_room_num\", \"bill_income\", \"address\"]\n\n person = department.get_person_by_id(person_id)\n if not person:\n return make_response(\"Person not found.\", 404)\n\n for key in update_key:\n if key not in data.keys():\n return make_response(\"Invalid value! There is an empty attribute's value.\", 404)\n\n try:\n if person_type == 'Patient':\n department.update_person(person_id, data[\"first_name\"], data[\"last_name\"],\n data[\"office_room_num\"], data[\"bill_income\"], data[\"address\"])\n return make_response(f\"Patient record (ID: {str(person_id)}) has been updated successfully.\", 200)\n elif person_type == 'Doctor':\n department.update_person(person_id, data[\"first_name\"], data[\"last_name\"],\n data[\"office_room_num\"], data[\"bill_income\"], data[\"address\"])\n return make_response(f\"Doctor record (ID: {str(person_id)}) has been updated successfully.\", 200)\n except Exception as err:\n return make_response(\"Update record error due to: \" + str(err), 404)\n\n\n@app.route(\"/department/person/\", methods=[\"DELETE\"])\ndef delete_person(person_id):\n person = department.get_person_by_id(person_id)\n if not person:\n return make_response(\"Person not found.\", 404)\n department.remove_person_by_id(person_id)\n return make_response(f\"Person record (ID: {str(person_id)}) has been removed successfully.\", 200)\n\n\n@app.route(\"/validate\", methods=[\"GET\", \"POST\", \"PUT\", \"DELETE\"])\ndef validate_setup():\n return jsonify(\n {\n \"method\": request.method,\n \"Content-Type header\": request.headers.get(\"Content-Type\"),\n \"data\": request.data.decode(),\n }\n )\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"Assignment 4/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"145898483","text":"import os\nimport psycopg2\nimport sqlite3\nfrom psycopg2.extras import execute_values\nimport json\nimport pandas as pd\nfrom psycopg2.extras import DictCursor\nfrom dotenv import load_dotenv\n\n## Setting up PostgreSQL Connection\n\nload_dotenv()\n\nDB_NAME = os.getenv('DB_NAME1', default='Check env variables')\nDB_USER = os.getenv('DB_USER1', default='Check env variables')\nDB_PASSWORD = os.getenv('DB_PASSWORD1', default='Check env variables')\nDB_HOST = os.getenv('DB_HOST1', default='Check env variables')\n\nconnection = psycopg2.connect(dbname = DB_NAME, user = DB_USER,\n password = DB_PASSWORD, host = DB_HOST)\nprint(\"CONNECTION:\", connection)\n\ncursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)\nprint(\"CURSOR:\", cursor)\n\ncreate_table = \"\"\"CREATE TABLE IF NOT EXISTS rpg_data (character_id SERIAL PRIMARY KEY, \nname varchar(30) NOT NULL, \nlevel int, \nexp int, \nhp int, \nstrength int, \nintelligence int, \ndexterity int, \nwisdom int);\n\"\"\"\n\ntable_query = \"SELECT * FROM rpg_data\"\n\ncursor.execute(create_table)\ncursor.execute(table_query)\nconnection.commit()\nresult = 
cursor.fetchall()\nprint(\"RESULT:\", type(result))\n# print(result)\n\n# Connecting to SQLite3 DB for RPG Data\n\nsl_conn = sqlite3.connect('rpg_db.sqlite3')\nsl_cursor = sl_conn.cursor()\ncharacters = sl_conn.execute('SELECT * FROM charactercreator_character').fetchall()\nprint(characters)\n\n## Inserting SQLite data into PostgreSQL DB\n\nfor character in characters:\n insert_query_pg = f\"\"\"INSERT INTO rpg_data (character_id, name, level, exp, hp, strength, intelligence, dexterity, wisdom) VALUES \n {character}\"\"\"\n\n print(insert_query_pg)\n\n cursor.execute(insert_query_pg)\n\nconnection.commit()","sub_path":"module2-sql-for-analysis/rpg_queries.py","file_name":"rpg_queries.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"425170379","text":"from flask_restful import Resource,reqparse\nfrom Loadbalance.Loadbalance import Loadbalance\nfrom flask import jsonify\nclass Customers(Resource):\n def get(self):\n loadbalance = Loadbalance()\n query_string = \"select * from customer\"\n return jsonify(loadbalance.exec(query_string))\n\n def post(self):\n loadbalance = Loadbalance()\n parser = reqparse.RequestParser()\n parser.add_argument('city', type=str, help='city must exist',required=True)\n parser.add_argument('cus_id', type=int, help='customer id cannot be null value',required=True)\n parser.add_argument('dob', type=str, help='date of birth cannot be null value',required=True)\n parser.add_argument('email', type=str, help='email must exist',required=True)\n parser.add_argument('firstname', type=str,help='firstname must exist',required=True)\n parser.add_argument('gender', type=str, help='gender must exist',required=True)\n parser.add_argument('lastname', type=str, help='lastname must exist',required=True)\n parser.add_argument('nationality', type=str, help='nationality must exist',required=True)\n parser.add_argument('postalcode', type=str, help='postalcode must exist',required=True)\n parser.add_argument('salary', type=str, help='salary cannot be negative number',required=True)\n parser.add_argument('state', type=str, help='state must exist',required=True)\n parser.add_argument('street', type=str, help='street must exist',required=True)\n args = parser.parse_args()\n query_string = \"insert into customer(city,cus_id,dob,email,firstname,gender,lastname,nationality,postalcode,salary,state,street) values ({},{},{},{},{},{},{},{},{},{},{},{})\".format(args[\"city\"],args[\"cus_id\"],args[\"dob\"],args[\"email\"],args[\"firstname\"],args[\"gender\"],args[\"lastname\"],args[\"nationality\"],args[\"postalcode\"],args[\"salary\"],args[\"state\"],args[\"street\"])\n print(query_string)\n print(jsonify(loadbalance.execOne(query_string)))\n return jsonify({\"message\":\"customer has been created\"})\n\n def put(self):\n next()\n\n def delete(self):\n next()","sub_path":"Database-service/Resource/Customers.py","file_name":"Customers.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"305741101","text":"\"\"\"Unit test for Treadmill rrdutils module.\n\"\"\"\n\nimport unittest\n\nimport mock\n\nfrom treadmill import rrdutils\n\n\nclass RrdUtilsTest(unittest.TestCase):\n \"\"\"This contains the treadmill.rrdutils tests.\"\"\"\n\n @mock.patch('treadmill.subproc.check_output')\n @mock.patch('subprocess.check_output')\n def test_first(self, subprocess_mock, subproc_mock):\n \"\"\"Test the function that 
returns the first ts in the designated RRA.\n \"\"\"\n rrdutils.first('foo.rrd', 'no_such_timeframe')\n subproc_mock.assert_called_with(\n [rrdutils.RRDTOOL, 'first', 'foo.rrd', '--daemon',\n 'unix:%s' % rrdutils.SOCKET, '--rraindex',\n rrdutils.TIMEFRAME_TO_RRA_IDX['short']])\n\n rrdutils.first('foo.rrd', 'long', exec_on_node=False)\n subprocess_mock.assert_called_with(\n [rrdutils.RRDTOOL, 'first', 'foo.rrd', '--rraindex',\n rrdutils.TIMEFRAME_TO_RRA_IDX['long']])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/rrdutils_test.py","file_name":"rrdutils_test.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"407654920","text":"\n# coding: utf-8\n\n# # Resampling and frequency\n\n# ### Resampling and frequency\n# \n# ### Pandas provides methods for resampling time series data. When downsampling or upsampling, the syntax is similar, but the methods called are different. Both use the concept of 'method chaining' - df.method1().method2().method3() - to direct the output from one method call to the input of the next, and so on, as a sequence of operations, one feeding into the next.\n# \n# ### For example, if you have hourly data, and just need daily data, pandas will not guess how to throw out the 23 of 24 points. You must specify this in the method. One approach, for instance, could be to take the mean, as in df.resample('D').mean().\n# \n# ### In this exercise, a data set containing hourly temperature data has been pre-loaded for you. Your job is to resample the data using a variety of aggregation methods to answer a few questions\n\n# In[1]:\n\nimport pandas as pd\n\n\n# In[2]:\n\nimport numpy as np\n\n\n# In[3]:\n\ndates = pd.date_range(start='2010-01-01', end='2010-12-31', freq='H')\n\n\n# In[4]:\n\ndates\n\n\n# In[6]:\n\nlen(dates)\n\n\n# In[15]:\n\ntemp = np.random.randint(40, 50, 8737) * 1.03\n\n\n# In[17]:\n\ndew_point = np.random.randint(30, 40, 8737) * 1.04\n\n\n# In[21]:\n\nx=pd.Series([1.0])\n\n\n# In[22]:\n\nx\n\n\n# In[25]:\n\npressure = x.repeat(8737)#Pandas repeat function\n\n\n# In[27]:\n\ndf = pd.DataFrame({'Date':dates, 'temperature':temp, 'DewPoint':dew_point, 'Pressure': pressure})\n\n\n# In[28]:\n\ndf.head()\n\n\n# In[29]:\n\ndf.set_index('Date', inplace=True)\n\n\n# In[30]:\n\ndf.head()\n\n\n# In[37]:\n\n# Downsample to 6 hour data and aggregate by mean: df1\ndf1 = df['temperature'].resample('6h').mean()\n\n\n# In[38]:\n\ndf1.head()\n\n\n# In[35]:\n\n# Downsample to daily data and count the number of data points: df2\ndf2 = df['temperature'].resample('D').count()\n\n\n# In[36]:\n\ndf2.head()\n\n\n# In[ ]:\n\n\n\n","sub_path":"Resampling_and_Frequency.py","file_name":"Resampling_and_Frequency.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"612514780","text":"from ortools.sat.python import cp_model\nimport numpy as np\nimport pandas as pd\nfrom bokeh.io import show, output_notebook\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.plotting import figure\nfrom ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\nimport json\n# plotting imports\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style(\"darkgrid\")\n# for reading files from urls\nimport urllib.request\n# display imports\nfrom IPython.display import display, IFrame\nfrom IPython.core.display import HTML\n\n\n# generalizable 
schedule code\n\ntask_duration_dict = {\n 'A': 2,\n 'B': 4,\n 'C': 10,\n 'D': 6,\n 'E': 4,\n 'F': 5,\n 'G': 7,\n 'H': 9,\n 'I': 7,\n 'J': 8,\n 'K': 4,\n 'L': 5,\n 'M': 2,\n 'N': 6\n}\ntask_names = list(task_duration_dict.keys())\nnum_tasks = len(task_names)\ndurations = list(task_duration_dict.values())\n\n# for each task we have a list of tasks that must go after\n# task:['these','tasks','after']\nprecedence_dict = {\n 'B': ['A'],\n 'C': ['B'],\n 'D': ['C'],\n 'E': ['C'],\n 'F': ['E'],\n 'G': ['D'],\n 'H': ['E','G'],\n 'I': ['C'],\n 'J': ['F', 'I'],\n 'K': ['J'],\n 'L': ['J'],\n 'M': ['H'],\n 'N': ['K', 'L']\n}\n\ntask_name_to_number_dict = dict(zip(task_names, np.arange(0, num_tasks)))\n\nhorizon = sum(task_duration_dict.values())\n\nmodel = cp_model.CpModel()\n\nstart_vars = [\n model.NewIntVar(0, horizon, name=f'start_{t}') for t in task_names\n]\nend_vars = [model.NewIntVar(0, horizon, name=f'end_{t}') for t in task_names]\n\n# the `NewIntervalVar` are both variables and constraints, the internally enforce that start + duration = end\nintervals = [\n model.NewIntervalVar(start_vars[i],\n durations[i],\n end_vars[i],\n name=f'interval_{task_names[i]}')\n for i in range(num_tasks)\n]\n\n# precedence constraints\nfor after in list(precedence_dict.keys()):\n for before in precedence_dict[after]:\n before_index = task_name_to_number_dict[before]\n after_index = task_name_to_number_dict[after]\n model.Add(end_vars[before_index] <= start_vars[after_index])\n\nobj_var = model.NewIntVar(0, horizon, 'largest_end_time')\nmodel.AddMaxEquality(obj_var, end_vars)\nmodel.Minimize(obj_var)\n\nsolver = cp_model.CpSolver()\nstatus = solver.Solve(model)\n\nprint(f'Optimal Schedule Length: {solver.ObjectiveValue()}')\nfor i in range(num_tasks):\n print(\n f'{task_names[i]} start at {solver.Value(start_vars[i])} and end at {solver.Value(end_vars[i])}'\n )\n\n# output_notebook()\n\nstarts = [solver.Value(start_vars[i]) for i in range(num_tasks)]\nends = [solver.Value(end_vars[i]) for i in range(num_tasks)]\n\nsource = ColumnDataSource(data=dict(tasks=task_names, starts = starts, ends=ends))\n\np = figure(x_range=(0,solver.ObjectiveValue()), y_range=task_names, plot_height=350, title=\"Task Time Spans\",\n toolbar_location=None, tools=\"\")\n\np.hbar(y='tasks', left='starts', right='ends', height=0.9, source=source)\n\np.xaxis.axis_label = \"Time\"\np.ygrid.grid_line_color = None\n\nshow(p)","sub_path":"Homework/Lesson 07 Homework - CP/HW7.4.py","file_name":"HW7.4.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"408162770","text":"import re\nfrom django.core.exceptions import ValidationError\nfrom django.utils.crypto import get_random_string\nfrom django.utils.translation import gettext_lazy as _\n\n\ndef validate_url(value):\n \"\"\"\n Валидация ссылок на видео с видеохостинга YouTube\n \"\"\"\n video_url = value\n regex = re.compile(\n r\"(https?://)?(www\\.)?(youtube|youtu|youtube-nocookie)\\.(com|be)/(watch\\?v=|embed/|v/|.+\\?v=)?(?P[A-Za-z0-9\\-=_]{11})\"\n )\n match = regex.match(video_url)\n if not match:\n raise ValidationError(\n _(\"%(value)s не является корректной YouTube ссылкой\"),\n params={\"value\": value},\n )\n\n\ndef generate_secret_key():\n chars = \"abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)\"\n return get_random_string(50, 
chars)\n","sub_path":"extend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"176567631","text":"# -*- coding: UTF-8 -*-\nimport os\nimport selenium\nimport collections\nimport pandas as pd\nfrom selenium import webdriver\n\ncoffeeData = pd.DataFrame(pd.read_csv(\"../coffee/Coffee-clean.csv\", encoding=\"utf-8\"))\ncountries = set(coffeeData[\"countryOfOrigin\"])\n\n\"\"\"website for ISO code\"\"\"\nwebsiteCrawling = \"https://unstats.un.org/unsd/methodology/m49/\"\n\n\"\"\"setting the chrome driver for data crawling\"\"\"\nchromeDriver = webdriver.Chrome(executable_path=\"chromedriver.exe\")\nchromeDriver.get(websiteCrawling)\ncountryCodeTable = chromeDriver.find_element_by_xpath(\"//*[@id='ENG_COUNTRIES']/table/tbody\")\ncountryRow = countryCodeTable.find_elements_by_tag_name(\"tr\")\ncountryCodeDict = collections.defaultdict(lambda: \"\")\nfor i in range(1, len(countryRow)):\n\tcountry = countryRow[i].find_element_by_xpath(\"./td[1]\").text.strip()\n\tISOcode = countryRow[i].find_element_by_xpath(\"./td[3]\").text.strip()\n\tcountryCodeDict[country] = ISOcode\nchromeDriver.quit()\n\n\"\"\" write dict to csv file \"\"\"\ncountryCodeDict[\"Taiwan\"] = \"TWN\" # special add Taiwan\n\ncsvWriteDict = {\"Country\":list(), \"ISO3\":list()}\nfor country in countries:\n\tcsvWriteDict[\"Country\"].append(country)\n\tcsvWriteDict[\"ISO3\"].append(countryCodeDict[country])\ncountryCodeDF = pd.DataFrame(data=csvWriteDict)\ncountryCodeDF.to_csv(\"CountryCode.csv\", index=False)\n\n\"\"\" extract coordinates and iso2 code \"\"\"\ncountryInCoffee = pd.DataFrame(pd.read_csv(\"CountryCode.csv\", encoding=\"utf-8\"))\ncountryInfoAll = pd.DataFrame(pd.read_csv(\"CountriesCoordinates.csv\", encoding=\"utf-8\"))\n\ncountryCodeMap = dict()\nfor row in range(countryInCoffee.shape[0]):\n\tcountryCodeMap[countryInCoffee[\"ISO3\"][row]] = countryInCoffee[\"Country\"][row]\n\nremoveIndex = countryInfoAll[\"Alpha-3 code\"].index[countryInfoAll[\"Alpha-3 code\"].apply(lambda x: True if x.strip() not in countryCodeMap.keys() else False)]\nfor index in removeIndex:\n countryInfoAll.drop(axis=0, index=index, inplace=True)\n\ncountryCodeCoordinateWriteDict = {\"Country\": list(), \"ISO3\":list(), \"ISO2\": list(), \"lat\": list(), \"lng\": list()}\nfor index, row in countryInfoAll.iterrows():\n\tcountryISO3 = countryInfoAll[\"Alpha-3 code\"][index].strip()\n\tcountryCodeCoordinateWriteDict[\"Country\"].append(countryCodeMap[countryISO3])\n\tcountryCodeCoordinateWriteDict[\"ISO3\"].append(countryISO3)\n\tcountryCodeCoordinateWriteDict[\"ISO2\"].append(countryInfoAll[\"Alpha-2 code\"][index].strip())\n\tcountryCodeCoordinateWriteDict[\"lat\"].append(str(float(countryInfoAll[\"Latitude (average)\"][index])))\n\tcountryCodeCoordinateWriteDict[\"lng\"].append(str(float(countryInfoAll[\"Longitude (average)\"][index])))\npd.DataFrame(data=countryCodeCoordinateWriteDict).to_csv(\"CoffeeCountryInfo.csv\", index=False)\n\nos.remove(\"CountryCode.csv\")","sub_path":"data/country/CountryCodeCoordinateExtract.py","file_name":"CountryCodeCoordinateExtract.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"624439270","text":"import random\n\nword_list = [\"adana\", \"adıyaman\", \"afyon\", \"ağrı\", \"amasya\", \"ankara\",\n \"antalya\", \"artvin\", \"aydın\", \"balıkesir\", \"bilecik\",\n \"bingöl\", 
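# Editor's note, hedged: the manual dict/drop bookkeeping in the country-code script
# above is what pandas' merge does in one call; sketch with hypothetical frames shaped
# like the two CSVs:
import pandas as pd

coffee = pd.DataFrame({'Country': ['Brazil', 'Taiwan'], 'ISO3': ['BRA', 'TWN']})
coords = pd.DataFrame({'Alpha-3 code': ['BRA', 'USA'], 'Latitude (average)': [-10.0, 38.0]})
merged = coffee.merge(coords, left_on='ISO3', right_on='Alpha-3 code', how='inner')
assert list(merged['Country']) == ['Brazil']  # only the shared ISO3 survives the join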
\"bitlis\", \"bolu\", \"burdur\", \"bursa\", \"çanakkale\",\n \"çankırı\", \"çorum\", \"denizli\", \"diyarbakır\", \"edirne\",\n \"elazığ\", \"erzincan\", \"erzurum\", \"eskişehir\", \"gaziantep\",\n \"giresun\", \"gümüşhane\", \"hakkari\", \"hatay\", \"Isparta\",\n \"mersin\", \"istanbul\", \"izmir\", \"kars\", \"kastamonu\",\n \"kayseri\", \"kırklareli\", \"kırşehir\", \"kocaeli\", \"konya\",\n \"kütahya\", \"malatya\", \"manisa\", \"kahramanmaraş\", \"mardin\",\n \"muğla\", \"muş\", \"nevşehir\", \"niğde\", \"ordu\", \"rize\", \"sakarya\",\n \"samsun\", \"siirt\", \"sinop\", \"sivas\", \"tekirdağ\", \"tokat\",\n \"trabzon\", \"tunceli\", \"şanlıurfa\", \"uşak\", \"van\", \"yozgat\",\n \"zonguldak\", \"aksaray\", \"bayburt\", \"karaman\", \"kırıkkale\",\n \"batman\", \"şırnak\", \"bartın\", \"ardahan\", \"ığdır\", \"yalova\",\n \"karabük\", \"kilis\", \"osmaniye\", \"düzce\"\n ]\nchosen_word = random.choice(word_list)\nguess = input(\"Guess a letter: \").lower()\n\nfor letter in chosen_word:\n if letter == guess:\n print(\"Right\")\n else:\n print(\"Wrong!\")","sub_path":"Day-7/Hangman-1-Start.py","file_name":"Hangman-1-Start.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"120515271","text":"#-*- coding: utf-8 -*-\n# Creation Date : 2017-02-05\n# Created by : Antoine LeBel\nfrom . import transportable\n\nclass Phaser(transportable.Transportable):\n TYPE = \"Phaser\"\n CONSTRUCT = [int, int]\n\n def __init__(self, nom, args):\n transportable.Transportable.__init__(self, nom)\n self.construire(args)\n\n def construire(self, args):\n if self.valide(args, self.CONSTRUCT):\n self._volume = args[0]\n self._masse = args[1]\n else:\n self.erreur_non_construction();\n","sub_path":"farstar/phaser.py","file_name":"phaser.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"526714746","text":"from django.conf.urls import url, include\nfrom django.views.generic import TemplateView\nfrom viewflow import views as viewflow\nfrom ticket.flows import VacateFlow\nfrom material.frontend import urls as frontend_urls\n\n\nurlpatterns = [\n url(r'^vacate/',\n include([\n VacateFlow.instance.urls,\n # url('^$', ProcessListView.as_view(), name='index'),\n # url('^tasks/$', viewflow.TaskListView.as_view(), name='tasks'),\n # url('^queue/$', viewflow.QueueListView.as_view(), name='queue'),\n url('^details/(?P\\d+)/$', viewflow.ProcessDetailView.as_view(), name='details')\n ], namespace=VacateFlow.instance.namespace),\n {'flow_cls': VacateFlow} ),\n\n url(r'', include(frontend_urls)),\n]\n\nurlpatterns += [\n url('^$', TemplateView.as_view(template_name=\"ticket/index.html\"), name=\"index\"),\n url('^customers/$', TemplateView.as_view(template_name=\"ticket/index.html\"),\n name=\"leads\"),\n url('^leads/$', TemplateView.as_view(template_name=\"ticket/index.html\"),\n name=\"leads\"),\n url('^opportunities/$', TemplateView.as_view(template_name=\"ticket/index.html\"),\n name=\"opportunities\"),\n]","sub_path":"ticket/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"315333196","text":"# time: O(n) | space: O(min(n, m)) where m is the length of all the unqiue characters in the input string\nclass Solution(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n 
\"\"\"\n if len(s) == 0:\n return 0\n lastSeen = {}\n longest = [0, 1]\n startIdx = 0\n for i, char in enumerate(s):\n if char in lastSeen:\n startIdx = max(startIdx, lastSeen[char] + 1)\n if longest[1] - longest[0] < i + 1 - startIdx:\n longest = [startIdx, i + 1]\n lastSeen[char] = i\n\n return longest[1] - longest[0]\n","sub_path":"2.25/longest_unique_substring.py","file_name":"longest_unique_substring.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"353752038","text":"try:\n import cPickle as pickle\nexcept ImportError:\n import pickle\nimport json\nimport multiprocessing\nimport os\nimport socket\nimport sys\nimport tempfile\nimport time\nimport traceback\nfrom collections import OrderedDict\nfrom shutil import rmtree\n\nimport dill\n\nfrom django.conf import settings\nfrom django.core.handlers.wsgi import WSGIRequest\n\n\nPY3 = sys.version_info[0] == 3\n\n# PyPy 2 doesn't allow importing of the reduction module because of a platform\n# mismatch issue. We prefer to use it so try and import it in any case. There\n# is a file-based fallback code path.\nPIPES_POSSIBLE = False\ntry:\n from multiprocessing import reduction\n PIPES_POSSIBLE = True\nexcept ImportError:\n pass\n\n# Python 3 and PyPy 3 allows checking for SCM_RIGHTS. Without these rights\n# pipes aren't possible.\nif PY3 and not hasattr(socket, \"SCM_RIGHTS\"):\n PIPES_POSSIBLE = False\n\n# Python 3.5 deprecates the reduce_connection function. Until I figure out how\n# to do it the new way provide these functions.\nif PIPES_POSSIBLE and not hasattr(reduction, \"reduce_connection\"):\n\n def reduce_connection(conn):\n df = reduction.DupFd(conn.fileno())\n return rebuild_connection, (df, conn.readable, conn.writable)\n\n def rebuild_connection(df, readable, writable):\n from multiprocessing.connection import Connection\n fd = df.detach()\n return Connection(fd, readable, writable)\n\n reduction.reduce_connection = reduce_connection\n reduction.rebuild_connection = rebuild_connection\n\n\ndefault_app_config = \"multicore.app.MulticoreAppConfig\"\nNUMBER_OF_WORKERS = multiprocessing.cpu_count()\n_workers = []\n_queue = None\n\n\nclass Process(multiprocessing.Process):\n \"\"\"Wrap Process so exception handling propagates to parent process\"\"\"\n\n def __init__(self, *args, **kwargs):\n multiprocessing.Process.__init__(self, *args, **kwargs)\n self._pconn, self._cconn = multiprocessing.Pipe()\n self._exception = None\n\n def run(self):\n try:\n multiprocessing.Process.run(self)\n self._cconn.send(None)\n except Exception as e:\n tb = traceback.format_exc()\n self._cconn.send((e, tb))\n raise\n\n @property\n def exception(self):\n if self._pconn.poll():\n self._exception = self._pconn.recv()\n return self._exception\n\n\nclass Traceback(object):\n\n def __init__(self, exc, msg):\n self.exc = exc\n self.msg = msg\n\n def __call__(self):\n raise self.exc.__class__(self.msg)\n\n\nclass TimeoutExceededError(Exception):\n pass\n\n\nclass Task(object):\n\n def __new__(cls, *args, **kwargs):\n # If the load average for the last minute is larger than a defined\n # threshold then don't return a task. Note that the threshold is\n # specified as for a single core machine, so we multiply it with the\n # number of workers. 
\"None\" is the default and always allows a task\n # to be returned.\n try:\n v = settings.MULTICORE[\"max-load-average\"]\n except (AttributeError, KeyError):\n v = None\n if (v is not None) and (os.getloadavg()[0] > v * NUMBER_OF_WORKERS):\n return None\n return super(Task, cls).__new__(cls, *args, **kwargs)\n\n def __init__(self, **kwargs):\n self.count = 0\n self.use_pipes = use_pipes()\n if self.use_pipes:\n self.receivers = OrderedDict()\n else:\n self.path = tempfile.mkdtemp()\n\n def run(self, runnable, *args, **kwargs):\n serialization_format = kwargs.pop(\"serialization_format\", \"pickle\")\n if serialization_format not in (\"pickle\", \"json\", \"string\"):\n raise RuntimeError(\n \"Unrecognized serialization_format %s\" % serialization_format\n )\n\n if self.use_pipes:\n # http://stackoverflow.com/questions/1446004/python-2-6-send-connection-object-over-queue-pipe-etc\n # expains why reduction is required.\n receiver, pipe = multiprocessing.Pipe(False)\n self.receivers[self.count] = receiver\n arg = pickle.dumps(reduction.reduce_connection(pipe))\n else:\n arg = self.path\n\n _queue.put((\n self.count, arg, dill.dumps(runnable), serialization_format,\n dill.dumps(args), dill.dumps(kwargs)\n ))\n self.count += 1\n\n def get(self, timeout=10.0):\n datas = [None] * self.count\n\n if self.use_pipes:\n for i, receiver in self.receivers.items():\n data = receiver.recv()\n datas[i] = data\n\n else:\n # Monitor directory to see if files are complete. Exhaustive checks\n # are luckily quite fast.\n start = time.time()\n while True:\n filenames = os.listdir(self.path)\n if (len(filenames) == self.count):\n filenames.sort(key=lambda f: int(f))\n filenames = [os.path.join(self.path, f) for f in filenames]\n if all([os.path.getsize(f) for f in filenames]):\n for n, filename in enumerate(filenames):\n fp = open(filename, \"r\")\n try:\n datas[n] = fp.read()\n finally:\n fp.close()\n break\n if time.time() - start > timeout:\n raise TimeoutExceededError()\n time.sleep(0.01)\n rmtree(self.path)\n\n # Convert list and possibly raise exception\n results = []\n for data in datas:\n serialization_format = data[:6].strip()\n if serialization_format == \"pickle\":\n if PY3:\n result = pickle.loads(bytes(data[6:], \"ascii\"))\n else:\n result = pickle.loads(data[6:])\n elif serialization_format == \"json\":\n result = json.loads(data[6:])\n else:\n result = data[6:]\n results.append(result)\n\n if isinstance(result, Traceback):\n result()\n\n return results\n\n\ndef fetch_and_run():\n global _queue\n\n while True:\n\n # Fetch task and run it\n index, pipe_or_path, runnable, serialization_format, args, \\\n kwargs = _queue.get()\n\n if use_pipes():\n f, a = pickle.loads(pipe_or_path)\n pipe = f(*a)\n else:\n path = pipe_or_path\n filename = os.path.join(path, str(index))\n\n runnable = dill.loads(runnable)\n args = dill.loads(args)\n try:\n result = runnable(*args)\n\n if serialization_format == \"pickle\":\n if PY3:\n serialized = pickle.dumps(result, 0).decode()\n else:\n serialized = pickle.dumps(result)\n elif serialization_format == \"json\":\n # We need it to be 6 chars\n serialization_format = \"json \"\n serialized = json.dumps(result, indent=4)\n elif serialization_format == \"string\":\n serialized = result\n if use_pipes():\n pipe.send(serialization_format + serialized)\n pipe.close()\n else:\n fp = open(filename, \"w\")\n try:\n fp.write(serialization_format + serialized)\n finally:\n fp.close()\n\n except Exception as exc:\n msg = traceback.format_exc()\n if PY3:\n pickled = 
pickle.dumps(Traceback(exc, msg), 0).decode()\n else:\n pickled = pickle.dumps(Traceback(exc, msg))\n if use_pipes():\n pipe.send(\n serialization_format\n + pickled\n )\n pipe.close()\n else:\n fp = open(filename, \"w\")\n try:\n fp.write(\n \"pickle\" \\\n + pickled\n )\n finally:\n fp.close()\n\n\ndef use_pipes():\n try:\n return getattr(settings, \"MULTICORE\", {}).get(\"pipes\", True) \\\n and PIPES_POSSIBLE\n except (AttributeError, KeyError):\n return False\n\n\ndef initialize():\n \"\"\"Start the queue workers if needed. Called by app.ready and possibly unit\n tests.\"\"\"\n\n global NUMBER_OF_WORKERS\n global _queue\n global _workers\n\n # If we already have a queue do nothing\n if _queue is not None:\n return\n\n _queue = multiprocessing.Manager().Queue()\n\n for i in range(0, NUMBER_OF_WORKERS):\n p = Process(target=fetch_and_run)\n _workers.append(p)\n p.start()\n\n\ndef shutdown():\n \"\"\"Stop the queue workers. Called by unit tests.\"\"\"\n\n global _queue\n global _workers\n\n # Immediately set running to false so workers may exit\n for p in _workers:\n # We can't join because we have no way of notifying the worker to stop\n # looping in a clean way. todo: send a message on the queue?\n p.terminate()\n del p\n\n del _queue\n _queue = None\n _workers = []\n","sub_path":"multicore/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"188628211","text":"\"\"\"\nContains convenient types to work with when attempting to work with\ndns protocol.\n\"\"\"\nimport re\nfrom .exceptions import *\nfrom socket import inet_aton\nimport re\n\nclass DnsQuery(object):\n\t\"\"\"\n\tRepresents a parsed DNS query\n\t\"\"\"\n\n\tdef __init__(self, query):\n\t\t# Parse the query\n\t\tparsed_query = _parse_query(query)\n\n\t\t# assign the header fields\n\t\tself.transaction_id = parsed_query[0]\n\t\tself.flags = parsed_query[1]\n\t\tself.questions = parsed_query[2]\n\t\tself.answer_rrs = parsed_query[3]\n\t\tself.authority_rrs = parsed_query[4]\n\t\tself.additional_rrs = parsed_query[5]\n\t\tself.name = parsed_query[6]\n\t\tself.dns_type = parsed_query[7]\n\t\tself.dns_class = parsed_query[8]\n\n\tdef get_name(self):\n\t\t\"\"\"\n\t\tReturns a textual representation of the queried domain name\n\t\t\"\"\"\n\t\treturn self.name[1:-1].decode().replace(\n\t\t\t'\\x03', '.').replace('\\x02', '.').replace('\\x06', '.').\\\n\t\t\treplace('\\x08', '.')\n\n\tdef __bytes__(self):\n\t\treturn self.transaction_id + self.flags + self.questions + \\\n\t\t\t self.answer_rrs + self.authority_rrs + self.additional_rrs + \\\n\t\t\t self.name + self.dns_type + self.dns_class\n\n\nclass DnsResponse(object):\n\t\"\"\"\n\tRepresents a DNS response\n\t\"\"\"\n\tdef __init__(self, response):\n\t\tparsed_response = _parse_response(response)\n\n\t\t# assign header fields -> (transaction_id, flags, questions, answer_rrs,\n\t\t#\t\t\t\t\t\t authority_rrs, additional_rrs, name,\n\t\t#\t\t\t\t\t\t dns_type, dns_class, Answers)\n\t\tself.transaction_id = parsed_response[0]\n\t\tself.flags = parsed_response[1]\n\t\tself.questions = parsed_response[2]\n\t\tself.answer_rrs = parsed_response[3]\n\t\tself.authority_rrs = parsed_response[4]\n\t\tself.additional_rrs = parsed_response[5]\n\t\tself.name = parsed_response[6]\n\t\tself.dns_type = parsed_response[7]\n\t\tself.dns_class = parsed_response[8]\n\t\tself.answers = parsed_response[9]\n\n\tdef __bytes__(self):\n\t\treturn self.transaction_id + 
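# Editor's note, hedged: the fixed 12-byte DNS header that _parse_query slices by hand
# below unpacks in one struct call; sketch assuming network byte order per RFC 1035:
import struct

def parse_header(msg):
    # six big-endian unsigned shorts: id, flags, qd/an/ns/ar counts
    return struct.unpack('!6H', msg[:12])

hdr = struct.pack('!6H', 0x1234, 0x0100, 1, 0, 0, 0)
assert parse_header(hdr) == (0x1234, 0x0100, 1, 0, 0, 0)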
self.flags + self.questions + \\\n\t\t\t self.answer_rrs + self.authority_rrs + self.additional_rrs + \\\n\t\t\t self.name + self.dns_type + self.dns_class + \\\n\t\t\t b''.join(bytes(answer) for answer in self.answers)\n\n\nclass Answer(object):\n\tdef __init__(self, answer):\n\t\tparsed_answer = _parse_answer(answer)\n\n\t\t# assign header fields -> (Name, Type, Class, TTL, Data length, address)\n\t\tself.name = parsed_answer[0]\n\t\tself.answer_type = parsed_answer[1]\n\t\tself.answer_class = parsed_answer[2]\n\t\tself.ttl = parsed_answer[3]\n\t\tself.data_length = parsed_answer[4]\n\t\tself.address = parsed_answer[5]\n\n\tdef get_address(self):\n\t\t\"\"\"\n\t\t:return: Textual representation of the IP address\n\t\t\"\"\"\n\t\treturn '.'.join(str(byte) for byte in self.address)\n\n\tdef change_ip(self, ip_addr):\n\t\t\"\"\"\n\t\tChanges the answer IP\n\t\t:param ip_addr: ip address\n\t\t:type ip_addr: str\n\t\t:return: None\n\t\t\"\"\"\n\t\t# Validate IP address\n\t\tif not re.match(r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ip_addr):\n\t\t\traise InvalidIPError\n\t\tif any(int(byte) > 255 for byte in ip_addr.split('.')):\n\t\t\traise InvalidIPError\n\n\t\tself.address = inet_aton(ip_addr)\n\n\tdef __bytes__(self):\n\t\treturn self.name + self.answer_type + self.answer_class + self.ttl + \\\n\t\t\t self.data_length + self.address\n\n\tdef __repr__(self):\n\t\treturn self.get_address()\n\n\ndef _parse_query(query):\n\t\"\"\"\n\tParses a DNS query\n\n\t:param query: DNS query to parse\n\t:return: (transaction_id, flags, questions, answer_rrs, authority_rrs,\n\t \t\t additional_rrs, name, dns_type, dns_class)\n\t\"\"\"\n\t# Validate query\n\tname_ending = b'\\x00' \t# Domain name ending\n\tif len(query) <= 12: raise InvalidQueryError\n\tif name_ending not in query[12:]: raise InvalidQueryError\n\n\ttransaction_id = query[0:2]\n\tflags = query[2:4]\n\tquestions = query[4:6]\n\tanswer_rrs = query[6:8]\n\tauthority_rrs = query[8:10]\n\tadditional_rrs = query[10:12]\n\tname = query[12:query.find(name_ending, 12) + len(name_ending)]\n\tdns_type = query[-4:-2]\n\tdns_class = query[-2:]\n\n\treturn (\n\t\ttransaction_id,\n\t\tflags,\n\t\tquestions,\n\t\tanswer_rrs,\n\t\tauthority_rrs,\n\t\tadditional_rrs,\n\t\tname,\n\t\tdns_type,\n\t\tdns_class\n\t)\n\n\ndef _parse_answer(answer):\n\t\"\"\"\n\tParses a specific answer from the dns response\n\t:param answer: answer to parse\n\t:return: (Name, Type, Class, TTL, Data length, address)\n\t\"\"\"\n\tname = answer[0:2]\n\tanswer_type = answer[2:4]\n\tanswer_class = answer[4:6]\n\tttl = answer[6:10]\n\tdata_length = answer[10:12]\n\taddress = answer[12:]\n\n\treturn (\n\t\tname,\n\t\tanswer_type,\n\t\tanswer_class,\n\t\tttl,\n\t\tdata_length,\n\t\taddress\n\t)\n\n\ndef _parse_response(response):\n\t\"\"\"\n\tParses a DNS response\n\n\t:return:(transaction_id, flags, questions, answer_rrs,\n\t\t authority_rrs, additional_rrs, name,\n\t\t dns_type, dns_class, Answers)\n\t\"\"\"\n\tanswers = []\n\tnew_answer_start = b'\\xc0\\x0c'\n\twhile new_answer_start in response:\n\t\tindex = response.rfind(new_answer_start)\n\t\tanswers.append(Answer(response[index:]))\n\t\tresponse = response[:index]\n\tanswers.reverse()\n\treturn _parse_query(response) + (answers,)\n","sub_path":"dnsproxy/dnstypes.py","file_name":"dnstypes.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"129468824","text":"# coding: utf8\n\ndef index(): # get text from user's file\n form = 
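# Editor's note, hedged: change_ip's regex above is unanchored, so '1.2.3.4.5' slips
# through the first check; socket.inet_aton already rejects malformed dotted quads and
# can serve as the sole validator:
from socket import inet_aton, error as socket_error

def valid_ipv4(addr):
    try:
        inet_aton(addr)
    except (socket_error, OSError):
        return False
    return addr.count('.') == 3  # inet_aton also accepts short forms like '127.1'

assert valid_ipv4('192.168.0.1')
assert not valid_ipv4('1.2.3.4.5') and not valid_ipv4('300.1.1.1')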
SQLFORM.factory(Field('word')).process()\n if form.accepted:\n redirect(URL('context', vars = form.vars))\n return dict(form=form)\n\ndef context1():\n text1 = request.vars.word\n texts = trymysql(trymysql.allword.word==request.vars.word).select()\n strings=[]\n for all in texts:\n location = all.text_location\n title1 = trymysql(trymysql.text1.id==int(all.title)).select()[0]\n title=title1.title\n author= (title1.author.name, title1.author.family)\n string=\"...\"\n for x in range(all.id-6, all.id+6):\n try:\n for_string = trymysql(trymysql.allword.id==x).select()[0]\n if for_string.lemma==\",\" or for_string.lemma=='.' or for_string.lemma=='!':\n string = string + str(for_string.lemma)\n else:\n string= string + \" \" + str(for_string.lemma)\n except:\n pass\n strings.append((string +\"...\", title, author, int(title1.id)))\n return dict(strings=strings)\n\ndef context():\n text1 = request.vars.word\n texts = trymysql(trymysql.mystem.word==request.vars.word).select()\n if len(texts)==0:\n word = text1+'?'\n texts = trymysql(trymysql.mystem.word==word).select()\n strings=[]\n for all in texts:\n location = all.location\n title1 = trymysql(trymysql.text1.id==int(all.title)).select()[0]\n title=title1.title\n author= (title1.author.name, title1.author.family)\n with open(title1.filename, 'r') as f:\n content = f.readlines()\n string = content[int(all.location)-1]\n all_string_words = [[w.lemma, w.id] for w in trymysql((trymysql.mystem.title==all.title)&(trymysql.mystem.location==all.location)).select()]\n color_string = []\n for lemma in all_string_words:\n if lemma[1] == all.id:\n color_word = '&' + lemma[0]\n color_string.append(color_word)\n else:\n color_string.append(lemma[0])\n strings.append((' '.join(color_string) +\"...\", title, author, int(title1.id)))\n return dict(strings=strings)\n","sub_path":"controllers/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"402531240","text":"#Author: Craig Lage, NYU; \n#Date: 20-Mar-13\n\n\n#This program centers the densest grid on the cluster centers\n\nimport sys\nfrom subprocess import *\nfrom pylab import *\n\ndef CalcGridEdges(OuterGridLeftEdge, OuterGridRightEdge, OuterGridN, InnerGridN, InnerGridCenter):\n\tOuterGridD=[]\n\tInnerGridLeftEdge=[]\n\tInnerGridRightEdge=[]\n\tInnerGridD=[]\n\tInnerGridSize=[]\n\tfor i in range(3):\n\t\tStep = (OuterGridRightEdge[i] - OuterGridLeftEdge[i]) / OuterGridN[i]\n\t\tOuterGridD.append(Step)\n\t\tInnerGridD.append(Step / 2.0)\n\t\tInnerGridSize.append(Step/2.0 * InnerGridN[i])\n\tfor i in range(3):\n\t\tInnerGridLeftEdge.append(OuterGridLeftEdge[i] + OuterGridD[i] * round(((InnerGridCenter[i]-InnerGridSize[i]/2.0) - OuterGridLeftEdge[i]) / OuterGridD[i]))\n\t\tInnerGridRightEdge.append(InnerGridLeftEdge[i] + InnerGridSize[i])\n\n\treturn [InnerGridLeftEdge, InnerGridRightEdge]\n\ndef ReadClusterCenters(filename):\n\tfile = open(filename,'r')\n\tlines = file.readlines()\n\tfile.close()\n\tline1 = lines[0].split()\n\tline2 = lines[1].split()\n\tBulletCenter=[float(line1[5]),float(line1[7]),float(line1[9])]\n\tMainCenter=[float(line2[5]),float(line2[7]),float(line2[9])]\n\treturn [MainCenter,BulletCenter]\n\ndef ModAMRTest(filename,BulletLeftEdge,BulletRightEdge,MainLeftEdge,MainRightEdge):\n\tfile = open(filename,'r')\n\tlines = file.readlines()\n\tfile.close()\n\n\tlines[42] = \"CosmologySimulationGridLeftEdge[3] = %.4f %.4f 
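# Editor's note, hedged: the context() controller above rebuilds a word window around
# each hit and marks the hit with '&'. The same windowing on a plain token list:
def context_window(tokens, hit, radius=6):
    lo, hi = max(0, hit - radius), min(len(tokens), hit + radius + 1)
    return ' '.join(('&' + t) if i == hit else t
                    for i, t in enumerate(tokens[lo:hi], start=lo))

toks = 'the quick brown fox jumps over the lazy dog'.split()
assert context_window(toks, 3, radius=2) == 'quick brown &fox jumps over'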
%.4f\\n\"%(BulletLeftEdge[0],BulletLeftEdge[1],BulletLeftEdge[2])\n\tlines[43] = \"CosmologySimulationGridRightEdge[3] = %.4f %.4f %.4f\\n\"%(BulletRightEdge[0],BulletRightEdge[1],BulletRightEdge[2])\n\tlines[46] = \"CosmologySimulationGridLeftEdge[4] = %.4f %.4f %.4f\\n\"%(MainLeftEdge[0],MainLeftEdge[1],MainLeftEdge[2])\n\tlines[47] = \"CosmologySimulationGridRightEdge[4] = %.4f %.4f %.4f\\n\"%(MainRightEdge[0],MainRightEdge[1],MainRightEdge[2])\n\n\tfile = open(filename,'w')\n\tfor line in lines:\n\t\tfile.write(line)\n\tfile.close()\n\t\t\n\treturn\n\n\n#*************************************MAIN PROGRAM*******************************************\ncollisionfilename = 'collision.out'\nAMRfilename = 'AMRTest.enzo'\nOuterGridLeftEdge = [-2906.25,-750.0,-750.0]\nOuterGridRightEdge = [843.75,750.0,750.0]\nOuterGridN = [160,64,64]\nInnerGridN = [32,32,32]\n\n[MainCenter,BulletCenter] = ReadClusterCenters(collisionfilename)\n[BulletLeftEdge,BulletRightEdge] = CalcGridEdges(OuterGridLeftEdge, OuterGridRightEdge, OuterGridN, InnerGridN, BulletCenter)\n[MainLeftEdge,MainRightEdge] = CalcGridEdges(OuterGridLeftEdge, OuterGridRightEdge, OuterGridN, InnerGridN, MainCenter)\n\n#print BulletLeftEdge, BulletRightEdge\n#print MainLeftEdge, MainRightEdge\n\nModAMRTest(AMRfilename,BulletLeftEdge,BulletRightEdge,MainLeftEdge,MainRightEdge)\n\n#*************************************END MAIN PROGRAM*******************************************\n\n\t\t \n","sub_path":"code/pysubs/mod_grid_locations_big.py","file_name":"mod_grid_locations_big.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"15197962","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport cPickle\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom PIL import Image\n\nCWD = os.getcwd()\n\n\ndef _int64_feature(value):\n\treturn tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n\treturn tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef convert_to(images, labels, name):\n\tnum_examples = images.shape[0]\n\trows = images.shape[1]\n\tcols = images.shape[2]\n\tdepth = images.shape[3]\n\tif num_examples!=10000:\n\t\traise ValueError(\"Examples don't match\")\n\n\tfilename = os.path.join(CWD, name + '.tfrecords')\n\tprint('Writing', filename)\n\tprint('Parameters', rows,cols,depth)\n\twriter = tf.python_io.TFRecordWriter(filename)\n\tfor i in range(num_examples):\n\t\timage_raw = images[i].tostring()\n\t\texample = tf.train.Example(features=tf.train.Features(feature={\n\t\t\t'height': _int64_feature(rows),\n\t\t\t'width': _int64_feature(cols),\n\t\t\t'depth': _int64_feature(depth),\n\t\t\t'label': _int64_feature(int(labels[i])),\n\t\t\t'image_raw': _bytes_feature(image_raw)}))\n\t\twriter.write(example.SerializeToString())\n\ndef unpickle():\n\t#Output images should be identical.\n\timport random\n\tdef test(img_array1, img_array2, labels):\n\t\trand = random.randint(0,50000)\n\t\tim1 = Image.fromarray(img_array1[rand].reshape([3,1024]).T.reshape([32,32,3]))\n\t\tim2 = Image.fromarray(img_array2[rand])\n\t\tim1.save(\"im1.jpeg\")\n\t\tim2.save(\"im2.jpeg\")\n\t\tf = open('out.txt','w')\n\t\tf.write(str(labels[rand]))\n\n\t# NUM_EXAMPLES = 50000\n\t# CHANNELS = 3\n\t# IMAGE_SIZE = 32\n\t# images = [0] * NUM_EXAMPLES\n\t# labels = [0] * 
NUM_EXAMPLES\n\t# for i in range(1,6):\n\t# \tf = \"cifar-10-batches-py/data_batch_\" + str(i)\n\t# \tfo = open(f, 'rb')\n\t# \tdic = cPickle.load(fo)\n\t# \tfo.close()\n\t# \timages[i*10000-10000:i*10000] = dic['data']\n\t# \tlabels[i*10000-10000:i*10000] = dic['labels']\n\tNUM_EXAMPLES = 10000\n\tCHANNELS = 3\n\tIMAGE_SIZE = 32\n\timages = [0] * NUM_EXAMPLES\n\tlabels = [0] * NUM_EXAMPLES\n\tf = 'test_batch'\n\tfo = open(f, 'rb')\n\tdic = cPickle.load(fo)\n\tfo.close()\n\timages[0:10000] = dic['data']\n\tlabels[0:10000] = dic['labels']\n\timages = np.array(images)\n\tlabels = np.array(labels)\n\timages_o = np.transpose(images.reshape([NUM_EXAMPLES,CHANNELS,IMAGE_SIZE*IMAGE_SIZE]),(0,2,1)).reshape([NUM_EXAMPLES,IMAGE_SIZE,IMAGE_SIZE,CHANNELS])\n\t# test(images,images_o,labels)\n\treturn images_o,labels\n\nimages,labels = unpickle()\nconvert_to(images,labels,'test')\n\n\n# print (len(dic['data']))\n\n","sub_path":"converters/convert_cifar10.py","file_name":"convert_cifar10.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"393381419","text":"from matplotlib import font_manager as fm\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rcParams['font.sans-serif']=['SimHei'] #解决中文乱码\nplt.style.use('ggplot')\nfrom matplotlib import cm\n#原始数据\nshapes = ['天津', '江西省', '安徽省', '云南省', '福建省', '河南省', '辽宁省',\n '重庆', '湖南省', '四川省', '北京', '上海', '广西壮族自治区', '河北省',\n '浙江省', '江苏省', '湖北省', '山东省', '广东省']\nvalues = [287,383,842,866,1187,1405,1495,1620,1717,\n 2313,2378,3070,4332,5841,6482,7785,9358,9818,20254]\ns = pd.Series(values, index=shapes)\nlabels = s.index\nsizes = s.values\nfig, ax = plt.subplots(figsize=(6,6)) # 设置绘图区域大小\ncolors = cm.rainbow(np.arange(len(sizes))/len(sizes)) # 颜色地图:秋天→彩虹→灰色→春天→黑色\npatches, texts, autotexts = ax.pie(sizes, labels=labels, autopct='%1.0f%%',\n shadow=False, startangle=170, colors=colors)\nax.axis('equal')\nax.set_title('各地区线上图书销售占比图',loc='left')\n# 重新设置字体大小\nproptease = fm.FontProperties()\n# 字体大小(从小到大): xx-small、x-small、small、medium、large、x-large、xx-large,或者是数字,如18\nproptease.set_size('small')\nplt.setp(autotexts, fontproperties=proptease)\nplt.setp(texts, fontproperties=proptease)\nplt.show()","sub_path":"Python数据分析从入门到精通/MR/Code/05/example/02/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"297619180","text":"count = int(input())\n\nfor _ in range(0, count):\n final = 0\n numbers = list(map(int, input().split(' ')))\n numbers.sort()\n\n for i in range(numbers[0] + 1, numbers[1]):\n if i % 2 != 0:\n final += i\n\n print(final)","sub_path":"INE5402-01208B/estruturas-repeticao/1099.py","file_name":"1099.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"608131305","text":"from django.conf.urls import url\nfrom . 
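# Editor's note, hedged: the transpose in unpickle above converts CIFAR's channel-major
# rows (N, C, H*W) into the (N, H, W, C) images the TFRecord writer expects; the same
# reshape on toy data:
import numpy as np

n, c, hw = 2, 3, 4  # pretend: 2 images, 3 channels, 2x2 pixels
flat = np.arange(n * c * hw).reshape(n, c * hw)
imgs = flat.reshape(n, c, hw).transpose(0, 2, 1).reshape(n, 2, 2, c)
assert imgs.shape == (2, 2, 2, 3)
assert imgs[0, 0, 0, 1] == 4  # pixel 0's channel-1 value sat hw entries into the row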
import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^post$', views.post),\n url(r'^show/(?P\\d+)$', views.show),\n url(r'^delete/(?P\\d+)$', views.delete),\n url(r'^update/(?P\\d+)$', views.update_view),\n url(r'^update_action/(?P\\d+)$', views.update),\n url(r'^all_shows$', views.all_shows),\n]\n","sub_path":"python_stack/django/django_full_stack/semi/apps/semi_apps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"183352747","text":"\"\"\"\ncore/shell_native.py -- Subset of core/shell.py that we translate to C++.\n\nTODO: consolidate with core/shell.py.\n\"\"\"\nfrom __future__ import print_function\n\nimport time as time_\n\nfrom _devbuild.gen import arg_types\nfrom _devbuild.gen.option_asdl import builtin_i\nfrom _devbuild.gen.runtime_asdl import cmd_value\nfrom _devbuild.gen.syntax_asdl import source\n\nfrom asdl import runtime\n\nfrom core import alloc\nfrom core import dev\nfrom core import error\nfrom core import executor\nfrom core import main_loop\nfrom core import process\nfrom core.pyerror import e_usage, log\nunused1 = log\nfrom core import pyos\nfrom core import pyutil\nfrom core.pyutil import stderr_line\nfrom core import state\nfrom core import ui\nfrom core import util\nfrom core import vm\n\nfrom frontend import args\nfrom frontend import flag_def # side effect: flags are defined!\nunused2 = flag_def\nfrom frontend import flag_spec\nfrom frontend import reader\nfrom frontend import parse_lib\n\nfrom osh import builtin_assign\nfrom osh import builtin_bracket\nfrom osh import builtin_meta\nfrom osh import builtin_misc\nfrom osh import builtin_printf\n#from osh import builtin_process\nfrom osh import builtin_pure\nfrom osh import cmd_eval\nfrom osh import prompt\nfrom osh import sh_expr_eval\nfrom osh import split\nfrom osh import word_eval\n\nfrom mycpp import mylib\nfrom pylib import os_path\n\nimport posix_ as posix\n\nfrom typing import List, Dict, Optional, Any, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from _devbuild.gen.runtime_asdl import cmd_value__Argv, Proc\n from core import optview\n from oil_lang import expr_eval\n from pgen2 import grammar\n\n\ndef MakeBuiltinArgv(argv1):\n # type: (List[str]) -> cmd_value__Argv\n argv = [''] # dummy for argv[0]\n argv.extend(argv1)\n # no location info\n return cmd_value.Argv(argv, [runtime.NO_SPID] * len(argv), None)\n\n\ndef AddPure(b, mem, procs, modules, mutable_opts, aliases, search_path, errfmt):\n # type: (Dict[int, vm._Builtin], state.Mem, Dict[str, Proc], Dict[str, bool], state.MutableOpts, Dict[str, str], state.SearchPath, ui.ErrorFormatter) -> None\n b[builtin_i.set] = builtin_pure.Set(mutable_opts, mem)\n\n b[builtin_i.alias] = builtin_pure.Alias(aliases, errfmt)\n b[builtin_i.unalias] = builtin_pure.UnAlias(aliases, errfmt)\n\n b[builtin_i.hash] = builtin_pure.Hash(search_path)\n b[builtin_i.getopts] = builtin_pure.GetOpts(mem, errfmt)\n\n true_ = builtin_pure.Boolean(0)\n b[builtin_i.colon] = true_ # a \"special\" builtin \n b[builtin_i.true_] = true_\n b[builtin_i.false_] = builtin_pure.Boolean(1)\n\n b[builtin_i.shift] = builtin_assign.Shift(mem)\n\n b[builtin_i.type] = builtin_meta.Type(procs, aliases, search_path, errfmt)\n b[builtin_i.module] = builtin_pure.Module(modules, mem.exec_opts, errfmt)\n\n\ndef AddIO(b, mem, dir_stack, exec_opts, splitter, parse_ctx, errfmt):\n # type: (Dict[int, vm._Builtin], state.Mem, state.DirStack, optview.Exec, split.SplitContext, 
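# Editor's note, hedged: AddPure above populates an int-keyed dispatch table so the
# executor can look builtins up by enum id rather than by name; the bare pattern, with
# hypothetical ids standing in for builtin_i:
ECHO, TRUE = 1, 2

def make_table():
    table = {}
    table[ECHO] = lambda argv: print(*argv) or 0  # a builtin returns an exit status
    table[TRUE] = lambda argv: 0
    return table

assert make_table()[TRUE]([]) == 0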
parse_lib.ParseContext, ui.ErrorFormatter) -> None\n mapfile = builtin_misc.MapFile(mem, errfmt)\n\n b[builtin_i.echo] = builtin_pure.Echo(exec_opts)\n b[builtin_i.mapfile] = mapfile\n b[builtin_i.readarray] = mapfile\n\n b[builtin_i.read] = builtin_misc.Read(splitter, mem, parse_ctx)\n b[builtin_i.cat] = builtin_misc.Cat() # for $( None\n \"\"\"Builtins that run more code.\"\"\"\n\n builtins[builtin_i.builtin] = builtin_meta.Builtin(shell_ex, errfmt)\n builtins[builtin_i.command] = builtin_meta.Command(shell_ex, procs, aliases,\n search_path)\n builtins[builtin_i.runproc] = builtin_meta.RunProc(shell_ex, procs, errfmt)\n builtins[builtin_i.try_] = builtin_meta.Try(mutable_opts, mem, shell_ex, errfmt)\n\n\ndef AddBlock(builtins, mem, mutable_opts, dir_stack, cmd_ev, errfmt):\n # type: (Dict[int, vm._Builtin], state.Mem, state.MutableOpts, state.DirStack, cmd_eval.CommandEvaluator, ui.ErrorFormatter) -> None\n # These builtins take blocks, and thus need cmd_ev.\n builtins[builtin_i.cd] = builtin_misc.Cd(mem, dir_stack, cmd_ev, errfmt)\n builtins[builtin_i.shopt] = builtin_pure.Shopt(mutable_opts, cmd_ev)\n\n\ndef InitAssignmentBuiltins(mem, procs, errfmt):\n # type: (state.Mem, Dict[str, Proc], ui.ErrorFormatter) -> Dict[int, vm._AssignBuiltin]\n\n assign_b = {} # type: Dict[int, vm._AssignBuiltin]\n\n new_var = builtin_assign.NewVar(mem, procs, errfmt)\n assign_b[builtin_i.declare] = new_var\n assign_b[builtin_i.typeset] = new_var\n assign_b[builtin_i.local] = new_var\n\n assign_b[builtin_i.export_] = builtin_assign.Export(mem, errfmt)\n assign_b[builtin_i.readonly] = builtin_assign.Readonly(mem, errfmt)\n\n return assign_b\n\n\ndef Main(lang, arg_r, environ, login_shell, loader, line_input):\n # type: (str, args.Reader, Dict[str, str], bool, pyutil._ResourceLoader, Any) -> int\n \"\"\"The full shell lifecycle. Used by bin/osh and bin/oil.\n\n Args:\n lang: 'osh' or 'oil'\n argv0, arg_r: command line arguments\n environ: environment\n login_shell: Was - on the front?\n loader: to get help, version, grammar, etc.\n line_input: optional GNU readline\n \"\"\"\n # Differences between osh and oil:\n # - --help? I guess Oil has a SUPERSET of OSH options.\n # - oshrc vs oilrc\n # - shopt -s oil:all\n # - Change the prompt in the interactive shell?\n\n # osh-pure:\n # - no oil grammar\n # - no expression evaluator\n # - no interactive shell, or line_input\n # - no process.*\n # process.{ExternalProgram,Waiter,FdState,JobState,SignalState} -- we want\n # to evaluate config files without any of these\n # Modules not translated yet: completion, comp_ui, builtin_comp, process\n # - word evaluator\n # - shouldn't glob? set -o noglob? 
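# Editor's note, hedged: InitAssignmentBuiltins above registers one NewVar instance
# under declare/typeset/local, so the aliased builtins share state; the pattern alone:
class CountingBuiltin(object):
    def __init__(self):
        self.calls = 0
    def Run(self):
        self.calls += 1
        return 0

assign_table = {}
assign_table['declare'] = assign_table['typeset'] = assign_table['local'] = CountingBuiltin()
assign_table['declare'].Run()
assert assign_table['local'].calls == 1  # one object answers to all three names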
or hard failure?\n # - ~ shouldn't read from the file system\n # - I guess it can just be the HOME=HOME?\n # Builtin:\n # shellvm -c 'echo hi'\n # shellvm <<< 'echo hi'\n\n argv0 = arg_r.Peek()\n assert argv0 is not None\n arg_r.Next()\n\n assert lang in ('osh', 'oil'), lang\n\n try:\n attrs = flag_spec.ParseMore('main', arg_r)\n except error.Usage as e:\n stderr_line('osh usage error: %s', e.msg)\n return 2\n flag = arg_types.main(attrs.attrs)\n\n arena = alloc.Arena()\n errfmt = ui.ErrorFormatter(arena)\n\n help_builtin = builtin_misc.Help(loader, errfmt)\n if flag.help:\n help_builtin.Run(MakeBuiltinArgv(['%s-usage' % lang]))\n return 0\n if flag.version:\n # OSH version is the only binary in Oil right now, so it's all one version.\n pyutil.ShowAppVersion('Oil', loader)\n return 0\n\n no_str = None # type: str\n\n debug_stack = [] # type: List[state.DebugFrame]\n if arg_r.AtEnd():\n dollar0 = argv0\n else:\n dollar0 = arg_r.Peek() # the script name, or the arg after -c\n\n # Copy quirky bash behavior.\n frame0 = state.DebugFrame(dollar0, 'main', no_str, state.LINE_ZERO, 0, 0)\n debug_stack.append(frame0)\n\n # Copy quirky bash behavior.\n frame1 = state.DebugFrame(no_str, no_str, no_str, runtime.NO_SPID, 0, 0)\n debug_stack.append(frame1)\n\n script_name = arg_r.Peek() # type: Optional[str]\n arg_r.Next()\n mem = state.Mem(dollar0, arg_r.Rest(), arena, debug_stack)\n\n opt_hook = state.OptHook()\n parse_opts, exec_opts, mutable_opts = state.MakeOpts(mem, opt_hook)\n # Note: only MutableOpts needs mem, so it's not a true circular dep.\n mem.exec_opts = exec_opts # circular dep\n mutable_opts.Init()\n\n version_str = pyutil.GetVersion(loader)\n state.InitMem(mem, environ, version_str)\n\n procs = {} # type: Dict[str, Proc]\n\n if attrs.show_options: # special case: sh -o\n mutable_opts.ShowOptions([])\n return 0\n\n # Set these BEFORE processing flags, so they can be overridden.\n if lang == 'oil':\n mutable_opts.SetShoptOption('oil:all', True)\n\n builtin_pure.SetShellOpts(mutable_opts, attrs.opt_changes, attrs.shopt_changes)\n # feedback between runtime and parser\n aliases = {} # type: Dict[str, str]\n\n oil_grammar = None # type: grammar.Grammar\n #oil_grammar = pyutil.LoadOilGrammar(loader)\n\n if flag.one_pass_parse and not exec_opts.noexec():\n e_usage('--one-pass-parse requires noexec (-n)')\n parse_ctx = parse_lib.ParseContext(arena, parse_opts, aliases, oil_grammar)\n parse_ctx.Init_OnePassParse(flag.one_pass_parse)\n\n # Three ParseContext instances SHARE aliases.\n comp_arena = alloc.Arena()\n comp_arena.PushSource(source.Unused('completion'))\n trail1 = parse_lib.Trail()\n # one_pass_parse needs to be turned on to complete inside backticks. TODO:\n # fix the issue where ` gets erased because it's not part of\n # set_completer_delims().\n comp_ctx = parse_lib.ParseContext(comp_arena, parse_opts, aliases,\n oil_grammar)\n comp_ctx.Init_Trail(trail1)\n comp_ctx.Init_OnePassParse(True)\n\n hist_arena = alloc.Arena()\n hist_arena.PushSource(source.Unused('history'))\n trail2 = parse_lib.Trail()\n hist_ctx = parse_lib.ParseContext(hist_arena, parse_opts, aliases,\n oil_grammar)\n hist_ctx.Init_Trail(trail2)\n\n # Deps helps manages dependencies. 
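# Editor's note, hedged: the debug-path logic below gives --debug-file precedence over
# OSH_DEBUG_DIR; the same two-level fallback in isolation:
import os

def pick_debug_path(flag_value, env, pid):
    if flag_value is not None:
        return flag_value  # explicit flag wins
    debug_dir = env.get('OSH_DEBUG_DIR')
    if debug_dir is not None:
        return os.path.join(debug_dir, '%d-osh.log' % pid)
    return ''  # empty string means "no debug log"

assert pick_debug_path('x.log', {'OSH_DEBUG_DIR': '/tmp'}, 1) == 'x.log'
assert pick_debug_path(None, {'OSH_DEBUG_DIR': '/tmp'}, 1) == '/tmp/1-osh.log'
assert pick_debug_path(None, {}, 1) == ''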
These dependencies are circular:\n # - cmd_ev and word_ev, arith_ev -- for command sub, arith sub\n # - arith_ev and word_ev -- for $(( ${a} )) and $x$(( 1 )) \n # - cmd_ev and builtins (which execute code, like eval)\n # - prompt_ev needs word_ev for $PS1, which needs prompt_ev for @P\n cmd_deps = cmd_eval.Deps()\n cmd_deps.mutable_opts = mutable_opts\n\n # TODO: In general, cmd_deps are shared between the mutually recursive\n # evaluators. Some of the four below are only shared between a builtin and\n # the CommandEvaluator, so we could put them somewhere else.\n cmd_deps.traps = {}\n cmd_deps.trap_nodes = [] # TODO: Clear on fork() to avoid duplicates\n\n my_pid = posix.getpid()\n\n debug_path = ''\n debug_dir = environ.get('OSH_DEBUG_DIR')\n if flag.debug_file is not None:\n # --debug-file takes precedence over OSH_DEBUG_DIR\n debug_path = flag.debug_file\n elif debug_dir is not None:\n debug_path = os_path.join(debug_dir, '%d-osh.log' % my_pid)\n\n if len(debug_path):\n raise NotImplementedError()\n else:\n debug_f = util.NullDebugFile() # type: util._DebugFile\n\n cmd_deps.debug_f = debug_f\n\n # Not using datetime for dependency reasons. TODO: maybe show the date at\n # the beginning of the log, and then only show time afterward? To save\n # space, and make space for microseconds. (datetime supports microseconds\n # but time.strftime doesn't).\n if mylib.PYTHON:\n iso_stamp = time_.strftime(\"%Y-%m-%d %H:%M:%S\")\n debug_f.log('%s [%d] OSH started with argv %s', iso_stamp, my_pid, arg_r.argv)\n if len(debug_path):\n debug_f.log('Writing logs to %r', debug_path)\n\n if flag.xtrace_to_debug_file:\n trace_f = debug_f\n else:\n trace_f = util.DebugFile(mylib.Stderr())\n tracer = dev.Tracer(parse_ctx, exec_opts, mutable_opts, mem, trace_f)\n\n # TODO: We shouldn't have SignalState?\n sig_state = pyos.SignalState()\n sig_state.InitShell()\n\n job_state = process.JobState()\n fd_state = process.FdState(errfmt, job_state, mem, tracer, None)\n waiter = process.Waiter(job_state, exec_opts, sig_state, tracer)\n fd_state.waiter = waiter # circular dep\n\n interp = environ.get('OSH_HIJACK_SHEBANG', '')\n search_path = state.SearchPath(mem)\n ext_prog = process.ExternalProgram(interp, fd_state, errfmt, debug_f)\n\n splitter = split.SplitContext(mem)\n\n # This could just be OSH_DEBUG_STREAMS='debug crash' ? 
That might be\n # stuffing too much into one, since a .json crash dump isn't a stream.\n crash_dump_dir = environ.get('OSH_CRASH_DUMP_DIR', '')\n cmd_deps.dumper = dev.CrashDumper(crash_dump_dir)\n\n #comp_lookup = completion.Lookup()\n\n # Various Global State objects to work around readline interfaces\n #compopt_state = completion.OptionState()\n #comp_ui_state = comp_ui.State()\n #prompt_state = comp_ui.PromptState()\n\n dir_stack = state.DirStack()\n\n #\n # Initialize builtins that don't depend on evaluators\n #\n\n builtins = {} # type: Dict[int, vm._Builtin]\n modules = {} # type: Dict[str, bool]\n\n AddPure(builtins, mem, procs, modules, mutable_opts, aliases, search_path, errfmt)\n AddIO(builtins, mem, dir_stack, exec_opts, splitter, parse_ctx, errfmt)\n\n builtins[builtin_i.help] = help_builtin\n\n #\n # Initialize Evaluators\n #\n\n arith_ev = sh_expr_eval.ArithEvaluator(mem, exec_opts, parse_ctx, errfmt)\n bool_ev = sh_expr_eval.BoolEvaluator(mem, exec_opts, parse_ctx, errfmt)\n expr_ev = None # type: expr_eval.OilEvaluator\n word_ev = word_eval.NormalWordEvaluator(mem, exec_opts, mutable_opts,\n splitter, errfmt)\n\n assign_b = InitAssignmentBuiltins(mem, procs, errfmt)\n cmd_ev = cmd_eval.CommandEvaluator(mem, exec_opts, errfmt, procs,\n assign_b, arena, cmd_deps)\n\n shell_ex = executor.ShellExecutor(\n mem, exec_opts, mutable_opts, procs, builtins, search_path,\n ext_prog, waiter, tracer, job_state, fd_state, errfmt)\n\n # PromptEvaluator rendering is needed in non-interactive shells for @P.\n prompt_ev = prompt.Evaluator(lang, parse_ctx, mem)\n\n # Wire up circular dependencies.\n vm.InitCircularDeps(arith_ev, bool_ev, expr_ev, word_ev, cmd_ev, shell_ex,\n prompt_ev, tracer)\n\n #\n # Initialize builtins that depend on evaluators\n #\n\n unsafe_arith = sh_expr_eval.UnsafeArith(mem, exec_opts, parse_ctx, arith_ev)\n vm.InitUnsafeArith(mem, word_ev, unsafe_arith)\n\n builtins[builtin_i.printf] = builtin_printf.Printf(mem, parse_ctx,\n unsafe_arith, errfmt)\n builtins[builtin_i.unset] = builtin_assign.Unset(mem, procs, unsafe_arith,\n errfmt)\n builtins[builtin_i.eval] = builtin_meta.Eval(parse_ctx, exec_opts, cmd_ev,\n tracer)\n\n #source_builtin = builtin_meta.Source(parse_ctx, search_path, cmd_ev,\n #fd_state, tracer, errfmt)\n #builtins[builtin_i.source] = source_builtin\n #builtins[builtin_i.dot] = source_builtin\n\n AddMeta(builtins, shell_ex, mutable_opts, mem, procs, aliases, search_path,\n errfmt)\n AddBlock(builtins, mem, mutable_opts, dir_stack, cmd_ev, errfmt)\n\n #builtins[builtin_i.trap] = builtin_process.Trap(sig_state, cmd_deps.traps,\n # cmd_deps.trap_nodes,\n # parse_ctx, errfmt)\n\n if flag.c is not None:\n arena.PushSource(source.CFlag())\n line_reader = reader.StringLineReader(flag.c, arena) # type: reader._Reader\n if flag.i: # -c and -i can be combined\n mutable_opts.set_interactive()\n\n elif flag.i: # force interactive\n raise NotImplementedError()\n\n else:\n if script_name is None:\n stdin = mylib.Stdin()\n arena.PushSource(source.Stdin(''))\n line_reader = reader.FileLineReader(stdin, arena)\n else:\n arena.PushSource(source.MainFile(script_name))\n try:\n f = fd_state.Open(script_name)\n #f = mylib.open(script_name)\n except OSError as e:\n stderr_line(\"osh: Couldn't open %r: %s\", script_name,\n pyutil.strerror(e))\n return 1\n line_reader = reader.FileLineReader(f, arena)\n\n # TODO: assert arena.NumSourcePaths() == 1\n # TODO: .rc file needs its own arena.\n c_parser = parse_ctx.MakeOshParser(line_reader)\n\n if exec_opts.interactive():\n 
raise NotImplementedError()\n\n if exec_opts.noexec():\n status = 0\n try:\n node = main_loop.ParseWholeFile(c_parser)\n except error.Parse as e:\n ui.PrettyPrintError(e, arena)\n status = 2\n\n if status == 0 :\n if flag.parser_mem_dump is not None: # only valid in -n mode\n input_path = '/proc/%d/status' % posix.getpid()\n pyutil.CopyFile(input_path, flag.parser_mem_dump)\n\n ui.PrintAst(node, flag)\n else:\n if flag.parser_mem_dump is not None:\n e_usage('--parser-mem-dump can only be used with -n')\n\n with state.ctx_ThisDir(mem, script_name):\n try:\n status = main_loop.Batch(cmd_ev, c_parser, arena,\n cmd_flags=cmd_eval.IsMainProgram)\n except util.UserExit as e:\n status = e.status\n box = [status]\n cmd_ev.MaybeRunExitTrap(box)\n status = box[0]\n\n # NOTE: 'exit 1' is ControlFlow and gets here, but subshell/commandsub\n # don't because they call sys.exit().\n if flag.runtime_mem_dump is not None:\n input_path = '/proc/%d/status' % posix.getpid()\n pyutil.CopyFile(input_path, flag.runtime_mem_dump)\n\n # NOTE: We haven't closed the file opened with fd_state.Open\n return status\n","sub_path":"core/shell_native.py","file_name":"shell_native.py","file_ext":"py","file_size_in_byte":17227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"376980221","text":"'''\r\n\r\nCreated on 2018年4月20日\r\n@author: rocky.wang\r\n'''\r\nfrom selenium import webdriver\r\nimport time\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport os\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.firefox.options import Options\r\n\r\ndef fetchToken():\r\n \r\n options = Options()\r\n options.add_argument(\"--headless\")\r\n driver = webdriver.Firefox(firefox_options=options)\r\n \r\n print(\"開啟臉書\")\r\n driver.get(\"https://www.facebook.com/\")\r\n time.sleep(3)\r\n print(\"進行登入\")\r\n elem = driver.find_element_by_id(\"email\")\r\n elem.clear()\r\n elem.send_keys(\"rockywang101@gmail.com\")\r\n elem = driver.find_element_by_id(\"pass\")\r\n elem.clear()\r\n elem.send_keys(os.environ[\"MY_PASSWORD\"])\r\n elem.send_keys(Keys.RETURN)\r\n time.sleep(2)\r\n \r\n print(\"進入 API 頁面\")\r\n driver.get(\"https://developers.facebook.com/tools/explorer\")\r\n time.sleep(3)\r\n print(\"取得權杖\")\r\n elem = driver.find_element_by_link_text(\"取得權杖\")\r\n elem.click()\r\n time.sleep(1)\r\n print(\"取得用戶存取權杖\")\r\n elem2 = driver.find_element_by_link_text(\"取得用戶存取權杖\")\r\n elem2.click()\r\n time.sleep(1)\r\n print(\"取得存取權杖\")\r\n elem3 = driver.find_element(By.XPATH, '//button[text()=\"取得存取權杖\"]')\r\n elem3.click()\r\n time.sleep(3)\r\n \r\n inputList = driver.find_elements_by_class_name(\"_58al\")\r\n token = inputList[2].get_attribute('value')\r\n print(\"取得 API Token: \" + token)\r\n\r\n driver.close()\r\n return token\r\n\r\nif __name__ == \"__main__\":\r\n token = fetchToken()\r\n \r\n","sub_path":"selenium00/seleniumDemo04.py","file_name":"seleniumDemo04.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"565345821","text":"'''\nCreated on 17.02.2015\n\n@author: marscher\n'''\n\n\ndef build_chain(transformers, chunksize=None):\n \"\"\"\n utility method to build a working pipeline out of given data source and\n transformers\n \"\"\"\n\n for i in xrange(1, len(transformers)):\n transformers[i].data_producer = transformers[i - 1]\n\n if chunksize is not None:\n for t in transformers:\n t.chunksize = chunksize\n\n return transformers\n\n\ndef 
run_chain(chain):\n for c in chain:\n c.parametrize()\n","sub_path":"pyemma/coordinates/util/chaining.py","file_name":"chaining.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"441489188","text":"#!/usr/bin/env python2\n\n'''\n This program is free software; you can redistribute it and/or modify\n it under the terms of the Revised BSD License.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n Revised BSD License for more details.\n\n Copyright 2011-2016 Game Maker 2k - https://github.com/GameMaker2k\n Copyright 2011-2016 Kazuki Przyborowski - https://github.com/KazukiPrzyborowski\n\n $FileInfo: pypac-gen.py - Last Update: 6/1/2016 Ver. 0.2.0 RC 1 - Author: cooldude2k $\n'''\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals;\nimport re, os, sys, time, datetime, argparse, hashlib, subprocess, json;\n\n__version_info__ = (0, 2, 0, \"rc1\");\nif(__version_info__[3]!=None):\n __version__ = str(__version_info__[0])+\".\"+str(__version_info__[1])+\".\"+str(__version_info__[2])+\"+\"+str(__version_info__[3]);\nif(__version_info__[3]==None):\n __version__ = str(__version_info__[0])+\".\"+str(__version_info__[1])+\".\"+str(__version_info__[2]);\n\nproname = \"pypac-gen\";\nprover = __version__;\nprofullname = proname+\" \"+prover;\n\ndef which_exec(execfile):\n for path in os.environ[\"PATH\"].split(\":\"):\n if os.path.exists(path + \"/\" + execfile):\n return path + \"/\" + execfile;\n\nparser = argparse.ArgumentParser(conflict_handler = \"resolve\", add_help = True);\nparser.add_argument(\"-v\", \"--version\", action = \"version\", version = profullname);\nparser.add_argument(\"-s\", \"--source\", default = os.path.realpath(os.getcwd()), help = \"source dir\");\nparser.add_argument(\"-g\", \"--getsource\", action = \"store_true\", help = \"get source dir\");\nparser.add_argument(\"-p\", \"--getparent\", action = \"store_true\", help = \"get parent dir\");\nparser.add_argument(\"-t\", \"--gettarname\", action = \"store_true\", help = \"get tar name\");\nparser.add_argument(\"-d\", \"--getdirname\", action = \"store_true\", help = \"get dir name\");\nparser.add_argument(\"-e\", \"--getpkgsource\", action = \"store_true\", help = \"get pkg source\");\ngetargs = parser.parse_args();\ngetargs.source = os.path.realpath(getargs.source);\npkgsetuppy = os.path.realpath(getargs.source+os.path.sep+\"setup.py\");\npyexecpath = os.path.realpath(sys.executable);\nif(not os.path.exists(getargs.source) or not os.path.isdir(getargs.source)):\n raise Exception(\"Could not find directory.\");\nif(not os.path.exists(pkgsetuppy) or not os.path.isfile(pkgsetuppy)):\n raise Exception(\"Could not find setup.py in directory.\");\n\npypkgenlistp = subprocess.Popen([pyexecpath, pkgsetuppy, \"getversioninfo\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE);\npypkgenout, pypkgenerr = pypkgenlistp.communicate();\nif(sys.version[0]==\"3\"):\n pypkgenout = pypkgenout.decode('utf-8');\npymodule = json.loads(pypkgenout);\nsetuppy_verinfo = pymodule['versionlist'];\nsetuppy_author = pymodule['author'];\nsetuppy_authoremail = pymodule['authoremail'];\nsetuppy_maintainer = pymodule['maintainer'];\nsetuppy_maintaineremail = pymodule['maintaineremail'];\nsetuppy_description = pymodule['description'];\nsetuppy_license = 
pymodule['license'];\nsetuppy_keywords = pymodule['keywords'];\nsetuppy_url = pymodule['url'];\nsetuppy_downloadurl = pymodule['downloadurl'];\nsetuppy_longdescription = pymodule['longdescription'];\nsetuppy_platforms = pymodule['platforms'];\n\nif(sys.version[0]==\"2\"):\n pkgsource = \"py2motherless\";\nif(sys.version[0]==\"3\"):\n pkgsource = \"py3motherless\";\npkgupstreamname = \"PyMotherless\";\npkgveralt = str(setuppy_verinfo[0])+\".\"+str(setuppy_verinfo[1])+\".\"+str(setuppy_verinfo[2]);\npkgveraltrel = str(setuppy_verinfo[4]);\npkgver = str(pkgveralt)+\"-rc\"+str(setuppy_verinfo[4]);\npkgurgency = \"urgency=low\";\npkgauthorname = setuppy_author;\npkgauthoremail = setuppy_authoremail;\npkgauthoremailalt = setuppy_authoremail.replace(\"@\", \"[at]\").replace(\".\", \"[dot]\");\npkgauthor = pkgauthorname+\" <\"+pkgauthoremail+\">\";\npkgauthoralt = pkgauthorname+\" <\"+pkgauthoremailalt+\">\";\npkgmaintainername = setuppy_maintainer;\npkgmaintaineremail = setuppy_maintaineremail;\npkgmaintaineremailalt = setuppy_maintaineremail.replace(\"@\", \"[at]\").replace(\".\", \"[dot]\");\npkgmaintainer = pkgmaintainername+\" <\"+pkgmaintaineremail+\">\";\npkgmaintaineralt = pkgmaintainername+\" <\"+pkgmaintaineremailalt+\">\";\npkggiturl = \"https://github.com/GameMaker2k/PyMotherless.git\";\npkghomepage = setuppy_url;\npkgsection = \"python\";\npkgpriority = \"optional\";\nif(sys.version[0]==\"2\"):\n pkgbuilddepends = \"'python2'\";\nif(sys.version[0]==\"3\"):\n pkgbuilddepends = \"'python'\";\npkgstandardsversion = \"3.9.8\";\nif(sys.version[0]==\"2\"):\n pkgpackage = \"python2-pymotherless\";\n pkgoldname = \"python2-motherless\";\nif(sys.version[0]==\"3\"):\n pkgpackage = \"python-pymotherless\";\n pkgoldname = \"python-motherless\";\npkgarchitecture = \"'any' 'i686' 'x86_64'\";\nif(sys.version[0]==\"2\"):\n pkgdepends = \"'python2-setuptools'\";\nif(sys.version[0]==\"3\"):\n pkgdepends = \"'python-setuptools'\";\npkgdescription = setuppy_description+\"\\n \"+setuppy_longdescription;\npkgtzstr = time.strftime(\"%a, %d %b %Y %H:%M:%S %z\");\n\nif(getargs.getsource==True):\n print(getargs.source);\n sys.exit();\nif(getargs.getparent==True):\n print(os.path.realpath(os.path.dirname(getargs.source)));\n sys.exit();\nif(getargs.getdirname==True):\n print(pkgsource+\"_\"+pkgveralt+\".orig\");\n sys.exit();\nif(getargs.gettarname==True):\n print(pkgsource+\"_\"+pkgveralt+\".orig.tar.gz\");\n sys.exit();\nif(getargs.getpkgsource==True):\n print(pkgsource);\n sys.exit();\n\nprint(\"generating arch linux package build directory\");\n\npacpkg_pkgbuild_dir = os.path.realpath(getargs.source+os.path.sep+pkgsource);\nprint(\"creating directory \"+pacpkg_pkgbuild_dir);\nif(not os.path.exists(pacpkg_pkgbuild_dir)):\n os.makedirs(pacpkg_pkgbuild_dir);\nos.chmod(pacpkg_pkgbuild_dir, int(\"0755\", 8));\n\ngzparentdir = os.path.realpath(os.path.dirname(getargs.source));\nfiletargz = open(os.path.realpath(gzparentdir+os.path.sep+pkgsource+\"_\"+pkgveralt+\".orig.tar.gz\"), \"rb\");\nfiletargzmd5 = hashlib.md5(filetargz.read()).hexdigest();\nfiletargz.seek(0);\nfiletargzsha1 = hashlib.sha1(filetargz.read()).hexdigest();\nfiletargz.seek(0);\nfiletargzsha224 = hashlib.sha224(filetargz.read()).hexdigest();\nfiletargz.seek(0);\nfiletargzsha256 = hashlib.sha256(filetargz.read()).hexdigest();\nfiletargz.seek(0);\nfiletargzsha384 = hashlib.sha384(filetargz.read()).hexdigest();\nfiletargz.seek(0);\nfiletargzsha512 = hashlib.sha512(filetargz.read()).hexdigest();\nfiletargz.close();\n\npacpkg_pkgbuild_file = 
os.path.realpath(pacpkg_pkgbuild_dir+os.path.sep+\"PKGBUILD\");\nprint(\"generating file \"+pacpkg_pkgbuild_file);\nif(sys.version[0]==\"2\"):\n pacpkg_string_temp = \"# Maintainer: \"+pkgmaintaineralt+\"\\n\";\n pacpkg_string_temp += \"# This file was automatically generated by \"+profullname+\" at\\n\";\n pacpkg_string_temp += \"# \"+pkgtzstr+\"\\n\\n\";\n pacpkg_string_temp += \"pkgname=\"+pkgpackage+\"\\n\";\n pacpkg_string_temp += \"pkgver=\"+pkgveralt+\"\\n\";\n pacpkg_string_temp += \"pkgrel=\"+pkgveraltrel+\"\\n\";\n pacpkg_string_temp += \"pkgdesc='\"+setuppy_description+\"'\\n\";\n pacpkg_string_temp += \"url='\"+setuppy_url+\"'\\n\";\n pacpkg_string_temp += \"arch=(\"+pkgarchitecture+\")\\n\";\n pacpkg_string_temp += \"license=('\"+setuppy_license+\"')\\n\";\n pacpkg_string_temp += \"groups=()\\n\";\n pacpkg_string_temp += \"depends=(\"+pkgbuilddepends+\")\\n\";\n pacpkg_string_temp += \"optdepends=()\\n\";\n pacpkg_string_temp += \"makedepends=(\"+pkgdepends+\")\\n\";\n pacpkg_string_temp += \"conflicts=()\\n\";\n pacpkg_string_temp += \"replaces=('\"+pkgoldname+\"')\\n\";\n pacpkg_string_temp += \"backup=()\\n\";\n pacpkg_string_temp += \"options=(!strip !emptydirs)\\n\";\n pacpkg_string_temp += \"install=''\\n\";\n pacpkg_string_temp += \"source=('.\"+os.path.sep+pkgsource+\"_\"+pkgveralt+\".orig.tar.gz')\\n\";\n pacpkg_string_temp += \"md5sums=('\"+filetargzmd5+\"')\\n\";\n pacpkg_string_temp += \"sha1sums=('\"+filetargzsha1+\"')\\n\";\n pacpkg_string_temp += \"sha224sums=('\"+filetargzsha224+\"')\\n\";\n pacpkg_string_temp += \"sha256sums=('\"+filetargzsha256+\"')\\n\";\n pacpkg_string_temp += \"sha384sums=('\"+filetargzsha384+\"')\\n\";\n pacpkg_string_temp += \"sha512sums=('\"+filetargzsha512+\"')\\n\\n\";\n pacpkg_string_temp += \"build() {\\n\";\n pacpkg_string_temp += \" cd \\\"${srcdir}/\"+pkgsource+\"_${pkgver}.orig\\\"\\n\";\n pacpkg_string_temp += \" python2 ./setup.py build\\n\";\n pacpkg_string_temp += \"}\\n\\n\";\n pacpkg_string_temp += \"package() {\\n\";\n pacpkg_string_temp += \" cd \\\"${srcdir}/\"+pkgsource+\"_${pkgver}.orig\\\"\\n\";\n pacpkg_string_temp += \" python2 ./setup.py install --root=\\\"${pkgdir}\\\" --optimize=1\\n\";\n pacpkg_string_temp += \"}\\n\\n\";\n pacpkg_string_temp += \"# vim:set ts=2 sw=2 et:\\n\";\nif(sys.version[0]==\"3\"):\n pacpkg_string_temp = \"# Maintainer: \"+pkgmaintaineralt+\"\\n\";\n pacpkg_string_temp += \"# This file was automatically generated by \"+profullname+\" at\\n\";\n pacpkg_string_temp += \"# \"+pkgtzstr+\"\\n\\n\";\n pacpkg_string_temp += \"pkgname=\"+pkgpackage+\"\\n\";\n pacpkg_string_temp += \"pkgver=\"+pkgveralt+\"\\n\";\n pacpkg_string_temp += \"pkgrel=\"+pkgveraltrel+\"\\n\";\n pacpkg_string_temp += \"pkgdesc='\"+setuppy_description+\"'\\n\";\n pacpkg_string_temp += \"url='\"+setuppy_url+\"'\\n\";\n pacpkg_string_temp += \"arch=(\"+pkgarchitecture+\")\\n\";\n pacpkg_string_temp += \"license=('\"+setuppy_license+\"')\\n\";\n pacpkg_string_temp += \"groups=()\\n\";\n pacpkg_string_temp += \"depends=(\"+pkgbuilddepends+\")\\n\";\n pacpkg_string_temp += \"optdepends=()\\n\";\n pacpkg_string_temp += \"makedepends=(\"+pkgdepends+\")\\n\";\n pacpkg_string_temp += \"conflicts=()\\n\";\n pacpkg_string_temp += \"replaces=('\"+pkgoldname+\"')\\n\";\n pacpkg_string_temp += \"backup=()\\n\";\n pacpkg_string_temp += \"options=(!strip !emptydirs)\\n\";\n pacpkg_string_temp += \"install=''\\n\";\n pacpkg_string_temp += \"source=('.\"+os.path.sep+pkgsource+\"_\"+pkgveralt+\".orig.tar.gz')\\n\";\n pacpkg_string_temp += 
\"md5sums=('\"+filetargzmd5+\"')\\n\";\n pacpkg_string_temp += \"sha1sums=('\"+filetargzsha1+\"')\\n\";\n pacpkg_string_temp += \"sha224sums=('\"+filetargzsha224+\"')\\n\";\n pacpkg_string_temp += \"sha256sums=('\"+filetargzsha256+\"')\\n\";\n pacpkg_string_temp += \"sha384sums=('\"+filetargzsha384+\"')\\n\";\n pacpkg_string_temp += \"sha512sums=('\"+filetargzsha512+\"')\\n\\n\";\n pacpkg_string_temp += \"build() {\\n\";\n pacpkg_string_temp += \" cd \\\"${srcdir}/\"+pkgsource+\"_${pkgver}.orig\\\"\\n\";\n pacpkg_string_temp += \" python3 ./setup.py build\\n\";\n pacpkg_string_temp += \"}\\n\\n\";\n pacpkg_string_temp += \"package() {\\n\";\n pacpkg_string_temp += \" cd \\\"${srcdir}/\"+pkgsource+\"_${pkgver}.orig\\\"\\n\";\n pacpkg_string_temp += \" python3 ./setup.py install --root=\\\"${pkgdir}\\\" --optimize=1\\n\";\n pacpkg_string_temp += \"}\\n\\n\";\n pacpkg_string_temp += \"# vim:set ts=2 sw=2 et:\\n\";\npacpkg_file_temp = open(pacpkg_pkgbuild_file, \"w\");\npacpkg_file_temp.write(pacpkg_string_temp);\npacpkg_file_temp.close();\nos.chmod(pacpkg_pkgbuild_file, int(\"0755\", 8));\n","sub_path":"pkgbuild/archlinux/python2/pypac-gen.py","file_name":"pypac-gen.py","file_ext":"py","file_size_in_byte":10687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"579927174","text":"from django.shortcuts import render, HttpResponse, redirect\n\ndef index(request):\n if 'total_spent' not in request.session:\n request.session['total_spent'] = 0\n if 'products_ordered' not in request.session:\n request.session['products_ordered'] = 0\n return render(request, 'shop/index.html')\n\ndef checkout(request):\n checkout_product_id = request.POST.get('product_id')\n request.session['checkout_quantity'] = int(request.POST.get('quantity'))\n\n product_prices = [{'1': 19.99}, {'2': 29.99}, {'3': 4.99}, {'4': 49.99}]\n for product in product_prices:\n for product_id in product:\n if checkout_product_id == product_id:\n request.session['product_price'] = (product[product_id] * request.session['checkout_quantity'])\n request.session['total_spent'] += (product[product_id] * request.session['checkout_quantity'])\n request.session['products_ordered'] += request.session['checkout_quantity']\n \n product_names = [{'1': 'Dojo Tshirt'}, {'2': 'Dojo Sweater'}, {'3': 'Dojo Cup'}, {'4': 'Algorithm Book'}]\n for product in product_names:\n for product_name in product:\n if checkout_product_id == product_name:\n request.session['product_name'] = product[product_name]\n return redirect('/success')\n\ndef success(request):\n return render(request, 'shop/success.html')","sub_path":"apps/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"14597672","text":"import os\nfrom setuptools import setup, find_packages\n\nmy_dir = os.path.dirname(os.path.realpath(__file__))\n\ndef readme():\n with open(os.path.join(my_dir, 'README.rst')) as f:\n return f.read()\n\n\nsetup(\n name='music_album_creator',\n version='1.0.7',\n description='A CLI application intending to automate offline music library building',\n long_description=readme(),\n keywords='music album automation youtube audio metadata download',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python 
:: 3.7',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Multimedia :: Sound/Audio :: Conversion',\n 'Topic :: Multimedia :: Sound/Audio :: Editors',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Science/Research',\n ],\n url='https://github.com/boromir674/music-album-creator',\n download_url='https://github.com/boromir674/music-album-creator/archive/v1.0.7.tar.gz',\n author='Konstantinos Lampridis',\n author_email='k.lampridis@hotmail.com',\n license='GNU GPLv3',\n packages=find_packages(exclude=[\"testing.*\", \"testing\"]),\n install_requires=['tqdm', 'click', 'sklearn', 'mutagen', 'PyInquirer', 'youtube_dl'],\n include_package_data=True,\n entry_points = {\n 'console_scripts': ['create-album=music_album_creation.create_album:main'],\n },\n setup_requires=['pytest-runner>=2.0',],\n tests_require=['pytest',],\n # test_suite='',\n zip_safe=False\n)\n","sub_path":"pypi_install_script/music_album_creator-1.0.7.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"485658904","text":"# _*_ coding:utf-8 _*_\n# @time: 2020/9/28 上午10:06\n# @author: 张新新\n# @email: 1262981714@qq.com\nfrom Vrep import vrep\nfrom Vrep import vrep_connect\nimport time\nimport numpy as np\nimport cv2\nclass Camera():\n def __init__(self,clientId,config):\n self.clientID = clientId\n errorCode, self.KinectRgbHandle = vrep.simxGetObjectHandle(self.clientID, 'kinect_rgb',\n vrep.simx_opmode_blocking)\n errorCode, self.KinectDepthHandle = vrep.simxGetObjectHandle(self.clientID, 'kinect_depth',\n vrep.simx_opmode_oneshot_wait)\n self.imgsize = (640,480)\n fs2 = cv2.FileStorage(config, cv2.FileStorage_READ)\n self.intrinsic = fs2.getNode(\"intrinsic\").mat()\n self.dist = fs2.getNode(\"dist\").mat()\n fs2.release()\n\n def get_rgb_image(self):\n # Get color image from simulation\n\n sim_ret, resolution, raw_image = vrep.simxGetVisionSensorImage(self.clientID, self.KinectRgbHandle, 0,\n vrep.simx_opmode_blocking)\n color_img = np.asarray(raw_image)\n color_img.shape = (resolution[1], resolution[0], 3)\n color_img = color_img.astype(np.float) / 255\n color_img[color_img < 0] += 1\n color_img *= 255\n color_img = np.fliplr(color_img)\n color_img = color_img.astype(np.uint8)\n center = (resolution[0]/2,resolution[1]/2)\n size = (resolution[0],resolution[1])\n rotateMat = cv2.getRotationMatrix2D(center, 180, 1)\n result_img = cv2.warpAffine(color_img, rotateMat, size)\n\n return result_img\n\n def get_depth_image(self):\n sim_ret, resolution, depth_buffer = vrep.simxGetVisionSensorDepthBuffer(self.clientID, self.KinectDepthHandle,\n vrep.simx_opmode_blocking)\n depth_img = np.asarray(depth_buffer)\n depth_img.shape = (resolution[1], resolution[0])\n depth_img = np.fliplr(depth_img)\n return depth_img\nif __name__ == '__main__':\n camera = Camera(vrep_connect.getVrep_connect())\n rgb = camera.get_rgb_image()\n cv2.imshow(\"rgb\",rgb)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","sub_path":"Vrep/Kinect.py","file_name":"Kinect.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"419827549","text":"# Autor: Jose LuiS Mata Lomeli, A01377205 \n# Descripcion: Dar el porcentaje de alumnos y alumnas de un grupo.\n\n# Escribe tu programa después de esta línea.\n\nAlumnos_morras= int(input(\"Escriba la cantidad de alumnas inscritas: \")) \nAlumnos_morros= 
int(input(\"Escriba la cantidad de alumnos inscritos: \"))\n\nTotal_Alumnos= Alumnos_morras + Alumnos_morros\n\nPorcentaje_Femm= (Alumnos_morras*100)/Total_Alumnos\nPorcentaje_Masc= (Alumnos_morros*100)/Total_Alumnos\n\nprint(\"Total de inscritos: \", (Total_Alumnos))\nprint(\"Porcentaje de Alumnas: \", (Porcentaje_Femm))\nprint(\"Porcentaje de Alumnos: \", (Porcentaje_Masc))\n\n\n","sub_path":"porcentajes.py","file_name":"porcentajes.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"218092163","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 8 22:58:23 2017\n\n@author: mh70\n\"\"\"\n\nimport sqlite3\nfrom sqlite3 import Error\nimport platform\nimport pandas as pd\n\ndef get_db_fname():\n db_file_name = \"History\"\n if platform.system() == 'Linux':\n db_path = r\"/home/mh/.config/google-chrome/Default\"\n db_file = db_path + '/' + db_file_name\n else: \n db_path = r\"C:\\Users\\m.houska\\AppData\\Local\\Google\\Chrome\\User Data\\Default\"\n db_file = db_path + '\\\\' + db_file_name\n return db_file\n\ndef create_connection(db_file):\n \"\"\" create a database connection to the SQLite db specified by the db_file\n :param db_file: database file\n :return: Connection object or None \n \"\"\"\n \n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n \n return None\n\n\ndef get_data_from_db(db_file):\n \n \n # create a database connection \n conn = create_connection(db_file)\n with conn:\n \n # visit_time + last_visit_time\n # number of microseconds since midnight UTC of 1 January 1601\n # \n # time shift \n # dt_1601 = datetime(1601, 1, 1, 0, 0, 0, 0)\n # dt_1970 = datetime(1970, 1, 1, 0, 0, 0, 0)\n # (dt_1970 - dt_1601).total_seconds() -> 11644473600.0\n\n \n sql_urls = \"\"\"select id, url,\n datetime(last_visit_time/1000000-11644473600,'unixepoch'), \n visit_count \n from urls \n order by last_visit_time desc;\n \"\"\"\n \n sql_visits = \"\"\"select \n datetime(visit_time/1000000-11644473600,'unixepoch'), url, transition \n from visits\n order by visit_time desc;\n \"\"\"\n cur = conn.cursor()\n \n cur.execute(sql_urls)\n rows_urls = cur.fetchall() #raw sql data\n \n cur.execute(sql_visits)\n rows_visits = cur.fetchall() #raw sql data\n \n \n return (rows_urls, rows_visits)\n\n\n\ndef main():\n \n #read from db , close chrome first\n raw_urls, raw_visits = get_data_from_db(get_db_fname()) \n \n #read into pandas DF\n df_visits = pd.DataFrame(raw_visits,\n columns=['datetime', 'url_fk', 'transition'])\n df_urls = pd.DataFrame(raw_urls,\n columns=['id', 'url', 'datetime', 'visitcount'])\n \n \n #convert the datetime string column into a column of Pandas datetime elements\n # Since pandas represents timestamps in nanosecond resolution, \n # the timespan that can be represented using a 64-bit integer is limited \n # to approximately 584 years \n # '1677-09-22 00:12:43.145225' to '2262-04-11 23:47:16.854775807'\n # see pd.Timestamp.min ; pd.Timestamp.max\n # errors='coerce' create NaT for invalid values\n \n df_urls.datetime = pd.to_datetime(df_urls.datetime, errors='coerce')\n df_visits.datetime = pd.to_datetime(df_visits.datetime, errors='coerce')\n\n df_visits['trans_type'] = df_visits['transition'].apply(lambda t: t & 0xff)\n # https://groups.google.com/a/chromium.org/forum/#!topic/chromium-discuss/r7UQ2i98Lu4\n # https://developer.chrome.com/extensions/history\n\n return (df_visits, df_urls)\n\n\n\n\"\"\"\ndt_1677 = 
datetime(1677, 9, 22, 0, 12, 43, 145225)\ndt_1601 = datetime(1601, 1, 1, 0, 0, 0, 0)\ndt_1970 = datetime(1970, 1, 1, 0, 0, 0, 0)\n(dt_1970 - dt_1601).total_seconds()\n\"\"\"\n\n","sub_path":"spyder/first_snow/get_raw_data.py","file_name":"get_raw_data.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"337166776","text":"\"\"\"\nSelection sort (as written this is really a simple exchange sort: it swaps whenever arr[i] < arr[j])\n\"\"\"\n\narr = [0, 3, 24, 2, 3, 7]\nlength = len(arr)\n\nprint(\"Original list: \", arr)\n\nfor i in range(length):\n for j in range(length):\n if arr[i] < arr[j]:\n arr[i], arr[j] = arr[j], arr[i]\n\nprint(\"Sorted list: \", arr)\n","sub_path":"day2/5_sort.py","file_name":"5_sort.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"5372474","text":"import RPi.GPIO as GPIO \nimport time \n\nGPIO.setmode(GPIO.BCM)\n\ndef start_up():\n print (\"Welcome to Raspberry Pi Network Monitor.\")\n accept = input(\"Would you like to continue? (y/n)\").lower().strip()\n if accept == \"y\":\n print (\"Starting network monitor...\") # placeholder message: the original called RPi.start(), which is undefined\n else:\n print (\"See you next time\")\n GPIO.cleanup() # release the GPIO pins; the original called RPi.off(), which is undefined\n\n","sub_path":"NEMS/RaspberryPi Startup.py","file_name":"RaspberryPi Startup.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"497564066","text":"\"\"\" Experiment \"\"\"\n\nimport tensorflow as tf\nfrom . import monitor\nfrom . import memory\nfrom .. import cells\nfrom .. import utils\n\nclass Experiment(object):\n def __init__(self, dt: float = 1.0, input_start=0, input_delay=0,\\\n training_start=0, training_delay=0) -> None:\n tf.reset_default_graph()\n self.dt = dt\n self.cell_groups = {}\n self.connections = {}\n self.connection_list = []\n self.trainable_connections = {}\n self.connection_ops = []\n self.input_name_list = []\n self.input_ops = []\n self.train_ops = []\n self.monitors = {}\n self.session = tf.Session()\n self.memories = {}\n self.step_counter = 0\n self.input_start = input_start\n self.input_delay = input_delay\n self.input_tracker = -1\n self.training_start = training_start\n self.training_delay = training_delay\n self.training_tracker = -1\n self.experiment_output = {}\n self.has_input = tf.placeholder(tf.bool, shape=())\n self.training_loss = None\n\n def add_input(self, dtype, shape, name):\n input_placeholder = tf.placeholder(dtype, shape=shape, name=name)\n self.input_name_list.append(name)\n return input_placeholder\n\n def add_group_cells(self, name, amount):\n g_cells = cells.Cells(amount)\n self.cell_groups[name] = g_cells\n return g_cells\n\n def add_cells(self, name, g_cells):\n self.cell_groups[name] = g_cells\n return g_cells\n\n def add_state_memory(self, state, memory_size):\n state_memory = memory.Memory(self,state,memory_size)\n self.memories[state] = state_memory\n return state_memory.get_op()\n\n def update_experiment_output(self, new_connection):\n if new_connection.from_group in self.experiment_output and\\\n new_connection.to_group not in self.experiment_output:\n del self.experiment_output[new_connection.from_group]\n\n self.experiment_output[new_connection.to_group] = new_connection\n\n def add_connection(self, name, connection):\n connection.set_experiment(self)\n self.connections[name] = connection\n self.connection_list.insert(0,connection)\n self.update_experiment_output(connection)\n # route the ops exactly once below: input connections feed input_ops, all others connection_ops\n # (the original also appended to connection_ops unconditionally here, duplicating the else branch)\n if 
connection.from_group.name.split(\":\")[0] in self.input_name_list: # if input\n self.input_ops.append(connection.list_ops)\n else:\n self.connection_ops.append(connection.list_ops)\n return connection\n\n def add_trainable_connection(self, name, connection):\n self.add_connection(name, connection)\n self.trainable_connections[name] = connection\n return connection\n\n def initialize_cells(self):\n self.session.run(tf.global_variables_initializer())\n for monitor_key in self.monitors:\n self.monitors[monitor_key].initialize()\n\n def set_training(self, loss, learning_rate, optimizer=\"adam\"):\n model_vars = tf.trainable_variables()\n self.training_loss = loss\n t_vars = []\n for var in model_vars:\n for conn_key in self.trainable_connections:\n if conn_key in var.name:\n t_vars.append(var)\n\n if optimizer == \"adam\":\n train_op = tf.train.AdamOptimizer(learning_rate, beta1=0, beta2=0).minimize(loss, var_list=t_vars)\n else:\n print(\"set_training has set invalid optimizer\")\n\n self.train_ops.append(train_op)\n\n def close(self):\n self.session.close()\n\n def is_input_step(self):\n return ((self.step_counter-self.input_start) // (self.input_delay+1)) > self.input_tracker\n\n def is_training_step(self):\n return ((self.step_counter-self.training_start) // (self.training_delay+1)) > self.training_tracker\n\n def run(self,timesteps: int = 10):\n for step in range(timesteps-1):\n self.run_step()\n utils.progressbar(step+1, timesteps-1)\n\n def run_with_input_list(self, timesteps: int, feed_dict_list):\n feed_counter = 0\n for step in range(timesteps-1):\n if self.is_input_step() or self.is_training_step():\n self.run_step(feed_dict=feed_dict_list[feed_counter])\n feed_counter += 1\n else:\n self.run_step()\n utils.progressbar(step+1, timesteps-1)\n\n def run_with_input_generator(self, timesteps: int, generator):\n for step in range(timesteps-1):\n if self.is_input_step() or self.is_training_step():\n\n feed_dict = generator(self.step_counter)\n self.run_step(feed_dict=feed_dict)\n else:\n self.run_step()\n utils.progressbar(step+1, timesteps-1)\n\n def run_step(self, feed_dict={}):\n feed_dict[self.has_input] = False\n if self.is_input_step():\n feed_dict[self.has_input] = True\n self.input_tracker += 1\n\n for experiment_output_key in self.experiment_output:\n self.session.run(self.experiment_output[experiment_output_key].assign_output,feed_dict=feed_dict)\n\n for memory_key in self.memories:\n self.memories[memory_key].update_state_memory()\n\n for monitor_key in self.monitors:\n self.monitors[monitor_key].record()\n\n if self.is_training_step():\n self.session.run(self.train_ops, feed_dict=feed_dict)\n self.training_tracker += 1\n self.step_counter += 1\n\n def check_group_cells_state(self, group_cells_name, state_name):\n group_cells_name_exists = group_cells_name in self.cell_groups\n assert group_cells_name_exists, \"Error: group_cells_name for group_cells does not exist.\"\n\n state_name_exists = state_name in self.cell_groups[group_cells_name].states\n assert state_name_exists, \"Error: state_name for state does not exist.\"\n\n def get_group_cells_state(self, group_cells_name, state_name):\n self.check_group_cells_state(group_cells_name, state_name)\n\n return self.session.run(self.cell_groups[group_cells_name].states[state_name])\n\n def add_monitor(self, group_cells_name, state_name, timesteps=None):\n self.check_group_cells_state(group_cells_name, state_name)\n\n self.monitors[(group_cells_name,state_name)] =\\\n monitor.Monitor(self, group_cells_name, state_name, 
duration=timesteps)\n\n def get_monitor(self, group_cells_name, state_name):\n self.check_group_cells_state(group_cells_name, state_name)\n\n return self.monitors[(group_cells_name,state_name)].get()\n\n def get_connection(self, conn_name):\n conn_name_exists = conn_name in self.connections\n assert conn_name_exists, \"Error: conn_name for connections does not exist.\"\n\n return self.connections[conn_name]","sub_path":"evodynamic/experiment/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":6293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"641223395","text":"import socket\n\ns=socket.socket()\nhost=socket.gethostname()\nport=8080\ns.bind((host,port))\ns.listen(5)\n\nwhile True:\n c,addr=s.accept()\n print (\"Connection established with: \"+str(addr)) # addr is a (host, port) tuple, so convert it before concatenating\n c.send((\"You have established a connection with: \"+ host).encode()) # sockets send bytes, not str\n c.send(b\"Closing\")\n c.close()\n","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"552542672","text":"import cv2\nimport numpy as np\n\nport1 = int(input('Left port? '))\n\ntry:\n lcam = cv2.VideoCapture(port1)\n assert lcam.isOpened()\nexcept Exception as e:\n print('Failed opening a camera on port {}'.format(port1))\n print(e)\n\nport2 = int(input('Right port? '))\n\ntry:\n rcam = cv2.VideoCapture(port2)\n assert rcam.isOpened()\nexcept Exception as e:\n print('Failed opening a camera on port {}'.format(port2))\n print(e)\n\n\nwhile True:\n try:\n (lr, lim), (rr, rim) = lcam.read(), rcam.read()\n if not (lr and rr): continue\n cv2.imshow('Frame', np.concatenate((lim, rim), axis=1))\n cv2.waitKey(1)\n except KeyboardInterrupt:\n break\n\ndel lcam\ndel rcam\nprint('Bye!')\n","sub_path":"stream-stereo.py","file_name":"stream-stereo.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"511049912","text":"import cv2 as cv\nimport numpy as np\n\ndef mouse_event(event, x, y, flags, param) :\n global radius\n global colorList\n global colorIdx\n\n if event == cv.EVENT_LBUTTONDOWN : \n cv.circle(param, (x, y), radius, colorList[colorIdx], 2)\n cv.imshow('draw', src)\n if event == cv.EVENT_MOUSEWHEEL :\n if flags > 0 :\n radius += 1\n elif flags < 0 and radius > 1 : # keep the radius positive\n radius -= 1\n\n if event == cv.EVENT_RBUTTONDOWN :\n if colorIdx == len(colorList) - 1 : # wrap before running past the last color (the original wrapped one click too late and raised IndexError)\n colorIdx = 0\n else :\n colorIdx +=1\n\nradius = 3\ncolorIdx = 0\ncolorList = [(255,255,255),(255,0,0),(0,255,0),(0,0,255),(255,0,255),(0,255,255)]\n\nsrc = np.zeros((500,500,3), dtype = np.uint8)\n\ncv.imshow('draw', src)\ncv.setMouseCallback('draw', mouse_event, src)\n\ncv.waitKey(0)\n\n\n\n","sub_path":"OpenCV/04-GUI/202-mouseCallBack2.py","file_name":"202-mouseCallBack2.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"23189377","text":"import unittest\n\nfrom training.dataprep.codecs.text_codec import TextCodec\n\nclass TestTextCodec(unittest.TestCase):\n def test_padding(self):\n codec = TextCodec()\n\n assert codec.encode(\"\") == [0]\n assert codec.decode([0]) == \"\"\n\n def test_initial_encoding(self):\n text = \"this is the string the\"\n output = [1, 2, 3, 4, 3]\n codec = TextCodec()\n assert codec.encode(text) == output\n\n def test_initial_decoding(self):\n text = \"this is the string the\"\n codec = TextCodec()\n encoded = 
codec.encode(text)\n assert codec.decode(encoded) == text\n\n def test_extra_encoding(self):\n initial_text = \"this is the string the\"\n extra_text = \"more to the encoding\"\n codec = TextCodec()\n\n codec.encode(initial_text)\n encoded = codec.encode(extra_text)\n\n assert encoded == [5, 6, 3,7]\n \n def test_extra_decoding(self):\n initial_text = \"this is the string the\"\n extra_text = \"more to the encoding\" \n codec = TextCodec()\n\n codec.encode(initial_text)\n encoded = codec.encode(extra_text)\n\n assert codec.decode(encoded) == extra_text\n","sub_path":"test/test_text_codec.py","file_name":"test_text_codec.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"450833427","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pygame\nimport os\nfrom snake_lib import snakeLib\n\n# Initialize pygame\npygame.init()\n\n#Init snake\nsnake = snakeLib()\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 153, 51)\nGREEN2 = (51, 204, 51)\nRED = (255, 0, 0)\n\n# Képernyő felbontás és ablak méret beállítás\nSCREEN_SIZE = (pygame.display.Info().current_w, pygame.display.Info().current_h)\nWINDOW_SIZE = (515, 600)\nMARGIN = 2\nWIDTH = 15\nHEIGHT = 15\n\n# Középen nyissa meg\npos_x = SCREEN_SIZE[0] / 2 - WINDOW_SIZE[0] / 2\npos_y = SCREEN_SIZE[1] / 2 - WINDOW_SIZE[1]\nos.environ['SDL_VIDEO_WINDOW_POS'] = '%i,%i' % (pos_x,pos_y)\nos.environ['SDL_VIDEO_CENTERED'] = '0'\n \n# Set the HEIGHT and WIDTH of the screen\nscreen = pygame.display.set_mode(WINDOW_SIZE)\n\n# Set title of screen\npygame.display.set_caption(\"Amőba\")\n\n# Kiiráshoz:\nfont = pygame.font.Font(None, 25)\n\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\ningame = False\n\nmatch_going = True\n\nsnake_moved = False\n\napple_tick = 0\n\nSTART = False\n\nwhile not ingame:\n for event in pygame.event.get(): # User did something\n if event.type == pygame.QUIT: # If user clicked close\n ingame = True # Flag that we are done so we exit this loop\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n snake.move(\"L\") \n if event.key == pygame.K_RIGHT:\n snake.move(\"R\")\n if event.key == pygame.K_UP:\n snake.move(\"U\")\n if event.key == pygame.K_DOWN:\n snake.move(\"D\")\n if event.key == pygame.K_SPACE:\n START = True\n\n snake_moved = True\n \n # Set the screen background\n screen.fill(BLACK)\n # Draw the grid\n for row in range(30):\n for column in range(30):\n field = snake.get_field()\n \n if field[row][column] == \"S\":\n color = GREEN2\n if snake.get_snake_head()[0] == row and snake.get_snake_head()[1] == column:\n color = GREEN\n elif field[row][column] == \"A\":\n color = RED\n else:\n color = BLACK\n pygame.draw.rect(screen,\n color,\n [(MARGIN + WIDTH) * column + MARGIN,\n (MARGIN + HEIGHT) * row + MARGIN,\n WIDTH,\n HEIGHT])\n\n pygame.draw.line(screen, WHITE, [0,0], [512,0], 3)\n pygame.draw.line(screen, WHITE, [0,0], [0,510], 3)\n pygame.draw.line(screen, WHITE, [512,0], [512,510], 3)\n pygame.draw.line(screen, WHITE, [0,510], [512,510], 3)\n\n # Limit to 60 frames per second\n clock.tick(10)\n\n if START:\n\n # RUSH B effect\n if snake_moved:\n snake_moved = False\n else:\n snake.move()\n\n # Hogy legyen 3 alma\n if snake.get_apples_number() < 3:\n snake.random_apple()\n\n # Régi almák eltüntetése\n if apple_tick == 10:\n snake.remove_old_apple()\n apple_tick = 0\n else:\n apple_tick += 1\n\n #Pontszám\n out_txt = \"Pontszám: \" + 
str(snake.get_snake_length() - 3)\n text = font.render(out_txt,True,RED)\n screen.blit(text, [WINDOW_SIZE[0]/2 - 50,WINDOW_SIZE[1]-30])\n\n # Vége szöveg\n if not snake.is_alive():\n out_txt = \"Vége!!!\"\n text = font.render(out_txt,True,RED)\n screen.blit(text, [WINDOW_SIZE[0]/2 - 35,WINDOW_SIZE[1] - 60])\n else:\n #Pontszám\n out_txt = \"SPACE-re indul!!\"\n text = font.render(out_txt,True,RED)\n screen.blit(text, [WINDOW_SIZE[0]/2 - 50,WINDOW_SIZE[1]-30])\n\n # Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n\n# Be IDLE friendly. If you forget this line, the program will 'hang'\n# on exit.\npygame.quit()\n","sub_path":"Snake_tervezet.ora/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"325285211","text":"import unittest\n\nfrom dummy import enc_string, dec_string\n\nclass DummyTest(unittest.TestCase):\n def test_name(self):\n exp = \"Melvin\"\n step = enc_string(exp)\n result = dec_string(step)\n self.assertNotEqual(exp, step)\n self.assertEqual(exp, result)\n\n def test_even_length_string(self):\n exp = \"appel\"\n step = enc_string(exp)\n result = dec_string(step)\n self.assertNotEqual(exp, step)\n self.assertEqual(exp, result)\n\n def test_odd_length_string(self):\n exp = \"appels\"\n step = enc_string(exp)\n result = dec_string(step)\n self.assertNotEqual(exp, step)\n self.assertEqual(exp, result)\n\n def test_long_string(self):\n exp = long_string\n step = enc_string(exp)\n result = dec_string(step)\n self.assertNotEqual(exp, step)\n self.assertEqual(exp, result)\n\n def test_long_string_one_longer(self):\n exp = long_string + \"s\"\n step = enc_string(exp)\n result = dec_string(step)\n self.assertNotEqual(exp, step)\n self.assertEqual(exp, result)\n\n def test_long_string_five_times(self):\n exp = long_string * 5\n step = enc_string(exp)\n result = dec_string(step)\n self.assertNotEqual(exp, step)\n self.assertEqual(exp, result)\n\n def test_long_string_six_times(self):\n exp = long_string * 6 \n step = enc_string(exp)\n result = dec_string(step)\n self.assertNotEqual(exp, step)\n self.assertEqual(exp, result)\n\n\n\n\nlong_string = \"\"\"An even number is an integer which is \"evenly divisible\" by two. This means that if the integer is divided by 2, it yields no remainder. Zero is an even number because zero divided by two equals zero. Even numbers can be either positive or negative. 
You can tell if any decimal number is an even number if its final digit is an even number.\"\"\"\n\n\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"DailyProgrammer/dummyenc/test_dummy.py","file_name":"test_dummy.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"470091281","text":"from ROAR.control_module.controller import Controller\nfrom ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle\nfrom ROAR.utilities_module.data_structures_models import Transform, Location\nfrom collections import deque\nimport numpy as np\nimport math\nimport logging\nfrom ROAR.agent_module.agent import Agent\nfrom typing import Tuple\n\n\nclass PIDController(Controller):\n def __init__(self, agent: Agent, steering_boundary: Tuple[float, float],\n throttle_boundary: Tuple[float, float], **kwargs):\n super().__init__(agent, **kwargs)\n self.max_speed = self.agent.agent_settings.max_speed\n self.throttle_boundary = throttle_boundary\n self.steering_boundary = steering_boundary\n self.long_pid_controller = LongPIDController(agent=agent,\n throttle_boundary=throttle_boundary,\n max_speed=self.max_speed)\n self.lat_pid_controller = LatPIDController(\n agent=agent,\n steering_boundary=steering_boundary\n )\n self.logger = logging.getLogger(__name__)\n\n def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:\n throttle = self.long_pid_controller.run_in_series(next_waypoint=next_waypoint)\n steering = self.lat_pid_controller.run_in_series(next_waypoint=next_waypoint)\n return VehicleControl(throttle=throttle, steering=steering)\n\n\nclass LongPIDController(Controller):\n def __init__(self, agent, throttle_boundary: Tuple[float, float], max_speed: float,\n dt: float = 0.03, **kwargs):\n super().__init__(agent, **kwargs)\n self.max_speed = max_speed\n self.throttle_boundary = throttle_boundary\n self._error_buffer = deque(maxlen=10)\n\n self._dt = dt\n\n def run_in_series(self, next_waypoint: Transform, **kwargs) -> float:\n target_speed = min(self.max_speed, self.agent.kwargs.get(\"target_speed\", self.max_speed))\n self.logger.debug(f\"Target_Speed: {target_speed} | max_speed = {self.max_speed}\")\n current_speed = Vehicle.get_speed(self.agent.vehicle)\n\n k_p, k_d, k_i = self.find_k_values()\n error = target_speed - current_speed\n\n self._error_buffer.append(error)\n\n if len(self._error_buffer) >= 2:\n # print(self._error_buffer[-1], self._error_buffer[-2])\n _de = (self._error_buffer[-2] - self._error_buffer[-1]) / self._dt\n _ie = sum(self._error_buffer) * self._dt\n else:\n _de = 0.0\n _ie = 0.0\n output = float(np.clip((k_p * error) + (k_d * _de) + (k_i * _ie), self.throttle_boundary[0],\n self.throttle_boundary[1]))\n # self.logger.debug(f\"curr_speed: {round(current_speed, 2)} | kp: {round(k_p, 2)} | kd: {k_d} | ki = {k_i} | \"\n # f\"err = {round(error, 2)} | de = {round(_de, 2)} | ie = {round(_ie, 2)}\")\n # f\"self._error_buffer[-1] {self._error_buffer[-1]} | self._error_buffer[-2] = {self._error_buffer[-2]}\")\n return output\n\n def find_k_values(self) -> Tuple[float, float, float]:\n k_p = self.agent.kwargs.get(\"long_k_p\", 1)\n k_d = self.agent.kwargs.get(\"long_k_d\", 0)\n k_i = self.agent.kwargs.get(\"long_k_i\", 0)\n return k_p, k_d, k_i\n\n\nclass LatPIDController(Controller):\n def __init__(self, agent, steering_boundary: Tuple[float, float],\n dt: float = 0.03, **kwargs):\n super().__init__(agent, **kwargs)\n self.steering_boundary = 
steering_boundary\n self._error_buffer = deque(maxlen=10)\n self._dt = dt\n\n def run_in_series(self, next_waypoint: Transform, **kwargs) -> float:\n # calculate a vector that represent where you are going\n v_begin = self.agent.vehicle.control.location\n v_end = v_begin + Location(\n x=math.cos(math.radians(self.agent.vehicle.control.rotation.pitch)),\n y=0,\n z=math.sin(math.radians(self.agent.vehicle.control.rotation.pitch)),\n )\n v_vec = np.array([v_end.x - v_begin.x, 0, v_end.z - v_begin.z])\n # calculate error projection\n w_vec = np.array(\n [\n next_waypoint.location.x - v_begin.x,\n 0,\n next_waypoint.location.z - v_begin.z,\n ]\n )\n _dot = math.acos(\n np.clip(\n np.dot(v_vec, w_vec) / (np.linalg.norm(w_vec) * np.linalg.norm(v_vec)),\n -1.0,\n 1.0,\n )\n )\n _cross = np.cross(v_vec, w_vec)\n if _cross[1] > 0:\n _dot *= -1\n self._error_buffer.append(_dot)\n if len(self._error_buffer) >= 2:\n _de = (self._error_buffer[-1] - self._error_buffer[-2]) / self._dt\n _ie = sum(self._error_buffer) * self._dt\n else:\n _de = 0.0\n _ie = 0.0\n\n k_p, k_d, k_i = self.find_k_values()\n\n lat_control = float(\n np.clip((k_p * _dot) + (k_d * _de) + (k_i * _ie), self.steering_boundary[0], self.steering_boundary[1])\n )\n return lat_control\n\n def find_k_values(self) -> Tuple[float, float, float]:\n k_p = self.agent.kwargs.get(\"lat_k_p\", 1)\n k_d = self.agent.kwargs.get(\"lat_k_d\", 0)\n k_i = self.agent.kwargs.get(\"lat_k_i\", 0)\n return k_p, k_d, k_i\n","sub_path":"ROAR/control_module/rl_pid_controller.py","file_name":"rl_pid_controller.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"159096193","text":"import sys\n\nimport pyaudio\nimport speech_recognition as a2t\n\n# find the desired microphone or the first input in the list\nmic_index = -1\nspk_index = -1\naudio = pyaudio.PyAudio()\nfor i in range(audio.get_device_count()):\n info = audio.get_device_info_by_index(i)\n print('device:', info['name'])\n if (info['maxInputChannels'] > 0):\n if (info['name'] == 'USB audio CODEC' or mic_index == -1):\n mic_index = i\n if (info['maxOutputChannels'] > 0 and spk_index == -1):\n spk_index = i\n\n\nif spk_index != -1:\n print(\"microphone:\")\n print(audio.get_device_info_by_index(mic_index))\nelse:\n print(\"microphone not found\")\nprint()\nif spk_index != -1:\n print(\"speaker:\")\n print(audio.get_device_info_by_index(spk_index))\nelse:\n print(\"speaker not found\")\nprint()\n\n\nsys.stdout.write(\"initializing...\")\nsys.stdout.flush()\n\nrec = a2t.Recognizer()\nwith a2t.Microphone(mic_index) as mic:\n rec.dynamic_energy_adjustment_ratio = 2\n\n try:\n rec.listen(mic, timeout=1, phrase_time_limit=3)\n except a2t.WaitTimeoutError:\n pass # nothing detected\n\n print(\"done.\")\n\n iteration = 0\n while True:\n text = None\n wake = None\n try:\n if iteration % 5 == 0:\n sys.stdout.write(\"adjusting for noise...\")\n sys.stdout.flush()\n rec.adjust_for_ambient_noise(mic, 5)\n print(\"done.\")\n sys.stdout.write(\"listening... \")\n sys.stdout.flush()\n sample = rec.listen(mic, timeout=2, phrase_time_limit=3)\n sys.stdout.write(\"recognizing... 
\")\n text = rec.recognize_sphinx(sample)\n wake = rec.recognize_sphinx(sample,\n keyword_entries=[(\"computer\", 0.75)])\n except a2t.WaitTimeoutError:\n pass # nothing detected\n except a2t.UnknownValueError:\n pass # wake word not recognized\n # print(\"audio_handler: sample value error\")\n except a2t.RequestError as e:\n print(\"audio_handler: recognition error - {0}\".format(e))\n except Exception as e:\n print(\"audio_handler: unknown error - {0}\".format(e))\n print(\"wake: {:20} text: {}\".format(wake or \"\", text or \"\"))\n iteration = iteration + 1\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"355805058","text":"\ndef dl(url, filename): \n\n\timport requests\n\tres = requests.get(url)\n\tres.raise_for_status()\n\tplayFile = open(filename, 'wb')\n\tfor chunk in res.iter_content(100000):\n\t playFile.write(chunk)\n\tplayFile.close()\n\treturn\n\ndef makesoup(url):\n\timport requests, bs4, re\n\tres = requests.get(url)\n\tres.raise_for_status()\n\tsoup = bs4.BeautifulSoup(res.text, \"lxml\")\n\treturn(soup)\n\t\ndef dlpdf(site,filename):\n\tfrom urllib.request import Request, urlopen # missing import added, matching the local-import style used above\n\treq = Request(site, headers={'User-Agent': 'Mozilla/5.0'})\n\twebpage = urlopen(req).read()\n\twith open(filename, 'wb') as wf:\n\t\twf.write(webpage)\n\ndef capture2dict(readfile, writefile, pat):\n\twith open(readfile, 'r') as rf:\n\t\twith open(writefile, 'w') as wf:\n\t\t\tfor line in rf:\n\t\t\t\tf=pat.search(line)\n\t\t\t\tif f:\n\t\t\t\t\ta=f.group(1)\n\t\t\t\t\tb=f.group(2)\n\t\t\t\t\tprint(a,b)\n\t\t\t\t\td={a: b} # an actual dict entry; the original built a one-element set of a formatted string\n\t\t\t\t\tprint(type(d))\n\ndef capture2(readfile, writefile, pattern):\n\twith open(readfile, 'r') as rf:\n\t\twith open(writefile, 'w') as wf:\n\t\t\tfor line in rf:\n\t\t\t\tf=pattern.search(line)\n\t\t\t\tif f:\n\t\t\t\t\tfilename = f.group(1)\n\t\t\t\t\turl = f.group(2)\n\t\t\t\t\tprint(filename, url)\n\t\t\t\t\t# return(filename, url)\n\ndef ziplines(readfile1, separator, readfile2, writefile):\n\twith open(readfile1, 'r') as rf:\n\t\thash_contents = rf.read()\n\th=hash_contents.split('\\n')\n\n\twith open(readfile2, 'r') as rf:\n\t\ttrim_contents = rf.read()\n\tt=trim_contents.split('\\n')\n\n\n\tj=[x + separator + y for x, y in zip(h, t)]\n\tameld='\\n'.join(j)\n\n\tprint(ameld)\n\t \n\twith open(writefile, 'w') as wf:\n\t\t\twf.write(ameld)\n\t\t\t\ndef sort_file(input_file, output_file):\n\twith open(input_file, 'r') as rf:\n\t\tf_contents = rf.read()\n\t\tarray=f_contents.split('\\n')\n\t\tarray.sort()\n\t\tnew='\\n'.join(array)\n\t\tprint(new)\n\t\twith open(output_file, 'w') as wf:\n\t\t\twf.write(new)\n\ndef stringreplace(readfile,writefile,str,repl):\n\twith open(readfile, \"r\") as rf:\n\t with open(writefile, \"w\") as wf:\n\t for line in rf:\n\t wf.write(line.replace(str, repl))\n\ndef killemptylines(readfile):\n\twith open(readfile, 'r') as rf: # read first; the original iterated a hardcoded '1.txt' while writing to the same handle\n\t\tlines = [line for line in rf if line != '\\n']\n\twith open(readfile, 'w') as wf:\n\t\twf.writelines(lines)","sub_path":"my_fcns.py","file_name":"my_fcns.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"127743668","text":"import torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\nfrom torchvision import transforms, set_image_backend\r\nfrom PIL import Image\r\nimport os\r\nimport math\r\nimport random\r\nimport numpy as np\r\nfrom imgaug import augmenters as iaa\r\nimport imgaug as ia\r\n\r\n# import functools\r\n# 
import accimage\r\n# set_image_backend('accimage')\r\n\r\nclass Normaliztion(object):\r\n \"\"\"\r\n same as mxnet, normalize into [-1, 1]\r\n image = (image - 127.5)/128\r\n \"\"\"\r\n def __call__(self, Image):\r\n new_video_x = (Image - 127.5) / 128\r\n return new_video_x\r\n\r\nclass Videodatasets_Fusion(Dataset):\r\n def __init__(self, dataset_root, ground_truth1, typ1, ground_truth2, typ2, ground_truth3, typ3, sample_duration=16, phase='train'):\r\n\r\n def get_data_list_and_label(data_df, typ):\r\n T = 0 # if typ == 'M' else 1\r\n return [(lambda arr: ('/'.join(arr[T].split('/')[1:]), int(arr[1]), int(arr[2])))(i[:-1].split(' '))\r\n for i in open(data_df).readlines()]\r\n\r\n self.dataset_root = dataset_root\r\n self.sample_duration = sample_duration\r\n self.phase = phase\r\n self.typ1, self.typ2, self.typ3 = typ1, typ2, typ3\r\n self.transform = transforms.Compose([Normaliztion(), transforms.ToTensor()])\r\n\r\n lines = filter(lambda x: x[1] > 7, get_data_list_and_label(ground_truth1, typ1))\r\n lines2 = filter(lambda x: x[1] > 7, get_data_list_and_label(ground_truth2, typ2))\r\n lines3 = filter(lambda x: x[1] > 7, get_data_list_and_label(ground_truth3, typ3))\r\n self.inputs = list(lines)\r\n self.inputs2 = list(lines2)\r\n self.inputs3 = list(lines3)\r\n def transform_params(self, resize=(320, 240), crop_size=224, flip=0.5):\r\n if self.phase == 'train':\r\n left, top = random.randint(0, resize[0] - crop_size), random.randint(0, resize[1] - crop_size)\r\n is_flip = True if random.uniform(0, 1) > flip else False\r\n else:\r\n left, top = (resize[0] - crop_size) // 2, (resize[1] - crop_size) // 2\r\n is_flip = False\r\n return (left, top, left + crop_size, top + crop_size), is_flip\r\n\r\n def __getitem__(self, index):\r\n \"\"\"\r\n Args:\r\n index (int): Index\r\n Returns:\r\n tuple: (image, target) where target is class_index of the target class.\r\n \"\"\"\r\n resize = (320, 240) # default | (256, 256) may be helpful\r\n crop_rect, is_flip = self.transform_params(resize=resize, flip=1.0) # no flip\r\n\r\n def image_to_np(image):\r\n \"\"\"\r\n Returns:\r\n np.ndarray: Image converted to array with shape (width, height, channels)\r\n \"\"\"\r\n image_np = np.empty([image.channels, image.height, image.width], dtype=np.uint8)\r\n image.copyto(image_np)\r\n image_np = np.transpose(image_np, (1, 2, 0))\r\n return image_np\r\n\r\n def transform(img):\r\n img = img.resize(resize)\r\n img = img.crop(crop_rect)\r\n if is_flip:\r\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\r\n return np.array(img.resize((112, 112))) # Image.open\r\n # return image_to_np(img.resize((112, 112))) # accimage.Image\r\n def Sample_Image(imgs_path, sl):\r\n frams = []\r\n for a in sl:\r\n # img = transform(accimage.Image(os.path.join(imgs_path, \"%06d.jpg\" % a)))\r\n img = transform(Image.open(os.path.join(imgs_path, \"%06d.jpg\" % a)))\r\n frams.append(self.transform(img).view(3, 112, 112, 1))\r\n return torch.cat(frams, dim=3).type(torch.FloatTensor)\r\n\r\n sn = self.sample_duration\r\n if self.phase == 'train':\r\n f = lambda n: [(lambda n, arr: n if arr == [] else random.choice(arr))(n * i / sn,\r\n range(int(n * i / sn),\r\n max(int(n * i / sn) + 1,\r\n int(n * (\r\n i + 1) / sn))))\r\n for i in range(sn)]\r\n else:\r\n f = lambda n: [(lambda n, arr: n if arr == [] else int(np.mean(arr)))(n * i / sn, range(int(n * i / sn),\r\n max(int(\r\n n * i / sn) + 1,\r\n int(n * (\r\n i + 1) / sn))))\r\n for i in range(sn)]\r\n\r\n sl = f(self.inputs3[index][1])\r\n\r\n # Iso\r\n data_path = 
os.path.join(os.path.join(self.dataset_root, self.typ1, self.phase), self.inputs[index][0])\r\n clip = Sample_Image(data_path, sl)\r\n\r\n data_path2 = os.path.join(os.path.join(self.dataset_root, self.typ2, self.phase), self.inputs2[index][0])\r\n clip2 = Sample_Image(data_path2, sl)\r\n\r\n data_path3 = os.path.join(os.path.join(self.dataset_root, self.typ3, self.phase), self.inputs3[index][0])\r\n clip3 = Sample_Image(data_path3, sl)\r\n\r\n assert self.inputs[index][2] == self.inputs2[index][2] and self.inputs2[index][2] == self.inputs3[index][2]\r\n return clip.permute(0, 3, 1, 2), self.inputs[index][2], clip2.permute(0, 3, 1, 2), clip3.permute(0, 3, 1, 2)\r\n\r\n def __len__(self):\r\n return len(self.inputs3)","sub_path":"AutoGesture/Fusion/Videodatasets_Fusion.py","file_name":"Videodatasets_Fusion.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"604329922","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*- \n#\n# Copyright 2006,2007 TUBITAK/UEKAE\n# Licensed under the GNU General Public License, version 2.\n# See the file http://www.gnu.org/copyleft/gpl.txt.\n\nfrom pisi.actionsapi import autotools\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import shelltools\nfrom pisi.actionsapi import libtools\nfrom pisi.actionsapi import get\n\nxorgserver = \"xorg-server-1.3.0.0\"\nmesa = \"Mesa-7.0.2\"\n\ndef setup():\n shelltools.cd(xorgserver)\n\n libtools.libtoolize(\"--copy --force\")\n autotools.autoreconf()\n autotools.automake()\n\n autotools.configure(\"--enable-ipv6 \\\n --enable-xvfb \\\n --enable-xnest \\\n --enable-install-libxf86config \\\n --enable-dri \\\n --enable-xorg \\\n --enable-glx-tls \\\n --disable-xorgcfg \\\n --disable-xprint \\\n --disable-static \\\n --with-pic \\\n --enable-composite \\\n --with-mesa-source=%s/xorg-server-7.2/%s/ \\\n --with-dri-driver-path=/usr/lib/xorg/modules/dri \\\n --with-os-name=\\\"Pardus\\\" \\\n --with-os-vendor=\\\"TÜBİTAK, UEKAE\\\" \\\n --sysconfdir=/etc/X11 \\\n --localstatedir=/var \\\n --with-default-font-path=/usr/share/fonts/misc,/usr/share/fonts/dejavu,/usr/share/fonts/100dpi,/usr/share/fonts/75dpi,/usr/share/fonts/TTF,/usr/share/fonts/Type1\" % (get.workDIR(), mesa))\n\ndef build():\n # prepare Mesa for compilation\n shelltools.cd(mesa)\n autotools.make(\"linux-dri-x86\")\n\n shelltools.cd(\"../%s\" % xorgserver)\n autotools.make()\n\ndef install():\n shelltools.cd(xorgserver)\n autotools.rawInstall(\"DESTDIR=%s\" % get.installDIR())\n\n shelltools.cd(\"../%s/\" % mesa)\n autotools.rawInstall(\"INSTALL_DIR=%s/usr DRI_DRIVER_INSTALL_DIR=%s/usr/lib/xorg/modules/dri INCLUDE_DIR=%s/usr/include\" % \n (get.installDIR(), get.installDIR(), get.installDIR()))\n\n # Create glxinfo/gears\n shelltools.cd(\"progs/xdemos/\")\n autotools.make(\"glxinfo\")\n autotools.make(\"glxgears\")\n\n pisitools.insinto(\"/usr/bin\", \"glxinfo\")\n pisitools.insinto(\"/usr/bin\", \"glxgears\")\n\n # Don't install private headers\n pisitools.remove(\"/usr/include/GL/GLw*P.h\")\n\n # Create needed symlinks\n pisitools.dosym(\"libGLU.so.1.3.070002\", \"/usr/lib/libGLU.so.1.3\")\n pisitools.dosym(\"libGLw.so.1.0.0\", \"/usr/lib/libGLw.so.1.0\")\n\n # Moving libGL and friends for dynamic switching\n pisitools.dodir(\"/usr/lib/opengl/lib\")\n pisitools.dodir(\"/usr/lib/opengl/extensions\")\n pisitools.dodir(\"/usr/lib/opengl/include\")\n\n for file in shelltools.ls(\"%s/usr/lib/libGL.so*\" % get.installDIR()):\n 
pisitools.domove(file.replace(get.installDIR(), \"\"), \"/usr/lib/opengl/xorg-x11/lib/\")\n\n    pisitools.domove(\"/usr/lib/libGL.la\", \"/usr/lib/opengl/xorg-x11/lib/\")\n    pisitools.domove(\"/usr/lib/libGL.a\", \"/usr/lib/opengl/xorg-x11/lib/\")\n\n    for file in shelltools.ls(\"%s/usr/lib/xorg/modules/extensions/libglx*\" % get.installDIR()):\n        pisitools.domove(file.replace(get.installDIR(), \"\"), \"/usr/lib/opengl/xorg-x11/extensions/\")\n\n    for file in (\"gl.h\", \"glx.h\", \"glext.h\", \"glxext.h\"):\n        pisitools.domove(\"/usr/include/GL/%s\" % file, \"/usr/lib/opengl/xorg-x11/include\")\n\n    # Default cursor theme\n    pisitools.dodir(\"/usr/share/cursors/xorg-x11/default\")\n    shelltools.echo(\"%s/usr/share/cursors/xorg-x11/default/index.theme\" % get.installDIR(), \"[Icon Theme]\\\nInherits=Jimmac\")\n\n    # Workaround for liveCD\n    pisitools.removeDir(\"/usr/share/X11/xkb/compiled/\")\n    pisitools.dosym(\"/tmp\", \"/usr/share/X11/xkb/compiled\")\n    pisitools.dosym(\"/usr/share/X11/xkb\", \"/etc/X11/xkb\")\n","sub_path":"pardus/tags/2007-EOL/desktop/freedesktop/xorg/xorg-server/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"351243881","text":"from typing import List, Dict, Tuple\nimport random\n\nimport pandas as pd\n\n\n\"\"\"\nModels for the scc game. At the moment, we play on a simple 8 by 8 grid.\n(eight hours, eight possible charging values)\n\"\"\"\n\n\nclass SolarPark:\n    \"\"\"\n    \"\"\"\n    generation: pd.Series\n\n    def __init__(self, generation: pd.Series):\n        self.generation = generation\n\n\nclass Car:\n    id: str\n    initial_charge: int\n    target_charge: int\n    charging_actions: List[int]\n    capacity: int\n    current_charge: int\n\n    def __init__(self, car_id: str, target_charge: int):\n        \"\"\" We make some useful simplifications here\"\"\"\n        self.id = car_id\n        self.initial_charge = 0\n        self.target_charge = self.capacity = target_charge\n        self.charging_actions = [0] * 8\n        self.current_charge = 0\n\n    def get_final_payoff(self) -> int:\n        if self.current_charge == 0:\n            payoff = -100  # Penalty if you let the car leave empty\n        else:\n            payoff = 50 * self.current_charge  # Reward for each energy token in the car\n        return payoff\n\n\nclass ChargingStation:\n    \"\"\"\n    An electric vehicle charging station.\n    It can accept one car at a time.\n    \"\"\"\n    id: str\n    capacity: int\n    car_attendances: List[int]  # int-based IDs of attending cars (>0 means attendance)\n    cars: Dict[str, Car]\n    actions: pd.Series\n\n    def __init__(self, station_id: str, capacity: int, attendances: List[int]):\n        \"\"\"\n        We parse attendances, and create cars accordingly.\n        \"\"\"\n        self.id = station_id\n        self.capacity = capacity\n        self.car_attendances = attendances\n        self.cars = dict()\n        for car_number in [n for n in pd.Series(attendances).drop_duplicates().values if n > 0]:\n            car_id = \"Car%s\" % car_number\n            self.cars[car_id] = Car(car_id=car_id, target_charge=random.randint(2, 4))\n\n    def __repr__(self):\n        return \"<%s: cap:%d att: %s>\" % (self.id, self.capacity, self.car_attendances)\n\n    def has_car_at(self, step: int) -> bool:\n        if step < 0 or step >= len(self.car_attendances):\n            return False\n        return self.car_attendances[step] > 0\n\n    def get_car_at(self, step: int):\n        if self.has_car_at(step):\n            return self.cars[\"Car%s\" % self.car_attendances[step]]\n\n\nclass World:\n    \"\"\"\n    The game world. Initialise this to get the rest.\n    \"\"\"\n\n    solar_park: SolarPark\n    demand: List[int]\n    market_prices: List[int]\n    charging_stations: Dict[str, ChargingStation]\n    current_step: int\n    money: int\n\n    def __init__(self,\n                 solar_generation: pd.Series,\n                 charging_stations: Dict[str, ChargingStation]):\n        assert(solar_generation.size == 8)\n        for gen in solar_generation:\n            assert(gen in range(1, 9))\n        \n        self.solar_park = SolarPark(solar_generation)\n        self.demand = [4, 9, 5, 2, 10, 6, 7, 1]\n        self.market_prices = [100, 60, 30, 15, 8, 4, 2, 1]\n        self.charging_stations = charging_stations\n\n        self.current_step = 0\n        self.money = 1000\n\n    def __repr__(self):\n        return \"<World: step: %d, money: %d, generation: %s, demand: %s, stations: %s>\"\\\n               % (self.current_step, self.money, self.solar_park.generation.values, self.demand, self.charging_stations)\n\n    def imbalance_at(self, time_step: int):\n        \"\"\"\n        Compute the imbalance at time_step\n        \"\"\"\n        charging = 0\n        for station in self.charging_stations.values():\n            car = station.get_car_at(time_step)\n            if car is not None:\n                charging += car.charging_actions[time_step]\n        generation = self.solar_park.generation.values\n        return int(generation[time_step]) - self.demand[time_step] - charging\n\n    def available_tokens(self, time_step: int) -> int:\n        \"\"\"\n        Compute the tokens to display at time_step\n        \"\"\"\n        return self.imbalance_at(time_step) + 4\n\n    def imbalance(self, until: int=8):\n        \"\"\"Aggregated imbalance up until a time step\"\"\"\n        return sum([self.imbalance_at(i) for i in range(until)])\n\n    def calculate_profits(self, action: int) -> int:\n        result = 0\n        index = self.imbalance_at(self.current_step) + 4\n        if action > 0:  # buy\n            for _ in range(action):\n                result -= self.market_prices[index - 1]\n                index -= 1\n        elif action < 0:  # sell\n            for _ in range(abs(action)):\n                result += self.market_prices[index]\n                index += 1\n        return result\n\n    def check_validity_of_orders(self, orders: Dict[str, int]) -> bool:\n        combined_action = sum(orders.values())\n        if self.imbalance_at(self.current_step) - combined_action not in range(-4, 5):\n            raise Exception('Resulting imbalance outside of allowed range')\n        for station_id, action in orders.items():\n            station = self.charging_stations.get(station_id)\n            car = station.get_car_at(self.current_step)\n            if action != 0:\n                if car is None:\n                    raise Exception('An action is attempted on a station where there is no car: ' + station_id)\n                elif abs(action) > station.capacity:\n                    raise Exception('Action is larger than station capacity: ' + station_id)\n                elif car.current_charge + action < 0:\n                    raise Exception('Cars cannot have negative charge: ' + str(car.id))\n        return True\n\n    def next_step(self, orders: Dict[str, int]) -> str:\n        self.check_validity_of_orders(orders)\n\n        # First, get the costs/value of the combined actions\n        overall_action = sum(orders.values())\n        profits = self.calculate_profits(overall_action)\n        self.money += profits\n        summary = \"\"\n        if overall_action > 0:\n            summary = \"Overall you charged %d token(s).\" % overall_action\n        elif overall_action < 0:\n            summary = \"Overall you discharged %d token(s).\" % -overall_action\n        if profits > 0:\n            summary += \" This earned you %d coins (minus %d coin(s) due to charging inefficiency).\"\\\n                       % (profits, -overall_action)\n        elif profits < 0:\n            summary += \" This cost you %d coins, as well as %d coin(s) due to charging inefficiency.\"\\\n                       % (-profits, overall_action)\n\n        # Now record the action on the cars\n        for station_id, action in orders.items():\n            station = self.charging_stations.get(station_id)\n            car = 
station.get_car_at(self.current_step)\n if car is not None:\n car.charging_actions[self.current_step] = action\n car.current_charge += action\n if station.get_car_at(self.current_step + 1) != car:\n # account for payoff when a car leaves\n final_payoff = car.get_final_payoff()\n self.money += final_payoff\n profits += final_payoff\n if summary != \"\":\n summary += \"
\"\n summary += \"The car at %s left. \" % station_id\n if final_payoff > 0:\n summary += \"Your reward for achieving a charge level of %d was %d coins.\"\\\n % (car.current_charge, final_payoff)\n else:\n summary += \"Your penalty for achieving a charge level of %d was %d coins.\" \\\n % (car.current_charge, -final_payoff)\n # account transaction costs for charging\n self.money -= abs(action)\n profits -= abs(action)\n self.current_step += 1\n return summary\n\n def reset(self):\n for station_id in self.charging_stations:\n station = self.charging_stations.get(station_id)\n for turn_j in range(0, self.current_step):\n car = station.get_car_at(turn_j)\n if car is not None:\n car.charging_actions[turn_j] = 0\n car.current_charge = 0\n self.money = 1000\n self.current_step = 0\n return\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"361220232","text":"from config import common_config\nfrom mysql import mysql_read\nfrom http_handle import http_requests\nfrom result_handle import assert_result\nimport unittest\n\ndef setUpModule():\n #模块测试开始前的准备工作\n global url, mysql\n url = 'http://{}/iss/specific/Basic8021q.html'.format(common_config.ip)\n mysql = mysql_read.Mysql('802.1q_basic')\n\ndef tearDownModule():\n mysql.db_close()\n\nclass Basic_Q(unittest.TestCase):\n\n #@unittest.skip('')\n def test_basic_q(self):\n '''Basic 802.1q 端口vlan id设置'''\n data = mysql.db_select('vlanGroupID')\n\n r = assert_result.Result()\n for val in data:\n form = {'Gambit': common_config.gambit,\n 'status': 'Enable',\n 'port1': '1',\n 'port2': '4',\n 'port3': '3',\n 'port4': '1',\n 'port5': '1',\n 'port6': '1',\n 'port7': '1',\n 'port8': '1',\n 'port9': '1',\n 'port10': '1',\n 'port11': '1',\n 'port12': '1',\n 'vlanGroupID': val[0],\n 'ACTION': ''}\n\n http = http_requests.HttpHandle(url, form)\n r.assert_wrong_data(http.response(), val[0])\n r.make_result()\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"testcases/test_basic_q.py","file_name":"test_basic_q.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"113242280","text":"# -*- coding:utf-8 -*-\n# Created by Fan Jin\nimport xlrd,xlwt\nimport time\nfrom datetime import datetime\nimport re\n\n#prepare the files & tables\n\nfile_10 = xlrd.open_workbook('main_v4.3.xls')\nsheet_10 = file_10.sheet_by_index(0)\n\nfile_11 = xlrd.open_workbook('surgery_db_all.xls')\nsheet_11 = file_11.sheet_by_index(0)\n\nfile_20 = xlwt.Workbook(encoding = 'utf-8')\nsheet_20 = file_20.add_sheet('main')\n\nn10 = sheet_10.nrows\nn11 = sheet_11.nrows\nn20 = n11\n\nc10 = sheet_10.ncols\nc11 = sheet_11.ncols\nc20 = 10\n\nstart = [0 for i in range(999999)]\nend = [0 for i in range(999999)]\nhold = [0 for i in range(999999)]\nqc1 = [0 for i in range(999999)]\nname = [\"\" for i in range(999999)]\nsurg = [\"\" for i in range(999999)]\ntime = [0 for i in range(999999)]\nqc2 = [0 for i in range(999999)]\nmatrix = [list(\"\" for i in range(c20)) for i in range(n20)]\n\n# prepare some settings\ndateFormat = xlwt.XFStyle()\ndateFormat.num_format_str = 'yyyy-mm-dd'\n\nwarning_style = xlwt.XFStyle()\nfnt = xlwt.Font()\nfnt.colour_index = 2\nwarning_style.font = fnt\n\ntt = 60*60*24\ns_date = int(datetime(1899, 12, 31).timestamp()/tt)-2\n\n# input the data\nfor i in range(1,n10):\n nn = int(sheet_10.row(i)[0].value)\n start[nn] = 
sheet_10.row(i)[10].value\n    end[nn] = sheet_10.row(i)[11].value\n    hold[nn] = sheet_10.row(i)[12].value\n    qc1[nn] = 0\n    if (hold[nn]<25 or hold[nn]>50):\n        qc1[nn] = 1\n    #qc1[nn] = sheet_10.row(i)[13].value # quality control\n\nk = 0\nfor i in range(1,n11):\n    nn = int(sheet_11.row(i)[0].value)\n    name[nn] = sheet_11.row(i)[1].value\n    surg[nn] = sheet_11.row(i)[2].value\n    time[nn] = sheet_11.row(i)[3].value\n\n    matrix[i][0] = nn\n    matrix[i][1] = name[nn]\n    matrix[i][2] = start[nn]\n    matrix[i][3] = end[nn]\n    matrix[i][4] = hold[nn]\n    matrix[i][5] = qc1[nn]\n    matrix[i][6] = surg[nn]\n    matrix[i][7] = time[nn]\n\n    qc2[nn] = 0\n    if (isinstance(time[nn],float)):\n        matrix[i][8] = int(time[nn] - end[nn]) \n        if (matrix[i][8]<=0 or matrix[i][8]>30*6 ): # temporarily drop cases waiting more than 6 months\n            qc2[nn] = 1\n    matrix[i][9] = qc2[nn]\n\n# output the data\nsheet_20.write(0, 0, label=\"病例号\")\nsheet_20.write(0, 1, label=\"姓名\")\nsheet_20.write(0, 2, label=\"放疗开始时间\")\nsheet_20.write(0, 3, label=\"放疗结束时间\")\nsheet_20.write(0, 4, label=\"放疗天数\")\nsheet_20.write(0, 5, label=\"放疗是否异常\")\nsheet_20.write(0, 6, label=\"手术方式\")\nsheet_20.write(0, 7, label=\"手术时间\")\nsheet_20.write(0, 8, label=\"术前等待天数\")\nsheet_20.write(0, 9, label=\"手术是否异常\")\n\nfor i in range(1,n11):\n    for j in range(10):\n        if (j==2 or j==3 or j==7):\n            sheet_20.write(i, j, matrix[i][j], dateFormat)\n        else:\n            sheet_20.write(i, j, label=matrix[i][j])\n    \n# save the file\nfile_20.save('radiother_v1.xls')\n\n\n","sub_path":"2_final_radiother_time/radiother.py","file_name":"radiother.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"388560075","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Layout,SiteUrls\nimport os\n\n# Create your views here.\ndef pingrequest(request):\n    hostname = SiteUrls.objects.site_url #example\n    response = os.system(\"ping -c 1 \" + hostname)\n    \n    pingreqnew = { \n        'pingreq': response == 0\n    }\n    return pingreqnew\n\n\ndef index(request):\n\n    posts = Layout.objects.all()\n    urlpost = SiteUrls.objects.all()[:20]\n    \n    context = {\n        'posts':posts,\n        'urlpost':urlpost,\n        'pingrequest':pingrequest    \n    }\n\n    return render(request, 'posts/index.html', context)","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"236875885","text":"import numpy as np\n\n\ndef adjust_time(time):\n    return abs(int(time))\n\n\nif __name__ == '__main__':\n    input_file = open('processes_parameters.txt')\n    output_file = open('processes.txt', 'w')\n    processes_count, arrival_time_mean, arrival_time_std, \\\n    burst_time_mean, burst_time_std, priority_lambda = [float(x) for x in input_file.read().split()]\n\n    processes_count = int(processes_count)\n    output_file.write(str(processes_count) + '\\n')\n\n    for i in range(1, processes_count + 1):\n        arrival_time = adjust_time(np.random.normal(arrival_time_mean, arrival_time_std))\n        burst_time = adjust_time(np.random.normal(burst_time_mean, burst_time_std))\n        priority = adjust_time(np.random.poisson(priority_lambda))\n        output_file.write('{0} {1} {2} {3}'.format(\n            i, arrival_time, burst_time, priority))\n        output_file.write('\\n')\n","sub_path":"proccess_generator/process_generator.py","file_name":"process_generator.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"317403378","text":"\"\"\"\n path_functions\n ~~~~~~~~~~~~~~\n\n Contains functions used for computation of accessible mutational paths.\n\n :author: Fahad Khalid\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\nimport igraph\n\n# from memory_profiler import profile\n\nfrom genonets_utils import Utils\n\n\nclass PathAnalyzer:\n # Constructor\n def __init__(self, network, netUtils, delta):\n # Reference to the network on which to perform this\n # analysis\n self.network = network\n\n # Get a reference to the NetworkUtils object\n self.netUtils = netUtils\n\n # Get a reference to the BitSeqManipulator in use\n self.bitManip = netUtils.bitManip\n\n # Keep a copy of the delta value\n self.delta = delta\n\n # Summit vertex, to be populated later.\n self.summitId = None\n\n # Longest mutational path in the network (among all shortest paths)\n self.max_path_length = 0\n\n # Dict to store all accessible paths. {vertexId : [paths]}\n self.allPathsToPeak = self.initPathsToPeak()\n\n def initPathsToPeak(self):\n return {vId: [] for vId in range(len(self.network.vs[\"sequences\"]))}\n\n def getSummitId(self):\n return self.summitId\n\n def getAllPathsToPeak(self):\n return self.allPathsToPeak\n\n def getPathsThruVtxs(self):\n # List: Each element is the No. of paths through the vertex Id\n # \t\tcorresponding to the index\n pathsThruVtx = [0 for i in range(self.network.vcount())]\n\n # Go through accessible paths corresponding to each vertex\n for vtxId in range(self.network.vcount()):\n # Get the list of accessible paths for this vertex\n vtxPaths = self.network.vs[vtxId][\"pathsToSummit\"]\n\n # For each path,\n for path in vtxPaths:\n # For each vertex in the path,\n for vtx in path:\n # Increment the No. of paths that go through\n # this vertex\n pathsThruVtx[vtx] += 1\n\n return pathsThruVtx\n\n # Get the ratio of accessible paths to all paths of the\n # given length.\n def getAccessiblePaths(self, pathLength=0):\n # Stats for the entire network\n totalPaths = 0 # Total mutational paths (only shortest)\n allAccPaths = 0 # No. of accessible paths (only shortest)\n\n # Get the sequence with the highest score. This sequence\n # will represent the global peak. 
All paths use this\n # sequence as the target.\n summit = Utils.getSeqWithMaxScore(self.network, self.bitManip.seqLength)\n\n # Get the vertex object that represents the summit\n trgtVrtx = self.netUtils.getVertex(summit, self.network)\n\n # Store a copy of the summit\n self.summitId = trgtVrtx.index\n\n # Get a list of all sequences in the network\n sequences = self.network.vs[\"sequences\"]\n # Remove the target itself\n sequences.remove(summit)\n\n # For each sequence in the network\n for source in sequences:\n # If we only need to calculate all accessible paths, regardless\n # of the path length,\n if pathLength == 0:\n # Get all shortest paths as well as all accessible\n # shortest paths from source to summit\n self.getShortestAccPaths(source, trgtVrtx, pathLength)\n else: # Ratios should be calculated\n shrtPaths, accPaths = self.getShortestAccPaths(source,\n trgtVrtx,\n pathLength + 1)\n\n # If at least one shortest path of length == 'pathLength' was\n # found,\n if shrtPaths:\n # Increment the counts\n totalPaths += float(len(shrtPaths))\n allAccPaths += float(len(accPaths))\n\n try:\n return float(allAccPaths) / float(totalPaths)\n except ZeroDivisionError:\n return 0\n\n # From within all shortest paths between source sequence and the\n # given target vertex, computes all accessible paths.\n # For computation, returns only those paths that are of the given\n # length. However, all accessible paths are stored regardless of\n # size. This is then used in the visualization.\n # @profile\n def getShortestAccPaths(self, source, trgtVrtx, pathLength):\n # Get the source and target vertices\n srcVrtx = self.netUtils.getVertex(source, self.network)\n\n # Get all shortest paths between source and target\n allShrtPaths = self.network.get_all_shortest_paths(srcVrtx,\n trgtVrtx,\n mode=igraph.OUT)\n\n # If we only need to calculate all accessible paths, regardless\n # of the path length,\n if pathLength == 0:\n # Get all shortest accessible paths\n shrtAccPaths = [\n self.network.vs[path].indices\n for path in allShrtPaths\n if self.isAccessible(path)\n ]\n\n # Store the paths\n self.allPathsToPeak[srcVrtx.index].extend(shrtAccPaths)\n\n # Update the value of the longest path\n if len(allShrtPaths[0]) > self.max_path_length:\n self.max_path_length = len(allShrtPaths[0])\n\n return None, None\n else: # If only paths of 'path length' are required,\n # If the shortest path length is the same as the required\n # length,\n if len(allShrtPaths[0]) == pathLength:\n allShrtAccPaths = [\n self.network.vs[path][\"sequences\"]\n for path in allShrtPaths\n if self.isAccessible(path)\n ]\n else:\n # We did not find a path of the required length\n allShrtPaths = None\n allShrtAccPaths = None\n\n return allShrtPaths, allShrtAccPaths\n\n # Determines whether the given path is an accessible path, i.e.,\n # scores on this path increase monotonously.\n def isAccessible(self, path):\n isAcc = True\n\n # Get a list of escores for the sequences in the path\n escores = self.network.vs[path][\"escores\"]\n\n # Place holder to keep track of the highest score\n # encountered inside the loop\n maxYet = -0.5\n # For each escore in the list\n for i in range(len(escores) - 1):\n # If the current score is higher than the max so far,\n if escores[i] > maxYet:\n # Assign current escore to max\n maxYet = escores[i]\n\n # If the next sequence in the path is in a lower bin,\n if maxYet - self.delta > escores[i + 1]:\n # The increase in e-score is not monotonous. 
Hence,\n # the path is not accessible.\n isAcc = False\n break\n\n return isAcc\n","sub_path":"genonets/path_functions.py","file_name":"path_functions.py","file_ext":"py","file_size_in_byte":7101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"316987959","text":"# In this assignment you must do a Twitter search on any term\n# of your choice.\n# Deliverables:\n# 1) Print each tweet\n# 2) Print the average subjectivity of the results\n# 3) Print the average polarity of the results\n\n# Be prepared to change the search term during demo.\n\nimport tweepy\nfrom textblob import TextBlob\n\n# Unique code from Twitter\naccess_token = \"459652384-jiGFor8jLLPGUnSutcEnr0dqQg576MEhEGIKkZVX\"\naccess_token_secret = \"T9sl2TZZvzjXXq0W2Vjhk6vneY69Onp1qRxY7ubWIKpO8\"\nconsumer_key = \"EwClIX1YYUr8GJt3EV8csg6gp\"\nconsumer_secret = \"F3EnfNJ8WGgTHlz01QF3uecTLCZ1VjU5JRExi31YWI0nvJLvDB\"\n\n# Boilerplate code here\nauth = tweepy.OAuthHandler(consumer_key,consumer_secret)\nauth.set_access_token(access_token,access_token_secret)\n\n# Allows us to Create Tweets, Delete Tweets, and Find Twitter Users\napi = tweepy.API(auth)\n\n# Searches for tweets related to the term \"Barack Obama\"\npublic_tweets = api.search('Barack Obama')\n\n# Prints each tweet \nfor tweet in public_tweets:\n\tprint(tweet.text)\n\n# Creates an empty list that will be used to stored the polarity value of each tweet\npolarity_list = []\n\n# Creates an empty list that will be used to stored the subjectivity value of each tweet\nsubjectivity_list = []\n\n# Iterates through all of the tweets public tweets that are related to the term \"Barack Obama\"\nfor tweet in public_tweets:\n\t# Sentiment Analysis - Understand and Extracting Feelings from Data\n\tanalysis = TextBlob(tweet.text)\n\t# We extract the first value - which represents polarity - from the sentiment analysis data for each tweet and append it to the list of polarity values \n\tpolarity_list.append(analysis.sentiment[0])\n\t# We extract the second value - which represents subjectivity - from the sentiment analysis data for each tweet and append it to the list of subjectivity values\n\tsubjectivity_list.append(analysis.sentiment[1])\n\n# We find the average polarity by finding the sum of all of the values in the polarity list and dividing that by the length of the list \npolarity = sum(polarity_list)/len(polarity_list)\n# We find the average subjectivity by finding the sum of all of the values in the subjectivity list and dividing that by the length of the list\nsubjectivity = sum(subjectivity_list)/len(subjectivity_list)\n\nprint(\"Average subjectivity is\", subjectivity)\nprint(\"Average polarity is\", polarity)\n","sub_path":"twitterhw3b.py","file_name":"twitterhw3b.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"196222607","text":"\nimport h5py\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport keras\nfrom keras import backend as K\nfrom keras.optimizers import Adam\nfrom keras.metrics import categorical_crossentropy\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Model\nfrom keras.layers import Dense,GlobalAveragePooling2D,Dropout,SeparableConv2D,BatchNormalization, Activation\nfrom keras.optimizers import Adam, SGD\nfrom keras.callbacks import ModelCheckpoint\n\nimport keras\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense\nfrom keras.layers import 
Input, Flatten, Dropout, Activation, BatchNormalization\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import losses, models, optimizers\nfrom keras.utils import to_categorical\nfrom keras.activations import softmax\nfrom keras.layers import (Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout,\n GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense)\n\ndef model():\n nclass = 6\n inp = Input(shape=(224,224,3)) \n x = Convolution2D(32, (4,10), padding=\"same\")(inp)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n x = MaxPool2D()(x)\n x = Dropout(rate=0.2)(x)\n \n x = Convolution2D(64, (4,10), padding=\"same\")(x)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n x = MaxPool2D()(x)\n x = Dropout(rate=0.2)(x)\n \n x = Convolution2D(64, (4,10), padding=\"same\")(x)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n x = MaxPool2D()(x)\n x = Dropout(rate=0.4)(x)\n \n x = Convolution2D(128, (4,10), padding=\"same\")(x)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n x = MaxPool2D()(x)\n x = Dropout(rate=0.3)(x)\n \n x = Flatten()(x)\n x = Dense(64)(x)\n x = Dropout(rate=0.2)(x)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n x = Dropout(rate=0.2)(x)\n \n out = Dense(nclass, activation=softmax)(x)\n \n opt = optimizers.Adam(0.001)\n model = models.Model(inputs=inp, outputs=out)\n model.compile(optimizer=opt, loss=losses.categorical_crossentropy, metrics=['acc'])\n return model\n\nif __name__ == \"__main__\":\n\n num_classes = 6\n with h5py.File(os.path.join(\"dataset_RGB_2.h5\"), \"r\") as hf:\n X_train = hf[\"X_tr\"][:]\n y_train = hf[\"y_tr\"][:]\n # X_test = np.expand_dims(hf[\"X_te\"][:], axis=-1)\n X_test = hf[\"X_te\"][:]\n y_test = hf[\"y_te\"][:]\n\n y_train = tf.keras.utils.to_categorical(y_train, num_classes=num_classes)\n y_test = tf.keras.utils.to_categorical(y_test, num_classes=num_classes)\n model = model()\n model.summary()\n\n # Directory where the checkpoints will be saved\n checkpoint_dir = \"ckpt_cnn\"\n # Name of the checkpoint files\n mcp = ModelCheckpoint(f'{checkpoint_dir}/CNN_2.h5', \n monitor=\"val_acc\",\n save_best_only=True, \n save_weights_only=True,\n verbose = 1)\n\n print(\"Training model ...\")\n \n epochs = 100\n batch_size = 64\n model.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=batch_size, epochs=epochs, callbacks=[mcp])\n model.load_weights(f'{checkpoint_dir}/CNN_2.h5')\n\n epochs = 100\n batch_size = 32\n\n datagen = ImageDataGenerator(\n width_shift_range=0.2,\n height_shift_range=0.2,\n zoom_range=0.2)\n datagen.fit(X_train)\n \n model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size), validation_data=(X_test, y_test), steps_per_epoch=len(X_train)/batch_size, epochs=epochs, callbacks=[mcp])","sub_path":"SourceCode/train_model_rgb_2.py","file_name":"train_model_rgb_2.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"643798377","text":"import requests\n\nclass OctoDataRetriever:\n \"\"\"Simple class to retrieve data from the Octopart API as a Python Dict\"\"\"\n API_KEY = '?apikey=KEY HERE'\n url = 'http://octopart.com/api/v3/'\n\n ### 'include' Search Options:\n # short_description\n # datasheets\n # compliance_documents\n # descriptions\n # imagesets\n # specs\n # category_uids\n # external_links\n # reference_designs\n # cad_models\n\n def __init__(self):\n pass\n\n @staticmethod\n def toWeirdFormat(dict):\n 
\"\"\"Weird format is the way one enters data into the url to search\"\"\"\n string = ''\n for key, val in enumerate(dict):\n string += \"&{}={}\".format(val, dict[val])\n return string\n\n def partSearch(self, q, start=0, limit=20, include=(), returnURL=False, filters=()):\n \"\"\"Searches for a specific part given a search string q. Most commonly used\"\"\"\n\n includeString = ''\n for val in include:\n includeString += \"&include[]={}\".format(val)\n\n filterString = ''\n for val in filters:\n val = val.replace(' ', '+')\n filterString += val\n\n\n url = OctoDataRetriever.url\n url += 'parts/search{}{}{}{}'.format(\n OctoDataRetriever.API_KEY,\n self.toWeirdFormat({'q': q, \"start\": start, \"limit\": limit}),\n includeString,\n filterString\n )\n\n if returnURL:\n return requests.get(url).json(), url\n else:\n return requests.get(url).json()\n\n def partUID(self, uid, returnURL=False, include=()):\n \"\"\"Searches for a specific item through its Octopart UID\"\"\"\n includeString = ''\n for val in include:\n includeString += \"&include[]={}\".format(val)\n\n url = OctoDataRetriever.url\n url += 'parts/{}{}{}'.format(uid, OctoDataRetriever.API_KEY, includeString)\n if returnURL:\n return requests.get(url).json(), url\n else:\n return requests.get(url).json()\n\n def brandsSearch(self, q, start=0, limit=10, include=(), returnURL=False):\n \"\"\"Searches for a brand given a search string q\"\"\"\n includeString = ''\n for val in include:\n includeString += \"&include[]={}\".format(val)\n\n url = OctoDataRetriever.url\n url += 'brands/search{}{}{}'.format(\n OctoDataRetriever.API_KEY,\n self.toWeirdFormat({'q': q, \"start\": start, \"limit\": limit}),\n includeString\n )\n\n if returnURL:\n return requests.get(url + OctoDataRetriever.API_KEY).json(), url\n else:\n return requests.get(url + OctoDataRetriever.API_KEY).json()\n\n def sellersSearch(self, q, start=0, limit=10,include=(), returnURL=False):\n \"\"\"Searches for a seller given search string q\"\"\"\n includeString = ''\n for val in include:\n includeString += \"&include[]={}\".format(val)\n\n url = OctoDataRetriever.url\n url += 'sellers/search{}{}{}'.format(\n OctoDataRetriever.API_KEY,\n self.toWeirdFormat({'q': q, \"start\": start, \"limit\": limit}),\n includeString\n )\n\n if returnURL:\n return requests.get(url + OctoDataRetriever.API_KEY).json(), url\n else:\n return requests.get(url + OctoDataRetriever.API_KEY).json()","sub_path":"OctoIO-.py","file_name":"OctoIO-.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"92227409","text":"#!/usr/bin/python\n#\n# Copyright 2018-2020 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n\n\"\"\"\n Polyaxon SDKs and REST API specification.\n\n Polyaxon SDKs and REST API specification. 
# noqa: E501\n\n The version of the OpenAPI document: 1.4.4\n Contact: contact@polyaxon.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom polyaxon_sdk.configuration import Configuration\n\n\nclass V1Action(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'hub_ref': 'str',\n 'label': 'str',\n 'many': 'str',\n 'params': 'dict(str, V1Param)',\n 'run_patch': 'object'\n }\n\n attribute_map = {\n 'hub_ref': 'hub_ref',\n 'label': 'label',\n 'many': 'many',\n 'params': 'params',\n 'run_patch': 'run_patch'\n }\n\n def __init__(self, hub_ref=None, label=None, many=None, params=None, run_patch=None, local_vars_configuration=None): # noqa: E501\n \"\"\"V1Action - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._hub_ref = None\n self._label = None\n self._many = None\n self._params = None\n self._run_patch = None\n self.discriminator = None\n\n if hub_ref is not None:\n self.hub_ref = hub_ref\n if label is not None:\n self.label = label\n if many is not None:\n self.many = many\n if params is not None:\n self.params = params\n if run_patch is not None:\n self.run_patch = run_patch\n\n @property\n def hub_ref(self):\n \"\"\"Gets the hub_ref of this V1Action. # noqa: E501\n\n\n :return: The hub_ref of this V1Action. # noqa: E501\n :rtype: str\n \"\"\"\n return self._hub_ref\n\n @hub_ref.setter\n def hub_ref(self, hub_ref):\n \"\"\"Sets the hub_ref of this V1Action.\n\n\n :param hub_ref: The hub_ref of this V1Action. # noqa: E501\n :type: str\n \"\"\"\n\n self._hub_ref = hub_ref\n\n @property\n def label(self):\n \"\"\"Gets the label of this V1Action. # noqa: E501\n\n\n :return: The label of this V1Action. # noqa: E501\n :rtype: str\n \"\"\"\n return self._label\n\n @label.setter\n def label(self, label):\n \"\"\"Sets the label of this V1Action.\n\n\n :param label: The label of this V1Action. # noqa: E501\n :type: str\n \"\"\"\n\n self._label = label\n\n @property\n def many(self):\n \"\"\"Gets the many of this V1Action. # noqa: E501\n\n\n :return: The many of this V1Action. # noqa: E501\n :rtype: str\n \"\"\"\n return self._many\n\n @many.setter\n def many(self, many):\n \"\"\"Sets the many of this V1Action.\n\n\n :param many: The many of this V1Action. # noqa: E501\n :type: str\n \"\"\"\n\n self._many = many\n\n @property\n def params(self):\n \"\"\"Gets the params of this V1Action. # noqa: E501\n\n\n :return: The params of this V1Action. # noqa: E501\n :rtype: dict(str, V1Param)\n \"\"\"\n return self._params\n\n @params.setter\n def params(self, params):\n \"\"\"Sets the params of this V1Action.\n\n\n :param params: The params of this V1Action. # noqa: E501\n :type: dict(str, V1Param)\n \"\"\"\n\n self._params = params\n\n @property\n def run_patch(self):\n \"\"\"Gets the run_patch of this V1Action. # noqa: E501\n\n\n :return: The run_patch of this V1Action. 
# noqa: E501\n :rtype: object\n \"\"\"\n return self._run_patch\n\n @run_patch.setter\n def run_patch(self, run_patch):\n \"\"\"Sets the run_patch of this V1Action.\n\n\n :param run_patch: The run_patch of this V1Action. # noqa: E501\n :type: object\n \"\"\"\n\n self._run_patch = run_patch\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, V1Action):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, V1Action):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"sdks/python/http_client/v1/polyaxon_sdk/models/v1_action.py","file_name":"v1_action.py","file_ext":"py","file_size_in_byte":6347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"364217200","text":"import json\nfrom itertools import chain\n\nfrom django.http import HttpResponse\nfrom django.utils.html import format_html\nfrom django.forms.widgets import Select\nfrom django.utils.encoding import force_text\nfrom django.utils.safestring import mark_safe\nfrom django.contrib.auth.decorators import login_required\n\nfrom flexselect import (FlexSelectWidget, choices_from_instance, \n details_from_instance, instance_from_request)\n\n\nclass CustomSelectWidget(Select):\n\n def render_option(self, selected_choices, option_value, option_label):\n if option_value is None:\n option_value = ''\n option_value = force_text(option_value)\n if option_value in selected_choices:\n selected_html = mark_safe(' selected=\"selected\"')\n if not self.allow_multiple_selected:\n # Only allow for a single selection.\n selected_choices.remove(option_value)\n else:\n selected_html = ''\n return format_html('',\n option_value,\n selected_html,\n force_text(option_label))\n\n\n def render_options(self, choices, selected_choices):\n # Normalize to strings.\n selected_choices = set(force_text(v) for v in selected_choices)\n output = []\n for option_value, option_label in chain(self.choices, choices):\n if isinstance(option_label, (list, tuple)):\n output.append(format_html('', force_text(option_value)))\n for option in option_label:\n output.append(self.render_option(selected_choices, *option))\n output.append('')\n else:\n output.append(self.render_option(selected_choices, option_value, option_label))\n\n return '\\n'.join(output)\n\n\n@login_required\ndef field_changed(request):\n \"\"\"\n Ajax callback called when a trigger field or base field has changed. 
Returns\n    html for new options and details for the dependent field as json.\n    \"\"\"\n    hashed_name = request.POST.__getitem__('hashed_name')\n    widget = FlexSelectWidget.instances[hashed_name] \n    instance = instance_from_request(request, widget)\n    \n    if bool(int(request.POST.__getitem__('include_options'))):\n        choices = choices_from_instance(instance, widget)\n        options = CustomSelectWidget(choices=choices).render_options([], [])\n    else:\n        options = None\n    \n    return HttpResponse(json.dumps({\n        'options' : options,\n        'details': details_from_instance(instance, widget),\n    }))\n","sub_path":"flexselect/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"30926874","text":"from django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom app.models import (user_watchlist, validate_user, Timeframe, validate_stock, toggle_watchlist_entry)\nfrom app.views.core import show_companies\n\n@login_required\ndef show_watched(request):\n    validate_user(request.user)\n    matching_companies = user_watchlist(request.user)\n\n    timeframe = Timeframe()\n    return show_companies(\n        matching_companies,\n        request,\n        timeframe,\n        {\n            \"title\": \"Stocks you are watching\",\n            \"sentiment_heatmap_title\": \"Watchlist stocks sentiment: {}\".format(timeframe.description),\n        }\n    )\n\n\ndef redirect_to_next(request, fallback_next=\"/\"):\n    \"\"\"\n    Call this function in your view once you have deleted some database data: set the 'next' query href\n    param to where the redirect should go to. If not specified '/' will be assumed. Not permitted to\n    redirect to another site.\n    \"\"\"\n    # redirect will trigger a redraw which will show the purchase since next will be the same page\n    assert request is not None\n    if request.GET is not None:\n        next_href = request.GET.get(\"next\", fallback_next)\n        assert next_href.startswith(\"/\")  # PARANOIA: must be same origin\n        return HttpResponseRedirect(next_href)\n    else:\n        return HttpResponseRedirect(fallback_next)\n\n\n@login_required\ndef toggle_watched(request, stock=None):\n    validate_stock(stock)\n    validate_user(request.user)\n    toggle_watchlist_entry(request.user, stock)\n    return redirect_to_next(request)\n","sub_path":"src/viewer/app/views/watchlist.py","file_name":"watchlist.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"426337752","text":"'''\r\ndef twoSum(nums, target):\r\n    hashmap={}\r\n    for ind,num in enumerate(nums):\r\n        hashmap[num] = ind\r\n    for i,num in enumerate(nums):\r\n        #j = hashmap[target - num]\r\n        j = hashmap.get(target - num)\r\n        #if the list contains duplicate elements, you must use j = hashmap.get(target - num)\r\n        if j is not None and i!=j:\r\n        \treturn [i,j]\r\n\r\nnums = [2,7,11,15]\r\ntarget = 9\r\na = twoSum(nums,target)\r\nprint(a)\r\nnums1 = [1,5,5,7]\r\ntarget2 = 10\r\nb = twoSum(nums1,target2)\r\nprint(b)\r\n'''\r\n\r\n'''\r\nenumerate() usage 1\r\n>>>seasons = ['Spring', 'Summer', 'Fall', 'Winter']\r\n>>> list(enumerate(seasons))\r\n[(0, 'Spring'), (1, 'Summer'), (2, 'Fall'), (3, 'Winter')]\r\n>>> list(enumerate(seasons, start=1))       # index starts at 1\r\n[(1, 'Spring'), (2, 'Summer'), (3, 'Fall'), (4, 'Winter')]\r\n\r\nenumerate() usage 2\r\n>>>seq = ['one', 'two', 'three']\r\n>>> for i, element in enumerate(seq):\r\n...     print i, element\r\n... \r\n0 one\r\n1 two\r\n2 three\r\n'''\r\n\r\n'''Method 4:\r\n\r\nSimilar to method 2, but num2 does not need to be looked up across the whole dict. We can look it up in the dict built from the entries before num1, so a single pass is enough.'''\r\n\r\ndef twoSum(nums, target):\r\n    hashmap={}\r\n    for i,num in enumerate(nums):\r\n        if hashmap.get(target - num) is not None:\r\n            return [i,hashmap.get(target - num)]\r\n        hashmap[num] = i #this line must come after the if check; it handles duplicate values in the list and the case target - num == num\r\n'''def two_sum(nums, target):\r\n    \"\"\"More intuitive this way: look up the dict while iterating over the list\"\"\"\r\n    dct = {}\r\n    for i, n in enumerate(nums):\r\n        if target - n in dct:\r\n            return [i, dct[target - n]]\r\n        dct[n] = i'''\r\n'''That said, method 4 does not gain as much speed over method 3 as method 2 does over method 1. It runs in a bit over 70 ms.'''\r\nnums = [2,7,11,15]\r\ntarget = 9\r\na = twoSum(nums,target)\r\nprint(a)\r\nnums1 = [1,5,5,7]\r\ntarget2 = 10\r\nb = twoSum(nums1,target2)\r\nprint(b)","sub_path":"python基础笔记/利用字典算两数之和.py","file_name":"利用字典算两数之和.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"483888251","text":"\"\"\"\nUse two threads:\none prints the 52 numbers 1-52,\nthe other prints the letters A-Z.\nThe two threads run together, and the required output order is\n12A34B...5152Z\nA common question in written interviews.\n\"\"\"\n\nfrom threading import Lock, Thread\nimport time\n\nlock01 = Lock()\nlock02 = Lock()\n\n\ndef print_int():\n    for i in range(1, 52, 2):\n        lock01.acquire()\n        print(i)\n        print(i + 1)\n        lock02.release()\n\n\ndef print_alpha():\n    for i in range(65, 91):\n        lock02.acquire()\n        print(chr(i))\n        lock01.release()\n\n\nt1 = Thread(target=print_alpha)\nt2 = Thread(target=print_int)\nlock02.acquire()\nt2.start()\nt1.start()\n\n","sub_path":"month02/thread/important_exercise.py","file_name":"important_exercise.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"396744886","text":"from app.base.base_views import BaseView, user_required\nfrom app import app\n\nclass IndexView(BaseView):\n\n    def __init__(self):\n        self.title = \"Página Inicial\"\n        self.template = \"/frontend/index_view.html\"\n\n\nviewIndex = IndexView.as_view('root')\napp.add_url_rule('/', view_func=viewIndex)\n\n","sub_path":"web/app/base/index_view.py","file_name":"index_view.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"635085029","text":"from compiler.frontend import ASTVisitor\nfrom compiler.ast import *\nfrom compiler.entity import *\nfrom compiler.type import *\nfrom compiler.utils import *\n\nclass TypeChecker(ASTVisitor):\n    bool_type = BoolType()\n    integer_type = IntegerType()\n    string_type = StringType()\n\n    loop_depth = 0\n    current_function = None\n    scope = None\n    malloc_func = None\n\n    def __init__(self, scope):\n        self.scope = scope\n        self.malloc_func = self.scope.lookup_current_level( \\\n            LIB_PREFIX + 'malloc')\n    @staticmethod\n    def check_compatibility(loc, real, expect, is_expected):\n        if not real.is_compatible(expect):\n            message = ''\n            if is_expected:\n                message += 'Invalid type ' + str(real) + \\\n                           ', expecting ' + str(expect)\n            else:\n                message += 'Incompatible type ' + str(real) + \\\n                           ', and ' + str(expect)\n            raise SemanticError(loc, message)\n    def visit(self, node):\n        if isinstance(node, FunctionDefNode):\n            self.current_function = node.entity\n            if not self.current_function.is_constructor and \\\n                    not self.current_function.return_type:\n                raise SemanticError(node.location, \\\n                    'expecting a return type')\n            self.visit_stmt(node.entity.body)\n            self.current_function = None\n            return\n        elif isinstance(node, VariableDefNode):\n            init = 
node.entity.initializer\n if init:\n self.visit_expr(init)\n TypeChecker.check_compatibility(node.location, \\\n init.type, node.entity.type, False)\n if node.entity.type.is_void:\n raise SemanticError(node.location, 'Cannot \\\n set void type for variable')\n return\n elif isinstance(node, IfNode):\n self.visit_expr(node.cond)\n if node.then_body:\n self.visit_stmt(node.then_body)\n if node.else_body:\n self.visit_stmt(node.else_body)\n TypeChecker.check_compatibility(node.location, \\\n node.cond.type, self.bool_type, True)\n return\n elif isinstance(node, WhileNode):\n self.visit_expr(node.cond)\n if node.body:\n self.loop_depth += 1\n self.visit_stmt(node.body)\n self.loop_depth -= 1\n TypeChecker.check_compatibility(node.location, \\\n node.cond.type, self.bool_type, True)\n return\n elif isinstance(node, ForNode):\n if node.init:\n self.visit_expr(node.init)\n if node.cond:\n self.visit_expr(node.cond)\n TypeChecker.check_compatibility(node.location, \\\n node.cond.type, self.bool_type, True)\n if node.init:\n self.visit_expr(node.incr)\n if node.body:\n self.loop_depth += 1\n self.visit_stmt(node.body)\n self.loop_depth -= 1\n return\n elif isinstance(node, BreakNode):\n if self.loop_depth <= 0:\n raise SemanticError(node.location, 'unexpected break')\n return\n elif isinstance(node, ContinueNode):\n if self.loop_depth <= 0:\n raise SemanticError(node.location, 'unexpected continue')\n return\n elif isinstance(node, ReturnNode):\n if not self.current_function:\n raise SemanticError(node.location, \\\n 'caannot return outside function')\n if self.current_function.is_constructor:\n if node.expr:\n raise SemanticError(node.location, \\\n 'cannot return in constructor')\n else:\n if node.expr:\n self.visit_expr(node.expr)\n TypeChecker.check_compatibility(node.location, \\\n node.expr.type, \\\n self.current_function.return_type, True)\n else:\n if not self.current_function.return_type.is_void:\n raise SemanticError(node.location, \\\n 'cannot return to void')\n return\n elif isinstance(node, AssignNode):\n self.visit_expr(node.lhs)\n self.visit_expr(node.rhs)\n if not node.lhs.is_assignable:\n raise SemanticError(node.location, \\\n 'LHS of \\'=\\' is not assignable')\n TypeChecker.check_compatibility(node.location, \n node.lhs.type, node.rhs.type, False)\n return\n elif isinstance(node, UnaryOpNode):\n self.visit_expr(node.expr)\n expect = None\n op = node.operator\n if op == UnaryOpNode.UnaryOp.PRE_INC or \\\n op == UnaryOpNode.UnaryOp.PRE_DEC or \\\n op == UnaryOpNode.UnaryOp.SUF_INC or \\\n op == UnaryOpNode.UnaryOp.SUF_DEC or \\\n op == UnaryOpNode.UnaryOp.MINUS or \\\n op == UnaryOpNode.UnaryOp.ADD or \\\n op == UnaryOpNode.UnaryOp.BIT_NOT:\n expect = self.integer_type\n elif op == UnaryOpNode.UnaryOp.LOGIC_NOT:\n expect = self.bool_type\n else:\n raise InternalError('invalid operator ' + str(op))\n TypeChecker.check_compatibility(node.location, \\\n node.expr.type, expect, True)\n if isinstance(node, PrefixOpNode):\n if op == UnaryOpNode.UnaryOp.PRE_INC or \\\n op == UnaryOpNode.UnaryOp.PRE_DEC:\n node.is_assignable = True\n return\n elif isinstance(node, SuffixOpNode):\n if not node.expr.is_assignable:\n raise SemanticError(node.location, \\\n 'lvalue is needed')\n return\n elif isinstance(node, BinaryOpNode):\n self.visit_expr(node.left)\n self.visit_expr(node.right)\n ltype = node.left.type\n rtype = node.right.type\n op = node.operator\n if op == BinaryOpNode.BinaryOp.MUL or \\\n op == BinaryOpNode.BinaryOp.DIV or \\\n op == BinaryOpNode.BinaryOp.MOD or \\\n op == 
BinaryOpNode.BinaryOp.SUB or \\\n op == BinaryOpNode.BinaryOp.LSHIFT or \\\n op == BinaryOpNode.BinaryOp.RSHIFT or \\\n op == BinaryOpNode.BinaryOp.BIT_AND or \\\n op == BinaryOpNode.BinaryOp.BIT_XOR or \\\n op == BinaryOpNode.BinaryOp.BIT_OR:\n TypeChecker.check_compatibility(node.left.location, \\\n ltype, self.integer_type, True)\n TypeChecker.check_compatibility(node.right.location, \\\n rtype, self.integer_type, True)\n node.type = ltype\n elif op == BinaryOpNode.BinaryOp.GT or \\\n op == BinaryOpNode.BinaryOp.LE or \\\n op == BinaryOpNode.BinaryOp.GE or \\\n op == BinaryOpNode.BinaryOp.LT:\n TypeChecker.check_compatibility(node.left.location, \\\n ltype, rtype, False)\n if not ltype.is_full_comparable and \\\n not rtype.is_full_comparable:\n raise SemanticError(node.location, \\\n 'Cannot compare twp ' + str(ltype))\n node.type = self.bool_type\n elif op == BinaryOpNode.BinaryOp.EQ or \\\n op == BinaryOpNode.BinaryOp.NE:\n TypeChecker.check_compatibility(node.location, \\\n ltype, rtype, True)\n if not ltype.is_half_comparable and \\\n not rtype.is_half_comparable:\n raise SemanticError(node.location, \\\n 'Cannot compare two ' + str(ltype))\n node.type = self.bool_type\n elif op == BinaryOpNode.BinaryOp.LOGIC_AND or \\\n op == BinaryOpNode.BinaryOp.LOGIC_OR:\n TypeChecker.check_compatibility(node.left.location, \\\n ltype, self.bool_type, True)\n TypeChecker.check_compatibility(node.right.location, \\\n rtype, self.bool_type, True)\n node.type = ltype\n elif op == BinaryOpNode.BinaryOp.ADD:\n TypeChecker.check_compatibility(node.location, \\\n ltype, rtype, True)\n if not ltype.is_integer and not ltype.is_string:\n raise SemanticError(node.location, \\\n 'Cannot add two ' + str(ltype))\n node.type = ltype\n else:\n raise InternalError('invalid operator ' + str(node.operator))\n return\n elif isinstance(node, FuncallNode):\n self.visit_expr(node.expr)\n type = node.expr.type\n if not type.is_function:\n raise SemanticError(node.location, 'invalid type : ' \\\n + str(type) + ' expecting function')\n entity = type.entity\n if self.current_function:\n self.current_function.add_call(entity)\n params = entity.params\n exprs = node.args\n base = 0\n if isinstance(node.expr, MemberNode) or \\\n (isinstance(node, VariableNode) and \\\n node.expr.is_member):\n base = 1\n if len(params) - base != len(exprs):\n raise SemanticError(node.location, \\\n 'Incompatible parameter number : ' + \\\n str(len(exprs) + ', expecting ' + len(params) - base))\n for i in range(base, len(params)):\n expr = exprs[i - base]\n self.visit_expr(expr)\n TypeChecker.check_compatibility(expr.location, \\\n expr.type, params[i].type, True)\n\n if base != 0:\n if isinstance(node.expr, MemberNode):\n node.add_thispointer(node.expr.expr)\n else:\n node.add_thispointer(VariableNode( \\\n self.current_function.params[0]))\n return\n elif isinstance(node, ArefNode):\n self.visit_expr(node.expr)\n self.visit_expr(node.index)\n if not node.expr.type.is_array:\n raise SemanticError(node.location, 'invalid reference of '\\\n + str(node.expr.type) + ', expecting an array')\n TypeChecker.check_compatibility(node.index.location, node.index.type, \\\n self.integer_type, True)\n node.type = node.expr.type.base_type\n return\n elif isinstance(node, CreatorNode):\n if self.current_function:\n self.current_function.add_call(self.malloc_func)\n if node.exprs:\n for expr in node.exprs:\n self.visit_expr(expr)\n TypeChecker.check_compatibility(expr.location, \\\n expr.type, self.integer_type, True)\n return\n elif isinstance(node, 
MemberNode):\n self.visit_expr(node.expr)\n type = node.expr.type\n if type.is_class:\n entity = type.entity\n member = entity.scope.lookup_current_level(node.member)\n if not member:\n raise SemanticError(node.location, \\\n 'Cannot resolve member :' + str(node.member))\n node.entity = member\n node.type = member.type\n elif type.is_array or type.is_string:\n member = None\n if type.is_array:\n member = ArrayType.scope.lookup_current_level(\\\n node.member)\n else:\n member = StringType.scope.lookup_current_level(\\\n node.member)\n if not member:\n raise SemanticError(node.location, \\\n 'Cannot resolve member : ' + node.member)\n node.entity = member\n else:\n raise SemanticError(node.location, 'Invalid get member \\\n operation : ' + str(node.expr.type) + \\\n ', expecting class, array or string')\n return\n return super().visit(node)\n","sub_path":"compiler/frontend/type_checker.py","file_name":"type_checker.py","file_ext":"py","file_size_in_byte":12673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"408009841","text":"from module import *\nfrom moduleElement import *\n\n\n\nclass Student(object):\n\n def __init__(self, name):\n ######## CODE MISSING HERE 1.\n self.name = name\n self.modules = [] # 2.\n self.grades = {} # 3.\n\n\n def add_module(self,title):\n ######## CODE MISSING HERE 4.\n # title is an instance of class Module\n self.modules.append(title)\n # 5.\n self.grades[title] = title.get_grade()\n\n\n def get_list_modules(self):\n ######## CODE MISSING HERE 6.\n print(\"Modules of Student {}\".format(self.name))\n for module in self.modules:\n print(\"\\t{}\".format(module.get_title()))\n\n def get_grades(self):\n ######## CODE MISSING HERE 7.\n print(\"Grades of Student {}\".format(self.name))\n for module, grade in self.grades.items():\n # I'm not sure whether accessing the module title in the following\n # way is asked for.. 
I do it that way because I don't want to change the Course.__str__()\n            # method in the other file (because of the other expected outputs there).\n            print(\"\\t{}: {}\".format(module.get_title(),grade))\n\n\n### test cases ###\n\nme = Student(\"FirstName LastName\")\nme.add_module(info1)\n#me.add_module(math1)\nme.get_list_modules()\n# expected output:\n# Modules of Student FirstName LastName:\n#\tInfo 1\n\nme.get_grades()\n# expected output:\n# Grades of Student FirstName LastName:\n#\tInfo 1: 6\n","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"381561011","text":"\n\nfrom django import forms\nfrom django.utils import timezone\n\n\nclass OrderForm(forms.Form):\n    name = forms.CharField()\n    last_name = forms.CharField(required=False)\n    phone = forms.CharField()\n    buying_type = forms.ChoiceField(widget=forms.Select(), choices=([(\"self\", \"Самовывоз\"), (\"delivery\", \"Доставка\")]))\n    date = forms.DateField(widget=forms.SelectDateWidget(), initial=timezone.now)\n    address = forms.CharField(required=False)\n    comments = forms.CharField(widget=forms.Textarea, required=False)\n\n\n    def __init__(self, *args, **kwargs):\n        super(OrderForm, self).__init__(*args, **kwargs)\n        self.fields['name'].label = 'Имя'\n        self.fields['last_name'].label = 'Фамилия'\n        self.fields['phone'].label = 'Контактный телефон'\n        self.fields['phone'].help_text = 'Пожалуйста, указывайте реальный номер телефона, по которому с Вами можно связаться'\n        self.fields['buying_type'].label = 'Способ получения'\n        self.fields['address'].label = 'Адрес доставки'\n        self.fields['address'].help_text = '*Обязательно указывайте город!'\n        self.fields['comments'].label = 'Комментарии к заказу'\n        self.fields['date'].label = 'Дата доставки'\n        self.fields['date'].help_text = 'Доставка производится на следующий день после оформления заказа. 
Менеджер с Вами предварительно свяжется.'","sub_path":"econapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"452214728","text":"from scipy import spatial\nimport numpy as np\n\nclass KDTree(object):\n def __init__(self):\n\n self.tree = None\n self.is_initialized = False\n self.x = []\n self.y = []\n self.z = []\n\n def init_tree(self, waypoints):\n\n for waypoint in waypoints:\n self.x.append(waypoint.pose.pose.position.x)\n self.y.append(waypoint.pose.pose.position.y)\n self.z.append(waypoint.pose.pose.position.z)\n\n data = zip(self.x,self.y,self.z)\n\n self.tree = spatial.KDTree(data)\n\n self.is_initialized = True\n\n def query(self, current_pose):\n\n distance, wps_closest = self.tree.query(\n np.array([[current_pose.position.x, current_pose.position.y, current_pose.position.z]]))\n\n return distance[0], wps_closest[0]","sub_path":"ros/src/waypoint_updater/src/waypoint_updater/kd_tree.py","file_name":"kd_tree.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"294776179","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\n\nfiles = (f for f in os.listdir(os.getcwd()) if not f.startswith('.') and f.endswith('.log'))\nok = []\nwarning = []\nfor f in files:\n\ttemp = open(f,'r')\n\tfor x in temp.readlines():\n\t\tif 'Output has been logged to file' in x:\n\t\t\tok.append(f)\n\t\t\tbreak\n\telse:\n\t\twarning.append(f)\nwarning_file = open(os.path.join(os.getcwd(),'warning.txt'), 'w')\nfor x in warning:\n\twarning_file.write(x[:5]+'\\n')\nwarning_file.close()\nwok_file = open(os.path.join(os.getcwd(),'wok.txt'), 'w')\nfor x in ok:\n\twok_file.write(x[:5]+'\\n')\nwarning_file.close()","sub_path":"__beta/bye.py","file_name":"bye.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"28168582","text":"import numpy as np\n\n#\ndatalist_num = ['50', '51', '52', '53', '54', '55',\n '56', '57', '58', '59', '690', '692', '701', '713', '6201', '6203', '6204', '6208']\n# datalist_num = ['50']\nfor num in datalist_num:\n o_data = np.genfromtxt('./stock_feature/' + num + '_feature.csv', delimiter=',', skip_header=1)\n o_y_data = np.genfromtxt('./stock_label/' + num + '_label_ratio.csv', delimiter=',', skip_header=1)\n date = np.genfromtxt('./stock_workday/' + num + '_workday.csv', delimiter=',', skip_header=1)\n # print(o_data.shape)\n # print(o_y_data)\n\n start = np.where(date == 20180111)\n ind = int(start[0])\n lim = o_data.shape[0]\n print(lim)\n\n all_train_data = np.array([o_data[0]])\n all_label_data = np.array([])\n all_pre_data = np.array([o_data[0]])\n all_pre_y_data = np.array([])\n\n print('-----train------')\n for i in range(0, lim - 14, 1):\n train_data = o_data[i:i + 10]\n print(date[i],date[i+9])\n all_train_data = np.vstack((all_train_data, train_data))\n all_train_data = all_train_data[1:]\n print(all_train_data.shape)\n np.savetxt('./train_r/' + num + 'train_data.csv', all_train_data, delimiter=',')\n\n print('------label-------')\n for j in range(0, lim - 14, 1):\n label_data = o_y_data[j + 10:j + 15]\n print(date[j+10],date[j+14])\n all_label_data = np.hstack((all_label_data, label_data))\n print(all_label_data.shape)\n np.savetxt('./train_r/' + num + 'all_label_data.csv', all_label_data, delimiter=',')\n\n print('------test x-------')\n for k 
in range(ind, lim + 1, 1):\n        pre_data = o_data[k - 10:k]\n        all_pre_data = np.vstack((all_pre_data, pre_data))\n        print(date[k - 10], date[k - 1])\n    all_pre_data = all_pre_data[1:]\n    print(all_pre_data.shape)\n    # print(all_pre_data)\n    np.savetxt('./train_r/' + num + 'all_pre_data.csv', all_pre_data, delimiter=',')\n    #\n    print('------test y-------')\n    for f in range(ind, lim - 4, 1):\n        pre_y_data = o_y_data[f:f + 5]\n        all_pre_y_data = np.hstack((all_pre_y_data, pre_y_data))\n        print(date[f], date[f + 4])\n    print(all_pre_y_data.shape)\n    # print(all_pre_y_data)\n    np.savetxt('./train_r/' + num + 'all_pre_y_data.csv', all_pre_y_data, delimiter=',')\n    print(num + ' finish')\n","sub_path":"data_processing_r.py","file_name":"data_processing_r.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"380510849","text":"def inverte(x):\r\n    if(x > 0):\r\n        ss=str(x)\r\n        lol=ss\r\n        if(len(ss) == 4):\r\n            return lol[::-1] \r\n    print(\"o numero digitado nao tem 4 digitos ou nao e positivo\")\r\n    \r\n    \r\nx=int(input())\r\nprint(inverte(x))\r\n#srt=str(x)\r\n#print(srt[::-1])\r\n","sub_path":"LISTAS/3aL/RESOLVIDAS/Q_11.py","file_name":"Q_11.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"77282759","text":"from django.urls import path\nfrom . import views\n\napp_name = \"course\"\n\nurlpatterns = [\n    path(\"\", views.CourseListView.as_view(), name=\"course-list\"),\n    path(\"detail//\", views.CourseDetailView.as_view(), name=\"course-detail\"),\n    path(\"episodes//\", views.EpisodeDetailView.as_view(), name=\"episode-detail\"),\n    path(\"add/\", views.add_course, name=\"add-course\"),\n    path(\"add//\", views.add_course, name=\"edit-course\"),\n    path(\"add//\", views.add_episode, name=\"add-episode\"),\n    path(\"add///\", views.add_episode, name=\"edit-episode\"),\n    path(\"add///step/\", views.add_step, name=\"add-step\"),\n    path(\"add///step//\", views.add_step, name=\"edit-step\"),\n]","sub_path":"course/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"238499083","text":"# Merge two sorted linked lists and return it as a sorted list.\n# The list should be made by splicing together the nodes of the first two lists.\n#\n# Example 1:\n# Input: l1 = [1,2,4], l2 = [1,3,4]\n# Output: [1,1,2,3,4,4]\n#\n# Constraints:\n# The number of nodes in both lists is in the range [0, 50].\n# -100 <= Node.val <= 100\n# Both l1 and l2 are sorted in non-decreasing order.\n\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\n\n# class Solution:\n#     def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n\nclass Node:\n    def __init__(self, val):\n        self.val = val\n        self.next = None\n\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n\n    def append(self, new_val):\n        new_node = Node(new_val)\n        if self.head is None:\n            self.head = new_node\n            return\n        current = self.head\n        while current.next:\n            current = current.next\n        current.next = new_node\n\n    def print_list(self):\n        if self.head is None:\n            print(\"Empty list!\")\n            return\n        current = self.head\n        while current:\n            print(current.val, end=\" \")\n            current = current.next\n\n\ndef merge_two_linked_lists(list1, list2):\n    if list1 is None and list2 is None:\n        
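# both inputs are empty, so there is nothing to merge; fall through and return None\n        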
return\n\n if list1 is None:\n return list2\n\n if list2 is None:\n return list1\n\n merged_linked_list = LinkedList()\n while not(list1 is None or list2 is None):\n if list1.val < list2.val:\n merged_linked_list.append(list1.val)\n list1 = list1.next\n else:\n merged_linked_list.append(list2.val)\n list2 = list2.next\n if list1 is None:\n while list2 is not None:\n merged_linked_list.append(list2.val)\n list2 = list2.next\n if list2 is None:\n while list1 is not None:\n merged_linked_list.append(list1.val)\n list1 = list1.next\n merged_linked_list.print_list()\n return merged_linked_list.head\n\n\nif __name__ == '__main__':\n l1 = LinkedList()\n l1.append(1)\n l1.append(2)\n l1.append(4)\n\n l2 = LinkedList()\n l2.append(1)\n l2.append(3)\n l2.append(4)\n\n merge_two_linked_lists(l1.head, l2.head)","sub_path":"src/main/easy/merge_two_lists1.py","file_name":"merge_two_lists1.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"649126896","text":"import competition_utilities as cu\nimport features\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\n\ntrain_file = \"train-sample_October_9_2012_v2.csv\"\nfull_train_file = \"train_October_9_2012.csv\"\ntest_file = \"private_leaderboard.csv\"\nsubmission_file = \"middle_submission.csv\"\n\nfeature_names = [ \"BodyLength\"\n , \"NumTags\"\n , \"OwnerUndeletedAnswerCountAtPostTime\"\n , \"ReputationAtPostCreation\"\n , \"TitleLength\"\n , \"UserAge\"\n ]\n\ndef main():\n print(\"Reading the data\")\n data = cu.get_dataframe(train_file)\n\n print(\"Extracting features\")\n fea = features.extract_features(feature_names, data)\n\n print(\"Training the model\")\n rf = RandomForestClassifier(n_estimators=100, verbose=2, compute_importances=True, n_jobs=-1)\n rf.fit(fea, data[\"OpenStatus\"])\n gb = GradientBoostingClassifier(n_estimators=100, learn_rate=1.0)\n gb.fit(fea, data[\"OpenStatus\"])\n dt = DecisionTreeClassifier(max_depth=None, min_samples_split=1, random_state=0)\n dt.fit(fea, data[\"OpenStatus\"])\n et = ExtraTreesClassifier(n_estimators=100, max_depth=None, min_samples_split=1, random_state=0)\n et.fit(fea, data[\"OpenStatus\"])\n\n print(\"Reading test file and making predictions\")\n data = cu.get_dataframe(test_file)\n test_features = features.extract_features(feature_names, data)\n\n probs = rf.predict_proba(test_features)\n probs2 = gb.predict_proba(test_features)\n probs3 = dt.predict_proba(test_features)\n probs4 = et.predict_proba(test_features)\n\n for i in range(0, len(probs)):\n for j in range(0,5):\n probs[i][j] = (probs[i][j] + probs2[i][j] + probs3[i][j] + probs4[i][j])/4\n\n print(\"Calculating priors and updating posteriors\")\n new_priors = cu.get_priors(full_train_file)\n old_priors = cu.get_priors(train_file)\n probs = cu.cap_and_update_priors(old_priors, probs, new_priors, 0.001)\n\n print(\"Saving submission to %s\" % submission_file)\n cu.write_submission(submission_file, probs)\n\nif __name__==\"__main__\":\n main()\n","sub_path":"middle_benchmark.py","file_name":"middle_benchmark.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"51772685","text":"from selenium import webdriver\nimport unittest,time\nfrom pages.search_baidu import Search_baidu\nfrom 
pages.baidu_shezhi import Baidu_shezhi\nfrom pages.click_news import Click_news\nimport ddt\ntestdates = [\n    {'text':'selenium','expect':'selenium'},\n    {'text':'自动化','expect':'自动化'},\n    {'text':'百度一下','expect':'百度一下'}\n    ]\n@ddt.ddt\nclass Testbaiducase(unittest.TestCase):\n    '''Test cases for Baidu search'''\n    def setUp(self):\n        self.driver = webdriver.Chrome()\n        self.driver.delete_all_cookies()\n        self.driver.refresh()\n        self.search_baidu = Search_baidu(self.driver)\n        self.baidu_shezhi = Baidu_shezhi(self.driver)\n        self.click_news = Click_news(self.driver)\n\n    def input_case(self,text,expect):\n\n        self.search_baidu.input_text(text)\n        self.search_baidu.click_baidu()\n        # assertion\n        result = self.search_baidu.get_name(text)\n        self.assertTrue(result == expect)\n\n    @ddt.data(*testdates)\n    def test_01(self,data):\n        #data1 = testdates[0]\n        print('测试数据 %s' % data)\n        self.input_case(data['text'],data['expect'])\n\n\nif __name__ == '__main__':\n    unittest.main()\n\n\n'''\nTest cases,\nclass Testbaiducase(unittest.TestCase):\n    def setUp(self):\n        self.driver = webdriver.Chrome()\n        self.search_baidu = Search_baidu(self.driver)\n        self.baidu_shezhi = Baidu_shezhi(self.driver)\n        self.click_news = Click_news(self.driver)\n\n\n    def test_01(self):\n        self.search_baidu.input_text()\n        self.search_baidu.input_text()\n        self.search_baidu.click_baidu()\n        result = self.search_baidu.get_name()\n        self.assertTrue(result=='selenium')\n\n    def test_02(self):\n        self.baidu_shezhi.click_sousuoshez()\n        self.baidu_shezhi.click_select()\n        self.baidu_shezhi.click_option()\n        self.baidu_shezhi.click_baocunshez()\n        self.baidu_shezhi.click_alter()\n\n    def test_03(self):\n        self.click_news.open_news()\n        result = self.click_news.get_link()\n        self.assertTrue(result=='百度首页')\n\n    def tearDown(self):\n        self.driver.quit()\n'''","sub_path":"case/test_baidu.py","file_name":"test_baidu.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"411667033","text":"import unittest\nfrom gradescope_utils.autograder_utils.decorators import weight\nfrom gradescope_utils.autograder_utils.files import check_submitted_files\n\nclass TestFiles(unittest.TestCase):\n    @weight(0)\n    def test_submitted_files(self):\n        \"\"\"Check submitted files: looking for 'homework7.py'\"\"\"\n        missing_files = check_submitted_files(['homework7.py'])\n        for path in missing_files:\n            print('Missing %s' % path)\n        self.assertEqual(len(missing_files), 0, 'Missing some required files!')\n        print('All required files submitted!')\n","sub_path":"assignments/hw7/autograder/tests/test_files.py","file_name":"test_files.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"87214173","text":"from open_position import OpenPosition\nfrom order import Order\nfrom cbpro import AuthenticatedClient\nfrom indicators import Indicator\nfrom indicators import RSI\nfrom indicators import BB\nfrom dict import new_dict\nfrom trade import Trade\nfrom new_strategies import MACDStrategy\nfrom new_strategies import RSISTrategy\nfrom new_strategies import Bollinger\nfrom new_strategies import RateOfChange\nimport Data\nimport csv\n\n\n#Body of script\n#token = \"AGLD-USD\"\nwrite = open(\"../../test_cases/RateOfChange_5m_test.txt\", \"w\")\nwrite.write(\"Buy when RCO is less than -1, sell when price greater than 1\\n\")\nwrite.write(\"token, capital, lapse, all_trades, prof_trades, success 
rate\\n\")\nwrite.close()\n\nfor token in new_dict:\n new_reader = None\n try:\n data = open(f\"../../data_5m/{token}_5m.csv\", \"r\")\n new_reader = csv.reader(data)\n except FileNotFoundError as fnfe:\n continue\n\n listener = open(\"../txt_files/data.txt\", \"w\")\n listener.close()\n\n candles = []\n for line in new_reader:\n candles.append(line)\n\n capital = 100\n private_client = AuthenticatedClient(Data.API_Public_Key, Data.API_Secret_Key, Data.Passphrase)\n new_order = Order(private_client)\n position = OpenPosition(new_order)\n indicator = Indicator()\n bands_1dev = BB(ndbevup=1, nbdevdn=1)\n\n indicator_list = [indicator, bands_1dev]\n for new_indicator in indicator_list:\n new_indicator.candles = candles[1:]\n new_indicator.get_data_set()\n new_indicator.reverse_data()\n new_indicator.get_dates()\n new_indicator.get_np_array()\n new_indicator.set_indicator()\n\n # verifies that the data and dates was extracted successfully\n h = 0\n y = -1\n while h < len(indicator.candles):\n if indicator.date_array[h] == indicator.candles[y][0]:\n pass\n else:\n raise ValueError(\"dates do not match\")\n if indicator.close_array[h] == float(indicator.candles[y][4]):\n pass\n else:\n raise ValueError(\"close prices do not match\")\n h = h + 1\n y = y - 1\n\n if token in new_dict:\n strategy_5m = Bollinger(indicator, bands_1dev, new_order)\n new_trade = Trade()\n params: dict\n all_trades = 0\n prof_trades = 0\n success_rate = 0\n i = 1\n k = -2\n\n while i < len(indicator.close_array):\n strategy_5m.strategy(index=i, beg=k)\n if position.get_position() is False:\n if new_order.get_bottom():\n\n params = {\n \"id\": i,\n \"size\": capital / float(indicator.candles[k][4]),\n \"product_id\": token,\n \"side\": \"buy\",\n \"funds\": capital,\n \"status\": \"done\",\n \"done_at\": indicator.date_array[i],\n \"executed_value\": capital * 0.995,\n \"product_price\": indicator.close_array[i]\n }\n\n writer = open(\"../txt_files/data.txt\", \"w\")\n new_order.details = params\n for line in new_order.details:\n writer.write(str(new_order.details[line]) + \"\\n\")\n writer.close()\n print(\"buy order details: \", new_order.details)\n if \"side\" in new_order.details:\n if new_order.get_key(\"side\") == \"buy\":\n position.long_position = True\n\n elif position.get_position():\n\n ready_to_trade = False\n avg_cost = float(new_order.get_key(\"executed_value\")) / float(new_order.get_key(\"size\"))\n percentage = ((float(indicator.get_index(-1) * 100)) / avg_cost) - 100\n\n if new_order.is_top:\n ready_to_trade = True\n done_reason = strategy_5m.index\n\n if ready_to_trade:\n reader = open(\"../txt_files/data.txt\", \"r\")\n reader.read()\n new_size = new_order.get_key(\"size\")\n capital = (indicator.close_array[i] * new_size) * 0.995\n\n if capital > new_order.get_key(\"executed_value\"):\n all_trades = all_trades + 1\n prof_trades = prof_trades + 1\n else:\n all_trades = all_trades + 1\n\n params = {\n \"id\": i,\n \"size\": new_size,\n \"product_id\": token,\n \"side\": \"sell\",\n \"funds\": indicator.close_array[i] * new_size,\n \"status\": \"done\",\n \"done_at\": indicator.date_array[i],\n \"executed_value\": capital,\n \"product_price\": indicator.close_array[i]\n }\n\n new_order.details = params\n if \"side\" in new_order.details:\n if new_order.get_key(\"side\") == \"sell\":\n position.long_position = False\n print(\"sell order details: \", new_order.details)\n # print(\"position: \", position.get_position())\n\n i = i + 1\n k = k - 1\n strategy_5m.reset_order()\n strategy_5m.index = 0\n\n lapse 
= float(indicator.date_array[-1]) - float(indicator.date_array[0])\n if lapse >= 86400:\n lapse = lapse / 86400\n try:\n success_rate = (prof_trades * 100) / all_trades\n except ZeroDivisionError:\n print(new_order.details)\n\n final = {\n \"token\": token,\n \"capital\": \"%.2f\" % capital,\n \"days\": \"%.2f\" % lapse,\n \"all_trades\": all_trades,\n \"success_trades\": prof_trades,\n \"success_rate\": \"%.2f\" % success_rate\n }\n\n awriter = open(\"../../test_cases/RateOfChange_5m_test.txt\", \"a\")\n for key in final:\n awriter.write(f\"{final[key]}, \")\n awriter.write(\"\\n\")\n awriter.close()\n print(capital)","sub_path":"src/strategy_tester.py","file_name":"strategy_tester.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"496294971","text":"# -*- coding: utf-8 -*-\nimport os, sys\nimport Image\n\nimport tinify\n\n# https://tinypng.com/developers\n# 必須事先申請 tinypng 的 API key\b或找前任助理。\ntinify.key = \"\"\n\ninput_dir = \"gallery\" # 原始圖檔輸入目錄\ncompress_dir = \"compress\" # 壓縮圖檔輸出目錄\nthumbnail_dir = \"thumb\" # 縮圖書出目錄\n\nout_wdith = 300\nout_height = 200\n\nif not os.path.isdir(compress_dir):\n os.makedirs(compress_dir)\n\nif not os.path.isdir(thumbnail_dir):\n os.makedirs(thumbnail_dir)\n\ndef resize(target,out_dir):\n \"\"\"return: Image instance,or None if open fail.\n \"\"\"\n try:\n im = Image.open(target)\n except IOError:\n return None\n im = im.resize((out_wdith, out_height))\n split_name = list(os.path.splitext(os.path.basename(target)))\n split_name.insert(-1, \"_m\")\n filename = \"\".join(split_name)\n im.save(os.path.join(out_dir,filename))\n return im\n\nif __name__ == \"__main__\":\n for img in os.listdir(input_dir):\n print(img)\n tinify.from_file(os.path.join(input_dir, img)).to_file(os.path.join(compress_dir, img))\n \n for img in os.listdir(compress_dir):\n resize(os.path.join(compress_dir, img), thumbnail_dir)\n","sub_path":"scripts/photo_preprocess.py","file_name":"photo_preprocess.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"265954284","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nsmartStr.py\n\nCreated by Fernando Cezar on 2012-03-23.\nCopyright (c) 2012 __MyCompanyName__. 
All rights reserved.\n\"\"\"\nimport types\nimport errorHandler\n\ndef normalize(string):\n \"\"\"Given a string, returns it in UTF-8\"\"\"\n \n possible_encodings = [\"utf-8\", \"utf-16\", \"iso8859-1\", \"windows-1250\",\n \"windows-1252\", \"windows-1251\", \"iso-8859-15\"]\n \n for encoding in possible_encodings:\n try:\n encoded_string = __smart_str(string, from_encoding = encoding)\n return encoded_string\n except:\n continue\n \n errorHandler.logMessage(\"normalization\", \"Could not decode %s\" % repr(string))\n return \"%s (Error decoding)\" % repr(string)\n\n\ndef __smart_str(s, encoding='utf-8', errors='strict', from_encoding='iso8859-1'):\n if type(s) in (int, long, float, types.NoneType):\n return str(s)\n elif type(s) is str:\n return s.decode(from_encoding, errors).encode(encoding, errors)\n elif type(s) is unicode:\n return s.encode(encoding, errors)\n elif hasattr(s, '__str__'):\n return __smart_str(str(s), encoding, errors, from_encoding)\n elif hasattr(s, '__unicode__'):\n return __smart_str(unicode(s), encoding, errors, from_encoding)\n else:\n return __smart_str(str(s), encoding, errors, from_encoding)\n\n","sub_path":"src/utils/smartStr.py","file_name":"smartStr.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"640373867","text":"# Radix Sort Algorithm\r\n\r\n\r\n# get number of digits in largest item\r\ndef __get_num_digits(array):\r\n m = 0\r\n for item in array:\r\n m = max(item, m)\r\n return len(str(m))\r\n\r\nfrom functools import reduce\r\ndef __flatten(array):\r\n return reduce(lambda x, y : x + y, array)\r\n\r\ndef radix(array, num_digits):\r\n for digit in range(0, num_digits):\r\n bucket = [[] for i in range(10)]\r\n for item in array:\r\n # num is the bucket number that the item will be put into\r\n num = item // 10 ** (digit) % 10\r\n bucket[num].append(item)\r\n array = __flatten(bucket)\r\n return array\r\n\r\ndef main():\r\n array = [55, 45, 3, 289, 213, 1, 288, 53, 2, 0, 7865, 2]\r\n \r\n # find longest integer in our list\r\n num_digits = __get_num_digits(array)\r\n array = radix(array, num_digits)\r\n print(array)\r\nmain()\r\n","sub_path":"013-radix-sort.py","file_name":"013-radix-sort.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"338805861","text":"# url = 'https://www.swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AWIsY84KEPMDFAWN&categoryId=AWIsY84KEPMDFAWN&categoryType=CODE'\n\nfor tc in range(int(input())):\n deck = {'S': [0]*13,\n 'D': [0]*13,\n 'H': [0]*13,\n 'C': [0]*13}\n cards = input()\n flag = True\n for i in range(0, len(cards), 3):\n if not deck[cards[i]][int(cards[i+1:i+3])-1]:\n deck[cards[i]][int(cards[i+1:i+3])-1] += 1\n else:\n flag = False\n break\n if flag:\n result = []\n for k in deck:\n result.append(str(deck[k].count(0)))\n result = ' '.join(result)\n else:\n result = 'ERROR'\n print(f'#{tc+1} {result}')","sub_path":"SW Expert Academy/Difficulty_3/4047. 영준이의 카드 카운팅.py","file_name":"4047. 
영준이의 카드 카운팅.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"254297257","text":"import json\n\nimport scrapy\n\nfrom kingfisher_scrapy.base_spider import BaseSpider\n\n\nclass MexicoCDMXSource(BaseSpider):\n name = 'mexico_cdmx'\n\n def start_requests(self):\n yield scrapy.Request(\n url='http://www.contratosabiertos.cdmx.gob.mx/api/contratos/todos',\n meta={'kf_filename': 'list.json'},\n callback=self.parse_list\n )\n\n def parse_list(self, response):\n if response.status == 200:\n\n data = json.loads(response.text)\n if self.sample:\n data = [data[0]]\n\n for data_item in data:\n yield scrapy.Request(\n url=data_item['uri'],\n meta={'kf_filename': 'id%s.json' % data_item['id']},\n callback=self.parse_record\n )\n else:\n yield {\n 'success': False,\n 'file_name': 'list.json',\n \"url\": response.request.url,\n \"errors\": {\"http_code\": response.status}\n }\n\n def parse_record(self, response):\n if response.status == 200:\n yield self.save_response_to_disk(response, response.request.meta['kf_filename'],\n data_type='release_package')\n else:\n yield {\n 'success': False,\n 'file_name': response.request.meta['kf_filename'],\n \"url\": response.request.url,\n \"errors\": {\"http_code\": response.status}\n }\n","sub_path":"kingfisher_scrapy/spiders/mexico_cdmx.py","file_name":"mexico_cdmx.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"529672527","text":"def criterio(dato, clave):\n if(clave==None):\n return dato\n elif(clave=='telefono'):\n return dato.telefono\n elif(clave=='apellido'):\n return dato.apellido\n elif(clave=='nombre'):\n return dato.nombre\n\nclass NodoLista():\n def __init__(self):\n self.info = None\n self.sig= None\n \nclass TLista():\n def __init__(self):\n self.tamanio=0\n self.cab= None\n\ndef insertar(lista,x, crit=None):\n lista.tamanio=lista.tamanio+1\n aux = NodoLista()\n aux.info = x\n if (lista.cab==None) or (criterio(x, crit) < criterio(lista.cab.info, crit)):\n aux.sig=lista.cab\n lista.cab=aux\n else:\n ant = lista.cab\n act = lista.cab.sig\n while (act != None) and (criterio(act.info,crit) < criterio(x, crit)):\n act=act.sig\n ant=ant.sig\n aux.sig=act\n ant.sig=aux\n \ndef eliminar(lista,ku, crit=None):\n x = None\n if (criterio(lista.cab.info,crit)==ku):\n x = lista.cab.info\n lista.cab = lista.cab.sig\n lista.tamanio=lista.tamanio-1\n else:\n ant=lista.cab\n act=lista.cab.sig\n while((act!=None)and(criterio(act.info,crit)!=ku)):\n ant=ant.sig\n act=act.sig\n if(act!=None):\n x=act.info\n ant.sig=act.sig\n lista.tamanio=lista.tamanio-1\n return x\n\ndef busqueda(lista,ku, clave):\n pos=lista.cab\n while((pos!=None) and (criterio(pos.info,clave)!=ku)):\n pos=pos.sig\n return pos \n \ndef tamanio(lista):\n return lista.tamanio \n\ndef listavacia(lista):\n return lista.tamanio == 0 \n\ndef listar(lista):\n for i in range (0,lista.tamanio):\n print(lista.cab.i.info)","sub_path":"Guia Telefonica/TDA_Listas.py","file_name":"TDA_Listas.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"70853309","text":"# -*- coding: utf-8 -*-\n\"\"\"\nJunior\n\nPlotting local features as clusters\n\n===================================\nDemo of HDBSCAN clustering algorithm\n===================================\nFinds a clustering that has the greatest stability over a range\nof 
epsilon values for standard DBSCAN. This allows clusterings\nof different densities unlike DBSCAN.\n\"\"\"\nprint(__doc__)\n\nimport cv2\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom hdbscan import HDBSCAN\nfrom sklearn import metrics\n\nimport time\n\ndef getClusterIdx(labels, label):\n    lst = []\n    \n    for i in range(len(labels)):\n        if labels[i] == label:\n            lst.append(i)\n    \n    return lst\n\ndef getKeypoints(keypoints, idx):\n    kps = []\n    for i in idx:\n        kps.append(keypoints[i])\n    return kps\n\ndef decomposeDescriptors(desc):\n    shape = desc.shape\n    dec = [[], []]\n    for i in range(shape[0]):\n        x, y = 0.0, 0.0\n        row = desc[i]\n        for j in range(0, shape[1], 2):\n            x = x + row[j]\n            y = y + row[j+1]\n        dec[0].append(x)\n        dec[1].append(y)\n    return dec\n    \ndef distance(desc):\n    ground = desc[1]\n    dist = []\n    for row in desc:\n        d = np.linalg.norm(row - ground)\n        dist.append(d)\n    return dist\n\ndef showCluster(img, label):\n    idx = getClusterIdx(labels1, label)\n    kps = getKeypoints(keypoints, idx)\n    img2 = cv2.drawKeypoints(image,kps,None,(255,0,0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n    plt.imshow(img2)\n    plt.show()\n    \nimage = cv2.imread('/home/ojmakh/programming/phd/data/vocount/test/1 frame.jpg')\nsurf = cv2.xfeatures2d.SURF_create(500)\nkeypoints, descriptors = surf.detectAndCompute(image, None)\n\n##############################################################################\n# Compute DBSCAN\nhdb_t1 = time.time()\nhdb = HDBSCAN(min_cluster_size=4).fit(descriptors.astype('double'))\nlabels1 = hdb.labels_\nhdb_elapsed_time = time.time() - hdb_t1\n\nlset1 = set(labels1)\n","sub_path":"python/feature_plot.py","file_name":"feature_plot.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"70250421","text":"import numpy as np\n\n\nclass NeuralNetwork(object):\n    def __init__(self):\n        self.input_layer_size = 2\n        self.output_layer_size = 1\n        self.hidden_layer_size = 3\n        self.W1 = np.random.randn(self.input_layer_size,\n                                  self.hidden_layer_size)\n        self.W2 = np.random.randn(self.hidden_layer_size,\n                                  self.output_layer_size)\n\n    def forward(self, X):\n        \"\"\"\n        Propagate inputs through the network.\n        \"\"\"\n        self.z2 = np.dot(X, self.W1)\n        self.a2 = self.sigmoid(self.z2)\n        self.z3 = np.dot(self.a2, self.W2)\n        y_hat = self.sigmoid(self.z3)\n        return y_hat\n\n    def sigmoid(self, z):\n        \"\"\"\n        Sigmoid activation function applied to a scalar, vector, or matrix.\n        \"\"\"\n        return 1 / (1 + np.exp(-z))\n\n\nnn = NeuralNetwork()\nX = np.array([[4, 2], [4, 5], [5, 3]], np.int32)\nyhat = nn.forward(X)\nprint(yhat)\nprint(type(X))\n","sub_path":"simple_nn.py","file_name":"simple_nn.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"581994656","text":"from imagepy.core.util import fileio\nfrom imagepy import IPy\nimport os\nimport numpy as np\nfrom PIL import Image, ImageSequence\n\n\n\nclass SaveAnimate(fileio.Writer):\n\ttitle = 'GIF Animate Save'\n\tfilt = ['GIF']\n\tnote = ['8-bit', 'rgb', 'stack']\n\n\t#process\n\tdef run(self, ips, imgs, para = None):\n\t\timgs = [Image.fromarray(i) for i in imgs] \n\t\timgs[0].save(para['path'], save_all=True, loop=0, duration=10, append_images=imgs[1:])\n\nclass OpenAnimate(fileio.Reader):\n\ttitle = 'GIF Animate Open'\n\tfilt = ['GIF']\n\tnote = ['8-bit', 'rgb', 'stack']\n\n\t#process\n\tdef run(self, para = None):\n\t\t#imgs = readGif(para['path'])\n\n\t\timgs = Image.open(para['path'])\n\t\timgs = 
ImageSequence.Iterator(imgs)\n\t\timgs = [np.array(i.convert('RGB')) for i in imgs]\n\t\tfor i in range(len(imgs)):\n\t\t\tif imgs[i].ndim==3 and imgs[i].shape[2]>3:\n\t\t\t\timgs[i] = imgs[i][:,:,:3].copy()\n\t\tfp, fn = os.path.split(para['path'])\n\t\tfn, fe = os.path.splitext(fn) \n\t\tIPy.show_img(imgs, fn)\n\nplgs = [OpenAnimate, SaveAnimate]","sub_path":"imagepy/menus/File/GIF/animate_plgs.py","file_name":"animate_plgs.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"550190165","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('companies', '0006_auto_20150618_1520'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='company',\n name='notified',\n field=models.BooleanField(default=False, editable=False),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='company',\n name='text',\n field=models.TextField(null=True, verbose_name='\\u041e\\u043f\\u0438\\u0441\\u0430\\u043d\\u0438\\u0435 \\u043a\\u043e\\u043c\\u043f\\u0430\\u043d\\u0438\\u0438', blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='company',\n name='title',\n field=models.CharField(max_length=255, null=True, verbose_name='\\u041d\\u0430\\u0437\\u0432\\u0430\\u043d\\u0438\\u0435 \\u043a\\u043e\\u043c\\u043f\\u0430\\u043d\\u0438\\u0438', blank=True),\n preserve_default=True,\n ),\n migrations.RunSQL('UPDATE companies_company SET notified=True', reverse_sql=\"SELECT 'foo'\")\n ]\n","sub_path":"apps/companies/migrations/0007_auto_20160201_0538.py","file_name":"0007_auto_20160201_0538.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"207740758","text":"# -*- coding:utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport pickle\nimport matplotlib.pyplot as plt\n# from keras.models import Model\n# from tensorflow.python.layers import base\n# from keras.models import Model\nfrom datetime import datetime\nimport model_2dec\nimport time\nfrom datetime import datetime\n\nimport librosa\nimport librosa.display\nimport soundfile as sf\nimport matplotlib\n\nmatplotlib.use('Agg')\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--num_enc', type=int, default='6', help='number of encoderlayers')\nparser.add_argument('--num_dec', type=int, default='6', help='number of decoder layers')\nparser.add_argument('--d_model', type=int, default='256', help='number of hidden size(frequency sizes)')\nparser.add_argument('--num_heads', type=int, default='8', help='number of multihead attention')\nparser.add_argument('--dff', type=int, default='1024', help='number of feed forward network size')\nparser.add_argument('--max_sequence_length', type=int, default='438', help='number of max sequence size')\nparser.add_argument('--dropout_rate', type=float, default='0.1', help='number of max sequence size')\nparser.add_argument('--max_text_length', type=int, default='39', help='number of text max sequence size')\nparser.add_argument('--lr', type=float, default='1e-5', help='initial learning rate')\nparser.add_argument('--nfft', type=int, default='512', help='number of fft')\nparser.add_argument('--hop', type=int, default='256', help='number of noverlap')\nparser.add_argument('--ckpt', default='0', help='check point 
path')\nparser.add_argument('--batch_size', type=int, default='64', help='number of batch')\nparser.add_argument('--epochs', type=int, default='10000', help='number of epochs')\nparser.add_argument('--gpus', type=str, default='0', help='using gpu number')\nparser.add_argument('--infor', type=str, default='what', help='option')\nargs = parser.parse_args()\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpus\n\n# for use tf ver 1.0\nconfig = tf.compat.v1.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.compat.v1.Session(config=config)\n\n\ndef plot_attention_weights_spec(attention, layer, cnt, find_zero, find_zero_tar):\n fig = plt.figure(figsize=(16, 8))\n # print(\"first attention\", np.shape(attention))\n\n # print(\"attention[layer]\", attention[layer])\n # print(\"shape_tensor\", attention[layer].shape)\n # print(\"shape_np\", np.shape(attention[layer].shape))\n\n attention = attention[layer]\n # print(\"shape of attention[layer]\", attention.shape)\n attention = attention[0]\n # print(\"shape of attention[0]\", attention.shape)\n # attention = tf.squeeze(attention, axis=0)\n\n # attention = tf.squeeze(attention[layer], axis=0)\n\n # print(\"final attention shape\", attention.shape)\n attention = attention[:, :find_zero_tar, :find_zero]\n # print(\"attention shape after find_zero\", attention.shape)\n\n for head in range(attention.shape[0]):\n ax = fig.add_subplot(2, 4, head + 1)\n\n # plot the attention weights\n ax.matshow(attention[head], cmap='viridis') # for now\n\n fontdict = {'fontsize': 12}\n\n ax.set_title(' Encoder time step ', fontdict=fontdict)\n ax.set_ylabel(' Decoder time step', fontdict=fontdict)\n # ax.set_xlabel('Head {}'.format(head + 1))\n # plt.title('Head {}'.format(head + 1))\n ax.set_xlabel('Head {}'.format(head + 1))\n\n plt.tight_layout()\n\n cnt = str(cnt)\n save_dir = './attn_map/train/ckpt={}/'.format(args.ckpt)\n others = 'spec,epoch={}'.format(cnt)\n save_dir = os.path.join(save_dir, others)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n plt.savefig(save_dir + '/' + layer + '.png')\n plt.cla()\n plt.close()\n # plt.show()\n\n\ndef plot_attention_weights(attention, layer, cnt, find_zero, find_zero_asr):\n fig = plt.figure(figsize=(16, 8))\n # print(\"first attention\", np.shape(attention))\n\n attention = attention[layer]\n attention = attention[0]\n # attention = tf.squeeze(attention, axis=0)\n\n # attention = tf.squeeze(attention[layer], axis=0)\n # print('original input attention shape', attention.shape)\n attention = attention[:, :find_zero_asr, :find_zero]\n # print(\"find_zero {} find_zero_asr {} attention {}\".format(find_zero, find_zero_asr, attention.shape))\n\n for head in range(attention.shape[0]):\n ax = fig.add_subplot(2, 4, head + 1)\n\n # plot the attention weights\n ax.matshow(attention[head], cmap='viridis') # for now\n\n fontdict = {'fontsize': 12}\n\n ax.set_title(' Encoder time step ', fontdict=fontdict)\n ax.set_ylabel(' Decoder time step', fontdict=fontdict)\n # ax.set_xlabel('Head {}'.format(head + 1))\n # plt.title('Head {}'.format(head + 1))\n ax.set_xlabel('Head {}'.format(head + 1))\n\n plt.tight_layout()\n\n cnt = str(cnt)\n save_dir = './attn_map/train/ckpt={}/'.format(args.ckpt)\n others = 'spec,epoch={}'.format(cnt)\n save_dir = os.path.join(save_dir, others)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n plt.savefig(save_dir + '/' + layer + '.png')\n plt.cla()\n plt.close()\n # plt.show()\n\n\ndef create_padding_mask_text(seq):\n seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\n\n # 
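tokens equal to the pad id (0) are marked 1.0, real tokens 0.0;\n    # 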
add extra dimensions to add the padding\n # to the attention logits.\n return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)\n\n\ndef create_padding_mask_spec(seq):\n seq = tf.cast(tf.not_equal(seq, 0), tf.float32)\n seq = tf.cast(tf.reduce_max(seq, axis=-1), tf.float32)\n seq = tf.cast(tf.not_equal(seq, 1), tf.float32)\n # add extra dimensions to add the padding\n # to the attention logits.\n return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)\n\n\ndef create_look_ahead_mask(size):\n mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)\n return mask # (seq_len, seq_len)\n\n\ndef create_masks(inp_spec, tar_txt, tar_spec):\n # Encoder padding mask\n\n enc_padding_mask = create_padding_mask_spec(inp_spec)\n print(\"enc_padding_mask\", enc_padding_mask)\n print(\"enc_padding mask[0]\", enc_padding_mask[0])\n\n # Used in the 2nd attention block in the decoder.\n # This padding mask is used to mask the encoder outputs.\n # dec_padding_mask_asr = create_padding_mask_text(inp_txt)\n dec_padding_mask = create_padding_mask_spec(inp_spec)\n # dec_padding_mask = tf.concat([dec_padding_mask_text, dec_padding_mask_spec], axis=3) # concat with text and spec\n # Used in the 1st attention block in the decoder.\n # It is used to pad and mask future tokens in the input received by\n # the decoder.\n look_ahead_mask_asr = create_look_ahead_mask(tf.shape(tar_txt)[1]) # 39, 39\n look_ahead_mask = create_look_ahead_mask(tf.shape(tar_spec)[1]) # 438, 438\n # look_ahead_mask_txt = create_look_ahead_mask(tf.shape(tar_txt)[1])\n # print(\"look_ahead mask txt shape is\", look_ahead_mask_txt)\n # look_ahead_mask_spec = create_look_ahead_mask(tf.shape(tar_spec)[1])\n # print(\"look_ahead_mask spec shape is\", look_ahead_mask)\n\n # look_ahead_mask = tf.concat([look_ahead_mask_txt, look_ahead_mask_spec], axis=3)\n dec_target_padding_mask_asr = create_padding_mask_text(tar_txt) # batch_size, 1, 1, 39\n dec_target_padding_mask = create_padding_mask_spec(tar_spec) # batch_size, 1, 1, 438\n # dec_target_padding_mask = tf.concat([dec_target_padding_mask_text, dec_target_padding_mask_spec], axis=3)\n\n combined_mask_asr = tf.maximum(dec_target_padding_mask_asr, look_ahead_mask_asr) # batch_size, 1, 1, 39\n combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) # batch_size, 1, 1, 438\n # combined_mask_spec = tf.maximum(dec_target_padding_mask_spec, look_ahead_mask)\n # combined_mask = tf.concat([combined_mask_text, combined_mask_spec], axis=3) # concat with text and spec\n\n return enc_padding_mask, combined_mask_asr, combined_mask, dec_padding_mask\n\n\nloss_object_l1 = tf.keras.losses.MeanAbsoluteError(reduction='none')\nloss_object_mse = tf.keras.losses.MeanSquaredError(reduction='none')\nloss_object_sparse = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')\n\n\n# def loss_function(real_asr, real_spec, pred_asr, pred_spec):\n# if real_a\n\ndef loss_function_spec(real, pred):\n mask = tf.cast(tf.math.equal(real, 0), tf.float32)\n mask = tf.cast(tf.logical_not(tf.cast(tf.reduce_min(mask, axis=-1), tf.bool)), tf.float32)\n # mse = loss_object_mse(real, pred, sample_weight=mask)\n l1 = loss_object_l1(real, pred, sample_weight=mask)\n # final_loss = (loss * 0.5) + (loss2 * 0.5)\n\n # return loss\n return tf.reduce_mean(l1)\n\n\ndef loss_function_text(real, pred):\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n\n loss_ = loss_object_sparse(real, pred)\n # loss = tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(real, 
pred, from_logits=True))\n\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n # mask = tf.cast(mask, dtype=loss.dtype)\n # loss *= mask\n\n # return loss\n\n return tf.reduce_mean(loss_)\n\n\ndef input_fn(spec_inp, txt_dec, spec_dec, txt_tar, spec_tar, BATCH_SIZE, BUFFER_SIZE):\n # txt_dataset = tf.data.Dataset.from_tensor_slices((txt_inp, txt_inp)).map(tf_encode)\n # print(txt_dataset)\n dataset = tf.data.Dataset.from_tensor_slices((spec_inp, txt_dec, spec_dec, txt_tar, spec_tar))\n print(\"dataset slide\", dataset)\n dataset = dataset.cache()\n\n train_dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n # train_dataset = dataset.batch(BATCH_SIZE)\n\n # print(\"train_dataset shuffle\",train_dataset)\n train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)\n # print(\"train_dataset\",train_dataset)\n # print(\"prefetch\", train_dataset)\n # return next(iter(train_data))\n return train_dataset\n\n\ndef main():\n # load dataset here\n\n asr_dec_inp = np.load('./cmu1120/train_id_dec.npy')\n asr_tar_inp = np.load('./cmu1120/train_id_tar.npy')\n\n spec_enc_inp = np.load('./cmu1120/origin/x_train_ori_all_512_256.npy') # source man, ori\n spec_enc_inp = spec_enc_inp.astype('float32')\n\n spec_dec_inp = np.load('./cmu1120/origin/y_train_dec_all_512_256.npy') # source man, ori\n spec_dec_inp = spec_dec_inp.astype('float32')\n\n spec_tar_inp = np.load('./cmu1120/origin/y_train_tar_all_512_256.npy') # source man, ori\n spec_tar_inp = spec_tar_inp.astype('float32')\n\n spec_enc_inp = spec_enc_inp[:, :-1, :]\n spec_dec_inp = spec_dec_inp[:, :-1, :]\n spec_tar_inp = spec_tar_inp[:, :-1, :]\n\n enc_inp_spec = np.transpose(spec_enc_inp, (0, 2, 1))\n dec_inp_spec = np.transpose(spec_dec_inp, (0, 2, 1))\n tar_inp_spec = np.transpose(spec_tar_inp, (0, 2, 1))\n\n ckpt_path = args.ckpt\n\n batch_size = args.batch_size\n buffer_size = 1500\n EPOCHS = args.epochs\n vocab_size = 1000\n\n train_dataset = input_fn(enc_inp_spec, asr_dec_inp, dec_inp_spec, asr_tar_inp, tar_inp_spec, batch_size,\n buffer_size)\n print(train_dataset) # ok\n\n train_loss_text = tf.keras.metrics.Mean(name='train_loss_text')\n print(train_loss_text)\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n train_loss_spec = tf.keras.metrics.Mean(name='train_loss_spec')\n print(train_loss_spec)\n transformer = model_2dec.Transformer(args.num_enc, args.num_dec, args.d_model, args.num_heads, args.dff, vocab_size,\n args.max_sequence_length, args.max_text_length, rate=args.dropout_rate)\n print(transformer)\n # my_model = model_2dec.Transformer\n # model_summary()\n\n lr_schedule = model_2dec.CustomSchedule(args.d_model)\n\n # initial_learning_rate = args.lr\n\n # use decay\n\n '''\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate,\n decay_steps=4000,\n decay_rate=0.96,\n staircase=True)\n '''\n\n # initial_learning_rate = 1e-5\n\n # optimizer_text = tf.keras.optimizers.Adam(lr_schedule, beta_1=0.9, beta_2=0.98,\n # epsilon=1e-9)\n # optimizer_spec = tf.keras.optimizers.Adam(lr_schedule, beta_1=0.9, beta_2=0.98,\n # epsilon=1e-9)\n optimizer = tf.keras.optimizers.Adam(lr_schedule, beta_1=0.9, beta_2=0.98, epsilon=1e-9)\n ## okay\n\n checkpoint_path = \"./checkpoints{}/train\".format(args.ckpt)\n\n # ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer_t)\n # ckpt = tf.train.Checkpoint(optimizer_text=optimizer_text, optimizer_spec=optimizer_spec, transformer=transformer)\n ckpt = tf.train.Checkpoint(transformer=transformer, 
optimizer=optimizer)\n ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=None)\n\n # merged_summary = tf.summary.merge_all()\n\n # writer = tf.summary.create_file_writer(\"/tmp/mylogs/eager\")\n logdir = \"logs/scalars{}/\".format(args.ckpt) + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n file_writer = tf.summary.create_file_writer(logdir + \"/metrics\")\n file_writer.set_as_default()\n\n # write_op = tf.summary.merge_all()\n # merged_summary = tf.compat.v1.contrib.summary.merge_all()\n\n # tf.compat.v1.summary.all_v2_summary_ops()\n # writer = tf.compat.v1.summary.FileWriter(logdir + \"/metrices\", sess.graph)\n\n if ckpt_manager.latest_checkpoint:\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n\n train_step_signature = [\n tf.TensorSpec(shape=(None, None, None), dtype=tf.float32),\n tf.TensorSpec(shape=(None, None), dtype=tf.int64),\n tf.TensorSpec(shape=(None, None, None), dtype=tf.float32),\n tf.TensorSpec(shape=(None, None), dtype=tf.int64),\n tf.TensorSpec(shape=(None, None, None), dtype=tf.float32),\n\n ]\n\n @tf.function(input_signature=train_step_signature)\n def train_step(inp_spec, asr_txt, dec_spec, tar_txt, tar_spec):\n # not key\n enc_padding_mask, combined_mask_asr, combined_mask, dec_padding_mask = create_masks(inp_spec, asr_txt, dec_spec)\n\n with tf.GradientTape() as tape:\n predict_text, predict_spec, attention_weight_asr, attention_weight = transformer(inp_spec, asr_txt,\n dec_spec,\n True,\n enc_padding_mask,\n combined_mask_asr,\n combined_mask,\n dec_padding_mask)\n loss_text = loss_function_text(tar_txt, predict_text)\n loss_spec = loss_function_spec(tar_spec, predict_spec)\n final_loss = loss_text + loss_spec\n\n # if batch%\n\n graident = tape.gradient(final_loss, transformer.trainable_variables)\n # gradients_text = text_tape.gradient(loss_text, transformer.trainable_variables)\n # gradients_spec = spec_tape.gradient(loss_spec, transformer.trainable_variables)\n # print(\"gradients_text {}, gradients_spec {}\".format(gradients_text, gradients_spec))\n # gradients_spec = tape.gradient(loss_spec, transformer.trainable_variables)\n\n optimizer.apply_gradients(zip(graident, transformer.trainable_variables))\n # optimizer_text.apply_gradients(zip(gradients_text, transformer.trainable_variables))\n # optimizer_spec.apply_gradients(zip(gradients_spec, transformer.trainable_variables))\n # print(\"optimizer_text {}, gradients_spec {}\".format(gradients_text, gradients_spec))\n\n train_loss_text(loss_text)\n train_loss_spec(loss_spec)\n train_accuracy(tar_txt, predict_text)\n\n return predict_text, predict_spec, attention_weight_asr, attention_weight\n\n # tf.summary.trace_on(graph=True, profiler=True)\n\n for epoch in range(EPOCHS):\n start = time.time()\n\n train_loss_text.reset_states()\n train_loss_spec.reset_states()\n train_accuracy.reset_states()\n\n # inp -> man, tar -> woman\n for (batch, (inp_spec, dec_txt, dec_spec, tar_txt, tar_spec)) in enumerate(train_dataset):\n\n epc_before = int(epoch)\n name_before = 'before_predict_epoch={}'.format(epc_before)\n result_before = inp_spec[0]\n result_before = np.transpose(result_before, (1, 0))\n\n result_txt, result, attention_weight_asr, attention_weight = train_step(inp_spec, dec_txt, dec_spec,\n tar_txt, tar_spec)\n\n # profiler_outdir=logdir + \"/metrics\")\n\n if batch % 20 == 0:\n print('Epoch {} Batch {} Text_Loss {:.4f} Spec_Loss {:.4f}, Text_Acc {:.4f}'.format(\n epoch + 1, batch, train_loss_text.result(), train_loss_spec.result(), 
train_accuracy.result()))\n\n if (epoch + 1) % 20 == 0:\n ckpt_save_path = ckpt_manager.save()\n print('Saving checkpoint for epoch {} at {}'.format(epoch + 1,\n ckpt_save_path))\n ''' \n spec_t = inp_spec[0]\n spec_t = spec_t.numpy()\n spec_t = spec_t.T\n print(\"spec_t shape is\", np.shape(spec_t))\n # spec_t = tf.transpose(spec_t)\n idx_spec = np.argwhere(np.diff(np.r_[False, spec_t[0], False]))\n find_zero_spec = np.squeeze(idx_spec)\n zero_cnt = find_zero_spec[-1]\n\n print(\"zero_cnt\", zero_cnt)\n\n for x in range(6):\n plot = 'decoder_layer{}_block2'.format(x + 1)\n # plot_asr = 'asr_decoder_layer{}_block2'.format(x + 1)\n\n ###################### check ######################\n\n # plot_attention_weights_spec(attention_weights, plot, i + 1, find_zero, set_name) # only spec??\n plot_attention_weights_spec(attention_weight, plot, epoch, zero_cnt) # spec plot\n # plot_attention_weights(attention_weights_asr, plot_asr, i + 1, set_name) # asr plot\n '''\n\n print('Epoch {} Text_Loss {:.4f} Spec_Loss {:.4f}, Text_Acc {:.4f}'.format(epoch + 1, train_loss_text.result(),\n train_loss_spec.result(),\n train_accuracy.result()))\n\n tf.summary.scalar('text_loss', data=train_loss_text.result(), step=epoch)\n tf.summary.scalar('spec_loss', data=train_loss_spec.result(), step=epoch)\n tf.summary.scalar('text_accuracy', data=train_accuracy.result(), step=epoch)\n\n print('Time taken for 1 epoch: {} secs\\n'.format(time.time() - start))\n\n if epoch % 20 == 0:\n # print(\"attention weight\", np.shape(attention_weight))\n # print(\"for tensor\", attention_weight)\n # print(\"shape\", attention_weight.shape)\n spec_t = inp_spec[0]\n\n spec_tar = dec_spec[0]\n spec_tar = spec_tar.numpy()\n # print(\"spec_tar\", np.shape(spec_tar))\n spec_tar_t = spec_tar.T\n\n # print(\"attn weight type\", type(attention_weight))\n # attention_weight = attention_weight[]\n # attention_weight = attention_weight[0]\n spec_t = spec_t.numpy()\n spec_t = spec_t.T\n\n text = dec_txt[0]\n idx_text_inp = np.argwhere(np.diff(np.r_[False, text, False]))\n idex_text_inp = np.squeeze(idx_text_inp)\n zero_cnt_text = idex_text_inp[-1]\n # print(\"spec_t shape is\", np.shape(spec_t))\n # spec_t = tf.transpose(spec_t)\n idx_spec = np.argwhere(np.diff(np.r_[False, spec_t[0], False]))\n idx_tar = np.argwhere(np.diff(np.r_[False, spec_tar_t[0], False]))\n find_zero_spec = np.squeeze(idx_spec)\n find_zero_spec_tar = np.squeeze(idx_tar)\n zero_cnt = find_zero_spec[-1]\n zero_cnt_tar = find_zero_spec_tar[-1]\n\n # print(\"zero_cnt\", zero_cnt)\n\n for x in range(6):\n plot = 'decoder_layer{}_block2'.format(x + 1)\n plot_asr = 'asr_decoder_layer{}_block2'.format(x + 1)\n\n ###################### check ######################\n\n # plot_attention_weights_spec(attention_weights, plot, i + 1, find_zero, set_name) # only spec??\n plot_attention_weights_spec(attention_weight, plot, epoch, zero_cnt, zero_cnt_tar) # spec plot\n plot_attention_weights(attention_weight_asr, plot_asr, epoch, zero_cnt, zero_cnt_text) # asr plot\n\n if epoch % 5 == 0:\n epc = int(epoch)\n name_after = 'after_predict_epoch={}'.format(epc)\n result_after = result[0]\n result_after = np.transpose(result_after, (1, 0))\n\n # train before (original input)\n plt.figure(figsize=(10, 4))\n # plt.figure(figsize=(10, 4))\n librosa.display.specshow(librosa.amplitude_to_db(result_before, ref=np.max), y_axis='hz', x_axis='time',\n sr=16000, hop_length=args.hop)\n plt.title(name_before)\n plt.colorbar(format='%+2.0f dB')\n plt.tight_layout()\n fig_save_dir = './result/' + ckpt_path + 
'_fig/'\n if not os.path.exists(fig_save_dir):\n os.makedirs(fig_save_dir)\n plt.savefig(fig_save_dir + name_before + '.png')\n plt.cla()\n plt.close()\n\n make_wav = librosa.istft(result_before, hop_length=args.hop)\n wav_save_dir = './result/' + ckpt_path + '_wav/'\n if not os.path.exists(wav_save_dir):\n os.makedirs(wav_save_dir)\n sf.write(wav_save_dir + name_before + '.wav', make_wav, 16000, format='WAV', endian='LITTLE',\n subtype='PCM_16')\n\n # train after (y_hat)\n plt.figure(figsize=(10, 4))\n # plt.figure(figsize=(10, 4))\n librosa.display.specshow(librosa.amplitude_to_db(result_after, ref=np.max), y_axis='hz', x_axis='time',\n sr=16000, hop_length=args.hop)\n plt.title(name_after)\n plt.colorbar(format='%+2.0f dB')\n plt.tight_layout()\n plt.savefig(fig_save_dir + name_after + '.png')\n plt.cla()\n plt.close()\n\n make_wav = librosa.istft(result_after, hop_length=args.hop)\n sf.write(wav_save_dir + name_after + '.wav', make_wav, 16000, format='WAV', endian='LITTLE',\n subtype='PCM_16')\n\n # real input (source)\n save_tar = tar_spec[0]\n save_tar = np.transpose(save_tar, (1, 0))\n plt.figure(figsize=(10, 4))\n # plt.figure(figsize=(10, 4))\n librosa.display.specshow(librosa.amplitude_to_db(save_tar, ref=np.max), y_axis='hz', x_axis='time',\n sr=16000, hop_length=args.hop)\n real_name = 'real_epoch={}'.format(epc_before)\n plt.title(real_name)\n plt.colorbar(format='%+2.0f dB')\n plt.tight_layout()\n fig_save_dir = './result/' + ckpt_path + '_fig/'\n if not os.path.exists(fig_save_dir):\n os.makedirs(fig_save_dir)\n plt.savefig(fig_save_dir + real_name + '.png')\n plt.cla()\n plt.close()\n\n make_wav = librosa.istft(save_tar, hop_length=args.hop)\n\n wav_save_dir = './result/' + ckpt_path + '_wav/'\n if not os.path.exists(wav_save_dir):\n os.makedirs(wav_save_dir)\n sf.write(wav_save_dir + real_name + '.wav', make_wav, 16000, format='WAV', endian='LITTLE',\n subtype='PCM_16')\n\n # train before np file\n np_save_dir = './result/' + ckpt_path + '_np_file/'\n if not os.path.exists(np_save_dir):\n os.makedirs(np_save_dir)\n np.save(np_save_dir + name_before, result_before)\n\n # train after np file\n np_save_dir = './result/' + ckpt_path + '_np_file/'\n if not os.path.exists(np_save_dir):\n os.makedirs(np_save_dir)\n np.save(np_save_dir + name_after, result_after)\n\n # real\n np_save_dir = './result/' + ckpt_path + '_np_file/'\n if not os.path.exists(np_save_dir):\n os.makedirs(np_save_dir)\n real_name = 'y_real_epoch={}'.format(epc)\n\n np.save(np_save_dir + real_name, save_tar)\n\n\nif __name__ == '__main__':\n main()","sub_path":"main_2dec.py","file_name":"main_2dec.py","file_ext":"py","file_size_in_byte":25114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"469905903","text":"from subprocess import STDOUT,PIPE\nimport os.path,subprocess\nimport json, re\nfrom stanfordcorenlp import StanfordCoreNLP\nfrom nltk.tree import ParentedTree\nfrom nltk import Tree\n\n\n#CORE_NLP_DIR = '/Users/virk/Downloads/stanford-corenlp-full-2018-01-31/' \n#PARSER = StanfordCoreNLP(CORE_NLP_DIR, memory='8g', lang='en')\nprops={'annotators': 'lemma'}\nprint('healpers loaded')\ndef compute_head2(newtree):\n tree_nodes_file = open('node-tree.txt', 'w')\n tree_nodes_list = []\n for subtree in newtree.subtrees():\n tree_nodes_file.write(str(subtree))\n tree_nodes_list.append(str(subtree))\n tree_nodes_file.close()\n java_file = 'Test'\n cmd = 'java -cp .:./stanford-parser-full-2014-01-04/stanford-parser.jar:./headFinder/ ' + java_file \n proc = 
subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n \n return dict(zip(tree_nodes_list,str(out.decode('utf-8')).split('\\n')))\n #print(out)\ndef find_target_attribs(newtree,word,PARSER):\n for subtree in newtree.subtrees():\n if subtree.label() in ['CC','CD','DT','EX','FW','IN','JJ','JJR','JJS','LS','MD','NN',\n 'NNS','NNP',\t'NNPS','PDT','POS','PRP','PRP$','RB','RBR','RBS',\n 'RP'\t,'SYM','TO','UH','VB','VBD','VBG','VBN','VBP','VBZ','WDT',\n 'WP','WP$','WRB']: \n \n w,pos= subtree.pos()[0]\n #print(w,word)\n if w == word:\n target_tree = subtree\n target_lemma = json.loads(PARSER.annotate(w,properties=props))[\"sentences\"][0]['tokens'][0]['lemma']\n target_pos = pos\n return(target_tree,target_lemma,target_pos)\n return (None,None,None)\n\ndef find_subcat(target_tree):\n return str(target_tree.parent().productions()[0]).replace(' ','')\ndef find_left_right_word_attribs(subtree,head_tree):\n if len(subtree.pos()) == 1:\n left_word,left_word_pos = 'NA','NA'\n right_word,right_word_pos = 'NA','NA'\n elif head_tree.pos()[0] == subtree.pos()[0]:\n left_word,left_word_pos = subtree.pos()[-1]\n right_word,right_word_pos = 'NA','NA'\n elif head_tree.pos()[0] == subtree.pos()[-1]:\n right_word,right_word_pos = 'NA','NA'\n left_word,left_word_pos = subtree.pos()[0]\n else:\n right_word,right_word_pos = subtree.pos()[-1]\n left_word,left_word_pos = subtree.pos()[0]\n return (left_word,left_word_pos,right_word,right_word_pos)\n\ndef compute_position(subtree,target_tree):\n for (a,b) in (zip(subtree.treeposition(),target_tree.treeposition())):\n if a == b:\n continue\n elif a < b:\n return 'L'\n else:\n return 'R'\n return 'O'\n\ndef compute_gov_cat(subtree):\n #print(subtree)\n if subtree.label() in ['S','VP','SINV','SQ','ROOT']:\n return subtree.label()\n else:\n return compute_gov_cat(subtree.parent())\ndef compute_parent_attribs(subtree,parent,heads_dict):\n if parent == None:\n parent_word,parent_word_pos='ROOT','ROOT'\n else:\n \n parent_head = heads_dict[str(subtree.parent())]\n \n parent_head_tree = ParentedTree.convert(Tree.fromstring(parent_head.rstrip()))\n parent_word,parent_word_pos=parent_head_tree.pos()[0]\n return (parent_word,parent_word_pos)\n\ndef path_finder(subtree,target_node):\n path = VisitNode(subtree,target_node)\n if path != None:\n #print(subtree)\n return '-'.join(path)\n #print('#'*50)\n else:\n temp_path = []\n \n while (path==None):\n if subtree.parent() == None:\n break\n temp_path = temp_path + [subtree.label()] \n subtree = subtree.parent()\n \n path = VisitNode(subtree,target_node)\n if path != None :\n path = temp_path + path\n \n break\n return '-'.join(path)\n \n \n \ndef VisitNode(node, target):\n #print(node)\n # Base case. 
If we found the target, return target in a list\n if node == target:\n return [node.label()]\n\n # If we're at a leaf and it isn't the target, return None \n if node.height() == 2:\n return None\n\n # recursively iterate over children\n #children = node.subtrees()\n #print(children)\n for i in node:\n #print(i)\n tail = VisitNode(i, target)\n \n if tail: # is not None\n return [node.label()] + tail # prepend node to path back from target\n return None #none of the children contains target\n\ndef build_fes_dict():\n from itertools import groupby\n from operator import itemgetter\n fes_file = open('frame_elements.txt').readlines()\n fes_tuple_list = [tuple(l.rstrip().split('\\t')) for l in fes_file]\n #print(fes_tuple_list)\n return {k : '#and#'.join(list(list(zip(*g))[1])) for k, g in groupby(fes_tuple_list, itemgetter(0))}\n ","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"17632041","text":"\"\"\"\nutil.py\n\"\"\"\n\nimport gzip, cPickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\n# GLOBAL\nMNPATH = '/Users/jacobmenick/Desktop/sandbox/learn_theano/data/mnist.pkl.gz'\n\ndef load_mnist(dataset_path):\n f = gzip.open(dataset_path, 'rb')\n train_set, valid_set, test_set = cPickle.load(f)\n f.close()\n return train_set, valid_set, test_set\n\ndef shared_dataset(data_xy):\n data_x, data_y = data_xy\n shared_x = theano.shared(\n np.asarray(data_x, dtype = theano.config.floatX),\n borrow=True\n )\n\n shared_y = theano.shared(\n np.asarray(data_y, dtype = theano.config.floatX),\n borrow=True\n )\n return shared_x, T.cast(shared_y, 'int32')\n\ndef as_shared(ndarr, cast_to_int = False):\n shared_arr = theano.shared(\n np.asarray(ndarr, dtype = theano.config.floatX),\n borrow = True, broadcastable=True\n )\n if cast_to_int:\n return T.cast(shared_arr, 'int32')\n else:\n return shared_arr\n\ndef quick_load_mnist():\n return map(shared_dataset, load_mnist(MNPATH))\n\ndef visualize_image(pixel_vector):\n n = np.sqrt(pixel_vector.shape[0])\n if not n.is_integer():\n raise RuntimeError(\"Image is not square. \")\n pixel_array = np.array(np.split(pixel_vector, n))\n plt.imshow(pixel_array)\n plt.show()\n\ndef onehotvec(index, ndims):\n out = np.zeros(ndims, dtype=np.int32)\n out[index] = 1\n return out\n\ndef onehot_to_id(onehot):\n return int(np.argmax(onehot))\n\ndef rand_onehot_seq(ndims, length):\n return np.array(map(lambda x: onehotvec(x, ndims), list(np.random.randint(low=0, high=ndims, size=(length,)))))\n\ndef rand_index_seq(ndims, length):\n return np.random.randint(low=0, high=ndims, size=(length,))\n\ndef squarify(image):\n shp = image.shape\n n = np.sqrt(shp[0])\n assert(n.is_integer())\n return np.array(np.split(image, n))\n\n","sub_path":"syntaur/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"356354295","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 31 12:21:34 2018\n\n@author: madsthoisen\n\"\"\"\n## Poor idea. For each state wanted to only consider the squares next to \n# the boxes since these are the only relevant squares. And then move the shortest way\n# to these squares. 
But it was way too compute-intensive.\n\nimport time\nimport random\n# [Left,Right,Up,Down]\n\nfrom Sokoban import GameEngine as GE\nfrom Sokoban import Dijkstra as D\n\ndef ReachableSquares(StartState, GroupWalls, GroupStorages, MapHeight, MapWidth, nboxes):\n startTime = time.time()\n\n wallPositions = []\n for wall in GroupWalls:\n wallPositions.append((wall.x, wall.y))\n \n storageLoc = []\n for storage in GroupStorages:\n storageLoc.append((storage.x, storage.y))\n storageLoc.sort()\n\n endBoxes = ()\n for s in storageLoc:\n endBoxes += s\n \n endPlayer = []\n for s in storageLoc:\n playerLeft = (s[0] - 1, s[1])\n if playerLeft not in wallPositions and playerLeft not in storageLoc:\n endPlayer.append(playerLeft)\n playerRight = (s[0] + 1, s[1])\n if playerRight not in wallPositions and playerRight not in storageLoc:\n endPlayer.append(playerRight)\n playerUp = (s[0], s[1] - 1)\n if playerUp not in wallPositions and playerUp not in storageLoc:\n endPlayer.append(playerUp)\n playerDown = (s[0], s[1] + 1)\n if playerDown not in wallPositions and playerDown not in storageLoc:\n endPlayer.append(playerDown)\n \n newStates = []\n for e in endPlayer:\n newStates.append(e + endBoxes)\n\n states = set()\n for ns in newStates:\n states.add(ns)\n\n step = 1\n visitedStates = newStates[:]\n while True:\n print(step)\n newStates = set(GE.GameEngineReverse(wallPositions, newStates)) - set(visitedStates)\n newRoutes = []\n for ns in newStates:\n states.add(ns)\n ns_dist, ns_prev = D.dijkstra(wallPositions, ns, MapHeight, MapWidth)\n ns_boxes = [ns[i:i+2] for i in range(2,len(ns),2)]\n ns_boxes_neighbors = [(n[0]+1,n[1]) for n in ns_boxes]\n ns_boxes_neighbors.extend([(n[0]-1,n[1]) for n in ns_boxes])\n ns_boxes_neighbors.extend([(n[0],n[1]+1) for n in ns_boxes])\n ns_boxes_neighbors.extend([(n[0],n[1]-1) for n in ns_boxes])\n ns_boxes_neighbors = set(ns_boxes_neighbors) - set(wallPositions) - set(ns_boxes) - set([ns[:2]])\n \n for neighbor in ns_boxes_neighbors:\n if ns_dist[neighbor] < 1e6:\n S = []\n u = neighbor\n while True:\n if ns_prev[u] == ns[:2]:\n break\n S.append(ns_prev[u])\n u = ns_prev[u]\n for s in S:\n routeState = s + ns[2:]\n states.add(routeState)\n newRoutes.append(routeState)\n states.add(neighbor + ns[2:])\n newRoutes.append(neighbor + ns[2:])\n \n visitedStates.extend(newStates)\n visitedStates.extend(newRoutes)\n \n newStates = newStates.union(set(newRoutes))\n step += 1\n print(len(states), len(newStates))\n if len(newStates) == 0 or StartState in states:\n break\n ns_dist, ns_prev = D.dijkstra(wallPositions, StartState, MapHeight, MapWidth)\n \n for n in ns_dist.keys():\n if n + StartState[2:] in states:\n if ns_dist[n] < 1e6:\n S = []\n u = n\n while True:\n if u == StartState[:2]:\n break\n S.append(ns_prev[u])\n u = ns_prev[u]\n for s in S:\n states.add(s + StartState[2:])\n \n print(StartState)\n\n NumberOfEpochs = 50\n rewardFinish = 100\n from tqdm import tqdm\n from Sokoban import GameEngines as GE\n import numpy as np\n \n policy = {}\n values = {}\n rewards = {}\n \n storageLoc = []\n for storage in GroupStorages:\n storageLoc.append((storage.x, storage.y))\n\n count = 0\n for state in states:\n fixrand = [0,0,0,0] # [Left,Right,Up,Down]\n fixrand[random.randint(0,3)] = 1 \n policy[state] = fixrand\n values[state] = 0\n rewards[state] = -1\n boxLoc = []\n for i in range(nboxes):\n boxLoc.append(state[2+2*i:4+2*i])\n filled = 0\n for box in boxLoc:\n if box in storageLoc:\n filled += 1\n if filled == nboxes:\n rewards[state] = rewardFinish\n\n # List over next 
states:\n NextStateLeft = {}\n NextStateRight = {}\n NextStateUp = {}\n NextStateDown = {}\n\n for state in tqdm(states):\n v = values[state]\n NextStateL = GE.GameEngineManual(nboxes, GroupWalls, GroupStorages, MapWidth, MapHeight, ManualMove = [1,0,0,0], PlayerPos = state[0:2], BoxPos = state[2:])\n NextStateLeft[state] = tuple(np.concatenate(([NextStateL[3]], [NextStateL[4]], NextStateL[5])))\n NextStateR = GE.GameEngineManual(nboxes, GroupWalls, GroupStorages, MapWidth, MapHeight, ManualMove = [0,1,0,0], PlayerPos = state[0:2], BoxPos = state[2:])\n NextStateRight[state] = tuple(np.concatenate(([NextStateR[3]], [NextStateR[4]], NextStateR[5])))\n NextStateU = GE.GameEngineManual(nboxes, GroupWalls, GroupStorages, MapWidth, MapHeight, ManualMove = [0,0,1,0], PlayerPos = state[0:2], BoxPos = state[2:])\n NextStateUp[state] = tuple(np.concatenate(([NextStateU[3]], [NextStateU[4]], NextStateU[5])))\n NextStateD = GE.GameEngineManual(nboxes, GroupWalls, GroupStorages, MapWidth, MapHeight, ManualMove = [0,0,0,1], PlayerPos = state[0:2], BoxPos = state[2:])\n NextStateDown[state] = tuple(np.concatenate(([NextStateD[3]], [NextStateD[4]], NextStateD[5]))) \n\n # Value iteration\n count = 0\n for i in tqdm(range(NumberOfEpochs)):\n Delta = 0\n for state in states: \n if rewards[state] != rewardFinish:\n v = values[state]\n if NextStateLeft[state] in states:\n Left = rewards[NextStateLeft[state]]+values[NextStateLeft[state]]\n else:\n Left = -1e6\n \n if NextStateRight[state] in states:\n Right = rewards[NextStateRight[state]]+values[NextStateRight[state]]\n else:\n Right = -1e6\n \n if NextStateUp[state] in states:\n Up = rewards[NextStateUp[state]]+values[NextStateUp[state]]\n else:\n Up = -1e6\n \n if NextStateDown[state] in states:\n Down = rewards[NextStateDown[state]]+values[NextStateDown[state]]\n else:\n Down = -1e6\n values[state] = max(Left, Right, Up, Down)\n Delta = max(Delta, abs(v - values[state]))\n count += 1\n# print(Delta)\n print(time.time() - startTime)\n return values\n","sub_path":"sokoban/___GRAVEYARD/ReachableSquares.py","file_name":"ReachableSquares.py","file_ext":"py","file_size_in_byte":7279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"623493661","text":"\nimport tensorflow as tf\nimport numpy as np\nnp.set_printoptions(threshold=np.inf)\n\n#########################################################\n\nw_np = np.random.uniform(size=(100, 10))\nidx_np = np.random.randint(low=0, high=100, size=(5,))\nupdate_np = np.ones(shape=(5, 10))\n\nw = tf.Variable(w_np, dtype=tf.float32)\nidx = tf.Variable(idx_np, dtype=tf.int32)\nupdate = tf.Variable(update_np, dtype=tf.float32)\n\nw = tf.scatter_update(w, idx, tf.gather(w, idx) + update)\n'''\nidx = tf.reshape(idx, [5, 1])\nvec = tf.gather_nd(w, idx)\n'''\nvec = tf.gather(w, idx)\n\n#########################################################\n\nsess = tf.InteractiveSession()\ntf.global_variables_initializer().run()\ntf.local_variables_initializer().run()\n\n#####################################################\n\n[vec] = sess.run([vec], feed_dict={})\nprint (vec)\nprint ()\nprint (w_np[idx_np])\n\n#####################################################\n\n\n\n\n\n\n","sub_path":"Embedding/scatter2.py","file_name":"scatter2.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"113861153","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nt = np.arange(0, 2, 0.01)\ns = 
1 + np.sin(2 * np.pi * t)\n\nfig, ax = plt.subplots()\nax.plot(t, s)\n\nax.set(xlabel='time (s)', ylabel='voltage (mV)',\n title='About as simple as it gets, folks')\n\nax.grid()\nplt.show()","sub_path":"pyplot/src/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"415389888","text":"zmienne = list()\nwyrazenie = list()\nworking = True\nimplikacje = list()\nrownowaznosci = list()\nalternatywy = list()\nkoniunkcje = list()\nnegacje = list()\n\n\nprint(\"wpisz ile zmiennych bedzie w wyrazeniu\")\nprint(\"Potraktuj zlozone wyrazenia jako jedno wyrazenie, tzn jezeli chcesz przeksztalcic'(pVp) -> (pvp)' to wpisz '(pvp)' jako jedna zmienna\")\nfor i in range(int(input())):\n print(\"wprowadz zmienne\")\n zmienne.append(input())\n\nprint(\"oto twoje zmienne\" ,zmienne)\n\nprint(\"Wpisz wyrażenie logiczne a ja przerobie je na uproszczone\")\nprint(\"Zmienne wprowadzaj wedlug tego schematu:\"\n \"pierwsza zmienna na liscie powyzej odpowiada numerowi 0, druga numerowi 1, trzecia numerowi 2 itd.\")\nprint(\"Implikacje umieść jako jedno w ten spoób '->' \")\nprint(\"Równoważność umieść jako jedno w ten sposób '<->' \")\nprint(\"Alternatywę umieść jako 'v' \")\nprint(\"Koniunkcje umieść jako '^' \")\nprint(\"Aby zakonczyc dodawani wprowadź '?'\")\n\nwhile working == True:\n wyrazenie.append((input()))\n for j in range(len(zmienne)):\n for i in range(len(wyrazenie)):\n if str(j) == str(wyrazenie[i]):\n wyrazenie[i] = zmienne[j]\n print(\"wyrażenie teraz wyglada tak\", wyrazenie)\n\n for i in range(len(wyrazenie)):\n if wyrazenie[i] == \"?\":\n wyrazenie.pop(i)\n working = False\n\nfor j in range(len(wyrazenie)):\n if wyrazenie[j] == \"->\":\n implikacje.append(j)\n\nfor j in range(len(wyrazenie)):\n if wyrazenie[j] == \"<->\":\n rownowaznosci.append(j)\n\nfor j in range(len(wyrazenie)):\n if wyrazenie[j] == \"^\":\n koniunkcje.append(j)\n\nfor j in range(len(wyrazenie)):\n if wyrazenie[j] == \"v\":\n alternatywy.append(j)\n\nfor j in range(len(wyrazenie)):\n if wyrazenie[j] == \"~\":\n negacje.append(j)\n\n\nif len(implikacje) != 0:\n wyrazenie.insert(implikacje[0]-1, \"~\")\n wyrazenie[implikacje[0]+1] = \" v \"\n implikacje.pop(0)\n for j in range(len(zmienne)):\n for i in range(len(wyrazenie)):\n if str(j) == str(wyrazenie[i]):\n wyrazenie[i] = zmienne[j]\n print(\"wyrażenie teraz wyglada tak\", wyrazenie)\n\n\n\nif len(rownowaznosci) != 0:\n if wyrazenie[0] != \"~\" and wyrazenie[2] != \"~\":\n rowno = [\"(\", \"~\", wyrazenie[0], \"v\", wyrazenie[2], \")\", \"^\", \"(\", \"~\", wyrazenie[2], \"v\", wyrazenie[0], \")\" ]\n wyrazenie = rowno\n print(wyrazenie)\n elif wyrazenie[0] == \"~\":\n rowno = [\"(\", \"~\", wyrazenie[1], \"v\", wyrazenie[2], \")\", \"^\", \"(\", \"~\", wyrazenie[2], \"v\", wyrazenie[1], \")\"]\n rowno = rowno\n wyrazenie = rowno\n print(wyrazenie)\n elif wyrazenie[2] == \"~\":\n rowno = [\"(\", \"~\", wyrazenie[0], \"v\", wyrazenie[3], \")\", \"^\", \"(\", \"~\", wyrazenie[3], \"v\", wyrazenie[0], \")\"]\n wyrazenie = rowno\n print(wyrazenie)\n\n\nif len(negacje) != 0:\n li = list(zmienne[0])\n for i in range(len(li)):\n if li[i] == '^':\n print(li)\n li[i] = \"v\"\n li.insert(i-1, \"~\")\n print(li)\n\n li.insert(i+2, \"~\")\n for i in range(len(li)):\n if li[i] == \"~\" and li[i+1] == \"~\":\n li.remove(li[i+1])\n li.remove((li[i]))\n print(li)\n break\n\n elif li[i] == 'v':\n li[i] = \"^\"\n li.insert(i-1, \"~\")\n li.insert(i+2, \"~\")\n for i in range(len(li)):\n if li[i] == 
\"~\" and li[i+1] == \"~\":\n li.remove(li[i+1])\n li.remove((li[i]))\n print(li)\n\n break\n\n\n\nfor i in range(wyrazenie):\n if wyrazenie[i] == \"~\" and wyrazenie[i+1] == \"~\":\n wyrazenie.remove(wyrazenie[i+1])\n wyrazenie.remove((wyrazenie[i]))\n\n","sub_path":"mainv3.py","file_name":"mainv3.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"441703581","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\nCreated on April 15 2017\n\n@author: yytang\n\"\"\"\nimport re\n\nfrom scrapy import Selector\n\nfrom libs.misc import get_spider_name_from_domain\nfrom libs.polish import polish_title, polish_subtitle, polish_content\nfrom novelsCrawler.spiders.novelSpider import NovelSpider\n\n\nclass KanmeikanSpider(NovelSpider):\n \"\"\"\n classdocs\n\n example: http://book.kanmeikan.com/read/46398/\n \"\"\"\n\n allowed_domains = ['book.kanmeikan.com']\n name = get_spider_name_from_domain(allowed_domains[0])\n custom_settings = {\n 'DOWNLOAD_DELAY': 0.3,\n }\n\n def parse_title(self, response):\n sel = Selector(response)\n title = sel.xpath('//h1/text()').extract()[0]\n title = polish_title(title, self.name)\n return title\n\n def parse_episodes(self, response):\n sel = Selector(response)\n episodes = []\n subtitle_selectors = sel.xpath('//dl/dd[@class=\"chapter_list\"]/a')\n episode_num = len(subtitle_selectors)\n for idx, subtitle_selector in enumerate(subtitle_selectors):\n if idx % 3 == 0:\n page_id = idx + 2\n if page_id >= episode_num:\n page_id = episode_num - 1\n elif idx % 3 == 1:\n page_id = idx\n if page_id == episode_num - 1:\n page_id -= 1\n else:\n page_id = idx - 2\n\n subtitle_url = subtitle_selector.xpath('@href').extract()[0]\n pattern = r'javascript:Chapter\\((\\d+),(\\d+)\\);'\n m = re.match(pattern, subtitle_url)\n if m:\n subtitle_url = '/read/{}/{}/'.format(m.group(2), m.group(1))\n subtitle_url = response.urljoin(subtitle_url.strip())\n subtitle_name = subtitle_selector.xpath('text()').extract()[0]\n subtitle_name = polish_subtitle(subtitle_name)\n episodes.append((page_id, subtitle_name, subtitle_url))\n return episodes\n\n def parse_content(self, response):\n sel = Selector(response)\n content = sel.xpath('//div[@style=\"line-height: 30px;padding: 10px 50px;word-wrap: break-word;\"]/p/text()').extract()\n content = polish_content(content)\n return content\n","sub_path":"novelsCrawler/spiders/kanmeikan.py","file_name":"kanmeikan.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"234912694","text":"from playhouse.migrate import PostgresqlMigrator, migrate\n\nfrom redash.models import db\nfrom redash import models\n\nif __name__ == '__main__':\n db.connect_db()\n migrator = PostgresqlMigrator(db.database)\n \n with db.database.transaction():\n\n column = models.Group.countries\n column.null = True\n \n migrate(migrator.add_column('users', 'countries', models.Group.countries))\n\n # for group in models.Group.select():\n # group.save()\n migrate(migrator.drop_not_null('users', 'countries'))\n\n db.close_db(None)\n","sub_path":"migrations/add_countries_field_to_users.py","file_name":"add_countries_field_to_users.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"327531482","text":"from collections import Counter\r\nfrom bwt import *\r\nfrom compsci260lib import *\r\n\r\n\r\ndef 
find(query, bwt_data):\r\n    \"\"\"Given a query sequence and a series of data structures containing\r\n    various information about the reference genome, return a list containing\r\n    all the locations of the query sequence in the reference genome.\r\n\r\n    Args:\r\n        query (str): query sequence to identify in the reference genome\r\n\r\n    Returns:\r\n        (list of ints): of all locations of the query sequence in the\r\n        reference genome\r\n    \"\"\"\r\n\r\n    bwt, suffix_array, ranks, counts = bwt_data\r\n    length = len(bwt)\r\n    results = []\r\n    query_value = query[len(query)-1]\r\n    count_start_index = counts.get(query_value)\r\n    count_end_index = length\r\n    if query_value == 'A':\r\n        next_value = 'C'\r\n        count_end_index = counts.get(next_value)\r\n    if query_value == 'C':\r\n        next_value = 'G'\r\n        count_end_index = counts.get(next_value)\r\n    if query_value == 'G':\r\n        next_value = 'T'\r\n        count_end_index = counts.get(next_value)\r\n    a = count_start_index + 1\r\n    b = count_end_index\r\n\r\n    qi = len(query) - 2\r\n    while qi >= 0:\r\n        qv = query[qi]\r\n        a = counts.get(qv) + ranks.get(qv)[a - 1]\r\n        b = counts.get(qv) + ranks.get(qv)[b-1]\r\n        if a > b:\r\n            return results\r\n        qi -= 1\r\n    \"\"\"\r\n    After the final step of the iteration (in which we have finished\r\nconsidering the largest possible query suffix, i.e. the entire query sequence itself), if we find that the range\r\nof sorted suffixes contains at least one row with a match to the query, we can use the range indices to obtain\r\nthe actual locations of those matches in the reference genome, locations which are themselves stored in the\r\nsuffix array\r\n    \r\n    \"\"\"\r\n\r\n    temp = []\r\n    for i in range(a, b+1):\r\n        if bwt[i] == query[qi+1]:\r\n            temp.append(suffix_array[i])\r\n    results = temp\r\n\r\n    return sorted(results)\r\n\r\n# It may be helpful to read the documentation for the methods\r\n# given below, but you will NOT have to make any changes to\r\n# them in order to complete the problem set.\r\ndef rank(bwt_seq):\r\n    \"\"\"Takes as input a string transformed by the BWT. Returns a dictionary\r\n    with characters as keys and lists as values. 
Each list contains the total\r\n    number of occurrences for the corresponding character up until each\r\n    position in the BWT-transformed string (i.e., its rank).\r\n\r\n    For example:\r\n    rank('ACTGA$TA')['$'] --> [0, 0, 0, 0, 0, 1, 1, 1]\r\n    rank('ACTGA$TA')['A'] --> [1, 1, 1, 1, 2, 2, 2, 3]\r\n    rank('ACTGA$TA')['C'] --> [0, 1, 1, 1, 1, 1, 1, 1]\r\n    rank('ACTGA$TA')['G'] --> [0, 0, 0, 1, 1, 1, 1, 1]\r\n    rank('ACTGA$TA')['T'] --> [0, 0, 1, 1, 1, 1, 2, 2]\r\n\r\n    Args:\r\n        bwt_seq (str): BWT-transformed sequence\r\n\r\n    Returns:\r\n        (dict): with characters as keys and lists of integers\r\n        containing the total number of occurrences for the\r\n        corresponding character up until each position in the\r\n        BWT-transformed string (i.e., its rank)\r\n    \"\"\"\r\n    rank = {}\r\n    characters = set(bwt_seq)\r\n    for character in characters:\r\n        rank[character] = [0]\r\n    rank[bwt_seq[0]] = [1]\r\n    for letter in bwt_seq[1:]:\r\n        for k, v in list(rank.items()):\r\n            v.append(v[-1] + (k == letter))\r\n    return rank\r\n\r\n\r\ndef make_suffix_array(seq):\r\n    \"\"\"Makes the suffix array of a given input string sequence.\r\n\r\n    For example:\r\n    make_suffix_array('GATTACA$') --> [7, 6, 4, 1, 5, 0, 3, 2]\r\n\r\n    Args:\r\n        seq (str): input string with an EOF character\r\n\r\n    Returns:\r\n        (list): of integers of the suffix array of the input string.\r\n    \"\"\"\r\n    suffixes = {}\r\n    for x in range(len(seq)):\r\n        suffixes[seq[x:]] = x\r\n    suffix_array = [suffixes[suffix] for suffix in sorted(suffixes.keys())]\r\n    return suffix_array\r\n\r\n\r\ndef count_smaller_chars(seq):\r\n    \"\"\"Takes as input a string. Returns a dictionary with characters as keys\r\n    and integers as values. The integers track the number of characters in the\r\n    input string which are lexicographically smaller than the corresponding\r\n    character key.\r\n\r\n    For example, using an input DNA sequence like 'GATTACA':\r\n    count_smaller_chars('GATTACA')['A'] --> 0\r\n    (A, being lexicographically first in a DNA sequence,\r\n    should always return 0)\r\n\r\n    count_smaller_chars('GATTACA')['C'] --> 3\r\n    (C, being second, should return the number of A's, which here is 3)\r\n\r\n    count_smaller_chars('GATTACA')['G'] --> 4\r\n    (G, being third, should return the number of A's or C's,\r\n    which here is 4)\r\n\r\n    count_smaller_chars('GATTACA')['T'] --> 5\r\n    (T, being fourth, should return the number of A's or C's or G's,\r\n    which here is 5)\r\n    \"\"\"\r\n    characters = set(seq)\r\n    cntr = Counter(seq)\r\n    total = 0\r\n    counts = {}\r\n    for character in sorted(characters):\r\n        counts[character] = total\r\n        total += cntr[character]\r\n    return counts\r\n\r\n\r\ndef make_all(reference):\r\n    \"\"\"Takes as input a reference string. 
Returns the data structures necessary\r\n    to perform efficient exact string matching searches.\r\n\r\n    Args:\r\n        reference (str): reference string to create data structures for\r\n\r\n    Returns:\r\n        tuple of\r\n            (str) forward bwt of the reference string\r\n            (list of int): suffix_array of the reference string\r\n            (list of int): ranks of the forward bwt\r\n            (dict of str to int): smaller character counts\r\n    \"\"\"\r\n    counts = count_smaller_chars(reference)\r\n    reference = reference + '$'\r\n    suffix_array = make_suffix_array(reference)\r\n    bwt = forward_bwt(reference)\r\n    ranks = rank(bwt)\r\n    return bwt, suffix_array, ranks, counts\r\n\r\n\r\nif __name__ == '__main__':\r\n    # example query sequence\r\n    query_sequence = \"AAACGA\"\r\n    # example reference sequence\r\n    sequence = \"AAAAAAAAACGATAGAGA\"\r\n    find(query_sequence, make_all(sequence))\r\n\r\n","sub_path":"read_aligner.py","file_name":"read_aligner.py","file_ext":"py","file_size_in_byte":6045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"195323619","text":"import json\nimport logging\n\nfrom ingestion import census, url_file_to_gcs, gcs_to_bq_util\nfrom datasources.data_source import DataSource\n\n\n# Names of the counties in the United States from US Census data.\nclass CountyNames(DataSource):\n\n    @staticmethod\n    def get_id():\n        \"\"\"Returns the data source's unique id. \"\"\"\n        return 'COUNTY_NAMES'\n\n    @staticmethod\n    def get_table_name():\n        \"\"\"Returns the BigQuery table name where the data source's data will be\n           stored. \"\"\"\n        return 'county_names'\n\n    def upload_to_gcs(self, url, gcs_bucket, filename):\n        \"\"\"Uploads county names and FIPS codes from census to GCS bucket.\"\"\"\n        url_params = census.get_census_params_by_county(['NAME'])\n        return url_file_to_gcs.url_file_to_gcs(url, url_params, gcs_bucket, filename)\n\n    def write_to_bq(self, dataset, gcs_bucket, filename):\n        \"\"\"Writes county names to BigQuery from the provided GCS bucket\n\n        dataset: The BigQuery dataset to write to\n        table_name: The name of the bigquery table to write to\n        gcs_bucket: The name of the gcs bucket to read the data from\n        filename: The name of the file in the gcs bucket to read from\"\"\"\n        try:\n            frame = gcs_to_bq_util.load_values_as_dataframe(gcs_bucket, filename)\n            frame = frame.rename(columns={\n                'NAME': 'county_name',\n                'state': 'state_fips_code',\n                'county': 'county_fips_code'\n            })\n            column_types = {\n                'county_name': 'STRING',\n                'state_fips_code': 'STRING',\n                'county_fips_code': 'STRING'\n            }\n            gcs_to_bq_util.add_dataframe_to_bq(frame, dataset, self.get_table_name(),\n                                               column_types=column_types)\n        except json.JSONDecodeError as err:\n            logging.error(\n                'Unable to write to BigQuery due to improperly formatted data: %s', err)\n","sub_path":"python/datasources/county_names.py","file_name":"county_names.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"520647328","text":"__author__ = 'lundh'\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nfrom collections import deque\nimport matplotlib.animation as animation\nfrom multiprocessing import Queue\nfrom matplotlib.lines import Line2D\n\n\nclass Plot():\n    def __init__(self, interactive=False):\n\n        self.fig, self.ax = plt.subplots()\n        self.interactive = interactive\n        if self.interactive:\n            self.cid = self.fig.canvas.mpl_connect('button_press_event', self.onpick)\n        self.lines = list()\n        
self.interval = list()\n\n    def onpick(self, event):\n        #print('click', event)\n        x_click = event.xdata\n        y_click = event.ydata\n        #self.line.set_data(x, self.ys)\n        if len(self.lines) <= 2:\n            line, = self.ax.plot([x_click, x_click], [-0.5, 1.5], 'k-', lw=2, color=cnames[\"blue\"])\n            self.lines.append(line)\n            self.interval.append(x_click)\n            self.interval.sort(reverse=False)\n            line.figure.canvas.draw()\n        # else:\n        #     #an interval is already selected but there's another click, move the line closest to the click\n        #     closest_line = self.lines.sort(key=lambda x: x.xdata, reverse=True)\n\n    def set_limit_y(self, limit_h, limit_l):\n        plt.ylim([limit_l, limit_h])\n\n    def add_digital_line(self, durations, values, color):\n        time = 0\n        times = list()\n        for p in durations:\n            times.append(time+p)#+p)\n            time += p\n\n        self.ax.step(times, values, where='pre', color=color)\n\n    def add_bars(self, y):\n        frequencies = list()\n        for x in range(100):\n            try:\n                frequencies.append(np.bincount(y)[x])\n            except IndexError:\n                frequencies.append(0)\n\n        self.ax.bar(list(range(1, 100)), frequencies[1:], 0.12, color='r')\n        self.fig.text(0.8, 0.8, \"0=\"+str(frequencies[0]), style='italic',\n                      bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 10})\n\n    def add_legend(self, list):\n        #list of strings like \"X1 = blaghlbah\"\n        plt.legend([x for x in list], loc='upper right')\n\n    def add_plot_line(self, x, y, color):\n        if self.interactive:\n            self.ax.plot(x, y, color=color, picker=2)\n        else:\n            self.ax.plot(x, y, color=color)\n\n    def show_graph(self):\n        plt.show()\n\n    def save_graph(self, name):\n        #self.fig.savefig(str(dt.datetime.now().strftime(\"%y-%m-%d-%H-%M-%S\"))+\".png\")\n        self.fig.savefig(str(name)+\".png\")\n\n    def add_title(self, title):\n        self.ax.set_title(title)\n\n    def add_ylabel(self, label):\n        self.ax.set_ylabel(label)\n\n    def add_xlabel(self, label):\n        self.ax.set_xlabel(label)\n\n\nclass Plot3D():\n    def __init__(self):\n        self.fig = plt.figure()\n        self.ax = self.fig.add_subplot(111, projection='3d')\n\n    def add_plot_line(self, x, y, z, color):\n        self.ax.plot(xs=x, ys=y, zs=z, zdir='z', color=color)\n\n    def add_legend(self, list):\n        #list of strings like \"X1 = blaghlbah\"\n        plt.legend([x for x in list], loc='upper right')\n\n    def show_graph(self):\n        plt.show()\n\n    def save_graph(self, name):\n        #self.fig.savefig(str(dt.datetime.now().strftime(\"%y-%m-%d-%H-%M-%S\"))+\".png\")\n        self.fig.savefig(str(name)+\".png\")\n\n    def add_title(self, title):\n        self.ax.set_title(title)\n\n    def add_ylabel(self, label):\n        self.ax.set_ylabel(label)\n\n    def add_xlabel(self, label):\n        self.ax.set_xlabel(label)\n\n\nclass PlotLive():\n    def __init__(self, max_len):\n        self.input_queue = Queue()\n        self.fig = plt.figure()\n        self.ax = self.fig.add_subplot(1, 1, 1)\n        self.ax.set_ylim(0, 10)\n        self.values_x = [0.0]*max_len\n        self.values_y = [0.0]*max_len\n        self.max_len = max_len\n        self.line, = self.ax.plot(self.values_y)\n\n    def add(self, value, timestamp):\n        #print(\"adding\")\n        #self.input_queue.put((value, timestamp))\n        self.values_x.append(timestamp)\n        self.values_y.append(value)\n\n    def update(self, *args):\n\n        #print(self.input_queue.qsize())\n        #if not self.input_queue.empty():\n        d = self.input_queue.get()\n        print(d)\n        self.values_x.append(d[1])\n        self.values_y.append(d[0])\n        del self.values_x[0]\n        del self.values_y[0]\n        self.line.set_ydata(self.values_y)\n        self.line.set_xdata(self.values_x)\n        self.ax.set_xlim(self.values_x[0], self.values_x[-1])\n        return [self.line]\n\n    def show_graph(self):\n        self.ani = animation.FuncAnimation(self.fig, self.update, 
#self.update, # range(0, max_len),\n                                           interval=0, blit=False)\n        plt.show(block=False)\n\ncnames = {\n    'aliceblue': '#F0F8FF',\n    'antiquewhite': '#FAEBD7',\n    'aqua': '#00FFFF',\n    'aquamarine': '#7FFFD4',\n    'azure': '#F0FFFF',\n    'beige': '#F5F5DC',\n    'bisque': '#FFE4C4',\n    'black': '#000000',\n    'blanchedalmond': '#FFEBCD',\n    'blue': '#0000FF',\n    'blueviolet': '#8A2BE2',\n    'brown': '#A52A2A',\n    'burlywood': '#DEB887',\n    'cadetblue': '#5F9EA0',\n    'chartreuse': '#7FFF00',\n    'chocolate': '#D2691E',\n    'coral': '#FF7F50',\n    'cornflowerblue': '#6495ED',\n    'cornsilk': '#FFF8DC',\n    'crimson': '#DC143C',\n    'cyan': '#00FFFF',\n    'darkblue': '#00008B',\n    'darkcyan': '#008B8B',\n    'darkgoldenrod': '#B8860B',\n    'darkgray': '#A9A9A9',\n    'darkgreen': '#006400',\n    'darkkhaki': '#BDB76B',\n    'darkmagenta': '#8B008B',\n    'darkolivegreen': '#556B2F',\n    'darkorange': '#FF8C00',\n    'darkorchid': '#9932CC',\n    'darkred': '#8B0000',\n    'darksalmon': '#E9967A',\n    'darkseagreen': '#8FBC8F',\n    'darkslateblue': '#483D8B',\n    'darkslategray': '#2F4F4F',\n    'darkturquoise': '#00CED1',\n    'darkviolet': '#9400D3',\n    'deeppink': '#FF1493',\n    'deepskyblue': '#00BFFF',\n    'dimgray': '#696969',\n    'dodgerblue': '#1E90FF',\n    'firebrick': '#B22222',\n    'floralwhite': '#FFFAF0',\n    'forestgreen': '#228B22',\n    'fuchsia': '#FF00FF',\n    'gainsboro': '#DCDCDC',\n    'ghostwhite': '#F8F8FF',\n    'gold': '#FFD700',\n    'goldenrod': '#DAA520',\n    'gray': '#808080',\n    'green': '#008000',\n    'greenyellow': '#ADFF2F',\n    'honeydew': '#F0FFF0',\n    'hotpink': '#FF69B4',\n    'indianred': '#CD5C5C',\n    'indigo': '#4B0082',\n    'ivory': '#FFFFF0',\n    'khaki': '#F0E68C',\n    'lavender': '#E6E6FA',\n    'lavenderblush': '#FFF0F5',\n    'lawngreen': '#7CFC00',\n    'lemonchiffon': '#FFFACD',\n    'lightblue': '#ADD8E6',\n    'lightcoral': '#F08080',\n    'lightcyan': '#E0FFFF',\n    'lightgoldenrodyellow': '#FAFAD2',\n    'lightgreen': '#90EE90',\n    'lightgray': '#D3D3D3',\n    'lightpink': '#FFB6C1',\n    'lightsalmon': '#FFA07A',\n    'lightseagreen': '#20B2AA',\n    'lightskyblue': '#87CEFA',\n    'lightslategray': '#778899',\n    'lightsteelblue': '#B0C4DE',\n    'lightyellow': '#FFFFE0',\n    'lime': '#00FF00',\n    'limegreen': '#32CD32',\n    'linen': '#FAF0E6',\n    'magenta': '#FF00FF',\n    'maroon': '#800000',\n    'mediumaquamarine': '#66CDAA',\n    'mediumblue': '#0000CD',\n    'mediumorchid': '#BA55D3',\n    'mediumpurple': '#9370DB',\n    'mediumseagreen': '#3CB371',\n    'mediumslateblue': '#7B68EE',\n    'mediumspringgreen': '#00FA9A',\n    'mediumturquoise': '#48D1CC',\n    'mediumvioletred': '#C71585',\n    'midnightblue': '#191970',\n    'mintcream': '#F5FFFA',\n    'mistyrose': '#FFE4E1',\n    'moccasin': '#FFE4B5',\n    'navajowhite': '#FFDEAD',\n    'navy': '#000080',\n    'oldlace': '#FDF5E6',\n    'olive': '#808000',\n    'olivedrab': '#6B8E23',\n    'orange': '#FFA500',\n    'orangered': '#FF4500',\n    'orchid': '#DA70D6',\n    'palegoldenrod': '#EEE8AA',\n    'palegreen': '#98FB98',\n    'paleturquoise': '#AFEEEE',\n    'palevioletred': '#DB7093',\n    'papayawhip': '#FFEFD5',\n    'peachpuff': '#FFDAB9',\n    'peru': '#CD853F',\n    'pink': '#FFC0CB',\n    'plum': '#DDA0DD',\n    'powderblue': '#B0E0E6',\n    'purple': '#800080',\n    'red': '#FF0000',\n    'rosybrown': '#BC8F8F',\n    'royalblue': '#4169E1',\n    'saddlebrown': '#8B4513',\n    'salmon': '#FA8072',\n    'sandybrown': '#FAA460',\n    'seagreen': '#2E8B57',\n    'seashell': '#FFF5EE',\n    'sienna': '#A0522D',\n    'silver': '#C0C0C0',\n    'skyblue': '#87CEEB',\n    'slateblue': '#6A5ACD',\n    'slategray': '#708090',\n    'snow': '#FFFAFA',\n    'springgreen': '#00FF7F',\n    'steelblue': '#4682B4',\n    'tan': '#D2B48C',\n    'teal': '#008080',\n    'thistle': '#D8BFD8',\n    'tomato': '#FF6347',\n    
'turquoise': '#40E0D0',\n    'violet': '#EE82EE',\n    'wheat': '#F5DEB3',\n    'white': '#FFFFFF',\n    'whitesmoke': '#F5F5F5',\n    'yellow': '#FFFF00',\n    'yellowgreen': '#9ACD32'}","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":8695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"388514043","text":"import tensorflow as tf\nfrom loader_utils import *\n\n\ndef extract(tfrecord_file):\n    features = {\n        \"raw_img\": tf.FixedLenFeature(shape=[], dtype=tf.string),\n        \"labels\": tf.FixedLenFeature(shape=[322],\n                                     dtype=tf.float32)\n    }\n    return tf.parse_single_example(serialized=tfrecord_file,\n                                   features=features)\n\n\nclass Reader(object):\n    def __init__(self, cfg, compression_type=\"GZIP\"):\n        self.cfg = cfg\n        tfrecord_file = cfg.loader.tfrecord_file\n        if not isinstance(tfrecord_file, list):\n            self.tfrecord_file = [tfrecord_file]\n        self.compression_type = compression_type\n\n        self.dataset = tf.data.TFRecordDataset(filenames=self.tfrecord_file,\n                                               compression_type=self.compression_type)\n        self.dataset = self.dataset.map(map_func=extract)\n        self.dataset = self.dataset.shuffle(\n            buffer_size=self.cfg.train.shuffle_buffer_size)\n        # drop_remainder=False (default) will generate data with a dynamic\n        # batch size (None in the shape), which is recommended\n        self.dataset = self.dataset.batch(batch_size=self.cfg.train.batch_size,\n                                          drop_remainder=False)\n        self.dataset = self.dataset.repeat(self.cfg.train.n_epoch)\n        self.iterator = self.dataset.make_one_shot_iterator()\n        self.next_data = self.iterator.get_next()\n        img = self.next_data[\"raw_img\"]\n        img = tf.decode_raw(img, tf.uint8)\n        # -1 will keep img batch size as None when reshaping\n        img = tf.reshape(img, [-1, 664, 664, 3])\n        self.next_data[\"raw_img\"] = img\n\n    def feed(self):\n        img = self.next_data[\"raw_img\"]\n        labels = self.next_data[\"labels\"]  # lmks + lmk_attr + bbox = 322\n        lmks = labels[:, :self.cfg.loader.n_lmks * 2]\n        lmks_attr = labels[:,\n                    self.cfg.loader.n_lmks * 2:self.cfg.loader.n_lmks * 3]\n\n        w = self.cfg.loader.input_shape[1]\n        h = self.cfg.loader.input_shape[0]\n        max_rotate_angle = 30\n        positive_radius = self.cfg.loader.positive_radius\n        negative_radius = self.cfg.loader.negative_radius\n        regression_radius = self.cfg.loader.regression_radius\n        input_over_output_stride = self.cfg.loader.input_over_output_stride\n\n        # TODO: data preprocessing uses numpy and is converted into tensor by\n        # tf.py_func, which is inefficient, an ideal way is preprocessing tensor\n        # directly\n        data_batch, heatmap, regression_map, regression_weight = tf.py_func(\n            func=preprocess,\n            inp=[img, lmks, lmks_attr, w, h, max_rotate_angle, positive_radius,\n                 input_over_output_stride, negative_radius, regression_radius],\n            Tout=[tf.float32, tf.float32, tf.float32, tf.float32])\n        # data from py_func will have unknown shape, need to set a shape\n        data_batch.set_shape([None, self.cfg.loader.input_shape[0],\n                              self.cfg.loader.input_shape[1],\n                              self.cfg.loader.input_shape[2]])\n        heatmap.set_shape([None, 64, 64, 106])\n        regression_map.set_shape([None, 64, 64, 212])\n        regression_weight.set_shape([None, 64, 64, 212])\n        return data_batch, heatmap, regression_map, regression_weight\n\n\ndef preprocess(img_array, lmks, lmks_attr, w, h, max_rotate_angle,\n               positive_radius,\n               input_over_output_stride, negative_radius, regression_radius):\n    # img_array = data[\"raw_img\"]\n    # labels = data[\"labels\"]  # lmks + lmk_attr + bbox = 322\n    # lmks = labels[:, :cfg.loader.n_lmks * 2]\n    # lmks_attr = 
labels[:, cfg.loader.n_lmks * 2:cfg.loader.n_lmks * 3]\n\n    # w = cfg.loader.input_shape[1]\n    # h = cfg.loader.input_shape[0]\n    # max_rotate_angle = 30\n    # positive_radius = cfg.loader.positive_radius\n    # negative_radius = cfg.loader.negative_radius\n    # regression_radius = cfg.loader.regression_radius\n    # input_over_output_stride = cfg.loader.input_over_output_stride\n    ### new data space for self.data\n    data_batch = np.zeros((img_array.shape[0],\n                           h,\n                           w,\n                           3))\n    n_lmks = 106\n    batch_size = data_batch.shape[0]\n\n    for i in range(img_array.shape[0]):\n        img = img_array[i, :]\n        # img = np.transpose(img, (1, 2, 0))  # CHW -> HWC\n        this_lmks = lmks[i].reshape(-1, 2)\n        this_lmks_attr = lmks_attr[i].ravel()\n\n        ### Crop data to augment attr performance.\n        img, this_lmks, this_lmks_attr = attr_crop(w, h,\n                                                   img, this_lmks,\n                                                   this_lmks_attr)\n\n        img, this_lmks, this_lmks_attr = flip_lr(n_lmks, w, img,\n                                                 this_lmks,\n                                                 this_lmks_attr)\n\n        img, this_lmks, this_lmks_attr = rotate(n_lmks, w, h, max_rotate_angle,\n                                                img,\n                                                this_lmks,\n                                                this_lmks_attr)\n        param_conf = {'kernel_size_min': 3, 'kernel_size_max': 9,\n                      'sigma_min': 1, 'sigma_max': 5,\n                      'length_min': 2, 'length_max': 10,\n                      'angle_min': 1, 'angle_max': 359}\n        img, this_lmks, this_lmks_attr = blur_perturb(n_lmks, img,\n                                                      this_lmks,\n                                                      this_lmks_attr,\n                                                      param_conf)\n\n        data_batch[i, :] = img  # HWC -> CHW\n        lmks[i] = this_lmks.ravel()\n        lmks_attr[i] = this_lmks_attr.ravel()\n\n    # augmentation image and label\n    # data_batch, lmks, lmks_attr = augmentation(self.config, data_batch, lmks, lmks_attr)\n\n    # change image format\n    # data_batch = transform_image_format(data_batch, source_format='rgb',\n    #                                     target_format=cfg.loader.input_format)\n\n    # normalize image\n    data_batch -= np.array([128., 128., 128.], np.float32)\n    data_batch *= 1. 
/ 128.\n\n    heatmap = transform_lmks_to_heatmap_batch(lmks,\n                                              lmks_attr,\n                                              batch_size,\n                                              h,\n                                              w,\n                                              input_over_output_stride,\n                                              n_lmks,\n                                              positive_radius,\n                                              regression_radius,\n                                              negative_radius\n                                              )\n    regression_map, regression_weight = transform_lmks_to_regression_map_batch(\n        lmks,\n        lmks_attr,\n        batch_size,\n        h,\n        w,\n        input_over_output_stride,\n        n_lmks,\n        positive_radius,\n        regression_radius,\n        negative_radius)\n    data_batch = data_batch.astype(np.float32)\n    heatmap = heatmap.astype(np.float32)\n    regression_map = regression_map.astype(np.float32)\n    regression_weight = regression_weight.astype(np.float32)\n    return data_batch, heatmap, regression_map, regression_weight\n\n\nif __name__ == \"__main__\":\n    # from PIL import Image\n    import numpy as np\n    from get_config import get_config\n\n    cfg = get_config(\"./configs/config.yaml\")\n\n    Reader = Reader(cfg)\n    # next_data, iterator_handle, handle = Reader.feed()\n    # data = iterator.get_next()\n    # batch = get_data(iterator, cfg)\n    datas = Reader.feed()\n\n    with tf.Session() as sess:\n        sess.run(tf.global_variables_initializer())\n        # next_data_array = sess.run(fetches=next_data,\n        #                            feed_dict={handle: iterator_handle.eval(\n        #                                session=sess)})\n\n        while True:\n            # next_data_array = sess.run(Reader.next_data)\n            data_batch, heatmap, regression_map, regression_weight = sess.run(\n                datas)\n            # print(next_data_array.keys())\n            print(data_batch.shape, heatmap.shape, regression_map.shape,\n                  regression_weight.shape)\n            pass\n","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":8451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"117144613","text":"\"\"\"\nQuestion 6\nLevel 2\n\nQuestion:\nWrite a program that calculates and prints the value according to the given formula:\nQ = Square root of [(2 * C * D)/H]\nFollowing are the fixed values of C and H:\nC is 50. 
H is 30.\nD is the variable whose values should be input to your program in a comma-separated sequence.\nExample\nLet us assume the following comma separated input sequence is given to the program:\n100,150,180\nThe output of the program should be:\n18,22,24\n\nHints:\nIf the output received is in decimal form, it should be rounded off to its nearest value (for example, if the output received is 26.0, it should be printed as 26)\nIn case of input data being supplied to the question, it should be assumed to be a console input.\n\"\"\"\n\nfrom math import sqrt\n\n\n# def q_formula(C, D, H):\n#     q = sqrt((2 * C * D) / H)\n#     return q\nq_formula = lambda C, D, H: sqrt((2 * C * D) / H)\n\nc = 50\nh = 30\nvalue = input(\"please provide d : \")\n\nd = lambda x: x.split(\",\") if x.count(\",\") else [x]\n\nprint([q_formula(c, int(i), h) for i in d(value)])\n","sub_path":"q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"386382124","text":"# Copyright 2015 refractionPOINT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom beach.actor import Actor\nimport hashlib\nfrom sets import Set\nimport time\nSAMLoadWidgets = Actor.importLib( 'analytics/StateAnalysis', 'SAMLoadWidgets' )\n\ndef GenerateDetectReport( agentid, msgIds, cat, detect ):\n    if type( msgIds ) is not tuple and type( msgIds ) is not list:\n        msgIds = ( msgIds, )\n    if type( agentid ) is tuple or type( agentid ) is list:\n        agentid = ' / '.join( agentid )\n    reportId = hashlib.sha256( str( msgIds ) ).hexdigest()\n    return { 'source' : agentid, 'msg_ids' : msgIds, 'cat' : cat, 'detect' : detect, 'report_id' : reportId }\n\nclass StatelessActor ( Actor ):\n    def init( self, parameters ):\n        if not hasattr( self, 'process' ):\n            raise Exception( 'Stateless Actor has no \"process\" function' )\n        self._reporting = self.getActorHandle( 'analytics/report' )\n        self._tasking = None\n        self.handle( 'process', self._process )\n\n    def newDetect( self, objects = [], relations = [], desc = None, mtd = {} ):\n        d = {}\n\n        if 0 != len( objects ):\n            d[ 'obj' ] = objects\n        if 0 != len( relations ):\n            d[ 'rel' ] = relations\n        if desc is not None:\n            d[ 'desc' ] = desc\n        if 0 != len( mtd ):\n            d[ 'mtd' ] = mtd\n\n        return d\n\n    def task( self, msg, dest, cmdsAndArgs, expiry = None, inv_id = None ):\n        routing, event, mtd = msg.data\n        if self._tasking is None:\n            self._tasking = self.getActorHandle( 'analytics/autotasking', mode = 'affinity' )\n            self.log( \"creating tasking handle for the first time for this detection module\" )\n\n        if type( cmdsAndArgs[ 0 ] ) not in ( tuple, list ):\n            cmdsAndArgs = ( cmdsAndArgs, )\n        data = { 'msg' : msg.data, 'dest' : dest, 'tasks' : cmdsAndArgs }\n\n        if expiry is not None:\n            data[ 'expiry' ] = expiry\n        if inv_id is not None:\n            data[ 'inv_id' ] = inv_id\n\n        self._tasking.shoot( 'task', data, key = routing[ 'agentid' ] )\n        self.log( \"sent for tasking: %s\" % ( str(cmdsAndArgs), ) )\n\n    def _process( self, msg ):\n        
detects = self.process( msg )\n\n        if 0 != len( detects ):\n            self.log( \"reporting detects generated\" )\n            routing, event, mtd = msg.data\n            cat = type( self ).__name__\n            cat = cat[ cat.rfind( '.' ) + 1 : ]\n            for detect in detects:\n                self._reporting.shoot( 'report', GenerateDetectReport( routing[ 'agentid' ],\n                                                                       ( routing[ 'event_id' ], ),\n                                                                       cat,\n                                                                       detect ) )\n        return ( True, )\n\nclass StatefulActor ( Actor ):\n    def init( self, parameters ):\n        self._compiled_machines = {}\n        self._machine_activity = {}\n        self._machine_ttl = parameters.get( 'machine_ttl', ( 60 * 60 * 24 * 7 ) )\n        if not hasattr( self, 'initMachines' ):\n            raise Exception( 'Stateful Actor has no \"initMachines\" function' )\n        if not hasattr( self, 'processDetects' ):\n            raise Exception( 'Stateful Actor has no \"processDetects\" function' )\n\n        self.initMachines( parameters )\n\n        if not hasattr( self, 'machines' ):\n            raise Exception( 'Stateful Actor has no associated detection machines' )\n        if not hasattr( self, 'shardingKey' ):\n            raise Exception( 'Stateful Actor has no associated shardingKey (or None)' )\n\n        self._reporting = self.getActorHandle( 'analytics/report' )\n        self.handle( 'process', self._process )\n\n        self.schedule( 60 * 60, self._garbageCollectOldMachines )\n\n    def _garbageCollectOldMachines( self ):\n        for shard in self._machine_activity.keys():\n            if self._machine_activity[ shard ] < time.time() - self._machine_ttl:\n                del( self._machine_activity[ shard ] )\n                del( self._compiled_machines[ shard ] )\n\n    def _process( self, msg ):\n        routing, event, mtd = msg.data\n\n        shard = None\n        if self.shardingKey is not None:\n            shard = routing.get( self.shardingKey, None )\n\n        if shard not in self._compiled_machines:\n            self.log( 'creating new state machine' )\n            self._compiled_machines[ shard ] = {}\n            for mName, m in self.machines.iteritems():\n                self._compiled_machines[ shard ][ mName ] = eval( '(%s)' % m, SAMLoadWidgets(), { 'self' : self } )\n\n        actual_machines = self._compiled_machines[ shard ]\n        self._machine_activity[ shard ] = time.time()\n\n        machineEvent = { 'event' : event, 'routing' : routing }\n\n        for mName, m in actual_machines.iteritems():\n            detects = m._execute( machineEvent )\n            if detects is not None and 0 != len( detects ):\n                detects = self.processDetects( detects )\n                for detect in detects:\n                    self._reporting.shoot( 'report',\n                                           GenerateDetectReport( tuple( Set( [ e[ 'routing' ][ 'agentid' ] for e in detect ] ) ),\n                                                                 tuple( Set( [ e[ 'routing' ][ 'event_id' ] for e in detect ] ) ),\n                                                                 '%s/%s' % ( self.__class__.__name__, mName ),\n                                                                 detect ) )\n        return ( True, )","sub_path":"cloud/beach/hcp/Detects.py","file_name":"Detects.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"146989581","text":"\"\"\"\n\"\"\"\n\nimport unittest\nimport os\nimport arcpy\nimport filecmp\nimport tempfile\n\n\nclass TestDataTypeENVIRasterSeries(unittest.TestCase):\n    \"\"\"Tests the ENVIRASTERSERIES task datatype\"\"\"\n    config = None\n\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"Class setup creates a toolbox file wrapper\"\"\"\n        cls.config.setup_toolbox('ENVI', 'qa_envitaskengine_datatype_envirasterseries',\n                                 'test_datatype_envirasterseries')\n\n    @classmethod\n    def tearDownClass(cls):\n        pass\n\n    def test_datatype_envirasterseries_file(self):\n        \"\"\"Tests the envirasterseries datatype with file uri.\"\"\"\n        input = os.path.join(self.config.test_data_dir, 'checkerboard.series')\n\n        result = 
arcpy.qa_envitaskengine_datatype_envirasterseries_TEST(input)\n\n        # Verify result exists.\n        self.assertTrue(result.getOutput(0), 'Output ENVIRasterSeries URI not set')\n        path = result.getOutput(0)\n\n        output_filename = os.path.splitext(os.path.basename(path))[0]\n        output_dir = os.path.dirname(path)\n\n        input_dir = os.path.dirname(input)\n        input_filename = os.path.splitext(os.path.basename(input))[0]\n\n        # Verify file(s)\n        output_file = os.path.join(output_dir, output_filename + '.series')\n        input_file = os.path.join(input_dir, input_filename + '.series')\n\n        self.assertTrue(os.path.isfile(output_file), 'Output file does not exist: ' + output_file)\n        self.assertTrue(filecmp.cmp(output_file, input_file), 'Output does not match expected: ' + output_file)","sub_path":"envipyarclib/test/datatype/envirasterseries.py","file_name":"envirasterseries.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"269562379","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pprint, sys\n\nclass Debug(object):\n    ENABLE = False\n    OUTPUT_FILE = ''\n    _count = 0\n\n    @classmethod\n    def output_file(cls, filename):\n        cls.OUTPUT_FILE = filename\n        if filename: open(filename, 'w').close()\n\n    @classmethod\n    def print_(cls, *str):\n        if cls.ENABLE:\n            for s in str:\n                pp = pprint.PrettyPrinter(width = 200, depth = 10, stream = open(cls.OUTPUT_FILE, 'a') if cls.OUTPUT_FILE else None)\n                pp.pprint(s)\n            sys.stdout.flush()\n\n    @classmethod\n    def count(cls):\n        cls._count += 1\n        return cls._count\n    \n","sub_path":"debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"564472897","text":"from typing import List\n\n\nclass Solution:\n    \"\"\"\n    56. Merge Intervals\n    https://leetcode-cn.com/problems/merge-intervals/\n    Given a collection of intervals, merge all overlapping intervals.\n    \"\"\"\n    def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n        # 1. Sort intervals by their start point\n        res = []\n        cur = -1\n        intervals.sort(key=lambda k: k[0])\n        # 2. Iterate over the sorted intervals\n        for i in range(len(intervals)):\n            # 3. 
If the current start falls within the previous interval, merge them; otherwise start a new interval\n            if i == 0 or res[cur][1] < intervals[i][0]:\n                res.append(intervals[i])\n                cur += 1\n            else:\n                res[cur][1] = max(res[cur][1], intervals[i][1])\n\n        return res\n\n\nso = Solution()\nprint(so.merge([[2,6],[8,10],[15,18],[1,3]]))\n","sub_path":"sort.merge-intervals.py","file_name":"sort.merge-intervals.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"334580096","text":"import requests as req\nimport json\nimport functools\nimport utm\nimport math\nfrom models import Dea\nfrom models import User\nfrom geopy import distance\nfrom utm import to_latlon\n# 440547,4473344\n# a = utm.to_latlon(443123, 4475002, 30, \"N\")\n# print(a)\ndef write_json(url):\n    response = req.get(url).json()\n    with open(\"deas.json\", \"w\", encoding=\"utf8\") as file:\n        json.dump(response, file, ensure_ascii = False, indent=4) \nurl = \"https://datos.comunidad.madrid/catalogo/dataset/35609dd5-9430-4d2e-8198-3eeb277e5282/resource/c38446ec-ace1-4d22-942f-5cc4979d19ed/download/desfibriladores_externos_fuera_ambito_sanitario.json\"\n# write_json(url)\ndeas_json = \"deas.json\"\nusers_json = \"users.json\"\n\n\ndef get_data(file_to_open):\n    with open(file_to_open, encoding=\"utf8\") as file:\n        data = json.load(file)[\"data\"]\n    return data\ndata = get_data(deas_json)\ndef write_data(lista, fichero):\n    with open(fichero, \"w\", encoding=\"utf8\") as file:\n        toappend = {\"data\": lista}\n        json.dump(toappend, file)\ndef dea_by_id(dea_code):\n    return next(filter(lambda dea: dea[\"codigo_dea\"] == dea_code,data))\n\ndef change_latlong(dataset):\n    result = {\"data\": []}\n    for i,dea in enumerate(dataset):\n        print(i)\n        try:\n            latlong = utm.to_latlon(int(dea[\"direccion_coordenada_x\"]), int(dea[\"direccion_coordenada_y\"]), 30, \"N\")\n        except:\n            continue\n        dea[\"direccion_coordenada_x\"] = latlong[0]\n        dea[\"direccion_coordenada_y\"] = latlong[1]\n        result[\"data\"].append(dea)\n    with open(\"deas_latlon.json\", \"w\", encoding=\"utf8\") as file:\n        json.dump(result,file,ensure_ascii=False)\n\n# change_latlong(data)\n\ndef get_title(given, title):\n    counter = 0\n    for dea in given:\n        counter += 1 if dea[\"tipo_titularidad\"] == title else 0\n    return counter\n\ndef get_inside_M30(given):\n    target =(\"28029\", \"28036\", \"28046\", \"28039\", \"28016\", \"28020\", \"28002\", \"28003\",\n    \"28015\", \"28010\", \"28006\", \"28028\", \"28008\", \"28004\", \"28001\", \"280013\", \"28014\",\n    \"28009\", \"28007\", \"28012\", \"28005\", \"28045\")\n    counter = 0\n    for dea in given:\n        counter += 1 if dea[\"direccion_codigo_postal\"] in target else 0\n    return counter\n\n# user = User(439653, 4465806)\n# test = user.get_nearest_dea(data[0:1])\n# print(test)\n\ndef menu():\n    print(\"-----------------\")\n    print(\"DEA\")\n    print(\"1. Crear usuario\")\n    print(\"2. Acceder\")\n    print(\"3. Admin\")\n    print(\"4. Salir\")\n    print(\"-----------------\")\nmenu()\nuser = input(\"Elija opción: \")\n\nwhile user.lower() != \"q\":\n\n    # CREATE USER\n    if user == \"1\":\n        name = input(\"Nombre: \")\n        password = input(\"Contraseña: \")\n        new_user = {\"name\": name, \"password\": password}\n        def get_users():\n            with open(\"users.json\") as file:\n                users = json.load(file)\n            return users\n        users = get_users()\n        users[\"data\"].append(new_user)\n        with open(\"users.json\", \"w\") as file:\n            json.dump(users ,file)\n        menu()\n        user = input(\": \")\n\n    # LOG IN\n    elif user == \"2\":\n        def sub_menu():\n            print(\"-----------------\")\n            print(\"1. 
Buscar DEA por código\")\n            print(\"2. Buscar DEA por distancia\")\n            print(\"3. Buscar DEA por radio\")\n            print(\"4. Volver atrás\")\n            print(\"-----------------\")\n\n        def by_code(code):\n            filter_applied = filter(lambda dea: dea[\"codigo_dea\"]==code, data)\n            dea = next(filter_applied, \"No encontrado\")\n            print(dea)\n            sub_menu()\n            user = input(\"Elija opción: \")\n            \n\n        name = input(\"Nombre: \")\n        password = input(\"Contraseña: \")\n        with open(\"users.json\") as file:\n            users = json.load(file)[\"data\"]\n        validation = map(lambda user: True if user[\"name\"] == name and user[\"password\"] == password else False, users)\n        if next(validation):\n            sub_menu()\n            user = input(\"Elija opción: \")\n\n            # DEA BY CODE\n            if user == \"1\":\n                code = input(\"Introduzca código: \")\n                by_code(code)\n                sub_menu()\n                user = input(\"Elija opción: \")\n\n            # DEA BY DISTANCE\n            elif user == \"2\":\n                user_x = int(input(\"Introduzca coordenada X: \"))\n                user_y = int(input(\"Introduzca coordenada Y: \"))\n                userlatlong=utm.to_latlon(user_x,user_y,30,\"N\")\n                \n                user = User(user_x, user_y)\n                dea, H = user.get_nearest_dea(data)\n                latlong = utm.to_latlon(int(dea[\"direccion_coordenada_x\"]), int(dea[\"direccion_coordenada_y\"]), 30, \"N\")\n                def get_meters(user_latlong, dea_latlong):\n                    return distance.distance(user_latlong, dea_latlong).m\n                distance_meters = get_meters(userlatlong,latlong)\n                print(dea)\n                print(f\"https://www.google.com/maps/search/?api=1&query={latlong[0]},{latlong[1]}\")\n                print(f\"https://www.google.com/maps/dir/{userlatlong[0]},+{userlatlong[1]}/{latlong[0]},{latlong[1]}\")\n                print(\"Usted está a \",distance_meters,\" metros\", \"Hipotenusa: \", H)\n                user = input(\"Elija opción: \")\n\n            elif user == \"3\":\n                user_x = int(input(\"Introduzca coordenada X: \"))\n                user_y = int(input(\"Introduzca coordenada Y: \"))\n                user_latlong=utm.to_latlon(user_x,user_y,30,\"N\")\n                # dea_latlong = utm.to_latlon(int(dea[\"direccion_coordenada_x\"]), int(dea[\"direccion_coordenada_y\"]), 30, \"N\")\n\n                user = User(user_x, user_y)\n                deas_list = user.get_nearest_by_radio(data, 100)\n                print(f\"Se han encontrado {len(deas_list)} D.E.A.s:\")\n                all_points = f\"https://www.google.com/maps/dir/{user_latlong[0]},+{user_latlong[1]}/\"\n                for dea in deas_list:\n                    dea_latlong = utm.to_latlon(int(dea[\"direccion_coordenada_x\"]), int(dea[\"direccion_coordenada_y\"]), 30, \"N\")\n                    all_points+=f\"{dea_latlong[0]},{dea_latlong[1]}/\"\n                print(all_points)\n                sub_menu()\n                user = input(\"Elija opción: \")\n        else:\n            print(\"Usuario o contraseña incorrectos\")\n            menu()\n            user = input(\"Elija opción: \")\n            \n    # ADMIN:\n    elif user == \"3\":\n        def sub_menu():\n            print(\"-----------------\")\n            print(\"1. Agregar DEA\")\n            print(\"2. Modificar DEA\")\n            print(\"3. Eliminar DEA\")\n            print(\"4. 
Volver atrás\")\n            print(\"-----------------\")\n        sub_menu()\n        user = input(\"Elija opción: \")\n\n        # CREATE DEA\n\n        if user == \"1\":\n            data = get_data(deas_json)\n            dea_keys = list(data[0])\n            new_dea = {}\n            for key in dea_keys:\n                print(\"-----------------\")\n                new_dea[key] = input(f\"{key}--->\")\n            print(new_dea)\n            data.append(new_dea) \n            user = input(\"Introduzca ID: \")\n            write_data(data, deas_json)\n\n        # UPDATE DEA\n\n        elif user == \"2\":\n            user = input(\"Introduzca ID: \")\n            data = get_data(deas_json)\n            dea_to_change = list(filter(lambda dea: dea[\"codigo_dea\"] == user,data))[0]\n            dea_keys = list(dea_to_change)\n            print(\"-----------------\")\n            print(\"Elija clave a modificar\")\n            for i,key in enumerate(dea_keys):\n                print(i,\".\", key)\n            print(\"-----------------\")\n            user_key = input(\"Elija opción: \")\n            print(\"-----------------\")\n            print(dea_to_change[dea_keys[int(user_key)]]) # dea_to_change[\"direccion_puerta\"]\n            print(\"-----------------\")\n            user = input(\"Introduzca valor: \")\n            dea_to_change[dea_keys[int(user_key)]] = user\n            print(\"DEA modificado\")\n            print(dea_to_change)\n            user = input(\"Elija opción: \")\n\n        # DELETE DEA\n        \n        elif user == \"3\":\n            user = input(\"Introduzca ID: \")\n            data = get_data(deas_json)\n            dea_to_delete = dea_by_id(user)\n            print(\"DEA a eliminar -->\", dea_to_delete)\n            user = input(\"¿Está seguro que quiere eliminarlo? (s/n): \")\n            if user.lower() == \"s\":\n                data.remove(dea_to_delete)\n                write_data(data, deas_json)\n            sub_menu()\n            user = input(\"Elija opción: \")\n\n    else:\n        user = \"q\"\n\n# a = utm.to_latlon(443123, 4475002, 30, \"N\") ID:2021-54\n\n","sub_path":"deas/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"355326494","text":"#!/usr/bin/env python3\n# HW06_ch09_ex06.py\n\n# (1)\n# Write a function called is_abecedarian that returns True if the letters in a\n# word appear in alphabetical order (double letters are ok).\n# - write is_abecedarian\n# (2)\n# How many abecedarian words are there?\n# - write additional function(s) to assist you\n# - number of abecedarian words: 596\n##############################################################################\n# Imports\n\n# Body\ndef is_abecedarian(word):\n\tflag = True\n\tif len(word) == 1:\n\t\treturn flag\n\tif word[0] <= word[1]:\n\t\tflag = is_abecedarian(word[1:])\n\telse:\n\t\tflag = False\n\n\treturn flag\n\ndef how_many_abecedarian():\n\tabecedarian_list = list()\n\tfin = open('words.txt', 'r')\n\tfor word in fin:\n\t\tif is_abecedarian(word.strip()) == True:\n\t\t\tabecedarian_list.append(word.strip())\n\t# print(abecedarian_list)\n\treturn len(abecedarian_list)\n\n\n##############################################################################\ndef main():\n\t# print(is_abecedarian('aa'))\n\tprint(how_many_abecedarian())\n\tpass # Call your function(s) here.\n\nif __name__ == '__main__':\n    main()\n","sub_path":"HW06_ch09_ex06.py","file_name":"HW06_ch09_ex06.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"565914742","text":"# -*- coding: utf-8 -*-\r\nimport BarrowmanFlow as bf\r\n\r\n################## User Input ###################\r\nlength_body = 2.7 # [m] from nose tip to body. 
without tail\r\ndiameter_body = 0.154 # [m]\r\nlength_cg = 0.885 # [m] from nose tip\r\n\r\nshape_nose = 'double' # 'ogive' or 'double' or 'parabolic' or 'ellipse'\r\nlength_nose = 0.305 # [m]\r\n\r\ndiameter_tail = 0.1 # [m]\r\nlength_tail = 0.1 # [m]\r\n\r\noffset_fin = 0.0 # [mm] from body end to fin end\r\nroot_chord = 200.0 # [mm]\r\ntip_chord = 90.0 # [mm]\r\nleading_edge_chord = root_chord - tip_chord\r\nspan = 130.0 # [mm]\r\nthickness_fin = 2.0 # [mm]\r\nyoung_modulus = 3.0 # [GPa]\r\npoisson_ratio = 0.3 # [-]\r\nmax_altitude = 10000.0 # [m]\r\n#################################################\r\n\r\ndef mm2m(value):\r\n return value / 1000.0\r\n\r\noffset_fin = mm2m(offset_fin)\r\nroot_chord = mm2m(root_chord)\r\ntip_chord = mm2m(tip_chord)\r\nleading_edge_chord = mm2m(leading_edge_chord)\r\nspan = mm2m(span)\r\nthickness_fin = mm2m(thickness_fin)\r\n\r\nbf.initialize(diameter_body, length_body)\r\nnose = bf.Nose(shape_nose, length_nose)\r\nfin = bf.Fin(root_chord, tip_chord, leading_edge_chord, span, length_body-offset_fin-root_chord)\r\nfin.flutter_speed(young_modulus, poisson_ratio, thickness_fin, max_altitude)\r\ntail = bf.TaperBody(diameter_body, diameter_tail, length_tail, length_body)\r\nstage = bf.integral(length_cg, nose, fin, tail)\r\n\r\nprint('*=============Result==============*')\r\nprint('Length of C.P.:', stage.Lcp, '[m]')\r\nprint('Coefficient of Normal Force:', stage.CNa, '[deg^-1]')\r\nprint('Coefficient of Pitch Damping Moment:', stage.Cmq, '[-]')\r\nprint('Flutter Velocity:', max(fin.Vf), '[m/s]')\r\nprint('*=================================*')\r\n\r\nstage.plot()\r\n\r\n\r\n\r\n","sub_path":"simple_rocket.py","file_name":"simple_rocket.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"557087955","text":"\"\"\"\nCREATE TABLE `user` (\n `user_id` int NOT NULL AUTO_INCREMENT,\n `user_name` varchar(100) DEFAULT NULL,\n PRIMARY KEY (`user_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci\n\nCREATE TABLE `user_asset` (\n `user_id` int NOT NULL,\n `asset_amount` int DEFAULT NULL,\n PRIMARY KEY (`user_id`),\n CONSTRAINT `user_asset_ibfk_1`\n FOREIGN KEY (`user_id`) REFERENCES `user` (`user_id`)\n ON DELETE CASCADE ON UPDATE CASCADE\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci\n\nCREATE TABLE `audit` (\n `transaction_id` int NOT NULL AUTO_INCREMENT,\n `from_id` int DEFAULT NULL,\n `to_id` int DEFAULT NULL,\n `transfer_amount` int DEFAULT NULL,\n `transfer_time` datetime DEFAULT NULL,\n PRIMARY KEY (`transaction_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci\n\n\"\"\"\nfrom datetime import datetime\n\nfrom sqlalchemy import Column, String, Integer, Float, create_engine, ForeignKey, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nBase = declarative_base()\nclass User(Base):\n __tablename__ = 'user'\n\n user_id = Column(Integer, primary_key=True)\n user_name = Column(String(100), nullable=True)\n\nclass UserAsset(Base):\n __tablename__ = 'user_asset'\n\n user_id = Column(ForeignKey('user.user_id', ondelete='CASCADE', onupdate='CASCADE'), primary_key=True)\n asset_amount = Column(Integer)\n\nclass Audit(Base):\n __tablename__ = 'audit'\n\n transaction_id = Column(Integer, primary_key=True, autoincrement=True)\n from_id = Column(Integer)\n to_id = Column(Integer)\n transfer_amount = Column(Integer)\n 
transfer_time = Column(DateTime)\n\n# implement an engine\ndburl = \"mysql+pymysql://testuser:testpass@localhost:3306/testdb?charset=utf8mb4\"\nengine = create_engine(dburl, echo=True, encoding=\"utf-8\")\n\n# create table\nBase.metadata.create_all(engine)\n\n# create session\nSessionClass = sessionmaker(bind=engine)\nsession = SessionClass()\n\ndef find_user(user_name):\n user_id = session.query(User.user_id).filter(User.user_name == user_name).one()\n return user_id[0]\n\ndef find_asset(user_name):\n user_id = find_user(user_name)\n asset_amount = session.query(UserAsset.asset_amount).filter(UserAsset.user_id == user_id).one()\n return asset_amount[0]\n\ndef transfer_asset(from_name, to_name, amount):\n try:\n from_user_id = find_user(from_name)\n to_user_id = find_user(to_name)\n from_user_asset = find_asset(from_name)\n if from_user_asset < amount:\n return f\"The remaining amount is not enough.\"\n\n # update the asset for from person\n query_from = session.query(UserAsset.asset_amount).filter(UserAsset.user_id == from_user_id)\n query_from.update({UserAsset.asset_amount: UserAsset.asset_amount - amount})\n\n # update the asset for the to person\n query_to = session.query(UserAsset.asset_amount).filter(UserAsset.user_id == to_user_id)\n query_to.update({UserAsset.asset_amount: UserAsset.asset_amount + amount})\n\n # update audit info\n transfer_item = Audit(from_id=from_user_id,\n to_id=to_user_id,\n transfer_amount=amount,\n transfer_time=datetime.now()\n )\n session.add(transfer_item)\n session.flush()\n session.commit()\n except Exception as e:\n print(f\"failed due to {e}\")\n session.rollback()\n finally:\n session.close()\n\ndef prepare_data():\n session1 = SessionClass()\n user1 = User(user_id=1, user_name='张三')\n user2 = User(user_id=2, user_name='李四')\n session1.add(user1)\n session1.add(user2)\n session1.flush()\n session1.commit()\n\n session2 = SessionClass()\n asset1 = UserAsset(user_id=1, asset_amount=500)\n asset2 = UserAsset(user_id=2, asset_amount=1000)\n session2.add(asset1)\n session2.add(asset2)\n session2.flush()\n session2.commit()\n\ndef test1():\n prepare_data()\n transfer_asset('张三', \"李四\", 100)\n\nif __name__ == '__main__':\n test1()\n\n\n","sub_path":"week03/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"380154896","text":"from BaseHandler import BaseHandler\r\nfrom spoons.model.Spoon import Spoon\r\nfrom spoons.model.SpoonStep import SpoonStep\r\nfrom google.appengine.api.images import Image\r\nfrom google.appengine.api import images, taskqueue\r\nfrom google.appengine.ext import db\r\n\r\nclass CheckInSpoonStep(BaseHandler):\r\n \r\n def get(self):\r\n try:\r\n spoonNumber = int(self.request.get('spoonNumber'))\r\n except ValueError :\r\n context = {'error' : self.request.get('spoonNumber') + \" is not a valid SpoonNumber !\"}\r\n self.render_response('error.html', **context)\r\n return \r\n \r\n spoon = Spoon.get_by_id(spoonNumber)\r\n context = {'spoonNumber' : spoon.spoonNumber()}\r\n self.render_response('CheckInSpoonStep.html', **context)\r\n \r\n def post(self):\r\n try:\r\n spoonNumber = int(self.request.get('spoonNumber'))\r\n except ValueError :\r\n context = {'error' : self.request.get('spoonNumber') + \" is not a valid SpoonNumber !\"}\r\n self.render_response('error.html', **context)\r\n return\r\n \r\n spoon = Spoon.get_by_id(spoonNumber)\r\n \r\n #Creating spoonStep\r\n spoonStep = SpoonStep()\r\n 
spoonStep.comment = self.request.get('comment')\r\n spoonStep.place = self.request.get('place')\r\n spoonStep.currentOwner = self.request.get('currentOwner')\r\n if self.request.get('email') :\r\n spoonStep.email = self.request.get('email')\r\n spoonStep.spoon = spoon\r\n picture = self.request.get('img')\r\n if picture :\r\n resizedPicture = picture\r\n if Image(picture).width > 620 or Image(picture).height > 400:\r\n resizedPicture = images.resize(picture, 620, 400)\r\n spoonStep.image_blob = db.Blob(resizedPicture)\r\n spoonStep.put()\r\n \r\n # Add the task to the default queue.\r\n taskqueue.add(url='/sendMail', params={'spoonNumber': spoonNumber})\r\n \r\n self.redirect(\"/trackSpoon?spoonNumber=%s\" % spoonNumber)\r\n","sub_path":"src/spoons/CheckInSpoonStep.py","file_name":"CheckInSpoonStep.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"213930724","text":"from lib.unpack import unpack_drawings\n\n\ndef flatten_strokes(strokes):\n \"\"\"\n Flatten a list of strokes. Add stroke state in the process.\n\n For each point j, the stroke state is a tuple (pj, qj, rj) where:\n * pj=1 indicates the point is not the end of a stroke.\n * qj=1 indicates the point is the end of a stroke (but not the end of the drawing).\n * rj=1 indicates the point is the end of the drawing.\n By construction, pj + qj + rj = 1\n\n Input:\n [\n ((x1, x2, ..., xi-1, xi), (y1, y2, ..., yi-1, yi)),\n ((xi+1, ...), (yi+1, ...)),\n ...,\n ((..., xn-1, xn), (..., yn-1, yn))\n ]\n\n Output:\n [\n [x1, y1, 1, 0, 0],\n [x2, y2, 1, 0, 0],\n ...,\n [xi-1, yi-1, 1, 0, 0],\n [xi, yi, 0, 1, 0]\n [xi+1, yi+1, 1, 0, 0],\n ...,\n [xn-1, yn-1, 1, 0, 0]\n [xn, yn, 0, 0, 1]\n ]\n \"\"\"\n\n flat_strokes = []\n\n for xs, ys in strokes:\n\n for x, y in zip(xs, ys):\n # Mark stroke in progress by default\n flat_strokes.append([x, y, 1, 0, 0])\n\n # Mark end of stroke\n x, y, *_ = flat_strokes[-1]\n flat_strokes[-1] = [x, y, 0, 1, 0]\n\n # Mark end of drawing\n x, y, *_ = flat_strokes[-1]\n flat_strokes[-1] = [x, y, 0, 0, 1]\n\n return flat_strokes\n\n\ndef transform_strokes(strokes):\n \"\"\"\n First flatten strokes, then transform them from [(x, y)] points to [(dx, dy)] displacements\n between two successive points in cartesian coordinates, while preserving stroke state.\n \"\"\"\n\n flat_strokes = flatten_strokes(strokes)\n\n transformed_strokes = []\n for point, next_point in zip(flat_strokes, flat_strokes[1:]):\n x, y, *stroke_state = point\n x_next, y_next, *_ = next_point\n new_point = [x_next - x, y_next - y] + stroke_state\n transformed_strokes.append(new_point)\n\n # Mark end of drawing. That state might have been lost in transformation process.\n dx, dy, *_ = transformed_strokes[-1]\n transformed_strokes[-1] = [dx, dy, 0, 0, 1]\n\n return transformed_strokes\n\n\ndef inverse_transform_strokes(transformed_strokes):\n \"\"\"\n First untransform transformed strokes, then unflatten them.\n\n The first point has been lost in the transformation process. It is therefore\n not possible to reconstruct the exact original strokes. 
The reconstructed strokes\n    will thus be a translation of the original strokes.\n    \"\"\"\n\n    strokes = []\n    stroke_xs, stroke_ys = [0], [0]\n    x, y = 0, 0\n\n    for dx, dy, *stroke_state in transformed_strokes:\n\n        _, is_end_of_stroke, is_end_of_drawing = stroke_state\n\n        if is_end_of_stroke:\n            # Start a new stroke\n            strokes.append([stroke_xs, stroke_ys])\n            stroke_xs, stroke_ys = [], []\n\n        x += dx\n        y += dy\n\n        stroke_xs.append(x)\n        stroke_ys.append(y)\n\n        if is_end_of_drawing:\n            break\n\n    # Flush last stroke\n    if stroke_xs and stroke_ys:\n        strokes.append((stroke_xs, stroke_ys))\n\n    return strokes\n\n\ndef get_n_points(strokes):\n    \"\"\"\n    Get number of points in a drawing.\n    \"\"\"\n\n    n_points = 0\n    for x, y in strokes:\n        n_points += len(x)\n    return n_points\n\n\ndef cut_strokes(strokes, n_points):\n    \"\"\"\n    Reduce a drawing to its n first points.\n    \"\"\"\n\n    result_strokes = []\n    current_n_points = 0\n\n    for xs, ys in strokes:\n        stroke_size = len(xs)\n        n_points_remaining = max(0, n_points - current_n_points)\n        result_strokes.append((xs[:n_points_remaining], ys[:n_points_remaining]))\n        current_n_points += stroke_size\n\n    return result_strokes\n\n\nif __name__ == '__main__':\n    from pprint import pprint\n\n    dataset = unpack_drawings('./data/The Eiffel Tower.bin')\n    strokes = next(dataset)['image']\n\n    pprint(strokes)\n\n    transformed_strokes = transform_strokes(strokes)\n    pprint(transformed_strokes)\n\n    reconstructed_strokes = inverse_transform_strokes(transformed_strokes)\n    pprint(reconstructed_strokes)\n","sub_path":"lib/strokes.py","file_name":"strokes.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"554960936","text":"import pandas as pd\r\nfrom pandas import read_csv\r\nfrom pandas import DataFrame\r\nfrom numpy import *\r\nimport numpy as np\r\n\r\n\r\ndef mul_info_weightC(gru_valid_file, le_valid_file):\r\n    gru_valid_dataset = read_csv(gru_valid_file, header=0, index_col=None)\r\n    gru_accurary = gru_valid_dataset.values[:, 3]\r\n\r\n    le_valid_dataset = read_csv(le_valid_file, header=0, index_col=None)\r\n    le_accurary = le_valid_dataset.values[:, 3]\r\n\r\n    threshold = 0.7\r\n    gru_num = len([x for x in gru_accurary if x >= threshold])\r\n    le_num = len([x for x in le_accurary if x >= threshold])\r\n\r\n    gru_threshold_ratio = le_num / (gru_num + le_num)\r\n    le_threshold_ratio = gru_num / (gru_num + le_num)\r\n\r\n\r\n    # frequency counts of the gru accuracies\r\n    gru_dic = {}\r\n    for i,a in enumerate(gru_accurary):\r\n        if a not in gru_dic:\r\n            gru_dic[a] = 1\r\n        else:\r\n            gru_dic[a] += 1\r\n    gru_accurary_frequency = []\r\n    for i,a in enumerate(gru_accurary):\r\n        gru_accurary_frequency.append(gru_dic[a])\r\n\r\n    # probability of each gru frequency\r\n    gru_frequency_probability = np.array(gru_accurary_frequency)/sum(gru_accurary_frequency)\r\n\r\n    # log of the gru probabilities\r\n    gru_probability_log = log2(gru_frequency_probability)\r\n\r\n    # self-information entropy of gru\r\n    gru_self_entrocy = []\r\n    for i in range(len(gru_accurary)):\r\n        if gru_accurary[i] >= threshold:\r\n            gru_self_entrocy.append(- (gru_frequency_probability[i] * gru_probability_log[i]) * gru_threshold_ratio)\r\n        else:\r\n            gru_self_entrocy.append(- (gru_frequency_probability[i] * gru_probability_log[i]))\r\n\r\n\r\n\r\n\r\n\r\n    # frequency counts of the le accuracies\r\n    le_dic = {}\r\n    for i, a in enumerate(le_accurary):\r\n        if a not in le_dic:\r\n            le_dic[a] = 1\r\n        else:\r\n            le_dic[a] += 1\r\n    le_accurary_frequency = []\r\n    for i, a in enumerate(le_accurary):\r\n        le_accurary_frequency.append(le_dic[a])\r\n\r\n    # probability of each le frequency\r\n    
le_frequency_probability = np.array(le_accurary_frequency) / sum(le_accurary_frequency)\r\n\r\n    # log of the le probabilities\r\n    le_probability_log = log2(le_frequency_probability)\r\n\r\n    # self-information entropy of le\r\n    le_self_entrocy = []\r\n    for i in range(len(le_accurary)):\r\n        if le_accurary[i] >= threshold:\r\n            le_self_entrocy.append(- (le_frequency_probability[i] * le_probability_log[i]) * le_threshold_ratio)\r\n        else:\r\n            le_self_entrocy.append(- (le_frequency_probability[i] * le_probability_log[i]))\r\n\r\n\r\n\r\n\r\n    # product of the gru and le accuracies\r\n    mul_accurary_multiply = gru_accurary * le_accurary\r\n\r\n    # quotient of the gru and le accuracies\r\n    mul_accurary_division = gru_accurary/le_accurary\r\n\r\n    # sum of the product and the quotient\r\n    multiply_add_division = mul_accurary_multiply + mul_accurary_division\r\n\r\n    # joint frequency counts\r\n    mul_dic = {}\r\n    for i, a in enumerate(multiply_add_division):\r\n        if a not in mul_dic:\r\n            mul_dic[a] = 1\r\n        else:\r\n            mul_dic[a] += 1\r\n    mul_frequency = []\r\n    for i, a in enumerate(multiply_add_division):\r\n        mul_frequency.append(mul_dic[a])\r\n\r\n    # joint probability\r\n    mul_probability = np.array(mul_frequency)/sum(mul_frequency)\r\n\r\n    # joint probability divided by the individual probabilities\r\n    mul_division_single = mul_probability/(gru_frequency_probability * le_accurary_frequency)\r\n\r\n    # log of mul_division_single\r\n    mul_division_single_log = log2(mul_division_single)\r\n\r\n    # mutual information\r\n    mutual_information = -mul_probability * mul_division_single_log\r\n\r\n    # compute the weights from gru_self_entrocy, le_self_entrocy and mutual_information\r\n    gru_ratio = sum(mutual_information) / sum(gru_self_entrocy)\r\n    le_ratio = sum(mutual_information) / sum(le_self_entrocy)\r\n\r\n    gru_weight = gru_ratio / (gru_ratio + le_ratio)\r\n    le_weight = le_ratio / (gru_ratio + le_ratio)\r\n\r\n    Data = {\"le_accurary\":le_accurary, \"le_accurary_frequency\":le_accurary_frequency,\"le_frequency_probability\":le_frequency_probability,\r\n            \"le_probability_log\":le_probability_log,\"le_self_entrocy\":le_self_entrocy,\"gru_accurary\":gru_accurary,\r\n            \"gru_accurary_frequency\":gru_accurary_frequency,\"gru_frequency_probability\":gru_frequency_probability,\r\n            \"gru_probability_log\":gru_probability_log,\"gru_self_entrocy\":gru_self_entrocy,\r\n            \"mul_accurary_multiply\":mul_accurary_multiply,\"mul_accurary_division\":mul_accurary_division,\r\n            \"multiply_add_division\":multiply_add_division,\"mul_frequency\":mul_frequency,\"mul_probability\":mul_probability,\r\n            \"mul_division_single\":mul_division_single,\"mul_division_single_log\":mul_division_single_log,\"mutual_information\":mutual_information}\r\n    df = pd.DataFrame(Data,columns=[\"le_accurary\",\"le_accurary_frequency\",\"le_frequency_probability\",\"le_probability_log\",\r\n                                    \"le_self_entrocy\",\"gru_accurary\",\"gru_accurary_frequency\",\"gru_frequency_probability\",\r\n                                    \"gru_probability_log\",\"gru_self_entrocy\",\"mul_accurary_multiply\",\"mul_accurary_division\",\r\n                                    \"multiply_add_division\",\"mul_frequency\",\"mul_probability\",\"mul_division_single\",\r\n                                    \"mul_division_single_log\",\"mutual_information\"])\r\n    df.to_csv(\"experimentC_mutual_information.csv\", index=False)\r\n    print(gru_weight,le_weight)\r\n    return gru_weight, le_weight\r\n\r\n    # dataset1[\"frequency\"] = frequency\r\n    # dataset1.to_csv(filename,index=None)\r\n\r\n\r\ngru_weight, le_weight = mul_info_weightC(\"experimentC_GRU_validation_result.csv\", 
\"experimentC_LE_validation_result.csv\")\r\n\r\n\r\n","sub_path":"GRA_GRU/program/weights&prediction_results/mul_info_weightC.py","file_name":"mul_info_weightC.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"146313760","text":"from tqdm import tqdm\nimport spacy\n\nif __name__ == \"__main__\":\n with open('train_lawmt.txt') as f:\n dataset = [line.strip() for line in f.readlines()]\n\n with open('base_data.txt', 'w') as f:\n counter = 0\n for line in tqdm(dataset):\n string = f'{line.strip()}\\t{counter},0\\n'\n f.write(string)\n counter += 1\n \n\n\n","sub_path":"data/copygeneration_lawmt/process_base_data.py","file_name":"process_base_data.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"313361064","text":"import numpy as np\nimport open3d as o3d\n\nprint(\"Load a ply point cloud, print it, and render it\")\npcd = o3d.io.read_triangle_mesh(\"room/source/pokojjaponski2.rar\\pokoj japonski.obj\")\no3d.visualization.draw_geometries([pcd],\n zoom=0.3412,\n front=[0.4257, -0.2125, -0.8795],\n lookat=[2.6172, 2.0475, 1.532],\n up=[-0.0694, -0.9768, 0.2024])","sub_path":"test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"255127776","text":"# Copyright (C) 2019 * Ltd. All rights reserved.\n# author : SangHyeon Jo \n\nimport os\nimport cv2\nimport sys\nimport glob\nimport time\nimport random\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom Define import *\nfrom Utils import *\nfrom Teacher import *\n\nfrom FCOS import *\nfrom FCOS_Loss import *\nfrom FCOS_Utils import *\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = GPU_INFO\n\n# 1. dataset\ntrain_data_list = np.load('./dataset/train_detection.npy', allow_pickle = True)\nvalid_data_list = np.load('./dataset/validation_detection.npy', allow_pickle = True)\nvalid_count = len(valid_data_list)\n\nopen('log.txt', 'w')\nlog_print('[i] Train : {}'.format(len(train_data_list)))\nlog_print('[i] Valid : {}'.format(len(valid_data_list)))\n\n# 2. 
build\ninput_var = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL])\nis_training = tf.placeholder(tf.bool)\n\ninput_vars = tf.split(input_var, NUM_GPU)\n\npred_bboxes_ops = []\npred_centers_ops = []\npred_classes_ops = []\n\nfor gpu_id in range(NUM_GPU):\n reuse = gpu_id != 0\n \n with tf.device(tf.DeviceSpec(device_type = \"GPU\", device_index = gpu_id)):\n with tf.variable_scope(tf.get_variable_scope(), reuse = reuse):\n print(input_vars[gpu_id], is_training, reuse)\n\n fcos_dic, fcos_sizes = FCOS(input_vars[gpu_id], is_training)\n if not reuse:\n fcos_utils = FCOS_Utils(fcos_sizes)\n\n pred_bboxes_ops.append(fcos_dic['pred_bboxes'])\n pred_centers_ops.append(fcos_dic['pred_centers'])\n pred_classes_ops.append(fcos_dic['pred_classes'])\n\npred_bboxes_op = tf.concat(pred_bboxes_ops, axis = 0)\npred_centers_op = tf.concat(pred_centers_ops, axis = 0)\npred_classes_op = tf.concat(pred_classes_ops, axis = 0)\n\nlog_print('[i] pred_bboxes_op : {}'.format(pred_bboxes_op))\nlog_print('[i] pred_centers_op : {}'.format(pred_centers_op))\nlog_print('[i] pred_classes_op : {}'.format(pred_classes_op))\n\n_, fcos_size, _ = pred_bboxes_op.shape.as_list()\ngt_bboxes_var = tf.placeholder(tf.float32, [BATCH_SIZE, fcos_size, 4])\ngt_centers_var = tf.placeholder(tf.float32, [BATCH_SIZE, fcos_size, 1])\ngt_classes_var = tf.placeholder(tf.float32, [BATCH_SIZE, fcos_size, CLASSES])\n\nlog_print('[i] gt_bboxes_var : {}'.format(gt_bboxes_var))\nlog_print('[i] gt_centers_var : {}'.format(gt_centers_var))\nlog_print('[i] gt_classes_var : {}'.format(gt_classes_var))\n\npred_ops = [pred_bboxes_op, pred_centers_op, pred_classes_op]\ngt_ops = [gt_bboxes_var, gt_centers_var, gt_classes_var]\nloss_op, focal_loss_op, center_loss_op, giou_loss_op = FCOS_Loss(pred_ops, gt_ops)\n\nvars = tf.trainable_variables()\nl2_reg_loss_op = tf.add_n([tf.nn.l2_loss(var) for var in vars]) * WEIGHT_DECAY\nloss_op += l2_reg_loss_op\n\nlearning_rate_var = tf.placeholder(tf.float32)\nwith tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n # train_op = tf.train.AdamOptimizer(learning_rate_var).minimize(loss_op, colocate_gradients_with_ops = True)\n train_op = tf.train.MomentumOptimizer(learning_rate_var, momentum = 0.9).minimize(loss_op, colocate_gradients_with_ops = True)\n\ntrain_summary_dic = {\n 'Loss/Total_Loss' : loss_op,\n 'Loss/Focal_Loss' : focal_loss_op,\n 'Loss/Center_Loss' : center_loss_op,\n 'Loss/GIoU_Loss' : giou_loss_op,\n 'Loss/L2_Regularization_Loss' : l2_reg_loss_op,\n 'Learning_rate' : learning_rate_var,\n}\n\ntrain_summary_list = []\nfor name in train_summary_dic.keys():\n value = train_summary_dic[name]\n train_summary_list.append(tf.summary.scalar(name, value))\ntrain_summary_op = tf.summary.merge(train_summary_list)\n\nlog_image_var = tf.placeholder(tf.float32, [None, SAMPLE_IMAGE_HEIGHT, SAMPLE_IMAGE_WIDTH, IMAGE_CHANNEL])\nlog_image_op = tf.summary.image('Image/Train', log_image_var[..., ::-1], SAMPLES)\n\n# 3. 
train\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n'''\npretrained_vars = []\nfor var in vars:\n if 'resnet_v1_50' in var.name:\n pretrained_vars.append(var)\n\npretrained_saver = tf.train.Saver(var_list = pretrained_vars)\npretrained_saver.restore(sess, './resnet_v1_model/resnet_v1_50.ckpt')\n'''\n\nsaver = tf.train.Saver(max_to_keep = 100)\nsaver.restore(sess, './model/FCOS_{}.ckpt'.format(150000))\n\nlearning_rate = INIT_LEARNING_RATE\n\nlog_print('[i] max_iteration : {}'.format(MAX_ITERATION))\nlog_print('[i] decay_iteration : {}'.format(DECAY_ITERATIONS))\n\nloss_list = []\nfocal_loss_list = []\ncenter_loss_list = []\ngiou_loss_list = []\nl2_reg_loss_list = []\ntrain_time = time.time()\n\ntrain_writer = tf.summary.FileWriter('./logs/train')\n\ntrain_threads = []\nfor i in range(NUM_THREADS):\n train_thread = Teacher('./dataset/train_detection.npy', fcos_sizes, debug = False)\n train_thread.start()\n train_threads.append(train_thread)\n\nsample_data_list = train_data_list[:SAMPLES]\n\nfor iter in range(1, MAX_ITERATION + 1):\n if iter in DECAY_ITERATIONS:\n learning_rate /= 10\n log_print('[i] learning rate decay : {} -> {}'.format(learning_rate * 10, learning_rate))\n\n # Thread\n find = False\n while not find:\n for train_thread in train_threads:\n if train_thread.ready:\n find = True\n batch_image_data, batch_encode_bboxes, batch_encode_centers, batch_encode_classes = train_thread.get_batch_data() \n break\n \n _feed_dict = {input_var : batch_image_data, gt_bboxes_var : batch_encode_bboxes, gt_centers_var : batch_encode_centers, gt_classes_var : batch_encode_classes, \n is_training : True, learning_rate_var : learning_rate}\n log = sess.run([train_op, loss_op, focal_loss_op, center_loss_op, giou_loss_op, l2_reg_loss_op, train_summary_op], feed_dict = _feed_dict)\n # print(log[1:-1])\n \n if np.isnan(log[1]):\n print('[!]', log[1:-1])\n input()\n\n loss_list.append(log[1])\n focal_loss_list.append(log[2])\n center_loss_list.append(log[3])\n giou_loss_list.append(log[4])\n l2_reg_loss_list.append(log[5])\n train_writer.add_summary(log[6], iter)\n\n if iter % LOG_ITERATION == 0:\n loss = np.mean(loss_list)\n focal_loss = np.mean(focal_loss_list)\n center_loss = np.mean(center_loss_list)\n giou_loss = np.mean(giou_loss_list)\n l2_reg_loss = np.mean(l2_reg_loss_list)\n train_time = int(time.time() - train_time)\n \n log_print('[i] iter : {}, loss : {:.4f}, focal_loss : {:.4f}, center_loss : {:.4f}, giou_loss : {:.4f}, l2_reg_loss : {:.4f}, train_time : {}sec'.format(iter, loss, focal_loss, center_loss, giou_loss, l2_reg_loss, train_time))\n\n loss_list = []\n focal_loss_list = []\n center_loss_list = []\n giou_loss_list = []\n l2_reg_loss_list = []\n train_time = time.time()\n\n if iter % SAMPLE_ITERATION == 0:\n sample_images = []\n batch_image_data = np.zeros((BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL), dtype = np.float32)\n\n for i, data in enumerate(sample_data_list):\n image_name, gt_bboxes, gt_classes = data\n\n image = cv2.imread(TRAIN_DIR + image_name)\n tf_image = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation = cv2.INTER_CUBIC)\n\n batch_image_data[i] = tf_image.copy()\n \n total_pred_bboxes, total_pred_centers, total_pred_classes = sess.run([pred_bboxes_op, pred_centers_op, pred_classes_op], feed_dict = {input_var : batch_image_data, is_training : False})\n\n for i in range(BATCH_SIZE):\n image = batch_image_data[i]\n pred_bboxes, pred_classes = fcos_utils.Decode(total_pred_bboxes[i], total_pred_centers[i], 
total_pred_classes[i], [IMAGE_WIDTH, IMAGE_HEIGHT], detect_threshold = 0.20)\n \n for bbox, class_index in zip(pred_bboxes, pred_classes):\n xmin, ymin, xmax, ymax = bbox[:4].astype(np.int32)\n conf = bbox[4]\n class_name = CLASS_NAMES[class_index]\n \n string = \"{} : {:.2f}%\".format(class_name, conf * 100)\n cv2.putText(image, string, (xmin, ymin - 10), 1, 1, (0, 255, 0))\n cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)\n\n image = cv2.resize(image, (SAMPLE_IMAGE_WIDTH, SAMPLE_IMAGE_HEIGHT))\n sample_images.append(image.copy())\n \n image_summary = sess.run(log_image_op, feed_dict = {log_image_var : sample_images})\n train_writer.add_summary(image_summary, iter)\n\n if iter % SAVE_ITERATION == 0:\n saver.save(sess, './model/FCOS_{}.ckpt'.format(iter))","sub_path":"Train_MultiGPU.py","file_name":"Train_MultiGPU.py","file_ext":"py","file_size_in_byte":8560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"64007240","text":"###############################################################################\n# PowerSphericalPotentialwCutoff.py: spherical power-law potential w/ cutoff\n#\n# amp\n# rho(r)= --------- e^{-(r/rc)^2}\n# r^\\alpha\n###############################################################################\nimport numpy as nu\nfrom scipy import special, integrate\nfrom galpy.potential_src.Potential import Potential, kms_to_kpcGyrDecorator, \\\n _APY_LOADED\nif _APY_LOADED:\n from astropy import units\nclass PowerSphericalPotentialwCutoff(Potential):\n \"\"\"Class that implements spherical potentials that are derived from \n power-law density models\n\n .. math::\n\n \\\\rho(r) = \\\\mathrm{amp}\\,\\\\left(\\\\frac{r_1}{r}\\\\right)^\\\\alpha\\\\,\\\\exp\\\\left(-(r/rc)^2\\\\right)\n\n \"\"\"\n def __init__(self,amp=1.,alpha=1.,rc=1.,normalize=False,r1=1.,\n ro=None,vo=None):\n \"\"\"\n NAME:\n\n __init__\n\n PURPOSE:\n\n initialize a power-law-density potential\n\n INPUT:\n\n amp= amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass density or Gxmass density\n\n alpha= inner power\n\n rc= cut-off radius (can be Quantity)\n\n r1= (1.) 
reference radius for amplitude (can be Quantity)\n\n normalize= if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.\n\n ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)\n\n OUTPUT:\n\n (none)\n\n HISTORY:\n\n 2013-06-28 - Written - Bovy (IAS)\n\n \"\"\"\n Potential.__init__(self,amp=amp,ro=ro,vo=vo,amp_units='density')\n if _APY_LOADED and isinstance(r1,units.Quantity):\n r1= r1.to(units.kpc).value/self._ro\n if _APY_LOADED and isinstance(rc,units.Quantity):\n rc= rc.to(units.kpc).value/self._ro\n self.alpha= alpha\n # Back to old definition\n self._amp*= r1**self.alpha\n self.rc= rc\n self._scale= self.rc\n if normalize or \\\n (isinstance(normalize,(int,float)) \\\n and not isinstance(normalize,bool)): #pragma: no cover\n self.normalize(normalize)\n self.hasC= True\n self.hasC_dxdv= True\n self._nemo_accname= 'PowSphwCut'\n\n def _evaluate(self,R,z,phi=0.,t=0.):\n \"\"\"\n NAME:\n _evaluate\n PURPOSE:\n evaluate the potential at R,z\n INPUT:\n R - Galactocentric cylindrical radius\n z - vertical height\n phi - azimuth\n t - time\n OUTPUT:\n Phi(R,z)\n HISTORY:\n 2013-06-28 - Started - Bovy (IAS)\n \"\"\"\n r= nu.sqrt(R**2.+z**2.)\n return 2.*nu.pi*self.rc**(3.-self.alpha)/r*(r/self.rc*special.gamma(1.-self.alpha/2.)*special.gammainc(1.-self.alpha/2.,(r/self.rc)**2.)-special.gamma(1.5-self.alpha/2.)*special.gammainc(1.5-self.alpha/2.,(r/self.rc)**2.))\n\n def _Rforce(self,R,z,phi=0.,t=0.):\n \"\"\"\n NAME:\n _Rforce\n PURPOSE:\n evaluate the radial force for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n z - vertical height\n phi - azimuth\n t - time\n OUTPUT:\n the radial force\n HISTORY:\n 2013-06-26 - Written - Bovy (IAS)\n \"\"\"\n r= nu.sqrt(R*R+z*z)\n return -self._mass(r)*R/r**3.\n\n def _zforce(self,R,z,phi=0.,t=0.):\n \"\"\"\n NAME:\n _zforce\n PURPOSE:\n evaluate the vertical force for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n z - vertical height\n phi - azimuth\n t - time\n OUTPUT:\n the vertical force\n HISTORY:\n 2013-06-26 - Written - Bovy (IAS)\n \"\"\"\n r= nu.sqrt(R*R+z*z)\n return -self._mass(r)*z/r**3.\n\n def _R2deriv(self,R,z,phi=0.,t=0.):\n \"\"\"\n NAME:\n _Rderiv\n PURPOSE:\n evaluate the second radial derivative for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n z - vertical height\n phi - azimuth\n t - time\n OUTPUT:\n the second radial derivative\n HISTORY:\n 2013-06-28 - Written - Bovy (IAS)\n \"\"\"\n r= nu.sqrt(R*R+z*z)\n return 4.*nu.pi*r**(-2.-self.alpha)*nu.exp(-(r/self.rc)**2.)*R**2.\\\n +self._mass(r)/r**5.*(z**2.-2.*R**2.)\n\n def _z2deriv(self,R,z,phi=0.,t=0.):\n \"\"\"\n NAME:\n _z2deriv\n PURPOSE:\n evaluate the second vertical derivative for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n z - vertical height\n phi - azimuth\n t- time\n OUTPUT:\n the second vertical derivative\n HISTORY:\n 2013-06-28 - Written - Bovy (IAS)\n \"\"\"\n r= nu.sqrt(R*R+z*z)\n return 4.*nu.pi*r**(-2.-self.alpha)*nu.exp(-(r/self.rc)**2.)*z**2.\\\n +self._mass(r)/r**5.*(R**2.-2.*z**2.)\n\n def _Rzderiv(self,R,z,phi=0.,t=0.):\n \"\"\"\n NAME:\n _Rzderiv\n PURPOSE:\n evaluate the mixed R,z derivative for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n z - vertical height\n phi - azimuth\n t- time\n OUTPUT:\n d2phi/dR/dz\n HISTORY:\n 2013-08-28 - Written - Bovy (IAS)\n \"\"\"\n r= nu.sqrt(R*R+z*z)\n return 
R*z*(4.*nu.pi*r**(-2.-self.alpha)*nu.exp(-(r/self.rc)**2.)\n                    -3.*self._mass(r)/r**5.)\n\n    def _dens(self,R,z,phi=0.,t=0.):\n        \"\"\"\n        NAME:\n           _dens\n        PURPOSE:\n           evaluate the density for this potential\n        INPUT:\n           R - Galactocentric cylindrical radius\n           z - vertical height\n           phi - azimuth\n           t - time\n        OUTPUT:\n           the density\n        HISTORY:\n           2013-06-28 - Written - Bovy (IAS)\n        \"\"\"\n        r= nu.sqrt(R**2.+z**2.)\n        return 1./r**self.alpha*nu.exp(-(r/self.rc)**2.)\n\n    def _mass(self,R,z=0.,t=0.):\n        \"\"\"\n        NAME:\n           _mass\n        PURPOSE:\n           evaluate the mass within R for this potential\n        INPUT:\n           R - Galactocentric cylindrical radius\n           z - vertical height\n           t - time\n        OUTPUT:\n           the mass enclosed\n        HISTORY:\n           2013-XX-XX - Written - Bovy (IAS)\n        \"\"\"\n        if z is None: r= R\n        else: r= nu.sqrt(R**2.+z**2.)\n        return 2.*nu.pi*self.rc**(3.-self.alpha)*special.gammainc(1.5-self.alpha/2.,(r/self.rc)**2.)*special.gamma(1.5-self.alpha/2.)\n\n    @kms_to_kpcGyrDecorator\n    def _nemo_accpars(self,vo,ro):\n        \"\"\"\n        NAME:\n\n           _nemo_accpars\n\n        PURPOSE:\n\n           return the accpars potential parameters for use of this potential with NEMO\n\n        INPUT:\n\n           vo - velocity unit in km/s\n\n           ro - length unit in kpc\n\n        OUTPUT:\n\n           accpars string\n\n        HISTORY:\n\n           2014-12-18 - Written - Bovy (IAS)\n\n        \"\"\"\n        ampl= self._amp*vo**2.*ro**(self.alpha-2.)\n        return \"0,%s,%s,%s\" % (ampl,self.alpha,self.rc*ro)\n","sub_path":"galpy/potential_src/PowerSphericalPotentialwCutoff.py","file_name":"PowerSphericalPotentialwCutoff.py","file_ext":"py","file_size_in_byte":7522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"23740589","text":"\"\"\"\nPong game algorithm simulation\nAuthor: Igor Kim\nE-mail: igor.skh@gmail.com\n\n(c) January, 2017\n\"\"\"\nfrom pong import PongGame\n\nif __name__ == '__main__':\n    game = PongGame(10, 12)\n    while game.started:\n        game.step2()\n        if game.started:\n            game.show_table()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"128516767","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('data.csv',header=None)\nX = dataset.iloc[:, 1].values\ny = dataset.iloc[:, 1].values.tolist()\n#\n#dataset = pd.read_csv('result.csv',header=None)\n#X_2 = dataset.iloc[:, 0:1].values\n#y_2 = dataset.iloc[:, 1].values\n\ncount = 120\nfrom statsmodels.tsa.arima_model import ARIMA\npredictions = []\nhistory = [float(x) for x in X]\n\n#if len(history) < 240:\n#    temp = history[:]\n#    for i in range(10):\n#        history = history + temp\n\nfor t in range(count):\n\tmodel = ARIMA(history, order=(5,0,0))\n\tmodel_fit = model.fit(disp=0)\n\toutput = model_fit.forecast()\n\tyhat = output[0][0]\n\tpredictions.append(yhat)\n\thistory.append(yhat)\n\nplt.plot(history)\n","sub_path":"Machine Learning A-Z Template Folder/Other/Test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"252910481","text":"import os\nimport re\n\n\nclass CreateModel:\n\n    def __init__(self, table_desc, model_text):\n        self.table_desc = table_desc\n        self.model_text = model_text\n\n    def parse_text(self):\n        \"\"\"\n        Parse the table description text\n        :return: list -> [[field_i, field_name, field_type, field_max_len, field_desc],]\n        \"\"\"\n        table_desc = self.table_desc\n        item_list = table_desc.split('\n')\n        fields_list = []\n        for line in 
item_list:\n            # drop blank lines\n            if re.match(r'^[\s|\t]*$', line) is None:\n                item_split = re.split(r'[\t|\s]+', line)\n                # drop empty entries\n                fields_list.append(filter(lambda x: bool(x), item_split))\n        return fields_list\n\n    def help_text_to_field(self):\n        \"\"\"\n        Build the help_text for each field\n        :return: dict -> field: help_text\n        \"\"\"\n        field_array = self.parse_text()\n        d = {}\n        for i in field_array:\n            [field_i, field_name, field_type, field_max_len, field_desc] = i\n            d[field_name] = field_desc\n            d[field_name.lower()] = field_desc\n        return d\n\n    def beatify_model(self, lower_case=False):\n        \"\"\"\n        Use the database column name for each field:\n        take the value of db_column and remove the db_column argument\n        :param lower_case: whether the generated field names are lower-cased\n        :return:\n        \"\"\"\n        strings = self.model_text\n        ls = [i for i in strings.split('\n') if i and not re.match(r'^\s*?$', i)]\n        re_db_column = re.compile(r\"\s*(\w+)\s=\smodels.*?(db_column=\'(\w+)\',*\s*)\")\n        beatify_ls = []\n        for i in ls:\n            (db_filed, db_column_parent, db_column) = re_db_column.search(i).groups()\n            if not lower_case:\n                i = i.replace(db_filed, db_column)\n            i = i.replace(db_column_parent, '')\n            # strip the trailing comment\n            i = re.sub(r\"\s*?#\s.*?\.\", '', i)\n            beatify_ls.append(i)\n        return '\n'.join(beatify_ls)\n\n    def add_help_text(self, model_text):\n        \"\"\"\n        Add the help_text argument\n        \"\"\"\n        strings = model_text\n        d = self.help_text_to_field()\n        ls = [i for i in strings.split('\n') if i]\n        re_filed = re.compile(r\"\s*(\w+)\s=\")\n        re_has_params = re.compile(r\"\((.*?)\)\")\n        beatify_ls = []\n        for i in ls:\n            filed = re_filed.search(i).group(1)\n            has_params = re_has_params.search(i).group(1)\n            help_text = d.get(filed, 'fuck')\n            if has_params:\n                i = i.replace(\"(\", f\"(help_text='{help_text}', \")\n            else:\n                i = i.replace(\"(\", f\"(help_text='{help_text}'\")\n            beatify_ls.append(i)\n        return '\n'.join(beatify_ls)\n    \n    def holy_cow(self, lower_case=False):\n        \"\"\"\n        Room for extra tricks before adding help_text\n        \"\"\"\n        beatify_model = self.beatify_model(lower_case)\n        string = self.add_help_text(beatify_model)\n        return string\n\n\nif __name__ == '__main__':\n\n    # table structure\n    table_ds = \"\"\"\n    1\tOrder_Id\tbigint\t8\t积分订单头部\n    2\tOrder_No\tvarchar\t8000\t订单编号\n    3\tOrder_GUID\tuniqueidentifier\t16\t订单GUID\n    4\tBuyer_User_Id\tbigint\t8\t购买者Id\n    5\tBuyer_Enterprise_Id\tbigint\t8\t购买者企业Id\n    6\tTransaction_Integral\tbigint\t8\t交易积分\n    7\tEarnest_Integral\tbigint\t8\t实际扣除积分\n    8\tDeduction_Type\tint\t4\t积分扣除的方式\n    9\tSignature\tnvarchar\t8000\t加密后的数据\n    10\tCreate_Date\tdatetime\t8\t创建时间\n    11\tModify_Date\tdatetime\t8\t修改时间\n    12\tCreate_User_Id\tbigint\t8\t创建者\n    13\tModify_User_Id\tbigint\t8\t修改者\n    \"\"\"\n\n    origin_model = \"\"\"\n    order_id = models.BigIntegerField(db_column='Order_Id')  # Field name made lowercase.\n    order_no = models.CharField(db_column='Order_No', max_length=32)  # Field name made lowercase.\n    order_guid = models.CharField(db_column='Order_GUID', max_length=36)  # Field name made lowercase.\n    buyer_user_id = models.BigIntegerField(db_column='Buyer_User_Id')  # Field name made lowercase.\n    buyer_enterprise_id = models.BigIntegerField(db_column='Buyer_Enterprise_Id')  # Field name made lowercase.\n    transaction_integral = models.BigIntegerField(db_column='Transaction_Integral')  # Field name made lowercase.\n    earnest_integral = models.BigIntegerField(db_column='Earnest_Integral')  # Field name made lowercase.\n    deduction_type = models.IntegerField(db_column='Deduction_Type', blank=True, null=True)  # Field name made lowercase.\n    signature = models.TextField(db_column='Signature')  # Field name made lowercase.\n    create_date = 
models.DateTimeField(db_column='Create_Date')  # Field name made lowercase.\n    modify_date = models.DateTimeField(db_column='Modify_Date')  # Field name made lowercase.\n    create_user_id = models.BigIntegerField(db_column='Create_User_Id')  # Field name made lowercase.\n    modify_user_id = models.BigIntegerField(db_column='Modify_User_Id')  # Field name made lowercase.\n    \"\"\"\n    holy = CreateModel(table_ds, origin_model)\n    models = holy.holy_cow()\n    # models = holy.beatify_model()\n    print(models)\n    \n    with open('models.txt', 'a+', encoding='utf-8') as f:\n        f.write(models)\n\n\n","sub_path":"python/django反向模型.py","file_name":"django反向模型.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"307572127","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct  8 21:15:57 2020\n\n@author: Nika\n\"\"\"\n\n# -*- coding: utf-8 -*-\nimport pygame\nimport numpy as np\nimport random\n\nWINDOW_SIZE = WINDOW_WIDTH, WINDOW_HEIGHT = 800, 600\npygame.init()\nscreen = pygame.display.set_mode(WINDOW_SIZE)\n\n\ndef screen_update(x, y):\n    \"\"\"\n    refreshes the screen on every frame\n    x - x coordinate, used to update the colour\n    y - day/night indicator, based on the remainder when divided by 2\n    \"\"\"\n    if x<0:\n        time_of_day = (64, 64, 128 + 64)\n    else:\n        if x > 800:\n            time_of_day = (64, 64, 128 + 64)\n        else:\n            t = ((x / 800) * 128) // 1\n\n\n            if (y % 2) == 0:\n                if x <= 400:\n                    time_of_day = (64 + t, 64 + t, 127 + 62 + t)\n                else:\n                    time_of_day = (128 + 64 - t, 128 + 64 - t, 254 + 62 - t)\n            else:\n                if x <= 400:\n                    time_of_day = (64 - t, 64 - t, 128 + 63 - t)\n                else:\n                    time_of_day = (t - 64, t - 64, 64 + t)\n\n    screen = pygame.display.set_mode(WINDOW_SIZE)\n    pygame.draw.rect(screen, time_of_day, (0, 0, 800, 300))\n    pygame.draw.rect(screen, (0, 96, 0), (0, 300, 800, 600))\n\n\ndef blit_with_scale(x, y, scale, surface):\n    \"\"\"\n    blits a surface onto the screen.\n    just don't touch it, the other functions rely on it.\n    \"\"\"\n    w, h = surface.get_size()\n    surface = pygame.transform.scale(surface,\n                                     (int(w * scale), int(h * scale)))\n    screen.blit(surface, (x - w * scale // 2,\n                          y - h * scale // 2))\n\n\ndef draw_home(x, y, scale):\n    \"\"\"\n    Draw a house\n    x, y {int} - coordinates of the house centre\n    scale {float} - scaling of the house. linear dimensions\n    are multiplied by this value\n    the default house size is 200 x 280\n    \"\"\"\n    surface = pygame.Surface((200, 280), pygame.SRCALPHA)\n    home_rect = pygame.Rect(0, 130, 200, 150)\n    pygame.draw.rect(surface, pygame.Color(\"#a06117\"), home_rect)\n    pygame.draw.polygon(surface, pygame.Color(\"#f93838\"),\n                        [(0, 130), (100, 0), (200, 130)])\n    window_rect = pygame.Rect((0, 0), (60, 40))\n    window_rect.center = home_rect.center\n    pygame.draw.rect(surface, pygame.Color(\"#3790d5\"), window_rect)\n\n    blit_with_scale(x, y, scale, surface)\n\n\ndef draw_tree(x, y, scale):\n    \"\"\"\n    Draw a tree\n    x, y {int} - coordinates of the tree centre\n    scale {float} - scaling of the tree. 
linear dimensions\n    are multiplied by this value\n    the default tree size is 150 x 230\n    \"\"\"\n    surface = pygame.Surface((150, 230), pygame.SRCALPHA)\n\n    pygame.draw.rect(surface, pygame.Color(\"black\"), (70, 70, 20, 500))\n    for coords in [(610, 205), (560, 190),\n                   (600, 160), (645, 190),\n                   (570, 230), (640, 230)]:\n        coords = (coords[0] - 525, coords[1] - 130)\n        pygame.draw.circle(surface, pygame.Color(\"#256927\"), coords, 30)\n        pygame.draw.circle(surface, pygame.Color(\"black\"), coords, 30, 1)\n\n    blit_with_scale(x, y, scale, surface)\n\n\ndef draw_clouds(x, y, scale):\n    \"\"\"\n    Draw clouds\n    x, y {int} - coordinates of the centre.\n    scale {float} - scaling. linear dimensions\n    are multiplied by this value\n    \"\"\"\n    surface = pygame.Surface((210, 140), pygame.SRCALPHA)\n    for coords in [(350, 80), (480, 80), (410, 70), (450, 120), (390, 130)]:\n        coords = (coords[0] - 310, coords[1] - 30)  # unexplained adjustment\n        pygame.draw.circle(surface, pygame.Color(\"white\"), coords, 40)\n        pygame.draw.circle(surface, pygame.Color(\"#484646\"), coords, 40, 1)\n\n    blit_with_scale(x, y, scale, surface)\n\n\ndef draw_sun(x, y, radius, z):\n    \"\"\"\n    Draw the sun\n    x, y {int} - coordinates of the centre.\n    radius {int} - radius\n    the remainder of z divided by 2 selects the sun or the moon\n    \"\"\"\n\n    if (z % 2) == 0:\n        a = np.arange(0, 360, 5)\n        p1_x = x + radius * np.cos(np.radians(a))\n        p1_y = y + radius * np.sin(np.radians(a))\n\n        p2_x = x + (radius + radius / 10) * np.cos(np.radians(a))\n        p2_y = y + (radius + radius / 10) * np.sin(np.radians(a))\n\n        coords = [(p1_x[i], p1_y[i]) if i % 2 == 0 else (p2_x[i], p2_y[i])\n                  for i in range(360 // 5)]\n        pygame.draw.polygon(screen, pygame.Color(\"#f77658\"), coords)\n\n    else:\n        pygame.draw.circle(screen, (192, 192, 192), (x, y), radius)\n\n\n\n\n\"\"\"\n    set up the clock and the window title, tweak everything we can\n\"\"\"\n\npygame.display.set_caption(\"My Game\")\ndone = False\nclock = pygame.time.Clock()\n\npygame.mixer.music.load('LOLIPOP.mp3')\npygame.mixer.music.play()\nsc = pygame.display.set_mode((400, 300))\nlambo_surf = pygame.image.load('lambo.png')\nlambo_surf.set_colorkey((255, 255, 255))\n\n\"\"\"\n    a set of initial constants that are modified later \n\"\"\"\na = 1000\ny = 0\nx_sun = -50\ny_sun = 250\nx_cloud = np.array([100, 300, 500])\ny_cloud = np.array([200, 100, 150])\nz_cloud = np.array([0.5, 0.7, 1])\nx_cloud_speed = np.array([1, 2, 1])\n\n\"\"\"\n    the main loop where everything happens \n\"\"\"\n\nwhile done == False:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            done = True\n    screen_update(x_sun, y)\n\n    \"\"\"\n        draw all the objects \n    \"\"\"\n\n    draw_sun(x_sun, y_sun, 30, y)\n\n    draw_clouds(x_cloud[2], y_cloud[2], z_cloud[2])\n    draw_clouds(x_cloud[1], y_cloud[1], z_cloud[1])\n    draw_clouds(x_cloud[0], y_cloud[0], z_cloud[0])\n\n    draw_home(400, 300, 0.75)\n\n    draw_tree(100, 300, 1)\n    draw_tree(700, 300, 0.75)\n    draw_tree(550, 300, 0.5)\n    draw_tree(250, 300, 0.75)\n\n    \"\"\"\n        move all the objects \n    \"\"\"\n\n    for i in range(3):\n        if x_cloud[i] >= 900:\n            x_cloud[i] = -100\n            y_cloud[i] = random.randint(50, 250)\n            z_cloud[i] = random.randint(5, 10) / 10\n            x_cloud_speed[i] = random.randint(1, 3)\n        x_cloud[i] += x_cloud_speed[i]\n\n    if x_sun >= 950:\n        x_sun = -50\n        y_sun = 250\n        y += 1\n    else:\n        x_sun += 2\n        if x_sun < 400:\n            y_sun -= 1\n        else:\n            y_sun += 1\n\n    lambo_rect = lambo_surf.get_rect(center=(a, 500))\n    sc.blit(lambo_surf, lambo_rect)\n\n    a -= 3\n    if a < -400:\n        a = 1200\n\n    
pygame.display.flip()\n clock.tick(30)\n\npygame.quit()","sub_path":"Lab_3/akulov жгёт/povolot.py","file_name":"povolot.py","file_ext":"py","file_size_in_byte":7308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"389390506","text":"import sys\nimport os\nimport struct\nimport pandas as pd\nimport numpy as np\nfrom collections import OrderedDict\nfrom subprocess import Popen, PIPE\n# get input nodes and define neighbourhood\n# input filename\n# n-hop neighbourhood -- n\n\ndef appendCSV(final_results, sep, out_file):\n\tresult = pd.DataFrame(final_results, columns = final_results[0].keys())\n\tif(os.path.isfile(out_file)):\n\t\tresult.to_csv(out_file, mode = 'a', header = False, index = False)\n\telse:\n\t\tresult.to_csv(out_file, sep = sep, index=False)\n\ndef main():\n\tif(len(sys.argv) != 5):\n\t\tprint(\"[Usage:] python3 script.py data_file pair_data output_file n\")\n\t\texit()\n\n\tdata_file = sys.argv[1]\n\tpair_data = sys.argv[2]\n\tout_file = sys.argv[3]\n\thop = int(sys.argv[4])\n\n\tif(os.path.exists(\"/Users/admin/Desktop/Project/files/outputs/cora/n_\" + str(hop)) == False):\n\t\tos.system(\"mkdir /Users/admin/Desktop/Project/files/outputs/cora/n_\" + str(hop))\n\n\t# compile all c++ files\n\n\n\t# get all the reachable pairs in the graph to test\n\tos.system(\"/Users/admin/Desktop/Project/code/src_v2/johnson --dump_pairs \" + data_file + \" /Users/admin/Desktop/Project/files/outputs/cora/n_\" + str(hop) + \"/global.txt /Users/admin/Desktop/Project/files/outputs/cora/n_\" + str(hop) + \"/global_sparse.txt\" )\n\t\n\t# some example pair data to use\n\t# data = \"/Users/admin/Desktop/Project/files/outputs/cora/dumped.txt\"\n\t# data = \"/Users/admin/Desktop/Project/code/src_v2/random_select.txt\"\n\n\t# define a list of ordered dict to save the results in excel\n\tfinal_results = []\n\tlines = []\n\ttotal_pairs = 0\n\n\t# open the data file and repeat the process for each pair\n\tf = open(pair_data, \"r\")\n\tlines = f.readlines()\n\tlines = [l.strip().split() for l in lines]\n\tf.close()\n\n\t# if file exists, resume \n\tresume_pos = 0\n\n\tif(os.path.isfile(out_file)):\n\t\tdf = pd.read_csv(out_file)\n\t\tresume_pos = df.shape[0]\n\t\tprint(resume_pos)\n\t\t\n\tfor l in lines:\n\t\tnode_a = int(l[1])\n\t\tnode_b = int(l[2])\n\t\ttemp = OrderedDict()\n\t\ttemp[\"node_a\"] = node_a\n\t\ttemp[\"node_b\"] = node_b\n\t\ttemp[\"distance\"] = int(l[0])\n\t\ttotal_pairs+=1\n\n\t\tif(total_pairs <= resume_pos):\n\t\t\tcontinue\n\n\t\t# define file names for persistence diagrams\n\n\t\tdgm1_file = \"/Users/admin/Desktop/Project/files/outputs/cora/n_\" + str(hop) + \"/dipha_\" + str(node_a)\n\t\tdgm2_file = \"/Users/admin/Desktop/Project/files/outputs/cora/n_\" + str(hop) + \"/dipha_\" + str(node_b)\n\t\tdgmCombine_file = \"/Users/admin/Desktop/Project/files/outputs/cora/n_\" + str(hop) + \"/dipha_\" + str(node_a) + \"_\" + str(node_b)\n\t\tdgmComplete_file = \"/Users/admin/Desktop/Project/files/outputs/cora/n_\" + str(hop) + \"/dipha_complete_\" + str(node_a) + \"_\" + str(node_b)\n\n\t\t# obtain persistence diagrams for node_a, node_b and combined\n\n\t\tif(not os.path.isfile(dgm1_file)):\n\t\t\tos.system(\"python3 /Users/admin/Desktop/Project/code/src_v2/get_persDiag.py \" + data_file + \" \" + str(node_a) + \" \" + str(hop))\n\t\tif(not os.path.isfile(dgm2_file)):\n\t\t\tos.system(\"python3 /Users/admin/Desktop/Project/code/src_v2/get_persDiag.py \" + data_file + \" \" + str(node_b) + \" \" + str(hop))\n\n\t\tos.system(\"python3 
/Users/admin/Desktop/Project/code/src_v2/get_persDiag.py \" + data_file + \" \" + str(node_a) + \" \" + str(node_b) + \" \" + str(hop))\n\n\t\t# get persistence diagram for complete graph\n\t\tin_file = \"/Users/admin/Desktop/Project/files/outputs/cora/n_\" + str(hop) + \"/apsp_complete_full_\" + str(node_a) + \"_\" + str(node_b)\n\t\tf = open(in_file, \"rb\")\n\t\tdata = f.read()\n\t\tnum_processors = struct.unpack(' 3):\n\t\t\tnum_processors = 3\n\t\tcommand = \"mpiexec -n \" + str(num_processors) + \" dipha --upper_dim 2 \" + in_file + \" \" + dgmComplete_file\n\t\tos.system(command)\t\t\t\n\n\t\t# compare pairwise diagrams\n\t\tprocess_a_b = Popen([\"/Users/admin/Desktop/Project/code/src_v2/baseline\", data_file, \"/Users/admin/Desktop/Project/code/src_v2/test.txt\", str(node_a), str(node_b)], stdout=PIPE)\n\t\tprocess_a_0 = Popen([\"python3\", \"/Users/admin/Desktop/Project/code/src_v2/compare_diagram.py\", dgmCombine_file, dgm1_file, str(2),str(0)], stdout=PIPE)\n\t\tprocess_a_1 = Popen([\"python3\", \"/Users/admin/Desktop/Project/code/src_v2/compare_diagram.py\", dgmCombine_file, dgm1_file, str(2),str(1)], stdout=PIPE)\n\t\tprocess_b_0 = Popen([\"python3\", \"/Users/admin/Desktop/Project/code/src_v2/compare_diagram.py\", dgmCombine_file, dgm2_file, str(2),str(0)], stdout=PIPE)\n\t\tprocess_b_1 = Popen([\"python3\", \"/Users/admin/Desktop/Project/code/src_v2/compare_diagram.py\", dgmCombine_file, dgm2_file, str(2),str(1)], stdout=PIPE)\n\t\tprocess_complete_0 = Popen([\"python3\", \"/Users/admin/Desktop/Project/code/src_v2/compare_diagram.py\", dgmCombine_file, dgmComplete_file, str(2),str(0)], stdout=PIPE)\n\t\tprocess_complete_1 = Popen([\"python3\", \"/Users/admin/Desktop/Project/code/src_v2/compare_diagram.py\", dgmCombine_file, dgmComplete_file, str(2),str(1)], stdout=PIPE)\n\t\t\n\t\tprint(node_a, node_b)\n\t\t(output_a_b,err) = process_a_b.communicate()\n\t\toutput_a_b = output_a_b.strip().splitlines()\n\n\t\t(output_a_0,err) = process_a_0.communicate()\n\t\toutput_a_0 = output_a_0.strip().splitlines()\n\n\t\t(output_a_1,err) = process_a_1.communicate()\n\t\toutput_a_1 = output_a_1.strip().splitlines()\n\n\t\t(output_b_0,err) = process_b_0.communicate()\n\t\toutput_b_0 = output_b_0.strip().splitlines()\n\n\t\t(output_b_1,err) = process_b_1.communicate()\n\t\toutput_b_1 = output_b_1.strip().splitlines()\n\n\t\t(output_complete_0, err) = process_complete_0.communicate()\n\t\toutput_complete_0 = output_complete_0.strip().split()\n\n\t\t(output_complete_1, err) = process_complete_1.communicate()\n\t\toutput_complete_1 = output_complete_1.strip().split()\n\n\t\t# create a temporary dictionary\n\t\t\t\n\t\ttemp[\"W_a_0\"] = float(output_a_0[0])\n\t\ttemp[\"B_a_0\"] = float(output_a_0[1])\n\n\t\ttemp[\"W_b_0\"] = float(output_b_0[0])\n\t\ttemp[\"B_b_0\"] = float(output_b_0[1])\n\n\t\ttemp[\"W_a_1\"] = float(output_a_1[0])\n\t\ttemp[\"B_a_1\"] = float(output_a_1[1])\n\n\t\ttemp[\"W_b_1\"] = float(output_b_1[0])\n\t\ttemp[\"B_b_1\"] = float(output_b_1[1])\n\n\t\ttemp[\"W_comp_0\"] = float(output_complete_0[0])\n\t\ttemp[\"B_comp_0\"] = float(output_complete_0[1])\n\n\t\ttemp[\"W_comp_1\"] = float(output_complete_1[0])\n\t\ttemp[\"B_comp_1\"] = float(output_complete_1[1])\n\t\t\n\t\ttemp[\"AA\"] = float(output_a_b[0])\n\t\ttemp[\"MW\"] = float(output_a_b[1])\n\n\t\tfinal_results.append(temp)\n\n\t\tif(total_pairs%100 == 0):\n\t\t\tprint(total_pairs)\n\t\t\tappendCSV(final_results, ',', out_file)\n\t\t\tdel final_results[:]\n\n\tif(len(final_results) > 0):\n\t\tappendCSV(final_results, 
',', out_file)\n\tsorted_result = pd.read_csv(out_file)\n\tsorted_result.sort_values(by=['distance'], inplace = True, ascending = True)\n\tsorted_result.to_csv(out_file, sep = ',')\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"src_old/src_v2/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"151438057","text":"def evennumbers(a):\r\n    #a = int(input(\"enter value of a : \"))\r\n    #for a in range(1,a+1):\r\n    if (a%2==0):\r\n        return a\r\n#evennumbers()\r\na = int(input(\"Enter the number : \"))\r\nfor i in range(1,a+1):\r\n    print(evennumbers(i),end=\" \")\r\n# a function with a return value needs a print statement when called\r\n# a function without a return can be called without a print statement.\r\n\"\"\"\r\ndef evennumbers():\r\n    a = int(input(\"enter the number: \"))\r\n    for a in range(1,a+1):\r\n        if (a%2==0):\r\n            print(\"Even numbers are : \",a)\r\n\r\nevennumbers()\r\n\r\n#HW\r\n# write a function to print the Fibonacci series\r\n\r\n# take a string from the user\r\n# string such as habeeb,\r\n# ask the user which letter they want to find, print how many times the letter is repeated.\r\n\r\n#1HW\r\n\r\ndef fibanaci_series(a):\r\n    a=\r\n\"\"\"\r\n","sub_path":"even.py","file_name":"even.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"390693862","text":"import json\nimport sqlite3\nfrom werkzeug.exceptions import HTTPException\nimport atexit\nfrom flask import Flask, request, Response, render_template, jsonify\nAPP = Flask(__name__)\nis_crashed = False\n\n##################################################################\n#                    START/END FUNCTIONS                        #\n#################################################################\n\ndef startup():\n    \"\"\"\n    Function to present the index page\n    \"\"\"\n    json = {}\n    results =[]\n    temp = {}\n    temp[[0][0]] = {}\n    temp[[0][0]][\"title\"] = \"Welcome to Selfieless Acts\"\n    results.append(temp[[0][0]])\n    json[\"index\"] = results\n    return results\n\ndef shutdown():\n    \"\"\"\n    Function to ensure the database is closed during app exit\n    \"\"\"\n    global conn\n\n    #close database\n    conn.commit()\n    conn.close()\n\n##################################################################\n#                    DATABASE FUNCTIONS                         #\n#################################################################\n\ndef category_create(category_name, acts):\n    \"\"\"\n    This function creates a category\n\n    :param category_name: category name\n    :type category_name: str\n    :param acts: no. 
{"seq_id":"390693862","text":"import json\nimport sqlite3\nfrom werkzeug.exceptions import HTTPException\nimport atexit\nfrom flask import Flask, request, Response, render_template, jsonify\nAPP = Flask(__name__)\nis_crashed = False\n\n##################################################################\n# START/END FUNCTIONS #\n#################################################################\n\ndef startup():\n \"\"\"\n Function to present the index page\n \"\"\"\n json = {}\n results =[]\n temp = {}\n temp[0] = {}\n temp[0][\"title\"] = \"Welcome to Selfieless Acts\"\n results.append(temp[0])\n json[\"index\"] = results\n return results\n\ndef shutdown():\n \"\"\"\n Function to ensure the database is closed during app exit\n \"\"\"\n global conn\n\n #close database\n conn.commit()\n conn.close()\n\n##################################################################\n# DATABASE FUNCTIONS #\n#################################################################\n\ndef category_create(category_name, acts):\n \"\"\"\n This function creates a category\n\n :param category_name: category name\n :type category_name: str\n :param acts: no. of acts\n :type acts: int\n \"\"\"\n global conn\n global cursor\n\n try:\n cursor.execute('PRAGMA foreign_keys = ON')\n cursor.execute(\n \"\"\"INSERT INTO categories (category_name, acts)\n VALUES (?, ?)\"\"\",\n (category_name, acts)\n )\n conn.commit()\n return True\n except:\n return False\n\ndef act_create(act_id, username, timestamp, caption, category, image, upvotes):\n \"\"\"\n This function creates an act.\n\n :param act_id: Act ID\n :type act_id: int\n :param username: User name\n :type username: str\n :param timestamp: Time and date of the act\n :type timestamp: str\n :param caption: Caption for the act\n :type caption: str\n :param category: Category of the act\n :type category: str\n :param image: Base64 string of the image binary\n :type image: str\n :param upvotes: Upvotes received on the act\n :type upvotes: int\n \"\"\"\n global conn\n global cursor\n\n try:\n cursor.execute('PRAGMA foreign_keys = ON')\n cursor.execute(\n \"\"\"INSERT INTO acts (act_id, user_name, timestamp, caption, category, imgB64, upvotes)\n VALUES (?, ?, ?, ?, ?, ?, ?)\"\"\",\n (act_id, username, timestamp, caption, category, image, upvotes)\n )\n conn.commit()\n #Update the number of acts in the category table\n cursor.execute(\n \"UPDATE categories SET acts = acts + 1 WHERE category_name=?\",\n (category,)\n )\n conn.commit()\n return True\n except:\n return False\n\ndef act_exists(act_id):\n \"\"\"\n This function checks if an act exists\n\n :param act_id: Act ID\n :type act_id: int\n \"\"\"\n global cursor\n\n cursor.execute(\n \"SELECT EXISTS (SELECT 1 FROM acts WHERE act_id=?);\",\n (act_id,)\n )\n exists = cursor.fetchone()[0]\n if exists == 1:\n return True\n else:\n return False\n\ndef act_upvote(act_id):\n \"\"\"\n This function adds an upvote to the act\n\n :param act_id: Act ID\n :type act_id: int\n \"\"\"\n global conn\n global cursor\n\n try:\n cursor.execute(\n \"UPDATE acts SET upvotes = upvotes + 1 WHERE act_id=?\",\n (act_id,)\n )\n conn.commit()\n return True\n except:\n return False\n\ndef category_remove(category_name):\n \"\"\"\n This function removes a category\n\n :param category_name: category name\n :type category_name: str\n \"\"\"\n global conn\n global cursor\n\n try:\n cursor.execute(\n \"DELETE FROM categories WHERE category_name=?\",\n (category_name,)\n )\n conn.commit()\n #check whether a row was removed\n if cursor.rowcount > 0:\n return True\n return False\n except:\n return False\n\ndef act_remove(act_id):\n \"\"\"\n This function removes an act\n\n :param act_id: Act ID\n :type act_id: int\n \"\"\"\n global conn\n global cursor\n\n try:\n cursor.execute(\n \"DELETE FROM acts WHERE act_id=?\",\n (act_id,)\n )\n conn.commit()\n #check whether a row was removed\n if cursor.rowcount > 0:\n return True\n return False\n except:\n return False\n\ndef category_get(category_name):\n \"\"\"\n This function retrieves all properties of a category\n\n :param category_name: Category name\n :type category_name: str\n \"\"\"\n global cursor\n\n #execute database query\n if category_name:\n cursor.execute(\n \"SELECT * FROM categories WHERE category_name=?;\",\n (category_name,)\n )\n else:\n cursor.execute(\"SELECT * FROM categories;\")\n\n json = {}\n results =[]\n temp = {}\n\n for row in cursor:\n results.append({\n row[0]: row[1]\n })\n return results\n\ndef acts_get(act_id):\n \"\"\"\n This function retrieves all properties of an act\n\n :param act_id: Act ID\n :type act_id: int\n \"\"\"\n global cursor\n\n\n if act_id:\n cursor.execute(\n \"SELECT * FROM acts 
WHERE act_id=?;\",\n (act_id,)\n )\n else:\n cursor.execute(\"SELECT * FROM acts;\")\n\n json = {}\n results =[]\n temp = {}\n for row in cursor:\n temp[row[0]] = {}\n temp[row[0]][\"act_id\"] = row[0]\n temp[row[0]][\"user_name\"] = row[1]\n temp[row[0]][\"timestamp\"] = row[2]\n temp[row[0]][\"caption\"] = row[3]\n temp[row[0]][\"category\"] = row[4]\n temp[row[0]][\"imgB64\"] = row[5]\n temp[row[0]][\"upvotes\"] = row[6]\n results.append(temp[row[0]])\n json[\"results\"] = results\n return results\n\ndef category_acts_count(category_name):\n \"\"\"\n This function retrieves the number of acts in a category\n\n :param category_name: Category name\n :type category_name: str\n \"\"\"\n global cursor\n\n try:\n cursor.execute(\n \"SELECT count(*) FROM acts WHERE category=?;\",\n (category_name,)\n )\n except:\n return -1\n\n results =[]\n temp = {}\n for row in cursor:\n temp[row[0]] = row[0]\n results.append(temp[row[0]])\n return results\n\ndef category_exists(category_name):\n \"\"\"\n This function checks if a category exists\n\n :param category_name: Category name\n :type category_name: str\n \"\"\"\n global cursor\n\n cursor.execute(\n \"SELECT EXISTS (SELECT 1 FROM categories WHERE category_name=?);\",\n (category_name,)\n )\n exists= cursor.fetchone()[0]\n if exists == 1:\n return True\n else:\n return False\n\ndef category_acts_get(category_name):\n \"\"\"\n This function retrieves all the acts under a category\n\n :param category_name: Category name\n :type category_name: str\n \"\"\"\n global cursor\n\n cursor.execute(\n \"SELECT count(*) FROM acts WHERE category=?;\",\n (category_name,)\n )\n count= cursor.fetchone()[0]\n if (count > 100):\n return False\n else:\n cursor.execute(\n \"SELECT * FROM acts WHERE category=?;\",\n (category_name,)\n )\n\n json = {}\n results =[]\n temp = {}\n for row in cursor:\n temp[row[0]] = {}\n temp[row[0]][\"act_id\"] = row[0]\n temp[row[0]][\"user_name\"] = row[1]\n temp[row[0]][\"timestamp\"] = row[2]\n temp[row[0]][\"caption\"] = row[3]\n temp[row[0]][\"category\"] = row[4]\n temp[row[0]][\"imgB64\"] = row[5]\n temp[row[0]][\"upvotes\"] = row[6]\n results.append(temp[row[0]])\n json[\"results\"] = results\n return results\n\ndef acts_get_count_range(category_name, startRange, endRange):\n \"\"\"\n This function retrieves the acts under a category within a given range\n\n :param category_name: Category name\n :type category_name: str\n :param startRange: Starting range\n :type startRange: int\n :param endRange: Ending range\n :type endRange: int\n \"\"\"\n global cursor\n try:\n cursor.execute(\n \"SELECT * FROM acts WHERE category=? AND ROWID > ? 
AND ROWID <= ?;\",\n (category_name, startRange, endRange,)\n )\n except:\n return -1\n\n json = {}\n results =[]\n temp = {}\n for row in cursor:\n temp[row[0]] = {}\n temp[row[0]][\"act_id\"] = row[0]\n temp[row[0]][\"user_name\"] = row[1]\n temp[row[0]][\"timestamp\"] = row[2]\n temp[row[0]][\"caption\"] = row[3]\n temp[row[0]][\"category\"] = row[4]\n temp[row[0]][\"imgB64\"] = row[5]\n temp[row[0]][\"upvotes\"] = row[6]\n results.append(temp[row[0]])\n json[\"results\"] = results\n return results\n\n\n##################################################################\n# FLASK FUNCTIONS #\n#################################################################\n\n@APP.route(\"/\")\ndef index():\n \"\"\"\n This function presents the main page.\n \"\"\"\n return jsonify(startup())\n\n@APP.route(\"/api/v1/categories\", methods=[\"GET\"])\ndef api_list_categories():\n \"\"\"\n This function lists all categories\n \"\"\"\n categories = category_get(0)\n if len(categories) == 0:\n #We process a 200 error and display it as 204 since\n #204 is NOT an error and will not contain any body\n return jsonify(error=204), 200\n else:\n return jsonify(categories)\n\n@APP.route(\"/api/v1/categories\", methods=[\"POST\"])\ndef api_create_category():\n \"\"\"\n This function adds a category\n \"\"\"\n if category_create(\n request.form[\"categoryName\"], 0\n ):\n return jsonify(success=201), 201\n return jsonify(error=400), 400\n\n@APP.route(\"/api/v1/categories/<category_name>\", methods=[\"DELETE\"])\ndef api_delete_category(category_name):\n \"\"\"\n This function deletes a particular category.\n\n :param category_name: category name\n :type category_name: str\n \"\"\"\n result = category_remove(category_name)\n if result:\n return jsonify(success=200), 200\n else:\n return jsonify(error=400), 400\n\n@APP.route(\"/api/v1/categories/<category_name>/acts\", methods=[\"GET\"])\ndef api_list_acts_category(category_name):\n \"\"\"\n This function lists all acts for a given category\n :param category_name: category name\n :type category_name: str\n \"\"\"\n exists = category_exists(category_name)\n if not exists:\n return jsonify(error=400), 400\n else:\n acts = category_acts_get(category_name)\n if acts is False:\n return jsonify(error=400), 400\n elif len(acts) == 0:\n #We process a 200 error and display it as 204 since\n #204 is NOT an error and will not contain any body\n return jsonify(error=204), 200\n else:\n return jsonify(acts)\n\n@APP.route(\"/api/v1/categories/<category_name>/acts/size\", methods=[\"GET\"])\ndef api_category_acts_count(category_name):\n \"\"\"\n This function lists the number of acts for a given category\n \"\"\"\n exists = category_exists(category_name)\n if not exists:\n return jsonify(error=400), 400\n else:\n acts = category_acts_count(category_name)\n if acts == -1:\n return jsonify(error=400), 400\n elif len(acts) == 0:\n #We process a 200 error and display it as 204 since\n #204 is NOT an error and will not contain any body\n return jsonify(error=204), 200\n else:\n return jsonify(acts)\n\n
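# The range endpoint below pages through a category's acts with half-open ROWID
# bounds (start < ROWID <= end), so page N of size K corresponds to start = N*K
# and end = (N+1)*K. Note that request.args values arrive as strings; converting
# them with int() before the ROWID comparison is the safer choice in sqlite.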
#TODO - This API is not working\n@APP.route(\"/api/v1/categories/<category_name>/acts\", methods=[\"GET\"])\ndef api_range_acts(category_name):\n \"\"\"\n This function lists all acts for a given category in a given range\n \"\"\"\n startRange = request.args.get('start', None)\n endRange = request.args.get('end', None)\n acts = acts_get_count_range(category_name, startRange, endRange)\n if acts == -1:\n return jsonify(error=400), 400\n elif len(acts) == 0:\n #We process a 200 error and display it as 204 since\n #204 is NOT an error and will not contain any body\n return jsonify(error=204), 200\n else:\n return jsonify(acts)\n\n@APP.route(\"/api/v1/acts/upvote\", methods=[\"POST\"])\ndef api_act_upvote():\n \"\"\"\n This function upvotes a specific act\n \"\"\"\n exists = act_exists(request.form[\"act_id\"])\n if not exists:\n return jsonify(error=400), 400\n else:\n if act_upvote(\n request.form[\"act_id\"]\n ):\n return jsonify(success=200), 200\n return jsonify(error=400), 400\n\n@APP.route(\"/api/v1/acts/<act_id>\", methods=[\"DELETE\"])\ndef api_delete_act(act_id):\n \"\"\"\n This function deletes a particular act.\n\n :param act_id: Act ID\n :type act_id: int\n \"\"\"\n result = act_remove(act_id)\n if result:\n return jsonify(success=200), 200\n else:\n return jsonify(error=400), 400\n\n@APP.route(\"/api/v1/acts\", methods=[\"POST\"])\ndef api_create_act():\n \"\"\"\n This function creates an act\n \"\"\"\n #At the time of act creation, the number of upvotes is 0\n upvotes = 0\n result = act_create(\n request.form[\"actId\"], request.form[\"username\"],\n request.form[\"timestamp\"], request.form[\"caption\"],\n request.form[\"categoryName\"], request.form[\"imgB64\"],upvotes\n )\n if result:\n return jsonify(success=200), 200\n else:\n return jsonify(error=400), 400\n\n@APP.errorhandler(HTTPException)\ndef handle_error(e):\n try:\n if e.code < 400:\n return Response.force_type(e, request.environ)\n elif e.code == 404:\n return jsonify(error=404), 404\n raise e\n except:\n return jsonify(error=500), 500\n\n@APP.route(\"/api/v1/_health\", methods=[\"GET\"])\ndef api_act_health():\n \"\"\"\n This function returns the health of the container\n \"\"\"\n if is_crashed:\n return jsonify(error=500), 500\n else:\n return jsonify(success=200), 200\n\n@APP.route(\"/api/v1/_crash\", methods=[\"POST\"])\ndef api_act_crash():\n \"\"\"\n This function marks the container as crashed\n \"\"\"\n global is_crashed\n if is_crashed:\n return jsonify(error=500), 500\n else:\n is_crashed = True\n return jsonify(success=200), 200\n\n##################################################################\n# MAIN FUNCTION #\n#################################################################\n\nif __name__ == \"__main__\":\n # 'At exit' calls functions when a program is closing down\n # Here, the shutdown function is called on program exit\n atexit.register(shutdown)\n #Start the database\n #check_same_thread is set to False to allow the connection to run on multiple threads\n conn = sqlite3.connect(\"selfieless.db\", check_same_thread=False)\n cursor = conn.cursor()\n APP.run(host='0.0.0.0', port=8081, debug=False)\n","sub_path":"acts/acts.py","file_name":"acts.py","file_ext":"py","file_size_in_byte":15593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"277001855","text":"#-*- coding:utf-8 -*-\r\n\r\n'''\r\n\t· Title: Long bullish candlestick (jangdae yangbong)\r\n\t· Trade timing: next day's closing price\r\n\t· Target period: 1 week\r\n\t· Target return: 5%\r\n\t· Target win rate: 80%\r\n\t· Algorithm details\r\n\t1) Find a long bullish candlestick: open > close (5% or more, low=open, high=close)\r\n\t2) Hold until a decline signal appears\r\n'''\r\n\r\nimport pandas as pd\r\nimport datetime\r\n\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\n\r\ntargetPeriod = 20\r\ntargetProfitRate = 1.05\r\n\r\nstockDirectory = 'data/2017-10-30/'\r\nstockFiles = (f for f in listdir(stockDirectory) if isfile(join(stockDirectory, f)))\r\n\r\nportfolio = {'000660', '035720'}\r\n\r\nfor fileName in stockFiles:\r\n # if(portfolio.__contains__(fileName[:fileName.index('_')])):\r\n stockDF = pd.read_csv(stockDirectory + fileName).sort_values(by=['datetime'])\r\n\r\n successCnt = 0\r\n failCnt = 0\r\n totalProfit = 0\r\n\r\n fromStartDate = datetime.datetime.strptime('2010-01-01', \"%Y-%m-%d\").date()\r\n\r\n\r\n for i in range(1, len(stockDF)):\r\n if(datetime.datetime.strptime(stockDF[i:i + 1]['datetime'].values[0], \"%Y-%m-%d\").date() >= fromStartDate\r\n and int(stockDF[i:i+1]['close']) == int(stockDF[i:i+1]['high'])\r\n and int(stockDF[i:i+1]['open']) == int(stockDF[i:i+1]['low'])\r\n and int(stockDF[i:i+1]['open']) < int(stockDF[i-1:i]['close'] * (1+0.05))\r\n ):\r\n profit = 0\r\n buyPrice = int(stockDF[i:i + 1]['close'])\r\n buyDate = str(stockDF[i:i + 1]['datetime'])\r\n targetPrice = round(buyPrice * targetProfitRate, -2)\r\n # print('Buy price: ', buyPrice, ' Buy date: ', buyDate, 'Target price: ', targetPrice)\r\n\r\n for j in range(i+1, i+1+targetPeriod):\r\n # print(stockDF[j:j+1]['datetime'].values[0])\r\n # print(stockDF[j:j + 1]['high'].values[0])\r\n highPrice = int(stockDF[j:j+1]['high'])\r\n if(targetPrice < highPrice):\r\n sellDate = str(stockDF[j:j+1]['datetime'])\r\n profit = targetPrice-buyPrice\r\n totalProfit += profit\r\n print('(Success) Sell price: ', targetPrice, ' Sell date: ', sellDate, ' Profit: ', profit, ' Cumulative profit: ', totalProfit)\r\n successCnt += 1\r\n break\r\n if(int(stockDF[j:j + 1]['close']) < buyPrice * 0.95):\r\n profit = int(stockDF[j:j + 1]['close']) - buyPrice\r\n totalProfit += profit\r\n print('(Failure) Sell price: ', int(stockDF[j:j + 1]['close']), ' Sell date: ', stockDF[j:j + 1]['datetime'], ' Loss: ', profit,' Cumulative profit: ', totalProfit)\r\n failCnt += 1\r\n break\r\n\r\n\r\n print(fileName.replace('.csv', ''), ' : ' 'Success: ', successCnt, ' Failure: ', failCnt,' Cumulative profit: ', totalProfit)","sub_path":"AlgorithmTrading/A2.py","file_name":"A2.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
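A2.py above buys at a qualifying candle's close and then scans the next targetPeriod sessions for the first high that touches the +5% target, otherwise stopping out on a close 5% below the entry. A minimal self-contained sketch of that exit rule on hypothetical prices (invented numbers, no pandas; not the author's backtest):

buy_price = 10000
target_price = round(buy_price * 1.05, -2)  # rounded to the hundreds, as in A2.py
stop_price = buy_price * 0.95
days = [(10200, 10100), (10400, 10300), (10600, 10450)]  # (high, close) per day

for day, (high, close) in enumerate(days, start=1):
    if high > target_price:
        print("take profit on day", day, "at", target_price)
        break
    if close < stop_price:
        print("stop loss on day", day, "at", close)
        break
else:
    print("holding period ended without hitting either level")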
{"seq_id":"338407525","text":"# Configuration\nimport os\nimport re\nimport pandas as pd\nfrom lxml import etree\nfrom tqdm import tqdm\nimport xml_custom as xml\n\npaper_folder = '../data/articles/wellcome_articles/'\n# paper_folder = os.path.abspath(paper_folder) + '/'\n\n# Extract reviews\npaper_ID = []\nreview_ID = []\nreview = []\nfor doc in tqdm(os.listdir(paper_folder)):\n if not doc.endswith('.xml'):\n continue\n fullname = os.path.join(paper_folder, doc)\n if os.stat(fullname).st_size != 0:\n tree = etree.parse(fullname)\n # Select article DOI\n ID = tree.findall(\"//article-meta/article-id[@pub-id-type='doi']\")\n # Select reviews\n rev_ID = tree.findall(\"//sub-article/front-stub/article-id[@pub-id-type='doi']\")\n rev_text = tree.findall(\"//sub-article[@article-type='ref-report']/body\")\n for i, _ in enumerate(rev_text):\n paper_ID.append(re.sub(r'\t+', '', xml.get_text_content(ID[0])))\n review_ID.append(re.sub(r'\t+', '', xml.get_text_content(rev_ID[i])))\n review.append(re.sub(r'\t+', '', xml.get_text_content(rev_text[i])))\n\n# Put reviews in a dataframe\ndf = pd.DataFrame({'manuscript_ID': paper_ID,\n 'review_ID': review_ID,\n 'review': review})\n\n# Save the dataframe\ndf.to_csv('../data/reviews/wellcome_reviews.csv')\n","sub_path":"peertax/extract_reviews_from_xml_wellcome.py","file_name":"extract_reviews_from_xml_wellcome.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"269345657","text":"\"\"\"\nRuntime: 144 ms, faster than 26.04% of Python online submissions for Search a 2D Matrix II.\nMemory Usage: 19.5 MB, less than 11.70% of Python online submissions for Search a 2D Matrix II.\n\"\"\"\n\nclass Solution(object):\n def searchMatrix(self, matrix, target):\n \"\"\"\n :type matrix: List[List[int]]\n :type target: int\n :rtype: bool\n \"\"\"\n 
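# Staircase search: j is a single column pointer shared across all rows
# (negative indexing, so -1 is the rightmost column). Each row walks j left
# past values greater than the target; because every column is sorted
# ascending, j never has to move back right, so the whole scan is O(M + N)
# rather than O(M * N).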
j = -1\n for row in matrix:\n while j + len(row) >= 0 and row[j] > target:\n j -= 1\n if j + len(row) >= 0 and row[j] == target:\n return True\n return False\n\n\"\"\"\ncommon solution\nBy range check and 1D binary search\n\"\"\"\nclass Solution(object):\n def searchMatrix(self, matrix, target):\n \"\"\"\n :type matrix: List[List[int]]\n :type target: int\n :rtype: bool\n \"\"\"\n M = len(matrix)\n N = len(matrix[0])\n \n def binary_search(row):\n l, r = 0, N-1\n while l<=r:\n mid = (l+r) // 2\n if target < matrix[row][mid]:\n r = mid - 1\n elif target > matrix[row][mid]:\n l = mid + 1\n else: \n return True\n return False\n \n for row in range(M):\n if binary_search(row):\n return True\n return False","sub_path":"Problems/Binary Search/显式/2D/240_Search_a_2D_Matrix_II.py","file_name":"240_Search_a_2D_Matrix_II.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"284936968","text":"\n## @package SegmentCreator\n# RichRecSegment creator options for RICH Reconstruction\n# @author Chris Jones (Christopher.Rob.Jones@cern.ch)\n# @date 15/08/2008\n\n__version__ = \"$Id: SegmentCreator.py,v 1.5 2009-09-16 13:37:28 jonrob Exp $\"\n__author__ = \"Chris Jones <Christopher.Rob.Jones@cern.ch>\"\n\nfrom RichKernel.Configuration import *\nfrom Configurables import ( Rich__Rec__SegmentCreator )\n\n# ----------------------------------------------------------------------------------\n\n## @class RichSegmentCreatorConf\n# RichRecSegment creator options for RICH Reconstruction\n# @author Chris Jones (Christopher.Rob.Jones@cern.ch)\n# @date 15/08/2008\nclass RichSegmentCreatorConf(RichConfigurableUser):\n\n ## Steering options\n __slots__ = {\n \"Context\" : \"Offline\" # The context within which to run\n ,\"EnergyBins\" : None\n ,\"OutputLevel\" : INFO # The output level to set all algorithms and tools to use\n }\n\n ## Initialize \n def initialize(self):\n \n ## Default number of energy bins for each radiator\n self.setRichDefaults( \"EnergyBins\", { \"Offline\" : [ 5, 5, 5 ],\n \"HLT\" : [ 5, 5, 5 ] } )\n\n ## @brief Apply the configuration\n def applyConf(self):\n \n context = self.getProp(\"Context\")\n\n nickname = \"RichSegmentCreator\"\n \n segCreator = Rich__Rec__SegmentCreator( \"ToolSvc.\"+context+\"_\"+nickname )\n\n segCreator.EnergyBins = self.getProp(\"EnergyBins\")\n\n if self.isPropertySet(\"OutputLevel\") :\n segCreator.OutputLevel = self.getProp(\"OutputLevel\")\n","sub_path":"Rec/Rich/RichRecSys/python/RichRecSys/SegmentCreator.py","file_name":"SegmentCreator.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"118433740","text":"import math, stock\nclass AmPut:\n def __init__(self, r, sigma, T, S0, K, Nstep):\n self.r = r\n self.sigma = sigma\n self.T = T\n self.S0 = S0\n self.K = K\n self.Nstep = Nstep\n self.pathtree = []\n self.optiontree = []\n self.delta_t = T/Nstep\n self.u = math.exp(sigma * math.sqrt(self.delta_t))\n self.d = math.exp(-sigma * math.sqrt(self.delta_t))\n self.p = (math.exp(r * self.delta_t) - self.d) / (self.u - self.d)\n self.discount = math.exp(-r * self.delta_t)\n self.stock_tree = stock.Stock_binary_tree(self.r, self.sigma, self.T, self.S0, self.K, self.Nstep)\n self.option_tree = stock.Stock_binary_tree(self.r, self.sigma, self.T, self.S0, self.K, self.Nstep)\n \n\n def generate(self):\n self.stock_tree.generate()\n self.option_tree.generate()\n\n def evaluate_layer(self, layer):#if Nstep = 4 then layer = 0 1 2 3\n 
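# Backward induction over the binomial tree: the terminal layer takes the raw
# put payoff max(0, K - S); every earlier node takes the larger of (a) the
# risk-neutral expectation of its two children discounted one step back and
# (b) the immediate exercise value -- the early-exercise comparison that
# distinguishes an American put from its European counterpart.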
if layer == len(self.option_tree.paths) - 1:\n for i in range(0, layer + 1):\n self.option_tree.paths[layer][i] = max(0, self.K - self.option_tree.paths[layer][i])\n else:\n for i in range(0, layer + 1):\n holding_before_discount = self.p * self.option_tree.paths[layer + 1][i] + (1 - self.p) * self.option_tree.paths[layer + 1][i + 1]\n holding_after_discount = holding_before_discount * self.discount\n exercising = max(0, self.K - self.option_tree.paths[layer][i])\n self.option_tree.paths[layer][i] = max(holding_after_discount, exercising)\n\n\n def evaluate(self):\n for i in range(self.Nstep):\n self.evaluate_layer(self.Nstep - i - 1)\n\n def get_price(self):\n self.generate()\n self.evaluate()\n return self.option_tree.paths[0][0]\n \n \n\n\n \n\n\n \n\n\n\n \n","sub_path":"Binary_Tree.py","file_name":"Binary_Tree.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"451205321","text":"\"\"\"\n--- Day 17: Reservoir Research ---\n\nYou arrive in the year 18. If it weren't for the coat you got in 1018, you \nwould be very cold: the North Pole base hasn't even been constructed.\n\nRather, it hasn't been constructed yet. The Elves are making a little progress, \nbut there's not a lot of liquid water in this climate, so they're getting very \ndehydrated. Maybe there's more underground?\n\nYou scan a two-dimensional vertical slice of the ground nearby and discover \nthat it is mostly sand with veins of clay. The scan only provides data with a \ngranularity of square meters, but it should be good enough to determine how \nmuch water is trapped there. In the scan, x represents the distance to the \nright, and y represents the distance down. There is also a spring of water near \nthe surface at x=500, y=0. The scan identifies which square meters are clay \n(your puzzle input).\n\nFor example, suppose your scan shows the following veins of clay:\n\nx=495, y=2..7\ny=7, x=495..501\nx=501, y=3..7\nx=498, y=2..4\nx=506, y=1..2\nx=498, y=10..13\nx=504, y=10..13\ny=13, x=498..504\n\nRendering clay as #, sand as ., and the water spring as +, and with x \nincreasing to the right and y increasing downward, this becomes:\n\n 44444455555555\n 99999900000000\n 45678901234567\n 0 ......+.......\n 1 ............#.\n 2 .#..#.......#.\n 3 .#..#..#......\n 4 .#..#..#......\n 5 .#.....#......\n 6 .#.....#......\n 7 .#######......\n 8 ..............\n 9 ..............\n10 ....#.....#...\n11 ....#.....#...\n12 ....#.....#...\n13 ....#######...\n\nThe spring of water will produce water forever. Water can move through sand, \nbut is blocked by clay. Water always moves down when possible, and spreads to \nthe left and right otherwise, filling space that has clay on both sides and \nfalling out otherwise.\n\nFor example, if five squares of water are created, they will flow downward \nuntil they reach the clay and settle there. Water that has come to rest is \nshown here as ~, while sand through which water has passed (but which is now \ndry again) is shown as |:\n\n......+.......\n......|.....#.\n.#..#.|.....#.\n.#..#.|#......\n.#..#.|#......\n.#....|#......\n.#~~~~~#......\n.#######......\n..............\n..............\n....#.....#...\n....#.....#...\n....#.....#...\n....#######...\n\nTwo squares of water can't occupy the same location. 
If another five squares of \nwater are created, they will settle on the first five, filling the clay \nreservoir a little more:\n\n......+.......\n......|.....#.\n.#..#.|.....#.\n.#..#.|#......\n.#..#.|#......\n.#~~~~~#......\n.#~~~~~#......\n.#######......\n..............\n..............\n....#.....#...\n....#.....#...\n....#.....#...\n....#######...\n\nWater pressure does not apply in this scenario. If another four squares of \nwater are created, they will stay on the right side of the barrier, and no \nwater will reach the left side:\n\n......+.......\n......|.....#.\n.#..#.|.....#.\n.#..#~~#......\n.#..#~~#......\n.#~~~~~#......\n.#~~~~~#......\n.#######......\n..............\n..............\n....#.....#...\n....#.....#...\n....#.....#...\n....#######...\n\nAt this point, the top reservoir overflows. While water can reach the tiles \nabove the surface of the water, it cannot settle there, and so the next five \nsquares of water settle like this:\n\n......+.......\n......|.....#.\n.#..#||||...#.\n.#..#~~#|.....\n.#..#~~#|.....\n.#~~~~~#|.....\n.#~~~~~#|.....\n.#######|.....\n........|.....\n........|.....\n....#...|.#...\n....#...|.#...\n....#~~~~~#...\n....#######...\n\nNote especially the leftmost |: the new squares of water can reach this tile, \nbut cannot stop there. Instead, eventually, they all fall to the right and \nsettle in the reservoir below.\n\nAfter 10 more squares of water, the bottom reservoir is also full:\n\n......+.......\n......|.....#.\n.#..#||||...#.\n.#..#~~#|.....\n.#..#~~#|.....\n.#~~~~~#|.....\n.#~~~~~#|.....\n.#######|.....\n........|.....\n........|.....\n....#~~~~~#...\n....#~~~~~#...\n....#~~~~~#...\n....#######...\n\nFinally, while there is nowhere left for the water to settle, it can reach a \nfew more tiles before overflowing beyond the bottom of the scanned data:\n\n......+....... (line not counted: above minimum y value)\n......|.....#.\n.#..#||||...#.\n.#..#~~#|.....\n.#..#~~#|.....\n.#~~~~~#|.....\n.#~~~~~#|.....\n.#######|.....\n........|.....\n...|||||||||..\n...|#~~~~~#|..\n...|#~~~~~#|..\n...|#~~~~~#|..\n...|#######|..\n...|.......|.. (line not counted: below maximum y value)\n...|.......|.. (line not counted: below maximum y value)\n...|.......|.. (line not counted: below maximum y value)\n\nHow many tiles can be reached by the water? To prevent counting forever, ignore \ntiles with a y coordinate smaller than the smallest y coordinate in your scan \ndata or larger than the largest one. Any x coordinate is valid. 
In this \nexample, the lowest y coordinate given is 1, and the highest is 13, causing the \nwater spring (in row 0) and the water falling off the bottom of the render (in \nrows 14 through infinity) to be ignored.\n\nSo, in the example above, counting both water at rest (~) and other sand tiles \nthe water can hypothetically reach (|), the total number of tiles the water can \nreach is 57.\n\nHow many tiles can the water reach within the range of y values in your scan?\n\"\"\"\nfrom typing import List, Tuple\n\n\ndef create_grid(scan: List[str]) -> List[List[str]]:\n\n coord = []\n for line in scan:\n first, second = line.split(',')\n first_axis, first_coord = first.split('=')\n second_axis, second_range = second.split('=')\n start, end = map(int, second_range.split('..'))\n if first_axis == 'y':\n y = int(first_coord)\n for x in range(start, end + 1):\n coord.append((x, y))\n elif first_axis == 'x':\n x = int(first_coord)\n for y in range(start, end + 1):\n coord.append((x, y))\n\n min_y = 0\n max_y = max(y for x, y in coord)\n min_x = min(x for x, y in coord)\n max_x = max(x for x, y in coord)\n\n grid = [['.'] * (max_x - min_x + 3) for _ in range(max_y - min_y + 1)]\n\n grid[0][500 - min_x + 1] = '+'\n\n for x, y in coord:\n grid[y][x - min_x + 1] = '#'\n\n return grid\n\n\ndef fill_water(grid: List[List[str]]) -> None:\n\n x, y = 0, 0\n\n for i in range(len(grid) - 1):\n for j in range(len(grid[i])):\n if ((grid[i][j] == '+' and grid[i + 1][j] == '.') or\n (grid[i][j] == '|' and grid[i + 1][j] == '.')):\n x, y = j, i + 1\n break\n\n if x == y == 0:\n return\n\n while 0 < y < len(grid):\n\n while y < len(grid) and grid[y][x] == '.':\n grid[y][x] = '|'\n y += 1\n\n left = right = x\n while (0 <= y < len(grid) and left >= 0\n and grid[y][left] in '#~' and grid[y - 1][left] in '.|'):\n left -= 1\n\n while (0 <= y < len(grid) and right < len(grid[y])\n and grid[y][right] in '#~' and grid[y - 1][right] in '.|'):\n right += 1\n\n if grid[y - 1][left] == '#' and grid[y - 1][right] == '#':\n for i in range(left + 1, right):\n grid[y - 1][i] = '~'\n y -= 1\n elif grid[y - 1][left] == '#':\n for i in range(left + 1, right + 1):\n grid[y - 1][i] = '|'\n return\n elif grid[y - 1][right] == '#':\n for i in range(left, right):\n grid[y - 1][i] = '|'\n return\n else:\n for i in range(left, right + 1):\n grid[y - 1][i] = '|'\n return\n\n\ndef count_tiles(grid: List[List[str]]) -> int:\n\n prev_count = 0\n while True:\n fill_water(grid)\n curr_count = sum(sum(x.count(c) for x in grid) for c in '~|')\n if curr_count == prev_count:\n break\n prev_count = curr_count\n\n first_clay_line = next(i for i, x in enumerate(grid) if '#' in x)\n ignore_tiles = sum(x.count('|') for x in grid[:first_clay_line])\n\n return curr_count - ignore_tiles\n\n\ndef test_create_grid():\n\n example = [\n \"x=495, y=2..7\",\n \"y=7, x=495..501\",\n \"x=501, y=3..7\",\n \"x=498, y=2..4\",\n \"x=506, y=1..2\",\n \"x=498, y=10..13\",\n \"x=504, y=10..13\",\n \"y=13, x=498..504\"\n ]\n\n initial_grid = [\n \"......+.......\",\n \"............#.\",\n \".#..#.......#.\",\n \".#..#..#......\",\n \".#..#..#......\",\n \".#.....#......\",\n \".#.....#......\",\n \".#######......\",\n \"..............\",\n \"..............\",\n \"....#.....#...\",\n \"....#.....#...\",\n \"....#.....#...\",\n \"....#######...\"\n ]\n\n grid = create_grid(example)\n\n assert initial_grid == [''.join(row) for row in grid]\n\n\ndef test_fill_water():\n\n grid = [\n \"......+.......\",\n \"............#.\",\n \".#..#.......#.\",\n \".#..#..#......\",\n 
\".#..#..#......\",\n \".#.....#......\",\n \".#.....#......\",\n \".#######......\",\n \"..............\",\n \"..............\",\n \"....#.....#...\",\n \"....#.....#...\",\n \"....#.....#...\",\n \"....#######...\"\n ]\n grid = [list(row) for row in grid]\n\n assert count_tiles(grid) == 57\n\n\nif __name__ == \"__main__\":\n\n test_create_grid()\n test_fill_water()\n print(\"all tests passed.\")\n\n grid = create_grid([line.strip() for line in open(\"input.txt\")])\n answer = count_tiles(grid)\n print(\"answer:\", answer)\n","sub_path":"day17/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":9388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"457989385","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm\nfrom django.contrib.auth import get_user_model\nfrom .utils import SendActivationEmailMixin\n\nclass UserCreationForm(SendActivationEmailMixin, BaseUserCreationForm):\n tags = ['Home', 'Work', 'Meetings']\n \n class Meta:\n model = get_user_model()\n fields = {'email', 'username'}\n\n def save(self, request):\n email = self.cleaned_data['email']\n \n user = super().save(commit=False)\n user.is_active = False\n user.save()\n self.save_m2m()\n \n for name in self.tags:\n tag = user.tag_set.create(name=name)\n\n self.send_mail(request, None, email)\n \n return user\n \nclass EmailResendForm(SendActivationEmailMixin, forms.Form):\n email = forms.EmailField()\n\n def save(self, request):\n email = self.cleaned_data['email']\n UserModel = get_user_model()\n try:\n user = UserModel._default_manager.get(email=email)\n except:\n user = None\n\n if user:\n self.send_mail(request, None, email)\n\n return user\n \n","sub_path":"user/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"615026573","text":"# Copyright 2020 PerfKitBenchmarker Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for perfkitbenchmarker.providers.gcp.gcp_dpb_dataproc.\"\"\"\n\nimport unittest\nfrom absl import flags\nimport mock\n\nfrom perfkitbenchmarker import vm_util\nfrom perfkitbenchmarker.providers.gcp import gcp_dpb_dataproc\nfrom perfkitbenchmarker.providers.gcp import gcs\nfrom tests import pkb_common_test_case\n\nTEST_RUN_URI = 'fakeru'\nGCP_ZONE_US_CENTRAL1_A = 'us-central1-a'\nBUCKET_NAME = 'foo'\n\nFLAGS = flags.FLAGS\n\n\nclass LocalGcpDpbDataproc(gcp_dpb_dataproc.GcpDpbDataproc):\n\n def __init__(self):\n self.dpb_service_zone = FLAGS.dpb_service_zone\n self.region = self.dpb_service_zone.rsplit('-', 1)[0]\n self.storage_service = gcs.GoogleCloudStorageService()\n self.storage_service.PrepareService(location=self.region)\n\n\nclass GcpDpbDataprocTestCase(pkb_common_test_case.PkbCommonTestCase):\n\n def setUp(self):\n super(GcpDpbDataprocTestCase, self).setUp()\n FLAGS.run_uri = TEST_RUN_URI\n FLAGS.dpb_service_zone = GCP_ZONE_US_CENTRAL1_A\n FLAGS.zones = [GCP_ZONE_US_CENTRAL1_A]\n\n def testCreateBucket(self):\n local_dataproc = LocalGcpDpbDataproc()\n with mock.patch(\n vm_util.__name__ + '.IssueCommand',\n return_value=('out_', 'err_', 0)) as mock_issue:\n local_dataproc.CreateBucket(BUCKET_NAME)\n self.assertEqual(mock_issue.call_count, 1)\n call_arg_list, _ = mock_issue.call_args\n self.assertListEqual([\n 'gsutil', 'mb', '-l',\n GCP_ZONE_US_CENTRAL1_A.rsplit('-', 1)[0], '-c', 'regional',\n 'gs://{}'.format(BUCKET_NAME)\n ], call_arg_list[0])\n\n def testDeleteBucket(self):\n local_dataproc = LocalGcpDpbDataproc()\n with mock.patch(\n vm_util.__name__ + '.IssueCommand',\n return_value=('out_', 'err_', 0)) as mock_issue:\n local_dataproc.DeleteBucket(BUCKET_NAME)\n self.assertEqual(mock_issue.call_count, 2)\n call_arg_list, _ = mock_issue.call_args\n self.assertListEqual(['gsutil', 'rb', 'gs://{}'.format(BUCKET_NAME)],\n call_arg_list[0])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/providers/gcp/gcp_dpb_dataproc_test.py","file_name":"gcp_dpb_dataproc_test.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"492565452","text":"from __future__ import print_function\nimport os\nfrom glob import glob\nimport numpy as np\nfrom astrometry.util.file import read_file\nfrom legacypipe.survey import LegacySurveyData\n\ndef delete_scaled_images(name, old_bricks, new_bricks):\n from map.views import get_layer\n layer = get_layer(name)\n modlayer = get_layer(name + '-model')\n print('Got layer:', layer)\n print('Got model layer:', modlayer)\n for scale in range(1, 8):\n if scale >= len(old_bricks):\n print('No old bricks for scale', scale)\n break\n sbricks = set()\n delfiles = set()\n scale_bricks = layer.get_bricks_for_scale(scale)\n for b in new_bricks:\n SB = layer.bricks_touching_radec_box(b.ra1, b.ra2, b.dec1, b.dec2,\n scale=scale, bricks=scale_bricks)\n #band = 'r'\n #bwcs = 
layer.get_scaled_wcs(b, band, scale-1)\n #SB = layer.bricks_touching_aa_wcs(bwcs, scale=scale)\n for sb in SB.brickname:\n sbricks.add(sb)\n for sb in SB:\n for band in ['g','r','z']:\n fn = layer.get_scaled_filename(sb, band, scale)\n delfiles.add(fn)\n fn = modlayer.get_scaled_filename(sb, band, scale)\n delfiles.add(fn)\n\n print(len(sbricks), 'scaled bricks at scale', scale, 'are updated')\n print('Deleting', len(delfiles), 'scaled files (if they exist!)')\n ndel = 0\n for fn in delfiles:\n if os.path.exists(fn):\n #print(' Removing', fn)\n os.remove(fn)\n ndel += 1\n print('Actually deleted', ndel, 'scaled files at scale', scale)\n allbricks = layer.get_bricks_for_scale(scale)\n I_touched = np.array([i for i,b in enumerate(allbricks.brickname)\n if b in sbricks])\n new_bricks = allbricks[I_touched]\n\n\ndef main():\n\n # indir = '/global/cscratch1/sd/dstn/dr8test-1'\n # name = 'dr8-test1'\n # pretty = 'DR8 test1'\n\n # indir = '/scratch1/scratchdirs/desiproc/dr8test002/'\n # name = 'dr8-test2'\n # pretty = 'DR8 test2 (outliers)'\n\n # indir = '/scratch1/scratchdirs/desiproc/dr8test003/'\n # name = 'dr8-test3'\n # pretty = 'DR8 test3 (outliers)'\n # \n # indir = '/scratch1/scratchdirs/desiproc/dr8test004/'\n # name = 'dr8-test4'\n # pretty = 'DR8 test4 (large-galaxies)'\n\n # indir = '/global/cscratch1/sd/dstn/dr8test005/'\n # name = 'dr8-test5'\n # pretty = 'DR8 test5 (trident)'\n\n # indir = '/global/cscratch1/sd/dstn/dr8test006/'\n # name = 'dr8-test6'\n # pretty = 'DR8 test6 (sky)'\n\n # indir = '/global/cscratch1/sd/dstn/dr8test007/'\n # name = 'dr8-test7'\n # pretty = 'DR8 test7 (outliers)'\n\n #indir = '/global/cscratch1/sd/dstn/dr8test14/'\n #name = 'dr8-test14'\n #pretty = 'DR8 test14 (rc)'\n\n #indir = '/global/project/projectdirs/cosmo/work/legacysurvey/dr8a/'\n #name = 'dr8a'\n #pretty = 'DR8a (rc)'\n\n # indir = '/global/project/projectdirs/cosmo/work/legacysurvey/dr8b/runbrick-decam/'\n # name = 'dr8b-decam'\n # pretty = 'DR8b DECam'\n # survey_dir = '/global/project/projectdirs/cosmo/work/legacysurvey/dr8b/runbrick-decam'\n\n # indir = '/global/project/projectdirs/cosmo/work/legacysurvey/dr8b/runbrick-90prime-mosaic/'\n # name = 'dr8b-90p-mos'\n # pretty = 'DR8b BASS+MzLS'\n # survey_dir = '/global/project/projectdirs/cosmo/work/legacysurvey/dr8b/runbrick-90prime-mosaic'\n\n rsync = False\n #indir = '/global/project/projectdirs/cosmo/work/legacysurvey/dr8c/90prime-mosaic/'\n #name = 'dr8c-90p-mos'\n #pretty = 'DR8c BASS+MzLS'\n survey_dir = '/global/cscratch1/sd/landriau/dr8'\n # ln -s /global/project/projectdirs/cosmo/work/legacysurvey/dr8b/runbrick-decam/coadds-only/coadd/ .\n\n #indir = '/scratch1/scratchdirs/desiproc/dr8/decam/'\n indir = 'data/dr8c-decam'\n name = 'dr8c-decam'\n pretty = 'DR8c DECam'\n #rsync = True\n\n indir = 'data/dr8i-decam'\n name = 'dr8i-decam'\n pretty = 'DR8i DECam'\n\n indir = 'data/dr8i-90p-mos'\n name = 'dr8i-90p-mos'\n pretty = 'DR8i MzLS+BASS'\n\n\n\n sublayers = ['', '-model', '-resid']\n subpretty = {'':' images', '-model':' models', '-resid':' residuals'}\n \n # #indir = '/global/cscratch1/sd/ziyaoz/dr9c/'\n # #indir = '/global/cscratch1/sd/dstn/dr9c-fpack/'\n # #rsync = True\n # indir = 'data/dr9c'\n # name = 'dr9c'\n # pretty = 'DR9c'\n # survey_dir = indir\n # \n # indir = '/global/cscratch1/sd/ziyaoz/dr9d-south/'\n # #rsync = True\n # name = 'dr9d-south'\n # pretty = 'DR9d south'\n # survey_dir = indir\n\n \n # indir = '/global/cscratch1/sd/ziyaoz/dr9d-north/'\n # #rsync = True\n # name = 'dr9d-north'\n # pretty = 'DR9d north'\n 
# survey_dir = indir\n\n # code runs:\n # rsync -LRarv /global/cscratch1/sd/ziyaoz/dr9d-south//./{coadd/*/*/*-{image-,model-,ccds}*.fits*,tractor} data/dr9d-south\n # add my image-coadds:\n # rsync -LRarv /global/cscratch1/sd/dstn/dr9d-coadds/./coadd/*/*/*-{image-,ccds}*.fits* data/dr9d-south\n \n # survey_dir = '/global/cscratch1/sd/desiproc/dr7'\n\n # sublayers = ['']\n # subpretty = {'':' images'}\n \n #survey_dir = '/global/cscratch1/sd/dstn/dr8-depthcut'\n #survey_dir = '/global/project/projectdirs/cosmo/work/legacysurvey/dr8a/'\n\n rsync = True\n #survey_dir = '/global/cfs/cdirs/cosmo/work/legacysurv\n\n side = 'north'\n #side = 'south'\n\n survey_dir = '/global/cscratch1/sd/ziyaoz/dr9e4/%s' % side\n indir = survey_dir\n name = 'dr9sv-%s' % side\n pretty = 'DR9-SV %s' % side\n\n rsync = False\n survey_dir = '/global/cfs/cdirs/cosmo/work/legacysurvey/dr9'\n indir = '/global/cscratch1/sd/dstn/fornax'\n name = 'fornax'\n pretty = 'Fornax'\n\n rsync = True\n survey_dir = '/global/cfs/cdirs/cosmo/work/legacysurvey/dr9'\n indir = '/global/cscratch1/sd/dstn/dr9.2'\n name = 'dr9-test-9.2'\n pretty = 'DR9.2 test'\n\n survey_dir = '/global/cscratch1/sd/ziyaoz/dr9j/south'\n indir = survey_dir\n name = 'dr9j-south'\n pretty = 'DR9j south'\n\n rsync = False\n if False:\n #indir = '/global/cscratch1/sd/ziyaoz/dr9m/north/'\n indir = '/global/cfs/cdirs/cosmo/work/legacysurvey/dr9m/north'\n name = 'dr9m-north'\n pretty = 'DR9m-north'\n survey_dir = '/global/cfs/cdirs/cosmo/work/legacysurvey/dr9m'\n if True:\n #indir = '/global/cscratch1/sd/ziyaoz/dr9m/south/'\n indir = '/global/cfs/cdirs/cosmo/work/legacysurvey/dr9m/south'\n #name = 'dr9m-south'\n name = 'ls-dr9-south'\n pretty = 'DR9m-south'\n survey_dir = '/global/cfs/cdirs/cosmo/work/legacysurvey/dr9m'\n\n #update = True\n update = False\n queue = True\n\n # rsync = True\n # update = False\n # queue = False\n # indir = '/global/cscratch1/sd/dstn/m33-2/south/'\n # name = 'dr9-m33'\n # pretty = 'DR9m-M33'\n # survey_dir = '/global/cfs/cdirs/cosmo/work/legacysurvey/dr9m'\n \n \n datadir = 'data'\n\n survey = LegacySurveyData(survey_dir=survey_dir)\n allbricks = survey.get_bricks_readonly()\n basedir = os.path.join(datadir, name)\n\n from astrometry.util.fits import fits_table\n \n if update:\n old_bricks_dir = None\n for i in range(100):\n old_bricks_dir = os.path.join(basedir, 'old-bricks-%i' % i)\n if os.path.exists(old_bricks_dir):\n #print('exists:', old_bricks_dir)\n continue\n os.makedirs(old_bricks_dir)\n print('Created', old_bricks_dir)\n break\n if old_bricks_dir is None:\n sys.exit(-1)\n\n old_bricks = []\n for scale in range(8):\n if scale == 0:\n fn = 'survey-bricks.fits.gz'\n else:\n fn = 'survey-bricks-%i.fits.gz' % scale\n pathfn = os.path.join(basedir, fn)\n if os.path.exists(pathfn):\n T = fits_table(pathfn)\n print('Read', len(T), 'old bricks from', pathfn)\n old_bricks.append(T)\n #### os.rename(pathfn, os.path.join(old_bricks_dir, fn))\n\n if rsync:\n for sub in ['image-g', 'image-r', 'image-z', 'model-g', 'model-r', 'model-z', 'ccds']:\n cmd = 'rsync -LRarv %s/./coadd/*/*/*-%s*.fits* %s/%s' % (indir, sub, datadir, name)\n print(cmd)\n os.system(cmd)\n\n cmd = 'rsync -LRarv %s/./tractor %s/%s' % (indir, datadir, name)\n print(cmd)\n os.system(cmd)\n \n # cmd = 'rsync -LRarv %s/./{coadd/*/*/*-{image-,model-,ccds}*.fits*,tractor} %s/%s' % (indir, datadir, name)\n # print(cmd)\n # os.system(cmd)\n\n # ...?\n cmd = 'rsync -Rarv %s/./{images,survey-ccds*.fits} %s/%s' % (survey_dir, datadir, name)\n print(cmd)\n os.system(cmd)\n else:\n 
# symlink\n if os.path.exists(basedir):\n print('Not symlinking', indir, 'to', basedir, ': already exists!')\n else:\n os.makedirs(basedir)\n for subdir in ['coadd', 'tractor']:\n os.symlink(os.path.join(indir, subdir), os.path.join(basedir, subdir), target_is_directory=True)\n for subdir in ['images', 'calib']:\n os.symlink(os.path.join(indir, subdir), os.path.join(basedir, subdir), target_is_directory=True)\n for pat in ['survey-ccds-*']:\n fns = glob(os.path.join(indir, pat))\n print('fns', fns)\n for fn in [os.path.basename(f) for f in fns]:\n print('symlink', os.path.join(indir, subdir), os.path.join(basedir, subdir))\n os.symlink(os.path.join(indir, fn), os.path.join(basedir, fn), target_is_directory=False)\n\n # Find new available bricks\n print('Searching for new extra-image files...')\n extraimagefns = glob(os.path.join(basedir, 'extra-images', 'coadd', '*', '*', '*-image-*.fits*'))\n print('Found', len(extraimagefns), 'extra images')\n\n # Update all bricks in extra-images...\n if update:\n brickset = set()\n\n # Read list of new bricks\n f = open('bricks.txt')\n for line in f.readlines():\n brickname = line.strip()\n brickset.add(brickname)\n\n # for fn in extraimagefns:\n # dirs = fn.split('/')\n # brickname = dirs[-2]\n # brickset.add(brickname)\n print(len(brickset), 'bricks found')\n I, = np.nonzero([b in brickset for b in allbricks.brickname])\n bricks = allbricks[I]\n delete_scaled_images(name, old_bricks, bricks)\n sys.exit(0)\n\n print('Searching for new coadd image files...')\n imagefns = glob(os.path.join(basedir, 'coadd', '*', '*', '*-image-*.fits*'))\n print('Image filenames:', len(imagefns), 'plus', len(extraimagefns), 'extras')\n imagefns += extraimagefns\n\n brickset = set()\n for fn in imagefns:\n dirs = fn.split('/')\n brickname = dirs[-2]\n brickset.add(brickname)\n print(len(brickset), 'bricks found')\n I, = np.nonzero([b in brickset for b in allbricks.brickname])\n bricks = allbricks[I]\n\n brickfn = os.path.join(basedir, 'survey-bricks.fits.gz')\n bricks.writeto(brickfn)\n print('Wrote', brickfn)\n\n for x in sublayers:\n cmd = 'python3 -u render-tiles.py --kind %s%s --bricks' % (name, x)\n print(cmd)\n os.system(cmd)\n\n if update:\n # Find and remove existing scaled images touching new bricks.\n old = old_bricks[0]\n old_names = set([str(b) for b in old.brickname])\n I_new = np.array([i for i,b in enumerate(bricks.brickname)\n if not str(b) in old_names])\n # Newly added bricks\n new_bricks = bricks[I_new]\n print('Added', len(new_bricks), 'bricks')\n\n delete_scaled_images(name, old_bricks, new_bricks)\n\n fn = 'map/test_layers.py'\n txt = open(fn).read()\n for x in sublayers:\n txt = txt + '\\n' + 'test_layers.append((\"%s%s\", \"%s%s\"))\\n' % (name, x, pretty, subpretty[x])\n open(fn, 'wb').write(txt.encode())\n print('Wrote', fn)\n\n threads = 32\n tharg = '--threads %i ' % threads\n #tharg = ''\n\n if queue:\n\n # from map.views import get_layer\n # imglayer = get_layer(name)\n # modlayer = get_layer(name + '-model')\n\n ras = np.linspace(0, 360, 361)\n for scale in range(1,8):\n #for layer,layerobj in [(name,imglayer), (name+'-model',modlayer)]:\n for layer in [name, name+'-model']:\n for ralo,rahi in zip(ras, ras[1:]):\n cmd = 'python3 -u render-tiles.py --kind %s --scale --zoom %i --minra %f --maxra %f' % (layer, scale, ralo, rahi)\n print(cmd)\n return\n\n # images\n for scale in range(1,8):\n cmd = 'python3 -u render-tiles.py --kind %s --scale --zoom %i %s' % (name, scale, tharg)\n print(cmd)\n os.system(cmd)\n\n # models\n for scale in 
range(1,8):\n cmd = 'python3 -u render-tiles.py --kind %s-model --scale --zoom %i %s' % (name, scale, tharg)\n print(cmd)\n os.system(cmd)\n\n # resids\n for scale in range(1,8):\n cmd = 'python3 -u render-tiles.py --kind %s-resid --scale --zoom %i %s' % (name, scale, tharg)\n print(cmd)\n os.system(cmd)\n\n for x in sublayers:\n cmd = 'python3 -u render-tiles.py --kind %s%s --top' % (name, x)\n print(cmd)\n os.system(cmd)\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"load-layer.py","file_name":"load-layer.py","file_ext":"py","file_size_in_byte":13537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"462061411","text":"from appium import webdriver\nimport layout_tree as LayoutTree\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom appium.webdriver.common.touch_action import TouchAction\n\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\n\n\nclass Explorer:\n def __init__(self):\n self.desiredCapabilities = {\n \"platformName\": \"Android\",\n \"deviceName\": \"Pixel_4_API_30\",\n \"newCommandTimeout\": 10000\n }\n\n d = webdriver.Remote('http://localhost:4723/wd/hub', self.desiredCapabilities)\n assert d is not None\n self.driver = d\n\n def check_text_exist_assertion(self, text, state_name_path):\n # self.extract_state(state_name_path)\n print(text)\n # print(EC.text_to_be_present_in_element((By.XPATH, \"/hierarchy/android.widget.FrameLayout\"), text))\n\n def extract_state(self, state_name_path):\n layout = LayoutTree.LayoutTree(self.driver, state_name_path)\n curr_state = layout.extract_state()\n for element in curr_state.nodes:\n exec_identifier = \"\"\n if element.interactable:\n if 'content-desc' in element.attributes.keys():\n element.add_data('content-desc', element.attributes['content-desc'])\n element.add_exec_identifier('accessibility-id', element.attributes['content-desc'])\n exec_identifier_val = element.attributes['content-desc']\n exec_identifier = 'accessibility-id'\n if 'id' in element.attributes.keys():\n element.add_data('id', element.attributes['id'])\n element.add_exec_identifier('id', element.attributes['id'])\n exec_identifier_val = element.attributes['id']\n exec_identifier = 'id'\n if 'resource-id' in element.attributes.keys():\n element.add_data('resource-id', element.attributes['resource-id'])\n element.add_exec_identifier('resource-id', element.attributes['resource-id'])\n exec_identifier_val = element.attributes['resource-id']\n exec_identifier = 'resource-id'\n\n if 'text' in element.attributes.keys():\n element.add_data('text', element.attributes['text'])\n\n\n if 'text' in element.attributes.keys():\n pass\n # element.ad\n # for interaction in element.interactions:\n # if interaction == \"click\":\n # curr_state.add_action(element.id, exec_identifier_val, interaction)\n\n return curr_state\n\n def execute_event(self, event):\n elem = None\n alreadyClicked = False\n print(event)\n if event.action == \"oracle-text_exists\" or event.action == \"oracle-widget_exists\":\n return\n if event.action == \"oracle-assert_equal\":\n text_input = event.text_input\n attribute = text_input.split(\" : \")[0]\n value = text_input.split(\" : \")[1]\n if event.action == \"back\":\n self.driver.back()\n else:\n if event.exec_id_type == \"accessibility-id\":\n time.sleep(6)\n print(event.exec_id_val)\n elem = self.driver.find_element_by_accessibility_id(event.exec_id_val)\n\n 
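# Each exec_id_type branch maps one locator strategy (accessibility id,
# XPath, resource id) to the corresponding Appium find_element_* call; the
# fixed time.sleep() before each lookup is a crude synchronization device,
# and the WebDriverWait/expected_conditions imports at the top of this file
# would support explicit waits instead.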
if event.exec_id_type == \"xPath\":\n time.sleep(3)\n elem = self.driver.find_element_by_xpath(event.exec_id_val[0])\n\n if event.exec_id_type == \"resource-id\":\n time.sleep(3)\n elem = self.driver.find_element_by_id(event.exec_id_val)\n\n if event.action == \"send_keys\":\n time.sleep(2)\n elem.click()\n alreadyClicked = True\n time.sleep(1)\n elem.send_keys(event.text_input)\n\n if event.action == \"send_keys_enter\":\n time.sleep(2)\n elem.click()\n alreadyClicked = True\n time.sleep(1)\n elem.send_keys(event.text_input)\n self.driver.press_keycode(66)\n\n if not alreadyClicked:\n elem.click()\n\n\n\n\nif __name__ == \"__main__\":\n explorer = Explorer()\n\n # explorer.driver.scroll(10,100)\n # handle_one_size = explorer.driver.get_window_size()\n # print(handle_one_size)\n # #scroll_down\n # TouchAction(explorer.driver).long_press(x=handle_one_size['height']/2, y=handle_one_size['height']-1, duration=1000).move_to(x=handle_one_size['height']/2, y=1).release().perform()\n # #scroll_up\n # TouchAction(explorer.driver).long_press(x=handle_one_size['height']/2, y=200, duration=1000).move_to(x=handle_one_size['height']/2, y=handle_one_size['height']-1).release().perform()\n elem = explorer.driver.find_element_by_id(\"com.contextlogic.wish:id/action_bar_item_icon\")\n elem.click()\n WebDriverWait(explorer.driver, 10).until(EC.presence_of_element_located((By.ID, \"com.contextlogic.wish:id/browse_button\")))\n WebDriverWait(explorer.driver, 10).until(EC.presence_of_element_located((By.ID, \"com.contextlogic.wish:id/action_bar_item_icon\")))","sub_path":"python-android/scroll_swipe_test.py","file_name":"scroll_swipe_test.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"488103655","text":"#####################Notes\n## need to change\n## adl_name\n## storage_account_name\n## storage_account_key\n\n# coding: utf-8\n\n# In[91]:\n\n''' Add Web App site-packages to path so we can import them '''\nimport sys, os\n#sys.path.append('D:\\\\home\\\\site\\\\wwwroot\\\\env\\\\Lib\\\\site-packages')\n\nimport pandas as pd\nfrom azure.storage.blob import BlockBlobService\nfrom io import StringIO\nimport json\nimport numpy as np\nfrom ast import literal_eval\nfrom copy import deepcopy\nfrom pyspark import SparkConf\nfrom pyspark import SparkContext \n#from AttributeDescription import AttributeDescription\n#from Store import Store\n#from Inventory import Inventory\nsc = SparkContext()\n## adl Name\nadl_name=\"\"\n## adl link\nadl_loc=\"adl://\"+adl_name+\".azuredatalakestore.net/\"\n## recommended prices path\nprice_change_d_loc=adl_loc+\"medium_results/suggested_prices\"\n## function for convert format in recommended prices\ndef format_converting(p):\n ProductID,StoreID,DateTime,Price=p\n return([str(ProductID),int(str(StoreID)),str(DateTime),float(str(Price))])\n\nclass AttributeDescription:\n def __init__(self):\n '''\n Load parameter file and create AttributeDescription\n @param string input_filename: local CSV file containing parameters, including blob storage account credentials\n @param bool load_from_blob: if true, loads an existing hierarchy on blob storage; otherwise generates a new one\n '''\n \n number_of_stores = 6\n number_of_brands = 3\n number_of_departments = 20\n number_of_weeks = 2\n storage_account_name = \"\"\n storage_account_key = \"\"\n \n kwargs = {\"n_stores\":number_of_stores, \"n_brands\":number_of_brands, \"n_departments\":number_of_departments, \"start_date\":\"2016-10-02 
00:00:00\", \"n_weeks\":number_of_weeks, \"blob_account_name\":storage_account_name, \"blob_account_key\":storage_account_key, \"blob_raw_data_container\":\"rawdata\", \"blob_public_parameters_container\":\"publicparameters\", \"shipment_frequency\":'1 days', \"shipment_size\":300, \"shelf_life\":'2 days', \"blob_private_parameters_container\":\"privateparameters\"\n }\n \n self.hierarchy = {}\n self.hierarchy['InitialDate'] = kwargs['start_date']\n self.hierarchy['InitialWeeksToSimulate'] = kwargs['n_weeks']\n self.hierarchy['BlobAccountName'] = kwargs['blob_account_name']\n self.hierarchy['BlobAccountKey'] = kwargs['blob_account_key']\n self.hierarchy['BlobRawDataContainer'] = kwargs['blob_raw_data_container']\n self.hierarchy['BlobPublicParametersContainer'] = kwargs['blob_public_parameters_container']\n self.hierarchy['BlobPrivateParametersContainer'] = kwargs['blob_private_parameters_container']\n \n \n \n load_from_blob = self.connect_to_blob_storage()\n if load_from_blob:\n self.load_hierarchy()\n return\n \n '''\n If not loading from blob, need to generate new attributes for stores, products, etc.\n The assumptions used in generating these may change in the future.\n '''\n self.hierarchy['Stores'] = []\n price_elasticities = np.random.uniform(-1.5,-0.5, size=kwargs['n_departments']).tolist()\n desirabilities = np.random.uniform(0.7, 1.3, size=kwargs['n_brands']).tolist()\n msrps_and_costs = np.matrix(np.random.multivariate_normal(mean=[20, 10],\n cov=[[25, 6.25], [6.25, 6.25]],\n size=kwargs['n_brands']*kwargs['n_departments']).round(2))\n msrps_and_costs[:, 1] = np.apply_along_axis(max, 1, msrps_and_costs[:, 1], 1.)\n msrps_and_costs[:, 0] = np.maximum(msrps_and_costs[:, 1]*1.1, msrps_and_costs[:, 0])\n msrps_and_costs = msrps_and_costs.tolist()\n \n for StoreID in range(1, kwargs['n_stores']+1):\n AvgHouseholdIncome, AvgTraffic, LossRate = np.random.multivariate_normal(mean = [5E4, 100, 10],\n cov = [[1E8, -1E4, 1E4],\n [-1E4, 100, 10],\n [1E4, 10, 4]]).tolist()\n store_dict = {}\n store_dict['StoreID'] = StoreID\n store_dict['AvgHouseholdIncome'] = AvgHouseholdIncome\n store_dict['AvgTraffic'] = AvgTraffic\n store_dict['Departments'] = []\n for DepartmentID in range(1, kwargs['n_departments']+1):\n department_dict = {}\n department_dict['DepartmentID'] = DepartmentID\n department_dict['PriceElasticity'] = price_elasticities[DepartmentID-1]\n department_dict['Brands'] = []\n for BrandID in range(1, kwargs['n_brands']+1):\n brand_dict = {}\n brand_dict['BrandID'] = BrandID\n brand_dict['Desirability'] = desirabilities[BrandID-1]\n brand_dict['Products'] = []\n \n ''' For the time being, only one product per brand-department combo '''\n MSRP, Cost = msrps_and_costs[(DepartmentID - 1)*kwargs['n_brands'] + BrandID - 1]\n product_dict = {}\n product_dict['ProductID'] = '{}_{}'.format(DepartmentID, BrandID)\n product_dict['Cost'] = round(Cost, 2)\n product_dict['MSRP'] = round(MSRP, 2)\n product_dict['LossRate'] = LossRate\n product_dict['ShipmentFreq'] = kwargs['shipment_frequency']\n product_dict['ShipmentSize'] = kwargs['shipment_size']\n product_dict['ShelfLife'] = kwargs['shelf_life']\n brand_dict['Products'].append(product_dict)\n \n department_dict['Brands'].append(brand_dict)\n store_dict['Departments'].append(department_dict)\n self.hierarchy['Stores'].append(store_dict)\n self.store_hierarchy() # writes the JSON file to blob storage for later access\n self.write_csv_attributes() # writes the same information in the original CSV form\n \n return\n\n\n def 
read_parameters_file(self, input_filename):\n ''' Read the CSV file listing parameters (created by hand or during CIQS) '''\n kwargs = {}\n try:\n input_file = open(input_filename, 'r')\n except IOError as e:\n raise Exception('Error opening parameter file {} for reading:\\n{}'.format(input_filename, e))\n for line in input_file:\n try:\n parameter_name, parameter_value = line.strip().split(',')\n kwargs[parameter_name] = literal_eval(parameter_value)\n except Exception as e:\n raise Exception('Error parsing line of parameter file:\\n{}\\n{}'.format(line, e))\n return(kwargs)\n\n\n def connect_to_blob_storage(self):\n ''' Connects to blob storage and creates new containers (doesn't overwrite if present) '''\n self.block_blob_service = BlockBlobService(account_name=self.hierarchy['BlobAccountName'],\n account_key=self.hierarchy['BlobAccountKey'])\n self.block_blob_service.create_container(self.hierarchy['BlobRawDataContainer'])\n self.block_blob_service.create_container(self.hierarchy['BlobPublicParametersContainer'])\n self.block_blob_service.create_container(self.hierarchy['BlobPrivateParametersContainer'])\n\n ''' Check if hierarchy.json file exists; if so, we can load from blob '''\n blob_list = [i.name for i in self.block_blob_service.list_blobs(self.hierarchy['BlobPrivateParametersContainer'])]\n if 'hierarchy.json' in blob_list:\n load_from_blob = True\n else:\n load_from_blob = False\n return load_from_blob\n\n \n def load_hierarchy(self):\n ''' Loads JSON-formatted hierarchy (from private container) '''\n hierarchy_string = self.block_blob_service.get_blob_to_text(self.hierarchy['BlobPrivateParametersContainer'],\n 'hierarchy.json').content\n self.hierarchy = json.loads(hierarchy_string)\n return\n\n \n def store_hierarchy(self):\n ''' Write JSON-formatted hierarchy to both public and private containers '''\n hierarchy_string = json.dumps(self.hierarchy, sort_keys=True, indent=4, separators=(',', ': '))\n self.block_blob_service.create_blob_from_text(container_name = self.hierarchy['BlobPrivateParametersContainer'],\n blob_name = 'hierarchy.json',\n text = hierarchy_string)\n \n ''' Hide info that shouldn't be shared before writing to public container '''\n reduced_hierarchy = deepcopy(self.hierarchy)\n for i, store in enumerate(reduced_hierarchy['Stores']):\n for j, department in enumerate(store['Departments']):\n del department['PriceElasticity']\n for k, brand in enumerate(department['Brands']):\n del brand['Desirability']\n for ell, product in enumerate(brand['Products']):\n del product['LossRate']\n brand['Products'][ell] = product\n department['Brands'][k] = brand\n store['Departments'][j] = department\n reduced_hierarchy['Stores'][i] = store\n hierarchy_string = json.dumps(reduced_hierarchy, sort_keys=True, indent=4, separators=(',', ': '))\n self.block_blob_service.create_blob_from_text(container_name = self.hierarchy['BlobPublicParametersContainer'],\n blob_name = 'hierarchy.json',\n text = hierarchy_string)\n return\n \n def write_csv_attributes(self):\n '''\n Under current assumptions about desirability, price elasticity, product offering, etc.\n constancy across stores and which features should remain hidden, writes:\n - stores.csv\n - brands.csv\n - departments.csv\n - products.csv\n This function will probably become obsolete if we decide to allow product offering,\n desirability, price elasticity, etc. 
to vary between stores.\n '''\n stores = []\n for store in self.hierarchy['Stores']:\n stores.append([store['StoreID'], store['AvgHouseholdIncome'], store['AvgTraffic']])\n store_df = pd.DataFrame(stores, columns=['StoreID', 'AvgHouseholdIncome', 'AvgTraffic'])\n self.block_blob_service.create_blob_from_text(container_name = self.hierarchy['BlobPublicParametersContainer'],\n blob_name = 'stores.csv',\n text = store_df.to_csv(index=False))\n \n ''' Currently, departments have the same properties across stores. We don't share price elasticity info. '''\n departments = []\n for department in self.hierarchy['Stores'][0]['Departments']:\n departments.append([department['DepartmentID']])\n department_df = pd.DataFrame(departments, columns=['DepartmentID'])\n self.block_blob_service.create_blob_from_text(container_name = self.hierarchy['BlobPublicParametersContainer'],\n blob_name = 'departments.csv',\n text = department_df.to_csv(index=False))\n \n ''' Currently, every brand offers a product in every department. We don't share desirability info. '''\n brands = []\n for brand in self.hierarchy['Stores'][0]['Departments'][0]['Brands']:\n brands.append([brand['BrandID']])\n brand_df = pd.DataFrame(brands, columns=['BrandID'])\n self.block_blob_service.create_blob_from_text(container_name = self.hierarchy['BlobPublicParametersContainer'],\n blob_name = 'brands.csv',\n text = brand_df.to_csv(index=False))\n \n products = []\n for department in self.hierarchy['Stores'][0]['Departments']:\n for brand in department['Brands']:\n product = brand['Products'][0]\n products.append([department['DepartmentID'], brand['BrandID'], product['ProductID'],\n product['MSRP'], product['Cost']])\n product_df = pd.DataFrame(products, columns=['DepartmentID', 'BrandID', 'ProductID', 'MSRP', 'Cost'])\n self.block_blob_service.create_blob_from_text(container_name = self.hierarchy['BlobPublicParametersContainer'],\n blob_name = 'products.csv',\n text = product_df.to_csv(index=False))\n return\n\n \n def get_product_features(self):\n ''' Loads product features in relational form '''\n features = []\n for store in self.hierarchy['Stores']:\n for department in store['Departments']:\n for brand in department['Brands']:\n for product in brand['Products']:\n features.append([store['StoreID'], store['AvgHouseholdIncome'], store['AvgTraffic'],\n department['DepartmentID'], department['PriceElasticity'],\n brand['BrandID'], brand['Desirability'],\n product['ProductID'], product['Cost'], product['MSRP'], product['LossRate'],\n product['ShelfLife'], product['ShipmentFreq'], product['ShipmentSize']])\n self.feature_df = pd.DataFrame(features, columns=['StoreID', 'AvgHouseholdIncome', 'AvgTraffic',\n 'DepartmentID', 'PriceElasticity', 'BrandID', 'Desirability',\n 'ProductID', 'Cost', 'MSRP', 'LossRate', 'ShelfLife',\n 'ShipmentFreq', 'ShipmentSize'])\n return\n \n \n def get_prices(self):\n ''' Load/generate prices for each product-store-date combination needed '''\n self.get_product_features() # get the MSRP and Cost for each product-store combination, among other things\n price_change_df = None\n try:\n # Try loading the suggested price changes and merging with the product features.\n # This will fail during the initial round because there will be no suggested prices yet.\n #price_change_string = self.block_blob_service.get_blob_to_text(self.hierarchy['BlobPublicParametersContainer'],\n # 'suggested_prices.csv')\n #price_change_string = price_change_string.content\n #price_change_df = pd.read_csv(StringIO(price_change_string), 
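
The get_product_features method above walks the nested Stores → Departments → Brands → Products hierarchy and emits one relational row per product. A minimal, self-contained sketch of that flattening idea — the toy hierarchy and column names below are illustrative stand-ins, not the simulator's full schema:

import pandas as pd

hierarchy = {"Stores": [
    {"StoreID": 1, "Departments": [
        {"DepartmentID": 1, "Brands": [
            {"BrandID": 1, "Products": [{"ProductID": "1_1", "MSRP": 19.99}]},
        ]},
    ]},
]}

rows = []
for store in hierarchy["Stores"]:
    for dept in store["Departments"]:
        for brand in dept["Brands"]:
            for product in brand["Products"]:
                # One flat row per leaf-level product.
                rows.append([store["StoreID"], dept["DepartmentID"],
                             brand["BrandID"], product["ProductID"], product["MSRP"]])

feature_df = pd.DataFrame(
    rows, columns=["StoreID", "DepartmentID", "BrandID", "ProductID", "MSRP"])

Keeping the hierarchy as the single source of truth and deriving the flat frame on demand, as the simulator does, avoids the two representations drifting apart.
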
header=None,\n # names=['ProductID', 'StoreID', 'DateTime', 'Price'])\n price_change_df = sc.textFile(price_change_d_loc).map(lambda p: format_converting(p.split(','))).collect()\n price_change_df=pd.DataFrame(price_change_df, columns=['ProductID', 'StoreID', 'DateTime', 'Price'])\n price_change_df['DateTime'] = pd.to_datetime(price_change_df['DateTime'])\n if price_change_df.DateTime.min() != price_change_df.DateTime.max():\n raise Exception('Could not load suggested_prices.csv because prices are suggested ' +\n 'for multiple dates (expected only one date).')\n price_change_df = self.feature_df.merge(price_change_df, on=['ProductID', 'StoreID'], how='left')\n price_change_df['DateTime'] = price_change_df['DateTime'].max()\n except Exception:\n print('Could not load suggested_prices.csv -- assuming this is the initial round')\n\n if price_change_df is None:\n # We need to do a full outer join between price change dates and product features.\n # Create a dummy column called \"ones\" for this purpose, and remove it afterward.\n dates = [pd.to_datetime(self.hierarchy['InitialDate']) + pd.to_timedelta('{} days'.format(7*i)) for i in range(self.hierarchy['InitialWeeksToSimulate'])]\n price_change_df = pd.DataFrame(dates, columns=['DateTime'])\n price_change_df['ones'] = 1\n feature_df = self.feature_df.copy(deep=True)\n feature_df['ones'] = 1\n price_change_df = feature_df.merge(price_change_df, on='ones', how='outer')\n price_change_df.drop('ones', axis=1, inplace=True)\n price_change_df['Price'] = np.NaN # all prices must be randomly generated\n \n ''' Record which dates the U-SQL query will need to process '''\n #sales_start = price_change_df.DateTime.min() + pd.to_timedelta('1 days') # first sales summary\n #sales_end = price_change_df.DateTime.max() + pd.to_timedelta('7 days') # last sales summary\n #output_str = 'SalesStart,SalesEnd\\n{},{}\\n'.format(sales_start.strftime('%Y-%m-%d %H:%M:%S'),\n #sales_end.strftime('%Y-%m-%d %H:%M:%S'))\n #self.block_blob_service.create_blob_from_text(container_name = self.hierarchy['BlobPublicParametersContainer'],\n #blob_name = 'unprocessed_dates.csv',\n #text = output_str)\n\n ''' Randomly generate any unspecified prices '''\n price_change_df.Price = price_change_df.apply(self.choose_new_price, axis=1)\n \n ''' Store the prices and write to JSON files '''\n self.price_change_df = price_change_df[['ProductID', 'StoreID', 'DateTime', 'Price']]\n self.write_price_changes_to_blob()\n return\n\n\n def choose_new_price(self, row):\n ''' Pick a random price between a product's cost and MSRP '''\n if not np.isnan(row.Price):\n return(row.Price)\n mu = row.Cost + 0.8 * (row.MSRP - row.Cost)\n sd = (0.5 * (row.MSRP - row.Cost))**2\n result = np.random.normal(loc=mu, scale=sd)\n while (result > row.MSRP) or (result < row.Cost):\n result = np.random.normal(loc=mu, scale=sd)\n return(round(result, 2))\n \n\n def write_price_changes_to_blob(self):\n ''' Write one price change JSON file per store/date combination (after erasing the old ones) '''\n for blob in self.block_blob_service.list_blobs(self.hierarchy['BlobRawDataContainer']):\n if 'pc_' in blob.name:\n self.block_blob_service.delete_blob(self.hierarchy['BlobRawDataContainer'], blob.name)\n\n for record in self.price_change_df.groupby(['StoreID', 'DateTime']):\n blob_name = 'pc_store{}_{}.json'.format(record[0][0], record[0][1].strftime('%Y_%m_%d_%H_%M_%S'))\n\n ''' Create a dictionary of info to be encoded in JSON format '''\n price_change_dict = {}\n price_change_dict['StoreID'] = int(record[0][0])\n 
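
The choose_new_price helper above is a rejection-sampled truncated normal: draw from a normal and redraw until the value lands in [Cost, MSRP]. One detail worth double-checking in the original is that sd = (0.5 * (row.MSRP - row.Cost))**2 is a squared quantity, while np.random.normal's scale argument expects a standard deviation, not a variance. A sketch of the same sampler with the spread left un-squared — the function name and the un-squaring are my assumptions, not the simulator's code:

import numpy as np

def draw_price(cost, msrp, rng=None):
    # Truncated normal on [cost, msrp] via rejection sampling.
    # The mean sits 80% of the way from cost to msrp, as in choose_new_price.
    rng = rng or np.random.default_rng()
    mu = cost + 0.8 * (msrp - cost)
    sd = 0.5 * (msrp - cost)  # interpreted as a standard deviation
    x = rng.normal(loc=mu, scale=sd)
    while not (cost <= x <= msrp):
        x = rng.normal(loc=mu, scale=sd)
    return round(x, 2)

Rejection sampling is fine here because the window [cost, msrp] covers most of the distribution's mass; for narrow windows an inverse-CDF truncated normal (e.g. scipy.stats.truncnorm) would loop far less.
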
price_change_dict['PriceDate'] = str(record[0][1])\n entries = []\n for row in record[1].itertuples():\n entry_dict = {}\n entry_dict['ProductID'] = str(row.ProductID)\n entry_dict['Price'] = float(row.Price)\n entries.append(entry_dict)\n price_change_dict['PriceUpdates'] = entries\n\n output_str = json.dumps(price_change_dict, sort_keys=True, indent=4, separators=(',', ': '))\n self.block_blob_service.create_blob_from_text(container_name = self.hierarchy['BlobRawDataContainer'],\n blob_name = blob_name,\n text = output_str)\n return\n \n \n def get_demand(self):\n ''' Calculates demand values (currently based on a modified formula suggested by Yiyu) '''\n demand_df = self.price_change_df.merge(self.feature_df, on=['ProductID', 'StoreID'], how='left')\n demand_df['RelativePrice'] = demand_df['Price'] / demand_df.groupby('DepartmentID')['Price'].transform('mean')\n demand_df['FracDiscountOverMSRP'] = (demand_df.MSRP - demand_df.Price) / demand_df.MSRP\n demand_df['Demand'] = demand_df.AvgTraffic * demand_df.Desirability / (1 - demand_df.FracDiscountOverMSRP)\n demand_df.Demand += (demand_df.RelativePrice - 1) * demand_df.Price * demand_df.PriceElasticity\n demand_df.Demand /= demand_df.RelativePrice**2\n demand_df.Demand = demand_df.Demand.apply(lambda x: max(x, 5))\n\t\t#demand_df.Demand /= np.log(max(demand_df.Price, 2))**2\n #demand_df.Demand = 20 * demand_df.Demand.apply(lambda x: max(x, 0.01))\n demand_df = demand_df[['ProductID', 'StoreID', 'DateTime', 'Price', 'LossRate', 'ShelfLife', 'ShipmentFreq',\n 'ShipmentSize', 'Demand']]\n self.demand_df = demand_df\n return\n\n\n\n\n\n\n\n\nclass Inventory:\n ''' Helper class to report inventory/sales/losses '''\n def __init__(self, description, row):\n ''' Load last inventory record if possible; otherwise, create a new inventory '''\n self.block_blob_service = description.block_blob_service\n self.container = description.hierarchy['BlobRawDataContainer']\n self.store_id = row.StoreID\n self.product_id = row.ProductID\n self.price = row.Price\n self.shipment_size = row.ShipmentSize\n self.shipment_frequency = pd.to_timedelta(str(row.ShipmentFreq))\n self.shelf_life = pd.to_timedelta(str(row.ShelfLife))\n self.inventory = []\n self.arrivals = 0\n self.last_write_date = row.DateTime\n \n try:\n blob_name = 'inv_store{}_{}.json'.format(self.store_id,\n row.DateTime.strftime('%Y_%m_%d_%H_%M_%S'))\n inv_string = self.block_blob_service.get_blob_to_text(self.container, blob_name).content\n last_inventory = json.loads(inv_string)\n for product in last_inventory['Products']:\n if product['ProductID'] == self.product_id:\n break\n for batch in product['CurrentInventory']:\n self.inventory.append([pd.to_datetime(batch['ExpiryDateTime']), batch['Units']])\n self.start_date = self.inventory[0][0] - self.shelf_life # maintain shipment phase\n except Exception as e:\n print('Could not load last inventory record {}; creating a brand new inventory'.format(blob_name))\n self.start_date = row.DateTime\n self.arrivals = self.shipment_size\n self.inventory = [[self.start_date + self.shelf_life, self.shipment_size]]\n \n self.sales = 0\n self.losses = 0\n self.spoilages = 0\n return\n \n \n def update_price(self, price):\n ''' Used to update the price included in sales records '''\n self.price = price\n return\n \n \n def remove_unit(self, time, is_sale = True):\n ''' Checks whether a new sale/loss event is possible, and if so, updates inventory and sale records '''\n time = time.round(freq='1s')\n if len(self.inventory) > 0:\n if is_sale:\n self.sales += 1\n 
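
The Inventory class being defined here tracks stock as an ordered list of [expiry_date, units] batches: sales and losses always consume the oldest batch first (FIFO), and expired batches are swept out at day end. A stripped-down sketch of that batch bookkeeping, with plain integer days and invented names for brevity:

class BatchInventory:
    def __init__(self):
        self.batches = []  # [expiry_day, units], oldest batch first

    def receive(self, expiry_day, units):
        self.batches.append([expiry_day, units])

    def remove_unit(self):
        # Take one unit from the oldest batch; False means out of stock.
        if not self.batches:
            return False
        if self.batches[0][1] == 1:
            self.batches.pop(0)
        else:
            self.batches[0][1] -= 1
        return True

    def expire(self, today):
        # Count and drop batches whose expiry date has passed.
        spoiled = sum(units for day, units in self.batches if day <= today)
        self.batches = [[day, units] for day, units in self.batches if day > today]
        return spoiled

inv = BatchInventory()
inv.receive(expiry_day=3, units=2)
assert inv.remove_unit() and inv.remove_unit() and not inv.remove_unit()
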
else:\n self.losses += 1\n if self.inventory[0][1] == 1:\n self.inventory.pop(0) # last item from this expiry date; remove its entry\n else:\n self.inventory[0][1] -= 1\n event_dict = {}\n if is_sale:\n ''' Create a JSONable description of a sale (no need for loss events) '''\n event_dict['TransactionDateTime'] = str(time)\n event_dict['ProductID'] = self.product_id\n event_dict['Units'] = 1\n event_dict['Price'] = round(float(self.price), 2)\n return event_dict\n else:\n return None # this return value indicates this item is sold out\n\n\n def end_of_day(self):\n ''' Receive shipments, remove expired products, write inventory record, and reset daily event tallies '''\n ''' Receive a shipment, if appropriate '''\n time_elapsed = pd.to_timedelta('1 days')\n current_write_date = self.last_write_date + time_elapsed\n if (current_write_date - self.start_date).days % self.shipment_frequency.days == 0:\n self.arrivals = self.shipment_size\n self.inventory.append([current_write_date + self.shelf_life, self.shipment_size])\n self.last_write_date = current_write_date\n \n ''' Remove any products now expired '''\n self.spoilages = sum([i[1] for i in self.inventory if i[0] <= self.last_write_date])\n self.inventory = [i for i in self.inventory if i[0] > self.last_write_date]\n \n ''' Write and reset sale/loss/arrival tallies '''\n inventory_summary = self.write_inventory_summary()\n self.arrivals = 0\n self.sales = 0\n self.losses = 0\n self.spoilages = 0\n return inventory_summary\n \n def write_inventory_summary(self):\n ''' Create a JSONable description of the inventory on this date '''\n inventory_dict = {}\n inventory_dict['ProductID'] = str(self.product_id)\n inventory_dict['Arrivals'] = int(self.arrivals)\n inventory_dict['Sales'] = int(self.sales)\n inventory_dict['Losses'] = int(self.losses)\n inventory_dict['Spoilages'] = int(self.spoilages)\n current_inventory = []\n for i in self.inventory:\n entry_dict = {}\n entry_dict['ExpiryDateTime'] = str(i[0])\n entry_dict['Units'] = int(i[1])\n current_inventory.append(entry_dict)\n inventory_dict['CurrentInventory'] = current_inventory\n return inventory_dict\n\n\n\n\n\n\n\n\n\nclass Store:\n def __init__(self, description, store_id):\n ''' Create a new store given the full simulation AttributeDescription and StoreID '''\n self.description = description\n self.container = description.hierarchy['BlobRawDataContainer']\n self.store_id = store_id\n \n '''\n Workday length and operating time is not currently a tunable parameter.\n Opening hours are hard-coded as 7 AM - 9 PM, seven days a week\n '''\n self.workday_length = 14./24\n self.opening_time = pd.to_timedelta('7 hours')\n self.closing_time = pd.to_timedelta('21 hours')\n self.todays_sales = []\n \n ''' Create an inventory for each product '''\n self.demand_df = description.demand_df.loc[description.demand_df.StoreID == store_id]\n self.product_ids = self.demand_df['ProductID'].unique()\n first_date = self.demand_df['DateTime'].min()\n self.inventories = {}\n for product_id in self.product_ids:\n row = self.demand_df.loc[(self.demand_df['ProductID'] == product_id) &\n (self.demand_df['DateTime'] == first_date)]\n self.inventories[product_id] = Inventory(description, row.iloc[0])\n \n ''' Now we can safely remove old sales/inv for this store from the rawdata container '''\n for blob in self.description.block_blob_service.list_blobs(self.container):\n if ('inv_store{}_'.format(store_id) in blob.name) or ('sales_store{}_'.format(store_id) in blob.name):\n 
self.description.block_blob_service.delete_blob(self.container, blob.name) \n return\n\n\n def run(self):\n ''' Iterate through the dates, generating sales and loss events '''\n conversion_factor = 7 * self.workday_length\n for StartDate, date_df in self.demand_df.groupby('DateTime', sort=True):\n ''' Find the sales and loss rates for each product in that week '''\n rates = []\n events = []\n for row in date_df.itertuples():\n self.inventories[row.ProductID].update_price(row.Price)\n rates.extend([row.Demand, row.LossRate])\n events.extend([[row.ProductID, True], [row.ProductID, False]])\n ''' Beta is the expectation of inverse time until another event occurs '''\n beta = conversion_factor / sum(rates)\n rate_ids = list(range(len(rates)))\n probabilities = [i / sum(rates) for i in rates]\n \n for i in range(7):\n my_start_date = StartDate + i * pd.to_timedelta('1 days') + self.opening_time\n workday_elapsed = 0.\n \n while True:\n ''' Choose a time elapsed until the next event '''\n workday_elapsed += np.random.exponential(scale=beta)\n if workday_elapsed > self.workday_length:\n break\n \n ''' Choose which event occurred at that time and attempt it '''\n event_id = np.random.choice(a=rate_ids, p=probabilities)\n product_id, is_sale = events[event_id]\n result = self.inventories[product_id].remove_unit(is_sale = is_sale,\n time = my_start_date + pd.to_timedelta(workday_elapsed, unit='d'))\n\n ''' If a sale was successfully attempted (the item was in stock), record the sale '''\n if (result is not None) and is_sale:\n self.todays_sales.append(result)\n self.end_of_day()\n return\n\n\n def poor_mans_zero_truncated_poisson(self, k):\n ''' Draw x>=1 from a Poisson distribution (to determine # of items in a transaction) '''\n result = np.random.poisson(k)\n while (result == 0):\n result = np.random.poisson(k)\n return(result)\n\n\n def group_sales_into_transaction(self, sale_list):\n ''' Group individual item sales records into a single receipt '''\n products = []\n subtotal = 0.\n for sale in sale_list:\n entry_dict = {}\n entry_dict['ProductID'] = sale['ProductID']\n entry_dict['Price'] = sale['Price']\n products.append(entry_dict)\n subtotal += sale['Units'] * sale['Price']\n transaction = {}\n transaction['TransactionDateTime'] = sale_list[-1]['TransactionDateTime'] \n transaction['Subtotal'] = round(subtotal, 2)\n transaction['Tax'] = round(subtotal * 0.07, 2)\n transaction['Total'] = round(subtotal + transaction['Tax'], 2)\n transaction['Products'] = products\n return(transaction)\n\n \n def end_of_day(self):\n ''' Write out sales transactions and inventory for the day '''\n ''' Begin by writing the inventory summary '''\n inventory_summaries = []\n for product_id in self.product_ids:\n inventory_summary = self.inventories[product_id].end_of_day()\n inventory_summaries.append(inventory_summary)\n write_date = self.inventories[self.product_ids[0]].last_write_date.strftime('%Y-%m-%d %H:%M:%S')\n write_date_file_format = self.inventories[self.product_ids[0]].last_write_date.strftime('%Y_%m_%d_%H_%M_%S')\n inventory_dict = {}\n inventory_dict['StoreID'] = int(self.store_id)\n inventory_dict['InventoryDateTime'] = write_date\n inventory_dict['Products'] = inventory_summaries\n inventory_blob_name = 'inv_store{}_{}.json'.format(self.store_id, write_date_file_format)\n inventory_blob_string = json.dumps(inventory_dict, sort_keys=True, indent=4, separators=(',', ': '))\n #self.description.block_blob_service.create_blob_from_text(self.container,\n # inventory_blob_name,\n # 
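
The poor_mans_zero_truncated_poisson method defined just above (used to size each receipt) redraws from Poisson(k) until the result is nonzero, which samples the zero-truncated distribution exactly; each draw succeeds with probability 1 - e^(-k), about 86% per attempt for k = 2, so the loop is cheap. A standalone version with a sanity check (the names and the check are mine):

import math
import numpy as np

def zero_truncated_poisson(k, rng=None):
    # Redraw until the Poisson sample is at least 1.
    rng = rng or np.random.default_rng()
    x = rng.poisson(k)
    while x == 0:
        x = rng.poisson(k)
    return x

samples = [zero_truncated_poisson(2) for _ in range(10000)]
assert min(samples) >= 1
# The truncated mean is k / (1 - exp(-k)), roughly 2.31 for k = 2.
print(sum(samples) / len(samples), 2 / (1 - math.exp(-2)))
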
inventory_blob_string)\n\n ''' Now create the sales summary '''\n sales_dict = {}\n sales_dict['StoreID'] = int(self.store_id)\n sales_dict['SalesLogDateTime'] = write_date\n transactions = []\n idx = 0\n while(idx < len(self.todays_sales)):\n n = self.poor_mans_zero_truncated_poisson(2) # max num of items in next transaction\n transactions.append(self.group_sales_into_transaction(self.todays_sales[idx:min(idx+n,\n len(self.todays_sales))]))\n idx += n\n sales_dict['Transactions'] = transactions\n sales_blob_name = 'sales_store{}_{}.json'.format(self.store_id, write_date_file_format)\n sales_blob_string = json.dumps(sales_dict, sort_keys=True, indent=4, separators=(',', ': '))\n\n self.description.block_blob_service.create_blob_from_text(self.container,\n sales_blob_name,\n sales_blob_string)\n self.todays_sales = []\n return \n''' Local classes for this simulation '''\n\nif __name__ == '__main__':\n description = AttributeDescription()\n description.get_prices()\n description.get_demand()\n store_ids = description.demand_df['StoreID'].unique()\n ####################################output the processed_time_df.csv\n processed_time_df=pd.DataFrame([[description.demand_df.DateTime.min(),description.demand_df.DateTime.max()+pd.to_timedelta('7 days')]],columns=['startdate','enddate'])\n description.block_blob_service.create_blob_from_text(container_name = description.hierarchy['BlobPublicParametersContainer'],blob_name = 'processed_time_df.csv',text = processed_time_df.to_csv(index=False))\n ####################################output the processed_time_df.csv\n for store_id in store_ids:\n my_store = Store(description, store_id)\n my_store.run()\n\n","sub_path":"Manual Deployment Guide/Scripts/Data Simulator Job/RetailDataSimulator.py","file_name":"RetailDataSimulator.py","file_ext":"py","file_size_in_byte":34676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"444329112","text":"\nimport re\nimport string\ndef get_user_id():\n user_id=input(\"Введите id\")\n if len(user_id)!=16:\n return get_user_id()\n if (re.match(r\"\\d{1,16}\", user_id)):\n return user_id\n else:\n return get_user_id()\ndef get_bank():\n user_bank=input(\"Введите банк\")\n if user_bank[0] not in string.ascii_uppercase:\n return get_bank()\n if (re.match(r\"^\\w+[\\w+|\\d+]$\", user_bank)):\n return user_bank\n else:\n return get_bank()\ndef get_user_money():\n user_money=input(\"Введите суму кредита\")\n if (re.match(r\"^\\(\\d+\\.0\\sgrn\\)$\", user_money)):\n return user_money\n else:\n return get_user_money()","sub_path":"validators/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"248732697","text":"\"\"\"Test the vertical, expanded table formatter.\"\"\"\nfrom textwrap import dedent\n\nfrom mycli.output_formatter.expanded import expanded_table\nfrom mycli.encodingutils import text_type\n\n\ndef test_expanded_table_renders():\n results = [('hello', text_type(123)), ('world', text_type(456))]\n\n expected = dedent(\"\"\"\\\n ***************************[ 1. row ]***************************\n name | hello\n age | 123\n ***************************[ 2. 
row ]***************************\n name | world\n age | 456\n \"\"\")\n assert expected == expanded_table(results, ('name', 'age'))\n","sub_path":"test/test_expanded.py","file_name":"test_expanded.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"167365780","text":"#import graph_tool.all as gt\n#import matplotlib.pyplot as plt\nimport networkx as nx\nimport metis\n\n# Load the graph\nht = \"ilmastonmuutos\"\nG = nx.read_gml(ht + \"/\" + ht + \"_retweet_network_giant.gml\")\n\n#(edgecuts, parts) = metis.part_graph(G, 2, ptype=\"rb\", contig=True, objtype=\"cut\", ufactor=445, niter = 100)\n(edgecuts, parts) = metis.part_graph(G, 2, ptype=\"kway\", contig=True, objtype=\"cut\", niter = 100)\ncombined = dict(list(zip(G.nodes(), parts)))\nnx.set_node_attributes(G, combined, 'comx')\nnx.write_graphml(G, ht + \"_with_com_metis.graphml\")\n#G_int = nx.convert_node_labels_to_integers(G)\n\n#print(nx.info(G))\n#print(list(G.edges)[:10])\n\n#d = {}\n#with open(\"testaa.txt\") as f:\n# for line in f:\n# (key, val) = line.split()\n# d[int(key)] = val\n\n\n#print(d)\n\n#nx.set_node_attributes(G_int, d, \"section\")\n#nx.write_gml(G_int, \"final_test.gml\")\n\n#nx.write_gml(G_int, \"climate_int.gml\")\n#nx.write_edgelist(G_int, \"test.edgelist\", data=False)\n#G_int = nx.convert_node_labels_to_integers(G)\n#nx.write_gml(G_int, \"fixed.gml\")\n#nx.write_edgelist(G_int, \"fixed_edgelist.csv\", delimiter = \",\", data=False)\n#print(nx.info(G_int))\n#G = metis.example_networkx()\n\n# Compute the \"ground truth\"\n#state = gt.minimize_blockmodel_dl(g, B_min=2, B_max=2, deg_corr=True, verbose=True)\n#e = state.get_matrix()\n#print(e)\n#matshow(e.todense())\n#savefig(\"football-edge-counts.svg\")\n#state = state.copy(B=g.num_vertices()) NOT SURE WHAT THIS DOES\n\n#dS, nattempts, nmoves = state.mcmc_sweep(niter=1000)\n\n#print(\"Change in description length:\", dS)\n#print(\"Number of accepted vertex moves:\", nmoves)\n\n# Visualize and save the block model\n\n#state.draw()\n\n# Get the node membership\n#b = state.get_blocks()\n\n# New node property\n#community = g.new_vertex_property(\"int16_t\")\n\n#for v in g.vertices(): \n# community[v] = b[v]\n\n#g.vertex_properties[\"community\"] = community\n#print(community)\n#g.list_properties()\n#g.save(ht + \"_with_com.graphml\")","sub_path":"metis_wrapper_for_partition.py","file_name":"metis_wrapper_for_partition.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"592511234","text":"\nimport requests\nimport json\nupdateid=19382941\nwhile True:\n params={'offset':updateid}\n r = requests.get('https://api.telegram.org/bot125565817:AAHOkcqmnqq2B7PHYWlEINq-uwvdJBO1Fnk/getUpdates',params)\n\n if len(r.text) > 23:\n message ='testtest'\n r=r.json()\n\n mes=r['result'][0]['message']\n updateid=r['result'][0][\"update_id\"]+1\n print(mes)\n #print('got message \"',mes['text'], '\" from @',mes['from']['username'], sep='')\n text = mes['text']\n print(text=='/help' or text=='/start')\n if text == '/help':\n f=open('help.txt',\"r\")\n message = f.read()\n print(\"help\")\n f.close()\n elif text == '/missions':\n f=open('missions.txt')\n message = f.read()\n print(\"help\")\n f.close()\n elif text == '/1str':\n f=open('1str.txt')\n message = f.read()\n print(\"1str\")\n f.close()\n elif text == '/2str':\n f=open('2str.txt')\n message = f.read()\n f.close()\n elif text == '/3str':\n 
f=open('3str.txt')\n            message = f.read()\n            f.close()\n        elif text == '/4str':\n            f=open('4str.txt')\n            message = f.read()\n            f.close()\n        elif text == '/5str':\n            f=open('5str.txt')\n            message = f.read()\n            f.close()\n        elif text == '/6str':\n            f=open('6str.txt')\n            message = f.read()\n            f.close()\n        elif text.find('str_') != -1:\n            num=text[6:len(text)]\n            strn=text[1]\n            flag=1\n            err=\"Not filled yet\"\n            try:\n                multiple_files = [('photo', ('1.png', open('./'+strn+'/'+num+'.png', 'rb'), 'image/png'))]\n            except FileNotFoundError:\n                message= err\n                flag=0\n            if flag:\n\n                multiple_files = [('photo', ('1.png', open('./'+strn+'/'+num+'.png', 'rb'), 'image/png'))]\n                photoparams={'chat_id':mes['from']['id']}\n                f=open('./'+strn+'/'+num+'.txt','r')\n                message = f.read()\n\n                f.close()\n                g=requests.post('https://api.telegram.org/bot125565817:AAHOkcqmnqq2B7PHYWlEINq-uwvdJBO1Fnk/sendPhoto',photoparams,files=multiple_files)\n\n        if message=='testtest':\n            message='got message \"'+str(mes['text'])+'\" from @'+str(mes['from']['username'])\n        sendparams={'chat_id':mes['from']['id'],'text':message}\n\n\n        g=requests.get('https://api.telegram.org/bot125565817:AAHOkcqmnqq2B7PHYWlEINq-uwvdJBO1Fnk/sendMessage',sendparams)\n","sub_path":"telbot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"88137989","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\n###########################################################\n# Copyright 2011 Daniel 'grindhold' Brendle and Team\n#\n# This file is part of Kitchen of Awesome.\n#\n# Kitchen of Awesome is free software: you can redistribute it and/or \n# modify it under the terms of the GNU General Public License \n# as published by the Free Software Foundation, either \n# version 3 of the License, or (at your option) any later \n# version.\n#\n# Kitchen of Awesome is distributed in the hope that it will be \n# useful, but WITHOUT ANY WARRANTY; without even the implied \n# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR \n# PURPOSE. See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public \n# License along with Kitchen of Awesome. 
\n# If not, see http://www.gnu.org/licenses/.\n###########################################################\nfrom datetime import datetime\n\nimport pymongo\n\n\ndef setApplicationReference(app):\n global APPLICATION\n APPLICATION = app\n\nAPPLICATION = None\n\n\n\"\"\"\nKoaObject is the root object for all data saved in Koa\nIt maps all data to the database mongodb\n\"\"\"\nclass KoaObject():\n CON = pymongo.Connection()\n FORBIDDEN_KEYWORDS = [\"application\",\"id\", \"updateCallbacks\"]\n CLASSES = {}\n OBJECTSTORE = {}\n\n \"\"\"\n registerClass registers a class given as parameter to KoaObject,\n theClass will be needed in order to load an object of this class\n from the database and restore it.\n \"\"\"\n @classmethod\n def registerClass(cls,newclass):\n KoaObject.CLASSES[newclass.__name__] = newclass\n\n \"\"\"\n load loads a KoaObject from the database by it's mongodb ObjectId\n and stores it in the local objectstore.\n If an object by the given ObjectId id exists in the ObjectStore,\n the existing instance will be returned\n \"\"\"\n @classmethod\n def load(cls,objid):\n if not KoaObject.OBJECTSTORE.has_key(objid):\n res = APPLICATION.getStorage().find_one({'_id':objid})\n assert res is not None, \"Object not Known\"\n obj = KoaObject.CLASSES[res[\"_className\"]]()\n dbid = res['_id']\n del(res['_id'])\n del(res['_className'])\n obj.__dict__ = res\n obj.id = dbid\n obj.application = APPLICATION\n KoaObject.OBJECTSTORE[objid] = obj\n return KoaObject.OBJECTSTORE[objid]\n\n \"\"\"\n unload removes this KoaObject from the ObjectStore\n \"\"\"\n def unload(self):\n if self.id is not None:\n del(KoaObject.OBJECTSTORE[self.id])\n del(self)\n\n \"\"\"\n The Constructor \n \"\"\"\n def __init__(self):\n self.id = None\n self.application = APPLICATION\n self.updateCallbacks = []\n \n \"\"\"\n _validateListTypes recursively validates wheter a list consists of datatypes\n that mongodb can handle.\n \"\"\" \n def _validateListTypes(self,list):\n result=True\n for el in list:\n if type(el) == list and self._validateListTypes(el):\n continue\n if type(el) not in (int,str,float,long,datetime,None, pymongo.objectid.ObjectId): \n result = False\n break\n return result\n\n \"\"\"\n _cleanDict cleans a given dictionary from all datatypes that do not\n belong into the database and returns it\n If this algorithm finds an object, in an attribute \"attrib1\", it will\n determine the id of the object, store it in \"attrib1_id\" and then delete\n \"attrib1\"\n \"\"\"\n def _cleanDict(self,uncleanDict):\n for keyword in self.FORBIDDEN_KEYWORDS:\n if uncleanDict.has_key(keyword):\n del(uncleanDict[keyword])\n for key, el in uncleanDict.items():\n if type(el) == list:\n if self._validateListTypes(el):\n continue\n if type(el) not in (int,str,float,long,datetime,None):\n if hasattr(el,'id') and type(el.id) == pymongo.objectid.ObjectId:\n uncleanDict[key+'_id'] = el.id\n del(uncleanDict[key])\n return uncleanDict\n\n \"\"\"\n store stores this object in the database\n \"\"\"\n def store(self):\n saveDict = {}\n saveDict.update(self.__dict__)\n if self.id is None:\n saveDict['_className'] = self.__class__.__name__\n saveDict = self._cleanDict(saveDict)\n self.id = APPLICATION.getStorage().insert(saveDict)\n KoaObject.OBJECTSTORE[self.id] = self\n else:\n saveDict = self._cleanDict(saveDict)\n APPLICATION.getStorage().update({'_id':self.id},{'$set':saveDict})\n return self.id\n\n \"\"\"\n remove this object from the database and/or from RAM\n \"\"\"\n\n def __del__(self,pyOnly=False):\n 
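
KoaObject.load above is an identity map: instances are cached in OBJECTSTORE keyed by their MongoDB ObjectId, so loading the same id twice returns the very same Python object rather than a second copy. The pattern in isolation, with a fetch callable standing in for the real database lookup:

class IdentityMap:
    _cache = {}

    @classmethod
    def load(cls, obj_id, fetch):
        # fetch(obj_id) touches the backing store only on a cache miss.
        if obj_id not in cls._cache:
            cls._cache[obj_id] = fetch(obj_id)
        return cls._cache[obj_id]

    @classmethod
    def unload(cls, obj_id):
        cls._cache.pop(obj_id, None)

a = IdentityMap.load(42, fetch=lambda i: {"id": i})
b = IdentityMap.load(42, fetch=lambda i: {"id": i})
assert a is b  # same instance, not merely equal
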
del(KoaObject.OBJECTSTORE[self.id])\n        if not pyOnly:\n            APPLICATION.getStorage().remove(self.id)\n\n    \"\"\"\n    this behavior checks if the called attribute has a corresponding id stored\n    in the object. if yes, it loads the object, returns it and stores it as the attribute\n    \"\"\"\n    def __getattr__(self, name):\n        if self.__dict__.has_key(name+'_id'):\n            self.__dict__[name]=KoaObject.load(self.__dict__[name+'_id'])\n            return self.__dict__[name]\n        else:\n            raise AttributeError()\n\n\n    \"\"\"\n    add a function to call in the case that the object changed,\n    for example to update gui if a value changed\n    \"\"\"\n    def addCallback(self, callback):\n        if callback not in self.updateCallbacks:\n            self.updateCallbacks.append(callback)\n    \n\n    \"\"\"\n    removes a callbackfunction from the object\n    \"\"\"\n    def removeCallback(self, callback):\n        if callback in self.updateCallbacks:\n            self.updateCallbacks.remove(callback)\n    \n\n    \"\"\"\n    updated() must be called, when the object has changed.\n    \"\"\"\n    def updated(self):\n        for cb in self.updateCallbacks:\n            cb()\n","sub_path":"src/data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"634118150","text":"from scipy.interpolate import interp1d\nfrom numpy import *\n\n# routines for accurate and stable representation of 1-e^-tau, (1-e^-tau)/tau\n\n# smooth factor for optical depth\ndef taufun(tau, taumin, taumax):\n    '''\n    calculates 1-exp(-x) in a reasonably smooth way trying to avoid round-off errors for small and large x\n    '''\n    wtrans = where(tau<taumin)\n    wopaq = where(tau>taumax)\n    wmed = where((tau>=taumin) & (tau<=taumax))\n    tt = copy(tau)\n    if(size(wtrans)>0):\n        tt[wtrans] = (tau[wtrans]+abs(tau[wtrans]))/2.\n    if(size(wopaq)>0):\n        tt[wopaq] = 1.\n    if(size(wmed)>0):\n        tt[wmed] = 1. 
- exp(-tau[wmed])\n return tt\n\ndef tratfac(x, taumin, taumax):\n '''\n a smooth and accurate smooth version of (1-e^{-x})/x\n '''\n xmin = taumin ; xmax = taumax # limits the same as for optical depth\n nx = size(x)\n tt = copy(x)\n if nx>1:\n w1 = where(x<= xmin) ; w2 = where(x>= xmax) ; wmed = where((x < xmax) & (x > xmin))\n if(size(w1)>0):\n tt[w1] = 1.\n if(size(w2)>0):\n tt[w2] = 1./x[w2]\n if(size(wmed)>0):\n tt[wmed] = (1.-exp(-x[wmed]))/x[wmed]\n wnan=where(isnan(x))\n if(size(wnan)>0):\n tt[wnan] = 0.\n print(\"trat = \"+str(x.min())+\"..\"+str(x.max()))\n ip = input('trat')\n return tt\n else:\n if x <= xmin:\n return 1.\n else:\n if x>=xmax:\n return 1./x\n else:\n return (1.-exp(-x))/x\n \n","sub_path":"tauexp.py","file_name":"tauexp.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"224199695","text":"#!/usr/bin/env python2.7\n'''\nannonex2embl wrapper\n'''\n\n#####################\n# IMPORT OPERATIONS #\n#####################\n\nimport sys\nimport os\n\n# Add specific directory to sys.path in order to import its modules\n# NOTE: THIS RELATIVE IMPORTING IS AMATEURISH.\n# NOTE: COULD THE FOLLOWING IMPORT BE REPLACED WITH 'import annonex2embl'?\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'annonex2embl'))\n\n# IMPORTANT: TFL must be after \"sys.path.append\"\nimport Annonex2emblMain as AN2EMBLMain\nimport argparse\n\n###############\n# AUTHOR INFO #\n###############\n\n__author__ = 'Michael Gruenstaeudl '\n__copyright__ = 'Copyright (C) 2016-2019 Michael Gruenstaeudl'\n__info__ = 'annonex2embl'\n__version__ = '2019.05.15.1500'\n\n#############\n# DEBUGGING #\n#############\n\nimport pdb\n# pdb.set_trace()\n\n####################\n# GLOBAL VARIABLES #\n####################\n\n########\n# TODO #\n########\n\n''' Include selection on topol of submission (linear [default] or circular) '''\n\n############\n# ARGPARSE #\n############\n\nclass CLI():\n\n def __init__(self):\n self.client()\n\n def client(self):\n\n parser = argparse.ArgumentParser(description=\" -- \".join([__author__, __copyright__, __info__, __version__]))\n\n ### REQUIRED ###\n parser.add_argument('-n',\n '--nexus',\n help='absolute path to infile; infile in NEXUS format; Example: /path_to_input/test.nex',\n default='/home/username/Desktop/test.nex',\n required=True)\n\n parser.add_argument('-c',\n '--csv',\n help='absolute path to infile; infile in CSV format; Example: /path_to_input/test.csv',\n default='/home/username/Desktop/test.csv',\n required=True)\n\n parser.add_argument('-d',\n '--descript',\n help='text string characterizing the DNA alignment; Example: \"chloroplast trnR-atpA intergenic spacer\"',\n default='[PLACEHOLDER]',\n required=True)\n\n parser.add_argument('-e',\n '--email',\n help='Your email address; Example: \"my.username@gmail.com\"',\n default='my.username@gmail.com',\n required=True)\n\n parser.add_argument('-a',\n '--authors',\n help='Author names; Example: \"Gruenstaeudl M.; LastName I.\"',\n default='Gruenstaeudl M.; LastName I.',\n required=True)\n\n parser.add_argument('-o',\n '--outfile',\n help='absolute path to outfile; outfile in EMBL format; Example: /path_to_output/test.embl',\n default='/home/username/Desktop/test.embl',\n required=True)\n\n ### OPTIONAL ###\n parser.add_argument('-ms',\n '--manifeststudy',\n help='Name of the study which appears in the manifest file',\n default='',\n required=False)\n\n parser.add_argument('-mn',\n '--manifestname',\n 
help='Name which appears in the manifest file',\n required=False)\n\n parser.add_argument('-md',\n '--manifestdescription',\n help='Description for the manifest file',\n default='',\n required=False)\n\n parser.add_argument('--taxcheck',\n help='A logical; Shall taxon names be checked against NCBI Taxonomy?',\n default='False',\n required=False)\n\n parser.add_argument('--linemask',\n help='A logical; Shall the ID and the AC lines be masked for EntryUpload submissions?',\n default='False',\n required=False)\n\n parser.add_argument('--topol',\n help='`circular` or `linear`.',\n default='linear',\n required=False)\n\n parser.add_argument('--taxdiv',\n help='Any of the three letter codes specified in section 3.2 of the EMBL user manual.',\n default='PLN',\n required=False)\n\n parser.add_argument('--collabel',\n #metavar='column specifying sequence names',\n help='Name of column that specifies the sequence names.',\n default='isolate',\n required=False)\n\n parser.add_argument('--ttable',\n #metavar='translation table',\n help='Number of the translation table to translate coding regions with.'\\\n 'For details, see: http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi',\n default='11',\n required=False)\n\n parser.add_argument('--organelle',\n #metavar='translation table',\n help='Type of membrane-bound intracellular structure from which the sequence was obtained.'\\\n 'For details, see: http://www.insdc.org/files/feature_table.html',\n default='plastid',\n required=False)\n\n parser.add_argument('--seqvers',\n #metavar='sequence version',\n help='An integer',\n default='1',\n required=False)\n\n parser.add_argument('--version',\n help='Print version information and exit',\n action='version',\n version='%(prog)s ' + __version__)\n\n args = parser.parse_args()\n\n\n AN2EMBLMain.annonex2embl( args.nexus,\n args.csv,\n args.descript,\n args.email,\n args.authors,\n args.outfile,\n\n args.manifeststudy,\n args.manifestname,\n args.manifestdescription,\n args.taxcheck,\n args.linemask,\n args.topol,\n args.taxdiv,\n args.collabel,\n args.ttable,\n args.organelle,\n args.seqvers )\n\n########\n# MAIN #\n########\n\ndef start_annonex2embl():\n CLI()\n","sub_path":"annonex2embl/CLIOps.py","file_name":"CLIOps.py","file_ext":"py","file_size_in_byte":7200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"373705507","text":"'''Write a recursive program which accept number from user and return\r\nsummation of its digits.\r\nInput : 879\r\nOutput : 24'''\r\n\r\n\r\ndef FindSum(N, sum):\r\n if N == 0:\r\n return sum\r\n sum = sum + ( N % 10)\r\n return FindSum(N//10, sum)\r\n\r\ndef main():\r\n N = int(input(\"Enter A Number: \"))\r\n Ans = FindSum(N,0)\r\n print(\"Sum is:\",Ans)\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"Assignment_5_Python/Assignment_5_4.py","file_name":"Assignment_5_4.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"570370959","text":"import time\nimport pytest\nfrom selenium.webdriver.common.keys import Keys\nfrom BaseTest import BaseTest\nfrom ClientsManagement.Locators.locators import BonusAccout\nfrom ClientsManagement.CustomMethods import CustomMethod\nimport allure\nfrom allure_commons.types import Severity\n\n@allure.severity(Severity.MINOR)\nclass TestBonusAccount(BaseTest):\n\n @allure.title('Бонусный счет-Типы операций')\n def test_operations_kinds(self):\n driver=self.driver\n cm = CustomMethod(driver)\n 
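
The FindSum recursion in the digit-sum record above is tail-recursive, but CPython performs no tail-call elimination, so inputs with roughly a thousand digits or more would hit the recursion limit. An iterative equivalent gives the same result without that risk:

def digit_sum(n):
    # Iterative form of FindSum(n, 0) from the record above.
    n = abs(n)
    total = 0
    while n > 0:
        total += n % 10
        n //= 10
    return total

assert digit_sum(879) == 24  # the example given in that record's docstring
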
cm.wait_visibility(BonusAccout.operation_kind)\n cm.s(BonusAccout.operation_kind).click()\n\n with allure.step('Добавляю новый тип операции'):\n cm.s(BonusAccout.add_operation).click()\n cm.s(BonusAccout.identifire).send_keys(\"5\")\n with allure.step('Проверяю кнопку все тайтлы'):\n cm.s(BonusAccout.all_titles_btn).click()\n all_titles=driver.find_elements_by_xpath(BonusAccout.all_titles)\n languages=21\n assert len(all_titles)==languages,\"Кол-во возможных языков не равно {}\".format(languages)\n for i in range(0,languages):\n cm.s(\"//*[@class='multilang-edit']/div/span\").click()\n assert len(driver.find_elements_by_xpath(BonusAccout.all_titles)) == 1, \"Не удалилсь все языки\"\n with allure.step('Добавляю ru title'):\n cm.wait_visibility(BonusAccout.ru_title)\n cm.s(BonusAccout.ru_title).click()\n assert cm.check_exists_by_xpath(\"//*[@name='rus']\"),\" Не появилось поле с тайтлом РУ\"\n\n time.sleep(1)\n cm.s(\"//*[@name='rus']\").send_keys(\"Autotest title\")\n cm.s(BonusAccout.add_form).click()\n with allure.step('Проверяю уникальность Id'):\n cm.flash_check('error')\n\n cm.s(BonusAccout.identifire).send_keys(\"6\")\n cm.s(BonusAccout.add_form).click()\n #time.sleep(2)\n cm.s(\"//*[@class='toolbar__btn _style_blue']\").click()\n assert len(driver.find_elements_by_xpath(\"//*[@class='curtain__list']/li\"))==6,\"Новый тип не добавился\"\n with allure.step('Удаляю добавленный тип'):\n cm.s(\"//*[@class='curtain__list']/li[1]\").click()\n self.delete_form(cm)\n\n @allure.title('Бонусный счет-Условия уровня счета')\n @pytest.mark.flaky(reruns=3, reruns_delay=1)\n def test_account_conditions(self):\n driver=self.driver\n cm = CustomMethod(driver)\n cm.wait_visibility(BonusAccout.account_conditions)\n cm.s(BonusAccout.account_conditions).click()\n\n with allure.step('Добавляю новое условие'):\n cm.s(BonusAccout.add_operation).click()\n assert len(driver.find_elements_by_xpath(\"//*[@class[contains(.,'_state_error')]]\"))==5,\"Не все элементы не обязательны\"\n with allure.step('Выбираю валюту'):\n cm.s(BonusAccout.currency_dropdown).click()\n cm.s(BonusAccout.dropdown_input).send_keys(\"рубли\")\n time.sleep(1)\n cm.s(BonusAccout.dropdown_click).click()\n with allure.step('Заполняю условия с проверкой сумм'):\n for i in range(1,4):\n cm.s(BonusAccout.currency_list %i).click()\n cm.s(BonusAccout.currency_list %i).send_keys(Keys.BACKSPACE)\n if i==1:\n cm.s(BonusAccout.currency_list %i).send_keys(\"10\")\n elif i==2:\n cm.s(BonusAccout.currency_list % i).send_keys(\"9\")\n element = driver.find_element_by_xpath(BonusAccout.currency_list % i)\n if \"_state_error\" not in element.get_attribute(\"class\"):\n raise ValueError(\"Нет подсказки что сумма золотого выше серебрянного\")\n cm.s(BonusAccout.currency_list % i).click()\n cm.s(BonusAccout.currency_list % i).send_keys(Keys.BACKSPACE)\n cm.s(BonusAccout.currency_list % i).send_keys(\"10.01\")\n else:\n cm.s(BonusAccout.currency_list % i).send_keys(\"10.01\")\n element = driver.find_element_by_xpath(BonusAccout.currency_list % i)\n if \"_state_error\" not in element.get_attribute(\"class\"):\n raise ValueError(\"Нет подсказки что сумма платины выше золотого\")\n time.sleep(1)\n cm.s(BonusAccout.currency_list % str(i-1)).click()\n cm.s(BonusAccout.currency_list % i).click()\n cm.s(BonusAccout.currency_list % i).send_keys(Keys.BACKSPACE)\n cm.s(BonusAccout.currency_list %i).send_keys(\"10.99\")\n with allure.step('Сохраняю форму'):\n cm.s(BonusAccout.add_form).click()\n cm.flash_check()\n with allure.step('Проверяю клонирование. 
Должен быть уникальный айди'):\n cm.s(BonusAccout.clone_form).click()\n cm.s(BonusAccout.currency_dropdown).click()\n cm.s(BonusAccout.dropdown_input).send_keys(\"рубли\")\n time.sleep(1)\n element=driver.find_element_by_xpath(\"//*[@id='drop-down-items']/div/div\")\n if \"_state_disabled\" not in element.get_attribute(\"class\"):\n raise ValueError(\"Возможно повторно выбрать рубль\")\n cm.s(BonusAccout.cancel_form).click()\n with allure.step('Удаляю создананное условие'):\n cm.s(\"//*[text()='Российские рубли']\").click()\n self.delete_form(cm)\n cm.flash_check()\n\n\n def delete_form(self, cm):\n cm.s(BonusAccout.delete_form).click()\n time.sleep(1)\n cm.wait_visibility(BonusAccout.modal_yes)\n cm.s(BonusAccout.modal_yes).click()\n\n def check_currency_value(self, cm, driver, i,start_value):\n cm.s(BonusAccout.currency_list % i).send_keys(start_value)\n element = driver.find_element_by_xpath(BonusAccout.currency_list % i)\n if \"_state_error\" not in element.get_attribute(\"class\"):\n raise ValueError(\"Нет подсказки что сумма золотого выше серебрянного\")\n cm.s(BonusAccout.currency_list % i).click()\n cm.s(BonusAccout.currency_list % i).send_keys(Keys.BACKSPACE)\n","sub_path":"ClientsManagement/BonusAccount/test_bonus_account.py","file_name":"test_bonus_account.py","file_ext":"py","file_size_in_byte":6955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"383611809","text":"#!/usr/bin/env python3\nfrom time import sleep\nfrom mailroom import create_table\n\ndefault_donor_dict = {'TOOGII DASHDAVAA': {\"donation\": 3, \"donation_amnt\": 30000},'MARK ZACHERBERG': {\"donation\": 1, \"donation_amnt\": 1000}, 'JEFF BEZOS': {\"donation\": 3, \"donation_amnt\": 15000}, 'BILL GATES': {\"donation\": 1, \"donation_amnt\": 500}, 'LARRY PAGE': {\"donation\": 2, \"donation_amnt\": 10000}}\nchoices = ('Send a Thank You', 'Create Report')\ndonations = {}\ndonators_obj_list = []\ndonators_name_list = []\n\nclass create_donor:\n def __init__(self, name: str, donation_amnt: int, donation: int=1):\n self.name = name\n self.donation = donation\n self.donation_amnt = donation_amnt\n\n\n def add_donor(self, donation_amnt):\n self.donation_amnt = self.donation_amnt + donation_amnt\n self.donation = self.donation + 1\n\n\n def email_composer(self,donation_amnt):\n self.email = f\"Dear {self.name}, \\n\\nThank you for your generous gift of $ {donation_amnt} to our organization. 
We are thrilled to have your support.\\nThrough your donation we have been able to accomplish our charity work around the world.\\n \\nThank you\"\n print(\"\\n\\n\")\n print(\"=\"*30)\n print(self.email)\n print(\"=\"*30)\n print(\"\\n\\n\")\n\n\ndef create_donor_object(name, donation_amnt, donation=1, default=False):\n # If the donor is new, will create new object,\n if name not in donators_name_list:\n donators_name_list.append(name)\n if default == True: # Checks if the donor is default/called from main(), if yes we have to update donation\n donators_obj_list.append(create_donor(name, donation_amnt, donation))\n else:\n donators_obj_list.append(create_donor(name, donation_amnt))\n else:\n donators_obj_list[donators_name_list.index(name)].add_donor(donation_amnt) # If donor already exist in the list, update donation amount\n\n\ndef create_report(donators_obj_list):\n # Preparing data for the report table\n for obj in donators_obj_list:\n donations.update({obj.name: [obj.donation, obj.donation_amnt, int(obj.donation_amnt/obj.donation)]})\n column = ['Donor Name', 'Num Gifts', 'Total Given', 'Average Gift']\n\n create_table(column,donations) # Call this function to create table. Pass column and list nested dict\n\n\ndef send_a_thank_you():\n # Prompting for the donor name and amount of donations\n while True:\n name = str(input(\"Enter the name of the Donor: \"))\n if name.upper() == \"QUIT\":\n return None\n if name.upper() == \"LIST\":\n for donor in donators_name_list:\n print(donor)\n continue\n donation_amnt = int(input(\"Enter the donation amount for the {}: \".format(name))) # asking for donation amount\n create_donor_object(name, donation_amnt) # Creating donor object\n donators_obj_list[donators_name_list.index(name)].email_composer(donation_amnt) # Sending an appreciation email to the Donor\n return None\n\n\ndef main():\n # Add default donors:\n for name in default_donor_dict:\n create_donor_object(name, default_donor_dict[name][\"donation_amnt\"], default_donor_dict[name][\"donation\"], default=True)\n print(donators_obj_list)\n print(donators_obj_list[0].name, donators_obj_list[0].donation_amnt,donators_obj_list[0].donation)\n while True:\n # Welcome screen with prompt:\n print('\\n\\n\\n\\t Welcome to Donations!\\n\\n')\n print('\\t1. {}\\n\\t2. 
{}'.format(*choices))\n choice = input(\"\\n\\nPlease Enter Your choice(1 or 2): \")\n sleep(1)\n if choice == '1':\n print(\"\\nYour choice was {}\\n\".format(choices[0]))\n send_a_thank_you()\n elif choice == '2':\n print(\"\\nYour choice was {}\\n\".format(choices[1]))\n create_report(donators_obj_list)\n else:\n print(\"Please enter a valid choice!\")\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"students/toogii/lesson03/mailroom_using_class.py","file_name":"mailroom_using_class.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"33989486","text":"#Faça um programa para a leitura de duas notas parciais de um aluno.\n# O programa deve calcular a média alcançada por aluno e apresentar\n# A mensagem \"Aprovado\", se a média alcançada for maior ou igual a sete;\n# A mensagem \"Reprovado\", se a média for menor do que sete;\n# A mensagem \"Aprovado com Distinção\", se a média for igual a dez.\ndef alunos():\n ''\nprint('Media das Notas dos Alunos ')\naluno = input('Digite o nome do Aluno : ')\nnota1 = float(input('Digite a primeira nota do aluno : '))\nnota2 = float(input('Digite a segunda note do aluno : '))\nalunos = [aluno,nota1,nota2]\nmedia = (((alunos.__getitem__(1))+(alunos.__getitem__(2)))/((alunos.__len__())-1))\nprint(f'A média de {alunos.__getitem__(0)} é {media}')\nif media == 10:\n print(f'{alunos.__getitem__(0)} Aprovado com Distinçãoo')\nelif media < 7:\n print(f'{alunos.__getitem__(0)} Reprovado')\nelif media >= 7:\n print(f'{alunos.__getitem__(0)} Aprovado')\n\n","sub_path":"estruturadedecisao/decisao_5.py","file_name":"decisao_5.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"634679015","text":"\n\nfrom xai.brain.wordbase.nouns._intensifier import _INTENSIFIER\n\n#calss header\nclass _INTENSIFIERS(_INTENSIFIER, ):\n\tdef __init__(self,): \n\t\t_INTENSIFIER.__init__(self)\n\t\tself.name = \"INTENSIFIERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"intensifier\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_intensifiers.py","file_name":"_intensifiers.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"485183955","text":"# Copyright (C) 2020 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"Tests of assessment notifications.\"\"\"\nimport collections\nimport ddt\nimport mock\n\nfrom ggrc import db\nfrom ggrc.notifications import common\nfrom ggrc.models import Person, Assessment, AccessControlRole\nfrom ggrc.models import all_models\nfrom integration.ggrc import api_helper\nfrom integration.ggrc import TestCase\nfrom integration.ggrc.access_control import acl_helper\nfrom integration.ggrc.generator import ObjectGenerator\nfrom integration.ggrc.models import factories\n\n\n@ddt.ddt\nclass TestAssessmentNotification(TestCase):\n \"\"\"Tests of assessment notifications\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up test cases for all tests.\"\"\"\n cls.primary_role_id = AccessControlRole.query.filter_by(\n object_type=\"Assessment\",\n name=\"Primary Contacts\"\n ).first().id\n\n cls.secondary_role_id = AccessControlRole.query.filter_by(\n object_type=\"Assessment\",\n name=\"Secondary Contacts\"\n ).first().id\n\n def setUp(self):\n super(TestAssessmentNotification, self).setUp()\n 
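
The mailroom script above keeps two parallel lists (donators_name_list and donators_obj_list) and cross-indexes them with list.index(name) on every update. A dictionary keyed by donor name expresses the same registry with O(1) lookups and no index bookkeeping; the class and function names in this refactor sketch are illustrative:

class Donor:
    def __init__(self, name, amount):
        self.name = name
        self.donations = 1
        self.total = amount

    def add_donation(self, amount):
        self.donations += 1
        self.total += amount

donors = {}  # name -> Donor, replacing the two parallel lists

def record_donation(name, amount):
    if name in donors:
        donors[name].add_donation(amount)
    else:
        donors[name] = Donor(name, amount)

record_donation("JEFF BEZOS", 15000)
record_donation("JEFF BEZOS", 5000)
assert donors["JEFF BEZOS"].total == 20000
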
self.client.get(\"/login\")\n self.api = api_helper.Api()\n self.auditor = Person.query.filter_by(email=\"user@example.com\").one()\n self.api.set_user(self.auditor)\n audit = factories.AuditFactory()\n assignee_acr = all_models.AccessControlRole.query.filter_by(\n object_type=\"Assessment\",\n name=\"Assignees\",\n ).first()\n\n self.api.post(Assessment, {\n \"assessment\": {\n \"title\": \"Assessment1\",\n \"context\": None,\n \"audit\": {\n \"id\": audit.id,\n \"type\": \"Audit\",\n },\n \"access_control_list\": [\n acl_helper.get_acl_json(self.primary_role_id, self.auditor.id),\n acl_helper.get_acl_json(assignee_acr.id, self.auditor.id)\n ],\n \"status\": \"In Progress\",\n }\n })\n\n self.assessment = Assessment.query.filter_by(title=\"Assessment1\").one()\n\n self.cad1 = factories.CustomAttributeDefinitionFactory(\n definition_type=\"assessment\",\n title=\"ca1\",\n )\n factories.CustomAttributeValueFactory(\n custom_attribute=self.cad1,\n attributable=self.assessment\n )\n\n self.cad3 = factories.CustomAttributeDefinitionFactory(\n definition_type=\"assessment\",\n attribute_type=\"Checkbox\",\n title=\"ca3\",\n )\n factories.CustomAttributeValueFactory(\n custom_attribute=self.cad3,\n attributable=self.assessment\n )\n\n db.engine.execute(\n \"\"\"\n UPDATE notifications\n SET sent_at = NOW()\n \"\"\"\n )\n\n def test_common_attr_change(self):\n \"\"\"Test notification when common attribute value is changed\"\"\"\n response = self.api.put(self.assessment, {\"test_plan\": \"steps\"})\n self.assert200(response)\n\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"user@example.com\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"ASSESSMENT PROCEDURE\"],\n (\"steps\", \"\")\n )\n\n def test_custom_attr_change(self):\n \"\"\"Test notification when custom attribute value is changed\"\"\"\n custom_attribute_values = [{\n \"custom_attribute_id\": self.cad1.id,\n \"attribute_value\": \"test value\",\n }]\n response = self.api.put(self.assessment, {\n \"custom_attribute_values\": custom_attribute_values\n })\n self.assert200(response)\n\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"user@example.com\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"].keys(), [\"CA1\"])\n\n def test_description_custom_change(self):\n \"\"\"Test notification updated data when custom attribute value is changed\"\"\"\n response = self.api.put(self.assessment, {\n \"title\": \"test_title\",\n \"description\": \"test_description\"\n })\n self.assert200(response)\n\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"user@example.com\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"TITLE\"],\n (\"test_title\", \"Assessment1\")\n )\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"DESCRIPTION\"],\n (\"test_description\", \"\")\n )\n\n def test_evidence_change_assmt(self):\n \"\"\"Test notification updated data when evidence values is changed\"\"\"\n with factories.single_commit():\n evidence_url = \"test.com\"\n evidence_file = \"test_gdrive.file\"\n evidence_1 = factories.EvidenceUrlFactory(link=evidence_url,\n title=evidence_url)\n evidence_2 = factories.EvidenceFileFactory(link=evidence_file,\n title=evidence_file)\n response = self.api.put(self.assessment, {\n 
\"actions\": {\"add_related\": [\n {\n \"id\": evidence_1.id,\n \"type\": \"Evidence\",\n },\n {\n \"id\": evidence_2.id,\n \"type\": \"Evidence\",\n },\n ]}\n })\n self.assert200(response)\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"user@example.com\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"EVIDENCE URL\"],\n (evidence_url, \"\")\n )\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"EVIDENCE FILE\"],\n (evidence_file, \"\")\n )\n\n def test_labels_change(self):\n \"\"\"Test notification updated data when labels are changed\"\"\"\n label_new = factories.LabelFactory(name=\"test_label\",\n object_type='Assessment')\n response = self.api.put(self.assessment, {'labels': [{\n \"name\": label_new.name,\n \"id\": label_new.id\n }]})\n self.assert200(response)\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"user@example.com\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"LABELS\"],\n (\"test_label\", \"\")\n )\n\n def test_ca_change_by_import(self):\n \"\"\"Test notification when custom attribute value is changed by import\"\"\"\n\n with factories.single_commit():\n assessment = factories.AssessmentFactory(status=\"Completed\")\n factories.CustomAttributeDefinitionFactory(\n definition_type=\"assessment\",\n title=\"Test GCAD\",\n )\n assessment_slug = assessment.slug\n assessment_id = assessment.id\n user = all_models.Person.query.filter_by(\n email=\"user@example.com\").first()\n assessment.add_person_with_role_name(user, \"Assignees\")\n\n from flask import g\n setattr(g, '_current_user', user)\n\n import_data = collections.OrderedDict([\n (\"object_type\", \"Assessment\"),\n (\"Code*\", assessment_slug),\n (\"Test GCAD\", \"test value\"),\n ])\n response = self.import_data(import_data)\n self._check_csv_response(response, {})\n\n notifs, _ = common.get_daily_notifications()\n\n self.assertEqual(len(notifs), 2)\n assessment = all_models.Assessment.query.get(assessment_id)\n cad = assessment.get_custom_attribute_definitions().filter_by(\n title=\"Test GCAD\").first()\n self.assertEqual(\n [i.attribute_value for i in cad.attribute_values], [\"test value\"])\n\n def test_checkbox_attr_change(self):\n \"\"\"Test notification when person attribute value is changed\"\"\"\n custom_attribute_values = [{\n \"custom_attribute_id\": self.cad3.id,\n \"attribute_value\": \"1\",\n }]\n response = self.api.put(self.assessment, {\n \"custom_attribute_values\": custom_attribute_values\n })\n self.assert200(response)\n\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"user@example.com\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"].keys(), [\"CA3\"])\n\n def test_access_conrol_list(self):\n \"\"\"Test notification when access conrol list is changed\"\"\"\n creator_acr = all_models.AccessControlRole.query.filter_by(\n object_type=\"Assessment\",\n name=\"Creators\",\n ).first()\n assignee_acr = all_models.AccessControlRole.query.filter_by(\n object_type=\"Assessment\",\n name=\"Assignees\",\n ).first()\n response = self.api.put(self.assessment, {\n \"access_control_list\": [\n acl_helper.get_acl_json(self.secondary_role_id, self.auditor.id),\n acl_helper.get_acl_json(assignee_acr.id, self.auditor.id),\n acl_helper.get_acl_json(creator_acr.id, 
self.auditor.id),\n        ],\n    })\n    self.assert200(response)\n\n    notifs, notif_data = common.get_daily_notifications()\n    updated = notif_data[\"user@example.com\"][\"assessment_updated\"]\n    self.assertEqual(len(notifs), 1)\n    self.assertEqual(\n        updated[self.assessment.id][\"updated_data\"][\"PRIMARY CONTACTS\"],\n        (\"\", \"user@example.com\")\n    )\n    self.assertEqual(\n        updated[self.assessment.id][\"updated_data\"][\"SECONDARY CONTACTS\"],\n        (\"user@example.com\", \"\")\n    )\n\n  def test_multiple_updates(self):\n    \"\"\"Test notification for multiple updates\"\"\"\n    response = self.api.put(self.assessment, {\"test_plan\": \"steps\"})\n    self.assert200(response)\n\n    response = self.api.put(self.assessment, {\"title\": \"new title\"})\n    self.assert200(response)\n\n    notifs, notif_data = common.get_daily_notifications()\n    updated = notif_data[\"user@example.com\"][\"assessment_updated\"]\n    self.assertEqual(len(notifs), 1)\n    self.assertEqual(\n        updated[self.assessment.id][\"updated_data\"][\"TITLE\"],\n        (\"new title\", \"Assessment1\")\n    )\n    self.assertEqual(\n        updated[self.assessment.id][\"updated_data\"][\"ASSESSMENT PROCEDURE\"],\n        (\"steps\", \"\")\n    )\n\n  # pylint: disable=invalid-name\n  def test_multiple_updates_return_old_value(self):\n    \"\"\"Test notification for multiple updates if value did not change\"\"\"\n    old_test_plan = self.assessment.test_plan\n    response = self.api.put(self.assessment, {\"test_plan\": \"steps\"})\n    self.assert200(response)\n\n    response = self.api.put(self.assessment, {\"test_plan\": old_test_plan})\n    self.assert200(response)\n\n    notifs, notif_data = common.get_daily_notifications()\n    self.assertEqual(len(notifs), 1)\n    self.assertEqual({}, notif_data)\n\n  def test_multiple_mapping(self):\n    \"\"\"Test notification for mapping multiple objects\"\"\"\n    controls = [factories.ControlFactory() for _ in xrange(5)]\n    snapshots = self._create_snapshots(self.assessment.audit, controls)\n\n    def get_relation_dict(destination_obj):\n      return {\n          \"relationship\": {\n              \"context\": {\"id\": self.assessment.audit.context.id,\n                          \"type\": self.assessment.audit.context.type},\n              \"source\": {\"id\": self.assessment.id,\n                         \"type\": self.assessment.type},\n              \"destination\": {\"id\": destination_obj.id,\n                              \"type\": destination_obj.type}\n          }\n      }\n    notifs, _ = common.get_daily_notifications()\n    self.assertFalse(len(notifs))\n    self.assessment.status = \"In Progress\"\n    post_data = [get_relation_dict(s) for s in snapshots]\n    db.session.add(self.assessment)\n    resp = self.api.send_request(\n        self.api.client.post, obj=all_models.Relationship, data=post_data)\n    self.assert200(resp)\n    notifs, _ = common.get_daily_notifications()\n    self.assertEqual(len(notifs), 1)\n\n  def assert_asmnt_notifications(self):\n    \"\"\"Check if Assessment reopen notifications are sent.\"\"\"\n    notifs, _ = common.get_daily_notifications()\n    self.assertGreaterEqual(len(notifs), 2)\n\n    with mock.patch(\"ggrc.notifications.common.send_email\") as send_email_mock:\n      self.client.get(\"/_notifications/send_daily_digest\")\n      _, _, content = send_email_mock.call_args[0]\n      self.assertIn(\"has been updated\", content)\n      self.assertIn(\"Reopened assessments\", content)\n\n  @ddt.data(\n      all_models.Assessment.DONE_STATE,\n      all_models.Assessment.FINAL_STATE,\n  )\n  def test_import_evidence_mapped(self, status):\n    \"\"\"Test notifications for '{}' Assessment if Evidence mapped in import.\"\"\"\n    object_generator = ObjectGenerator()\n    _, user = object_generator.generate_person(user_role=\"Creator\")\n    assessment = factories.AssessmentFactory()\n    
assessment.add_person_with_role_name(user, \"Verifiers\")\n assessment.status = status\n db.session.commit()\n\n response = self.import_data(collections.OrderedDict([\n (\"object_type\", \"Assessment\"),\n (\"Code*\", assessment.slug),\n (\"Evidence URL\", \"some url\"),\n ]))\n self._check_csv_response(response, {})\n self.assert_asmnt_notifications()\n\n @ddt.data(\n all_models.Assessment.DONE_STATE,\n all_models.Assessment.FINAL_STATE,\n )\n def test_import_lcad_changed(self, status):\n \"\"\"Test notifications for '{}' Assessment if LCAD changed in import.\"\"\"\n object_generator = ObjectGenerator()\n _, user = object_generator.generate_person(user_role=\"Creator\")\n with factories.single_commit():\n assessment = factories.AssessmentFactory()\n factories.CustomAttributeDefinitionFactory(\n title=\"Test LCAD\",\n definition_type=\"assessment\",\n definition_id=assessment.id,\n attribute_type=\"Text\",\n )\n assessment.add_person_with_role_name(user, \"Verifiers\")\n assessment.status = status\n db.session.commit()\n\n response = self.import_data(collections.OrderedDict([\n (\"object_type\", \"Assessment\"),\n (\"Code*\", assessment.slug),\n (\"Test LCAD\", \"some value\"),\n ]))\n self._check_csv_response(response, {})\n self.assert_asmnt_notifications()\n\n @ddt.data(\n all_models.Assessment.DONE_STATE,\n all_models.Assessment.FINAL_STATE,\n )\n def test_import_snapshot_mapped(self, status):\n \"\"\"Test notifications for '{}' Assessment if snapshot mapped in import.\"\"\"\n object_generator = ObjectGenerator()\n _, user = object_generator.generate_person(user_role=\"Creator\")\n with factories.single_commit():\n assessment = factories.AssessmentFactory()\n control = factories.ControlFactory()\n # pylint: disable=expression-not-assigned\n self._create_snapshots(assessment.audit, [control])[0]\n assessment.add_person_with_role_name(user, \"Verifiers\")\n assessment.status = status\n db.session.commit()\n\n response = self.import_data(collections.OrderedDict([\n (\"object_type\", \"Assessment\"),\n (\"Code*\", assessment.slug),\n (\"Map:control versions\", control.slug),\n ]))\n self._check_csv_response(response, {})\n self.assert_asmnt_notifications()\n","sub_path":"test/integration/ggrc/notifications/test_assessment_notifications.py","file_name":"test_assessment_notifications.py","file_ext":"py","file_size_in_byte":14994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"49677796","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /usr/lib/python2.7/site-packages/consul_srv/query.py\n# Compiled at: 2017-06-29 12:11:10\n__doc__ = '\\nSimple wrapper around dnspython to query a Consul agent over its DNS port and\\nextract ip address/port information.\\n'\nfrom dns.resolver import Resolver\nfrom dns import rdatatype\nfrom collections import namedtuple\nSRV = namedtuple('SRV', ['host', 'port'])\n\nclass Resolver(Resolver):\n \"\"\"\n Wrapper around the dnspython Resolver class that implements the `srv`\n method. 
Takes the address and optional port of a DNS server.\n \"\"\"\n\n def __init__(self, server_address, port=8600):\n super(Resolver, self).__init__()\n self.nameservers = [server_address]\n self.nameserver_ports = {server_address: port}\n\n def _get_host(self, answer):\n for resource in answer.response.additional:\n for record in resource.items:\n if record.rdtype == rdatatype.A:\n return record.address\n\n raise ValueError('No host information.')\n\n def _get_port(self, answer):\n for resource in answer:\n if resource.rdtype == rdatatype.SRV:\n return resource.port\n\n raise ValueError('No port information.')\n\n def srv(self, resource):\n \"\"\"\n Query this resolver's nameserver for the name consul service. Returns a\n named host/port tuple from the first element of the response.\n \"\"\"\n domain_name = ('{}.service.consul').format(resource)\n answer = self.query(domain_name, 'SRV', tcp=True)\n host = self._get_host(answer)\n port = self._get_port(answer)\n return SRV(host, port)","sub_path":"pycfiles/consul_utils-0.1.3-py3-none-any/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"7285133","text":"# -*- coding: utf-8 -*-\n\nimport csv\n\nwith open('C:\\\\Users\\\\espos\\\\Documents\\\\GitHub\\\\mlh-hackathon-flask-starter\\\\app\\\\properties-24060.csv') as csv_file:\n stringList = []\n zPid = []\n allPid = []\n blacksburgEstate = csv.DictReader(csv_file)\n for blacksburg in blacksburgEstate:\n stringList = blacksburg['url'].split('/')\n print(stringList[5])\n zPid.append(stringList[5])\n\n for i in range(len(zPid)):\n allPid.append(zPid[i].split('_')[0])\n \n print(allPid)","sub_path":"app/urlScrape.py","file_name":"urlScrape.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"457193513","text":"class Solution:\n def countPrimes(self, n: int) -> int:\n if n <= 2:\n return 0\n\n table = [True]*n\n table[0], table[1] = False,False\n\n i = 2\n while i*i < n:\n if table[i]:\n for j in range(i*i, n, i):\n table[j] = False\n i+=1\n\n return sum(table)\n","sub_path":"backend/dataset/countPrimes/sieve_1.py","file_name":"sieve_1.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"331527708","text":"import json\nimport sys\nimport requests\n\ndef getrepos():\n next_page = True\n username = sys.argv[1]\n api = 'https://api.github.com/users/{}/repos?per_page=100'.format(username)\n results = []\n while next_page: # the list of repos is paginated\n r = requests.get(api)\n json_format= json.loads(r.text)\n for repo in json_format:\n results.append(repo['html_url'])\n head_request = requests.head(url=r.url)\n if 'next' in head_request.links: # check if there is another page of repos\n api = head_request.links['next']['url']\n else:\n next_page = False\n return results\n\ndef main():\n for repo in getrepos():\n print(repo)\n print(len(getrepos()))\n\nif __name__ == \"__main__\":\n main()","sub_path":"Bai9/githubrepos.py","file_name":"githubrepos.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"430603651","text":"import numpy as np\nimport os\nimport cv2\n#from scipy.misc import imrea, imresize\n#import xml.etree.ElemntTree as ET\n\nprint(\"Package loaded\")\nlabels={\"fp\":0, \"lr\":1, 
\"rl\":2}\ncwd = os.getcwd()\n\n#print(\"Current folder is %s\"%cwd)\n\ndef jpg2npz(size=(), filepath=(), IMREAD_TYPE = 0, refDir=[]):\n\n\n    idx = 0\n\n    path = {0:filepath[0], 1:filepath[1]}\n\n\n    for dir in refDir:\n        origin_path = path[0] + '/' + dir\n        print(dir)\n        for original_file in os.listdir(origin_path):\n            label = labels[dir] # print: 0,1,2\n\n            for real_file in os.listdir(origin_path + '/' + original_file):\n\n                # Grayscale(0) vs COLOR(1)\n                if IMREAD_TYPE == 0:\n                    img = cv2.imread(origin_path + \"/\" + original_file + \"/\" + real_file, cv2.IMREAD_GRAYSCALE)\n                else:\n                    img = cv2.imread(origin_path + \"/\" + original_file + \"/\" + real_file, cv2.IMREAD_COLOR)\n\n                shrink = cv2.resize(img, (size[0], size[1]), None, interpolation=cv2.INTER_AREA)\n\n                encoding = np.eye(3)[label]\n\n                # save numpy files\n                np.savez(path[1] + \"/\" + str(10000 + idx) + \".npz\", train=shrink, training_labels=encoding)\n                idx += 1\n\n\nimgpath = \"/home/pirl/Pictures/new_img\"\nsavePath = \"/home/pirl/Pictures/save_img\"\njpg2npz((64, 64), (imgpath,savePath), 0, refDir=['fp', 'lr', 'rl'])","sub_path":"imgPreprocessing/jpg2npz.py","file_name":"jpg2npz.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"405312042","text":"def bisect(func,a,b,tol=5e-5):\n    fa = func(a)\n    fb = func(b)\n    if fa*fb >= 0:\n        print('f(a)f(b)<0 not satisfied!')\n    else:\n        while (b-a)/2>tol:\n            c = (a+b)/2\n            fc = func(c)\n            if fc == 0:\n                break\n            elif fa*fc<0:\n                fb,b = fc,c\n            else:\n                fa,a = fc,c\n    return (a+b)/2\n\n# f = lambda x: x**3+x-1\n# xc = bisect(f,0.0,1.0,5e-5)\n# print(xc)\n","sub_path":"bisect.py","file_name":"bisect.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"258129850","text":"import os\nimport pandas as pd\n\n\nparent_dir = os.path.dirname(os.getcwd())\ninput_dir = os.path.join(parent_dir, \"test_logs\")\nmodel_dirs = os.listdir(input_dir)\nmodels_num = len(model_dirs) + 1\nres_list = []\nfor i in range(models_num):\n    model_dir = \"model\" + str(i)\n    print(model_dir)\n    res_path = os.path.join(input_dir, model_dir, \"test_metrics.csv\")\n\n    if not os.path.isfile(res_path):\n        continue\n\n    df = pd.read_csv(res_path)\n    df[\"model\"] = model_dir\n    res_list.append(df)\nres_df = pd.concat(res_list)\nres_df.to_csv(\"res.csv\", index=False)\n","sub_path":"new_src/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"312332424","text":"import cv2\n\nimg = cv2.imread('drone_pic.JPG')\n\ncv2.namedWindow('image', cv2.WINDOW_NORMAL)\ncv2.resizeWindow('image', 250, 10)\ncv2.imshow('image',img)\n\nk = cv2.waitKey(0)\nif k == 27:        # esc key\n    cv2.destroyAllWindows()\nelif k == ord('s'): # 's' key\n    cv2.imwrite('testgray.png',img)\n    cv2.destroyAllWindows()","sub_path":"window_size.py","file_name":"window_size.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"542416446","text":"\"\"\"\n    sort.py 排序算法训练\n\"\"\"\n\n# 冒泡\ndef bubble(list_):\n    n = len(list_)\n    #  外层表示比较多少轮\n    for i in range(n - 1):\n        # 表示每轮两两比较的次数\n        for j in range(n - 1 - i):\n            # 从小到大排序\n            if list_[j] > list_[j + 1]:\n                list_[j],list_[j + 1] = list_[j + 1],list_[j]\n\n# 完成一轮交换\ndef sub_sort(list_,low,high):\n    # 选定基准\n    x = list_[low]\n    # low向后 high向前\n    while low < high:\n        
# 后面的数往前放\n while list_[high] >= x and high > low:\n high -= 1\n list_[low] = list_[high]\n # 前面的数往后放\n while list_[low] < x and low < high:\n low += 1\n list_[high] = list_[low]\n\n list_[low] = x\n return low\n\n\ndef quick(list_,low,high):\n # low 表示列表第一个元素索引,high表示最后一个元素索引\n if low < high:\n key = sub_sort(list_,low,high)\n quick(list_,low,key - 1)\n quick(list_, key + 1,high)\n\n\n\nl = [4,9,3,1,2,5,8,4]\n# bubble(l)\nquick(l,0,len(l)-1)\nprint(l)\n\n\n# list01.sort()\n\n","sub_path":"data/day03/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"271961001","text":"## ItemCF-余弦算法\r\nimport math\r\ndef ItemSimilarity_cos(train):\r\n C = dict() ##同时购买的次数\r\n N = dict() ##购买⽤户数\r\n for u, items in train.items():\r\n for i in items.keys():\r\n if i not in N.keys():\r\n\r\n N[i] = 0\r\n N[i] += items[i] * items[i]\r\n for j in items.keys():\r\n if i == j:\r\n continue\r\n if i not in C.keys():\r\n C[i] = dict()\r\n if j not in C[i].keys():\r\n C[i][j] = 0\r\n ##当⽤户同时购买了i和j,则加评分乘积\r\n C[i][j] += items[i] * items[j]\r\n W = dict() ##相似分数\r\n for i, related_items in C.items():\r\n if i not in W.keys():\r\n W[i] = dict()\r\n for j, cij in related_items.items():\r\n W[i][j] = cij / (math.sqrt(N[i])* math.sqrt(N[j]))\r\n return W\r\n\r\nif __name__ == '__main__':\r\n Train_Data = {\r\n 'A': {'苹果': 2, '⾹蕉': 2, '⻄⽠': 2},\r\n 'B': {'苹果': 2, '⻄⽠': 2},\r\n 'C': {'苹果': 2, '⾹蕉': 2, '菠萝': 2},\r\n 'D': {'⾹蕉': 2, '葡萄': 2},\r\n 'E': {'葡萄': 2, '菠萝': 2},\r\n 'F': {'⾹蕉': 2, '⻄⽠': 2}\r\n }\r\n W = ItemSimilarity_cos(Train_Data)\r\n print(W)","sub_path":"AI/自然语言处理/11.26/Collaborative algorithm based on item/Cosine-based.py","file_name":"Cosine-based.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"413228985","text":"f_index = lambda x, y: x % y\n\nmount_list = lambda max_list: [[] for _ in range(max_list)]\n\ndef main():\n qntd_tests = int(input())\n\n while qntd_tests > 0:\n\n enderecos, chaves = input().rsplit(' ')\n enderecos = int(enderecos)\n #chaves = int(chaves)\n\n ll = mount_list(enderecos)\n nums = input().rsplit(' ')\n\n for num in nums:\n num = int(num)\n index = f_index(num, enderecos)\n ll[index].append(num)\n\n #show lista of list\n i=0\n for l in ll:\n print('{} ->'.format(i), end=' ')\n for num in l:\n print('{} ->'.format(num), end=' ')\n print('/')\n i += 1\n\n qntd_tests -= 1\n\n if(qntd_tests > 0):\n print()\n\n\nmain()","sub_path":"uri/1256.py","file_name":"1256.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"409134446","text":"# encoding: utf-8\nfrom __future__ import unicode_literals\nimport emails\n\n\ndef test_lazy_http():\n IMG_URL = 'http://lavr.github.io/python-emails/tests/python-logo.gif'\n f = emails.store.LazyHTTPFile(uri=IMG_URL)\n assert f.filename == 'python-logo.gif'\n assert f.content_disposition is None\n assert len(f.data) == 2549\n\n\ndef test_store_commons():\n FILES = [{'data': 'aaa', 'filename': 'aaa.txt'}, {'data': 'bbb', 'filename': 'bbb.txt'}, ]\n store = emails.store.MemoryFileStore()\n [store.add(_) for _ in FILES]\n for i, stored_file in enumerate(store):\n orig_file = FILES[i]\n for (k, v) in orig_file.items():\n assert v == getattr(stored_file, 
k)\n\n","sub_path":"emails/testsuite/loader/test_store.py","file_name":"test_store.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"480883910","text":"#! /usr/bin/env python\n#coding=utf-8\n\nimport threading\n\nfrom log import *\nfrom const import *\nfrom espn import *\n\n\ndef get_team_schedule(teamname):\n log(\"[INFO] - Getting %s schedule\" % teamname)\n data = espn_get_schedule(teamname)\n \n\n\n\ndef schedule_spider():\n for team in TEAMLDICT.values():\n t = threading.Thread(target=get_team_schedule, args=(team,))\n t.start()\n\n \ndef main():\n init_log(\"schedule_spider\")\n schedule_spider()\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"materialtools/NBASpider-master/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"292842502","text":"from inspect import CO_GENERATOR\nimport discord\nfrom discord import voice_client\nfrom discord.ext import commands\nimport os\nfrom discord import FFmpegPCMAudio\nfrom discord.ext.commands import bot\nfrom discord.ext.commands.core import command\nimport youtube_dl\nimport asyncio\nfrom variables import *\nfrom music_cog import music_cog\nfrom fun_cog import fun_cog\n\nintents = discord.Intents.default()\nclient = commands.Bot(command_prefix = '&', intents = intents)\n\nclient.add_cog(music_cog(client))\nclient.add_cog(fun_cog(client))\nsocial = {\"134117892747821056\": {\"twitter\": \"totheskye_\"}}\n\n@client.event\nasync def on_ready():\n print('YuumBot is operational')\n\n@client.command()\nasync def bio(ctx):\n await ctx.channel.send(\"I am YuumBot. Current Version: 1.3. As of right now, I am primarily a music bot. Full list of features coming soon via &help. 
Ask <@!134117892747821056> about anything regarding me.\")\n\n@client.event\nasync def on_message(message):\n if client.user.mentioned_in(message):\n await message.channel.send(\"Don't @ me please .\")\n await client.process_commands(message)\n\n@client.command()\nasync def join(ctx):\n if(ctx.author.voice is None):\n await ctx.channel.send(\"You're not in a voice channel bro\")\n else:\n channel = ctx.author.voice.channel\n await channel.connect()\n\n@client.command()\nasync def leave(ctx):\n if(ctx.voice_client is None):\n await ctx.channel.send(\"I'm not in a voice channel bro\")\n else:\n await ctx.voice_client.disconnect()\n\n@client.command()\nasync def addTwitter(ctx, username):\n user = str(ctx.author.id)\n twitter = str(username)\n add = { user : {\"twitter\": twitter}}\n social.update(add)\n await ctx.send(social)\n\n@client.command()\nasync def printS(ctx):\n #u2 = { \"12312622\": {\"twitter\": \"idk\"}}\n #u3 = { \"52562141\": {\"twitter\": \"idk\"}}\n #social.update(u2)\n #social.update(u3)\n username = social[\"134117892747821056\"][\"twitter\"]\n await ctx.send(username)\n await ctx.send(social)\n\n@client.command()\nasync def embed(ctx):\n id = str(ctx.author.id)\n username = social[id][\"twitter\"]\n embed=discord.Embed(title=\"Profile\", description=\"<@!\" + id + \">\", color=0x38ccc9)\n embed.set_author(name=ctx.message.author.name)\n embed.set_thumbnail(url=ctx.message.author.avatar_url)\n embed.add_field(name=\"Date Joined\", value = ctx.message.author.joined_at.strftime(\"%b %d, %Y\"))\n embed.add_field(name=\"Twitter\", value = username)\n embed.set_footer(text=\"Profile\")\n #embed.set_image(ctx.message.author.user.avatar_url)\n await ctx.send(embed=embed)\n\nclient.run(token)\n\n ","sub_path":"YuumBot.py","file_name":"YuumBot.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"127686585","text":"# -*- coding: utf-8 -*-\n\"\"\"acknowledge.py module.\n\nProject: garage.smartanalytics.sva\nCreated: 07/2017 - u.maurer@enbw.com\n\n(c) Copyright EnBW AG 2017.\n\"\"\"\n\nimport http.client\n\n# conn = http.client.HTTPSConnection(\"10.67.0.26:38880\")\nconn = http.client.HTTPConnection(\"localhost:8000\")\n\npayload = '{ \"event_id\": \"354b8bb1-5448-4d73-ad4f-be4a1c126494\", \"event_classified_as\": \"ALARM\"}'\n\nheaders = {\n 'authorization': \"Basic bHlueDpMeW54NHN2YQ==\",\n 'content-type': \"application/json\",\n 'cache-control': \"no-cache\",\n }\n\nconn.request(\"PUT\", \"/events/acknowledge-event/\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\nprint(data.decode(\"utf-8\"))","sub_path":"acknowledge.py","file_name":"acknowledge.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"222598977","text":"import json\n\nimport pytest\nfrom django.test import override_settings\n\nfrom channels.generic.http import AsyncHttpConsumer\nfrom channels.layers import get_channel_layer\nfrom channels.testing import HttpCommunicator\n\n\n@pytest.mark.asyncio\nasync def test_async_http_consumer():\n \"\"\"\n Tests that AsyncHttpConsumer is implemented correctly.\n \"\"\"\n\n class TestConsumer(AsyncHttpConsumer):\n async def handle(self, body):\n data = json.loads(body.decode(\"utf-8\"))\n await self.send_response(\n 200,\n json.dumps({\"value\": data[\"value\"]}).encode(\"utf-8\"),\n headers={b\"Content-Type\": b\"application/json\"},\n )\n\n # Open a connection\n 
communicator = HttpCommunicator(\n TestConsumer,\n method=\"POST\",\n path=\"/test/\",\n body=json.dumps({\"value\": 42, \"anything\": False}).encode(\"utf-8\"),\n )\n response = await communicator.get_response()\n assert response[\"body\"] == b'{\"value\": 42}'\n assert response[\"status\"] == 200\n assert response[\"headers\"] == [(b\"Content-Type\", b\"application/json\")]\n\n\n@pytest.mark.asyncio\nasync def test_async_http_consumer_with_channel_layer():\n \"\"\"\n Tests that AsyncHttpConsumer is implemented correctly.\n \"\"\"\n\n class TestConsumer(AsyncHttpConsumer):\n \"\"\"\n Abstract consumer that provides a method that handles running a command and getting a response on a\n device.\n \"\"\"\n\n channel_layer_alias = \"testlayer\"\n\n async def handle(self, body):\n # Add consumer to a known test group that we will use to send events to.\n await self.channel_layer.group_add(\"test_group\", self.channel_name)\n await self.send_headers(\n status=200, headers=[(b\"Content-Type\", b\"application/json\")]\n )\n\n async def send_to_long_poll(self, event):\n received_data = str(event[\"data\"]).encode(\"utf8\")\n # We just echo what we receive, and close the response.\n await self.send_body(received_data, more_body=False)\n\n channel_layers_setting = {\n \"testlayer\": {\"BACKEND\": \"channels.layers.InMemoryChannelLayer\"}\n }\n\n with override_settings(CHANNEL_LAYERS=channel_layers_setting):\n # Open a connection\n communicator = HttpCommunicator(\n TestConsumer,\n method=\"POST\",\n path=\"/test/\",\n body=json.dumps({\"value\": 42, \"anything\": False}).encode(\"utf-8\"),\n )\n\n # We issue the HTTP request\n await communicator.send_request()\n\n # Gets the response start (status and headers)\n response_start = await communicator.get_response_start(timeout=1)\n\n # Make sure that the start of the response looks good so far.\n assert response_start[\"status\"] == 200\n assert response_start[\"headers\"] == [(b\"Content-Type\", b\"application/json\")]\n\n # Send now a message to the consumer through the channel layer. 
Using the known test_group.\n channel_layer = get_channel_layer(\"testlayer\")\n await channel_layer.group_send(\n \"test_group\",\n {\"type\": \"send.to.long.poll\", \"data\": \"hello from channel layer\"},\n )\n\n # Now we should be able to get the message back on the remaining chunk of body.\n body = await communicator.get_body_chunk(timeout=1)\n assert body == b\"hello from channel layer\"\n","sub_path":"tests/test_generic_http.py","file_name":"test_generic_http.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"199258008","text":"# -*- coding: utf-8 -*-\n#。—————————————————————————————————————————— \n#。 \n#。 main.py \n#。 \n#。 @Time : 18-11-10 下午1:38 \n#。 @Author : capton \n#。 @Software: PyCharm \n#。 @Blog : http://ccapton.cn \n#。 @Github : https://github.com/ccapton \n#。 @Email : chenweibin1125@foxmail.com \n#。__________________________________________\n\nfrom __future__ import print_function # 同时兼容python2、Python3\nfrom __future__ import division # 同时兼容python2、Python3\n\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom language import *\nimport globalvar as gl\ngl._init()\ngl.set_value('current_edit_service_index', None)\ngl.set_value('current_edit_service', None)\n\nfrom my_qt_define import setActionEvent,showDialog\n\nfrom add_server import *\nfrom trayicon import TrayIcon\n\nUI_REFRESHING = False\n\nimport time\n\nclass BrookMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setFixedHeight(400)\n self.ti = None\n self.table = None\n self.menu_list=[]\n self.add_brook_action = None\n self.add_brook_stream_action = None\n self.import_link_action = None\n self.start_connect = None\n self.stop_connect = None\n self.initUI()\n\n def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n\n def initUI(self):\n self.statusBar()\n\n self.addTopMenu('&'+get_word('start'))\n self.addTopMenu('&'+get_word('connection'))\n self.addTopMenu('&'+get_word('edit'))\n self.addTopMenu('&'+get_word('settings'))\n\n self.setFileMenu()\n self.setConnectionMenu()\n self.setEditMenu()\n self.setSettingsMenu()\n\n self.addServiceList()\n\n self.setGeometry(300, 300, 700, 410)\n self.setMinimumWidth(550)\n self.setWindowTitle(get_word('app_name',0))\n self.setWindowIcon(QIcon(os.path.join(basedir,'ico/brook-qt5.ico')))\n self.center()\n self.show()\n\n def setFileMenu(self):\n self.add_brook_action = self.createAction('Brook', 'Brook服务')\n setActionEvent(self.add_brook_action, self.createAddSeverDialog)\n self.add_brook_stream_action = self.createAction('Brook Stream', 'Brook Stream服务')\n setActionEvent(self.add_brook_stream_action, self.createAddSeverDialog2)\n add_service_action = self.createMenu('&'+get_word('add'), '添加一个服务')\n add_service_action.addAction(self.add_brook_action)\n add_service_action.addAction(self.add_brook_stream_action)\n\n import_link_action = self.createMenu('&'+get_word('import'),'从粘贴板或扫码导入新的服务')\n self.import_from_clipboard = self.createAction('粘贴板导入','先复制你的brook链接,然后到这一步')\n self.scan_qr = self.createAction('扫码导入','从屏幕中扫描二维码导入')\n self.scan_qr.setEnabled(False)\n setActionEvent(self.import_from_clipboard,self.createImportLinkDialog)\n setActionEvent(self.scan_qr,self.createScanQrDialog)\n import_link_action.addAction(self.import_from_clipboard)\n import_link_action.addAction(self.scan_qr)\n\n 
self.menu_list[0].addMenu(add_service_action)\n self.menu_list[0].addMenu(import_link_action)\n self.menu_list[0].addSeparator()\n self.addExitAction(self.menu_list[0])\n\n def setConnectionMenu(self):\n self.start_connect = self.createAction(get_word('start_connect'),'开始当前连接')\n self.stop_connect = self.createAction(get_word('stop_connect'),'断开当前连接')\n\n setActionEvent(self.start_connect,self.switchConnection)\n setActionEvent(self.stop_connect,self.switchConnection)\n\n self.start_connect.setEnabled(False)\n self.stop_connect.setEnabled(False)\n\n self.menu_list[1].addAction(self.start_connect)\n self.menu_list[1].addAction(self.stop_connect)\n\n def setEditMenu(self):\n self.editAction = self.createAction('编辑选中', '编辑当前服务')\n setActionEvent(self.editAction, self.createEditionDialog)\n self.menu_list[2].addAction(self.editAction)\n\n def setSettingsMenu(self):\n self.normal_settings = self.createAction('常规','常规设置')\n self.about = self.createAction('关于','软件详情')\n setActionEvent(self.normal_settings,self.createSettingsDialog)\n setActionEvent(self.about,self.createAboutDialog)\n self.menu_list[3].addAction(self.normal_settings)\n self.menu_list[3].addAction(self.about)\n\n\n def refreshConnectionUi(self):\n if gl.get_value('current_edit_service') == None or gl.get_value('current_edit_service_index') == None:\n self.start_connect.setEnabled(False)\n self.stop_connect.setEnabled(False)\n else:\n if has_service_start(gl.get_value('current_edit_service').get('ip'),\n gl.get_value('current_edit_service').get('remote_port'),\n gl.get_value('current_edit_service').get('local_port'),\n gl.get_value('current_edit_service').get('type')):\n self.start_connect.setEnabled(False)\n self.stop_connect.setEnabled(True)\n else:\n self.start_connect.setEnabled(True)\n self.stop_connect.setEnabled(False)\n\n def addTopMenu(self,menu_name):\n menu = self.menuBar().addMenu(menu_name)\n self.menu_list.append(menu)\n return menu\n\n def createMenu(self,menu_name,status_tip=''):\n menu = QMenu(menu_name,self)\n menu.setStatusTip(status_tip)\n return menu\n\n def createAction(self,action_name,status_tip='',icon_path=''):\n act = QAction(QIcon(icon_path), '&' + action_name, self)\n act.setStatusTip(status_tip)\n return act\n\n\n def update_connection_menu(self):\n pass\n print('hover')\n\n def before_quit(self):\n turnoffAll()\n qApp.quit()\n\n def addExitAction(self,menu,custom_exitAct=None):\n if custom_exitAct:\n menu.addAction(custom_exitAct)\n # exitAct = QAction(QIcon('exit.png'), '&' + get_word('exit'), self)\n exitAct = QAction('&' + get_word('exit'), self)\n #exitAct.setShortcut('Ctrl+Q')\n exitAct.setStatusTip('退出程序')\n exitAct.triggered.connect(self.before_quit)\n menu.addAction(exitAct)\n\n def createAddSeverDialog(self):\n dialog = QDialog()\n ui = Ui_Form()\n ui.setupUi(dialog,'brook',type=SERVER_ACTION_ADD,rootUi=self)\n dialog.exec_()\n dialog.show()\n\n def createAddSeverDialog2(self):\n dialog = QDialog()\n ui = Ui_Form()\n ui.setupUi(dialog,'brook-stream',type=SERVER_ACTION_ADD,rootUi=self)\n dialog.exec_()\n dialog.show()\n\n def createImportLinkDialog(self):\n import pyperclip\n clipboard_text = pyperclip.paste()\n new_data_json = load_data_json()\n link_imported_json = match_brook_service(clipboard_text)\n if link_imported_json:\n new_data_json.get('services').append(link_imported_json)\n save_data_json(new_data_json)\n self.reset_row()\n self.statusBar().showMessage(link_imported_json.get('ip')+':'+str(link_imported_json.get('remote_port'))+'添加成功')\n else:\n showDialog(self,'链接格式有误')\n\n def 
createScanQrDialog(self):\n pass\n\n def createAboutDialog(self):\n import about\n dialog = QDialog()\n dialog.setFixedSize(431,280)\n about_form = about.Ui_Form()\n about_form.setupUi(dialog)\n dialog.exec_()\n dialog.show()\n\n\n def createSettingsDialog(self):\n import settings_normal\n dialog = QDialog()\n settings = settings_normal.Ui_Form()\n settings.setupUi(dialog,get_host_ip())\n dialog.exec_()\n dialog.show()\n\n def createEditionDialog(self,service_name):\n if gl.get_value('current_edit_service') == None or gl.get_value('current_edit_service_index') == None :\n showDialog(self,'请选中一个连接进行操作!')\n return\n dialog = QDialog()\n ui = Ui_Form()\n if not service_name:\n if gl.get_value('current_edit_service').get('type') == 0:\n service_name = 'brook'\n elif gl.get_value('current_edit_service').get('type') == 1:\n service_name = 'brook-stream'\n if not service_name:\n showDialog(self, '请选中一个连接进行操作!')\n return\n ui.setupUi(dialog, service_name,type=SERVER_ACTION_EDIT,rootUi=self)\n dialog.exec_()\n dialog.show()\n\n def closeEvent(self, a0):\n a0.ignore()\n self.showMinimized()\n\n # 开始刷新界面、状态\n def startRefresh(self):\n self.thread = UiRreshThread()\n self.thread.signal.connect(self.ui_callback)\n self.thread.start() # 启动线程\n\n def ui_callback(self):\n pass\n #self.add_service_item(load_data_json())\n\n def addServiceList(self):\n self.table = MyTableWigdet(self)\n\n self.table.setGeometry(0,25,800,355)\n font = QFont('微软雅黑', 10)\n self.table.horizontalHeader().setFont(font) # 设置表头字体\n\n self.table.setFrameShape(QFrame.NoFrame) ##设置无表格的外框\n self.table.horizontalHeader().setFixedHeight(25) ##设置表头高度\n\n self.table.verticalHeader().hide()\n #self.table.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch) # 设置第五列宽度自动调整,充满屏幕\n #self.table.horizontalHeader().setStretchLastSection(True) ##设置最后一列拉伸至最大\n self.table.setSelectionMode(QAbstractItemView.SingleSelection) # 设置只可以单选,可以使用ExtendedSelection进行多选\n self.table.setSelectionBehavior(QAbstractItemView.SelectRows) # 设置 不可选择单个单元格,只可选择一行。\n\n self.table.setColumnCount(7) ##设置表格一共有五列\n self.table.setHorizontalHeaderLabels(['名称', '状态','服务器', '端口', '本地端口', '服务类型','转化类型']) # 设置表头文字\n self.table.horizontalHeader().setSectionsClickable(False) # 可以禁止点击表头的列\n self.table.horizontalHeader().setStyleSheet('QHeaderView::section{background:white}') # 设置表头的背景色为绿色\n self.table.setEditTriggers(QAbstractItemView.NoEditTriggers) # 设置表格不可更改\n self.table.setSortingEnabled(True) # 设置表头可以自动排序\n self.table.setRowCount(0)\n self.table.itemClicked.connect(self.itemClick)\n self.table.itemDoubleClicked.connect(self.itemDoubleClick)\n for index in range(len(load_data_json()['services'])):\n row = self.table.rowCount()\n self.table.setRowCount(row + 1)\n self.table.setItem(row,0,QTableWidgetItem(load_data_json()['services'][index].get('info')))\n self.table.setItem(row,1,QTableWidgetItem('断开'))\n self.table.setItem(row,2,QTableWidgetItem(load_data_json()['services'][index].get('ip')))\n self.table.setItem(row,3,QTableWidgetItem(str(load_data_json()['services'][index].get('remote_port'))))\n self.table.setItem(row,4,QTableWidgetItem(str(load_data_json()['services'][index].get('local_port'))))\n type_name = 'brook'\n if load_data_json()['services'][index].get('type') == 1:\n type_name = 'brook-stream'\n self.table.setItem(row,5,QTableWidgetItem(type_name))\n t_type_name = 'socks5'\n if load_data_json()['services'][index].get('transported_type') == 1:\n t_type_name='http'\n self.table.setItem(row,6,QTableWidgetItem(t_type_name))\n self.table.setShowGrid(False)\n 
self.update_row()\n\n def add_row(self):\n self.reset_row()\n\n def reset_row(self):\n for index in range(self.table.rowCount())[::-1]:\n self.table.removeRow(index)\n for index in range(len(load_data_json()['services'])):\n row = self.table.rowCount()\n self.table.setRowCount(row + 1)\n self.table.setItem(row,0,QTableWidgetItem(load_data_json()['services'][index].get('info')))\n self.table.setItem(row,1,QTableWidgetItem('断开'))\n self.table.setItem(row,2,QTableWidgetItem(load_data_json()['services'][index].get('ip')))\n self.table.setItem(row,3,QTableWidgetItem(str(load_data_json()['services'][index].get('remote_port'))))\n self.table.setItem(row,4,QTableWidgetItem(str(load_data_json()['services'][index].get('local_port'))))\n type_name = 'brook'\n if load_data_json()['services'][index].get('type') == 1:\n type_name = 'brook-stream'\n self.table.setItem(row,5,QTableWidgetItem(type_name))\n t_type_name = 'socks5'\n if load_data_json()['services'][index].get('transported_type') == 1:\n t_type_name='http'\n self.table.setItem(row,6,QTableWidgetItem(t_type_name))\n\n def update_row(self):\n for index in range(self.table.rowCount()):\n if has_service_start(load_data_json().get('services')[index].get('ip'),\n load_data_json().get('services')[index].get('remote_port'),\n load_data_json().get('services')[index].get('local_port'),\n load_data_json().get('services')[index].get('type')):\n self.table.item(index,1).setText('已连接')\n else:\n self.table.item(index,1).setText('断开')\n\n\n def itemClick(self,item):\n gl.set_value('current_edit_service_index', item.row())\n gl.set_value('current_edit_service', load_data_json().get('services')[item.row()])\n self.refreshConnectionUi()\n\n\n def itemDoubleClick(self,item):\n gl.set_value('current_edit_service_index',item.row())\n gl.set_value('current_edit_service',load_data_json().get('services')[item.row()])\n\n if has_service_start(gl.get_value('current_edit_service').get('ip'),\n gl.get_value('current_edit_service').get('remote_port'),\n gl.get_value('current_edit_service').get('local_port'),\n gl.get_value('current_edit_service').get('type')):\n return\n\n service_name = 'brook'\n if load_data_json().get('services')[item.row()].get('type') == 1:\n service_name = 'brook-stream'\n self.refreshConnectionUi()\n self.createEditionDialog(service_name=service_name)\n\n\n def switchConnection(self):\n if self.table:\n self.table.switchConnection(None)\n\n def cloneService(self):\n new_data_json = load_data_json()\n new_data_json.get('services').append(gl.get_value('current_edit_service'))\n save_data_json(new_data_json)\n self.reset_row()\n self.update_row()\n self.statusBar().showMessage('克隆了一个服务')\n\n\n def update_tray(self):\n self.ti.stateAction.setText('已断开')\n for service in load_data_json().get('services'):\n if has_service_start(service.get('ip'),service.get('remote_port'),service.get('local_port'),service.get('type')):\n self.ti.stateAction.setText('已连接')\n break\n\n\nclass MyTableWigdet(QTableWidget):\n def contextMenuEvent(self, event):\n self.edit_index = self.row(self.itemAt(event.pos()))\n\n gl.set_value('current_edit_service_index', self.edit_index)\n gl.set_value('current_edit_service', load_data_json().get('services')[self.edit_index])\n\n self.menu = QMenu(self)\n connectAction = QAction('连接',self)\n cloneAction = QAction('克隆',self)\n editAction = QAction('编辑',self)\n # print(gl.get_value('current_edit_service').get('local_port'))\n # print(gl.get_value('current_edit_service').get('type'))\n if 
has_service_start(gl.get_value('current_edit_service').get('ip'),\n gl.get_value('current_edit_service').get('remote_port'),\n gl.get_value('current_edit_service').get('local_port'),\n gl.get_value('current_edit_service').get('type')):\n connectAction.setText('断开')\n editAction.setEnabled(False)\n\n deletaAction = QAction('移除',self)\n setActionEvent(connectAction, self.switchConnection)\n setActionEvent(editAction,self.parent().createEditionDialog)\n setActionEvent(cloneAction,self.parent().cloneService)\n setActionEvent(deletaAction,self.remove_row)\n self.menu.addAction(connectAction)\n self.menu.addSeparator()\n self.menu.addAction(editAction)\n self.menu.addAction(cloneAction)\n self.menu.addSeparator()\n self.menu.addAction(deletaAction)\n self.menu.popup(QCursor.pos())\n\n def switchConnection(self,event):\n if has_service_start(gl.get_value('current_edit_service').get('ip'),\n gl.get_value('current_edit_service').get('remote_port'),\n gl.get_value('current_edit_service').get('local_port'),\n gl.get_value('current_edit_service').get('type')):\n stop_service(gl.get_value('current_edit_service').get('ip'),\n gl.get_value('current_edit_service').get('remote_port'),\n gl.get_value('current_edit_service').get('local_port'),\n service_type=gl.get_value('current_edit_service').get('type'))\n self.parent().statusBar().showMessage('已断开与' + gl.get_value('current_edit_service').get('info')+'的连接')\n self.parent().update_tray()\n else:\n if load_config_json().get('enable_lan_connection'):\n woring_ip = get_host_ip()\n else:\n woring_ip = '127.0.0.1'\n state = start_service(service_type=gl.get_value('current_edit_service').get('type'),\n transported_type=gl.get_value('current_edit_service').get('transported_type'),\n remote_ip=gl.get_value('current_edit_service').get('ip'),\n remote_port=gl.get_value('current_edit_service').get('remote_port'),\n local_port=gl.get_value('current_edit_service').get('local_port'),\n local_ip=woring_ip,\n psw=gl.get_value('current_edit_service').get('psw'))\n if not state:\n showDialog(self,'连接失败,请检查服务配置')\n else:\n if gl.get_value('current_edit_service').get('info'):\n self.parent().statusBar().showMessage('已连接到' + gl.get_value('current_edit_service').get('info'))\n self.parent().update_row()\n self.parent().refreshConnectionUi()\n self.parent().update_tray()\n\n def remove_row(self,event):\n self.sd = showDialog(self.parent(),'确定要删除该连接吗?',callback=self.confirm_delete_service)\n\n\n def confirm_delete_service(self):\n if self.sd:\n self.sd.close()\n\n stop_service(gl.get_value('current_edit_service').get('ip'),\n gl.get_value('current_edit_service').get('remote_port'),\n gl.get_value('current_edit_service').get('local_port'),\n service_type=gl.get_value('current_edit_service').get('type'))\n self.removeRow(self.edit_index)\n new_data_json = load_data_json()\n # gl.set_value('current_edit_service',new_data_json['services'][self.edit_index])\n # gl.set_value('current_edit_service_index',self.edit_index)\n new_data_json['services'].remove(new_data_json['services'][self.edit_index])\n save_data_json(new_data_json)\n self.parent().reset_row()\n self.sd = None\n return False\n\n\nclass UiRreshThread(QThread):\n signal = pyqtSignal() # 括号里填写信号传递的参数\n def __init__(self):\n super().__init__()\n\n def __del__(self):\n self.wait()\n\n def run(self):\n while True:\n # 进行任务操作\n self.signal.emit() # 发射信号\n time.sleep(1)\n record_all_state()\n\ndef is_Mac():\n import platform\n sys_name = platform.system()\n # machine_name = platform.machine().lower()\n if 'Darwin' == sys_name:\n 
return True\n    elif 'Linux' == sys_name:\n        return False\n    return False\n\ndef download_brook(url,save_name='brook'):\n    print(' 开始下载brook ' + url)\n    brook_name = save_name\n    command = 'curl -o '+os.path.join(basedir, 'brook_temp')+' -L ' + url\n    code = os.system(command)\n    if code != 0:\n        print('')\n        print(' 下载brook错误,请重新运行本程序')\n        os.system('rm -rf '+os.path.join(basedir, 'brook_temp'))\n        return\n    command2 = 'rm -rf ' + os.path.join(basedir, brook_name) + ' && mv ' + os.path.join(basedir, 'brook_temp') + \\\n               ' ' + os.path.join(basedir, brook_name)\n    os.system(command2)\n    command3 = 'chmod +x ' + os.path.join(basedir, brook_name)\n    os.system(command3)\n\n\nif __name__ == '__main__':\n    if python_version == '2':\n        reload(sys) # python3解释器下可能会提示错误,没关系,因为只有python2运行本程序才会走到这步\n        sys.setdefaultencoding(\"utf-8\") # 同上\n\n    # amd_64位 cpu架构 linux\n    gl.set_value('brook_name', 'brook')\n\n    if is_Mac():\n        mac_brook = 'https://github.com/txthinking/brook/releases/download/v20180909/brook_darwin_amd64'\n        if not os.path.exists(os.path.join(basedir,'brook_darwin_amd64')):\n            download_brook(mac_brook,save_name='brook_darwin_amd64')\n        gl.set_value('brook_name','brook_darwin_amd64') # amd_64位 cpu架构 macos\n    else:\n        import platform\n        if platform.architecture()[0].find('32bit') != -1:\n            x86_brook = 'https://github.com/txthinking/brook/releases/download/v20180909/brook_linux_386'\n            if not os.path.exists(os.path.join(basedir,'brook_linux_386')):\n                download_brook(x86_brook, save_name='brook_linux_386')\n            gl.set_value('brook_name','brook_linux_x86') # 32位 cpu架构 linux\n        elif platform.architecture()[0].find('64bit') != -1:\n            brook = 'https://github.com/txthinking/brook/releases/download/v20180909/brook'\n            if not os.path.exists(os.path.join(basedir,'brook')):\n                download_brook(brook, save_name='brook')\n            gl.set_value('brook_name','brook')\n    app = QApplication(sys.argv)\n    mw = BrookMainWindow()\n    mw.statusBar().showMessage('Brook-Qt5客户端')\n    ti = TrayIcon(mw)\n    ti.show()\n    mw.ti = ti\n    #ex.startRefresh()\n    sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"179895584","text":"#Program asks user for name of file, display first 5 lines of file\ndef main():\n    #get name\n    filename = input('Enter filename: ')\n    #open that file\n    infile = open(filename, 'r')\n    #read that file \n    line = infile.readline()\n    linecount = 1\n    while line != '' and linecount <= 5:\n        print(line)\n        linecount += 1\n        line = infile.readline()\n    #close file\n    infile.close()\nmain()\n","sub_path":"homeclasswork/io/fileheaddisplay.py","file_name":"fileheaddisplay.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"496488374","text":"#REDDIT CHALLENGE 20\n'''Create a program that will find all prime numbers below 2000'''\n\ndef primes(n=2000):\n    for p in range(2, n): #for each candidate number below n\n        for i in range(2, p): #for each possible divisor of the candidate\n            if p % i == 0: \n                break #not prime, so move on to the next candidate\n        else:\n            print(p) #the inner loop found no divisor, so p is prime\n\nprimes()\n \n\n\n","sub_path":"Easy_Challenges/Easy11to20/easy20.py","file_name":"easy20.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
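The easy20 record just above hinges on Python's for/else: the else branch of a for loop runs only when the loop finishes without hitting break, which is exactly the "no divisor found" case (the original record paired the else with the if, printing composites too). A minimal standalone sketch of that idiom, assuming nothing beyond the standard library (the is_prime helper is hypothetical and not part of any dataset record):

def is_prime(p):
    # for/else: the else clause runs only if no break fired inside the loop.
    if p < 2:
        return False
    for i in range(2, p):
        if p % i == 0:
            break  # divisor found, p is composite
    else:
        return True  # loop ran to completion: no divisor, p is prime
    return False

# Primes below 2000, matching the challenge statement in the record above.
print([p for p in range(2, 2000) if is_prime(p)])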
+{"seq_id":"501742119","text":"###################################################################################################################### \n# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # \n# # \n# Licensed under the Apache License Version 2.0 (the \"License\"). You may not use this file except in compliance # \n# with the License. A copy of the License is located at # \n# # \n# http://www.apache.org/licenses/ # \n# # \n# or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES # \n# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions # \n# and limitations under the License. # \n######################################################################################################################\nfrom services.aws_service import AwsService\n\nACCOUNT_LIMIT = \"AccountLimit\"\nCHANGE_INFO = \"ChangeInfo\"\nGEO_LOCATION = \"GeoLocation\"\nHEALTH_CHECK = \"HealthCheck\"\nHEALTH_CHECK_COUNT = \"HealthCheckCount\"\nHEALTH_CHECK_LAST_FAILURE_REASON = \"HealthCheckLastFailureReason\"\nHEALTH_CHECK_STATUS = \"HealthCheckStatus\"\nHOSTED_ZONE = \"HostedZone\"\nHOSTED_ZONE_COUNT = \"HostedZoneCount\"\nHOSTED_ZONE_LIMIT = \"HostedZoneLimit\"\nQUERY_LOGGING_CONFIG = \"QueryLoggingConfig\"\nREUSABLE_DELEGATION_SET = \"ReusableDelegationSet\"\nRESOURCE_RECORD_SET_LIMIT = \"ReusableDelegationSetLimit\"\nTRAFFIC_POLICY = \"TrafficPolicy\"\nTRAFFIC_POLICY_INSTANCE = \"TrafficPolicyInstance\"\nTRAFFIC_POLICY_INSTANCE_COUNT = \"TrafficPolicyInstanceCount\"\n\nGEO_LOCATIONS = \"GeoLocations\"\nHEALTH_CHECKS = \"HealthChecks\"\nHOSTED_ZONES = \"HostedZones\"\nHOSTED_ZONES_BY_NAME = \"HostedZonesByName\"\nQUERY_LOGGING_CONFIGS = \"QueryLoggingConfigs\"\nRESOURCE_RECORD_SETS = \"ResourceRecordSets\"\nREUSABLE_DELEGATION_SET_LIMIT = \"ReusableDelegationSetLimit\"\nREUSABLE_DELEGATION_SETS = \"ReusableDelegationSets\"\nTAGS_FOR_RESOURCE = \"TagsForResource\"\nTAGS_FOR_RESOURCES = \"TagsForResources\"\nTRAFFIC_POLICIES = \"TrafficPolicies\"\nTRAFFIC_POLICIES_INSTANCES = \"TrafficPolicyInstances\"\nTRAFFIC_POLICY_INSTANCES_BY_HOSTED_ZONE = \"TrafficPolicyInstancesByHostedZone\"\nTRAFFIC_POLICY_INSTANCES_BY_POLICY = \"TrafficPolicyInstancesByPolicy\"\nTRAFFIC_POLICY_VERSIONS = \"TrafficPolicyVersions\"\nVPC_ASSOCIATION_AUTHORIZATION = \"VpcAssociationAuthorizations\"\n\nNEXT_TOKEN_ARGUMENT = \"Marker\"\nNEXT_TOKEN_RESULT = \"NextMarker\"\n\nRESOURCE_NAMES = [\n ACCOUNT_LIMIT,\n CHANGE_INFO,\n GEO_LOCATION,\n HEALTH_CHECK,\n HEALTH_CHECK_COUNT,\n HEALTH_CHECK_LAST_FAILURE_REASON,\n HEALTH_CHECK_STATUS,\n HOSTED_ZONE,\n HOSTED_ZONE_COUNT,\n HOSTED_ZONE_LIMIT,\n QUERY_LOGGING_CONFIG,\n REUSABLE_DELEGATION_SET,\n RESOURCE_RECORD_SET_LIMIT,\n TRAFFIC_POLICY,\n TRAFFIC_POLICY_INSTANCE,\n TRAFFIC_POLICY_INSTANCE_COUNT,\n GEO_LOCATIONS,\n HEALTH_CHECKS,\n HOSTED_ZONES,\n HOSTED_ZONES_BY_NAME,\n QUERY_LOGGING_CONFIGS,\n RESOURCE_RECORD_SETS,\n REUSABLE_DELEGATION_SET_LIMIT,\n REUSABLE_DELEGATION_SETS,\n TAGS_FOR_RESOURCE,\n TAGS_FOR_RESOURCES,\n TRAFFIC_POLICIES,\n TRAFFIC_POLICIES_INSTANCES,\n TRAFFIC_POLICY_INSTANCES_BY_HOSTED_ZONE,\n TRAFFIC_POLICY_INSTANCES_BY_POLICY,\n TRAFFIC_POLICY_VERSIONS,\n VPC_ASSOCIATION_AUTHORIZATION\n]\n\nRESOURSES_WITH_TAGS = [\n HEALTH_CHECK,\n HEALTH_CHECKS,\n HOSTED_ZONE,\n HOSTED_ZONES]\n\nCUSTOM_RESULT_PATHS = {\n ACCOUNT_LIMIT: \"{Limit:@.Limit, Count:@.Count}\",\n CHANGE_INFO: \"{\" + \",\".join(['\"{}\":@.ChangeInfo.{}'.format(i, 
i) for i in [\"Id\", \"Status\", \"SubmittedAt\", \"Comment\"]]) + \"}\",\n    GEO_LOCATION: \"{\" + \",\".join(['\"{}\":@.GeoLocationDetails.{}'.format(i, i) for i in\n                                 [\"ContinentCode\", \"ContinentName\", \"CountryCode\", \"CountryName\", \"SubdivisionCode\",\n                                  \"SubdivisionName\"]]) + \"}\",\n    HEALTH_CHECK_COUNT: \"{HealthCheckCount:HealthCheckCount}\",\n    HEALTH_CHECK_LAST_FAILURE_REASON: \"HealthCheckObservations\",\n    HEALTH_CHECK_STATUS: \"HealthCheckObservations\",\n    HOSTED_ZONE: \"{\" + \",\".join(['\"{}\":{}'.format(i, i) for i in [\"HostedZone\", \"DelegationSet\", \"VPCs\"]]) + \"}\",\n    HOSTED_ZONE_COUNT: \"{HostedZoneCount:HostedZoneCount}\",\n    HOSTED_ZONE_LIMIT: \"{Limit:@.Limit, Count:@.Count}\",\n    QUERY_LOGGING_CONFIG: \"{\" + \",\".join(\n        ['\"{}\":@.QueryLoggingConfig.{}'.format(i, i) for i in [\"Id\", \"HostedZoneId\", \"CloudWatchLogsLogGroupArn\"]]) + \"}\",\n    REUSABLE_DELEGATION_SET: \"{\" + \",\".join(\n        ['\"{}\":@.DelegationSet.{}'.format(i, i) for i in [\"Id\", \"CallerReference\", \"NameServers\"]]) + \"}\",\n    REUSABLE_DELEGATION_SET_LIMIT: \"{Limit:@.Limit, Count:@.Count}\",\n    TRAFFIC_POLICY: \"{\" + \",\".join(\n        ['\"{}\":@.TrafficPolicy.{}'.format(i, i) for i in [\"Id\", \"Version\", \"Name\", \"Type\", \"Document\", \"Comment\"]]) + \"}\",\n    TRAFFIC_POLICY_INSTANCE: \"{\" + \",\".join(['\"{}\":@.TrafficPolicyInstance.{}'.format(i, i) for i in\n                                             [\"Id\", \"HostedZoneId\", \"Name\", \"TTL\", \"State\", \"Message\", \"TrafficPolicyId\",\n                                              \"TrafficPolicyVersion\", \"TrafficPolicyType\"]]) + \"}\",\n    TRAFFIC_POLICY_INSTANCE_COUNT: \"{TrafficPolicyInstanceCount:TrafficPolicyInstanceCount}\",\n    GEO_LOCATIONS: \"GeoLocationDetailsList\",\n    HOSTED_ZONES_BY_NAME: HOSTED_ZONES,\n    REUSABLE_DELEGATION_SETS: \"DelegationSets\",\n    TAGS_FOR_RESOURCE: \"ResourceTagSet\",\n    TAGS_FOR_RESOURCES: \"ResourceTagSets\",\n    TRAFFIC_POLICIES: \"TrafficPolicySummaries\",\n    TRAFFIC_POLICY_INSTANCES_BY_HOSTED_ZONE: TRAFFIC_POLICIES_INSTANCES,\n    TRAFFIC_POLICY_INSTANCES_BY_POLICY: TRAFFIC_POLICIES_INSTANCES,\n    TRAFFIC_POLICY_VERSIONS: TRAFFIC_POLICIES,\n    VPC_ASSOCIATION_AUTHORIZATION: \"{HostedZoneId:@.HostedZoneId, VPCs:@.VPCs}\"\n\n}\n\nMULTI_ELEMENT_CONTINUATION_MARKERS = {\n    GEO_LOCATIONS: [\n        (\"StartContinentCode\", \"NextContinentCode\"),\n        (\"StartCountryCode\", \"NextCountryCode\"),\n        (\"StartSubdivisionCode\", \"NextSubdivisionCode\")\n    ],\n    RESOURCE_RECORD_SETS: [\n        (\"StartRecordName\", \"NextRecordName\"),\n        (\"StartRecordType\", \"NextRecordType\"),\n        (\"StartRecordIdentifier\", \"NextRecordIdentifier\")\n    ],\n    TRAFFIC_POLICIES_INSTANCES: [\n        (\"HostedZoneIdMarker\", \"HostedZoneIdMarker\"),\n        (\"TrafficPolicyInstanceNameMarker\", \"TrafficPolicyInstanceNameMarker\"),\n        (\"TrafficPolicyInstanceTypeMarker\", \"TrafficPolicyInstanceTypeMarker\")\n    ],\n    TRAFFIC_POLICY_INSTANCES_BY_HOSTED_ZONE: [\n        (\"TrafficPolicyInstanceNameMarker\", \"TrafficPolicyInstanceNameMarker\"),\n        (\"TrafficPolicyInstanceTypeMarker\", \"TrafficPolicyInstanceTypeMarker\")\n    ],\n    TRAFFIC_POLICY_INSTANCES_BY_POLICY: [\n        (\"HostedZoneIdMarker\", \"HostedZoneIdMarker\"),\n        (\"TrafficPolicyInstanceNameMarker\", \"TrafficPolicyInstanceNameMarker\"),\n        (\"TrafficPolicyInstanceTypeMarker\", \"TrafficPolicyInstanceTypeMarker\")\n    ]\n}\n\n\nclass Route53Service(AwsService):\n    def __init__(self, role_arn=None, session=None, tags_as_dict=True, as_named_tuple=False, service_retry_strategy=None):\n        \"\"\"\n        :param role_arn: Optional (cross account) role to use to retrieve services\n        :param session: Optional session to use 
to retrieve services\n        :param tags_as_dict: Set to True to convert resource tags to dictionaries\n        :param as_named_tuple: Set to True to return resources as named tuples instead of a dictionary\n        :param service_retry_strategy: service retry strategy for making boto api calls\n        \"\"\"\n\n        AwsService.__init__(self,\n                            service_name='route53',\n                            resource_names=RESOURCE_NAMES,\n                            role_arn=role_arn,\n                            session=session,\n                            tags_as_dict=tags_as_dict,\n                            resources_with_tags=RESOURSES_WITH_TAGS,\n                            as_named_tuple=as_named_tuple,\n                            custom_result_paths=CUSTOM_RESULT_PATHS,\n                            mapped_parameters={},\n                            next_token_argument=NEXT_TOKEN_ARGUMENT,\n                            next_token_result=NEXT_TOKEN_RESULT,\n                            service_retry_strategy=service_retry_strategy)\n\n    @staticmethod\n    def is_regional():\n        return False\n\n    def describe_resources_function_name(self, resource_name):\n        \"\"\"\n        Returns the name of the boto client method call to retrieve the specified resource.\n        :param resource_name:\n        :return: Name of the boto3 client function to retrieve the specified resource type\n        \"\"\"\n        s = AwsService.describe_resources_function_name(self, resource_name=resource_name)\n        if resource_name in [\n            ACCOUNT_LIMIT,\n            CHANGE_INFO,\n            GEO_LOCATION,\n            HEALTH_CHECK,\n            HEALTH_CHECK_COUNT,\n            HEALTH_CHECK_LAST_FAILURE_REASON,\n            HEALTH_CHECK_STATUS,\n            HOSTED_ZONE,\n            HOSTED_ZONE_COUNT,\n            HOSTED_ZONE_LIMIT,\n            QUERY_LOGGING_CONFIG,\n            REUSABLE_DELEGATION_SET,\n            RESOURCE_RECORD_SET_LIMIT,\n            TRAFFIC_POLICY,\n            TRAFFIC_POLICY_INSTANCE,\n            TRAFFIC_POLICY_INSTANCE_COUNT\n        ]:\n            s = s.replace(\"describe_\", \"get_\")\n\n        else:\n            s = s.replace(\"describe_\", \"list_\")\n\n        return s\n\n    def _get_tags_for_resource(self, client, resource):\n        \"\"\"\n        Returns the tags for specific resources that require additional boto calls to retrieve their tags. Most likely this\n        method needs to be overwritten for specific services/resources\n        :param client: Client that can be used to make the boto call to retrieve the tags\n        :return: Tags for the specified resource\n        \"\"\"\n        tags = client.list_tags_for_resource(ResourceId=resource[\"ResourceId\"],\n                                             ResourceType=self._resource_name.lower()).get(\"ResourceTagSet\", {}).get(\"Tags\", {})\n        return [\n            {\n                \"Key\": t, \"Value\": tags[t]\n            } for t in tags\n        ]\n\n    def _get_tag_resource(self):\n        \"\"\"\n        Returns the name of the resource to retrieve the tags for the resource of type specified by resource name\n        :return: Name of the resource that will be used to retrieve the tags\n        \"\"\"\n        if self._resource_name in [\n            HOSTED_ZONE,\n            HOSTED_ZONES,\n            HEALTH_CHECK,\n            HEALTH_CHECKS\n        ]:\n            return TAGS_FOR_RESOURCE\n        else:\n            return None\n\n    def _next_token_argument_name(self, resources):\n        \"\"\"\n        Returns the name of the continuation token parameter to be used in the describe call for a specific resource. Most likely\n        needs to be overwritten in inherited service class for service/resource specific parameter names\n        :param resources: Name of the resource type\n        :return: Name of the continuation token parameter\n        \"\"\"\n        if resources == TRAFFIC_POLICIES:\n            return \"TrafficPolicyIdMarker\"\n\n        if resources == TRAFFIC_POLICY_VERSIONS:\n            return \"TrafficPolicyVersionMarker\"\n\n        if resources == VPC_ASSOCIATION_AUTHORIZATION:\n            return \"NextToken\"\n\n        return self._nexttoken_argument\n\n    def _next_token_result_name(self, resources):\n        \"\"\"\n        Return the name of the continuation token attribute from the result of the describe response for a specific resource. 
Most\n likely needs to be overwritten in inherited service class for service/resource specific attribute names\n :param resources: Name of the resource type\n :return: Name of the continuation token attribute\n \"\"\"\n if resources in [\n GEO_LOCATIONS,\n RESOURCE_RECORD_SETS\n ]:\n return \"IsTruncated\"\n\n if resources == TRAFFIC_POLICIES:\n return \"TrafficPolicyIdMarker\"\n\n if resources == TRAFFIC_POLICY_VERSIONS:\n return \"TrafficPolicyVersionMarker\"\n\n if resources == VPC_ASSOCIATION_AUTHORIZATION:\n return \"NextToken\"\n\n return self._nexttoken_result\n\n def set_continuation_call_parameters(self, function_args, next_token, resp):\n if self._resource_name in MULTI_ELEMENT_CONTINUATION_MARKERS:\n for marker in MULTI_ELEMENT_CONTINUATION_MARKERS[self._resource_name]:\n if marker[1] in resp:\n function_args[marker[0]] = resp[marker[1]]\n else:\n function_args.pop(marker[0], None)\n else:\n AwsService.set_continuation_call_parameters(self, function_args, next_token, resp)\n","sub_path":"source/code/services/route53_service.py","file_name":"route53_service.py","file_ext":"py","file_size_in_byte":13363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"451832747","text":"def _get_lr_ops(self):\n lr_ops = []\n lr_vars = set()\n for op in self.optimize_ops:\n if self._is_opt_op(op):\n lr_vars.add(op.input('LearningRate')[0])\n find_ops = []\n block = self.origin_program.global_block()\n for op in block.ops:\n if (set(op.output_arg_names) & lr_vars):\n find_ops.append(op)\n ufind = UnionFind(block.ops)\n for op1 in block.ops:\n for op2 in block.ops:\n if ((op1 != op2) and self._is_op_connected(op1, op2) and (not self._is_opt_op(op1)) and (not self._is_opt_op(op2))):\n ufind.union(op1, op2)\n for op1 in block.ops:\n for op2 in find_ops:\n if ufind.is_connected(op1, op2):\n lr_ops.append(op1)\n return lr_ops","sub_path":"Data Set/bug-fixing-5/9bc0c23b178d44051c5ddeaaf726c771cea2dcd6-<_get_lr_ops>-bug.py","file_name":"9bc0c23b178d44051c5ddeaaf726c771cea2dcd6-<_get_lr_ops>-bug.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"652998377","text":"\nfrom collections import Counter\nimport os\nimport json\n\nfrom stem import CircStatus\nfrom stem.control import Controller\nimport stem\n\nfrom tor_ import TorPlugin, authenticate, gen_controller\n\nDEFAULT_GEOIP_PATH = \"/usr/share/GeoIP/GeoIP.dat\"\nCACHE_FNAME = 'munin_tor_country_stats.json'\n\n\ndef simplify(cn):\n \"\"\"Simplify country name\"\"\"\n cn = cn.replace(' ', '_')\n cn = cn.replace(\"'\", '_')\n cn = cn.split(',', 1)[0]\n return cn\n\n\nclass TorCountries(TorPlugin):\n def __init__(self):\n # Configure plugin\n self.cache_dir_name = os.environ.get('torcachedir', None)\n if self.cache_dir_name is not None:\n self.cache_dir_name = os.path.join(self.cache_dir_name,\n CACHE_FNAME)\n\n max_countries = os.environ.get('tormaxcountries', 15)\n self.max_countries = int(max_countries)\n\n geoip_path = os.environ.get('torgeoippath', DEFAULT_GEOIP_PATH)\n try:\n import GeoIP\n self.geodb = GeoIP.open(geoip_path, GeoIP.GEOIP_MEMORY_CACHE)\n self.available = True\n except Exception:\n self.available = False\n\n def conf(self):\n \"\"\"Configure plugin\"\"\"\n if not self.available:\n return\n\n graph = {'title': 'Countries',\n 'args': '-l 0 --base 1000',\n 'vlabel': 'countries',\n 'category': 'Tor',\n 'info': 'OR connections by state'}\n labels = {}\n\n countries_num = 
self.top_countries()\n\n for c, v in countries_num:\n labels[c] = {'label': c, 'min': 0, 'max': 25000, 'type': 'GAUGE'}\n\n TorPlugin.conf_from_dict(graph, labels)\n\n # If needed, create cache file at config time\n if self.cache_dir_name:\n with open(self.cache_dir_name, 'w') as f:\n json.dump(countries_num, f)\n\n def fetch(self):\n \"\"\"Generate metrics\"\"\"\n\n # If possible, read cached data instead of doing the processing twice\n if not self.available:\n return\n\n try:\n with open(self.cache_dir_name) as f:\n countries_num = json.load(f)\n except:\n # Fallback if cache_dir_name is not set, unreadable or any other\n # error\n countries_num = self.top_countries()\n\n for c, v in countries_num:\n print(\"%s.value %d\" % (c, v))\n\n @staticmethod\n def _gen_ipaddrs_from_circuits(controller):\n \"\"\"Generate a sequence of ipaddrs for every built circuit\"\"\"\n # Currently unused\n for circ in controller.get_circuits():\n if circ.status != CircStatus.BUILT:\n continue\n\n for entry in circ.path:\n fingerprint, nickname = entry\n\n desc = controller.get_network_status(fingerprint, None)\n if desc:\n ipaddr = desc.address\n yield ipaddr\n\n @staticmethod\n def _gen_ipaddrs_from_statuses(controller):\n \"\"\"Generate a sequence of ipaddrs for every network status\"\"\"\n for desc in controller.get_network_statuses():\n ipaddr = desc.address\n yield ipaddr\n\n def _gen_countries(self, controller):\n \"\"\"Generate a sequence of countries for every built circuit\"\"\"\n for ipaddr in self._gen_ipaddrs_from_statuses(controller):\n country = self.geodb.country_name_by_addr(ipaddr)\n if country is None:\n yield 'Unknown'\n continue\n\n yield simplify(country)\n\n def top_countries(self):\n \"\"\"Build a list of top countries by number of circuits\"\"\"\n with gen_controller() as controller:\n try:\n authenticate(controller)\n c = Counter(self._gen_countries(controller))\n return sorted(c.most_common(self.max_countries))\n except stem.connection.AuthenticationFailure as e:\n print('Authentication failed ({})'.format(e))\n return []\n","sub_path":"circuits_by_country.py","file_name":"circuits_by_country.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"644198206","text":"import json\nfrom datetime import date, datetime\n\nfrom django.http import HttpResponse\n\n\n__author__ = 'dany'\n\ndate_handler = lambda obj: (\n obj.isoformat()\n if isinstance(obj, datetime)\n or isinstance(obj, date)\n else None)\n\n\ndef enable_json(fun):\n def wrapper(request, *args, **kwargs):\n if request.body:\n data = json.loads(request.body)\n else:\n data = None\n ret_value = fun(request, *args, json_data=data, **kwargs)\n return HttpResponse(json.dumps(ret_value, default=date_handler), content_type=\"application/json\")\n\n return wrapper\n\n\n","sub_path":"core/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"192983966","text":"# -*- coding: utf-8 -*-\n# Author: Florian Mayer \n\n\"\"\" Classes for spectral analysis. 
\"\"\"\n\nfrom __future__ import absolute_import\n\nimport datetime\n\nfrom random import randint\nfrom itertools import izip\nfrom copy import copy, deepcopy\nfrom math import floor\n\nimport numpy as np\nfrom numpy import ma\n\nfrom scipy import ndimage\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import FuncFormatter, MaxNLocator\nfrom matplotlib.colorbar import Colorbar\n\nfrom sunpy.time import parse_time\nfrom sunpy.util.util import to_signed\nfrom sunpy.spectra.spectrum import Spectrum\n\n# This should not be necessary, as observations do not take more than a day\n# but it is used for completeness' and extendibility's sake.\n# XXX: Leap second?\nSECONDS_PER_DAY = 86400\n\n# Used for COPY_PROPERTIES\nREFERENCE = 0\nCOPY = 1\nDEEPCOPY = 2\n\n# Maybe move to util.\ndef get_day(dt):\n \"\"\" Return datetime for the beginning of the day of given datetime. \"\"\"\n return datetime.datetime(dt.year, dt.month, dt.day)\n\n\n# XXX: Find out why imshow(x) fails!\nclass Spectrogram(np.ndarray):\n \"\"\" Base class for spectral analysis in SunPy.\n \n Parameters\n ----------\n data : np.ndarray\n two-dimensional array of the image data of the spectrogram.\n time_axis : np.ndarray\n one-dimensional array containing the offset from the start\n for each column of data.\n freq_axis : np.ndarray\n one-dimensional array containing information about the\n frequencies each row of the image corresponds to.\n start : datetime\n starting time of the measurement\n end : datetime\n end time of the measurement\n t_init : int\n offset from the start of the day the measurement began. If None\n gets automatically set from start.\n t_label : str\n label for the time axis\n f_label : str\n label for the frequency axis\n content : str\n header for the image\n \"\"\"\n # Contrary to what pylint may think, this is not an old-style class.\n # pylint: disable=E1002,W0142,R0902\n\n # This needs to list all attributes that need to be\n # copied to maintain the object and how to handle them.\n COPY_PROPERTIES = [\n ('time_axis', COPY),\n ('freq_axis', COPY),\n ('start', REFERENCE),\n ('end', REFERENCE),\n ('t_label', REFERENCE),\n ('f_label', REFERENCE),\n ('content', REFERENCE),\n ('t_init', REFERENCE),\n ]\n\n def as_class(self, cls):\n \"\"\" Implementation detail. \"\"\"\n if not issubclass(cls, Spectrogram):\n raise ValueError\n\n dct = {}\n var = vars(self)\n for prop, _ in cls.COPY_PROPERTIES:\n if not prop in var:\n raise ValueError\n dct[prop] = var[prop]\n return cls(self, **dct)\n\n def get_params(self):\n \"\"\" Implementation detail. \"\"\"\n return dict(\n (name, getattr(self, name)) for name, _ in self.COPY_PROPERTIES\n )\n \n def _slice(self, y_range, x_range):\n \"\"\" Return new spectrogram reduced to the values passed\n as slices. Implementation detail. 
\"\"\"\n data = super(Spectrogram, self).__getitem__([y_range, x_range])\n params = self.get_params()\n\n soffset = 0 if x_range.start is None else x_range.start\n eoffset = self.shape[1] if x_range.stop is None else x_range.stop # pylint: disable=E1101\n eoffset -= 1\n\n fsoffset = 0 if y_range.start is None else y_range.start\n feoffset = self.shape[0] if y_range.stop is None else y_range.stop # pylint: disable=E1101\n \n params.update({\n 'time_axis': self.time_axis[\n x_range.start:x_range.stop:x_range.step\n ] - self.time_axis[soffset],\n 'freq_axis': self.freq_axis[\n y_range.start:y_range.stop:y_range.step],\n 'start': self.start + datetime.timedelta(\n seconds=self.time_axis[soffset]),\n 'end': self.start + datetime.timedelta(\n seconds=self.time_axis[eoffset]),\n 't_init': self.t_init + self.time_axis[soffset],\n })\n return self.__class__(data, **params)\n\n # This accepting arbitrary arguments makes it easier to subclass this.\n def __new__(cls, data, *args, **kwargs):\n return np.asarray(data).view(cls)\n\n def __init__(self, data, time_axis, freq_axis, start, end, t_init=None,\n t_label=\"Time\", f_label=\"Frequency\", content=\"\"):\n # Because of how object creation works, there is no avoiding\n # unused arguments in this case.\n if t_init is None:\n diff = start - get_day(start)\n t_init = diff.seconds\n self.start = start\n self.end = end\n\n self.t_label = t_label\n self.f_label = f_label\n\n self.t_init = t_init\n\n self.time_axis = time_axis\n self.freq_axis = freq_axis\n\n self.content = content\n\n def time_formatter(self, x, pos):\n \"\"\" This returns the label for the tick of value x at\n a specified pos on the time axis. \"\"\"\n # Callback, cannot avoid unused arguments.\n # pylint: disable=W0613\n try:\n return self.format_time(\n self.start + datetime.timedelta(\n seconds=self.time_axis[int(x)]\n )\n )\n except IndexError:\n return None\n\n def freq_formatter(self, x, pos):\n \"\"\" This returns the label for the tick of value x at\n a specified pos on the frequency axis. \"\"\"\n # Callback, cannot avoid unused arguments.\n # pylint: disable=W0613\n try:\n return self.format_freq(self.freq_axis[x])\n except IndexError:\n return None \n\n def __array_finalize__(self, obj):\n if self is obj:\n return\n\n for prop, cpy in self.COPY_PROPERTIES:\n elem = getattr(obj, prop, None)\n if cpy == COPY:\n elem = copy(elem)\n if cpy == DEEPCOPY:\n elem = deepcopy(elem)\n\n setattr(self, prop, elem)\n \n @staticmethod\n def format_time(time):\n \"\"\" Override to configure default plotting \"\"\"\n return time.strftime(\"%H:%M:%S\")\n \n @staticmethod\n def format_freq(freq):\n \"\"\" Override to configure default plotting \"\"\"\n return \"%.1f\" % freq\n\n def show(self, *args, **kwargs):\n \"\"\" Draw spectrogram on figure with highest index or new one if\n none exists. For parameters see :py:meth:`plot`. \"\"\"\n nums = plt.get_fignums()\n figure = None\n if nums:\n figure = plt.figure(max(nums))\n self.plot(figure, *args, **kwargs).show()\n\n def plot(self, figure=None, overlays=[], colorbar=True, min_=None, max_=None, \n **matplotlib_args):\n \"\"\"\n Plot spectrogram onto figure.\n \n Parameters\n ----------\n figure : matplotlib.figure.Figure\n Figure to plot the spectrogram on. If None, new Figure is created.\n overlays : list\n List of overlays (functions that receive figure and axes and return\n new ones) to be applied after drawing.\n colorbar : bool\n Flag that determines whether or not to draw a colorbar. 
If existing\n figure is passed, it is attempted to overdraw old colorbar.\n min_ : float\n Clip intensities lower than min_ before drawing.\n max_ : float\n Clip intensities higher than max_ before drawing.\n \"\"\"\n # [] as default argument is okay here because it is only read.\n # pylint: disable=W0102,R0914\n\n data = np.array(self.clip(min_, max_))\n newfigure = figure is None\n if figure is None:\n figure = plt.figure(frameon=True)\n axes = figure.add_subplot(111)\n else:\n axes = figure.axes[0]\n \n params = {\n 'origin': 'lower',\n 'aspect': 'auto',\n }\n params.update(matplotlib_args)\n im = axes.imshow(data, **params)\n \n xa = axes.get_xaxis()\n ya = axes.get_yaxis()\n\n xa.set_major_formatter(\n FuncFormatter(self.time_formatter)\n )\n\n ya.set_major_locator(MaxNLocator(integer=True, steps=[1, 5, 10]))\n ya.set_major_formatter(\n FuncFormatter(self.freq_formatter)\n )\n \n axes.set_xlabel(self.t_label)\n axes.set_ylabel(self.f_label)\n figure.suptitle(self.content)\n \n for tl in xa.get_ticklabels():\n tl.set_fontsize(10)\n tl.set_rotation(30)\n figure.add_axes(axes)\n figure.subplots_adjust(bottom=0.2)\n figure.subplots_adjust(left=0.2)\n if colorbar:\n if newfigure:\n figure.colorbar(im).set_label(\"Intensity\")\n else:\n Colorbar(figure.axes[1], im).set_label(\"Intensity\")\n\n for overlay in overlays:\n figure, axes = overlay(figure, axes)\n \n for ax in figure.axes:\n ax.autoscale()\n \n return figure\n\n def __getitem__(self, key):\n only_y = not isinstance(key, tuple)\n \n if only_y:\n return super(Spectrogram, self).__getitem__(key)\n elif isinstance(key[0], slice) and isinstance(key[1], slice):\n return self._slice(key[0], key[1])\n elif isinstance(key[1], slice):\n # return Spectrum( # XXX: Right class\n # super(Spectrogram, self).__getitem__(key),\n # self.time_axis[key[1].start:key[1].stop:key[1].step]\n # )\n return np.array(super(Spectrogram, self).__getitem__(key))\n elif isinstance(key[0], slice):\n return Spectrum(\n super(Spectrogram, self).__getitem__(key),\n self.freq_axis[key[0].start:key[0].stop:key[0].step]\n )\n \n return super(Spectrogram, self).__getitem__(key)\n\n def clip_freq(self, min_=None, max_=None):\n \"\"\" Return a new spectrogram only consisting of frequencies\n in the interval [min_, max_].\n \n Parameters\n ----------\n min_ : float\n All frequencies in the result are larger than this.\n max_ : float\n All frequencies in the result are smaller than this.\n \"\"\"\n left = 0\n if max_ is not None:\n while self.freq_axis[left] > max_:\n left += 1\n\n right = len(self.freq_axis) - 1\n\n if min_ is not None:\n while self.freq_axis[right] < min_:\n right -= 1\n\n return self[left:right, :]\n\n def auto_const_bg(self):\n \"\"\" Automatically determine background. \"\"\"\n # pylint: disable=E1101,E1103\n data = self.astype(to_signed(self.dtype))\n # Subtract average value from every frequency channel.\n tmp = (data - np.average(self, 1).reshape(self.shape[0], 1))\n # Get standard deviation at every point of time.\n # Need to convert because otherwise this class's __getitem__\n # is used which assumes two-dimensionality.\n sdevs = np.asarray(np.std(tmp, 0))\n\n # Get indices of values with lowest standard deviation.\n cand = sorted(xrange(self.shape[0]), key=lambda y: sdevs[y])\n # Only consider the best 5 %.\n realcand = cand[:max(1, int(0.05 * len(cand)))]\n\n # Average the best 5 %\n bg = np.average(self[:, realcand], 1)\n return bg.reshape(self.shape[0], 1)\n\n def subtract_bg(self):\n \"\"\" Perform constant background subtraction. 
\"\"\"\n return self - self.auto_const_bg()\n\n def randomized_auto_const_bg(self, amount):\n \"\"\" Automatically determine background. Only consider a randomly\n chosen subset of the image.\n \n Parameters\n ----------\n amount : int\n Size of random sample that is considered for calculation of\n the background.\n \"\"\"\n cols = [randint(0, self.shape[1] - 1) for _ in xrange(amount)]\n\n # pylint: disable=E1101,E1103\n data = self.astype(to_signed(self.dtype))\n # Subtract average value from every frequency channel.\n tmp = (data - np.average(self, 1).reshape(self.shape[0], 1))\n # Get standard deviation at every point of time.\n # Need to convert because otherwise this class's __getitem__\n # is used which assumes two-dimensionality.\n tmp = tmp[:, cols]\n sdevs = np.asarray(np.std(tmp, 0))\n\n # Get indices of values with lowest standard deviation.\n cand = sorted(xrange(amount), key=lambda y: sdevs[y])\n # Only consider the best 5 %.\n realcand = cand[:max(1, int(0.05 * len(cand)))]\n\n # Average the best 5 %\n bg = np.average(self[:, [cols[r] for r in realcand]], 1)\n\n return bg.reshape(self.shape[0], 1)\n \n def randomized_subtract_bg(self, amount):\n \"\"\" Perform randomized constant background subtraction. \n Does not produce the same result every time it is run.\n \n Parameters\n ----------\n amount : int\n Size of random sample that is considered for calculation of\n the background.\n \"\"\"\n return self - self.randomized_auto_const_bg(amount)\n \n def clip(self, min_=None, max_=None):\n \"\"\" Clip intensities to be in the interval [min_, max_]. Any values\n greater than the maximum will be assigned the maximum, any values\n lower than the minimum will be assigned the minimum. If either is\n left out or None, do not clip at that side of the interval.\n \n Parameters\n ----------\n min_ : int or float\n New minimum value for intensities.\n max_ : int or float\n New maximum value for intensities\n \"\"\"\n # pylint: disable=E1101\n if min_ is None:\n min_ = int(self.min())\n\n if max_ is None:\n max_ = int(self.max())\n\n new = self.copy()\n new[new < min_] = min_\n new[new > max_] = max_\n\n return new\n\n def rescale(self, min_=0, max_=1, dtype_=np.dtype('float32')):\n u\"\"\" Rescale intensities to [min_, max_]. 
Note that min_ ≠ max_\n        and spectrogram.min() ≠ spectrogram.max().\n        \n        Parameters\n        ----------\n        min_ : float or int\n            New minimum value in the resulting spectrogram.\n        max_ : float or int\n            New maximum value in the resulting spectrogram.\n        dtype_ : np.dtype\n            Data-type of the resulting spectrogram.\n        \"\"\"\n        if max_ == min_:\n            raise ValueError(\"Maximum and minimum must be different.\")\n        if self.max() == self.min():\n            raise ValueError(\"Spectrogram needs to contain distinct values.\")\n        data = self.astype(dtype_) # pylint: disable=E1101\n        return (\n            min_ + (max_ - min_) * (data - self.min()) / # pylint: disable=E1101\n            (self.max() - self.min()) # pylint: disable=E1101\n        )\n\n    def interpolate(self, frequency):\n        \"\"\" Linearly interpolate intensity at unknown frequency using linear\n        interpolation of its two neighbours.\n        \n        Parameters\n        ----------\n        frequency : float or int\n            Unknown frequency for which to linearly interpolate the intensities.\n            freq_axis[0] >= frequency >= self.freq_axis[-1]\n        \"\"\"\n        lfreq, lvalue = None, None\n        for freq, value in izip(self.freq_axis, self[:, :]):\n            if freq < frequency:\n                break\n            lfreq, lvalue = freq, value\n        else:\n            raise ValueError(\"Frequency not in interpolation range\")\n        if lfreq is None:\n            raise ValueError(\"Frequency not in interpolation range\")\n        diff = frequency - freq # pylint: disable=W0631\n        ldiff = lfreq - frequency\n        return (ldiff * value + diff * lvalue) / (diff + ldiff) # pylint: disable=W0631\n\n    @staticmethod\n    def _merge(items, key=lambda x: x):\n        \"\"\" Implementation detail. \"\"\"\n        state = {}\n        for item in map(iter, items):\n            try:\n                first = item.next()\n            except StopIteration:\n                continue\n            else:\n                state[item] = (first, key(first))\n\n        while state:\n            for item, (value, tk) in state.iteritems():\n                # Value is biggest.\n                if all(tk >= k for it, (v, k)\n                       in state.iteritems() if it is not item):\n                    yield value\n                    break\n            try:\n                n = item.next()\n                state[item] = (n, key(n))\n            except StopIteration:\n                del state[item]\n\n    def linearize_freqs(self, delta_freq=None):\n        \"\"\" Rebin frequencies so that the frequency axis is linear.\n        \n        Parameters\n        ----------\n        delta_freq : float\n            Difference between consecutive values on the new frequency axis.\n            Defaults to half of smallest delta in current frequency axis.\n        \"\"\"\n        if delta_freq is None:\n            delta_freq = (self.freq_axis[:-1] - self.freq_axis[1:])\n            # Multiple values at the same frequency are just thrown away\n            # in the process of linearization\n            delta_freq = delta_freq[delta_freq != 0].min() / 2.\n        nsize = (self.freq_axis.max() - self.freq_axis.min()) / delta_freq + 1\n        new = np.zeros((nsize, self.shape[1]), dtype=self.dtype)\n\n        freqs = self.freq_axis - self.freq_axis.min()\n        freqs = freqs / delta_freq\n\n        midpoints = np.round((freqs[:-1] + freqs[1:]) / 2)\n        fillto = np.concatenate(\n            [midpoints, np.round([freqs[-1]])]\n        )\n        fillfrom = np.concatenate(\n            [np.round([freqs[0] + 1]), midpoints]\n        )\n\n        for row, from_, to_ in izip(self, fillfrom, fillto):\n            new[to_:from_] = row\n\n        vrs = self.get_params()\n        vrs.update({\n            'freq_axis': np.linspace(\n                self.freq_axis.max(), self.freq_axis.min(), nsize\n            )\n        })\n\n        return self.__class__(new, **vrs)\n\n    def freq_overlap(self, other):\n        \"\"\" Get frequency range present in both spectrograms. 
Returns\n        (min, max) tuple.\n        \n        Parameters\n        ----------\n        other : Spectrogram\n            other spectrogram with which to look for frequency overlap\n        \"\"\"\n        lower = max(self.freq_axis[-1], other.freq_axis[-1])\n        upper = min(self.freq_axis[0], other.freq_axis[0])\n        if lower > upper:\n            raise ValueError(\"No overlap.\")\n        return lower, upper\n    \n    def time_to_x(self, time):\n        \"\"\" Return x-coordinate in spectrogram that corresponds to the\n        passed datetime value.\n        \n        Parameters\n        ----------\n        time : parse_time compatible\n            Datetime to find the x coordinate for.\n        \"\"\" \n        diff = time - self.start\n        diff_s = SECONDS_PER_DAY * diff.days + diff.seconds\n        if not 0 <= diff_s <= self.time_axis[-1]:\n            raise ValueError(\"Out of bounds\")\n        for n, elem in enumerate(self.time_axis):\n            if diff_s < elem:\n                return n - 1\n        # The last element is the searched one.\n        return n\n\n\nclass LinearTimeSpectrogram(Spectrogram):\n    \"\"\" Spectrogram evenly sampled in time.\n    \n    Additional (not inherited) parameters\n    -------------------------------------\n    t_delt : float\n        difference between the items on the time axis\n    \"\"\"\n    # pylint: disable=E1002\n    COPY_PROPERTIES = Spectrogram.COPY_PROPERTIES + [\n        ('t_delt', REFERENCE),\n    ]\n    \n    def __init__(self, data, time_axis, freq_axis, start, end,\n                 t_init, t_delt, t_label=\"Time\", f_label=\"Frequency\",\n                 content=\"\"):\n        super(LinearTimeSpectrogram, self).__init__(\n            data, time_axis, freq_axis, start, end, t_init, t_label, f_label,\n            content\n        )\n        self.t_delt = t_delt\n\n    @staticmethod\n    def make_array(shape, dtype_=np.dtype('float32')):\n        \"\"\" Function to create an array with shape and dtype.\n        \n        Parameters\n        ----------\n        shape : tuple\n            shape of the array to create\n        dtype_ : np.dtype\n            data-type of the array to create\n        \"\"\"\n        return np.zeros(shape, dtype=dtype_)\n\n    @staticmethod\n    def memmap(filename):\n        \"\"\" Return function that takes shape and dtype and returns a\n        memory mapped array.\n        \n        Parameters\n        ----------\n        filename : str\n            File to store the memory mapped array in.\n        \"\"\"\n        return (\n            lambda shape, dtype_=np.dtype('float32'): np.memmap(\n                filename, mode=\"write\", shape=shape, dtype=dtype_\n            )\n        )\n    \n    def resample_time(self, new_delt):\n        \"\"\" Rescale image so that the difference in time between pixels is\n        new_delt seconds.\n        \n        Parameters\n        ----------\n        new_delt : float\n            New delta between consecutive values.\n        \"\"\"\n        if self.t_delt == new_delt:\n            return self\n        factor = self.t_delt / float(new_delt)\n\n        # The last data-point does not change!\n        new_size = floor((self.shape[1] - 1) * factor + 1) # pylint: disable=E1101\n        data = ndimage.zoom(self, (1, new_size / self.shape[1])) # pylint: disable=E1101\n\n        params = self.get_params()\n        params.update({\n            'time_axis': np.linspace(\n                self.time_axis[0],\n                self.time_axis[(new_size - 1) * new_delt / self.t_delt],\n                new_size\n            ),\n            't_delt': new_delt,\n        })\n        return self.__class__(data, **params)\n    \n    @classmethod\n    def join_many(cls, specs, mk_arr=None, nonlinear=False,\n                  maxgap=0, fill=0):\n        \"\"\" Produce new Spectrogram that contains spectrograms\n        joined together in time.\n        \n        Parameters\n        ----------\n        specs : list\n            List of spectrograms to join together in time.\n        nonlinear : bool\n            If True, leave out gaps between spectrograms. Else, fill them with\n            the value specified in fill. \n        maxgap : float, int or None\n            Largest gap to allow in seconds. 
If None, allow gap of arbitrary\n            size.\n        fill : float or int\n            Value to fill missing values (assuming nonlinear=False) with.\n        mk_arr : function\n            Function that is called to create the resulting array. Can be set\n            to LinearTimeSpectrogram.memmap(filename) to create a memory mapped\n            result array.\n        \"\"\"\n        # XXX: Only load header and load contents of files\n        # on demand.\n        mask = None\n\n        if mk_arr is None:\n            mk_arr = cls.make_array\n\n        specs = sorted(specs, key=lambda x: x.start)\n\n        freqs = specs[0].freq_axis\n        if not all(np.array_equal(freqs, sp.freq_axis) for sp in specs):\n            raise ValueError(\"Frequency channels do not match.\")\n\n        # Smallest time-delta becomes the common time-delta.\n        min_delt = min(sp.t_delt for sp in specs)\n        dtype_ = max(sp.dtype for sp in specs)\n\n        specs = [sp.resample_time(min_delt) for sp in specs]\n        size = sum(sp.shape[1] for sp in specs)\n\n        data = specs[0]\n        init = data.t_init\n        start_day = data.start\n\n        xs = []\n        last = data\n        for elem in specs[1:]:\n            e_init = (\n                SECONDS_PER_DAY * (\n                    get_day(elem.start) - get_day(start_day)\n                ).days + elem.t_init\n            )\n            x = int((e_init - last.t_init) / min_delt)\n            xs.append(x)\n            diff = last.shape[1] - x\n\n            if maxgap is not None and -diff > maxgap / min_delt:\n                raise ValueError(\"Too large gap.\")\n\n            # If we leave out undefined values, we do not want to\n            # add values here if x > t_res.\n            if nonlinear:\n                size -= max(0, diff)\n            else:\n                size -= diff\n\n            last = elem\n\n        # The non-existing element after the last one starts after\n        # the last one. Needed to keep implementation below sane.\n        xs.append(specs[-1].shape[1])\n\n        # We do that here so the user can pass a memory mapped\n        # array if they'd like to.\n        arr = mk_arr((data.shape[0], size), dtype_)\n        time_axis = np.zeros((size,))\n        sx = 0\n        # Amount of pixels left out due to nonlinearity. 
Needs to be\n        # considered for correct time axes.\n        sd = 0\n        for x, elem in izip(xs, specs):\n            diff = x - elem.shape[1]\n            e_time_axis = elem.time_axis\n            \n            if x > elem.shape[1]:\n                if nonlinear:\n                    x = elem.shape[1]\n                else:\n                    # If we want to stay linear, fill up the missing\n                    # pixels with placeholder zeros.\n                    filler = np.zeros((data.shape[0], diff))\n                    filler[:] = fill\n                    minimum = elem.time_axis[-1]\n                    e_time_axis = np.concatenate([\n                        elem.time_axis,\n                        np.linspace(\n                            minimum + min_delt,\n                            minimum + diff * min_delt,\n                            diff\n                        )\n                    ])\n                    elem = np.concatenate([elem, filler], 1)\n            \n            arr[:, sx:sx + x] = elem[:, :x]\n            if diff > 0:\n                if mask is None:\n                    mask = np.zeros((data.shape[0], size), dtype=np.uint8)\n                mask[:, sx + x - diff:sx + x] = 1\n            time_axis[sx:sx + x] = e_time_axis[:x] + data.t_delt * (sx + sd)\n            if nonlinear:\n                sd += max(0, diff)\n            sx += x\n        params = {\n            'time_axis': time_axis,\n            'freq_axis': data.freq_axis,\n            'start': data.start,\n            'end': specs[-1].end,\n            't_delt': data.t_delt,\n            't_init': data.t_init,\n            't_label': data.t_label,\n            'f_label': data.f_label,\n            'content': data.content,\n        }\n        if mask is not None:\n            arr = ma.array(arr, mask=mask)\n        if nonlinear:\n            del params['t_delt']\n            return Spectrogram(arr, **params)\n        return LinearTimeSpectrogram(arr, **params)\n\n    def time_to_x(self, time):\n        \"\"\" Return x-coordinate in spectrogram that corresponds to the\n        passed datetime value.\n        \n        Parameters\n        ----------\n        time : parse_time compatible\n            Datetime to find the x coordinate for.\n        \"\"\"\n        # This is impossible for frequencies because that mapping\n        # is not injective.\n        time = parse_time(time)\n        diff = time - self.start\n        diff_s = SECONDS_PER_DAY * diff.days + diff.seconds\n        result = diff_s // self.t_delt\n        if 0 <= result < self.shape[1]: # pylint: disable=E1101\n            return result\n        raise ValueError(\"Out of range.\")\n\n    @staticmethod\n    def intersect_time(specs):\n        \"\"\" Return slice of spectrograms that is present in all of the ones\n        passed.\n        \n        Parameters\n        ----------\n        specs : list\n            List of spectrograms of which to find the time intersections.\n        \"\"\"\n        delt = min(sp.t_delt for sp in specs)\n        start = max(sp.t_init for sp in specs)\n\n        # XXX: Could do without resampling by using\n        # sp.t_init below, not sure if good idea.\n        specs = [sp.resample_time(delt) for sp in specs]\n        cut = [sp[:, (start - sp.t_init) / delt:] for sp in specs]\n\n        length = min(sp.shape[1] for sp in cut)\n        return [sp[:, :length] for sp in cut]\n\n    @classmethod\n    def combine_frequencies(cls, specs):\n        \"\"\" Return new spectrogram that contains frequencies from all the\n        spectrograms in specs. 
Only returns time intersection of all of them.\n        \n        Parameters\n        ----------\n        specs : list\n            List of spectrograms of which to combine the frequencies into one.\n        \"\"\"\n        specs = cls.intersect_time(specs)\n\n        one = specs[0]\n\n        dtype_ = max(sp.dtype for sp in specs)\n        fsize = sum(sp.shape[0] for sp in specs)\n\n        new = np.zeros((fsize, one.shape[1]), dtype=dtype_)\n\n        freq_axis = np.zeros((fsize,))\n\n\n        for n, (data, row) in enumerate(cls._merge(\n            [\n                [(sp, n) for n in xrange(sp.shape[0])] for sp in specs\n            ],\n            key=lambda x: x[0].freq_axis[x[1]]\n        )):\n            new[n, :] = data[row, :]\n            freq_axis[n] = data.freq_axis[row]\n        params = {\n            'time_axis': one.time_axis, # Should be equal\n            'freq_axis': freq_axis,\n            'start': one.start,\n            'end': one.end,\n            't_delt': one.t_delt,\n            't_init': one.t_init,\n            't_label': one.t_label,\n            'f_label': one.f_label,\n            'content': one.content,\n        }\n        return LinearTimeSpectrogram(new, **params)\n\n    def check_linearity(self, err=None, err_factor=None):\n        \"\"\" Check linearity of time axis. If err is given, tolerate absolute\n        deviation from the average delta up to err. If err_factor is given,\n        tolerate up to err_factor * average_delta. If both are given,\n        TypeError is raised. Defaults to err=0.\n        \n        Parameters\n        ----------\n        err : float\n            Absolute difference each delta is allowed to diverge from the\n            average. Cannot be used in combination with err_factor.\n        err_factor : float\n            Relative difference each delta is allowed to diverge from the\n            average, i.e. err_factor * average. Cannot be used in combination\n            with err.\n        \"\"\"\n        deltas = self.time_axis[:-1] - self.time_axis[1:]\n        avg = np.average(deltas)\n        if err is None and err_factor is None:\n            err = 0\n        elif err is None:\n            err = abs(err_factor * avg)\n        elif err_factor is not None:\n            raise TypeError(\"Only supply err or err_factor, not both\")\n        return (abs(deltas - avg) <= err).all()\n    \n    def in_interval(self, start=None, end=None):\n        \"\"\" Return part of spectrogram that lies in [start, end).\n        \n        Parameters\n        ----------\n        start : None or datetime or parse_time compatible string or time string\n            Start time of the part of the spectrogram that is returned. 
If the\n measurement only spans over one day, a colon seperated string\n representing the time can be passed.\n end : None or datetime or parse_time compatible string or time string\n See start.\n \"\"\"\n if start is not None:\n try:\n start = parse_time(start)\n except ValueError:\n # XXX: We could do better than that.\n if get_day(self.start) != get_day(self.end):\n raise TypeError(\n \"Time ambiguous because data spans over more than one day\"\n )\n start = datetime.datetime(\n self.start.year, self.start.month, self.start.day,\n *map(int, start.split(\":\"))\n )\n start = self.time_to_x(start)\n if end is not None:\n try:\n end = parse_time(end)\n except ValueError:\n if get_day(self.start) != get_day(self.end):\n raise TypeError(\n \"Time ambiguous because data spans over more than one day\"\n ) \n end = datetime.datetime(\n self.start.year, self.start.month, self.start.day,\n *map(int, end.split(\":\"))\n )\n end = self.time_to_x(end)\n return self[:, start:end]\n","sub_path":"sunpy/spectra/spectrogram.py","file_name":"spectrogram.py","file_ext":"py","file_size_in_byte":32265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"422929939","text":"import time\nfrom rest_framework import serializers\n\n\nclass ValidationEmpty:\n\t\"\"\"Валидация пустого запроса\"\"\"\n\n\trequires_context = True\n\n\tdef __call__(self, data, serializer):\n\t\tif len(serializer.initial_data) == 0:\n\t\t\traise serializers.ValidationError({'empty_data': 'Запрос не должен быть пустым.'})\n\t\treturn data\n\n\nclass ValidationFields:\n\t\"\"\"Валидация неописанных полей\"\"\"\n\n\trequires_context = True\n\n\tdef __call__(self, data, serializer):\n\t\tif len(set(serializer.initial_data.keys()) - set(serializer.fields.keys())) != 0:\n\t\t\traise serializers.ValidationError({\n\t\t\t\t'fields': 'Получены неописанные поля: %s.' % ', '.join(\n\t\t\t\t\tset(serializer.initial_data.keys()) - set(serializer.fields.keys())\n\t\t\t\t)\n\t\t\t})\n\t\treturn data\n\n\nclass ValidationTimeFormat:\n\t\"\"\"Валидация формата времени\"\"\"\n\n\tdef __call__(self, value):\n\t\terror = {'time_format': 'Неверный формат времени. 
Нужен: HH:MM-HH:MM'}\n\t\tfor time_str in value:\n\t\t\ttime_list = time_str.split('-')\n\t\t\tif len(time_list) != 2:\n\t\t\t\traise serializers.ValidationError(error)\n\t\t\ttry:\n\t\t\t\ttime.strptime(time_list[0], '%H:%M')\n\t\t\t\ttime.strptime(time_list[1], '%H:%M')\n\t\t\texcept ValueError:\n\t\t\t\traise serializers.ValidationError(error)\n\t\treturn value\n\n\nclass ValidationCourierID:\n\t\"\"\"Валидация ID курьера на наличие его в базе\"\"\"\n\n\tdef __init__(self, courier):\n\t\tself.courier = courier\n\n\tdef __call__(self, value):\n\t\terror = {'not_found': 'Курьер с таким идентификатором не найден.'}\n\t\ttry:\n\t\t\tself.courier.objects.get(courier_id=value)\n\t\t\treturn value\n\t\texcept self.courier.DoesNotExist:\n\t\t\traise serializers.ValidationError(error)\n\n\nclass ValidationOrderID:\n\t\"\"\"Валидация ID заказа на наличие его в базе\"\"\"\n\n\tdef __init__(self, order):\n\t\tself.order = order\n\n\tdef __call__(self, value):\n\t\terror = {'not_found': 'Заказ с таким идентификатором не найден.'}\n\t\ttry:\n\t\t\tself.order.objects.get(order_id=value)\n\t\t\treturn value\n\t\texcept self.order.DoesNotExist:\n\t\t\traise serializers.ValidationError(error)\n\n\nclass ValidationOrderBelongsCourier:\n\t\"\"\"Валидация отношения заказа к курьеру\"\"\"\n\n\tdef __init__(self, courier, order, order_detail):\n\t\tself.order_detail = order_detail\n\t\tself.courier = courier\n\t\tself.order = order\n\n\tdef __call__(self, data):\n\t\terror = {'belongs_courier': 'Заказ не принадлежит курьеру.'}\n\t\ttry:\n\t\t\torder = self.order.objects.get(order_id=data.get('order_id'))\n\t\t\tcourier = self.courier.objects.get(courier_id=data.get('courier_id'))\n\t\t\tself.order_detail.objects.get(order=order, courier=courier)\n\t\t\treturn data\n\t\texcept self.order_detail.DoesNotExist:\n\t\t\traise serializers.ValidationError(error)\n","sub_path":"core/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"472792036","text":"# 定义一个程序员类\nclass Programmer(object):\n hobby = 'Computer science' # 类属性\n\n # 创建类实例的时候会先调用 __new__ 然后才是 __init__\n def __new__(cls, *args, **kwargs):\n print(\"Programmer __new__ called\")\n # 重写object子类的__new__方法时,只要传入cls参数即可,后两个参数不要传,不然报错: TypeError: object() takes no parameters\n return super(Programmer, cls).__new__(cls)\n\n def __init__(self, name, age, weight):\n self.name = name # 对象变量,可以直接访问,类似public\n self._age = age # 避免直接访问, 类似private\n self.__weight = weight # 类似private\n\n # 该注解表明本方法返回对象属性,使用时不要添加括号,测试加括号的话会报错: TypeError: 'int' object is not callable\n @property\n def get_weight(self):\n return self.__weight\n\n # 该注解表明本方法为类方法\n @classmethod\n def get_hobby(cls):\n return cls.hobby\n\n # 判断两个对象是否相等\n def __eq__(self, other):\n if isinstance(other, Programmer):\n if self._age == other._age:\n return True\n else:\n return False\n else:\n raise Exception(\"The type of object must be Programmer\")\n\n # 重写方法,用于将对象转换为字符串输出信息\n # python 中有三个可以输出对象的函数 __str__ , __repr__ , __unicode__\n def __str__(self):\n return '%s is %s years old' % (self.name, self._age)\n\n\n# 后端程序员类\nclass BackProgrammer(Programmer):\n def __new__(cls, *args, **kwargs):\n print(\"BackProgrammer __new__ called\")\n # 普通类的子类,__new__方法需要传入所有参数\n return super(BackProgrammer, cls).__new__(cls, *args, **kwargs)\n\n def __init__(self, name, age, weight, language):\n # 使用父类方法, super关键字\n super(BackProgrammer, self).__init__(name, age, weight)\n self.language = 
language\n\n\nif __name__ == '__main__':\n programmer = BackProgrammer('lynxz', 30, 75, 'Python')\n # print(dir(programer)) # 打印所有属性\n print(Programmer.get_hobby()) # 类方法,使用类名来调用即可\n print(programmer.get_weight) # 已添加注解 @property ,调用时不要加括号\n print(programmer.__dict__) # 打印对象变量\n print(\"%s %s %s\" % (programmer.name, programmer.get_weight, programmer._Programmer__weight)) # 字符串模板\n print(type(programmer)) # 打印对象类型 => \n print(isinstance(programmer, Programmer)) # 判断对象是否是某个类型的实例\n print(programmer.__str__())\n","sub_path":"basic_knowledge/FieldDemo.py","file_name":"FieldDemo.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"124880113","text":"train_entries = open('../dataset/train_val_list.txt')\ntest_entries = open('../dataset/test_list.txt')\n\ntrain_list = []\nline = train_entries.readline()\n\nwhile line != \"\":\n train_list.append(line.strip())\n line = train_entries.readline()\n\ntest_list = []\nline = test_entries.readline()\nwhile line != \"\":\n test_list.append(line.strip())\n line = test_entries.readline()\n\n\n\nprint(len(train_list))\nprint(len(test_list))\n\nall_images_path = 'all_images'\ntrain_dir_path = 'train'\ntest_dir_path = 'test'\n\nscript = '#!/bin/bash\\n'\nfor image in train_list:\n script += 'cp ' + all_images_path + '/' + image + ' ' + train_dir_path + ';\\n'\n\nscript += '\\n'\n\nfor image in test_list:\n script += 'cp ' + all_images_path + '/' + image + ' ' + test_dir_path + ';\\n'\n\n\noutputFile = open('organize-train-test-images.sh', 'w')\n\noutputFile.write(script)\noutputFile.flush()\noutputFile.close()\n\n","sub_path":"scripts/organize-train-test-images.py","file_name":"organize-train-test-images.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"515656675","text":"import os\nfrom matplotlib import pyplot as plt\nimport csv\n\nruta = \"./CrimesUK_2011_2017/\"\nfecha = os.listdir(ruta) #lista los directorios que se encuentran dentro de la carpeta principal\nfecha.sort() #ordena el arreglo de menor a mayor\n\narchivo = open(ruta+\"2011-01/2011-01-avon-and-somerset-street.csv\")\ncampos = []\nfor line in archivo:\n\tcampos = line\n\tbreak\narchivo.close()\ncampos2 = []\nfor i in range(12):\n\tcampos2.append(campos.split(\",\")[i])\ncamposF = [0]*12\nresultados = open(\"campos_faltantes.txt\", \"w\")\nfor f in fecha:\n\trutaSecundaria = ruta+f+\"/\"\n\tarchivos = os.listdir(rutaSecundaria)\n\tarchivos.sort()\n\tfor e in archivos:\n\t\tarchivo = open(rutaSecundaria+e) #abrir el archivo\n\t\tcLine = 0 #variable para que no cuente la primera línea, que es la del encabezado de cada archivo\n\t\tfor line in archivo:\n\t\t\tif(cLine!=0):\n\t\t\t\tfor r in range(12):\n\t\t\t\t\tfaltantes = 0\n\t\t\t\t\tif(list(csv.reader([line.strip()],delimiter=','))[0][r]==\"\"):\n\t\t\t\t\t\tfaltantes+=1\n\t\t\t\t\tcamposF[r]+=faltantes\n\t\t\tcLine+=1\n\n#sacar porcentajes\ntotalRegistros = 41286964\nfor l in range(len(camposF)):\n\tporcentaje = round(((camposF[l]*100)/totalRegistros),2)\n\tcadena = str(camposF[l])+\"-\"+str(porcentaje)\n\tresultados.write(cadena)\n\tresultados.write('\\n')\n\n#--------------------------------------GRAFICAR-------------------------------------------\n#este codigo si muestra la grafica, pero las cantidades las desordena y no hay una distribucion correcta\n#se utilizo el archivo txt generado y la grafica se hizo en excel\narchivoDatos = 
open(\"./campos_faltantes.txt\")\ni=0\ncamposPor = [0]*12\nfor line in archivoDatos:\n\tcamposF[i] = line.split(\"-\")[0]\n\tcamposPor[i] = line.split(\"-\")[1]\n\ti+=1\n\nfig = plt.figure(\"Cantidad de datos faltantes por campo\")\nax = fig.add_subplot(111)\nxx = range(len(camposF))\n\nax.bar(xx, camposF, color=['blue','red','green','orange','brown','purple','pink','black','gray','yellow','violet','gold'])\nax.set_xticks(xx)\nax.set_xticklabels(campos2, rotation=90)\nplt.xlabel(\"Nombre de los campos\")\nplt.ylabel('Cantidad de datos faltantes')\n\nrects = ax.patches\n\nfor rect, label, i in zip(rects,camposPor,xx):\n\tax.text(rect.get_width()+i-1,rect.get_height()+0.2,label+\"%\",fontsize=10)\n \nplt.show()\n","sub_path":"pregunta20.py","file_name":"pregunta20.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"360397707","text":"\nimport numpy as np\nimport sklearn as sk\nimport sklearn.utils as sk_utils\nfrom sklearn.model_selection import train_test_split\nfrom random import uniform\nimport pandas as pd\nimport decimal\nimport matplotlib.pyplot as plt\n\n#**********************FUNÇÃO DE ATIVAÇÃO***************************\ndef getFnet(net):\n return (1 / (1 + np.exp(-net)))\n \n#**********************DERIVADA DA FUNÇÃO DE ATIVAÇÃO***************\ndef getDerFnet(fnet):\n return (np.multiply(fnet, (1 - fnet)))\n\n#**********************MODELO INICIAL DA ARQUITETURA****************\nclass MLP_Arquitetura:\n \n def __init__(self, entrada, oculta, saida):\n \n self.entrada = entrada\n self.oculta = oculta\n self.saida = saida\n self.hidden_model = self.initHiddenModel()\n self.output_model = self.initOutputModel()\n \n #**********************INICIAR O MODELO DA CAMADA OCULTA********\n def initHiddenModel(self):\n \n #************PESOS E THETAS PARA A CAMADA OCULTA************\n vetor_oculta = np.array([])\n cont = 0\n while(cont < (self.oculta * (self.entrada+1))):\n vetor_oculta = np.append(vetor_oculta,uniform(-1,1))\n cont+=1\n \n aux = vetor_oculta.reshape(self.oculta, (self.entrada+1))\n return np.asmatrix(aux)\n \n #**********************INICIAR O MODELO DA CAMADA DE SAÍDA******\n def initOutputModel(self):\n \n #************PESOS E THETAS PARA A CAMADA DE SAÍDA**********\n vetor_saida = np.array([])\n cont = 0\n while(cont < (self.saida * (self.oculta+1))):\n vetor_saida = np.append(vetor_saida,uniform(-1,1))\n cont+=1\n \n aux = vetor_saida.reshape(self.saida, (self.oculta+1))\n return np.asmatrix(aux)\n\n#**********************FNET = ONDE GUARDO OS RESULTADOS**********************\nclass Fnet():\n\n net_hidden = []\n fnet_hidden = []\n net_output = []\n fnet_output = []\n\n##########################################################################################\n\n# NET = SOMATÓRIA DOS PESOS PELA ENTRADA\ndef MLP_Forward(model, teste, fnet):\n\t\n\t#**********VETOR DE ENTRADAS**********\n teste = np.squeeze(np.asarray(teste))\n teste = np.append(teste,1)\n\t\n\t#*****VETOR DE ENTRADAS TRANSPOSTO*****\n hidden = np.asmatrix(model.hidden_model)\n \n teste = np.asmatrix(teste)\n teste = np.transpose(teste)\n \n #********** SAÍDA DA CAMADA OCULTA**********\n net_hidden = hidden*teste\n \n fnet.net_hidden = net_hidden\n\n fnet_hidden = getFnet(net_hidden)\n fnet.fnet_hidden = fnet_hidden\n \n #*******************************************\n output = model.output_model\n output = np.asmatrix(output)\n \n fnet_hidden = np.squeeze(np.asarray(fnet_hidden))\n fnet_hidden = 
np.append(fnet_hidden,1)\n fnet_hidden = np.asmatrix(fnet_hidden) \n fnet_hidden = np.transpose(fnet_hidden)\n \n #********** SAÍDA DA CAMADA DE SAÍDA**********\n net_output = output*fnet_hidden\n \n fnet.net_output = net_output \n \n\n fnet_output = getFnet(net_output)\n \n fnet.fnet_output = fnet_output\n #*******************************************\n\n return fnet\n\ndef MLP_Backpropagation(model, dataset, eta, limiar, epocas):\n\n limiar_backpropagation = 2 * limiar\n cont_epocas = 0\n dados_saida1 = []\n dados_saida2 = []\n resultados = Fnet()\n\t\t\n while(limiar_backpropagation > limiar and cont_epocas <= epocas):\n \n limiar_backpropagation = 0\n dataset=np.asmatrix(dataset)\n\n for linha in range(dataset.A.shape[0]):\n \n entradas_desejadas = dataset[linha,0:model.entrada]\n saidas_desejadas = dataset[linha,model.entrada:dataset.A.shape[1]]\n \n #*********************FORWARD**********************\n resultados = MLP_Forward(model , entradas_desejadas,resultados)\n\n \n # print(\"Saida camada intermediaria: \", dados_saida1)\n \n #*********************ERROS************************\n error = saidas_desejadas - np.transpose(resultados.fnet_output)\n \n limiar_backpropagation += np.sum(np.power(error,2))\n \n #*********************DELTA DA SAÍDA************************\n\t\t\t\t # delta_o = (Yp - Op) * f_o_p'(net_o_p)\n delta_saida = np.multiply(error , np.transpose(getDerFnet(resultados.fnet_output)))\n \n #***********************************************************\n \n #*********************DELTA DA OCULTA***********************\n # delta_hidden = f_h_p'(net_h_p) * sum =(delta_o * w_o_kj)\n w_o=np.asmatrix(model.output_model[:,0:model.oculta])\n \n delta_oculta = np.multiply(np.transpose(getDerFnet(resultados.fnet_hidden)),(delta_saida*w_o))\n \n\t\t\t\t #*********************TREINAMENTO***************************\n fnet1 = np.squeeze(np.asarray(resultados.fnet_hidden))\n fnet1 = np.append(fnet1,1)\n fnet1 = np.asmatrix(fnet1)\n\n print(\"Saida camada intermediaria: \", fnet1[0,0])\n dados_saida1.append(fnet1[0,0])\n print(\"Saida camada intermediaria: \", fnet1[0,1])\n dados_saida2.append(fnet1[0,1])\n\n print(fnet1[0,1])\n # Treinamento da camada de saída\n # w(t+1) = w(t) + eta * dE2_o * i_pj\n \n #************************momentum * modelo***************************\n aux_eta = eta*(np.transpose(delta_saida)*fnet1)\n \n #*****MODELO OUTPU ANTIGO*****\n modelo_output_antigo = model.output_model\n \n #*****MODELO OUTPUT NOVO*****\n modelo_output_novo = model.output_model+aux_eta\n \n #*****INCLUSÃO DO TERMO MOMENTUM E ATUALIZAÇÃO DOS PESOS*****\n model.output_model = model.output_model + (modelo_output_novo - modelo_output_antigo) * 0.5\n \n x1=np.squeeze(np.asarray(entradas_desejadas))\n x1=np.append(x1,1)\n x1=np.asmatrix(x1) \n \n #************************momentum * modelo***************************\n \n aux_eta = eta*(np.transpose(delta_oculta)*x1)\n \n model.hidden_model = model.hidden_model+aux_eta\n \n #*****MODELO OUTPU ANTIGO*****\n modelo_hidden_antigo = model.hidden_model\n \n #*****MODELO OUTPUT NOVO*****\n modelo_hidden_novo = model.hidden_model+aux_eta\n \n #*****INCLUSÃO DO TERMO MOMENTUM E ATUALIZAÇÃO DOS PESOS*****\n model.hidden_model = model.hidden_model + (modelo_hidden_novo - modelo_hidden_antigo) * 0.5\n \n limiar_backpropagation = limiar_backpropagation / dataset.A.shape[0]\n \n #print('Erro Médio Quadrático = ',limiar_backpropagation)\n cont_epocas +=1\n\n print('--------------TERMINOU--------------')\n print('Quantidade de Épocas = ',cont_epocas)\n 
#print(dados_saida1)\n\n plt.plot(dados_saida1, label='MLP Treinamento')\n plt.plot(dados_saida2, label='MLP')\n\n plt.xlabel('quantidade dados')\n plt.ylabel('Acurácia (%)')\n plt.title(\"Dados das duas saidas\")\n plt.tight_layout()\n plt.show()\n \n if(cont_epocas < epocas):\n print('--------------TREINADO--------------')\n \n return(model, resultados, cont_epocas)\n\n##########################################################################################\n\n#QUANTIDADE DE NEURÔNIOS NAS CAMADAS\nqtd_entradas = 7\nqtd_saidas = 3\nv_ocultas = [2]\n\n#TAXA DE APRENDIZADO\neta = 0.5\n\n#LIMITE MÁXIMO DE ÉPOCAS\nepocas = 10\n\n#LIMITE MÁXIMO DE ÉPOCAS\nporc_teste = 0.2\n\n#LIMIAR \nlimiar = 0.00001\n \n#****************************************LEITURA DA BASE DE DADOS**********************************\ndataset = pd.read_csv('seeds.csv')\n\n\nlabels = dataset['S1'].values\nlabels = set(labels)\n\n#*********************************************PRÉ-PROCESSAMENTO************************************\n\ndataset_bin = pd.DataFrame(columns= ['E1','E2','E3','E4','E5','E6','E7','S1','S2','S3'])\n\nfor x in labels:\n \n d = dataset[dataset['S1'] == x]\n \n if(x == 1):\n d.loc[:,'S1'] = 0\n d.loc[:,'S2'] = 0\n d.loc[:,'S3'] = 1\n \n elif (x == 2):\n d.loc[:,'S1'] = 0\n d.loc[:,'S2'] = 1\n d.loc[:,'S3'] = 0\n \n else:\n d.loc[:,'S1'] = 1\n d.loc[:,'S2'] = 0\n d.loc[:,'S3'] = 0\n \n dataset_bin = pd.concat([dataset_bin, d])\n\n\n#********************************NORMALIZAÇÃO ENTRE 0 E 1 DA BASE DE DADOS***************************\n \nnormalizer = sk.preprocessing.MinMaxScaler(feature_range = (0,1))\ndataset_normalizado = normalizer.fit_transform(dataset_bin)\ndataset_normalizado = pd.DataFrame(dataset_normalizado, columns= ['E1','E2','E3','E4','E5','E6','E7','S1','S2','S3'])\n\nfor o in v_ocultas:\n \n qtd_ocultas = o\n \n #**************************************************EMBARALHANDO**************************************\n \n dataset_teste = pd.DataFrame(columns= ['E1','E2','E3','E4','E5','E6','E7','S1','S2','S3'])\n dataset_treinamento = pd.DataFrame(columns= ['E1','E2','E3','E4','E5','E6','E7','S1','S2','S3'])\n \n dataset_normalizado = sk_utils.shuffle(dataset_normalizado)\n \n #********************************SELECIONANDO A PARCELA PARA TREINAMENTO E TESTE**********************\n \n saidas = ['S1','S2','S3']\n \n for x in saidas:\n \n d = dataset_normalizado[dataset_normalizado[x] == 1] \n \n treinamento, teste = train_test_split(d, test_size=porc_teste)\n \n dataset_teste = dataset_teste.append(teste)\n \n dataset_treinamento = dataset_treinamento.append(treinamento)\n \n \n mlp = MLP_Arquitetura(qtd_entradas,qtd_ocultas,qtd_saidas)\n \n dataset_treinamento=np.asmatrix(dataset_treinamento)\n dataset_teste=np.asmatrix(dataset_teste)\n \n modelo, resultados, cont_epocas = MLP_Backpropagation(mlp, dataset_treinamento, eta, limiar , epocas)\n \n #*******************************************ACURÁCIA**********************************************\n somatoria = 0\n for linha in range(dataset_teste.A.shape[0]):\n aux_sd = dataset_teste[linha,mlp.entrada:dataset_teste.A.shape[1]]\n aux_sd = np.squeeze(np.asarray(aux_sd))\n \n aux_saida = MLP_Forward(modelo,dataset_teste[linha,0:mlp.entrada],resultados)\n aux_saida = np.squeeze(np.asarray(aux_saida.fnet_output)) \n aux_saida = np.around(aux_saida)\n \n #COMPARAÇÃO SE O VETOR DE SAÍDA É IGUAL AO DESEJADO \n sum_equal = 0\n for i in range(len(aux_sd) - 1):\n if(aux_sd[i] != aux_saida[i]):\n sum_equal = 1\n break\n #CASO O VETOR DE SAÍDA SEJA IGUAL, LOGO SOMO 1 \n 
if(sum_equal == 0):\n somatoria +=1\n \n #print('Saida Desejada: '+str(aux_sd)+' Saida Obtida: '+str(aux_saida))\n \n # ACURACIA É A RAZÃO ENTRE A \"SOMATORIA\" E A QUANTIDADE DE EXEMPLOS DE TESTES\n acuracia = (somatoria / dataset_teste.A.shape[0]) * 100\n \n d = decimal.Decimal(acuracia)\n acuracia = round(d,2) \n \n #***************************************************************************************************\n \n print('Qtd de neurônios na camada de entrada: '+str(qtd_entradas))\n print('Qtd de neurônios na camada oculta: '+str(qtd_ocultas))\n print('Qtd de neurônios na camada de saída: '+str(qtd_saidas))\n print('Parcela de teste: '+str(porc_teste))\n print('Taxa de aprendizado: '+str(eta))\n print('Acurácia: '+str(acuracia))\n print('Quantidade de epocas: '+str(cont_epocas))\n","sub_path":"prova1/1/main-sigmoid.py","file_name":"main-sigmoid.py","file_ext":"py","file_size_in_byte":12265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"445691571","text":"# -*- coding:utf-8 -*-\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context # 全局都取消验证 SSL 证书\n# import random\nimport requests\nimport time\n# import datetime\nimport pymysql\nimport traceback\nfrom bs4 import BeautifulSoup as bs\nimport re\nimport importlib,sys \nimportlib.reload(sys)\n#sys.setdefaultencoding('utf-8')\n\n# def get_html(url):\n# try:\n# response=requests.get(url)\n# if response.status_code==200:\n# soup=BeautifulSoup(response.text,'lxml')\n# return soup\n# else:\n# print response.status_code\n#\n# except Exception,e:\n# print \"失败\"\n# print e\n\n######获得代理\n# def get_proxies():\n# proxies = list(set(requests.get(\"http://localhost:8080\").text.split('\\n')))\n# return proxies\n\ndef get_html(shop_url):\n while True:\n try:\n headers = {\n 'Host': 'stockdata.stock.hexun.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',\n 'Cookie':'__jsluid=1da46053ffc9dfabb1199c2c2717555f; HexunTrack=SID=20180506102758146b13f49c4605844a2b14c80e610fb0577&CITY=0&TOWN=0'\n }\n # proxies = get_proxies()\n # index = random.randint(1, len(proxies) - 1)\n # proxy = {\"http\": \"http://\" + str(proxies[index]), \"https\": \"http://\" + str(proxies[index])}\n # print ('Now Proxy is : ' + str(proxy) + ' @ ' + str(datetime.datetime.now()))\n try:\n response = requests.get(shop_url, timeout=50, headers=headers)\n except Exception as e:\n if str(e).find('10061') >= 0 or str(e).find('403') >= 0:\n time.sleep(1)\n # index = random.randint(1, len(proxies) - 1)\n # proxy = {\"http\": \"http://\" + str(proxies[index]), \"https\": \"http://\" + str(proxies[index])}\n try:\n #driver = selenium.webdriver.Chrome()\n driver = selenium.webdriver.PhantomJS()\n driver.get(shop_url)\n time.sleep(1)\n driver.maximize_window()\n cookie = [item[\"name\"] + \"=\" + item[\"value\"] for item in driver.get_cookies()]\n # print cookie\n cookiestr = ';'.join(item for item in cookie)\n headers['Cookie'] = cookiestr\n driver.quit()\n print ('Cookie 获取成功')\n except:\n print ('Cookie 获取失败')\n try:\n response = requests.get(shop_url, timeout=50, headers=headers)\n except:\n print ('再次尝试失败')\n return shop_url\n else:\n print (traceback.format_exc())\n return shop_url\n print (response.status_code)\n if response.status_code == 200:\n soup = bs(response.text, 'lxml')\n if str(soup.text).find('验证中心'):\n print ('需要输入验证码')\n try:\n driver = selenium.webdriver.PhantomJS()\n driver.get(shop_url)\n 
time.sleep(1)\n cookie = [item[\"name\"] + \"=\" + item[\"value\"] for item in driver.get_cookies()]\n # print cookie\n cookiestr = ';'.join(item for item in cookie)\n headers['Cookie'] = cookiestr\n driver.quit()\n print ('Cookie 获取成功')\n except:\n print ('Cookie 获取失败')\n try:\n response = requests.get(shop_url, timeout=50, headers=headers)\n response.encoding = 'gb2312'\n print ('解析成功')\n return response.text\n except:\n print ('验证失败')\n return shop_url\n else:\n response.encoding = 'gb2312'\n return response.text\n elif response.status_code == 404:\n return '页面不存在'\n else:\n return 'error'\n except:\n print (traceback.format_exc())\n return 'error'\n\n\ndef get_detail(url,p):\n try:\n conn = None\n conn = pymysql.connect(host=\"127.0.0.1\", port=3306, user=\"root\", passwd=\"594740\",db=\"test\",charset=\"utf8\")\n cursor = conn.cursor()\n html = get_html(url)\n print (html)\n while html=='页面不存在':\n print ('页面不存在')\n break\n while html=='error' or html=='url':\n print ('需要重新解析该网页')\n sql_url = \"INSERT hexun_error1 VALUES ('%s')\" % (p)\n try:\n cursor.execute(sql_url)\n conn.commit()\n break\n except:\n print (traceback.format_exc())\n conn.rollback()\n break\n else:\n try:\n industry=re.findall(\"industry:'(.*?)',\",html)\n print (industry[0])\n print (len(industry))\n industryrate=re.findall(\"industryrate:'(.*?)',\",html)\n Pricelimit=re.findall(\"Pricelimit:'(.*?)',\",html)\n stockNumber=re.findall(\"stockNumber:'(.*?)',\",html)\n lootingchips=re.findall(\"lootingchips:'(.*?)',\",html)\n Scramble=re.findall(\"Scramble:'(.*?)',\",html)\n rscramble=re.findall(\"rscramble:'(.*?)',\",html)\n Strongstock=re.findall(\"Strongstock:'(.*?)',\",html)\n for i in range(len(industry)):\n print (industry[i],industryrate[i],Pricelimit[i],stockNumber[i],lootingchips[i],Scramble[i],rscramble[i],Strongstock[i])\n while True:\n try:\n cursor.execute(\"insert into hexun_2015 values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')\" % \\\n (0,p,industry[i],industryrate[i],Pricelimit[i],stockNumber[i],lootingchips[i],Scramble[i],rscramble[i],Strongstock[i]))\n conn.commit()\n print(\"p=\",p,'------------save end--------------')\n break\n\n except Exception as e:\n print (\"插入错误\")\n print (e)\n sql_url = \"INSERT hexun_error1 VALUES ('%s')\"%(p)\n try:\n cursor.execute(sql_url)\n conn.commit()\n break\n except:\n print (traceback.format_exc())\n conn.rollback()\n\n except Exception as e:\n print ('正则匹配错误')\n sql_url = \"INSERT hexun_error1 VALUES ('%s')\" %(p)\n try:\n cursor.execute(sql_url)\n conn.commit()\n except:\n print (traceback.format_exc())\n conn.rollback()\n except Exception as e:\n print (traceback.format_exc())\n print ('数据库连接错误')\n finally:\n if conn != None:\n cursor.close() # 关闭游标\n conn.close() # 释放数据库资源\n\n\n# if __name__=='__main__':\n# pages=[32,37,85,86,88,89,120,145,167]\n# for page in pages:\n# url='http://stockdata.stock.hexun.com/zrbg/data/zrbList.aspx?date=2015-12-31&count=20&pname=20&titType=null&page={}&callback=hxbase_json11525657153794'.format(page)\n# get_detail(url,page)\n#\nif __name__=='__main__':\n for page in range(1,178):\n url='http://stockdata.stock.hexun.com/zrbg/data/zrbList.aspx?date=2015-12-31&count=20&pname=20&titType=null&page={}&callback=hxbase_json11525657153794'.format(page)\n get_detail(url,page)\n\n\n","sub_path":"和讯数据/chen_hexun_2015.py","file_name":"chen_hexun_2015.py","file_ext":"py","file_size_in_byte":8354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"586244251","text":"from 
fasttext_helper import *\nimport fasttext\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('N', type=int, help='number of models to train')\nargs = parser.parse_args()\nN = args.N\n\nprint(f'Reading {N} validation predictions...')\npredictions = []\nfor i in range(1, N+1):\n print(f'Reading validation predictions from model {i}...')\n preds = load_preds(VAL_PREDS.format(i), probs=True)\n predictions.append(preds)\n\nprint('Aggregating validation probabilities...')\naveraged = np.array(predictions).mean(axis=0)\nagg = ptol(averaged)\n\nprint('Loading validation labels...')\n_, val_y = load_set(VALIDATION)\n\nprint('Computing validation accuracy...')\nprint(accuracy(agg, val_y))\n\nprint(f'Reading {N} test predictions...')\npredictions = []\nfor i in range(1, N+1):\n print(f'Reading test predictions from model {i}...')\n preds = load_preds(TEST_PREDS.format(i), probs=True)\n predictions.append(preds)\n\nprint('Aggregating test probabilities...')\naveraged = np.array(predictions).mean(axis=0)\nagg = ptol(averaged)\n\nprint('Writing submission...')\nsubmission(agg)\n","sub_path":"scripts/fasttext_mean.py","file_name":"fasttext_mean.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"84391527","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 30 10:53:25 2020\n\n@author: ELCOT\n\"\"\"\nimport sqlite3\nimport pandas as pd\n\n# read the file in to a varaible \ndp=\"C:/Users/ELCOT/Documents/STUD DET.xlsx\"\ndf=pd.read_excel(dp)\n\n#db connection\ndb_name=\"school.db\"\nconn = sqlite3.connect(db_name)\ncur=conn.cursor()\n\n# push into database\ndf.to_sql(name=\"students_detail\",con=conn,if_exists=\"append\")\n\n#read and dislay data\ncur.execute('select * from students_detail')\n\nfor row in cur:\n print(row)\n \nprint(\"------printing student master---------\")\n\ncur.execute('select * from students_master')\n\nfor row1 in cur:\n print(row1)\n\ncur.close()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"162222883","text":"from dataclasses import dataclass\nfrom typing import Dict, List, Tuple, TypeVar, Union\nimport onnxruntime\nimport cv2\nimport numpy as np \nfrom config import Config\n\nopencv_model = TypeVar(\"opencv_model\")\n\n@dataclass\nclass Loader: \n model_weights_path: str\n model_config_path: str\n coco_names: str\n\n @classmethod\n def load_from_config(cls, \n config: Union[Dict, Config], \n **kwargs):\n \"\"\"\n Class method responsible for more user friendly\n config loading.\n Arguments:\n ------------------------------\n config - Dict - loaded config dict with predefined\n variables for loading YOLO model \n Returns: \n ------------------------------\n cls - Loader class object with defined instance variables from config\n \"\"\"\n if isinstance(config, Dict):\n for item, value in config.items():\n if value is None: \n raise ValueError(\n f\"Encountered None value for item {item}, loading yolo suspended\"\n )\n return cls(**config)\n if isinstance(config, Config):\n for value in [config.model_weights_path, config.model_config_path, config.coco_names]:\n if value is None:\n raise ValueError(\n f\"Encountered None value for value {value}, loading yolo suspended\"\n )\n return cls(config.model_weights_path,\n config.model_config_path,\n config.coco_names)\n\n def load_cv2_yolo(self) -> 
Tuple[opencv_model,\n List[str],\n List[str],\n np.ndarray]:\n \"\"\"\n Function responsible for loading YOLO model from OpenCV\n which is more efficient and faster way than reading YOLO\n from Darknet for example (recommended to use this function\n when you dealing with no GPU problems)\n Returns: \n -------------------------------\n opencv_model - loaded_opencv_model ready to be used in inference\\n\n classes - List[str] - COCO classes_names\\n\n output_layers - List[str] - List of model layers which will be used\n to propagate input in inference mode\\n\n colors: np.ndarray - unique color array for each class\n \"\"\"\n model = cv2.dnn.readNet(self.model_weights_path,\n self.model_config_path)\n with open(self.coco_names, 'r') as f: \n classes = list(map(lambda x: x.strip(), f.readlines()))\n layers_name = model.getLayerNames()\n output_layers = list(map(lambda x: layers_name[x[0] - 1],\n model.getUnconnectedOutLayers()))\n colors: np.ndarray = np.random.uniform(0, 255, size=(len(classes), 3))\n\n return model, classes, output_layers, colors\n \n def load_onnx_yolo(self) -> Tuple[onnxruntime.InferenceSession,\n List[str], np.ndarray]:\n \"\"\"\n Load YOLOv3 compatible with ONNX ecosystem\n Returns: \n -------------------------------\n session - onnxruntime.InferenceSession - ONNX model instance\\n\n classes - List[str] - COCO classes_names\\n\n colors: np.ndarray - unique color array for each class\n \"\"\"\n session = onnxruntime.InferenceSession(self.model_weights_path)\n session.get_modelmeta()\n with open(self.coco_names, 'r') as f: \n classes = list(map(lambda x: x.strip(), f.readlines()))\n colors: np.ndarray = np.random.uniform(0, 255, size=(80, 3))\n return session, classes, colors\n","sub_path":"LCD/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"639311455","text":"\"\"\" Compiled: 2020-09-18 10:38:53 \"\"\"\n\n#__src_file__ = \"extensions/operations_document/etc/FOperationsDocumentXMLDOM.py\"\nfrom xml.dom import minidom\n\nATTRIBUTE_TYPE_NODE = 1\nTEXT_NODE = 3\nBLANK = ''\nXML_ENCODING = None\n\ndef CreateXMLMiniDom(templateXML):\n return minidom.parseString(templateXML)\n\ndef FindTopLoops(node):\n loops = list()\n for i in node.childNodes:\n if i.attributes and i.hasAttribute('acmLoop'):\n loops.append(i)\n elif HasChildLoops(i):\n loops.append(i)\n return loops\n\ndef HasChildLoops(node):\n if ToXml(node).find('acmLoop') != -1:\n return True\n else:\n return False\n\ndef DeleteNodes(templateXML, tagName):\n nodesToDelete = list()\n templateXMLSwiftTag = templateXML.getElementsByTagName('SWIFT')[0]\n for aNodeToDelete in GetNodesToDelete(templateXMLSwiftTag, tagName, nodesToDelete):\n aNodeToDelete.parentNode.removeChild(aNodeToDelete.previousSibling)\n aNodeToDelete.parentNode.removeChild(aNodeToDelete)\n\ndef GetNodesToDelete(parentNode, tagName, nodesToDelete):\n for aNode in parentNode.childNodes:\n if aNode.nodeType == parentNode.ELEMENT_NODE and \\\n (tagName == \"*\" or aNode.tagName.startswith(tagName)):\n nodesToDelete.append(aNode)\n GetNodesToDelete(aNode, tagName, nodesToDelete)\n return nodesToDelete\n\ndef MergeXML(templateXML, overrideXML):\n\n if templateXML and overrideXML:\n templateXMLSwiftTag = templateXML.getElementsByTagName('SWIFT')[0]\n overrideXMLSwiftTag = overrideXML.getElementsByTagName('SWIFT')[0]\n\n childNodes = [child for child in overrideXMLSwiftTag.childNodes if child.nodeType != TEXT_NODE]\n for aChildNode 
in childNodes:\n nodeToBeInserted = None\n nodeToOverride = None\n\n nodeToBeInserted = overrideXML.importNode(aChildNode, True)\n if aChildNode.nodeName != 'acmDelete':\n nodeToOverride = templateXML.getElementsByTagName(nodeToBeInserted.nodeName)\n\n if nodeToOverride:\n nodeToOverride = nodeToOverride[0]\n templateXMLSwiftTag.replaceChild(nodeToBeInserted, nodeToOverride)\n else:\n templateXMLSwiftTag.insertBefore(templateXML.createTextNode(str('\\n ')), templateXMLSwiftTag.lastChild)\n templateXMLSwiftTag.insertBefore(nodeToBeInserted, templateXMLSwiftTag.lastChild)\n return templateXML\n\n elif templateXML:\n return templateXML\n elif overrideXML:\n return overrideXML\n else:\n return None\n\ndef InsertFileAttribute(templateXML):\n swiftTag = templateXML.getElementsByTagName('SWIFT')[0]\n if swiftTag.hasAttribute('file'):\n fileName = swiftTag.attributes['file']\n else:\n fileName = BLANK\n\n for child in swiftTag.childNodes:\n if fileName and not child.nodeType == TEXT_NODE and not child.hasAttribute('file'):\n child.attributes['file'] = fileName\n\ndef RemoveFileAttributes(templateXML):\n topNodeName = templateXML.lastChild.tagName\n if topNodeName == 'MESSAGE':\n if templateXML.lastChild.hasAttribute('file'):\n templateXML.lastChild.removeAttribute('file')\n RemoveFileAttributeFromNodes(templateXML.lastChild)\n\ndef RemoveFileAttributeFromNodes(parentNode):\n for node in parentNode.childNodes:\n if node.nodeType == ATTRIBUTE_TYPE_NODE:\n if node.hasAttribute('file'):\n node.removeAttribute('file')\n RemoveFileAttributeFromNodes(node)\n\ndef GetAncestors(node):\n if node.parentNode:\n if node.parentNode.parentNode:\n return node.parentNode, node.parentNode.parentNode\n return node.parentNode, None\n return None, None\n\ndef RemoveNodeByName(node, name):\n for i in node.getElementsByTagName(name):\n if HasChildLoops(i):\n i.parentNode.removeChild(i)\n break\n\ndef FindFileAttribute(node):\n\n while node.tagName != 'MESSAGE':\n if node.hasAttribute('file'):\n return str(node.getAttribute('file'))\n node = node.parentNode\n else:\n if node.hasAttribute('file'):\n return str(node.getAttribute('file'))\n\ndef RemoveTrailingTextNodes(listOfNodes):\n\n if not listOfNodes:\n return listOfNodes\n\n firstNode = listOfNodes[0]\n if firstNode.nodeType == TEXT_NODE:\n listOfNodes = listOfNodes[1:]\n\n if not listOfNodes:\n return listOfNodes\n\n lastNode = listOfNodes[-1]\n if lastNode.nodeType == TEXT_NODE:\n listOfNodes = listOfNodes[:-1]\n\n return listOfNodes\n\ndef RemoveSwiftChilds(template):\n templateXML = minidom.parseString(template)\n swiftTag = templateXML.getElementsByTagName('SWIFT')[0]\n childNodes = swiftTag.childNodes[:]\n for child in childNodes:\n swiftTag.removeChild(child)\n return templateXML.toxml()\n\ndef FilterSwiftTemplateTags(template, tags):\n xml = CreateXMLMiniDom(template)\n swiftTag = xml.getElementsByTagName('SWIFT')[0]\n\n childNodes = swiftTag.childNodes[:]\n for child in childNodes:\n if child.nodeName not in tags:\n swiftTag.removeChild(child)\n\n return xml.toxml()\n\ndef SetXMLEncoding(encoding):\n global XML_ENCODING\n XML_ENCODING = encoding\n\ndef ToXml(template):\n if template:\n return template.toxml(XML_ENCODING)\n else:\n return None\n\n\n","sub_path":"Extensions/Default/FPythonCode/FOperationsDocumentXMLDOM.py","file_name":"FOperationsDocumentXMLDOM.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"82393097","text":"from django.conf.urls.defaults 
import patterns, url\n\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic import ListView\n\nfrom mongonaut import views\n\nurlpatterns = patterns('',\n    url(\n        regex=r'^$',\n        view=views.IndexView.as_view(),\n        name=\"index\"\n    ),\n    url(\n        regex=r'^(?P<app_label>[_\\-\\w]+)/(?P<document_name>[_\\-\\w]+)/$',\n        view=views.DocumentListView.as_view(),\n        name=\"document_list\"\n    ),\n    url(\n        regex=r'^(?P<app_label>[_\\-\\w]+)/(?P<document_name>[_\\-\\w]+)/(?P<id>[\\w]+)/$',\n        view=views.DocumentDetailView.as_view(),\n        name=\"document_detail\"\n    ),\n    url(\n        regex=r'^(?P<app_label>[_\\-\\w]+)/(?P<document_name>[_\\-\\w]+)/(?P<id>[\\w]+)/edit/$',\n        view=views.DocumentDetailFormView.as_view(),\n        name=\"document_detail_form\"\n    ) \n)\n\n\n","sub_path":"mongonaut/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"58205307","text":"class Person:\n\tdef __init__(self, name, surname, age, eye_color, hair_color):\n\t\tself.name = name\n\t\tself.surname = surname\n\t\tself.age = age\n\t\tself.eye_color = eye_color\n\t\tself.hair_color = hair_color\n\tdef name_surname(self):\n\t\treturn self.name + \" \" + self.surname\n\nmy_person = Person(\"Gevorg\", \"Stepanyan\", 30, \"Black\", \"Black\")\n\nprint(my_person.name)\nprint(my_person.age)\nprint(my_person.name_surname())\n\nmy_person1 = Person(\"Poghos\", \"Poghosyan\", 25, \"Green\", \"Green\")\n\nprint(my_person1.name)\nprint(my_person1.age)\nprint(my_person1.name_surname())\n","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"202927953","text":"import logging\nfrom functools import wraps\n\nfrom django.http import Http404\n\nfrom readthedocs.projects.models import Project, ProjectRelationship\n\nlog = logging.getLogger(__name__)  # noqa\n\n\ndef map_subproject_slug(view_func):\n    \"\"\"\n    A decorator that maps a ``subproject_slug`` URL param into a Project.\n\n    :raises: Http404 if the Project doesn't exist\n\n    .. warning:: Does not take into account any kind of privacy settings.\n    \"\"\"\n\n    @wraps(view_func)\n    def inner_view(  # noqa\n        request, subproject=None, subproject_slug=None, *args, **kwargs\n    ):\n        if subproject is None and subproject_slug:\n            # Try to fetch by subproject alias first, otherwise we might end up\n            # redirected to an unrelated project.\n            # Depends on a project passed into kwargs\n            rel = ProjectRelationship.objects.filter(\n                parent=kwargs['project'],\n                alias=subproject_slug,\n            ).first()\n            if rel:\n                subproject = rel.child\n            else:\n                rel = ProjectRelationship.objects.filter(\n                    parent=kwargs['project'],\n                    child__slug=subproject_slug,\n                ).first()\n                if rel:\n                    subproject = rel.child\n                else:\n                    log.warning(\n                        'The slug is not subproject of project. subproject_slug=%s project_slug=%s',\n                        subproject_slug, kwargs['project'].slug\n                    )\n                    raise Http404('Invalid subproject slug')\n        return view_func(request, subproject=subproject, *args, **kwargs)\n\n    return inner_view\n\n\ndef map_project_slug(view_func):\n    \"\"\"\n    A decorator that maps a ``project_slug`` URL param into a Project.\n\n    :raises: Http404 if the Project doesn't exist\n\n    .. 
warning:: Does not take into account any kind of privacy settings.\n \"\"\"\n\n @wraps(view_func)\n def inner_view( # noqa\n request, project=None, project_slug=None, *args, **kwargs\n ):\n if project is None:\n # Get a slug from the request if it can't be found in the URL\n if not project_slug:\n project_slug = request.host_project_slug\n log.debug(\n 'Inserting project slug from request slug=[%s]',\n project_slug\n )\n try:\n project = Project.objects.get(slug=project_slug)\n except Project.DoesNotExist:\n raise Http404('Project does not exist.')\n return view_func(request, project=project, *args, **kwargs)\n\n return inner_view\n","sub_path":"readthedocs/proxito/views/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"277464430","text":"from numpy import arange\nimport timeit\nimport math\n\nf = None\n\na, b, c, d = 0, 0, 0, 0\n\nepsilon = 0.001\n\ndef test():\n\n vol = 0\n for x in arange(a, b, epsilon):\n for y in arange(c, d, epsilon):\n vol += f(x+epsilon/2, y+epsilon/2)*epsilon*epsilon\n\n return vol\n\ndef test_optimized():\n\n vol = 0\n for x in arange(a + epsilon/2, b, epsilon):\n for y in arange(c + epsilon/2, d, epsilon):\n vol += f(x, y)\n vol *= epsilon**2\n\n return vol\n\ndef test_optimized_2():\n\n vol = 0\n\n x = a + epsilon/2\n while x < b:\n y = c + epsilon/2\n while y < d:\n vol += f(x, y)\n y += epsilon\n x += epsilon\n\n vol *= epsilon**2\n\n return vol\n\n\ndef test_optimized_3():\n\n vol = 0\n\n x = a + epsilon / 2\n initial_y = c + epsilon / 2\n while x < b:\n y = initial_y\n while y < d:\n vol += f(x, y)\n y += epsilon\n x += epsilon\n\n vol *= epsilon**2\n\n return vol\n\nif __name__ == '__main__':\n # f must be a function of x and y. e.x. 
to write the function 'x', input x+0*y\n    f = eval(compile('lambda x, y: ' + input('Enter a function to integrate: '), '<string>', 'eval'))\n    a, b, c, d = map(lambda n: float(n), [input('x\\u2080: '), input('x\\u2081: '), input('y\\u2080: '), input('y\\u2081: ')])\n\n    n_trials = 2\n    print(timeit.timeit(\"test()\", setup=\"from __main__ import test\", number=n_trials)/n_trials)\n    print(timeit.timeit(\"test_optimized()\", setup=\"from __main__ import test_optimized\", number=n_trials)/n_trials)\n    print(timeit.timeit(\"test_optimized_2()\", setup=\"from __main__ import test_optimized_2\", number=n_trials)/n_trials)\n    print(timeit.timeit(\"test_optimized_3()\", setup=\"from __main__ import test_optimized_3\", number=n_trials)/n_trials)\n","sub_path":"integral_solver.py","file_name":"integral_solver.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"538402662","text":"\"\"\"\nWhen an image and text coexist in a widget, compound can be used to set where\nthe image is placed relative to the text:\nleft: image to the left of the text\nright: image to the right of the text\ntop: image above the text\nbottom: image below the text\ncenter: text overlaid on top of the image\n\"\"\"\n\nfrom tkinter import *\n\nroot = Tk()\nlabel = Label(root, bitmap='hourglass', compound='left', text='我的天空')\nlabel.pack()\nroot.mainloop()","sub_path":"004-GUI编程/Tkinter/tkinter-learn/组件学习/017-compound参数.py","file_name":"017-compound参数.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"305451554","text":"import sys\nimport re\n\ndef remchar(filename):\n    file = open(filename)\n    for line in file:\n        string1 = str(line.rsplit(',', 1)[0])\n        string2 = str(line.rsplit(',', 1)[1])\n        # remove every character that occurs in string2 from string1;\n        # re.escape keeps regex metacharacters from being misinterpreted\n        for char in string2.strip():\n            string1 = re.sub(re.escape(char), '', string1)\n        print(string1)\n    file.close()\nremchar(sys.argv[1])\n","sub_path":"Python/complete/removecharacters.py3","file_name":"removecharacters.py3","file_ext":"py3","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"239600006","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nnames = []\r\npresent = []\r\nwith open('names.txt', 'r') as f:\r\n    for line in f:\r\n        line = line.strip('\\n')\r\n        names.append(line)\r\n    print('loaded register')\r\nroot = Tk()\r\nfor i in names:\r\n    qu = ('is ' + i + ' present')\r\n    hear = messagebox.askyesno('', qu)\r\n    if hear:\r\n        present.append(i)\r\n    \r\nwith open('reg.csv', 'w+') as f:\r\n    for n in names:\r\n        if n in present:\r\n            f.write(n + ', present\\n')\r\n        else:\r\n            f.write(n + ', absent\\n')\r\n\r\nroot.destroy()\r\n","sub_path":"takeregister.py","file_name":"takeregister.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"346910413","text":"import os\nfrom flask import Flask, request, redirect, url_for, render_template\nfrom pymongo import MongoClient\n\napp = Flask(__name__)\nclient = MongoClient(os.environ['MYFLASKAPP_DB_1_PORT_27017_TCP_ADDR'], 27017)\ndb = client.itemsdb\n\n\n@app.route('/')\ndef homepage():\n    items = db.itemsdb.find()\n    items = [item for item in items]\n    return render_template('base.html', items=items)\n\n\n@app.route('/new', methods=['POST'])\ndef new():\n    items_doc = {'name': request.form['name'],\n                 'description': request.form['description']\n                 }\n    db.itemsdb.insert_one(items_doc)\n    return redirect(url_for('homepage'))\n\n\n@app.route('/hello')\ndef hello():\n    return 'Hello everyone!!'\n\n\nif __name__ 
== '__main__':\n app.run(host='0.0.0.0', port=80, debug=True)\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"466005551","text":"import os\n\nfrom flask import Flask, render_template, request, redirect\n\nfrom inference import get_prediction\nfrom base64 import b64encode\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n if 'file' not in request.files:\n return redirect(request.url)\n file = request.files.get('file')\n if not file:\n return\n img_bytes = file.read()\n image = b64encode(img_bytes).decode(\"utf-8\")\n predict_post = get_prediction(image_bytes=img_bytes)\n return render_template('result.html', predict_post=predict_post, image=image)\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=int(os.environ.get('PORT', 5000)))\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"392560039","text":"\"\"\"Platform for binary sensor integration.\"\"\"\nimport logging\n\nfrom smarttub import SpaReminder\n\nfrom homeassistant.components.binary_sensor import (\n DEVICE_CLASS_CONNECTIVITY,\n DEVICE_CLASS_PROBLEM,\n BinarySensorEntity,\n)\n\nfrom .const import ATTR_REMINDERS, DOMAIN, SMARTTUB_CONTROLLER\nfrom .entity import SmartTubEntity, SmartTubSensorBase\n\n_LOGGER = logging.getLogger(__name__)\n\n# whether the reminder has been snoozed (bool)\nATTR_REMINDER_SNOOZED = \"snoozed\"\n\n\nasync def async_setup_entry(hass, entry, async_add_entities):\n \"\"\"Set up binary sensor entities for the binary sensors in the tub.\"\"\"\n\n controller = hass.data[DOMAIN][entry.entry_id][SMARTTUB_CONTROLLER]\n\n entities = []\n for spa in controller.spas:\n entities.append(SmartTubOnline(controller.coordinator, spa))\n entities.extend(\n SmartTubReminder(controller.coordinator, spa, reminder)\n for reminder in controller.coordinator.data[spa.id][ATTR_REMINDERS].values()\n )\n\n async_add_entities(entities)\n\n\nclass SmartTubOnline(SmartTubSensorBase, BinarySensorEntity):\n \"\"\"A binary sensor indicating whether the spa is currently online (connected to the cloud).\"\"\"\n\n def __init__(self, coordinator, spa):\n \"\"\"Initialize the entity.\"\"\"\n super().__init__(coordinator, spa, \"Online\", \"online\")\n\n @property\n def is_on(self) -> bool:\n \"\"\"Return true if the binary sensor is on.\"\"\"\n return self._state is True\n\n @property\n def device_class(self) -> str:\n \"\"\"Return the device class for this entity.\"\"\"\n return DEVICE_CLASS_CONNECTIVITY\n\n\nclass SmartTubReminder(SmartTubEntity, BinarySensorEntity):\n \"\"\"Reminders for maintenance actions.\"\"\"\n\n def __init__(self, coordinator, spa, reminder):\n \"\"\"Initialize the entity.\"\"\"\n super().__init__(\n coordinator,\n spa,\n f\"{reminder.name.title()} Reminder\",\n )\n self.reminder_id = reminder.id\n\n @property\n def unique_id(self):\n \"\"\"Return a unique id for this sensor.\"\"\"\n return f\"{self.spa.id}-reminder-{self.reminder_id}\"\n\n @property\n def reminder(self) -> SpaReminder:\n \"\"\"Return the underlying SpaReminder object for this entity.\"\"\"\n return self.coordinator.data[self.spa.id][\"reminders\"][self.reminder_id]\n\n @property\n def is_on(self) -> bool:\n \"\"\"Return whether the specified maintenance 
action needs to be taken.\"\"\"\n        return self.reminder.remaining_days == 0\n\n    @property\n    def extra_state_attributes(self):\n        \"\"\"Return the state attributes.\"\"\"\n        return {\n            ATTR_REMINDER_SNOOZED: self.reminder.snoozed,\n        }\n\n    @property\n    def device_class(self) -> str:\n        \"\"\"Return the device class for this entity.\"\"\"\n        return DEVICE_CLASS_PROBLEM\n","sub_path":"homeassistant/components/smarttub/binary_sensor.py","file_name":"binary_sensor.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"397265952","text":"\nfrom django.urls import path,include\n\nfrom . import views\n\nurlpatterns = [\n\n    path('',views.home,name=\"home\" ),\n    path('event/<int:id>/', views.full_event, name=\"full_event\"),\n    path('account/', include('accounts.urls')),\n    path('event/create',views.create,name=\"create\" ),\n    ]\n\n","sub_path":"event_manager/event/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"248168923","text":"import io\r\n\r\nclass XmlWriterSettings:\r\n    def __init__(self): \r\n        self.encoding = None\r\n        self.indent = False\r\n        self.indentChars = '\\r\\n'\r\n\r\nclass XmlWriter:\r\n    \r\n    def __init__(self) -> None:\r\n        self.settings = None\r\n        self.__m_stream = None\r\n        self.__m_str_build = None\r\n        self.__m_file_name = None\r\n        self.__m_nodes = list()\r\n        self.__m_elem_not_ended = False\r\n        self.__m_elem_has_child = False\r\n    \r\n    def __enter__(self): return self\r\n    def __exit__(self, typ, val, traceback): self.close()\r\n    \r\n    \r\n    @staticmethod\r\n    def create_stream(output : io.IOBase, settings_ : XmlWriterSettings=None) -> 'XmlWriter':\r\n        if (settings_ is None): \r\n            settings_ = XmlWriterSettings()\r\n        res = XmlWriter()\r\n        res.settings = settings_\r\n        res.__m_stream = output\r\n        return res\r\n    \r\n    @staticmethod\r\n    def create_file(output_file_name : str, settings_ : XmlWriterSettings=None) -> 'XmlWriter':\r\n        if (settings_ is None): \r\n            settings_ = XmlWriterSettings()\r\n        res = XmlWriter()\r\n        res.settings = settings_\r\n        res.__m_stream = (open(output_file_name, mode=\"r+b\"))\r\n        return res\r\n    \r\n    @staticmethod\r\n    def create_string(output : io.StringIO, settings_ : XmlWriterSettings=None) -> 'XmlWriter':\r\n        if (settings_ is None): \r\n            settings_ = XmlWriterSettings()\r\n        res = XmlWriter()\r\n        res.settings = settings_\r\n        res.__m_str_build = output\r\n        return res\r\n    \r\n    def close(self) -> None:\r\n        if (self.__m_stream is not None): \r\n            self.__m_stream.close()\r\n            self.__m_stream = None\r\n    \r\n    def flush(self) -> None:\r\n        if (self.__m_stream is not None): \r\n            self.__m_stream.flush()\r\n    \r\n    def __out(self, str0_ : str) -> None:\r\n        if (str0_ is None): \r\n            return\r\n        if (self.__m_str_build is not None): \r\n            print(str0_, end=\"\", file=self.__m_str_build)\r\n        elif (self.__m_stream is not None): \r\n            if (self.__m_stream.position == 0): \r\n                arr = bytearray()\r\n                arr.append(0xEF)\r\n                arr.append(0xBB)\r\n                arr.append(0xBF)\r\n                self.__m_stream.write(arr, 0, 3)\r\n            dat = str0_.encode('utf-8', 'ignore')\r\n            self.__m_stream.write(dat, 0, len(dat))\r\n    \r\n    def write_start_document(self) -> None:\r\n        self.__out(\"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?>\")\r\n    \r\n    def write_end_document(self) -> None:\r\n        pass\r\n    \r\n    def write_start_element(self, local_name : str) -> None:\r\n        if (self.__m_elem_not_ended): \r\n            self.__out(\">\")\r\n            self.__m_elem_not_ended = False\r\n
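        # The \"element not ended\" flag defers emitting the '>' that closes a start tag\r\n        # until it is known whether the element gets children or must self-close as\r\n        # '<name />' (see write_end_element and write_string below).\r\n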
        self.__m_nodes.append(local_name)\r\n        if (self.settings.indent): \r\n            self.__out(\"\\r\\n\")\r\n            if (self.settings.indentChars is not None): \r\n                i = 0\r\n                while i < (len(self.__m_nodes) - 1): \r\n                    self.__out(self.settings.indentChars)\r\n                    i += 1\r\n        self.__out(\"<{0}\".format(local_name))\r\n        self.__m_elem_not_ended = True\r\n        self.__m_elem_has_child = False\r\n    \r\n    def write_start_element2(self, local_name : str, ns : str) -> None:\r\n        if(ns is None):\r\n            self.write_start_element(local_name)\r\n        else:\r\n            self.write_start_element(local_name)\r\n            self.write_attribute_string(\"xmlns\", ns)\r\n    \r\n    def write_start_element3(self, prefix : str, local_name : str, ns : str) -> None:\r\n        if(prefix is None):\r\n            self.write_start_element2(local_name, ns)\r\n        elif(ns is None):\r\n            self.write_start_element(\"{0}:{1}\".format(prefix, local_name))\r\n        else:\r\n            self.write_start_element(\"{0}:{1}\".format(prefix, local_name))\r\n            self.write_attribute_string(\"xmlns:{0}\".format(prefix), ns)\r\n    \r\n    def write_end_element(self) -> None:\r\n        if (self.__m_elem_not_ended): \r\n            self.__out(\" />\")\r\n            self.__m_elem_not_ended = False\r\n            del self.__m_nodes[len(self.__m_nodes) - 1]\r\n            self.__m_elem_has_child = True\r\n            return\r\n        if (self.settings.indent and self.__m_elem_has_child): \r\n            self.__out(\"\\r\\n\")\r\n            if (self.settings.indentChars is not None): \r\n                i = 0\r\n                while i < (len(self.__m_nodes) - 1): \r\n                    self.__out(self.settings.indentChars)\r\n                    i += 1\r\n        if (len(self.__m_nodes) > 0): \r\n            self.__out(\"</{0}>\".format(self.__m_nodes[len(self.__m_nodes) - 1]))\r\n        del self.__m_nodes[len(self.__m_nodes) - 1]\r\n        self.__m_elem_has_child = True\r\n    \r\n    \r\n
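    # __correct_value escapes text for XML output: '<', '&' and '>' (plus the quote\r\n    # characters inside attribute values) are written as character entities, and\r\n    # stray control characters are replaced with spaces.\r\n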
    def __correct_value(self, val : str, is_attr : bool) -> str:\r\n        tmp = io.StringIO()\r\n        if (val is not None): \r\n            for ch in val: \r\n                o = ord(ch)\r\n                if (ch == '<'): \r\n                    print(\"&lt;\", end=\"\", file=tmp)\r\n                elif (ch == '&'): \r\n                    print(\"&amp;\", end=\"\", file=tmp)\r\n                elif (ch == '>'): \r\n                    print(\"&gt;\", end=\"\", file=tmp)\r\n                elif (is_attr and ch == '\"'): \r\n                    print(\"&quot;\", end=\"\", file=tmp)\r\n                elif (is_attr and ch == '\\''): \r\n                    print(\"&apos;\", end=\"\", file=tmp)\r\n                elif (o < 0x20 and o != 0xA and o != 0xD and o != 9): \r\n                    print(' ', end=\"\", file=tmp)\r\n                else: \r\n                    print(ch, end=\"\", file=tmp)\r\n        v = tmp.getvalue()\r\n        if(len(v) > tmp.tell()):\r\n            return v[0:tmp.tell()]\r\n        return v\r\n    \r\n    def write_attribute_string(self, local_name : str, value : str) -> None:\r\n        self.__out(\" {0}=\\\"{1}\\\"\".format(local_name, self.__correct_value(value, True)))\r\n    \r\n    def write_attribute_string2(self, local_name : str, ns : str, value : str) -> None:\r\n        if(ns is None):\r\n            self.write_attribute_string(local_name, value)\r\n        else:\r\n            self.write_attribute_string(\"{0}:{1}\".format(\"p2\", local_name), value)\r\n            self.write_attribute_string(\"xmlns:p2\", ns)\r\n    \r\n    def write_attribute_string3(self, prefix : str, local_name : str, ns : str, value : str) -> None:\r\n        if(prefix is None):\r\n            self.write_attribute_string2(local_name, ns, value)\r\n        elif(ns is None):\r\n            self.write_attribute_string(\"{0}:{1}\".format(prefix, local_name), value)\r\n        else:\r\n            self.write_attribute_string(\"{0}:{1}\".format(prefix, local_name), value)\r\n            self.write_attribute_string(\"{0}:{1}\".format(\"xmlns\", prefix), ns)\r\n    \r\n    def write_element_string(self, local_name : str, value : str) -> None:\r\n        self.write_start_element(local_name)\r\n        self.write_string(value)\r\n        self.write_end_element()\r\n    \r\n    def write_element_string2(self, local_name : str, ns : str, value : str) -> None:\r\n        self.write_start_element2(local_name, ns)\r\n        self.write_string(value)\r\n        self.write_end_element()\r\n    \r\n    def write_element_string3(self, prefix : str, local_name : str, ns : str, value : str) -> None:\r\n        self.write_start_element3(prefix, local_name, ns)\r\n        self.write_string(value)\r\n        self.write_end_element()\r\n    \r\n    def write_string(self, text : str) -> None:\r\n        if (self.__m_elem_not_ended): \r\n            self.__out(\">\")\r\n            self.__m_elem_not_ended = False\r\n        self.__out(self.__correct_value(text, False))\r\n    \r\n    def write_value(self, value : object) -> None:\r\n        if (value is None): \r\n            return\r\n        self.write_string(str(value))\r\n\r\n    def write_comment(self, text : str) -> None:\r\n        if (self.__m_elem_not_ended): \r\n            self.__out(\">\")\r\n            self.__m_elem_not_ended = False\r\n        self.__out(\"<!--{0}-->\".format(text))\r\n    \r\n    def write_cdata(self, text : str) -> None:\r\n        if (self.__m_elem_not_ended): \r\n            self.__out(\">\")\r\n            self.__m_elem_not_ended = False\r\n        self.__out(\"<![CDATA[{0}]]>\".format(text))\r\n\r\n    ","sub_path":"pullenti/unisharp/Xml.py","file_name":"Xml.py","file_ext":"py","file_size_in_byte":8239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"60741024","text":"#\n# (c) 2016 Red Hat Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see .\n\n# Make coding more python3-ish\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nfrom ansible_collections.cisco.iosxr.tests.unit.compat.mock import (\n patch,\n MagicMock,\n)\nfrom ansible_collections.cisco.iosxr.plugins.modules import iosxr_config\nfrom ansible_collections.cisco.iosxr.plugins.cliconf.iosxr import Cliconf\nfrom ansible_collections.cisco.iosxr.tests.unit.modules.utils import (\n set_module_args,\n)\nfrom .iosxr_module import TestIosxrModule, load_fixture\n\n\nclass TestIosxrConfigModule(TestIosxrModule):\n\n module = iosxr_config\n\n def setUp(self):\n super(TestIosxrConfigModule, self).setUp()\n\n self.patcher_get_config = patch(\n \"ansible_collections.cisco.iosxr.plugins.modules.iosxr_config.get_config\"\n )\n self.mock_get_config = self.patcher_get_config.start()\n\n self.patcher_exec_command = patch(\n \"ansible_collections.cisco.iosxr.plugins.modules.iosxr_config.load_config\"\n )\n self.mock_exec_command = self.patcher_exec_command.start()\n\n self.mock_get_connection = patch(\n \"ansible_collections.cisco.iosxr.plugins.modules.iosxr_config.get_connection\"\n )\n self.get_connection = self.mock_get_connection.start()\n\n self.conn = self.get_connection()\n self.conn.edit_config = MagicMock()\n\n self.cliconf_obj = Cliconf(MagicMock())\n self.running_config = load_fixture(\"iosxr_config_config.cfg\")\n\n def tearDown(self):\n super(TestIosxrConfigModule, self).tearDown()\n\n self.patcher_get_config.stop()\n self.patcher_exec_command.stop()\n self.mock_get_connection.stop()\n\n def load_fixtures(self, commands=None):\n config_file = \"iosxr_config_config.cfg\"\n self.mock_get_config.return_value = load_fixture(config_file)\n self.mock_exec_command.return_value = \"dummy diff\"\n\n def test_iosxr_config_unchanged(self):\n src = load_fixture(\"iosxr_config_config.cfg\")\n set_module_args(dict(src=src))\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(src, src)\n )\n self.execute_module()\n\n def test_iosxr_config_src(self):\n src = load_fixture(\"iosxr_config_src.cfg\")\n set_module_args(dict(src=src))\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(src, self.running_config)\n )\n commands = [\n \"hostname foo\",\n \"interface GigabitEthernet0/0\",\n \"no ip address\",\n ]\n self.execute_module(changed=True, commands=commands)\n\n def test_iosxr_config_backup(self):\n set_module_args(dict(backup=True))\n result = self.execute_module()\n self.assertIn(\"__backup__\", result)\n\n def test_iosxr_config_lines_wo_parents(self):\n lines = [\"hostname foo\"]\n set_module_args(dict(lines=lines))\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\n \"\\n\".join(lines), self.running_config\n )\n )\n commands = [\"hostname foo\"]\n self.execute_module(changed=True, commands=commands)\n\n def test_iosxr_config_lines_w_parents(self):\n lines = [\"shutdown\"]\n parents = [\"interface GigabitEthernet0/0\"]\n set_module_args(dict(lines=lines, parents=parents))\n module = MagicMock()\n module.params = {\"lines\": lines, \"parents\": parents, \"src\": None}\n candidate_config = iosxr_config.get_candidate(module)\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\n candidate_config, self.running_config\n )\n )\n commands = [\"interface GigabitEthernet0/0\", \"shutdown\"]\n self.execute_module(changed=True, commands=commands)\n\n def test_iosxr_config_before(self):\n lines = [\"hostname foo\"]\n set_module_args(dict(lines=lines, 
before=[\"test1\", \"test2\"]))\n commands = [\"test1\", \"test2\", \"hostname foo\"]\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\n \"\\n\".join(lines), self.running_config\n )\n )\n self.execute_module(changed=True, commands=commands, sort=False)\n\n def test_iosxr_config_after(self):\n lines = [\"hostname foo\"]\n set_module_args(dict(lines=lines, after=[\"test1\", \"test2\"]))\n commands = [\"hostname foo\", \"test1\", \"test2\"]\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\n \"\\n\".join(lines), self.running_config\n )\n )\n self.execute_module(changed=True, commands=commands, sort=False)\n\n def test_iosxr_config_before_after_no_change(self):\n lines = [\"hostname router\"]\n set_module_args(\n dict(\n lines=lines,\n before=[\"test1\", \"test2\"],\n after=[\"test3\", \"test4\"],\n )\n )\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\n \"\\n\".join(lines), self.running_config\n )\n )\n self.execute_module()\n\n def test_iosxr_config_config(self):\n config = \"hostname localhost\"\n lines = [\"hostname router\"]\n set_module_args(dict(lines=[\"hostname router\"], config=config))\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\"\\n\".join(lines), config)\n )\n commands = [\"hostname router\"]\n self.execute_module(changed=True, commands=commands)\n\n def test_iosxr_config_replace_block(self):\n lines = [\"description test string\", \"test string\"]\n parents = [\"interface GigabitEthernet0/0\"]\n set_module_args(dict(lines=lines, replace=\"block\", parents=parents))\n commands = parents + lines\n\n module = MagicMock()\n module.params = {\"lines\": lines, \"parents\": parents, \"src\": None}\n candidate_config = iosxr_config.get_candidate(module)\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\n candidate_config,\n self.running_config,\n diff_replace=\"block\",\n path=parents,\n )\n )\n self.execute_module(changed=True, commands=commands)\n\n def test_iosxr_config_force(self):\n lines = [\"hostname router\"]\n set_module_args(dict(lines=lines, force=True))\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\n \"\\n\".join(lines), self.running_config, diff_match=\"none\"\n )\n )\n self.execute_module(changed=True, commands=lines)\n\n def test_iosxr_config_admin(self):\n lines = [\"username admin\", \"group root-system\", \"secret P@ssw0rd\"]\n set_module_args(dict(lines=lines, admin=True))\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\n \"\\n\".join(lines), self.running_config\n )\n )\n self.execute_module(changed=True, commands=lines)\n\n def test_iosxr_config_match_none(self):\n lines = [\"ip address 1.2.3.4 255.255.255.0\", \"description test string\"]\n parents = [\"interface GigabitEthernet0/0\"]\n set_module_args(dict(lines=lines, parents=parents, match=\"none\"))\n commands = parents + lines\n module = MagicMock()\n module.params = {\"lines\": lines, \"parents\": parents, \"src\": None}\n candidate_config = iosxr_config.get_candidate(module)\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\n candidate_config,\n self.running_config,\n diff_match=\"none\",\n path=parents,\n )\n )\n\n self.execute_module(changed=True, commands=commands, sort=False)\n\n def test_iosxr_config_match_strict(self):\n lines = [\n \"ip address 1.2.3.4 255.255.255.0\",\n \"description test string\",\n \"shutdown\",\n ]\n parents = [\"interface GigabitEthernet0/0\"]\n 
set_module_args(dict(lines=lines, parents=parents, match=\"strict\"))\n commands = parents + [\"shutdown\"]\n module = MagicMock()\n module.params = {\"lines\": lines, \"parents\": parents, \"src\": None}\n candidate_config = iosxr_config.get_candidate(module)\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\n candidate_config,\n self.running_config,\n diff_match=\"strict\",\n path=parents,\n )\n )\n\n self.execute_module(changed=True, commands=commands, sort=False)\n\n def test_iosxr_config_match_exact(self):\n lines = [\n \"ip address 1.2.3.4 255.255.255.0\",\n \"description test string\",\n \"shutdown\",\n ]\n parents = [\"interface GigabitEthernet0/0\"]\n set_module_args(dict(lines=lines, parents=parents, match=\"exact\"))\n commands = parents + lines\n module = MagicMock()\n module.params = {\"lines\": lines, \"parents\": parents, \"src\": None}\n candidate_config = iosxr_config.get_candidate(module)\n self.conn.get_diff = MagicMock(\n return_value=self.cliconf_obj.get_diff(\n candidate_config,\n self.running_config,\n diff_match=\"exact\",\n path=parents,\n )\n )\n\n self.execute_module(changed=True, commands=commands, sort=False)\n\n def test_iosxr_config_src_and_lines_fails(self):\n args = dict(src=\"foo\", lines=\"foo\")\n set_module_args(args)\n self.execute_module(failed=True)\n\n def test_iosxr_config_src_and_parents_fails(self):\n args = dict(src=\"foo\", parents=\"foo\")\n set_module_args(args)\n self.execute_module(failed=True)\n\n def test_iosxr_config_match_exact_requires_lines(self):\n args = dict(match=\"exact\")\n set_module_args(args)\n self.execute_module(failed=True)\n\n def test_iosxr_config_match_strict_requires_lines(self):\n args = dict(match=\"strict\")\n set_module_args(args)\n self.execute_module(failed=True)\n\n def test_iosxr_config_replace_block_requires_lines(self):\n args = dict(replace=\"block\")\n set_module_args(args)\n self.execute_module(failed=True)\n\n def test_iosxr_config_replace_config_requires_src(self):\n args = dict(replace=\"config\")\n set_module_args(args)\n self.execute_module(failed=True)\n","sub_path":"intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/cisco/iosxr/tests/unit/modules/network/iosxr/test_iosxr_config.py","file_name":"test_iosxr_config.py","file_ext":"py","file_size_in_byte":11300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"134394661","text":"# -*- coding: utf-8 -*-\n# Author: Jorge A. 
Toro\n#\nimport sys\nimport StringIO\nfrom UserDict import UserDict\n\n\ndef tagData(dFile, position, bit, seek=0):\n    \"\"\"\n    Takes a starting point (position), an amount of bits and a reference\n    point from which to read them (as in the file seek() method).\n    dFile is a StringIO object.\n    \"\"\"\n    try:\n        dFile.seek(position, seek)\n        tagdata = dFile.read(bit)\n    except: sys.stderr.write(\"Error getting the Tag Data\")\n    \n    return tagdata\n\n\n# Class that acts like a dictionary\nclass Device(UserDict):\n    \"\"\" Store Device\"\"\"\n    def __init__(self, deviceData=None):\n        UserDict.__init__(self)\n        self[\"data\"] = deviceData\n\n\nclass ANTDevice(Device):\n    \"\"\"\n    Antares device\n    \"\"\"\n    tagDataANT = { # (position, bit, seek, function)\n        \"id\" : ( -7, 6, 2, tagData)\n    }\n\n\n    def __parse(self, data):\n        self.clear()\n        try:\n            dataFile = StringIO.StringIO(data)\n            #\n            for tag, (position, bit, seek, parseFunc) in self.tagDataANT.items():\n                self[tag] = parseFunc(dataFile, position, bit, seek)\n\n        except: sys.stderr.write('Unexpected error: %s\\n' % str(sys.exc_info()))\n        finally: dataFile.close()\n\n\n    def __setitem__(self, key, item):\n        if key == \"data\" and item:\n            self.__parse(item)\n        # Call our ancestor's __setitem__\n        Device.__setitem__(self, key, item) \n\n    \n    \nclass SKPDevice(Device):\n    \"\"\"\n    Skypatrol device\n    \"\"\"\n    pass\n\n\nclass HUNTDevice(Device):\n    \"\"\"\n    Hunter device\n    \"\"\"\n    pass\n\n\n\ndef typeDevice(data):\n    \"\"\"\n    Determines which type of GPS device the data belongs to.\n\n    Usage:\n    >>> import devices\n    >>> \n    >>> data='>REV041674684322+0481126-0757378200000012;ID=ANT001<'\n    >>> devices.typeDevice(data)\n    'ANT'\n    >>>\n    >>> type(devices.typeDevice(''))\n    <type 'NoneType'>\n    >>>\n    >>> if devices.typeDevice('') is not None: print \"Continue with the program...\"\n    ... \n    >>> if devices.typeDevice(data) is not None: print \"Continue with the program...\"\n    ... \n    Continue with the program...\n    >>> \n    \"\"\"\n    # Supported devices:\n    types = ('ANT', 'SKP', 'HUNT')\n\n    typeDev = lambda dat: (\"\".join(\n        [d for d in types \n         if dat.find(d) != -1])\n    )\n    return typeDev(data) or None #raise\n\n\n#\ndef getTypeClass(data, module=sys.modules[Device.__module__]):\n    \"\"\"\n    Determines which class should handle a given device and\n    returns a dictionary with the processed frame.\n\n    Receives the data sent by the device (data) and, optionally,\n    the name of the module where the class that handles this\n    type of device lives (module).\n\n    Usage:\n    >>> import devices\n    >>> \n    >>> data='>REV041674684322+0481126-0757378200000012;ID=ANT001<'\n    >>> devices.getTypeClass(data)\n    {'data': '>REV041674684322+0481126-0757378200000012;ID=ANT001<', 'id': 'ANT001'}\n    >>> \n\n    \"\"\"\n    # Determine the appropriate handler class for the device\n    dev = \"%sDevice\" % typeDevice(data)\n\n    #return dev\n    def getClass(module, dev): \n        \"\"\" \n        Returns a reference to the handler class. \n        Usage:\n        >>> getClass(module, 'ANTDevice')\n        <class 'devices.ANTDevice'>\n        >>> getClass(module, 'SKPDevice')\n        <class 'devices.SKPDevice'>\n        >>> getClass(module, '')\n        <class 'devices.Device'>\n        >>> \n        \"\"\"\n        return hasattr(module, dev) and getattr(module, dev) or Device\n\n    return getClass(module, dev)(data)\n    \n\n","sub_path":"Devices/BK/devices-bk13.py","file_name":"devices-bk13.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"4995730","text":"# This file is part of the GBI project.\n# Copyright (C) 2012 Omniscale GmbH & Co. 
KG \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\n\nfrom flask import (\n Flask,\n g,\n request,\n make_response,\n jsonify,\n render_template,\n flash,\n redirect,\n url_for,\n session,\n abort,\n)\n\n# XXX olt: do not import from flask.ext, makes trouble with pyinstaller\nfrom flaskext.babel import Babel\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\ndef create_app(app_state):\n app = Flask(__name__)\n app.debug = True\n\n if getattr(sys, \"frozen\", None):\n # set root_path to data dir from PyInstaller\n basedir = sys._MEIPASS\n app.root_path = os.path.join(basedir, os.path.join(*__name__.split(\".\")))\n\n app.config.geobox_state = app_state\n\n app.config[\"SECRET_KEY\"] = app_state.config.get(\"web\", \"secret_key\")\n\n from . import views\n\n app.register_blueprint(views.main)\n app.register_blueprint(views.map)\n app.register_blueprint(views.tasks)\n app.register_blueprint(views.project)\n app.register_blueprint(views.user)\n app.register_blueprint(views.admin)\n app.register_blueprint(views.vector)\n app.register_blueprint(views.downloads)\n\n @app.before_request\n def before_request():\n from helper import request_for_static\n\n g.db = app_state.user_db_session()\n if request_for_static():\n return\n\n username = session.get(\"username\", False)\n if not username and request.endpoint != \"user_view.login\":\n abort(403)\n\n @app.teardown_request\n def teardown_request(exception):\n \"\"\"Closes the database again at the end of the request.\"\"\"\n if hasattr(g, \"db\"):\n g.db.close()\n\n from .helper import css_alert_category, add_auth_to_url\n\n app.jinja_env.globals.update(\n css_alert_category=css_alert_category, add_auth_to_url=add_auth_to_url\n )\n\n configure_i18n(app, app_state.locale())\n configure_errorhandlers(app)\n return app\n\n\ndef configure_i18n(app, locale):\n babel = Babel(app)\n\n @babel.localeselector\n def get_locale():\n return locale\n\n\ndef configure_errorhandlers(app):\n\n if app.testing:\n return\n\n from flaskext.babel import _\n\n @app.errorhandler(405)\n def not_allowed(error):\n if request.is_xhr:\n return jsonify(error=_(\"Sorry, method not allowed\"))\n return make_response(render_template(\"errors/405.html\", error=error), 405)\n\n @app.errorhandler(404)\n def page_not_found(error):\n if request.is_xhr:\n return jsonify(error=_(\"Sorry, page not found\"))\n return make_response(render_template(\"errors/404.html\", error=error), 404)\n\n @app.errorhandler(403)\n def forbidden(error):\n if request.is_xhr:\n return jsonify(error=_(\"Sorry, not allowed\"))\n flash(_(\"Please log in first...\"), \"error\")\n login_url = \"%s?next=%s\" % (url_for(\"user_view.login\"), request.url)\n return redirect(login_url)\n\n @app.errorhandler(500)\n def server_error(error):\n if request.is_xhr:\n return jsonify(error=_(\"Sorry, an error has occurred\"))\n return make_response(render_template(\"errors/500.html\", error=error), 500)\n\n @app.errorhandler(401)\n def unauthorized(error):\n if request.is_xhr:\n return 
jsonify(error=_(\"Login required\"))\n flash(_(\"Please login to see this page\"), \"error\")\n return redirect(url_for(\"user.login\", next=request.url))\n","sub_path":"gr/gbi-client/app/geobox/web/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"3773500","text":"import sqlite3\r\nconexao = sqlite3.connect(\"Controle_de_Chave.db\")\r\ncursor=conexao.cursor()\r\n\r\nmemorandos=[]\r\nmemorando=''\r\nfor tupla in cursor.execute('select * from Memorando'):\r\n local=tupla[0]\r\n professor=tupla[1]\r\n if memorando == '':\r\n memorando=list(tupla)\r\n memorandos.append(memorando)\r\n elif memorando[0] == local and memorando[1] != professor:\r\n memorando = list(tupla)\r\n memorandos.append(memorando)\r\n elif memorando[0] != local and memorando[1] == professor:\r\n memorando = list(tupla)\r\n memorandos.append(memorando)\r\n\r\nfor memorando in memorandos:\r\n local=memorando[0]\r\n professor=memorando[1]\r\n memorando.pop(2)\r\n memorando.insert(2,[])\r\n for aluno in cursor.execute('select aluno from Memorando where local=? and professor=?', (local, professor)):\r\n memorando[2].insert(2,aluno[0])\r\n\r\n\r\nprint(memorandos)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"511314932","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import Context, loader\nfrom .forms import UserForm\n\nimport os\nimport operator\nimport random\nimport sqlite3\ndjango_base_folder = os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) )\n\n# Create your views here.\ndef index(request):\n\n if request.method == \"POST\":\n template=loader.get_template(\"response.html\")\n languages = get_languages_list()\n form_dict = extract_formData_from_request(request,languages)\n register_form(form_dict)\n ranked_projects = rank_projects(form_dict)\n context = { \"ranked_projects\": ranked_projects }\n return HttpResponse(template.render(context))\n\n else:\n template=loader.get_template(\"template.html\")\n languages = get_languages_list()\n context = { \"languages\":languages}\n return HttpResponse(template.render(context))\n\n\ndef rank_projects(form_dict):\n projects = query_db(form_dict)\n projects_name = [p[1] for p in projects]\n #projects_index = [random.randint(0,len(projects)-1) for i in range(3)]\n ranked_projects = projects_name#operator.itemgetter(*projects_index)(projects_name)\n return ranked_projects\n\n\ndef query_db(form_dict):\n lang_user_knows = [str(l) for l in list(form_dict[\"checked\"].keys())]\n db_filepath = os.path.join(\\\n django_base_folder, \"opensource_projects.sqlite\")\n db = sqlite3.connect(db_filepath)\n cursor = db.cursor()\n query = queryStr_generator(lang_user_knows)\n cursor.execute(query)\n projects = cursor.fetchall()\n print(projects)\n return projects\n\ndef queryStr_generator(lang_user_knows):\n query ='SELECT *\\\n FROM projects\\\n WHERE languages in ('\n for idx,lang in enumerate(lang_user_knows):\n is_last_idx = idx == len(lang_user_knows)-1\n query+='\"'\n if is_last_idx:\n query+=lang\n query+='\");'\n else:\n query+=lang\n query+='\",'\n print(query)\n return query\n\ndef extract_formData_from_request(request,languages):\n answers = dict(request.POST.lists())\n form_dict = 
{\"checked\":{},\"not_checked\":{}}\n    for language in languages:\n        checked = answers.get(language,[False])[0]\n        if checked:\n            form_dict[\"checked\"][language] = 1\n        else:\n            form_dict[\"not_checked\"][language] = 0\n    for (item_name,item_val) in answers.items():\n        if not form_dict.get(item_name,False):\n            form_dict[item_name] = item_val[0]\n    return form_dict\n\n\ndef get_languages_list():\n    unique_languages_filepath = os.path.join(\\\n        django_base_folder, \"unique_languages.csv\")\n    unique_languages = []\n    with open(unique_languages_filepath,\"r\") as inpf:\n        for language in inpf:\n            language_name=language.replace(\"\\n\",\"\")\n            unique_languages.append(language_name)\n    return unique_languages\n\ndef register_form(form_dict):\n    sep=\",\"\n    db_abspath = os.path.join(django_base_folder, \"forms_db.csv\")\n    with open(db_abspath,\"a\") as outf:\n        outf.write(\"{\")\n        for idx,(key,val) in enumerate(form_dict.items()):\n            if idx == len(form_dict.items())-1:\n                sep=\"\"\n            outf.write(str(key))\n            outf.write(\":\")\n            outf.write(str(val))\n            outf.write(sep)\n        outf.write(\"}\")\n\n        outf.write(\"\\n\")\n\ndef learn_github(request):\n    context = {}\n    template = loader.get_template(\"first_contrib.html\")\n    return HttpResponse(template.render(context))\n","sub_path":"webserver/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"353545095","text":"#!/usr/bin/env python\n\nimport argparse\nimport shlex\nimport subprocess\nimport ipaddress\nimport re\nimport sys  # needed for sys.exit() in get_ip_range\n\n#from multiprocessing import Process\n#from threading import Thread\nimport threading\n\nBASE_IP='10.1.1.2'\nNUM_THREADS=8\n\ndef get_ip_range(base_ip, num):\n    try:\n        base_ip = ipaddress.ip_address(base_ip)\n    except:\n        print('Invalid ip address: {}'.format(base_ip))\n        sys.exit(1)\n    ips = [base_ip + i for i in range(num)]\n    return ips\n\ndef init_ping(ip):\n    cmd='/sbin/ping -c 2 {}'.format(ip)\n    subprocess.check_call(shlex.split(cmd))\n\ndef setup_output_files(file, ip):\n    ip_string=str(ip)\n    sub_ip=ip_string.split('.')\n    o_str='{}/out{}.txt'.format(file, sub_ip[3])\n    return o_str\n    \ndef format_test_cmd(folder, ip):\n    out_file=setup_output_files(folder, ip)\n    cmd='/sbin/ping -c 20 {} > {}'\n    cmd=cmd.format(ip, out_file)\n    return cmd\n\nclass Ping_test(object):\n    queue=[]\n    thread_count=NUM_THREADS\n    lock=threading.Lock()\n\n    def exec_test(self, cmd_str):\n        subprocess.check_output(cmd_str, shell=True)\n\n    def pop_queue(self):\n        cmd_str=None\n        self.lock.acquire()\n        if self.queue:\n            cmd_str=self.queue.pop()\n        self.lock.release()\n        return cmd_str\n\n    def dequeue(self):\n        while True:\n            cmd_str=self.pop_queue()\n            if not cmd_str:\n                return None\n            self.exec_test(cmd_str)\n\n    def start(self):\n        threads=[]\n        for i in range(self.thread_count):\n            t=threading.Thread(target=self.dequeue)\n            t.start()\n            threads.append(t)\n        [ t.join() for t in threads ]\n\n\ndef main():\n    parser=argparse.ArgumentParser(description='Run N simultaneous ping tests/requests')\n    parser.add_argument('--number', '-n', required=True, type=int)\n    parser.add_argument('--output', '-o', required=True, type=str)\n    args=parser.parse_args()\n    test_ips = get_ip_range(BASE_IP, args.number)\n    test_args=[format_test_cmd(args.output,ip) for ip in test_ips]\n    for ip in test_ips:\n        init_ping(ip)\n    test=Ping_test()\n    test.thread_count=NUM_THREADS\n    test.queue=test_args\n    test.start()\n\n\nif __name__=='__main__':\n    main()\n
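# NOTE: a minimal alternative sketch, not part of the original script -- the\n# stdlib 'queue' module provides the same thread-safe hand-off as the manual\n# lock in Ping_test.pop_queue:\n#\n#     import queue\n#     work = queue.Queue()\n#     for cmd in test_args:\n#         work.put(cmd)\n#     # each worker loops on work.get_nowait() and stops on queue.Empty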
\n","sub_path":"tests/multi_ping.py","file_name":"multi_ping.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"365488166","text":"import tables as tb\nimport numpy as np\nimport misc.db\nimport parser\nimport compute\n\nclass Database(misc.db.Database):\n \"\"\"docstring for Database\"\"\"\n def __init__(self, path, mode='w'):\n super(Database, self).__init__(path, mode)\n self.parse = parser.Parser()\n \n def create_groups(self):\n '''Creates the default groups in the database.\n '''\n self._create_group('energies', self._handle.root)\n self._create_group('distances', self._handle.root)\n self._create_group('start_trajectories', self._handle.root)\n self._create_group('end_trajectories', self._handle.root)\n self._create_group('meta', self._handle.root)\n self._create_group('trajectory', '/meta')\n self._create_group('trajectories', self._handle.root)\n\n def get_active_site_data(self):\n return self._load_table('/meta', 'active_site')\n\n def get_sequence_Data(self):\n return self._load_table('/meta', 'sequence')\n\n def save_meta_data(self, config):\n meta_data = self.parse.meta(config)\n self._save_table(meta_data['sequence'], '/meta', 'sequence')\n self._save_table(meta_data['weights'], '/meta', 'weights')\n self._save_table(meta_data['misc'], '/meta', 'misc')\n self._save_table(meta_data['active_site'], '/meta', 'active_site')\n self._save_table(meta_data['parameter'], '/meta', 'parameter')\n \n def save_distance_ts(self, path, ID):\n distance_ts = self.parse.distance(path)\n self._save_table(distance_ts, '/distances', 'd%d' % ID)\n return distance_ts\n\n def save_energy_ts(self, path, ID):\n energy = self.parse.energy(path)\n self._save_array(energy, '/energies', 'e%d' % ID)\n return energy\n\n def save_trajectory(self, path, kind, ID):\n '''Save trajectory data to one of two tables.\n tables specified in 'kind'.\n \n kind: ['start_trajectories' | 'end_trajectories']\n '''\n traj = self.parse.xyz(path)\n self._save_table(traj, '/%s' % kind, 't%d' % ID)\n return traj\n\n def save_runs(self, runs):\n endstates = np.zeros(len(runs), dtype=[('ID', '>i2'), \n ('Energy', '>f4'), \n ('TotalEnergy', '>f4'), \n ('Distance', '>f4')])\n polymer_ids = np.array(np.unique(self.get_sequence_Data()['ID']))\n active_site_pos = self.get_active_site_data()['xyz']\n for run in runs:\n # Save First trajectory\n B_traj = self.save_trajectory(run.start_traj.as_posix(), \n 'start_trajectories', run.Id)\n # Save Last trajectory\n E_traj = self.save_trajectory(run.end_traj.as_posix(), \n 'end_trajectories', run.Id)\n self.save_complete_trajectory(run.full_traj.as_posix(), run.Id)\n # Save Energy timeseries\n energy = self.save_energy_ts(run.energy.as_posix(), run.Id)\n # Save Distance Timeseries\n if run.distance:\n distance_ts = self.save_distance_ts(run.distance.as_posix(), run.Id)\n # endstate calculations\n distance = compute.distance_to_active_site(E_traj, polymer_ids, \n active_site_pos)\n endstates[run.Id] = (run.Id, energy[-1,0], energy[-1,1], distance)\n # Save end-states\n self._save_table(endstates, '/', 'end_state')\n\n def save_trajectory_meta(self, path):\n type_list, step_size = self.parse.trajectory_meta(path)\n\n self._save_traj_type_order(type_list)\n self._save_traj_info(len(type_list), step_size)\n\n def _save_traj_type_order(self, data):\n self._save_array(data,'/meta/trajectory/', 'type_order')\n\n def _save_traj_info(self, particle_number, step_size):\n data = 
np.array([('particle_number', particle_number),\n ('step_size', step_size)], dtype=[('key', '|S15'),\n ('value', np.int)])\n self._save_table(data, '/meta/trajectory', 'info')\n\n def save_complete_trajectory(self, path, id_):\n data = self.parse.trajectory(path)\n self._save_array(data, '/trajectories', 'traj_%d' % id_, compress=True)\n\n def save_versions(self, version_info):\n np_format = [('program', '|S8'), ('version', '|S40')]\n data = np.array(version_info.items(), dtype=np_format)\n \n self._save_table(data, '/meta', 'versions')\n","sub_path":"Save/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"523493172","text":"#!/usr/bin/env python\n# vim: ai ts=4 sts=4 et sw=4\n\n##\n##\n## @author UWANTWALI ZIGAMA Didier\n## d.zigama@pivotaccess.com/zigdidier@gmail.com\n##\n\n__author__=\"Zigama Didier\"\n__date__ =\"$Nov 22, 2017 1:29:30 PM$\"\n\n\nfrom datetime import datetime, timedelta\nfrom util.mch_security import GESTATION, TRACKING_DAYS\nfrom controller.main import RSMSRWController\nfrom model.nutrition import Nutrition\nfrom util import queries\n\nfrom util.mch_util import makecol, give_me_table\n\n\nclass NutritionController(RSMSRWController):\n \n def __init__(self, navb):\n navb.gap= timedelta(days = 0)## USE THIS GAP OF ZERO DAYS TO DEFAULT TO CURRENT SITUATION\n self.navb = navb\n\n def get_total(self):\n cnds = self.navb.conditions()\n exts = {}\n cols = ['COUNT(*) AS total'] \n total = Nutrition.fetch_nutritions(cnds, cols, exts)[0]\n return total\n\n def get_stats(self):\n chi_cnds = self.navb.conditions()\n five_years_ago = self.navb.start - timedelta(days = TRACKING_DAYS)\n chi_cnds.update({\"(birth_date) <= '%s'\" % (self.navb.finish) : ''})\n chi_cnds.update({\"(birth_date) >= '%s'\" % (five_years_ago) : ''})\n chi_attrs = queries.CHILD_NUTR.keys()\n chi_exts = dict([(x, ('COUNT(*)', queries.CHILD_NUTR[x][0])) for x in chi_attrs])\n cols = ['COUNT(*) AS total']\n chi_nutr = Nutrition.fetch_children(chi_cnds, cols, chi_exts)\n\n pre_cnds = self.navb.conditions()\n pre_cnds.update({\"(created_at) <= '%s'\" % (self.navb.finish) : ''})\n pre_cnds.update({\"(recent_lmp + INTERVAL \\'%d days\\') >= '%s'\" % (GESTATION, self.navb.start) : ''})\n pre_attrs = queries.MOTHER_NUTR.keys()\n pre_exts = dict([(x, ('COUNT(*)', queries.MOTHER_NUTR[x][0])) for x in pre_attrs])\n pre_nutr = Nutrition.fetch_mothers(pre_cnds, cols, pre_exts)\n #print chi_nutr[0].__dict__, pre_nutr[0].__dict__\n return [chi_nutr, pre_nutr]\n\n\n\n def get_tables(self):\n chi_cnds = self.navb.conditions()\n five_years_ago = self.navb.start - timedelta(days = TRACKING_DAYS)\n chi_cnds.update({\"(birth_date) <= '%s'\" % (self.navb.finish) : ''})\n chi_cnds.update({\"(birth_date) >= '%s'\" % (five_years_ago) : ''})\n chi_attrs = queries.CHILD_NUTR.keys()\n \n pre_cnds = self.navb.conditions()\n pre_cnds.update({\"(created_at) <= '%s'\" % (self.navb.finish) : ''})\n pre_cnds.update({\"(recent_lmp + INTERVAL \\'%d days\\') >= '%s'\" % (GESTATION, self.navb.start) : ''})\n pre_attrs = queries.MOTHER_NUTR.keys()\n\n chi_cnds, markup, chicols = self.navb.neater_tables(cnds = chi_cnds, extras = [\n ('national_id', 'Mother ID'),\n ('user_phone', 'Reporter Phone'),\n ('birth_date', 'Birth date'),\n ('child_weight', 'Weight'),\n ('recent_muac', 'Muac'),\n ('created_at', 'Submission Date'),\n ('indexcol', \"ID\")\n \n ])\n\n CHIINDICS = [('child', 'total', 'Total Children'),\n ] + 
[(makecol(x), queries.CHILD_NUTR[x][0], queries.CHILD_NUTR[x][1]) for x in chi_attrs ]\n CHIINDICSDICT = {x[0]: (x[1], x[2]) for x in CHIINDICS}\n \n pre_cnds, markup, precols = self.navb.neater_tables(cnds = pre_cnds, extras = [\n ('national_id', 'Mother ID'),\n ('user_phone', 'Reporter Phone'),\n ('recent_lmp', 'LMP'),\n #('gravidity', 'Gravidity'),\n #('parity', 'Parity'),\n ('recent_mother_weight', 'Weight'),\n ('recent_mother_height', 'Height'),\n ('recent_bmi', 'BMI'),\n ('recent_muac', 'MUAC'),\n ('created_at', 'Submission Date'),\n ('indexcol', \"ID\")\n \n ])\n\n \n PREINDICS = [('mother', 'total', 'Total Mothers'),\n ] + [(makecol(x), queries.MOTHER_NUTR[x][0], queries.MOTHER_NUTR[x][1]) for x in pre_attrs ]\n PREINDICSDICT = {x[0]: (x[1], x[2]) for x in PREINDICS}\n \n \n INDICS = CHIINDICS\n title, sc, group, attrs, nat, tabular, locateds, INDICS_HEADERS = ('', '', '', [], [], [],[],[])\n\n if self.navb.kw.get('subcat'):\n sc = self.navb.kw.get('subcat')\n if self.navb.kw.get('group') == 'mother' or self.navb.kw.get('subcat') == 'mother':\n wcl = PREINDICSDICT[sc]#;print wcl, CMRINDICSDICT \n INDICS = [(sc, wcl[0], wcl[1])] if wcl else []\n INDICS_HEADERS = dict([ ( makecol(x[0]), x[2]) for x in INDICS])\n if wcl and wcl[0] != 'total': pre_cnds.update({wcl[0]: ''})\n dcols, cols = [x[0] for x in precols ], precols\n markup.update({'indexcol': lambda x, _, __: 'View' % (x), }) \n nat = Nutrition.fetch_log_mothers(pre_cnds, dcols)\n else:\n wcl = CHIINDICSDICT[sc]#;print wcl, CCMINDICSDICT \n INDICS = [(sc, wcl[0], wcl[1])] if wcl else []\n INDICS_HEADERS = dict([ ( makecol(x[0]), x[2]) for x in INDICS])\n if wcl and wcl[0] != 'total': chi_cnds.update({wcl[0]: ''})\n dcols, cols = [x[0] for x in chicols ], chicols\n markup.update({'indexcol': lambda x, _, __: 'View' % (x), }) \n nat = Nutrition.fetch_log_children(chi_cnds, dcols) \n \n else:\n if self.navb.kw.get('group') == 'mother' or self.navb.kw.get('subcat') == 'mother':\n dcols, cols = [x[0] for x in precols], precols\n markup.update({'indexcol': lambda x, _, __: 'View' % (x), }) \n nat = Nutrition.fetch_log_mothers(pre_cnds, dcols)\n else:\n dcols, cols = [x[0] for x in chicols], chicols\n markup.update({'indexcol': lambda x, _, __: 'View' % (x), }) \n nat = Nutrition.fetch_log_children(chi_cnds, dcols)\n\n if self.navb.kw.get('view') == 'table' or self.navb.kw.get('view') != 'log' :\n group_by = []\n group_by += ['province_pk'] if self.navb.kw.get('nation') or not group_by else []\n group_by += ['district_pk'] if self.navb.kw.get('province') else []\n group_by += ['referral_facility_pk'] if self.navb.kw.get('district') else []\n group_by += ['facility_pk'] if self.navb.kw.get('hd') else [] \n #print \"\\nGROUP BY: %s \\n\" % group_by\n LOCS = {'nation': self.navb.kw.get('nation'),\n 'province': self.navb.kw.get('province'),\n 'district': self.navb.kw.get('district'),\n 'hospital': self.navb.kw.get('hd'),\n 'location': self.navb.kw.get('hc')\n }\n \n if self.navb.kw.get('subcat') in [x[0] for x in CHIINDICS]:\n #print PRE_INDICS, LOCS, cnds, group_by\n locateds = Nutrition.fetch_children_by_location(chi_cnds, group_by = group_by, INDICS = CHIINDICS)\n elif self.navb.kw.get('subcat') in [x[0] for x in PREINDICS]:\n #print INDICS, LOCS, cnds, group_by\n locateds = Nutrition.fetch_mothers_by_location(pre_cnds, group_by = group_by, INDICS = PREINDICS)\n else:\n INDICS = CHIINDICS + PREINDICS\n locateds = Nutrition.fetch_children_by_location(chi_cnds, group_by = group_by, INDICS = CHIINDICS)\n locateds += 
Nutrition.fetch_mothers_by_location(pre_cnds, group_by = group_by, INDICS = PREINDICS)\n #print [[y.__dict__ for y in x] for x in locateds]#, INDICS, LOCS, self.navb.locs()\n\n tabular = give_me_table(locateds, self.navb.locs(), INDICS = INDICS, LOCS = LOCS)\n #print locateds, \"\\n\", tabular \n \n INDICS_HEADERS = dict([ ( makecol(x[0]), x[2]) for x in INDICS])\n desc = 'Nutrition%s' % (' (%s)' % ( self.navb.find_descr( [(makecol(x[0]), x[2]) for x in INDICS], sc or group ) ) )\n\n #print INDICS_HEADERS, tabular, locateds\n return (title, desc, group, attrs, markup, cols, nat, tabular, locateds, INDICS_HEADERS)\n\n\n\n\n \n \n \n","sub_path":"src/com/rwanda/mch/controller/nutritions.py","file_name":"nutritions.py","file_ext":"py","file_size_in_byte":9117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"192506683","text":"from OWDTestToolkit.global_imports import *\n \nclass main(GaiaTestCase):\n\n def toggleViaStatusBar(self, p_type):\n #\n # Uses the statusbar to toggle items on or off.
\n        # NOTE: Doesn't care if it's toggling items ON or OFF. It just toggles!\n        #\n        # Accepted 'types' are:\n        #       data\n        #       wifi\n        #       airplane
\n # bluetooth\n #\n self.logResult(\"info\", \"Toggling \" + p_type + \" mode via statusbar ...\")\n orig_iframe = self.currentIframe()\n \n #\n # Open the status bar.\n #\n self.displayStatusBar()\n \n #\n # Toggle (and wait).\n #\n _wifi = {\"name\":\"wifi\" , \"notif\":DOM.Statusbar.wifi , \"toggle\":DOM.Statusbar.toggle_wifi}\n _data = {\"name\":\"data\" , \"notif\":DOM.Statusbar.dataConn , \"toggle\":DOM.Statusbar.toggle_dataconn}\n _bluetooth = {\"name\":\"bluetooth\", \"notif\":DOM.Statusbar.bluetooth, \"toggle\":DOM.Statusbar.toggle_bluetooth}\n _airplane = {\"name\":\"airplane\" , \"notif\":DOM.Statusbar.airplane , \"toggle\":DOM.Statusbar.toggle_airplane}\n\n if p_type == \"data\" : typedef = _data\n if p_type == \"wifi\" : typedef = _wifi\n if p_type == \"bluetooth\": typedef = _bluetooth\n if p_type == \"airplane\" : typedef = _airplane\n \n boolReturn = self._sb_doToggle(typedef, p_type)\n \n #\n # Close the statusbar and return to the original frame (if required).\n #\n self.touchHomeButton() \n if orig_iframe: self.switchToFrame(\"src\", orig_iframe)\n \n return boolReturn\n \n def _sb_doToggle(self, p_def, p_type):\n #\n # (private) Toggle a button in the statusbar.\n # Don't call this directly, it's used by toggleViaStatusBar().\n #\n boolWasEnabled = self.isNetworkTypeEnabled(p_type)\n\n x = self.getElement(p_def[\"toggle\"], \"Toggle \" + p_def[\"name\"] + \" icon\")\n x.tap()\n\n boolReturn = True\n if boolWasEnabled:\n boolReturn = self.waitForNetworkItemDisabled(p_type)\n else:\n boolReturn = self.waitForNetworkItemEnabled(p_type)\n \n return boolReturn","sub_path":"OWDTestToolkit/utils/statusbar/toggleViaStatusBar.py","file_name":"toggleViaStatusBar.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"286079154","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api\nfrom novelsList import fetchNovelsList\n\napp = Flask(__name__)\napi = Api(app)\n\nclass GetNovelsList(Resource):\n def get(self, page):\n return fetchNovelsList(page)\n\napi.add_resource(GetNovelsList, '/novels/page/')\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"71486451","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 29 20:15:19 2018\r\n\r\n@author: Sami\r\n\"\"\"\r\n\r\n\r\n\r\nfrom random import randint\r\nfrom collections import defaultdict\r\n\r\n ######################\r\n\r\ndef getGraph(filename):\r\n\r\n\r\n with open(filename, 'r') as f_in:\r\n G = defaultdict(list)\r\n for row in f_in:\r\n row = row.split()\r\n G[row[0]] = row[1 : ]\r\n return G\r\n\r\n ######################\r\n\r\ndef getVEPair(range):\r\n\r\n v = randint(1, range)\r\n e = randint(1, range)\r\n return v, e\r\n\r\n ######################\r\n\r\ndef removeVEPair(G, V, E):\r\n\r\n while E in G[V]:\r\n G[V].remove(E)\r\n return G\r\n ######################\r\ndef contractNodes(G, V, E):\r\n\r\n edges = G[E]\r\n for edge in edges:\r\n if edge != V:\r\n G[V].append(edge)\r\n return G\r\n ######################\r\ndef removeNode(G, V, E):\r\n\r\n del G[E]\r\n for Vertex in G:\r\n while E in G[Vertex]:\r\n G[Vertex].remove(E)\r\n if V != Vertex:\r\n G[Vertex].append(V)\r\n return G\r\n\r\n ######################\r\n\r\ndef kargerMinCut():\r\n\r\n minCut = []\r\n for i in range(0, 100):\r\n G = 
getGraph('data1.txt')\r\n while(len(G) > 2):\r\n v, e = getVEPair(8)\r\n V = str(v)\r\n E = str(e)\r\n keys = G.keys()\r\n if V in keys and E != V:\r\n if E in G[V]:\r\n G = removeVEPair(G, V, E)\r\n G = contractNodes(G, V, E)\r\n G = removeNode(G, V, E)\r\n else:\r\n continue\r\n print (G)\r\n for v in G:\r\n minCut.append(len(G[v]))\r\n break\r\n return minCut\r\n ######################\r\n\r\nminCut = kargerMinCut()\r\nprint ('######################')\r\nprint (minCut)\r\nprint (min(minCut))","sub_path":"mincut-4.py","file_name":"mincut-4.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"74552345","text":"#!/usr/bin/python3\n\n#=========================== begin_copyright_notice ============================\n#\n# Copyright (C) 2021 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n#\n#============================ end_copyright_notice =============================\n\nimport argparse\nimport json\n\n\nOUTPUT_HEADER = \"\"\"// AUTOGENERATED FILE, DO NOT EDIT!\n// Generated by GenerateTranslationCode.py script.\"\"\"\n# C++ declarations separator.\nINTERVAL_BETWEEN_DECLS = \"\\n\\n\"\nBUILTIN_PREFIX = \"__cm_cl_\"\n# The name of the enum with operand kinds and the suffix of builtin operand\n# kind arrays.\nOPERAND_KIND = \"OperandKind\"\n# The suffix of builtin operand name enums.\nOPERAND_NAME = \"Operand\"\n\nparser = argparse.ArgumentParser(\n description=\"Generate translation code from JSON description.\")\nparser.add_argument(\"--desc\", required=True,\n help=\"JSON file with a description\", metavar=\".json\")\nparser.add_argument(\"--output\", required=True, help=\"output file\",\n metavar=\".inc\")\n\n# Opens \\p desc_filename JSON file and parses it.\n# Parsed structures are returned.\ndef get_description_from_json(desc_filename):\n with open(desc_filename, \"r\") as desc_file:\n return json.load(desc_file)\n\n# Generates:\n# namespace name {\n# enum Enum {\n# values[0],\n# values[1],\n# ...\n# };\n# } // namespace name\n#\n# The generated text is returned.\ndef generate_enum(name, values):\n text = \"namespace {n} {{\\nenum Enum {{\\n\".format(n=name)\n text += \",\\n\".join([\" {v}\".format(v=value) for value in values])\n return text + \"\\n}};\\n}} // namespace {n}\".format(n=name)\n\n# Generates:\n# constexpr c_type name[] = {\n# values[0],\n# values[1],\n# ...\n# };\n#\n# The generated text is returned.\ndef generate_array(c_type, name, values):\n assert values, \"cannot generate an empty array\"\n text = \"constexpr {t} {n}[] = {{\\n\".format(t=c_type, n=name)\n text += \",\\n\".join([' {v}'.format(v=value) for value in values])\n return text + \"\\n};\"\n\n# Generate enumerations that are not describing builtins but values of which\n# are used to describe builtins.\ndef generate_helper_enums(helper_structures):\n return INTERVAL_BETWEEN_DECLS.join(\n [generate_enum(struct, helper_structures[struct])\n for struct in helper_structures])\n\ndef validate_builtin_desc(builtin_name, desc, helper_structures):\n if not all(operand[\"Kind\"] in helper_structures[OPERAND_KIND]\n for operand in desc[\"Operands\"]):\n raise RuntimeError(\"Some of {b} operand kinds is illegal because it's not \"\n \"presented in OperandKind list\".format(b=builtin_name))\n\n# Raises an exception when some description inconsistency is found.\ndef validate_description(builtin_descs, helper_structures):\n for item in builtin_descs.items():\n validate_builtin_desc(*item, helper_structures)\n\n# Returns a 
new list with additional \"Size\" element at the back.\ndef append_size(lst):\n return [*lst, \"Size\"]\n\n# Generates an array with all the builtin names:\n# costexpr const char* BuiltinNames[] = {\n# \"__cm_cl_builtin0\",\n# \"__cm_cl_builtin1\",\n# ...\n# };\ndef generate_builtin_names_array(builtin_descs):\n return generate_array(\"const char*\", \"BuiltinNames\",\n ['\"' + BUILTIN_PREFIX + desc[\"Name\"] + '\"'\n for desc in builtin_descs.values()])\n\n# Generates:\n# namespace BuiltinOperand {\n# enum Enum {\n# OperandName0,\n# OperandName1,\n# ...\n# };\n# } // namespace BuiltinOperand\ndef generate_operand_names_enum(builtin, desc):\n return generate_enum(\n builtin + OPERAND_NAME,\n append_size(operand[\"Name\"] for operand in desc[\"Operands\"]))\n\n# Generates an enum for every builtin with its operands names to later use them\n# as indices.\n# Simplified output:\n# enum Builtin0Operand { SRC };\n# enum Builtin1Operand { DST, SRC };\n# ...\ndef generate_operand_names_enums(builtin_descs):\n return INTERVAL_BETWEEN_DECLS.join(\n [generate_operand_names_enum(*builtin)\n for builtin in builtin_descs.items()])\n\n# Generates an array with the number of operands for every builtin:\n# constexpr int BuiltinOperandSize[] = {\n# Builtin0Operand::Size,\n# Builtin1Operand::Size,\n# ...\n# };\ndef generate_operand_size_array(builtin_descs):\n return generate_array(\"int\", \"BuiltinOperandSize\",\n [builtin + OPERAND_NAME + \"::Size\"\n for builtin in builtin_descs])\n\n# Generates:\n# constexpr OperandKind::Enum BuiltinOperandKind[] = {\n# OperandKind::Kind0,\n# OperandKind::Kind1,\n# ...\n# };\ndef generate_operand_kinds_array(builtin, desc):\n return generate_array(OPERAND_KIND + \"::Enum\", builtin + OPERAND_KIND,\n [OPERAND_KIND + \"::\" + operand[\"Kind\"]\n for operand in desc[\"Operands\"]])\n\n# Generates an array for every builtin with the list its operand kinds.\n# Simplified output:\n# constexpr OperandKind::Enum Builtin0OperandKind[] = {OperandKind::VectorIn};\n# constexpr OperandKind::Enum Builtin1OperandKind[] = {\n# OperandKind::VectorOut, OperandKind::VectorIn};\ndef generate_operand_kinds_arrays(builtin_descs):\n return INTERVAL_BETWEEN_DECLS.join(\n generate_operand_kinds_array(builtin, desc)\n for builtin, desc in builtin_descs.items()\n if desc[\"Operands\"])\n\n# If there's an array of operand kinds, returns its name (array name degrades to\n# pointer), otherwise returns nullptr. The can be operand kinds array if the\n# builtin has no operands.\ndef get_operand_kinds_array_pointer(builtin, desc):\n if desc[\"Operands\"]:\n return builtin + OPERAND_KIND\n return \"nullptr\"\n\n# Generate an array of pointers to operand kinds arrays. 
So to get a kind of\n# BuiltinN's M-th operand one can write BuiltinOperandKind[BuiltinN][M].\n# Output:\n# constexpr const OperandKind::Enum* BuiltinOperandKind[] = {\n# Builtin0OperandKind,\n# Builtin1OperandKind,\n# nullptr,\n# ...\n# };\ndef generate_combined_operand_kinds_array(builtin_descs):\n return generate_array(\"const \" + OPERAND_KIND + \"::Enum*\",\n \"Builtin\" + OPERAND_KIND,\n [get_operand_kinds_array_pointer(*builtin)\n for builtin in builtin_descs.items()])\n\n# Generate enums and arrays that describe CMCL builtins.\ndef generate_builtin_descriptions(builtin_descs):\n decls = [generate_enum(\"BuiltinID\", append_size(builtin_descs.keys())),\n generate_builtin_names_array(builtin_descs),\n generate_operand_names_enums(builtin_descs),\n generate_operand_size_array(builtin_descs),\n generate_operand_kinds_arrays(builtin_descs),\n generate_combined_operand_kinds_array(builtin_descs)]\n return INTERVAL_BETWEEN_DECLS.join(decls)\n\n# Generate output file text.\ndef get_generated_file(whole_desc):\n validate_description(whole_desc[\"BuiltinDescriptions\"],\n whole_desc[\"HelperStructures\"])\n fragments = [OUTPUT_HEADER,\n generate_helper_enums(whole_desc[\"HelperStructures\"]),\n generate_builtin_descriptions(whole_desc[\"BuiltinDescriptions\"])]\n return INTERVAL_BETWEEN_DECLS.join(fragments)\n\nargs = parser.parse_args()\nwhole_desc = get_description_from_json(args.desc)\noutput_str = get_generated_file(whole_desc)\nwith open(args.output, \"w\") as output_file:\n output_file.write(output_str)\n","sub_path":"IGC/VectorCompiler/CMCL/lib/Support/GenerateTranslationInfo.py","file_name":"GenerateTranslationInfo.py","file_ext":"py","file_size_in_byte":7232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"575881426","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/kyle/fcms/flask-cms/flask_cms/settings.py\n# Compiled at: 2016-01-26 17:48:16\n\"\"\"\n settings\n ~~~~~~~~\n\n Global settings for project.\n\"\"\"\nimport os\nfrom local_settings import LocalConfig\n\nclass BaseConfig(LocalConfig):\n SYSTEM_MESSAGE_CATEGORIES = [\n 'successinfo',\n 'warning',\n 'danger']\n ADMIN_PER_PAGE = 5\n CODEMIRROR_LANGUAGES = ['python', 'python2', 'python3', 'php', 'javascript', 'xml', 'jinja2']\n CODEMIRROR_THEME = 'blackboard'\n SQLALCHEMY_ECHO = True\n SQLALCHEMY_COMMIT_ON_TEARDOWN = True\n CSRF_ENABLED = True\n ROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n URL_MODULES = [\n 'flask_cms.core.urls.routes',\n 'flask_cms.admin.urls.routes',\n 'flask_cms.auth.urls.routes',\n 'flask_cms.blog.urls.routes',\n 'flask_cms.member.urls.routes',\n 'flask_cms.page.urls.routes',\n 'flask_cms.fileviewer.urls.routes']\n BLUEPRINTS = [\n 'core.core',\n 'member.member',\n 'admin.admin',\n 'menu.menu',\n 'blog.blog',\n 'page.page',\n 'auth.auth',\n 'fileviewer.fileviewer']\n EXTENSIONS = [\n 'ext.alembic']\n CONTEXT_PROCESSORS = [\n 'core.context_processors.common_context',\n 'menu.context_processors.frontend_nav',\n 'menu.context_processors.admin_nav',\n 'auth.context_processors.user_context',\n 'core.context_processors.add_is_page',\n 'core.context_processors.add_is_list',\n 'core.context_processors.add_get_model',\n 'core.context_processors.add_get_button',\n 'core.context_processors.add_get_icon',\n 'core.context_processors.get_context',\n 'core.context_processors.add_get_block',\n 
'core.context_processors.add_urlfor',\n 'core.context_processors.add_layouts',\n 'core.context_processors.add_layout_mode',\n 'menu.context_processors.get_navbar',\n 'menu.context_processors._add_navbar',\n 'make_base.base',\n 'auth.context_processors.auth_context',\n 'blog.context_processors.add_admin_head',\n 'core.context_processors.add_size_converters']\n TEMPLATE_FILTERS = [\n 'flask.ext.xxl.filters.date',\n 'flask.ext.xxl.filters.date_pretty',\n 'flask.ext.xxl.filters.datetime',\n 'flask.ext.xxl.filters.pluralize',\n 'flask.ext.xxl.filters.month_name',\n 'flask.ext.xxl.filters.markdown',\n 'core.context_processors.fix_body',\n 'core.filters.split',\n 'blog.filters.markdown']\n CONTACT_FORM_SETTINGS = {'HEADING': 'Send Us a message', \n 'SUBHEADING': 'Or a Comment', \n 'OPTIONS': (\n ('test', 'opt1'),\n ('test2', 'opt2'),\n ('test3', 'opt3'),\n ('test4', 'opt4'),\n ('test5', 'opt5'),\n ('test6', 'opt6')), \n 'SUBMIT_TEXT': 'Send to Us', \n 'COMPANY_TITLE': 'Level2designs', \n 'COMPANY_ADDRESS': {'NAME': 'level2designs', \n 'STREET': '1045 w katella', \n 'CITY': 'Orange', \n 'STATE': 'CA', \n 'ZIP': '92804'}, \n 'COMPANY_PHONE': '714-783-6369', \n 'CONTACT_NAME': 'Roux', \n 'CONTACT_EMAIL': 'kyle@level2designs.com'}\n NAVBAR_TEMPLATE_FILES = (\n ('bootstrap-std', 'navbars/bs_std.html'),\n ('bootstrap-inverse', 'navbars/bs_inverse.html'),\n ('blog', 'navbars/blog.html'),\n ('clean', 'navbars/clean.html'))\n DEFAULT_NAVBAR = 'clean'\n LAYOUT_FILES = {'blog': 'layouts/1col_leftsidebar.html', \n 'post_form': 'layouts/1col_rightsidebar.html', \n 'one_col_left': 'layouts/1col_leftsidebar.html', \n 'one_col_right': 'layouts/1col_rightsidebar.html', \n 'two_col_left': 'layouts/2col_leftsidebar.html', \n 'two_col_right': 'layouts/2col_rightsidebar.html', \n 'three_col_left': 'layouts/3col_leftsidebar.html'}\n BASE_TEMPLATE_FILES = [\n ('one_col_left', '1_col_left.html'),\n ('one_col_right', '1_col_right.html'),\n ('two_col_left', '2_col_left.html'),\n ('two_col_right', '2_col_right.html'),\n ('three_col', '3_col.html')]\n BLOG_SIDEBAR_LEFT = True\n BLOG_TITLE = 'Dynamic'\n BLOG_CONTENT = 'some text to put into my
Blog'\n DEFAULT_ICON_LIBRARY = 'octicon'\n\n\ndef get_choices():\n return BaseConfig.CONTACT_FORM_SETTINGS['OPTIONS']\n\n\nclass DevelopmentConfig(BaseConfig):\n DEBUG = True\n DEBUG_TB_PROFILER_ENABLED = True\n DEBUG_TB_INTERCEPT_REDIRECTS = False\n\n\nclass TestingConfig(BaseConfig):\n TESTING = True\n SQLALCHEMY_ECHO = False\n SQLALCHEMY_DATABASE_URI = 'mysql://test:test@174.140.227.137:3306/test_test5'","sub_path":"pycfiles/FlaskCms-0.0.4.tar/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"254472438","text":"# 11055.py\n# 2018.04.28\n\nimport sys\n\nr = sys.stdin.readline\n\ndef mymax(p, p_len):\n\tdp = [p[0]]\t\n\tfor n in range(1, p_len):\n\t\tn_value = max([dp[i] for i in range(n) if p[i] < p[n]], default=0) + p[n]\n\t\tdp.append(n_value)\t\n\treturn max(dp)\n\nn = int(r())\np = [int(num) for num in r().split()]\nresult = mymax(p, n)\nprint(result)\n\n# dp[n] : n을 선택했을 때 p[n]까지의 수열 중에서 더한 값이 가장 큰 값\n# dp[n] = max(dp[j] + p[n]) (0 <= j < n and dp[j] < dp[p])\n","sub_path":"11000/11055.py","file_name":"11055.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"58300462","text":"# Copyright 2018-2023 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module contains the classes/functions specific for simulation of superconducting transmon hardware systems\"\"\"\nimport warnings\n\nfrom dataclasses import dataclass\nfrom typing import Callable, List, Union\n\nimport pennylane as qml\nimport pennylane.numpy as np\nfrom pennylane.pulse import HardwareHamiltonian\nfrom pennylane.typing import TensorLike\nfrom pennylane.wires import Wires\n\n\n# TODO ladder operators once there is qudit support\n# pylint: disable=unused-argument\ndef a(wire, d=2):\n \"\"\"creation operator\"\"\"\n return qml.s_prod(0.5, qml.PauliX(wire)) + qml.s_prod(0.5j, qml.PauliY(wire))\n\n\ndef ad(wire, d=2):\n \"\"\"annihilation operator\"\"\"\n return qml.s_prod(0.5, qml.PauliX(wire)) + qml.s_prod(-0.5j, qml.PauliY(wire))\n\n\n# pylint: disable=too-many-arguments\ndef transmon_interaction(\n omega: Union[float, list],\n connections: list,\n g: Union[float, list],\n wires: list,\n anharmonicity=None,\n d=2,\n):\n r\"\"\"Returns a :class:`ParametrizedHamiltonian` representing the circuit QED Hamiltonian of a superconducting transmon system.\n\n The Hamiltonian is given by\n\n .. 
math::\n\n H = \\sum_{q\\in \\text{wires}} \\omega_q a^\\dagger_q a_q\n + \\sum_{(i, j) \\in \\mathcal{C}} g_{ij} \\left(a^\\dagger_i a_j + a_j^\\dagger a_i \\right)\n + \\sum_{q\\in \\text{wires}} \\alpha_q a^\\dagger_q a^\\dagger_q a_q a_q\n\n where :math:`[a^\\dagger_p, a_q] = i \\delta_{pq}` are bosonic creation and annihilation operators.\n The first term describes the dressed qubit frequencies :math:`\\omega_q`, the second term their\n coupling :math:`g_{ij}` and the last the anharmonicity :math:`\\alpha_q`, which all can vary for\n different qubits. In practice, the bosonic operators are restricted to a finite dimension of the\n local Hilbert space (default ``d=2`` corresponds to qubits).\n In that case, the anharmonicity is set to :math:`\\alpha=0` and ignored.\n\n The values of :math:`\\omega` and :math:`\\alpha` are typically around :math:`5 \\times 2\\pi \\text{GHz}` and :math:`0.3 \\times 2\\pi \\text{GHz}`, respectively.\n It is common for different qubits to be out of tune with different energy gaps. The coupling strength\n :math:`g` typically varies betwewen :math:`[0.001, 0.1] \\times 2\\pi \\text{GHz}`. For some example parameters,\n see e.g. `arXiv:1804.04073 `_,\n `arXiv:2203.06818 `_, or `arXiv:2210.15812 `_.\n\n .. note:: Currently only supporting ``d=2`` with qudit support planned in the future.\n\n .. seealso::\n\n :func:`~.drive`\n\n Args:\n omega (Union[float, list[float]]): List of dressed qubit frequencies in GHz. Needs to match the length of ``wires``.\n When passing a single float all qubits are assumed to have that same frequency.\n connections (list[tuple(int)]): List of connections ``(i, j)`` between qubits i and j.\n When the wires in ``connections`` are not contained in ``wires``, a warning is raised.\n g (Union[float, list[float]]): List of coupling strengths in GHz. Needs to match the length of ``connections``.\n When passing a single float need explicit ``wires``.\n anharmonicity (Union[float, list[float]]): List of anharmonicities in GHz. Ignored when ``d=2``.\n When passing a single float all qubits are assumed to have that same anharmonicity.\n wires (list): Needs to be of the same length as omega. Note that there can be additional\n wires in the resulting operator from the ``connections``, which are treated independently.\n d (int): Local Hilbert space dimension. Defaults to ``d=2`` and is currently the only supported value.\n\n Returns:\n HardwareHamiltonian: a :class:`~.ParametrizedHamiltonian` representing the transmon interaction\n\n **Example**\n\n We can set up the transmon interaction Hamiltonian with uniform coefficients by passing ``float`` values.\n\n .. code-block::\n\n connections = [[0, 1], [1, 3], [2, 1], [4, 5]]\n H = qml.pulse.transmon_interaction(omega=0.5, connections=connections, g=1.)\n\n The resulting :class:`~.ParametrizedHamiltonian` consists of ``4`` coupling terms and ``6`` qubits\n because there are six different wire indices in ``connections``.\n\n >>> print(H)\n ParametrizedHamiltonian: terms=10\n\n We can also provide individual values for each of the qubit energies and connections.\n\n .. code-block::\n\n omega = [0.5, 0.4, 0.3, 0.2, 0.1, 0.]\n g = [1., 2., 3., 4.]\n H = qml.pulse.transmon_interaction(omega=omega, connections=connections, g=g)\n\n \"\"\"\n if d != 2:\n raise NotImplementedError(\n \"Currently only supporting qubits. Qutrits and qudits are planned in the future.\"\n )\n\n # if wires is None and qml.math.ndim(omega) == 0:\n # raise ValueError(\n # f\"Cannot instantiate wires automatically. 
Either need specific wires or a list of omega.\"\n # f\"Received wires {wires} and omega of type {type(omega)}\"\n # )\n\n # wires = wires or list(range(len(omega)))\n\n n_wires = len(wires)\n\n if not Wires(wires).contains_wires(Wires(np.unique(connections).tolist())):\n warnings.warn(\n f\"Caution, wires and connections do not match. \"\n f\"I.e., wires in connections {connections} are not contained in the wires {wires}\"\n )\n\n # Prepare coefficients\n if anharmonicity is None:\n anharmonicity = [0.0] * n_wires\n\n # TODO: make coefficients callable / trainable. Currently not supported\n if qml.math.ndim(omega) == 0:\n omega = [omega] * n_wires\n if len(omega) != n_wires:\n raise ValueError(\n f\"Number of qubit frequencies omega = {omega} does not match the provided wires = {wires}\"\n )\n\n if qml.math.ndim(g) == 0:\n g = [g] * len(connections)\n if len(g) != len(connections):\n raise ValueError(\n f\"Number of coupling terms {g} does not match the provided connections = {connections}\"\n )\n\n # qubit term\n coeffs = list(omega)\n observables = [ad(i, d) @ a(i, d) for i in wires]\n\n # coupling term term\n coeffs += list(g)\n observables += [ad(i, d) @ a(j, d) + ad(j, d) @ a(i, d) for (i, j) in connections]\n\n # TODO Qudit support. Currently not supported but will be in the future.\n # if d>2:\n # if anharmonicity is None:\n # anharmonicity = [0.] * n_wires\n # if qml.math.ndim(anharmonicity)==0:\n # anharmonicity = [anharmonicity] * n_wires\n # if len(anharmonicity) != n_wires:\n # raise ValueError(f\"Number of qubit anharmonicities anharmonicity = {anharmonicity} does not match the provided wires = {wires}\")\n # # anharmonicity term\n # coeffs += list(anharmonicity)\n # observables += [ad(i, d) @ ad(i, d) @ a(i, d) @ a(i, d) for i in wires]\n\n settings = TransmonSettings(connections, omega, g, anharmonicity=anharmonicity)\n\n return HardwareHamiltonian(coeffs, observables, settings=settings)\n\n\n@dataclass\nclass TransmonSettings:\n \"\"\"Dataclass that contains the information of a Transmon setup.\n\n .. see-also:: :func:`transmon_interaction`\n\n Args:\n connections (List): List `[[idx_q0, idx_q1], ..]` of connected qubits (wires)\n omega (List[float, Callable]):\n anharmonicity (List[float, Callable]):\n g (List[list, TensorLike, Callable]):\n\n \"\"\"\n\n connections: List\n omega: Union[float, Callable]\n g: Union[list, TensorLike, Callable]\n anharmonicity: Union[float, Callable]\n\n def __eq__(self, other):\n return (\n qml.math.all(self.connections == other.connections)\n and qml.math.all(self.omega == other.omega)\n and qml.math.all(self.g == other.g)\n and qml.math.all(self.anharmonicity == other.anharmonicity)\n )\n\n def __add__(self, other):\n if other is not None:\n new_connections = list(self.connections) + list(other.connections)\n new_omega = list(self.omega) + list(other.omega)\n new_g = list(self.g) + list(other.g)\n new_anh = list(self.anharmonicity) + list(other.anharmonicity)\n return TransmonSettings(new_connections, new_omega, new_g, anharmonicity=new_anh)\n\n return self\n","sub_path":"pennylane/pulse/transmon.py","file_name":"transmon.py","file_ext":"py","file_size_in_byte":8939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"648165382","text":"from django import forms\n\nfrom django_select2 import forms as s2forms\n\nfrom . 
import models\nfrom novav1.models import Packages ,Patient\nfrom django.forms import ModelChoiceField\nfrom django.forms.widgets import *\nfrom crispy_forms.layout import *\nfrom crispy_forms.helper import *\nfrom django.template import Template, Context\nfrom django.forms.widgets import DateInput, TextInput ,DateTimeBaseInput ,TimeInput\n\nfrom django_countries.fields import CountryField\nfrom django_countries.widgets import CountrySelectWidget\n\n\n\n\n\nclass EventForm(forms.ModelForm): \n\n \n class Meta:\n model = models.Events\n fields = ['event_name','start_date','end_date'] \n\n def __init__(self, *args, **kwargs):\n super(EventForm, self).__init__(*args, **kwargs)\n self.fields['event_name'] = ModelChoiceField(queryset=Patient.objects.all())\n \n\nclass ArriveForm(forms.ModelForm): \n class Meta:\n model = models.Events\n fields = ['arrive'] \n\n\n\nclass SessionDetail(forms.ModelForm): \n class Meta:\n model = models.Events\n fields = ['start','start_date','session_end','session_clinic','session_doctor','session_area','session_used_balls','session_branch','end','session_area'] \n\n labels = {\n 'start_date': (''),\n 'session_end': (''),\n 'session_clinic': (''),\n 'session_doctor': (''),\n 'session_area': (''),\n 'session_used_balls': (''),\n 'session_branch': (''),\n \n \n },\n\n\n\nclass callsFormsEvents(forms.ModelForm): \n class Meta:\n model = models.Events\n fields = ['event_type','event_note'] \n\n labels = {\n 'event_name': (''),\n 'start_date': (''),\n 'end_date': (''),\n 'event_type': (''),\n 'event_note': (''),\n \n \n \n }\n\n\nclass ParametersForms(forms.ModelForm): \n class Meta:\n model = models.deviceparameters\n fields =['Joule','msec','PulseCount','OperatorName'] \n\n \n\n\nclass callsFormsEvents(forms.ModelForm): \n class Meta:\n model = models.Events\n fields = ['event_name','start_date','end_date','event_type','event_doctor','event_clinic','branch_event','event_area'] \n\n labels = {\n 'event_name': (''),\n 'start_date': (''),\n 'end_date': (''),\n 'event_type': (''),\n 'event_note': (''),\n \n \n \n } ","sub_path":"event_manage/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"157260328","text":"__author__ = 'one'\n\nfrom openerp import models, api, fields, _\n\nclass ModelLookupWizard(models.TransientModel):\n _name = 'builder.ir.action.lookup.wizard'\n\n action_id = fields.Many2one('ir.actions.act_window', 'Action')\n lookup_mode = fields.Selection([('id', 'ID'), ('name', 'Name'), ('field', 'Field'), ('ref', 'Reference')], 'Lookup Mode', default='name', required=True)\n lookup_value = fields.Char('Lookup Value')\n\n @api.onchange('action_id', 'lookup_mode')\n def lookup_value_update(self):\n self.lookup_value = self.get_value()\n\n @api.multi\n def action_lookup(self):\n\n active_model = self.env[self.env.context.get('active_model')].search([('id', '=', self.env.context.get('active_id'))])\n\n if active_model.id:\n setattr(active_model, self.env.context.get('target_field'), self.get_value())\n\n return {'type': 'ir.actions.act_window_close'}\n\n @api.multi\n def get_value(self):\n raw_value = self.action_id\n\n if self.lookup_mode == 'id':\n return raw_value.id\n elif self.lookup_mode == 'name':\n return getattr(raw_value, raw_value._rec_name, False)\n elif self.lookup_mode == 'field':\n return getattr(raw_value, self.env.context.get('lookup_field'), False)\n elif self.lookup_mode == 'ref':\n data = 
self.env['ir.model.data'].search([('model', '=', 'ir.actions.act_window'), ('res_id', '=', raw_value.id)])\n if data:\n return getattr(raw_value, \"{module}.{id}\".format(module=data.module, id=data.name), False)\n else:\n return False\n","sub_path":"wizard/action_lookup_wizard.py","file_name":"action_lookup_wizard.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"505782150","text":"from shapely.geometry import Polygon\nimport csv, json, requests\nimport pkg_resources as pkg_r\n\nclass csv_parser:\n \n def __init__(self):\n self.coordinates = []\n \n # def parse(self, idx):\n # filepath = pkg_r.resource_filename(__name__, 'dataset/' + str(idx) + '.csv')\n # with open(filepath, 'r', encoding='cp949') as dataset:\n # reader = csv.reader(dataset)\n # size = sum(1 for line in open(filepath, encoding='cp949'))\n # first = True\n # for line in reader:\n # if first is True: first = False; continue\n # if '서울' not in line[4]: continue\n # corrected = line[14].replace('}}', '}').replace('][', '],[').replace('\"Polygon\"coordinates', '\"Polygon\",\"coordinates\"')\n # try:\n # data = json.loads(corrected, encoding='cp949')\n # except json.decoder.JSONDecodeError:\n # continue\n # self.coordinates.append(data['coordinates'][0])\n\n def parse(self, idx):\n filepath = pkg_r.resource_filename(__name__, 'dataset/' + str(idx) + '.csv')\n with open(filepath, 'r', encoding='cp949') as dataset:\n reader = csv.reader(dataset)\n size = sum(1 for line in open(filepath, encoding='cp949'))\n first = True\n for line in reader:\n if first is True: first = False; continue\n if '서울' not in line[4]: continue\n self.coordinates.append(f'{line[13]}, {line[12]}')\n\n def result(self):\n return self.coordinates\n\nclass parser:\n \n def __init__(self, YOUR_API_KEY):\n self.API_KEY = YOUR_API_KEY\n \n def check_key(self):\n print('[*] API_KEY : ' + self.API_KEY)\n \n # def find_center(self, border_coord):\n # poly = Polygon([[point[1], point[0]] for point in border_coord])\n # return f'{poly.centroid.x}, {poly.centroid.y}'\n \n # def load_result(self, centroid):\n # print('[*] CENTROID : ' + centroid)\n # URL = 'https://maps.googleapis.com/maps/api/geocode/json'\n # params = {\n # 'latlng' : centroid,\n # 'language' : 'ko',\n # 'key' : self.API_KEY\n # }\n # return requests.get(URL, params=params).json()\n\n def result(self, coord):\n URL = 'https://maps.googleapis.com/maps/api/geocode/json'\n params = {\n 'latlng' : coord,\n 'language' : 'ko',\n 'key' : self.API_KEY\n }\n r = requests.get(URL, params=params)\n # print(r.url)\n return r.json()\n\nclass roadname:\n\n def __init__(self):\n self.json_data = {}\n self.load()\n \n def load(self):\n filepath = pkg_r.resource_filename(__name__, 'SeoulRoadNameInfo.json')\n with open(filepath, encoding='utf-8') as f:\n data = f.read()\n self.json_data = json.loads(data)['DATA']\n\n def search(self, query):\n for data in self.json_data:\n # print(query)\n # print(data['road_nm'])\n if query == data['road_nm']:\n print(data)\n \n \n# if __name__ == '__main__':\n# roadname = roadname()\n# roadname.load()\n# roadname.search('삼양로')\n","sub_path":"getRoadData/dataminer/RevGeocode.py","file_name":"RevGeocode.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"240079377","text":"#!/usr/bin/python\n\nimport pygatt, array\n\nadapter = pygatt.GATTToolBackend()\nvalue = '0000FF00'\n\nvalue_data = 
value.decode(\"hex\")\nvalue_array = array.array('B', value_data)\n\ntry:\n\tadapter.start()\n\tdevice = adapter.connect('51:75:4B:0A:AC:E6')\n\tdevice.char_write_handle(0x001b, value_array)\nfinally:\n\tadapter.stop()","sub_path":"extras/pygatt_test.py","file_name":"pygatt_test.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"360123113","text":"import requests\nimport time\nimport json\n\nclass TelegramBot:\n CONNECTION_LOST_TIMEOUT = 60\n\n def __init__(self, token, proxies=None):\n self.token = token\n self.url = f\"https://api.telegram.org/bot{self.token}/\"\n self.update_id = 0\n self.proxies = proxies\n self._check_token()\n\n def _check_token(self):\n try:\n r = requests.get(self.url + \"getMe\", proxies=self.proxies)\n except:\n raise ValueError(\"Can't connect\")\n\n if r.json()['ok'] == True:\n return True\n else:\n raise ValueError(\"Invalid token!\")\n\n def get_messages(self):\n out = []\n try:\n r = requests.get(f\"{self.url}getUpdates?offset={self.update_id}\", timeout=self.CONNECTION_LOST_TIMEOUT, proxies=self.proxies).json()\n except requests.exceptions.RequestException as e:\n print(f\"{int(time.time())} | Error while getting messages:\", e)\n return out\n except json.decoder.JSONDecodeError as e:\n print(f\"{int(time.time())} | Error while getting messages:\", e)\n return out\n\n if r['ok'] and 'result' in r.keys():\n for i in r['result']:\n if 'message' in i.keys() and 'text' in i['message'].keys():\n out.append(i['message'])\n if i['update_id'] >= self.update_id:\n self.update_id = i['update_id']+1\n return out\n\n def send_message(self, chat_id, text, parse_mode=''):\n try:\n r = requests.post(f\"{self.url}sendMessage\", \n data={'chat_id': chat_id, 'text': text, 'parse_mode': parse_mode},\n timeout=self.CONNECTION_LOST_TIMEOUT,\n proxies=self.proxies)\n except requests.exceptions.RequestException as e:\n print(f\"{int(time.time())} | Error while sending message:\", e)\n return None\n\n return r\n","sub_path":"telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"502852399","text":"#!usr/bin/env python\r\n# _*_ coding:utf-8 _*_\r\n\r\ni=0\r\n\r\nnumbers = []\r\nm = int (input(\"number1:\"))\r\nn = int(input(\"number2:\"))\r\n\r\nif i < n:\r\n print(\"At the top i is %d\" %m)\r\n numbers.append(i)\r\n i=i+m\r\n print(\"Numbers now:\", numbers)\r\n print(\"At the bottom i is %d\" %i)\r\nelse:\r\n exit(0)\r\n\r\n\r\nprint(\"The numbers:\")\r\n\r\nfor num in numbers:\r\n print(num)\r\n","sub_path":"Chap0/ex33-1.py","file_name":"ex33-1.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"112897268","text":"\nimport tensorflow as tf\nimport numpy as np\nfrom helpers import *\nimport time\nimport json\nimport sys\nimport math\nfrom model_bn import Model\nimport data_input\n\nnpop = 300 # population size\nsigma = 0.1 # noise standard deviation\nalpha = 0.008 # learning rate\n\nboxmin = 0\nboxmax = 1\nboxplus = (boxmin + boxmax) / 2.\nboxmul = (boxmax - boxmin) / 2.\n\nepsi = 0.03\n\nsteps = []\n\ndef softmax(x):\n return np.divide(np.exp(x),np.sum(np.exp(x),-1,keepdims=True))\ndef main():\n with open('config.json') as config_file:\n config = json.load(config_file)\n \n model_file = tf.train.latest_checkpoint(config['model_dir'])\n if model_file is 
None:\n print('No model found')\n sys.exit()\n\n\n totalImages = 0\n succImages = 0\n faillist = []\n\n input_xs = tf.placeholder(tf.float32, [None, 32, 32, 3])\n y_input = tf.placeholder(tf.int64, shape=[None, 100])\n model = Model(input_xs, y_input, mode='eval')\n \n real_logits_pre = model.pre_softmax\n real_logits = tf.nn.softmax(real_logits_pre)\n \n saver = tf.train.Saver()\n \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n saver.restore(sess, model_file)\n \n \n start = 0\n end = 1500\n total = 0\n successlist = []\n printlist = []\n attack_start = time.time()\n \n fashion_mnist = data_input.Data(one_hot=False)\n \n for i in range(start, end):\n success = False\n print('evaluating %d of [%d, %d)' % (i, start, end), file=sys.stderr)\n\n inputs, targets= fashion_mnist.eval_data.xs[i], fashion_mnist.eval_data.ys[i]\n modify = np.random.randn(1,3,32,32) * 0.001\n\n logits = sess.run(real_logits, feed_dict={input_xs: [inputs]})\n #print(logits)\n\n if np.argmax(logits) != targets:\n print('skip the wrong example ', i)\n continue\n totalImages += 1\n for runstep in range(200):\n Nsample = np.random.randn(npop, 3,32,32)\n\n modify_try = modify.repeat(npop,0) + sigma*Nsample\n\n newimg = torch_arctanh((inputs-boxplus) / boxmul).transpose(2,0,1)\n\n inputimg = np.tanh(newimg+modify_try) * boxmul + boxplus\n if runstep % 10 == 0:\n realinputimg = np.tanh(newimg+modify) * boxmul + boxplus\n realdist = realinputimg - (np.tanh(newimg) * boxmul + boxplus)\n realclipdist = np.clip(realdist, -epsi, epsi)\n realclipinput = realclipdist + (np.tanh(newimg) * boxmul + boxplus)\n l2real = np.sum((realclipinput - (np.tanh(newimg) * boxmul + boxplus))**2)**0.5\n #l2real = np.abs(realclipinput - inputs.numpy())\n #print(inputs.shape)\n outputsreal = sess.run(real_logits, feed_dict={input_xs: realclipinput.transpose(0,2,3,1)})\n #print(outputsreal)\n\n #print('lireal: ',np.abs(realclipdist).max())\n #print('l2real: '+str(l2real.max()))\n #print(outputsreal)\n if (np.argmax(outputsreal) != targets) and (np.abs(realclipdist).max() <= epsi):\n succImages += 1\n success = True\n #print('clipimage succImages: '+str(succImages)+' totalImages: '+str(totalImages))\n #print('lirealsucc: '+str(realclipdist.max()))\n successlist.append(i)\n printlist.append(runstep)\n\n steps.append(runstep)\n# imsave(folder+classes[targets[0]]+'_'+str(\"%06d\" % batch_idx)+'.jpg',inputs.transpose(1,2,0))\n break\n dist = inputimg - (np.tanh(newimg) * boxmul + boxplus)\n clipdist = np.clip(dist, -epsi, epsi)\n clipinput = (clipdist + (np.tanh(newimg) * boxmul + boxplus)).reshape(npop,3,32,32)\n target_onehot = np.zeros((1,100))\n\n\n target_onehot[0][targets]=1.\n\n outputs = sess.run(real_logits, feed_dict={input_xs: clipinput.transpose(0,2,3,1)})\n\n target_onehot = target_onehot.repeat(npop,0)\n\n\n\n real = np.log((target_onehot * outputs).sum(1)+1e-30)\n other = np.log(((1. 
- target_onehot) * outputs - target_onehot * 10000.).max(1)[0]+1e-30)\n\n loss1 = np.clip(real - other, 0.,1000)\n\n Reward = 0.5 * loss1\n\n Reward = -Reward\n\n A = (Reward - np.mean(Reward)) / (np.std(Reward)+1e-7)\n\n\n modify = modify + (alpha/(npop*sigma)) * ((np.dot(Nsample.reshape(npop,-1).T, A)).reshape((3,32,32)))\n if not success:\n faillist.append(i)\n print('failed:',faillist)\n else:\n print('successed:',successlist)\n print('runstep :', printlist)\n print('now id', i)\n print('successed num', len(successlist))\n print('failed num', len(faillist))\n success_rate = succImages/float(totalImages)\n print('attack time : ', time.time()-attack_start,flush=True)\n print('succ rate', success_rate)\n print(model_file)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"cifar_100/nattack.py","file_name":"nattack.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"493516447","text":"import requests\nfrom my_retro_store_operations import CustomerOperations, RentalOperations\nfrom my_retro_store_operations import VideoOperations\nimport time,sys\nfrom datetime import date, datetime\n\ndef print_stars():\n print(\"\\n**********\\n\")\n\n#progress bar loading animation\ndef progress_bar(count, total, status=''):\n bar_len = 10\n filled_len = int(round(bar_len * count / float(total)))\n\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percents, '%', status))\n sys.stdout.flush()\n\ndef call_bar():\n total = 10\n i = 0\n while i < total:\n i += 1\n progress_bar(i, total, status='Please wait. Loading options for you')\n time.sleep(0.3)\n progress_bar(i, total, status='Loading is complete. Thanks for waiting')\n\ndef list_options():\n options = {\n #Video\n \"1\": \"Create a video\",\n \"2\": \"Update a video\",\n \"3\": \"Delete a video\",\n \"4\": \"List all videos\",\n \"5\": \"Get one video\", \n #Customer\n \"6\": \"Create a customer\",\n \"7\": \"Update a customer\",\n \"8\": \"Delete a customer\",\n \"9\": \"Select one customer\", \n \"10\": \"List all customers\",\n #Rental\n \"11\": \"Check out a video to a customer\",\n \"12\": \"Check in a video from a customer\",\n #Operations\n \"*\": \"List all options\",\n \"#\": \"Quit\"\n }\n print_stars()\n print(\"WELCOME TO RETRO VIDEO STORE\")\n print(\"These are the actions you can perform\")\n \n for choice_num in options:\n print(f\"Option {choice_num}. {options[choice_num]}\")\n print_stars()\n return options\n\ndef make_choice(options, customer_operations, video_operations, rental_operations):\n valid_choices = options.keys()\n choice = None\n\n while choice not in valid_choices:\n choice = input(\"Please make your selection using the one of the listed option number: \")\n\n return choice\n\ndef valid_release_date(date_string):\n format = \"%m-%d-%Y\"\n\n try:\n datetime.strptime(date_string, format)\n return True \n except ValueError:\n print(\"Invalid entry. Please enter date as MM-DD-YYYY\")\n return False\n\nurl=\"https://aida-retro-video-store-api.herokuapp.com\"\n\ndef main(play=True):\n \n print_stars()\n print(\"... \\N{smiling face with smiling eyes} ... WELCOME TO AIDA'S RETRO VIDEO STORE ... 
\\N{hugging face}\")\n call_bar()\n\n customer_operations = CustomerOperations(url)\n video_operations = VideoOperations(url)\n rental_operations = RentalOperations(url)\n options = list_options()\n\n while play==True:\n\n choice = make_choice(options, customer_operations, video_operations, rental_operations)\n\n if choice=='1':\n print(\"Hashing it out! Let's create a new video!\")\n title=input(\"What is the name of the video: \")\n release_date=input(\"Please enter a release date: \")\n if valid_release_date(release_date):\n release_date = release_date\n else:\n release_date=input(\"Please enter a date as (MM-DD-YYYY): \")\n if valid_release_date(release_date):\n release_date = release_date\n else:\n print(\"Oops, your entry is still invalid. Date will be stored as a default current date. You can update it later with option 2\")\n release_date = str(datetime.now())\n total_inventory=input(\"How many copies are there in total? \")\n call_bar()\n response = video_operations.create_video(title=title, release_date=release_date, total_inventory=total_inventory)\n print_stars()\n print(f\"Here is the ID of new video record: {response['id']} \")\n\n elif choice=='2':\n list_videos = video_operations.get_all_videos()\n for video in list_videos:\n print(f\"Video Id: {video['id']}, Video Title: {video['title']}\")\n video_id = input(\"Which video would you like to update? Please enter ID: \")\n if video_id.isnumeric():\n video_id = int(video_id)\n print(f\"Great, let's update the video with ID: {video_id}\")\n title=input(\"What is the new title of the movie? \")\n release_date=input(\"Please enter a new release date: \")\n if valid_release_date(release_date):\n release_date = release_date\n else:\n release_date=input(\"Please enter a valid new date: \")\n if valid_release_date(release_date):\n release_date = release_date\n else:\n print(\"Oops, your entry is still invalid. Date will be stored as a default current date. You can update it later with option 2\")\n release_date = str(datetime.now())\n total_inventory=input(\"How many copies are there total? \")\n response = video_operations.update_video(video_id, title=title, release_date=release_date,total_inventory=total_inventory)\n print_stars()\n print(f\"Successfully updated the video with ID: {response['id']} - title: {response['title']} - release date: {response['release_date']} - inventory: {response['total_inventory']}\")\n else:\n print(\"Id type is integer. Please enter valid id.\")\n\n elif choice=='3':\n list_videos = video_operations.get_all_videos()\n for video in list_videos:\n print(f\"Id:{video['id']}, Title:{video['title']}\")\n video_id = input(\"Which video would you like to delete? Please enter ID: \")\n if video_id.isnumeric():\n video_id = int(video_id)\n video_operations.delete_video(video_id)\n print_stars()\n print(f\"Success! Video with ID {video_id} has been deleted\")\n else:\n print(\"Id type is integer. Please enter valid id.\")\n\n elif choice=='4':\n print_stars()\n for video in video_operations.get_all_videos():\n print(video)\n\n elif choice=='5':\n print(\"Here are the videos:\")\n list_videos = video_operations.get_all_videos()\n for video in list_videos:\n print(f\"Id: {video['id']}, name: {video['title']}\")\n video_id = input(\"Which video id would you like to select? 
\")\n if video_id.isnumeric():\n video_id = int(video_id)\n video_operations.selected_video = video_operations.get_one_video(video_id=video_id)\n if video_operations.selected_video:\n print(f\"Selected video: {video_operations.selected_video}\")\n else:\n print(\"Id type is integer. Please enter valid id.\")\n \n elif choice=='6':\n print(\"Splendid! New customer more money! \")\n name=input(\"What is the name of the customer \")\n postal_code=input(\"What is the postal code? \")\n phone=input(\"Please write down a phone number to contact \")\n call_bar()\n response = customer_operations.create_customer(name=name, postal_code=postal_code,phone=phone)\n print_stars()\n print(f\"Here is the ID of new customer: {response['id']}\")\n\n elif choice=='7':\n list_customer = customer_operations.get_all_customers()\n for customer in list_customer:\n print(f\"Customer Id: {customer['id']}, name: {customer['name']}\")\n customer_id = input(\"Which customer would you like to update? Please enter ID: \")\n if customer_id.isnumeric():\n customer_id = int(customer_id)\n print(f\"Great! Let's update the customer with ID: {customer_id}\")\n name=input(\"What is the new name of your customer? \")\n postal_code=input(\"What is the new postal code of your customer? \")\n phone=input(\"Please write down a new phone number : \")\n response = customer_operations.update_customer(customer_id, name=name, postal_code=postal_code,phone=phone)\n print_stars()\n print(f\"Successfully updated the customer with ID: {response['id']} - name: {response['name']} - postal code: {response['postal_code']} - phone: {response['phone']}\")\n else:\n print(\"Id type is integer. Please enter valid id.\")\n\n elif choice=='8':\n list_customer = customer_operations.get_all_customers()\n for customer in list_customer:\n print(f\"Customer Id: {customer['id']}, name: {customer['name']}\")\n customer_id = input(\"Which customer you would like to delete? Please enter ID: \")\n if customer_id.isnumeric():\n customer_id = int(customer_id)\n customer_operations.delete_customer(customer_id)\n print_stars()\n print(f\"Customer with ID {customer_id} has been deleted\")\n else:\n print(\"Id type is integer. Please enter valid id.\")\n\n elif choice=='9':\n print(\"Here are the customers:\")\n list_customers = customer_operations.get_all_customers()\n for customer in list_customers:\n print(f\"Customer Id: {customer['id']}, name: {customer['name']}\")\n\n customer_id = input(\"Which customer id would you like to select? \")\n if customer_id.isnumeric():\n customer_id = int(customer_id)\n customer_operations.selected_customer = customer_operations.get_one_customer(customer_id=customer_id)\n if customer_operations.selected_customer:\n print(f\"Selected customer: {customer_operations.selected_customer}\")\n else:\n print(\"Id type is integer. Please enter valid id.\")\n\n elif choice=='10':\n print_stars()\n for customer in customer_operations.get_all_customers():\n print(customer)\n\n elif choice=='11':\n customer_id=input(\"Starting a rental procedure. Please enter a customer id: \")\n video_id=input(\"Please enter a video id: \")\n if customer_id.isnumeric():\n customer_id = int(customer_id)\n else:\n print(\"Id type is integer. Please enter valid id.\")\n if video_id.isnumeric():\n video_id = int(video_id)\n else:\n print(\"Id type is integer. Please enter valid id.\")\n call_bar()\n response = rental_operations.check_out(customer_id=customer_id, video_id=video_id)\n print(f\"*** Calling the shots! 
Selected video id {video_id} has been checked out! \")\n\n elif choice=='12':\n customer_id=input(\"Starting a return procedure. Please enter a customer id: \")\n video_id=input(\"Please enter a video id: \")\n if customer_id.isnumeric():\n customer_id = int(customer_id)\n else:\n print(\"Id type is integer. Please enter valid id.\")\n if video_id.isnumeric():\n video_id = int(video_id)\n else:\n print(\"Id type is integer. Please enter valid id.\")\n call_bar()\n response = rental_operations.check_in(customer_id=customer_id, video_id=video_id)\n print(\"Here is the updated return report:\", response)\n print(f\"*** Thank you customer id {customer_id}! Selected video id {video_id} has been checked in! ***\")\n \n elif choice =='*':\n list_options()\n elif choice=='#':\n play=False\n print(\"... \\N{smiling face with smiling eyes} ... Thanks for using the Retro Video store created by Aida ... \\N{winking face}\")\n\n print_stars()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"491039999","text":"# -*- coding: utf-8 -*-\nimport re\n\nfrom django import template\nregister = template.Library()\n\n\n@register.filter()\ndef remove_accent(word):\n \"\"\"\n Removes accents from the given word or phrase. If word is an instance of a class and is not a string or unicode\n type then, try to call it's __unicode__ method to get the word. If fail, return nothing.\n Else, return the word or phrase as is if no accent presented. Else the word or phrase without accents.\n This is applied ONLY for the Greek language where small accented letters (ά, ί etc) are transformed via CSS\n to uppercase letters but the accent remains making it look ugly (ΚΑΛΗΜΈΡΑ instead of ΚΑΛΗΜΕΡΑ).\n\n REPLACEMENT TABLE\n ------------------------------------------------------------\n Small GR letter | Unicode | Small GR letter | Unicode\n (with accent) | | (wo accent) |\n ------------------------------------------------------------\n ά | U+03AC | α | U+03B1\n έ | U+03AD | ε | U+03B5\n ή | U+03AE | η | U+03B7\n ί | U+03AF | ι | U+03B9\n ό | U+03CC | ο | U+03BF\n ύ | U+03CD | υ | U+03C5\n ώ | U+03CE | ω | U+03C9\n \"\"\"\n if not (isinstance(word, str) or isinstance(word, unicode)):\n try:\n word = word.__unicode__()\n except AttributeError:\n return ''\n\n replacement_table = { # Πεζά με τόνο\n u'ά': u'α',\n u'έ': u'ε',\n u'ή': u'η',\n u'ί': u'ι',\n u'ό': u'ο',\n u'ύ': u'υ',\n u'ώ': u'ω',\n # Κεφαλαία με τόνο\n u'Ά': u'Α',\n u'Έ': u'Ε',\n u'Ή': u'Η',\n u'Ί': u'Ι',\n u'Ό': u'Ο',\n u'Ύ': u'Υ',\n u'Ώ': u'Ω',}\n for letter in replacement_table:\n word = re.sub(letter, replacement_table.get(letter), word)\n return word\n\n\n# replacement_table = {u'\\\\u03ac': u'\\\\u03b1',\n # u'\\\\u03ad': u'\\\\u03b5',\n # u'\\\\u03ae': u'\\\\u03b7',\n # u'\\\\u03af': u'\\\\u03b9',\n # u'\\\\u03cc': u'\\\\u03bf',\n # u'\\\\u03cd': u'\\\\u03c5',\n # u'\\\\u03ce': u'\\\\u03c9'}","sub_path":"homepage/templatetags/homepage_filters.py","file_name":"homepage_filters.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"382387886","text":"from vtk.numpy_interface import dataset_adapter as dsa\nfrom vtk.util.vtkAlgorithm import VTKPythonAlgorithmBase\nimport vtk\n\nclass StreamExtents(VTKPythonAlgorithmBase):\n def __init__(self):\n super(StreamExtents, self).__init__(\n 
nInputPorts=1, inputType='vtkImageData',\n nOutputPorts=1, outputType='vtkMultiBlockDataSet')\n\n self.Contour = vtk.vtkContourFilter()\n self.Contour.SetValue(0, 180)\n self.UpdateIndex = 0\n self.NumberOfBlocks = 20\n\n def RequestUpdateExtent(self, request, inInfo, outInfo):\n info = inInfo[0].GetInformationObject(0)\n # Ask for the next extent.\n info.Set(vtk.vtkStreamingDemandDrivenPipeline.UPDATE_NUMBER_OF_PIECES(),\n self.NumberOfBlocks)\n info.Set(vtk.vtkStreamingDemandDrivenPipeline.UPDATE_PIECE_NUMBER(),\n self.UpdateIndex)\n return 1\n\n def RequestData(self, request, inInfo, outInfo):\n info = inInfo[0].GetInformationObject(0)\n inp = dsa.WrapDataObject(vtk.vtkDataSet.GetData(info))\n output = vtk.vtkMultiBlockDataSet.GetData(outInfo)\n\n if output.GetNumberOfBlocks() == 0:\n output.SetNumberOfBlocks(self.NumberOfBlocks)\n\n self.Contour.SetInputData(inp.VTKObject)\n self.Contour.Update()\n #print self.UpdateIndex, self.Contour.GetOutput().GetNumberOfCells()\n contour = self.Contour.GetOutput()\n if contour.GetNumberOfCells() > 0:\n block = vtk.vtkPolyData()\n block.ShallowCopy(contour)\n output.SetBlock(self.UpdateIndex, block)\n\n if self.UpdateIndex < self.NumberOfBlocks - 1:\n # If we are not done, ask the pipeline to re-execute us.\n print(\"Set CONTINUE_EXECUTING\")\n self.UpdateIndex += 1\n request.Set(\n vtk.vtkStreamingDemandDrivenPipeline.CONTINUE_EXECUTING(),\n 1)\n else:\n # Stop execution\n print(\"Remove CONTINUE_EXECUTING\")\n request.Remove(\n vtk.vtkStreamingDemandDrivenPipeline.CONTINUE_EXECUTING())\n # Reset for next potential execution.\n self.UpdateIndex = 0\n return 1\n\nw = vtk.vtkRTAnalyticSource()\nw.SetWholeExtent(-100, 100, -100, 100, -100, 100)\n\ns = StreamExtents()\ns.SetInputConnection(w.GetOutputPort())\n\nm = vtk.vtkCompositePolyDataMapper()\nm.SetInputConnection(s.GetOutputPort())\n\na = vtk.vtkActor()\na.SetMapper(m)\n\nren = vtk.vtkRenderer()\nren.AddActor(a)\n\niren = vtk.vtkRenderWindowInteractor()\nrenWin = vtk.vtkRenderWindow()\nrenWin.SetSize(800, 800)\nrenWin.AddRenderer(ren)\nrenWin.SetInteractor(iren)\n\nrenWin.Render()\n\niren.Start()\n","sub_path":"vtk/streaming/SpatialStreaming1.py","file_name":"SpatialStreaming1.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"461769589","text":"import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = input(\"\\nPlease type one of the folowing cities you want to be filtered 1-Chicago, 2-New York City, 3-Washington: \").lower()\n while city not in CITY_DATA.keys():\n city = input(\"\\nOops! You haven't choosen a correct city, please check spelling!: \").lower()\n\n # TO DO: get user input for month (all, january, february, ... 
, june)\n MONTH_LIST = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n month = input(\"\\nPlease type a month you want to filter by and choose from January to June or type All: \").lower()\n while month not in MONTH_LIST:\n month = input(\"\\nSorry! Please try again by choosing a month from January to June!: \").lower()\n\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n days = ['saturday', 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'all']\n day = input(\"\\nWhat day of week would like to filter by?: \").lower()\n while day not in days:\n day = input(\"\\nsorry! incorrect day, please try again: \" ).lower()\n\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n days = ['saturday', 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'all']\n df = df[df['day_of_week'] == day.title()]\n\n\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n common_month = df['month'].mode()[0]\n\n print('The most common month is:' ,common_month)\n\n # TO DO: display the most common day of week\n common_day = df['day_of_week'].mode()[0]\n\n print('The most common day of the week is:' ,common_day)\n\n\n # TO DO: display the most common start hour\n df['start_hour'] = df['Start Time'].dt.hour\n\n common_strt_hr = df['start_hour'].mode()[0]\n\n print('The most common start hour is:' ,common_strt_hr)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n start_station = df['Start Station'].mode()[0]\n print('The most common start station is:' ,start_station)\n\n # TO DO: display most commonly used end station\n end_station = df['End Station'].mode()[0]\n print('The most common end station is:' ,end_station)\n\n # TO DO: display most frequent combination of start station and end station trip\n df['start_end_stations'] = df['Start Station'] + \" - \" + df['End Station']\n 
freq_strt_end_stations = df['start_end_stations'].mode()[0]\n print('The most frequent start and end station combination are:' ,freq_strt_end_stations)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n df['travel time'] = pd.to_datetime(df['End Time']) - pd.to_datetime(df['Start Time'])\n\n # TO DO: display total travel time\n total_trvl_time = df['travel time'].sum()\n print(\"The total travel time is:\" ,total_trvl_time)\n\n\n # TO DO: display mean travel time\n mean_trvl_time = df['travel time'].mean()\n print(\"The mean travel time is:\" ,mean_trvl_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types_count = df['User Type'].value_counts()\n print('Counts of user types:\\n' ,user_types_count)\n\n\n # TO DO: Display counts of gender\n try:\n gender_types = df['Gender'].value_counts()\n print('\\nGender types:\\n' ,gender_types)\n except:\n print('\\nThere is no gender data available for this city')\n\n\n # TO DO: Display earliest, most recent, and most common year of birth\n try:\n earliest_birth_year = int(df['Birth Year'].min())\n print('\\nThe earliest birth year is:' ,earliest_birth_year)\n recent_birth_year = int(df['Birth Year'].max())\n print('\\nThe latest birth year is:' ,recent_birth_year)\n common_birth_year = int(df['Birth Year'].mode()[0])\n print('\\nThe most common birth year is:' ,common_birth_year)\n except:\n print('\\nThere is no birth year data available for this city')\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef show_data(df):\n \"\"\"\n Raw data is displayed upon request by the user.\n \"\"\"\n\n see_data = input(\"Would you like to see the raw data?: \").lower()\n\n\n start_index = 0\n end_index = 5\n\n\n while end_index in range(df.shape[0]):\n if see_data == 'yes':\n print(df.iloc[start_index:end_index])\n start_index += 5\n end_index += 5\n\n no_more = input(\"Would you like to see more?: \").lower()\n if no_more == 'no':\n break\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n show_data(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":7646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"152400804","text":"import bpy\nfrom bpy.props import IntProperty\nimport bmesh\nimport bgl\nfrom .. utils.ui import draw_init, draw_end, draw_title, draw_prop\nfrom .. utils.developer import output_traceback\nfrom .. 
utils import MACHIN3 as m3\n\n\n# TODO: vcreate\n\n\nclass VSelect(bpy.types.Operator):\n bl_idname = \"machin3.vselect\"\n bl_label = \"MACHIN3: VSelect\"\n bl_options = {'REGISTER', 'UNDO'}\n\n selidx = IntProperty(name=\"Selection Index\")\n\n @classmethod\n def poll(cls, context):\n active = context.active_object\n return active.vertex_groups\n\n\n def draw_HUD(self, args):\n draw_init(self, args)\n\n draw_title(self, \"Vertex Group Select\")\n\n draw_prop(self, \"Groups\", \"%d/%d\" % (self.selidx + 1, len(self.groups[\"list\"])), key=\"scroll UP/DOWN\")\n self.offset += 10\n\n draw_prop(self, \"Name\", self.active.vertex_groups[self.gidx].name, offset=18)\n draw_prop(self, \"Select\", self.groups[self.gidx][\"select\"], offset=18, key=\"toggle individual S, toggle all A\")\n # self.offset += 10\n\n draw_end()\n\n def draw_VIEW3D(self, args):\n white = (1, 1, 1)\n green = (0, 1, 0)\n alpha = 1\n pointcolor = (*white, alpha)\n greencolor = (*green, 1)\n\n mx = self.active.matrix_world\n\n\n bgl.glEnable(bgl.GL_BLEND)\n\n # if self.xray:\n bgl.glDisable(bgl.GL_DEPTH_TEST)\n\n\n # draw the vgroups groups marked as select in green\n bgl.glColor4f(*greencolor)\n bgl.glPointSize(8)\n bgl.glBegin(bgl.GL_POINTS)\n\n for group in self.green:\n for v in self.green[group]:\n # bring the coordinates into world space\n vco = mx * v.co\n\n bgl.glVertex3f(*vco)\n\n bgl.glEnd()\n\n # draw the currently selected/highlightes group of verts in white\n bgl.glColor4f(*pointcolor)\n bgl.glPointSize(4)\n bgl.glBegin(bgl.GL_POINTS)\n\n for v in self.groups[self.gidx][\"verts\"]:\n # bring the coordinates into world space\n vco = mx * v.co\n\n bgl.glVertex3f(*vco)\n\n\n draw_end()\n\n def modal(self, context, event):\n context.area.tag_redraw()\n\n # update mouse postion for HUD\n if event.type == \"MOUSEMOVE\":\n self.mouse_x = event.mouse_region_x\n self.mouse_y = event.mouse_region_y\n\n\n if event.type in ['WHEELUPMOUSE', 'ONE', 'WHEELDOWNMOUSE', 'TWO']:\n\n # CHANGE vertex group selection\n\n if event.type in ['WHEELUPMOUSE', 'ONE'] and event.value == \"PRESS\":\n self.selidx += 1\n elif event.type in ['WHEELDOWNMOUSE', 'TWO'] and event.value == \"PRESS\":\n self.selidx -= 1\n\n # loop vertex groups\n if self.selidx > len(self.groups[\"list\"]) - 1:\n self.selidx = 0\n elif self.selidx < 0:\n self.selidx = len(self.groups[\"list\"]) - 1\n\n self.gidx = self.groups[\"list\"][self.selidx]\n\n # TOGGLE per cgroup select state\n\n if event.type in ['S'] and event.value == \"PRESS\":\n self.groups[self.gidx][\"select\"] = not self.groups[self.gidx][\"select\"]\n\n if self.groups[self.gidx][\"select\"]:\n self.green[self.gidx] = self.groups[self.gidx][\"verts\"]\n else:\n self.green[self.gidx] = []\n\n # TOGGLE/INVERT all vgroups selected/unsellected\n\n elif event.type in ['A'] and event.value == \"PRESS\":\n for gidx in self.green:\n if self.green[gidx]:\n self.green[gidx] = []\n self.groups[gidx][\"select\"] = False\n else:\n self.green[gidx] = self.groups[gidx][\"verts\"]\n self.groups[gidx][\"select\"] = True\n\n\n # VIEWPORT control\n\n elif event.type in {'MIDDLEMOUSE'}:\n return {'PASS_THROUGH'}\n\n # FINISH\n\n elif event.type in ['LEFTMOUSE', 'SPACE']:\n self.select_vgroup(self.active)\n\n bpy.types.SpaceView3D.draw_handler_remove(self.HUD, 'WINDOW')\n bpy.types.SpaceView3D.draw_handler_remove(self.VIEW3D, 'WINDOW')\n return {'FINISHED'}\n\n # CANCEL\n\n elif event.type in {'RIGHTMOUSE', 'ESC'}:\n bpy.types.SpaceView3D.draw_handler_remove(self.HUD, 'WINDOW')\n 
bpy.types.SpaceView3D.draw_handler_remove(self.VIEW3D, 'WINDOW')\n return {'CANCELLED'}\n\n return {'RUNNING_MODAL'}\n\n def invoke(self, context, event):\n self.active = m3.get_active()\n\n # mouse positions\n self.mouse_x = self.init_mouse_x = self.fixed_mouse_x = event.mouse_region_x\n self.mouse_y = self.init_mouse_y = self.fixed_mouse_y = event.mouse_region_y\n\n # get the selection, find the common vgroups, prepare self.groups dictionary\n try:\n self.ret = self.main(self.active)\n except:\n output_traceback(self)\n return {'FINISHED'}\n\n\n # if there's no vgroup, finish without doing anything\n if not self.ret:\n return {'FINISHED'}\n\n # if there's only one group, select an finish immedeately\n elif len(self.groups[\"list\"]) == 1:\n self.green[self.gidx] = self.groups[self.gidx][\"verts\"]\n self.select_vgroup(self.active)\n return {'FINISHED'}\n\n # if there are multiple vgroups, run the modal, to select which ones should be selected\n else:\n args = (self, context)\n self.HUD = bpy.types.SpaceView3D.draw_handler_add(self.draw_HUD, (args, ), 'WINDOW', 'POST_PIXEL')\n self.VIEW3D = bpy.types.SpaceView3D.draw_handler_add(self.draw_VIEW3D, (args, ), 'WINDOW', 'POST_VIEW')\n\n context.window_manager.modal_handler_add(self)\n return {'RUNNING_MODAL'}\n\n def execute(self, context):\n active = m3.get_active()\n\n try:\n self.main(active)\n except:\n output_traceback(self)\n\n return {'FINISHED'}\n\n def main(self, active):\n debug = False\n # debug = True\n\n if debug:\n m3.clear()\n\n # all vgroup indices\n all_vgroups = set(range(len(active.vertex_groups)))\n\n # create bmesh\n self.bm = bmesh.from_edit_mesh(active.data)\n self.bm.normal_update()\n self.bm.verts.ensure_lookup_table()\n\n groups = self.bm.verts.layers.deform.verify()\n\n verts = [v for v in self.bm.verts if v.select]\n\n # get common vgroup indices of the selected verts\n if verts:\n # vgroups = self.get_common_vgroups(verts, all_vgroups, groups, debug=debug)\n vgroups = self.get_selected_vgroups(verts, groups, debug=debug)\n\n # if nothing is selected use all vgroup indices\n else:\n vgroups = list(all_vgroups)\n\n if vgroups:\n # loop vertex groups\n if self.selidx > len(vgroups) - 1:\n self.selidx = 0\n elif self.selidx < 0:\n self.selidx = len(vgroups) - 1\n\n\n # create a dict with group idx keys, and verts + select bool as attributes\n # also add a simple group list used with the selidx prop\n self.groups = {\"list\": vgroups}\n\n # create a similar dict for groups marked as select in the modal\n self.green = {}\n\n for vg in vgroups:\n self.groups[vg] = {}\n self.groups[vg][\"verts\"] = []\n self.groups[vg][\"select\"] = False\n self.green[vg] = []\n\n self.gidx = self.groups[\"list\"][self.selidx]\n\n for v in self.bm.verts:\n for vg in vgroups:\n if vg in v[groups]:\n self.groups[vg][\"verts\"].append(v)\n\n\n # select\n # self.select_vgroup(active)\n\n return True\n else:\n return False\n\n def select_vgroup(self, active):\n for group in self.green:\n for v in self.green[group]:\n v.select = True\n\n self.bm.select_flush(True)\n\n bmesh.update_edit_mesh(active.data)\n\n\n def get_selected_vgroups(self, verts, deform_layer, debug=False):\n # update the set for each vert in the selection, only leaving the groups common to all of them\n selected = []\n for v in verts:\n selected.extend(v[deform_layer].keys())\n\n # make unique\n selected = list(set(selected))\n\n if debug:\n print(\" » selected vgroups:\", selected)\n\n return selected\n\n\n def get_common_vgroups(self, verts, vgroups, deform_layer, 
debug=False):\n if debug:\n print(\" » all vgroups:\", vgroups)\n\n # update the set for each vert in the selection, only leaving the groups common to all of them\n for v in verts:\n vgroups.intersection_update(set(v[deform_layer].keys()))\n\n if debug:\n print(\" » common vgroups:\", vgroups)\n\n return list(vgroups)\n","sub_path":"All_In_One/addons/MESHmachine/operators/vselect.py","file_name":"vselect.py","file_ext":"py","file_size_in_byte":9022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"282590839","text":"#! /usr/bin/env python\n'''\nCSC450 SP2020 Group 4 (Cory Jackson)\nMissouri State University\n\nLoads a saved model\nRuns a spectrogram through loaded model\n\nFUNCTIONAL REQUIREMENTS\nFR.01\nFR.04\nNFR.05\nNFR.06\nDC.03\n'''\n# ********************************** imports **********************************\n# general imports\nimport numpy as np\nimport os\nimport time\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# pytorch imports3\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torchvision import transforms\nimport torch.optim as optim\n\n# ********************************** class run_model **********************************\n\nclass run_model:\n '''\n init function\n parameters:\n metrics (npy array): npy array that represents the mfcc of a WAV file\n optional parameters: (the weights do not need to be positive, sum to 1, or be below 1)\n anger_weight (float): float containing weight value for anger output, change per user customization, default of 0.75\n fear_weight (float): float containing weight value for fear output, change per user customization, default of 0.835\n happy_weight (float): float containing weight value for happy output, change per user customization, default of 0.8\n neutral_weight (float): float containing weight value for neutral output, change per user customization, default of 0.6\n sad_weight (float): float containing weight value for sad output, change per user customization, default of 0.75\n '''\n def __init__(self, metrics, anger_weight=0.75, fear_weight=0.835, happy_weight=0.8, neutral_weight=0.6, sad_weight=0.75):\n self.metrics = metrics.copy()\n self.anger_weight = anger_weight\n self.fear_weight = fear_weight\n self.happy_weight = happy_weight\n self.neutral_weight = neutral_weight\n self.sad_weight = sad_weight\n\n # sets the device to either the available GPU or system CPU\n self.device = torch.device(\"cpu\" if not (torch.cuda.is_available()) else \"cuda:0\")\n\n # transform sets the incoming number array into a tensor \n # normalizes the image based off RGB values \n # as per stated by ResNet's preferred input specifications\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n\n '''\n FR.01 EDGAR must classify the emotion of a speaker\n NFR.05 EDGAR must classify the emotion of the speaker in less than 3 seconds\n\n function load_model\n loads and evaluates model\n class variables:\n model (RecursiveScriptModule): instance of the model modelsavewhole1.py\n '''\n def load_model(self):\n self.model = torch.load(\"modelsavewhole1.pt\", map_location=self.device)\n self.model = self.model.to(self.device)\n\n self.model.eval() # transform passed numpy array into a tensor\n\n\n '''\n FR.01 EDGAR must classify the emotion of a speaker\n NFR.05 EDGAR must classify the emotion of the speaker in less than 3 seconds\n \n function transform_metrics\n transforms metrics into a tensor 
of the correct shape and format\n class variables:\n metrics (Tensor): metrics parameter transformed into a tensor\n '''\n def transform_metrics(self):\n self.metrics = self.transform(self.metrics)\n \n # reform tensor into correct shape, send to device, and data format\n self.metrics = self.metrics.unsqueeze(0)\n self.metrics = self.metrics.to(self.device)\n self.metrics = self.metrics.float()\n\n\n '''\n FR.01 EDGAR must classify the emotion of a speaker\n NFR.05 EDGAR must classify the emotion of the speaker in less than 3 seconds\n DC.03 EDGAR shall not use semantic context to identify emotion\n \n function run_model\n runs model on tensor\n class variables:\n output (Tensor): Tensor that represents output of model\n local variables:\n m (Softmax): modifies outputs to sum to 1\n s (Sigmoid): sigmoid that can be applied to output\n '''\n def run_model(self):\n self.output = self.model(self.metrics)\n\n # modify output based off Softmax to modify outputs to sum to 1\n m = nn.Softmax(dim=1)\n self.output = m(self.output)\n \n # apply sigmoid\n s = nn.Sigmoid()\n self.output = s(self.output)\n\n '''\n FR.01 EDGAR must classify the emotion of a speaker\n NFR.05 EDGAR must classify the emotion of the speaker in less than 3 seconds\n NFR.06 EDGAR must correctly identify emotion at least 75% of the time\n\n function fine_tune\n applies weights to output to generate a more accurate prediction\n class variables:\n weight (Tensor): tensor list of weights of each emotion to balance output\n '''\n def fine_tune(self):\n # original weights: 0.757352941, 0.990686275, 0.870588235, 0.624019608, 0.757352941\n # 'fine-tuned' weights: 0.75, 0.835, 0.80, 0.6, 0.75\n self.weight = torch.tensor([self.anger_weight, self.fear_weight, self.happy_weight, self.neutral_weight, self.sad_weight])\n self.weight = self.weight.to(self.device)\n self.output = (self.output*self.weight)\n\n\n '''\n FR.04 EDGAR must show classification to the user\n\n function print_output\n prints the weights of each emotion in an understandable way\n '''\n def print_output(self):\n print(\"\\n\" + \"\".join(\"WEIGHTS:\".center(50)))\n\n print(\"\".join(\"ANGER\".ljust(10)), \n \"\".join(\"FEAR\".ljust(10)), \n \"\".join(\"HAPPY\".ljust(10)), \n \"\".join(\"NEUTRAL\".ljust(10)), \n \"\".join(\"SAD\".ljust(10)))\n\n print(\"\".join(str(round(self.output[0][0].item(), 3)).ljust(10)), \n \"\".join(str(round(self.output[0][1].item(), 3)).ljust(10)), \n \"\".join(str(round(self.output[0][2].item(), 3)).ljust(10)), \n \"\".join(str(round(self.output[0][3].item(), 3)).ljust(10)), \n \"\".join(str(round(self.output[0][4].item(), 3)).ljust(10)))\n\n\n '''\n FR.01 EDGAR must classify the emotion of a speaker\n NFR.05 EDGAR must classify the emotion of the speaker in less than 3 seconds\n DC.03 EDGAR shall not use semantic context to identify emotion\n \n function get_prediction\n gets the prediction from the output\n returns:\n prediction (int): integer representation of largest (most common) value in output\n '''\n def get_prediction(self):\n # get predicted emotion\n self.prediction = int(torch.argmax(self.output)) \n \n return self.prediction\n\n\n# ********************************** main **********************************\nif __name__ == '__main__':\n print(\"main function of run_torch_model.py\")\n","sub_path":"dev_tools/functions/run_torch_model.py","file_name":"run_torch_model.py","file_ext":"py","file_size_in_byte":6872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} 
+{"seq_id":"72406988","text":"#! /usr/bin/env python\r\n#coding=utf-8\r\n\r\nimport wx\r\n\r\nclass BoxSizerFrame(wx.Frame):\r\n\r\n def __init__(self, parent):\r\n\r\n wx.Frame.__init__(self, parent, title='BoxSizer4')\r\n\r\n sizer = wx.BoxSizer(wx.VERTICAL)\r\n # Second button is center-aligned\r\n sizer.Add(wx.Button(self, -1, \"An extremely long button text\"), 0, 0, 0)\r\n sizer.Add(wx.Button(self, -1, \"Small Button\"), 0, wx.ALIGN_CENTER, 0)\r\n sizer.SetSizeHints(self)\r\n self.SetSizer(sizer)\r\n\r\nif __name__ == '__main__':\r\n app = wx.App(0)\r\n frame = BoxSizerFrame(None)\r\n frame.Show()\r\n app.MainLoop()","sub_path":"wxpython-es/scripts/boxsizer42.py","file_name":"boxsizer42.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"584736004","text":"import scrapy\n\n\nclass ShopcluesSpider(scrapy.Spider):\n # name of the spider\n name = 'shopclues'\n\n # list of allowed domain\n allowed_domains = [\n 'https://www.shopclues.com/mobiles-feature-phones.html?facet_brand%5b%5d=Rocktel&fsrc=facet_brand']\n # starting_url\n start_urls = [\n 'https://www.shopclues.com/mobiles-feature-phones.html?facet_brand%5b%5d=Rocktel&fsrc=facet_brand']\n # location of csv file\n custom_settings = {\n 'FEED_FORMAT': 'csv', # Used for pipeline 2\n 'FEED_URI': 'quoteresult.csv' # Used for pipeline 2\n }\n\n def parse(self, response):\n # Extract product information\n titles = response.css('img::attr(title)').extract()\n prices = response.css('.p_price::text').extract()\n images = response.css('img::attr(data-img)').extract()\n discounts = response.css('.prd_discount::text').extract()\n\n for item in zip(titles, prices, images, discounts):\n scraped_info = {\n 'title': item[0],\n 'price': item[1],\n 'image_urls': item[2],\n 'discount': item[3]\n\n }\n yield scraped_info\n","sub_path":"Scrapy_Practice.py","file_name":"Scrapy_Practice.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"511732219","text":"from itertools import combinations\nfrom scipy.misc import comb\nfrom tqdm import tqdm\n\n\ndef brute_force(f, k, constraint):\n if constraint == 'eq':\n return brute_force_eq(f, k)\n elif constraint == 'leq':\n max_val = 0\n for i in range(k + 1):\n val = brute_force_eq(f, i)\n if val >= max_val:\n max_val = val\n return max_val\n\n\ndef brute_force_eq(f, k):\n max_val = 0\n for S in tqdm(combinations(f.universe(), k)):\n val = f.eval(S)\n if val >= max_val:\n max_val = val\n return max_val\n","sub_path":"algorithms/brute_force.py","file_name":"brute_force.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"427851198","text":"print('Введите список через пробел: ')\r\nn = input().split()\r\nmaxNow = 0\r\nmaximum = 0\r\nmost = n[0]\r\nfor i in n:\r\n maxNow = n.count(i)\r\n if maxNow > maximum:\r\n maximum = maxNow\r\n most = i\r\nprint(most)\r\n","sub_path":"lab13.py","file_name":"lab13.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"55367823","text":"class creatures:\n eyes = 2\n def __init__(self):\n self.klasse = \"mammal\"\n \n def live(self):\n self.eyes = 24\n\nclass dog(creatures):\n legs = 4\n name = \"Basko\"\n\n def __init__(self):\n creatures.__init__(self) # constructor für zugang Oberklassse\n \n def 
add_number(self,new_number):\n self.eyes = new_number\n \n def look_number(self,look_new_number):\n self.eyes = look_new_number\n self.live()\n \n\nvererbteInstanz = dog() # weil dog = creature\nprint(creatures().eyes)\nprint(creatures().klasse)\nprint(creatures().eyes)\nprint()\nprint(dog().legs)\nprint(dog().name)\nprint(dog().eyes) # von creature\nprint()\nprint(vererbteInstanz.eyes) # dog eyes = creature eyes = 2\nprint(vererbteInstanz.klasse) \nvererbteInstanz.add_number(42) # dog > add_number > new_number = 42\nprint(vererbteInstanz.eyes) # gebe 42 aus\nvererbteInstanz.look_number(42) # dog > look_number > self.eyes = look_new_number \nprint(vererbteInstanz.eyes) # dog > look_number > self.eyes = look_new_number > self.live() > self.eyes = 24","sub_path":"exercise/vererbung.py","file_name":"vererbung.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"514784858","text":"#!/usr/bin/python3\n\"\"\"Text Indentation Module\n\nThis module prints text with 2 new lines after each of these\ncharacters: ., ? and : . Otherwise raise a TypeError.\n\n\"\"\"\n\n\ndef text_indentation(text):\n \"\"\"\n Args:\n text (string): String to be split.\n\n Returns:\n string: Split text by delimeter. TypeError otherwise.\n \"\"\"\n if type(text) != str:\n raise TypeError(\"text must be a string\")\n for i in range(len(text)):\n if text[i] != '.' and text[i] != '?' and text[i] != ':':\n if text[i] == ' ' and text[i - 1] >= 'a' and text[i - 1] <= 'z':\n print(text[i], end=\"\")\n else:\n continue\n else:\n print(text[i], end=\"\\n\")\n print()\n","sub_path":"0x07-python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"483358553","text":"\"\"\"\nTable Definitions for database structure\n\n$Id: schema.py 122 2006-11-25 23:38:13Z hazmat $\n\"\"\"\n\nfrom sqlalchemy import *\nfrom ore.alchemist.metadata import ZopeBoundMetaData\nfrom db import database\n\n__all__ = ['rdb_schema', 'PersonTable', 'AddressTable']\n\nrdb_schema = ZopeBoundMetaData(database)\n\nAddressTable = Table(\n 'Addresses',\n rdb_schema,\n autoload = True\n )\n\n# incidentally if we autoload person table first sqlalchemy will autoload addrreesses becaues\n# of the fk relationship. so we could just use the below.. 
but we won't have an addressable\n# table, though it's retrievable from the metadata.\n\nStateTable = Table(\n 'States',\n rdb_schema,\n autoload = True\n )\n\nPersonTable = Table(\n 'Persons',\n rdb_schema,\n autoload = True \n )\n\n\n","sub_path":"ore.alchemist/tags/examples-0.2-ui-prerefactor/orgpeople/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"619077040","text":"from boto.dynamodb2.fields import HashKey, RangeKey, GlobalAllIndex\nfrom boto.dynamodb2.table import Table\nfrom boto.dynamodb2.items import Item\nimport logging\nfrom passlib.apps import custom_app_context as pwd_context\nimport boto\nfrom boto.exception import JSONResponseError\nfrom boto.dynamodb2.layer1 import DynamoDBConnection\nimport time\n\ndef log_boto_error(e):\n logging.warn('Got exception: ' + e.__class__.__name__)\n logging.warn('\\tStatus: ' + str(e.status))\n logging.warn('\\tReason: ' + e.reason)\n logging.warn('\\tMessage: ' + str(e.body))\n\n\nclass DBConnection(object):\n def __init__(self, env):\n if env == 'development':\n self.conn = DynamoDBConnection(\n host='localhost',\n port=8000,\n aws_access_key_id='development',\n aws_secret_access_key='development',\n is_secure=False)\n elif env == 'test':\n self.conn = DynamoDBConnection(\n host='localhost',\n port=8000,\n aws_access_key_id='test',\n aws_secret_access_key='test',\n is_secure=False)\n elif env == 'production':\n self.conn = boto.connect_dynamodb()\n else:\n logging.error(\"Invalid environment\")\n self.conn = None\n\n\nclass DBTable(object):\n item_class = None\n simple_schema = [HashKey('_id')]\n\n def __init__(self, table_name, schema, global_indexes, dbconn):\n self.table_name = table_name\n self._table = Table(table_name, connection=dbconn.conn)\n self.schema = schema\n self.global_indexes = global_indexes\n self.dbconn = dbconn.conn\n\n def get_table(self):\n return self._table\n\n def exists(self):\n return self._table.table_name in self.dbconn.list_tables()['TableNames']\n\n def create(self):\n try:\n # print self.schema\n # print self.table_name\n self._table = Table.create(\n self.table_name,\n schema=self.schema,\n global_indexes=self.global_indexes,\n connection=self.dbconn)\n except JSONResponseError as e:\n logging.warn(\"Error creating table \" + self.table_name)\n log_boto_error(e)\n if 'message' in e.body and e.body['message'].startswith('Table already exists'):\n self._table = Table(self.table_name, connection=self.dbconn)\n elif 'Message' in e.body and e.body['Message'].startswith(\"Cannot create preexisting\"):\n self._table = Table(self.table_name, connection=self.dbconn)\n else:\n raise e\n return self\n\n def delete(self):\n try:\n self._table.delete()\n except JSONResponseError as e:\n logging.warn(\"Error deleting table \" + self.table_name)\n log_boto_error(e)\n if e.body['Message'].startswith('Cannot do operations on a non-existent table'):\n return\n else:\n raise e\n return\n\n def insert(self, data):\n self._table.put_item(data=data, overwrite=True)\n return\n\n def get_item(self, **kwargs):\n item = self._table.get_item(**kwargs)\n # TODO return item or class??\n # new_item = self.item_class(self)\n # new_item.__dict__ = item._data\n # return new_item\n return item\n\n def query_2(self, **kwargs):\n return self._table.query_2(**kwargs)\n\n def scan(self, **kwargs):\n return self._table.scan(**kwargs)\n\n def remove(self):\n return self._table.remove()\n\n\nclass DBItem(object):\n def __init__(self, table):\n self._table = table\n self._item = Item(table.get_table(), data=self.get_attrs())\n\n def get_attrs(self):\n item_list = {}\n for key, value in self.__dict__.iteritems():\n if not key.startswith('_'):\n item_list[key] = value\n return item_list\n\n def save(self, **kwargs):\n self._item.save(**kwargs)\n\n def insert(self, **kwargs):\n return self._table.insert(data=self._item._data)\n\n def delete(self):\n return self._item.delete()\n\nclass User(DBItem):\n def __init__(\n self, table, email=None, password=None,\n first_name=None, last_name=None, password_hash=None):\n\n if password is not None:\n self.password_hash = User.hash_password(password)\n else:\n self.password_hash = password_hash\n self.first_name = first_name\n self.last_name = last_name\n self.email = email\n self._table = table\n super(User, self).__init__(self._table)\n\n @staticmethod\n def hash_password(password):\n return pwd_context.encrypt(password)\n\n def verify_password(self, password):\n return pwd_context.verify(password, self.password_hash)\n\n\nclass UserCollection(DBTable):\n table_name = 'users'\n item_class = User\n schema = [HashKey('email')]\n\n def __init__(self, dbconn):\n super(UserCollection, self).__init__(\n UserCollection.table_name, UserCollection.schema, None, dbconn)\n\n\nclass Host(DBItem):\n def __init__(self, table, host_name=None):\n self.host_name = host_name\n self._table = table\n super(Host, self).__init__(self._table)\n\n\nclass HostCollection(DBTable):\n TABLE_NAME = 'host'\n item_class = Host\n schema = [HashKey('id')]\n\n def __init__(self, dbconn, table_name=TABLE_NAME):\n super(HostCollection, self).__init__(\n table_name, HostCollection.schema, None, dbconn)\n\n\nclass AthleteCollection(DBTable):\n table_name = 'athletes'\n global_indexes = [GlobalAllIndex(\n 'OrgIndex', parts=[HashKey('organization')])]\n\n def __init__(self, dbconn):\n super(AthleteCollection, self).__init__(\n AthleteCollection.table_name,\n DBTable.simple_schema,\n AthleteCollection.global_indexes,\n dbconn)\n\n\nclass OrganizationCollection(DBTable):\n table_name = 'organizations'\n global_indexes = [GlobalAllIndex('NameIndex', parts=[HashKey('name')])]\n\n def __init__(self, dbconn):\n super(OrganizationCollection, self).__init__(\n OrganizationCollection.table_name,\n DBTable.simple_schema,\n OrganizationCollection.global_indexes,\n dbconn)\n\n\nclass EventCollection(DBTable):\n table_name = 'events'\n global_indexes = [GlobalAllIndex('event-index', parts=[HashKey('event')])]\n\n def __init__(self, dbconn):\n super(EventCollection, self).__init__(\n EventCollection.table_name, DBTable.simple_schema, None, dbconn)\n self.schema = DBTable.simple_schema\n\n\nclass CrewCollection(DBTable):\n table_name = 'crew'\n global_indexes = [GlobalAllIndex('event-index', parts=[HashKey('event')])]\n\n def __init__(self, dbconn):\n super(CrewCollection, self).__init__(\n CrewCollection.table_name,\n DBTable.simple_schema,\n CrewCollection.global_indexes,\n dbconn)\n\n\nclass StageCollection(DBTable):\n table_name = 'stage'\n global_indexes = [GlobalAllIndex('event-index', parts=[HashKey('event')])]\n\n def __init__(self, dbconn):\n super(StageCollection, self).__init__(\n StageCollection.table_name,\n DBTable.simple_schema,\n StageCollection.global_indexes,\n dbconn)\n\n\nclass RaceCollection(DBTable):\n table_name = 'race'\n global_indexes = [GlobalAllIndex('event-index', parts=[HashKey('event')])]\n\n def __init__(self, dbconn):\n super(RaceCollection, self).__init__(\n RaceCollection.table_name,\n 
DBTable.simple_schema,\n RaceCollection.global_indexes,\n dbconn)\n\n\nclass Race(DBItem):\n def __init__(self, collection):\n super(Race, self).__init__(collection._table)\n\n\nclass RacingCrewCollection(DBTable):\n table_name = 'racing_crew'\n item_class = Race\n global_indexes = [GlobalAllIndex('race-index', parts=[HashKey('race')]),\n GlobalAllIndex('event-index', parts=[HashKey('event')])]\n\n def __init__(self, dbconn):\n super(RacingCrewCollection, self).__init__(\n RacingCrewCollection.table_name,\n DBTable.simple_schema,\n RacingCrewCollection.global_indexes,\n dbconn)\n\nclass RegattaCollection(DBTable):\n table_name = 'regatta'\n #item_class = Regatta\n global_indexes = [GlobalAllIndex('name-index', parts=[HashKey('name')])]\n\n def __init__(self, dbconn):\n super(RegattaCollection, self).__init__(\n RegattaCollection.table_name,\n DBTable.simple_schema,\n RegattaCollection.global_indexes,\n dbconn)\n\n\nclass Audit(DBTable):\n table_name = 'audit'\n schema = [HashKey('timeStamp'), RangeKey('user')]\n global_indexes = [GlobalAllIndex('race-index', parts=[HashKey('user')])]\n\n def __init__(self, dbconn):\n self.logger = logging.getLogger('audit')\n\n super(Audit, self).__init__(\n Audit.table_name,\n Audit.schema,\n Audit.global_indexes, dbconn)\n\n def info(self, message):\n self.logger.info(message)\n self.insert(data={\n 'message': message,\n 'timestamp': time.time()})\n","sub_path":"util/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"489963900","text":"\nimport sys\nsys.path.append(\"./././\")\n\n# Import lib\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport os\nimport pickle\nimport random\nfrom tqdm import tqdm\nimport glob\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\nfrom sklearn import preprocessing\n\n# Import my own lib\nimport others.utilities as my_util\nfrom algorithms.paired_distance_alg import paired_distance_alg\n\n#############################################################################################\n\ngpu_id = 0\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id)\n# Clear GPU cache\ntf.keras.backend.clear_session()\ngpus = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(gpus[0], True)\n\ndataset_name = 'Diveface'\nexp = 'inno'\nexp_name = exp + '_alg_tl' # exp_7_alg_tl exp_9_alg_tl\ndataset_exacted = 'resnet50' # vgg16 resnet50 retinaface\nexp_name = exp_name + dataset_exacted\nexp_name_suffix = '_b_300_e_50_a_1' # 30 60 90 120 150 180 210 240 270 300 330 360\n\n# eval_set = ['training', 'valid'] # training valid test\neval_set = ['test']\n\n# epoch = range(0,101)\n\nrandom_seed = 0\ntest_size = 0.2\nvalid_size = 0\n\nmodel_feature_size = 512\n\n#############################################################################################\n\n# Path\n# Dataset path\ndataset_path = my_util.get_path(additional_path=['.', '.', 'mount', 'FaceRecognitionPython_data_store', 'Dataset', 'Diveface'])\n# Summary path\nsummary_path = my_util.get_path(additional_path=['.', '.', 'mount', 'FaceRecognitionPython_data_store', 'Result', 'summary', exp, exp_name + exp_name_suffix + '_run_' + str(random_seed)])\nmy_util.make_directory(summary_path)\n\n#############################################################################################\n\n# Load data\nmy_data = pd.read_csv((dataset_path + dataset_name + '_' + dataset_exacted + '_nonorm.txt'), sep=\" \", 
header=0)\n# Label\nclass_data = my_data.id.values\nid_data = my_data.data_id.values\nx_data = my_data.iloc[:,8:].values\ny_gender_data = my_data['gender'].values\ny_race_data = (my_data['gender'] + '-' + my_data['ethnicity']).values\n# Separate data\n[training_sep_idx, test_sep_idx, valid_sep_idx] = my_util.split_data_by_id_and_classes(my_data.id.values, y_race_data, test_size=test_size, valid_size=valid_size, random_state=random_seed)\n# Randomly exclude to be equal to number of training samples in race classes\n# exclude_perc = np.round(training_sep_idx.size/6/3/6)/np.round(training_sep_idx.size/6/3/2)\n# [_, tmp_test_sep_idx, _] = my_util.split_data_by_id_and_classes(my_data.id.values[training_sep_idx], (my_data['gender'].iloc[training_sep_idx] + '-' + my_data['ethnicity'].iloc[training_sep_idx]).values, test_size=exclude_perc, valid_size=0, random_state=random_seed)\n# training_sep_idx = training_sep_idx[tmp_test_sep_idx]\ndata_sep_idx = {'training': training_sep_idx, 'valid':valid_sep_idx, 'test':test_sep_idx}\n# del exclude_perc, tmp_test_sep_idx\ndel training_sep_idx, test_sep_idx, valid_sep_idx\ndel my_data\n\n#############################################################################################\n\n# Function\ndef preprocess_data(_model, tmp_x_data):\n _tmp_x_data = _model.predict(tmp_x_data)\n _tmp_x_data = preprocessing.normalize(_tmp_x_data, norm='l2', axis=1, copy=True, return_norm=False)\n return _tmp_x_data\n\ndef eval_perf(_combined_id, _combined_x, _combined_y):\n [predictedScores, predictedY, test_time] = distance_model.predict(_combined_x[:,0:model_feature_size], _combined_x[:,model_feature_size:], _combined_y, unique_class, 1, distance_metric='euclidean')\n _tmp_performance_metric = my_util.biometric_metric(_combined_y, predictedScores, 'POS', score_order='ascending')\n del _tmp_performance_metric['threshold'], _tmp_performance_metric['fmr'], _tmp_performance_metric['fnmr']\n return _tmp_performance_metric\n\n#############################################################################################\n\n# Init\ngender_class = np.array(['female', 'male'])\nrace_class = np.array(['female-asian', 'female-black', 'female-caucasian', 'male-asian', 'male-black', 'male-caucasian'])\n# uniqued_class = np.unique(y_race_data)\ngender_in_race_class = pd.DataFrame(race_class)[0].str.split('-')\ndistance_model = paired_distance_alg()\nunique_class = {'pos':'POS', 'neg':'NEG'}\nchkp_fn = my_util.get_path(additional_path=['.', '.', 'mount', 'FaceRecognitionPython_data_store', 'Result', 'gridsearch', exp, exp_name + exp_name_suffix + '_run_' + str(random_seed)])\nchkp_fn = glob.glob(chkp_fn + 'cp-????.ckpt.index')\nchkp_fn = [os.path.basename(name) for name in chkp_fn ]\nchkp_fn = sorted(chkp_fn)\n\n# Pair triplet\n# Race\ntriplet_paired_list = {}\nfor eval_set_idx in eval_set:\n triplet_paired_list[eval_set_idx] = my_util.triplet_loss_paring(id_data[data_sep_idx[eval_set_idx]], class_data[data_sep_idx[eval_set_idx]], randomseed=random_seed)\n\n# Initial triplets network model\nmodel_path = my_util.get_path(additional_path=['.', '.', 'mount', 'FaceRecognitionPython_data_store', 'Result', 'gridsearch', exp, exp_name + exp_name_suffix + '_run_' + str(random_seed)])\nproposed_model = tf.keras.models.Sequential()\nproposed_model.add(tf.keras.layers.Dense(1024, input_dim=2048, activation='linear'))\nproposed_model.add(tf.keras.layers.Dense(512, activation=None))\nproposed_model.add(tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, 
axis=1)))\nproposed_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss=tfa.losses.TripletSemiHardLoss())\n\n# Evaluate\nfor eval_set_idx in eval_set:\n print('eval set: ' + eval_set_idx)\n performance_metric = {}\n performance_metric[eval_set_idx] = {}\n for epoch_val in tqdm(chkp_fn):\n epoch_idx = int(epoch_val[3:7])\n tmp_epoch_idx = str(epoch_idx).zfill(4)\n performance_metric[eval_set_idx][epoch_idx] = {}\n \n proposed_model.load_weights(model_path + 'cp-' + tmp_epoch_idx + '.ckpt')\n tmp_class_data = class_data[data_sep_idx[eval_set_idx]]\n tmp_id_data = id_data[data_sep_idx[eval_set_idx]]\n tmp_x_data = x_data[data_sep_idx[eval_set_idx]]\n tmp_y_data = y_gender_data[data_sep_idx[eval_set_idx]]\n tmp_x_data = preprocess_data(proposed_model, tmp_x_data)\n eval_x_data, eval_y_data, eval_id_data = my_util.combination_rule_paired_list(tmp_x_data, tmp_id_data, triplet_paired_list[eval_set_idx], combine_rule='concatenate')\n del tmp_class_data, tmp_id_data, tmp_x_data, tmp_y_data\n\n # Evaluate overall\n performance_metric[eval_set_idx][epoch_idx] = eval_perf(eval_id_data, eval_x_data, eval_y_data)\n \n print('AUC: ' + str(performance_metric[eval_set_idx][epoch_idx]['auc']) + ' - ' + str(epoch_idx))\n print('TAR@0d01: ' + str(performance_metric[eval_set_idx][epoch_idx]['tar_0d01']) + ' - ' + str(epoch_idx))\n\n # Save\n pickle_write = open((summary_path + exp_name + exp_name_suffix + '_run_' + str(random_seed) + '(' + eval_set_idx + ').pickle'), 'wb')\n pickle.dump(performance_metric, pickle_write)\n pickle_write.close()\n del pickle_write, performance_metric\n print('Saved')\n\n\n\nprint()\n","sub_path":"run_experiments/inno/eval_perf.py","file_name":"eval_perf.py","file_ext":"py","file_size_in_byte":7087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"429835972","text":"DEBUG = True\nSECRET_KEY = 'no'\nSITE_TITLE = 'lobbytransparency.eu'\nREGISTRATION = False\nSQLALCHEMY_DATABASE_URI = 'sqlite:///grano.db'\nSQLALCHEMY_DATABASE_URI = 'postgresql://localhost/grano_lobby'\n\n\n# HACK HACK - Need a proper mechanism for this.\nSTORED_QUERIES = {\n 'test': {\n 'label': 'Test Query',\n 'query': 'SELECT * FROM entity_actor'\n }\n }\n","sub_path":"grano/default_settings.py","file_name":"default_settings.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"635216968","text":"from __future__ import print_function\nimport evo.tools.file_interface as file_interface\nimport evo.core.sync as sync\nimport evo.core.trajectory as trajectory\nimport evo.core.metrics as metrics\nimport evo.core.filters as filters\nimport os\nimport sys\nimport json\nfrom collections import OrderedDict\nimport numpy as np\n\n\ndef write(f, s):\n print(s)\n f.write(s + \"\\n\")\n\n\ngt_path = os.path.abspath(sys.argv[1])\nest_path = os.path.abspath(sys.argv[2])\ndump_dir_path = os.path.abspath(sys.argv[3])\nlog_path = os.path.abspath(sys.argv[4])\njson_data = OrderedDict()\n\ndump_json_file = open(os.path.join(dump_dir_path, \"stats.json\"), \"w\")\nlog_file = open(log_path, \"a\")\n\nwrite(log_file, \"================ evo_traj_full_eval =================\")\nwrite(log_file, \"gt_path \" + gt_path)\nwrite(log_file, \"est_path \" + est_path)\nwrite(log_file, \"dump_dir_path \" + dump_dir_path)\nwrite(log_file, \"log_path \" + log_path)\n\ngt_traj = file_interface.read_tum_trajectory_file(gt_path)\nest_traj = 
file_interface.read_tum_trajectory_file(est_path)\n\ntraj_gt_synced, traj_est_synced = sync.associate_trajectories(gt_traj, est_traj, max_diff=0.01)\ntraj_est_aligned = trajectory.align_trajectory(traj_est_synced, traj_gt_synced, correct_scale=False,\n correct_only_scale=False)\n\n# ape translation only metric\nmetric = metrics.APE(metrics.PoseRelation.translation_part)\nmetric.process_data((traj_gt_synced, traj_est_aligned,))\nstat = metric.get_statistic(metrics.StatisticsType.rmse)\nwrite(log_file, \"ape_trans: %.5f\" % stat)\njson_data[\"ape_trans\"] = stat\n\n# ape rotation metric\nmetric = metrics.APE(metrics.PoseRelation.rotation_angle_deg)\nmetric.process_data((traj_gt_synced, traj_est_aligned,))\nstat = metric.get_statistic(metrics.StatisticsType.rmse)\nwrite(log_file, \"ape_rot_deg: %.5f\" % stat)\njson_data[\"ape_rot_deg\"] = stat\n\n# rpe metrics at different length\nlengths = [8, 16, 24, 32, 40, 48]\nfor length in lengths:\n trans_metric = metrics.RPE(metrics.PoseRelation.translation_part,\n delta=length, delta_unit=metrics.Unit.meters, all_pairs=True)\n rot_metric = metrics.RPE(metrics.PoseRelation.rotation_angle_deg,\n delta=length, delta_unit=metrics.Unit.meters, all_pairs=True)\n\n try:\n trans_metric.process_data((traj_gt_synced, traj_est_aligned,))\n rot_metric.process_data((traj_gt_synced, traj_est_aligned,))\n trans_stat = trans_metric.get_statistic(metrics.StatisticsType.rmse)\n rot_stat = rot_metric.get_statistic(metrics.StatisticsType.rmse)\n\n trans_metric_errors = trans_metric.error\n rot_metric_errors = rot_metric.error\n except filters.FilterException:\n trans_stat = np.nan\n rot_stat = np.nan\n trans_metric_errors = []\n rot_metric_errors = []\n\n write(log_file, \"rpe_%dm_trans: %.5f\" % (length, trans_stat))\n write(log_file, \"rpe_%dm_rot: %.5f\" % (length, rot_stat))\n json_data[\"rpe_%dm_trans\" % length] = trans_stat\n json_data[\"rpe_%dm_rot\" % length] = rot_stat\n\n np.savetxt(os.path.join(dump_dir_path, \"rpe_%dm_trans.txt\" % length), trans_metric_errors, fmt='%.10f', )\n np.savetxt(os.path.join(dump_dir_path, \"rpe_%dm_rot.txt\" % length), rot_metric_errors, fmt='%.10f', )\n\njson.dump(json_data, dump_json_file)\ndump_json_file.close()\nlog_file.close()\n","sub_path":"scripts/evo_traj_full_eval.py","file_name":"evo_traj_full_eval.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"558933758","text":"from io import TextIOBase\nimport sys\nimport inspect\nimport argparse\n\n\nclass OutputWrapper(TextIOBase):\n \"\"\"\n Wrapper around stdout/stderr\n \"\"\"\n\n @property\n def style_func(self):\n return self._style_func\n\n @style_func.setter\n def style_func(self, style_func):\n if style_func and self.isatty():\n self._style_func = style_func\n else:\n self._style_func = lambda x: x\n\n def __init__(self, out, style_func=None, ending='\\n'):\n self._out = out\n self.style_func = None\n self.ending = ending\n\n def __getattr__(self, name):\n return getattr(self._out, name)\n\n def isatty(self):\n return hasattr(self._out, 'isatty') and self._out.isatty()\n\n def write(self, msg, style_func=None, ending=None):\n ending = self.ending if ending is None else ending\n if ending and not msg.endswith(ending):\n msg += ending\n style_func = style_func or self.style_func\n self._out.write(style_func(msg))\n\n\nclass InvalidCommand(Exception):\n \"\"\"\n This is a generic error for \"bad\" commands.\n It is not used in Flask-Script itself, but you should throw\n this 
error (or one derived from it) in your command handlers,\n and your main code should display this error's message without\n a stack trace.\n\n This way, we maintain interoperability if some other plug-in code\n supplies Flask-Script hooks.\n \"\"\"\n\n\nclass Group:\n \"\"\"\n Stores argument groups and mutually exclusive groups for\n `ArgumentParser.add_argument_group\n `\n or `ArgumentParser.add_mutually_exclusive_group\n `.\n\n Note: The title and description params cannot be used with the exclusive\n or required params.\n\n :param options: A list of Option classes to add to this group\n :param title: A string to use as the title of the argument group\n :param description: A string to use as the description of the argument group\n :param exclusive: A boolean indicating if this is an argument group or a\n mutually exclusive group\n :param required: A boolean indicating if this mutually exclusive group\n must have an option selected\n \"\"\"\n\n def __init__(self, *options, **kwargs):\n self.option_list = options\n\n self.title = kwargs.pop(\"title\", None)\n self.description = kwargs.pop(\"description\", None)\n self.exclusive = kwargs.pop(\"exclusive\", None)\n self.required = kwargs.pop(\"required\", None)\n\n if ((self.title or self.description) and\n (self.required or self.exclusive)):\n raise TypeError(\"title and/or description cannot be used with \"\n \"required and/or exclusive.\")\n\n super(Group, self).__init__(**kwargs)\n\n def get_options(self):\n \"\"\"\n By default, returns self.option_list. Override if you\n need to do instance-specific configuration.\n \"\"\"\n return self.option_list\n\n\nclass Option:\n \"\"\"\n Stores positional and optional arguments for `ArgumentParser.add_argument\n `_.\n\n :param name_or_flags: Either a name or a list of option strings,\n e.g. 
foo or -f, --foo\n :param action: The basic type of action to be taken when this argument\n is encountered at the command-line.\n :param nargs: The number of command-line arguments that should be consumed.\n :param const: A constant value required by some action and nargs selections.\n :param default: The value produced if the argument is absent from\n the command-line.\n :param type: The type to which the command-line arg should be converted.\n :param choices: A container of the allowable values for the argument.\n :param required: Whether or not the command-line option may be omitted\n (optionals only).\n :param help: A brief description of what the argument does.\n :param metavar: A name for the argument in usage messages.\n :param dest: The name of the attribute to be added to the object\n returned by parse_args().\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n\nclass Command:\n \"\"\"\n Base class for creating commands.\n :param func: Initialize this command by introspecting the function.\n \"\"\"\n\n option_list = ()\n help_args = None\n\n def __init__(self, func=None, stdout=None, stderr=None):\n self.parser = None\n self.parent = None\n\n self.stdout = OutputWrapper(stdout or sys.stdout)\n self.stderr = OutputWrapper(stderr or sys.stderr)\n\n if func is None:\n if not self.option_list:\n self.option_list = []\n return\n\n # args, varargs, keywords, defaults = inspect.getargspec(func)\n args, varargs, keywords, defaults, *_ = inspect.getfullargspec(func)\n if inspect.ismethod(func):\n args = args[1:]\n\n options = []\n\n # first arg is always \"app\" : ignore\n\n defaults = defaults or []\n kwargs = dict(zip(*[reversed(l) for l in (args, defaults)]))\n\n for arg in args:\n if arg in kwargs:\n default = kwargs[arg]\n if isinstance(default, bool):\n options.append(Option('-%s' % arg[0],\n '--%s' % arg,\n action=\"store_true\",\n dest=arg,\n required=False,\n default=default))\n else:\n options.append(Option('-%s' % arg[0],\n '--%s' % arg,\n dest=arg,\n type=str,\n required=False,\n default=default))\n else:\n options.append(Option(arg, type=str))\n\n self.run = func\n self.__doc__ = func.__doc__\n self.option_list = options\n\n @property\n def description(self):\n description = self.__doc__ or ''\n return description.strip()\n\n def add_option(self, option):\n \"\"\"\n Adds Option to option list.\n \"\"\"\n self.option_list.append(option)\n\n def get_options(self):\n \"\"\"\n By default, returns self.option_list. 
Override if you\n need to do instance-specific configuration.\n \"\"\"\n return self.option_list\n\n def create_parser(self, *args, **kwargs):\n func_stack = kwargs.pop('func_stack', ())\n parent = kwargs.pop('parent', None)\n parser = argparse.ArgumentParser(*args, add_help=False, **kwargs)\n help_args = self.help_args\n\n while help_args is None and parent is not None:\n help_args = parent.help_args\n parent = getattr(parent, 'parent', None)\n\n if help_args:\n from anthill.framework.core.management import add_help\n add_help(parser, help_args)\n\n for option in self.get_options():\n if isinstance(option, Group):\n if option.exclusive:\n group = parser.add_mutually_exclusive_group(required=option.required)\n else:\n group = parser.add_argument_group(title=option.title, description=option.description)\n for opt in option.get_options():\n group.add_argument(*opt.args, **opt.kwargs)\n else:\n parser.add_argument(*option.args, **option.kwargs)\n\n parser.set_defaults(func_stack=func_stack + (self,))\n\n self.parser = parser\n self.parent = parent\n\n return parser\n\n def __call__(self, app=None, *args, **kwargs):\n \"\"\"\n Handles the command with the given app.\n Default behaviour is to call ``self.run`` within a test request context.\n \"\"\"\n return self.run(*args, **kwargs)\n\n def run(self, *args, **kwargs):\n \"\"\"\n Runs a command. This must be implemented by the subclass. Should take\n arguments as configured by the Command options.\n \"\"\"\n raise NotImplementedError\n","sub_path":"core/management/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"578122420","text":"import dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\n\napp = dash.Dash()\n\napp.scripts.config.serve_locally = True\n\nvertical = True\n\napp.layout = html.Div([\n html.Div(\n dcc.Tabs(\n tabs=[\n {'label': 'Market Value', 'value': 1},\n {'label': 'Usage Over Time', 'value': 2},\n {'label': 'Predictions', 'value': 3},\n {'label': 'Target Pricing', 'value': 4},\n ],\n value=3,\n id='tabs',\n vertical=vertical,\n style={\n 'height': '100vh',\n 'borderRight': 'thin lightgrey solid',\n 'textAlign': 'left'\n }\n ),\n style={'width': '30%', 'float': 'left'}\n ),\n html.Div(\n html.Div(id='tab-output'),\n style={'width': '70%', 'float': 'right'}\n )\n], style={\n 'fontFamily': 'Sans-Serif',\n 'margin-left': 'auto',\n 'margin-right': 'auto',\n})\n\n\n@app.callback(Output('tab-output', 'children'), [Input('tabs', 'value')])\ndef display_content(value):\n data = [\n {\n 'x': [1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,\n 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012],\n 'y': [219, 146, 112, 127, 124, 180, 236, 207, 236, 263,\n 350, 430, 474, 526, 488, 537, 500, 439],\n 'name': 'Rest of world',\n 'marker': {\n 'color': 'rgb(55, 83, 109)'\n },\n 'type': ['bar', 'scatter', 'box'][int(value) % 3]\n },\n {\n 'x': [1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,\n 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012],\n 'y': [16, 13, 10, 11, 28, 37, 43, 55, 56, 88, 105, 156, 270,\n 299, 340, 403, 549, 499],\n 'name': 'China',\n 'marker': {\n 'color': 'rgb(26, 118, 255)'\n },\n 'type': ['bar', 'scatter', 'box'][int(value) % 3]\n }\n ]\n\n return html.Div([\n dcc.Graph(\n id='graph',\n figure={\n 'data': data,\n 'layout': {\n 'margin': {\n 'l': 30,\n 'r': 0,\n 'b': 30,\n 't': 0\n },\n 'legend': {'x': 0, 'y': 
1}\n                }\n            }\n        ),\n    ])\n\n\nif __name__ == '__main__':\n    app.run_server(debug=True, host='0.0.0.0', port=5000)","sub_path":"dashboard/tabs.py","file_name":"tabs.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"50590805","text":"\"\"\"\n1. Application factory:\nCreate the flaskr folder and add an __init__.py file inside it.\n__init__.py serves two purposes: first, it contains the application factory;\nsecond, it tells Python that the flaskr folder should be treated as a package.\n\n\"\"\"\n\nimport os\n\nfrom flask import Flask\n\n\ndef create_app(test_config=None):\n    \"\"\"\n    Create a factory function: modular management (configuration, imports, registration, etc.)\n    :param test_config:\n    :return:\n    \"\"\"\n    # create and configure the app\n    app = Flask(__name__, instance_relative_config=True)\n    app.config.from_mapping(\n        SECRET_KEY='dev',\n        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),\n    )\n\n    if test_config is None:\n        # load the instance config, if it exists, when not testing\n        app.config.from_pyfile('config.py', silent=True)\n    else:\n        # load the test config if passed in\n        app.config.from_mapping(test_config)\n\n    # ensure the instance folder exists\n    try:\n        os.makedirs(app.instance_path)\n    except OSError:\n        pass\n\n    # a simple page that says hello\n    @app.route('/hello')\n    def hello():\n        return 'Hello, World!'\n\n    # import the database initialization commands\n    from . import db\n\n    db.init_app(app)\n\n    # import and register the auth blueprint: auth.bp\n    from . import auth\n    app.register_blueprint(auth.bp)\n\n    # import and register the blog blueprint: blog.bp\n\n    from . import blog\n    app.register_blueprint(blog.bp)\n\n\n    # import and register the markdown blueprint: mark.bp\n    from . import mark\n    app.register_blueprint(mark.bp)\n\n    # import and register the imag blueprint: imag.bp\n    from . import imag\n    app.register_blueprint(imag.bp)\n\n    #app.add_url_rule('/', endpoint='index')\n\n\n    return app\n\n","sub_path":"flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"26086123","text":"from util import *\n\n\n@apply\ndef apply(given, pivot=0):\n    [*conds] = given.of(Or)\n\n    if isinstance(pivot, tuple):\n        eq = []\n        for i in sorted(pivot, reverse=True):\n            eq.append(conds.pop(i))\n        eq = Or(*eq)\n    else:\n        eq = conds.pop(pivot)\n\n    cond = eq.invert()\n\n    return Infer(cond, given.func(*conds))\n\n\n@prove\ndef prove(Eq):\n    from axiom import algebra\n\n    n = Symbol(integer=True, positive=True)\n    x, y = Symbol(complex=True, shape=(n,))\n    f, g = Function(complex=True, shape=())\n    Eq << apply(Unequal(f(x), 1) | Unequal(g(x), 1) | Equal(x, y), pivot=(0, 1))\n\n    Eq << Eq[1].apply(algebra.infer.given.ou)\n\n\nif __name__ == '__main__':\n    run()\n\n# created on 2018-03-21\n","sub_path":"axiom/algebra/ou/imply/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"151025287","text":"# Task No. 2: Encode any string of three words using the Huffman algorithm.\r\n\r\nfrom collections import deque as dq\r\n\r\nprint('Hello! This program Huffman-encodes any string '\r\n      'of three words')\r\n\r\n\r\nclass UsersNode:\r\n    def __init__(self, value, users_letter=None, left=None, right=None):\r\n        self.value = value\r\n        self.users_letter = users_letter\r\n        self.right = right\r\n        self.left = left\r\n\r\n\r\ndef search_users(node, path=''):\r\n    if node.users_letter is not None:\r\n        node.value = 0\r\n        return node.users_letter, path\r\n    if node.right is not None and node.right.value != 0:\r\n        error_our = search_users(node.right, path=f'{path}1')\r\n        if node.right.value == 0 and node.left.value == 0:\r\n            node.value = 0\r\n        return error_our\r\n    if node.left is not None and node.left.value != 0:\r\n        error_our = search_users(node.left, path=f'{path}0')\r\n        if node.right.value == 0 and node.left.value == 0:\r\n            node.value = 0\r\n        return error_our\r\n\r\n\r\ns = input('Enter the string (in Russian) to encode in binary: \\n')\r\n\r\n\r\nusers_d = {}\r\nfor y in s:\r\n    if y not in users_d:\r\n        users_d[y] = 1\r\n    else:\r\n        users_d[y] += 1\r\n\r\nnode_list = dq([UsersNode(users_d[i], i) for i in users_d])\r\n\r\nfor i in range(len(users_d)-1):\r\n    node_list = dq(sorted(node_list, key=lambda node: node.value))\r\n    first_el = node_list.popleft()\r\n    second_el = node_list.popleft()\r\n    new_node = UsersNode(first_el.value + second_el.value, left=first_el, right=second_el)\r\n    node_list.appendleft(new_node)\r\ntree_users = node_list[0]\r\n\r\n\r\nusers_l = {}\r\nfor _ in range(len(users_d)):\r\n    k = search_users(tree_users)\r\n    users_l[k[0]] = k[1]\r\ndel tree_users\r\n\r\nprint(f'The string you entered:\\n{s}')\r\n\r\nprint('Your string in binary code: ')\r\nfor p in s:\r\n    print(users_l[p], end=' ')\r\n\r\nprint()\r\n\r\nprint('Thank you for using our program! ')\r\n","sub_path":"task_2_dubinin_v_h_v_8.py","file_name":"task_2_dubinin_v_h_v_8.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"49100789","text":"# -*- coding: utf-8 -*-\n# pylint: disable=invalid-name, too-many-arguments, too-many-branches,\n# pylint: disable=too-many-locals, too-many-instance-attributes, too-many-lines\n\n\"\"\"\nThis module implements the linear Kalman filter in both an object\noriented and procedural form. The KalmanFilter class implements\nthe filter by storing the various matrices in instance variables,\nminimizing the amount of bookkeeping you have to do.\n\nAll Kalman filters operate with a predict->update cycle. The\npredict step, implemented with the method or function predict(),\nuses the state transition matrix F to predict the state in the next\ntime period (epoch). The state is stored as a gaussian (x, P), where\nx is the state (column) vector, and P is its covariance. Covariance\nmatrix Q specifies the process covariance. In Bayesian terms, this\nprediction is called the *prior*, which you can think of colloquially\nas the estimate prior to incorporating the measurement.\n\nThe update step, implemented with the method or function `update()`,\nincorporates the measurement z with covariance R, into the state\nestimate (x, P). The class stores the system uncertainty in S,\nthe innovation (residual between prediction and measurement in\nmeasurement space) in y, and the Kalman gain in k. The procedural\nform returns these variables to you. 
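\n\nFor concreteness, here is one full predict/update cycle written out in\nplain NumPy -- an illustrative sketch of the equations just described,\nnot part of this module's API (the toy model below is made up):\n\n.. code-block:: Python\n\n    import numpy as np\n\n    # toy 1D model: constant position, one noisy position sensor\n    F = np.array([[1.]]); Q = np.array([[0.001]])   # transition, process noise\n    H = np.array([[1.]]); R = np.array([[4.]])      # measurement fn, meas. noise\n    x = np.array([[0.]]); P = np.array([[1.]])      # initial state and covariance\n    z = np.array([[1.2]])                           # one measurement\n\n    # predict: form the prior\n    x = F @ x                          # x_prior = Fx\n    P = F @ P @ F.T + Q                # P_prior = FPF' + Q\n\n    # update: incorporate the measurement\n    y = z - H @ x                      # innovation y\n    S = H @ P @ H.T + R                # system uncertainty S\n    K = P @ H.T @ np.linalg.inv(S)     # Kalman gain K\n    x = x + K @ y                      # posterior state\n    P = (np.eye(1) - K @ H) @ P        # posterior covariance\n\n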
In Bayesian terms the update step computes\nthe *posterior* - the estimate after the information from the\nmeasurement is incorporated.\n\nWhether you use the OO form or procedural form is up to you. If\nmatrices such as H, R, and F are changing each epoch, you'll probably\nopt to use the procedural form. If they are unchanging, the OO\nform is perhaps easier to use since you won't need to keep track\nof these matrices. This is especially useful if you are implementing\nbanks of filters or comparing various KF designs for performance;\na trivial coding bug could lead to using the wrong sets of matrices.\n\nThis module also offers an implementation of the RTS smoother, and\nother helper functions, such as log likelihood computations.\n\nThe Saver class allows you to easily save the state of the\nKalmanFilter class after every update.\n\nThis module expects NumPy arrays for all values that expect\narrays, although in a few cases, particularly method parameters,\nit will accept types that convert to NumPy arrays, such as lists\nof lists. These exceptions are documented in the method or function.\n\nExamples\n--------\nThe following example constructs a constant velocity kinematic\nfilter, filters noisy data, and plots the results. It also demonstrates\nusing the Saver class to save the state of the filter at each epoch.\n\n.. code-block:: Python\n\n    import matplotlib.pyplot as plt\n    import numpy as np\n    from numpy.random import randn\n    from filterpy.kalman import KalmanFilter\n    from filterpy.common import Q_discrete_white_noise, Saver\n\n    dt = 1.\n    r_std, q_std = 2., 0.003\n    cv = KalmanFilter(dim_x=2, dim_z=1)\n    cv.x = np.array([[0.], [1.]]) # position, velocity\n    cv.F = np.array([[1., dt], [0., 1.]])\n    cv.R = np.array([[r_std**2]])\n    cv.H = np.array([[1., 0.]])\n    cv.P = np.diag([.1**2, .03**2])\n    cv.Q = Q_discrete_white_noise(2, dt, q_std**2)\n\n    saver = Saver(cv)\n    for z in range(100):\n        cv.predict()\n        cv.update([z + randn() * r_std])\n        saver.save() # save the filter's state\n\n    saver.to_array()\n    plt.plot(saver.x[:, 0])\n\n    # plot all of the priors\n    plt.plot(saver.x_prior[:, 0])\n\n    # plot mahalanobis distance\n    plt.figure()\n    plt.plot(saver.mahalanobis)\n\nThis code implements the same filter using the procedural form\n\n    x = np.array([[0.], [1.]]) # position, velocity\n    F = np.array([[1., dt], [0., 1.]])\n    R = np.array([[r_std**2]])\n    H = np.array([[1., 0.]])\n    P = np.diag([.1**2, .03**2])\n    Q = Q_discrete_white_noise(2, dt, q_std**2)\n\n    xs = []\n    for z in range(100):\n        x, P = predict(x, P, F=F, Q=Q)\n        x, P = update(x, P, z=[z + randn() * r_std], R=R, H=H)\n        xs.append(x[0, 0])\n    plt.plot(xs)\n\n\nFor more examples see the test subdirectory, or refer to the\nbook cited below. In it I both teach Kalman filtering from basic\nprinciples, and teach the use of this library in great detail.\n\nFilterPy library.\nhttp://github.com/rlabbe/filterpy\n\nDocumentation at:\nhttps://filterpy.readthedocs.org\n\nSupporting book at:\nhttps://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python\n\nThis is licensed under an MIT license. See the readme.MD file\nfor more information.\n\nCopyright 2014-2018 Roger R Labbe Jr.\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nfrom copy import deepcopy\nfrom math import log, exp, sqrt\nimport sys\nimport numpy as np\nfrom numpy import dot, zeros, eye, isscalar, shape\nimport numpy.linalg as linalg\nfrom filterpy.stats import logpdf\nfrom filterpy.common import pretty_str, reshape_z\n\n\nclass KalmanFilter(object):\n    r\"\"\" Implements a Kalman filter. 
You are responsible for setting the\n various state variables to reasonable values; the defaults will\n not give you a functional filter.\n\n For now the best documentation is my free book Kalman and Bayesian\n Filters in Python [2]_. The test files in this directory also give you a\n basic idea of use, albeit without much description.\n\n In brief, you will first construct this object, specifying the size of\n the state vector with dim_x and the size of the measurement vector that\n you will be using with dim_z. These are mostly used to perform size checks\n when you assign values to the various matrices. For example, if you\n specified dim_z=2 and then try to assign a 3x3 matrix to R (the\n measurement noise matrix you will get an assert exception because R\n should be 2x2. (If for whatever reason you need to alter the size of\n things midstream just use the underscore version of the matrices to\n assign directly: your_filter._R = a_3x3_matrix.)\n\n After construction the filter will have default matrices created for you,\n but you must specify the values for each. It’s usually easiest to just\n overwrite them rather than assign to each element yourself. This will be\n clearer in the example below. All are of type numpy.array.\n\n\n Examples\n --------\n\n Here is a filter that tracks position and velocity using a sensor that only\n reads position.\n\n First construct the object with the required dimensionality. Here the state\n (`dim_x`) has 2 coefficients (position and velocity), and the measurement\n (`dim_z`) has one. In FilterPy `x` is the state, `z` is the measurement.\n\n .. code::\n\n from filterpy.kalman import KalmanFilter\n f = KalmanFilter (dim_x=2, dim_z=1)\n\n\n Assign the initial value for the state (position and velocity). You can do this\n with a two dimensional array like so:\n\n .. code::\n\n f.x = np.array([[2.], # position\n [0.]]) # velocity\n\n or just use a one dimensional array, which I prefer doing.\n\n .. code::\n\n f.x = np.array([2., 0.])\n\n\n Define the state transition matrix:\n\n .. code::\n\n f.F = np.array([[1.,1.],\n [0.,1.]])\n\n Define the measurement function. Here we need to convert a position-velocity\n vector into just a position vector, so we use:\n\n .. code::\n\n f.H = np.array([[1., 0.]])\n\n Define the state's covariance matrix P. \n\n .. code::\n\n f.P = np.array([[1000., 0.],\n [ 0., 1000.] ])\n\n Now assign the measurement noise. Here the dimension is 1x1, so I can\n use a scalar\n\n .. code::\n\n f.R = 5\n\n I could have done this instead:\n\n .. code::\n\n f.R = np.array([[5.]])\n\n Note that this must be a 2 dimensional array.\n\n Finally, I will assign the process noise. Here I will take advantage of\n another FilterPy library function:\n\n .. code::\n\n from filterpy.common import Q_discrete_white_noise\n f.Q = Q_discrete_white_noise(dim=2, dt=0.1, var=0.13)\n\n\n Now just perform the standard predict/update loop:\n\n .. code::\n\n while some_condition_is_true:\n z = get_sensor_reading()\n f.predict()\n f.update(z)\n\n do_something_with_estimate (f.x)\n\n\n **Procedural Form**\n\n This module also contains stand alone functions to perform Kalman filtering.\n Use these if you are not a fan of objects.\n\n **Example**\n\n .. code::\n\n while True:\n z, R = read_sensor()\n x, P = predict(x, P, F, Q)\n x, P = update(x, P, z, R, H)\n\n See my book Kalman and Bayesian Filters in Python [2]_.\n\n\n You will have to set the following attributes after constructing this\n object for the filter to perform properly. 
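A quick way to catch\n    sizing mistakes early is the test_matrix_dimensions() helper defined\n    further down on this class -- a minimal sketch:\n\n    .. code::\n\n        f = KalmanFilter(dim_x=2, dim_z=1)\n        # ... assign x, F, H, P, R, Q as shown below ...\n        f.test_matrix_dimensions()  # raises AssertionError if anything is mis-sized\n\n    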
Please note that there are\n various checks in place to ensure that you have made everything the\n 'correct' size. However, it is possible to provide incorrectly sized\n arrays such that the linear algebra can not perform an operation.\n It can also fail silently - you can end up with matrices of a size that\n allows the linear algebra to work, but are the wrong shape for the problem\n you are trying to solve.\n\n Parameters\n ----------\n dim_x : int\n Number of state variables for the Kalman filter. For example, if\n you are tracking the position and velocity of an object in two\n dimensions, dim_x would be 4.\n This is used to set the default size of P, Q, and u\n\n dim_z : int\n Number of of measurement inputs. For example, if the sensor\n provides you with position in (x,y), dim_z would be 2.\n\n dim_u : int (optional)\n size of the control input, if it is being used.\n Default value of 0 indicates it is not used.\n\n compute_log_likelihood : bool (default = True)\n Computes log likelihood by default, but this can be a slow\n computation, so if you never use it you can turn this computation\n off.\n\n Attributes\n ----------\n x : numpy.array(dim_x, 1)\n Current state estimate. Any call to update() or predict() updates\n this variable.\n\n P : numpy.array(dim_x, dim_x)\n Current state covariance matrix. Any call to update() or predict()\n updates this variable.\n\n x_prior : numpy.array(dim_x, 1)\n Prior (predicted) state estimate. The *_prior and *_post attributes\n are for convenience; they store the prior and posterior of the\n current epoch. Read Only.\n\n P_prior : numpy.array(dim_x, dim_x)\n Prior (predicted) state covariance matrix. Read Only.\n\n x_post : numpy.array(dim_x, 1)\n Posterior (updated) state estimate. Read Only.\n\n P_post : numpy.array(dim_x, dim_x)\n Posterior (updated) state covariance matrix. Read Only.\n\n z : numpy.array\n Last measurement used in update(). Read only.\n\n R : numpy.array(dim_z, dim_z)\n Measurement noise covariance matrix. Also known as the\n observation covariance.\n\n Q : numpy.array(dim_x, dim_x)\n Process noise covariance matrix. Also known as the transition\n covariance.\n\n F : numpy.array()\n State Transition matrix. Also known as `A` in some formulation.\n\n H : numpy.array(dim_z, dim_x)\n Measurement function. Also known as the observation matrix, or as `C`.\n\n y : numpy.array\n Residual of the update step. Read only.\n\n K : numpy.array(dim_x, dim_z)\n Kalman gain of the update step. Read only.\n\n S : numpy.array\n System uncertainty (P projected to measurement space). Read only.\n\n SI : numpy.array\n Inverse system uncertainty. Read only.\n\n log_likelihood : float\n log-likelihood of the last measurement. Read only.\n\n likelihood : float\n likelihood of last measurement. Read only.\n\n Computed from the log-likelihood. The log-likelihood can be very\n small, meaning a large negative value such as -28000. Taking the\n exp() of that results in 0.0, which can break typical algorithms\n which multiply by this value, so by default we always return a\n number >= sys.float_info.min.\n\n mahalanobis : float\n mahalanobis distance of the innovation. Read only.\n\n inv : function, default numpy.linalg.inv\n If you prefer another inverse function, such as the Moore-Penrose\n pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv\n\n This is only used to invert self.S. 
If you know it is diagonal, you\n might choose to set it to filterpy.common.inv_diagonal, which is\n several times faster than numpy.linalg.inv for diagonal matrices.\n\n alpha : float\n Fading memory setting. 1.0 gives the normal Kalman filter, and\n values slightly larger than 1.0 (such as 1.02) give a fading\n memory effect - previous measurements have less influence on the\n filter's estimates. This formulation of the Fading memory filter\n (there are many) is due to Dan Simon [1]_.\n\n References\n ----------\n\n .. [1] Dan Simon. \"Optimal State Estimation.\" John Wiley & Sons.\n p. 208-212. (2006)\n\n .. [2] Roger Labbe. \"Kalman and Bayesian Filters in Python\"\n https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python\n\n \"\"\"\n\n def __init__(self, dim_x, dim_z, dim_u=0):\n if dim_x < 1:\n raise ValueError('dim_x must be 1 or greater')\n if dim_z < 1:\n raise ValueError('dim_z must be 1 or greater')\n if dim_u < 0:\n raise ValueError('dim_u must be 0 or greater')\n\n self.dim_x = dim_x\n self.dim_z = dim_z\n self.dim_u = dim_u\n\n self.x = zeros((dim_x, 1)) # state\n self.P = eye(dim_x) # uncertainty covariance\n self.Q = eye(dim_x) # process uncertainty\n self.B = None # control transition matrix\n self.F = eye(dim_x) # state transition matrix\n self.H = zeros((dim_z, dim_x)) # measurement function\n self.R = eye(dim_z) # measurement uncertainty\n self._alpha_sq = 1. # fading memory control\n self.M = np.zeros((dim_x, dim_z)) # process-measurement cross correlation\n self.z = np.array([[None]*self.dim_z]).T\n\n # gain and residual are computed during the innovation step. We\n # save them so that in case you want to inspect them for various\n # purposes\n self.K = np.zeros((dim_x, dim_z)) # kalman gain\n self.y = zeros((dim_z, 1))\n self.S = np.zeros((dim_z, dim_z)) # system uncertainty\n self.SI = np.zeros((dim_z, dim_z)) # inverse system uncertainty\n\n # identity matrix. 
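It is used to form the (I - KH) term in update(). 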
Do not alter this.\n self._I = np.eye(dim_x)\n\n # these will always be a copy of x,P after predict() is called\n self.x_prior = self.x.copy()\n self.P_prior = self.P.copy()\n\n # these will always be a copy of x,P after update() is called\n self.x_post = self.x.copy()\n self.P_post = self.P.copy()\n\n # Only computed only if requested via property\n self._log_likelihood = log(sys.float_info.min)\n self._likelihood = sys.float_info.min\n self._mahalanobis = None\n\n self.inv = np.linalg.inv\n\n\n def predict(self, u=None, B=None, F=None, Q=None):\n \"\"\"\n Predict next state (prior) using the Kalman filter state propagation\n equations.\n\n Parameters\n ----------\n\n u : np.array, default 0\n Optional control vector.\n\n B : np.array(dim_x, dim_u), or None\n Optional control transition matrix; a value of None\n will cause the filter to use `self.B`.\n\n F : np.array(dim_x, dim_x), or None\n Optional state transition matrix; a value of None\n will cause the filter to use `self.F`.\n\n Q : np.array(dim_x, dim_x), scalar, or None\n Optional process noise matrix; a value of None will cause the\n filter to use `self.Q`.\n \"\"\"\n\n if B is None:\n B = self.B\n if F is None:\n F = self.F\n if Q is None:\n Q = self.Q\n elif isscalar(Q):\n Q = eye(self.dim_x) * Q\n\n\n # x = Fx + Bu\n if B is not None and u is not None:\n self.x = dot(F, self.x) + dot(B, u)\n else:\n self.x = dot(F, self.x)\n\n # P = FPF' + Q\n self.P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q\n\n # save prior\n self.x_prior = self.x.copy()\n self.P_prior = self.P.copy()\n\n\n def update(self, z, R=None, H=None):\n \"\"\"\n Add a new measurement (z) to the Kalman filter.\n\n If z is None, nothing is computed. However, x_post and P_post are\n updated with the prior (x_prior, P_prior), and self.z is set to None.\n\n Parameters\n ----------\n z : (dim_z, 1): array_like\n measurement for this update. 
z can be a scalar if dim_z is 1,\n            otherwise it must be convertible to a column vector.\n\n            If you pass in a value of H, z must be a column vector of the\n            correct size.\n\n        R : np.array, scalar, or None\n            Optionally provide R to override the measurement noise for this\n            one call, otherwise self.R will be used.\n\n        H : np.array, or None\n            Optionally provide H to override the measurement function for this\n            one call, otherwise self.H will be used.\n        \"\"\"\n\n        # set to None to force recompute\n        self._log_likelihood = None\n        self._likelihood = None\n        self._mahalanobis = None\n\n        if z is None:\n            self.z = np.array([[None]*self.dim_z]).T\n            self.x_post = self.x.copy()\n            self.P_post = self.P.copy()\n            self.y = zeros((self.dim_z, 1))\n            return\n\n        if R is None:\n            R = self.R\n        elif isscalar(R):\n            R = eye(self.dim_z) * R\n\n        if H is None:\n            z = reshape_z(z, self.dim_z, self.x.ndim)\n            H = self.H\n\n        # y = z - Hx\n        # error (residual) between measurement and prediction\n        self.y = z - dot(H, self.x)\n\n        # common subexpression for speed\n        PHT = dot(self.P, H.T)\n\n        # S = HPH' + R\n        # project system uncertainty into measurement space\n        self.S = dot(H, PHT) + R\n        self.SI = self.inv(self.S)\n        # K = PH'inv(S)\n        # map system uncertainty into kalman gain\n        self.K = dot(PHT, self.SI)\n\n        # x = x + Ky\n        # predict new x with residual scaled by the kalman gain\n        self.x = self.x + dot(self.K, self.y)\n\n        # P = (I-KH)P(I-KH)' + KRK'\n        # This is more numerically stable\n        # and works for non-optimal K vs the equation\n        # P = (I-KH)P usually seen in the literature.\n\n        I_KH = self._I - dot(self.K, H)\n        self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T)\n\n        # save measurement and posterior state\n        self.z = deepcopy(z)\n        self.x_post = self.x.copy()\n        self.P_post = self.P.copy()\n\n    def predict_steadystate(self, u=0, B=None):\n        \"\"\"\n        Predict state (prior) using the Kalman filter state propagation\n        equations. Only x is updated, P is left unchanged. See\n        update_steadystate() for a longer explanation of when to use this\n        method.\n\n        Parameters\n        ----------\n\n        u : np.array\n            Optional control vector. If non-zero, it is multiplied by B\n            to create the control input into the system.\n\n        B : np.array(dim_x, dim_u), or None\n            Optional control transition matrix; a value of None\n            will cause the filter to use `self.B`.\n        \"\"\"\n\n        if B is None:\n            B = self.B\n\n        # x = Fx + Bu\n        if B is not None:\n            self.x = dot(self.F, self.x) + dot(B, u)\n        else:\n            self.x = dot(self.F, self.x)\n\n        # save prior\n        self.x_prior = self.x.copy()\n        self.P_prior = self.P.copy()\n\n    def update_steadystate(self, z):\n        \"\"\"\n        Add a new measurement (z) to the Kalman filter without recomputing\n        the Kalman gain K, the state covariance P, or the system\n        uncertainty S.\n\n        You can use this for LTI systems since the Kalman gain and covariance\n        converge to a fixed value. Precompute these and assign them explicitly,\n        or run the Kalman filter using the normal predict()/update() cycle\n        until they converge.\n\n        The main advantage of this call is speed. We do significantly less\n        computation, notably avoiding a costly matrix inversion.\n\n        Use in conjunction with predict_steadystate(), otherwise P will grow\n        without bound.\n\n        Parameters\n        ----------\n        z : (dim_z, 1): array_like\n            measurement for this update. 
z can be a scalar if dim_z is 1,\n otherwise it must be convertible to a column vector.\n\n\n Examples\n --------\n >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter\n >>> # let filter converge on representative data, then save k and P\n >>> for i in range(100):\n >>> cv.predict()\n >>> cv.update([i, i, i])\n >>> saved_k = np.copy(cv.K)\n >>> saved_P = np.copy(cv.P)\n\n later on:\n\n >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter\n >>> cv.K = np.copy(saved_K)\n >>> cv.P = np.copy(saved_P)\n >>> for i in range(100):\n >>> cv.predict_steadystate()\n >>> cv.update_steadystate([i, i, i])\n \"\"\"\n\n # set to None to force recompute\n self._log_likelihood = None\n self._likelihood = None\n self._mahalanobis = None\n\n if z is None:\n self.z = np.array([[None]*self.dim_z]).T\n self.x_post = self.x.copy()\n self.P_post = self.P.copy()\n self.y = zeros((self.dim_z, 1))\n return\n\n z = reshape_z(z, self.dim_z, self.x.ndim)\n\n # y = z - Hx\n # error (residual) between measurement and prediction\n self.y = z - dot(self.H, self.x)\n\n # x = x + Ky\n # predict new x with residual scaled by the kalman gain\n self.x = self.x + dot(self.K, self.y)\n\n self.z = deepcopy(z)\n self.x_post = self.x.copy()\n self.P_post = self.P.copy()\n\n # set to None to force recompute\n self._log_likelihood = None\n self._likelihood = None\n self._mahalanobis = None\n\n def update_correlated(self, z, R=None, H=None):\n \"\"\" Add a new measurement (z) to the Kalman filter assuming that\n process noise and measurement noise are correlated as defined in\n the `self.M` matrix.\n\n A partial derivation can be found in [1]\n\n If z is None, nothing is changed.\n\n Parameters\n ----------\n z : (dim_z, 1): array_like\n measurement for this update. z can be a scalar if dim_z is 1,\n otherwise it must be convertible to a column vector.\n\n R : np.array, scalar, or None\n Optionally provide R to override the measurement noise for this\n one call, otherwise self.R will be used.\n\n H : np.array, or None\n Optionally provide H to override the measurement function for this\n one call, otherwise self.H will be used.\n\n References\n ----------\n\n .. [1] Bulut, Y. (2011). Applied Kalman filter theory (Doctoral dissertation, Northeastern University).\n http://people.duke.edu/~hpgavin/SystemID/References/Balut-KalmanFilter-PhD-NEU-2011.pdf\n \"\"\"\n\n # set to None to force recompute\n self._log_likelihood = None\n self._likelihood = None\n self._mahalanobis = None\n\n if z is None:\n self.z = np.array([[None]*self.dim_z]).T\n self.x_post = self.x.copy()\n self.P_post = self.P.copy()\n self.y = zeros((self.dim_z, 1))\n return\n\n if R is None:\n R = self.R\n elif isscalar(R):\n R = eye(self.dim_z) * R\n\n # rename for readability and a tiny extra bit of speed\n if H is None:\n z = reshape_z(z, self.dim_z, self.x.ndim)\n H = self.H\n\n # handle special case: if z is in form [[z]] but x is not a column\n # vector dimensions will not match\n if self.x.ndim == 1 and shape(z) == (1, 1):\n z = z[0]\n\n if shape(z) == (): # is it scalar, e.g. 
z=3 or z=np.array(3)\n z = np.asarray([z])\n\n # y = z - Hx\n # error (residual) between measurement and prediction\n self.y = z - dot(H, self.x)\n\n # common subexpression for speed\n PHT = dot(self.P, H.T)\n\n # project system uncertainty into measurement space\n self.S = dot(H, PHT) + dot(H, self.M) + dot(self.M.T, H.T) + R\n self.SI = self.inv(self.S)\n\n # K = PH'inv(S)\n # map system uncertainty into kalman gain\n self.K = dot(PHT + self.M, self.SI)\n\n # x = x + Ky\n # predict new x with residual scaled by the kalman gain\n self.x = self.x + dot(self.K, self.y)\n self.P = self.P - dot(self.K, dot(H, self.P) + self.M.T)\n\n self.z = deepcopy(z)\n self.x_post = self.x.copy()\n self.P_post = self.P.copy()\n\n def update_sequential(self, start, z_i, R_i=None, H_i=None):\n \"\"\"\n Add a single input measurement (z_i) to the Kalman filter.\n In sequential processing, inputs are processed one at a time.\n\n Parameters\n ----------\n start : integer\n Index of the first measurement input updated by this call.\n\n z_i : np.array or scalar\n Measurement of inputs for this partial update.\n\n R_i : np.array, scalar, or None\n Optionally provide R_i to override the measurement noise of\n inputs for this one call, otherwise a slice of self.R will\n be used.\n\n H_i : np.array, or None\n Optionally provide H[i] to override the partial measurement\n function for this one call, otherwise a slice of self.H will\n be used.\n \"\"\"\n\n if isscalar(z_i):\n length = 1\n else:\n length = len(z_i)\n z_i = np.reshape(z_i, [length, 1])\n stop = start + length\n\n if R_i is None:\n R_i = self.R[start:stop, start:stop]\n elif isscalar(R_i):\n R_i = eye(length) * R_i\n\n if H_i is None:\n H_i = self.H[start:stop]\n\n H_i = np.reshape(H_i, [length, self.dim_x])\n\n # y_i = z_i - H_i @ x\n # error (residual) between measurement and prediction\n y_i = z_i - dot(H_i, self.x)\n self.y[start:stop] = y_i\n\n # common subexpression for speed\n PHT = dot(self.P, H_i.T)\n\n # project system uncertainty into the measurement subspace\n S_i = dot(H_i, PHT) + R_i\n\n if length == 1:\n K_i = PHT * (1.0 / S_i)\n else:\n K_i = dot(PHT, linalg.inv(S_i))\n\n self.K[:,start:stop] = K_i\n I_KH = self._I - np.dot(K_i, H_i)\n\n # x = x + K_i @ y_i\n # update state estimation with residual scaled by the kalman gain\n self.x += dot(K_i, y_i)\n\n # compute the posterior covariance\n self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(K_i, R_i), K_i.T)\n\n # save measurement component #i and the posterior state\n self.z[start:stop] = z_i\n self.x_post = self.x.copy()\n self.P_post = self.P.copy()\n\n def batch_filter(self, zs, Fs=None, Qs=None, Hs=None,\n Rs=None, Bs=None, us=None, update_first=False,\n saver=None):\n \"\"\" Batch processes a sequences of measurements.\n\n Parameters\n ----------\n\n zs : list-like\n list of measurements at each time step `self.dt`. Missing\n measurements must be represented by `None`.\n\n Fs : None, list-like, default=None\n optional value or list of values to use for the state transition\n matrix F.\n\n If Fs is None then self.F is used for all epochs.\n\n Otherwise it must contain a list-like list of F's, one for\n each epoch. This allows you to have varying F per epoch.\n\n Qs : None, np.array or list-like, default=None\n optional value or list of values to use for the process error\n covariance Q.\n\n If Qs is None then self.Q is used for all epochs.\n\n Otherwise it must contain a list-like list of Q's, one for\n each epoch. 
This allows you to have varying Q per epoch.\n\n Hs : None, np.array or list-like, default=None\n optional list of values to use for the measurement matrix H.\n\n If Hs is None then self.H is used for all epochs.\n\n If Hs contains a single matrix, then it is used as H for all\n epochs.\n\n Otherwise it must contain a list-like list of H's, one for\n each epoch. This allows you to have varying H per epoch.\n\n Rs : None, np.array or list-like, default=None\n optional list of values to use for the measurement error\n covariance R.\n\n If Rs is None then self.R is used for all epochs.\n\n Otherwise it must contain a list-like list of R's, one for\n each epoch. This allows you to have varying R per epoch.\n\n Bs : None, np.array or list-like, default=None\n optional list of values to use for the control transition matrix B.\n\n If Bs is None then self.B is used for all epochs.\n\n Otherwise it must contain a list-like list of B's, one for\n each epoch. This allows you to have varying B per epoch.\n\n us : None, np.array or list-like, default=None\n optional list of values to use for the control input vector;\n\n If us is None then None is used for all epochs (equivalent to 0,\n or no control input).\n\n Otherwise it must contain a list-like list of u's, one for\n each epoch.\n\n update_first : bool, optional, default=False\n controls whether the order of operations is update followed by\n predict, or predict followed by update. Default is predict->update.\n\n saver : filterpy.common.Saver, optional\n filterpy.common.Saver object. If provided, saver.save() will be\n called after every epoch\n\n Returns\n -------\n\n means : np.array((n,dim_x,1))\n array of the state for each time step after the update. Each entry\n is an np.array. In other words `means[k,:]` is the state at step\n `k`.\n\n covariance : np.array((n,dim_x,dim_x))\n array of the covariances for each time step after the update.\n In other words `covariance[k,:,:]` is the covariance at step `k`.\n\n means_predictions : np.array((n,dim_x,1))\n array of the state for each time step after the predictions. Each\n entry is an np.array. In other words `means[k,:]` is the state at\n step `k`.\n\n covariance_predictions : np.array((n,dim_x,dim_x))\n array of the covariances for each time step after the prediction.\n In other words `covariance[k,:,:]` is the covariance at step `k`.\n\n Examples\n --------\n\n .. code-block:: Python\n\n # this example demonstrates tracking a measurement where the time\n # between measurement varies, as stored in dts. This requires\n # that F be recomputed for each epoch. 
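Each F in Fs embeds that epoch's dt. 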
The output is then smoothed\n            # with an RTS smoother.\n\n            zs = [t + random.randn()*4 for t in range (40)]\n            Fs = [np.array([[1., dt], [0., 1.]]) for dt in dts]\n\n            (mu, cov, _, _) = kf.batch_filter(zs, Fs=Fs)\n            (xs, Ps, Ks, Pps) = kf.rts_smoother(mu, cov, Fs=Fs)\n        \"\"\"\n\n        #pylint: disable=too-many-statements\n        n = np.size(zs, 0)\n        if Fs is None:\n            Fs = [self.F] * n\n        if Qs is None:\n            Qs = [self.Q] * n\n        if Hs is None:\n            Hs = [self.H] * n\n        if Rs is None:\n            Rs = [self.R] * n\n        if Bs is None:\n            Bs = [self.B] * n\n        if us is None:\n            us = [0] * n\n\n        # mean estimates from Kalman Filter\n        if self.x.ndim == 1:\n            means = zeros((n, self.dim_x))\n            means_p = zeros((n, self.dim_x))\n        else:\n            means = zeros((n, self.dim_x, 1))\n            means_p = zeros((n, self.dim_x, 1))\n\n        # state covariances from Kalman Filter\n        covariances = zeros((n, self.dim_x, self.dim_x))\n        covariances_p = zeros((n, self.dim_x, self.dim_x))\n\n        if update_first:\n            for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):\n\n                self.update(z, R=R, H=H)\n                means[i, :] = self.x\n                covariances[i, :, :] = self.P\n\n                self.predict(u=u, B=B, F=F, Q=Q)\n                means_p[i, :] = self.x\n                covariances_p[i, :, :] = self.P\n\n                if saver is not None:\n                    saver.save()\n        else:\n            for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):\n\n                self.predict(u=u, B=B, F=F, Q=Q)\n                means_p[i, :] = self.x\n                covariances_p[i, :, :] = self.P\n\n                self.update(z, R=R, H=H)\n                means[i, :] = self.x\n                covariances[i, :, :] = self.P\n\n                if saver is not None:\n                    saver.save()\n\n        return (means, covariances, means_p, covariances_p)\n\n    def rts_smoother(self, Xs, Ps, Fs=None, Qs=None, inv=np.linalg.inv):\n        \"\"\"\n        Runs the Rauch-Tung-Striebel Kalman smoother on a set of\n        means and covariances computed by a Kalman filter. The usual input\n        would come from the output of `KalmanFilter.batch_filter()`.\n\n        Parameters\n        ----------\n\n        Xs : numpy.array\n           array of the means (state variable x) of the output of a Kalman\n           filter.\n\n        Ps : numpy.array\n            array of the covariances of the output of a kalman filter.\n\n        Fs : list-like collection of numpy.array, optional\n            State transition matrix of the Kalman filter at each time step.\n            Optional, if not provided the filter's self.F will be used\n\n        Qs : list-like collection of numpy.array, optional\n            Process noise of the Kalman filter at each time step. Optional,\n            if not provided the filter's self.Q will be used\n\n        inv : function, default numpy.linalg.inv\n            If you prefer another inverse function, such as the Moore-Penrose\n            pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv\n\n\n        Returns\n        -------\n\n        x : numpy.ndarray\n           smoothed means\n\n        P : numpy.ndarray\n           smoothed state covariances\n\n        K : numpy.ndarray\n            smoother gain at each step\n\n        Pp : numpy.ndarray\n           Predicted state covariances\n\n        Examples\n        --------\n\n        .. 
code-block:: Python\n\n zs = [t + random.randn()*4 for t in range (40)]\n\n (mu, cov, _, _) = kalman.batch_filter(zs)\n (x, P, K, Pp) = rts_smoother(mu, cov, kf.F, kf.Q)\n\n \"\"\"\n\n if len(Xs) != len(Ps):\n raise ValueError('length of Xs and Ps must be the same')\n\n n = Xs.shape[0]\n dim_x = Xs.shape[1]\n\n if Fs is None:\n Fs = [self.F] * n\n if Qs is None:\n Qs = [self.Q] * n\n\n # smoother gain\n K = zeros((n, dim_x, dim_x))\n\n x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy()\n for k in range(n-2, -1, -1):\n Pp[k] = dot(dot(Fs[k+1], P[k]), Fs[k+1].T) + Qs[k+1]\n\n #pylint: disable=bad-whitespace\n K[k] = dot(dot(P[k], Fs[k+1].T), inv(Pp[k]))\n x[k] += dot(K[k], x[k+1] - dot(Fs[k+1], x[k]))\n P[k] += dot(dot(K[k], P[k+1] - Pp[k]), K[k].T)\n\n return (x, P, K, Pp)\n\n def get_prediction(self, u=None, B=None, F=None, Q=None):\n \"\"\"\n Predict next state (prior) using the Kalman filter state propagation\n equations and returns it without modifying the object.\n\n Parameters\n ----------\n\n u : np.array, default 0\n Optional control vector.\n\n B : np.array(dim_x, dim_u), or None\n Optional control transition matrix; a value of None\n will cause the filter to use `self.B`.\n\n F : np.array(dim_x, dim_x), or None\n Optional state transition matrix; a value of None\n will cause the filter to use `self.F`.\n\n Q : np.array(dim_x, dim_x), scalar, or None\n Optional process noise matrix; a value of None will cause the\n filter to use `self.Q`.\n\n Returns\n -------\n\n (x, P) : tuple\n State vector and covariance array of the prediction.\n \"\"\"\n\n if B is None:\n B = self.B\n if F is None:\n F = self.F\n if Q is None:\n Q = self.Q\n elif isscalar(Q):\n Q = eye(self.dim_x) * Q\n\n # x = Fx + Bu\n if B is not None and u is not None:\n x = dot(F, self.x) + dot(B, u)\n else:\n x = dot(F, self.x)\n\n # P = FPF' + Q\n P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q\n\n return x, P\n\n def get_update(self, z=None):\n \"\"\"\n Computes the new estimate based on measurement `z` and returns it\n without altering the state of the filter.\n\n Parameters\n ----------\n\n z : (dim_z, 1): array_like\n measurement for this update. z can be a scalar if dim_z is 1,\n otherwise it must be convertible to a column vector.\n\n Returns\n -------\n\n (x, P) : tuple\n State vector and covariance array of the update.\n \"\"\"\n\n if z is None:\n return self.x, self.P\n z = reshape_z(z, self.dim_z, self.x.ndim)\n\n R = self.R\n H = self.H\n P = self.P\n x = self.x\n\n # error (residual) between measurement and prediction\n y = z - dot(H, x)\n\n # common subexpression for speed\n PHT = dot(P, H.T)\n\n # project system uncertainty into measurement space\n S = dot(H, PHT) + R\n\n # map system uncertainty into kalman gain\n K = dot(PHT, self.inv(S))\n\n # predict new x with residual scaled by the kalman gain\n x = x + dot(K, y)\n\n # P = (I-KH)P(I-KH)' + KRK'\n I_KH = self._I - dot(K, H)\n P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)\n\n return x, P\n\n def residual_of(self, z):\n \"\"\"\n Returns the residual for the given measurement (z). Does not alter\n the state of the filter.\n \"\"\"\n z = reshape_z(z, self.dim_z, self.x.ndim)\n return z - dot(self.H, self.x_prior)\n\n def measurement_of_state(self, x):\n \"\"\"\n Helper function that converts a state into a measurement.\n\n Parameters\n ----------\n\n x : np.array\n kalman state vector\n\n Returns\n -------\n\n z : (dim_z, 1): array_like\n measurement for this update. 
z can be a scalar if dim_z is 1,\n otherwise it must be convertible to a column vector.\n \"\"\"\n\n return dot(self.H, x)\n\n @property\n def log_likelihood(self):\n \"\"\"\n log-likelihood of the last measurement.\n \"\"\"\n if self._log_likelihood is None:\n self._log_likelihood = logpdf(x=self.y, cov=self.S)\n return self._log_likelihood\n\n @property\n def likelihood(self):\n \"\"\"\n Computed from the log-likelihood. The log-likelihood can be very\n small, meaning a large negative value such as -28000. Taking the\n exp() of that results in 0.0, which can break typical algorithms\n which multiply by this value, so by default we always return a\n number >= sys.float_info.min.\n \"\"\"\n if self._likelihood is None:\n self._likelihood = exp(self.log_likelihood)\n if self._likelihood == 0:\n self._likelihood = sys.float_info.min\n return self._likelihood\n\n @property\n def mahalanobis(self):\n \"\"\"\"\n Mahalanobis distance of measurement. E.g. 3 means measurement\n was 3 standard deviations away from the predicted value.\n\n Returns\n -------\n mahalanobis : float\n \"\"\"\n if self._mahalanobis is None:\n self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y)))\n return self._mahalanobis\n\n @property\n def alpha(self):\n \"\"\"\n Fading memory setting. 1.0 gives the normal Kalman filter, and\n values slightly larger than 1.0 (such as 1.02) give a fading\n memory effect - previous measurements have less influence on the\n filter's estimates. This formulation of the Fading memory filter\n (there are many) is due to Dan Simon [1]_.\n \"\"\"\n return self._alpha_sq**.5\n\n def log_likelihood_of(self, z):\n \"\"\"\n log likelihood of the measurement `z`. This should only be called\n after a call to update(). Calling after predict() will yield an\n incorrect result.\"\"\"\n\n if z is None:\n return log(sys.float_info.min)\n return logpdf(z, dot(self.H, self.x), self.S)\n\n @alpha.setter\n def alpha(self, value):\n if not np.isscalar(value) or value < 1:\n raise ValueError('alpha must be a float greater than 1')\n\n self._alpha_sq = value**2\n\n def __repr__(self):\n return '\\n'.join([\n 'KalmanFilter object',\n pretty_str('dim_x', self.dim_x),\n pretty_str('dim_z', self.dim_z),\n pretty_str('dim_u', self.dim_u),\n pretty_str('x', self.x),\n pretty_str('P', self.P),\n pretty_str('x_prior', self.x_prior),\n pretty_str('P_prior', self.P_prior),\n pretty_str('x_post', self.x_post),\n pretty_str('P_post', self.P_post),\n pretty_str('F', self.F),\n pretty_str('Q', self.Q),\n pretty_str('R', self.R),\n pretty_str('H', self.H),\n pretty_str('K', self.K),\n pretty_str('y', self.y),\n pretty_str('S', self.S),\n pretty_str('SI', self.SI),\n pretty_str('M', self.M),\n pretty_str('B', self.B),\n pretty_str('z', self.z),\n pretty_str('log-likelihood', self.log_likelihood),\n pretty_str('likelihood', self.likelihood),\n pretty_str('mahalanobis', self.mahalanobis),\n pretty_str('alpha', self.alpha),\n pretty_str('inv', self.inv)\n ])\n\n def test_matrix_dimensions(self, z=None, H=None, R=None, F=None, Q=None):\n \"\"\"\n Performs a series of asserts to check that the size of everything\n is what it should be. This can help you debug problems in your design.\n\n If you pass in H, R, F, Q those will be used instead of this object's\n value for those matrices.\n\n Testing `z` (the measurement) is problamatic. x is a vector, and can be\n implemented as either a 1D array or as a nx1 column vector. Thus Hx\n can be of different shapes. 
Then, if Hx is a single value, it can\n be either a 1D array or 2D vector. If either is true, z can reasonably\n be a scalar (either '3' or np.array('3') are scalars under this\n definition), a 1D, 1 element array, or a 2D, 1 element array. You are\n allowed to pass in any combination that works.\n \"\"\"\n\n if H is None:\n H = self.H\n if R is None:\n R = self.R\n if F is None:\n F = self.F\n if Q is None:\n Q = self.Q\n x = self.x\n P = self.P\n\n assert x.ndim == 1 or x.ndim == 2, \\\n \"x must have one or two dimensions, but has {}\".format(x.ndim)\n\n if x.ndim == 1:\n assert x.shape[0] == self.dim_x, \\\n \"Shape of x must be ({},{}), but is {}\".format(\n self.dim_x, 1, x.shape)\n else:\n assert x.shape == (self.dim_x, 1), \\\n \"Shape of x must be ({},{}), but is {}\".format(\n self.dim_x, 1, x.shape)\n\n assert P.shape == (self.dim_x, self.dim_x), \\\n \"Shape of P must be ({},{}), but is {}\".format(\n self.dim_x, self.dim_x, P.shape)\n\n assert Q.shape == (self.dim_x, self.dim_x), \\\n \"Shape of Q must be ({},{}), but is {}\".format(\n self.dim_x, self.dim_x, P.shape)\n\n assert F.shape == (self.dim_x, self.dim_x), \\\n \"Shape of F must be ({},{}), but is {}\".format(\n self.dim_x, self.dim_x, F.shape)\n\n assert np.ndim(H) == 2, \\\n \"Shape of H must be (dim_z, {}), but is {}\".format(\n P.shape[0], shape(H))\n\n assert H.shape[1] == P.shape[0], \\\n \"Shape of H must be (dim_z, {}), but is {}\".format(\n P.shape[0], H.shape)\n\n # shape of R must be the same as HPH'\n hph_shape = (H.shape[0], H.shape[0])\n r_shape = shape(R)\n\n if H.shape[0] == 1:\n # r can be scalar, 1D, or 2D in this case\n assert r_shape in [(), (1,), (1, 1)], \\\n \"R must be scalar or one element array, but is shaped {}\".format(\n r_shape)\n else:\n assert r_shape == hph_shape, \\\n \"shape of R should be {} but it is {}\".format(hph_shape, r_shape)\n\n\n if z is not None:\n z_shape = shape(z)\n else:\n z_shape = (self.dim_z, 1)\n\n # H@x must have shape of z\n Hx = dot(H, x)\n\n if z_shape == (): # scalar or np.array(scalar)\n assert Hx.ndim == 1 or shape(Hx) == (1, 1), \\\n \"shape of z should be {}, not {} for the given H\".format(\n shape(Hx), z_shape)\n\n elif shape(Hx) == (1,):\n assert z_shape[0] == 1, 'Shape of z must be {} for the given H'.format(shape(Hx))\n\n else:\n assert (z_shape == shape(Hx) or\n (len(z_shape) == 1 and shape(Hx) == (z_shape[0], 1))), \\\n \"shape of z should be {}, not {} for the given H\".format(\n shape(Hx), z_shape)\n\n if np.ndim(Hx) > 1 and shape(Hx) != (1, 1):\n assert shape(Hx) == z_shape, \\\n 'shape of z should be {} for the given H, but it is {}'.format(\n shape(Hx), z_shape)\n\n\ndef update(x, P, z, R, H=None, return_all=False):\n \"\"\"\n Add a new measurement (z) to the Kalman filter. If z is None, nothing\n is changed.\n\n This can handle either the multidimensional or unidimensional case. If\n all parameters are floats instead of arrays the filter will still work,\n and return floats for x, P as the result.\n\n update(1, 2, 1, 1, 1) # univariate\n update(x, P, 1\n\n\n\n Parameters\n ----------\n\n x : numpy.array(dim_x, 1), or float\n State estimate vector\n\n P : numpy.array(dim_x, dim_x), or float\n Covariance matrix\n\n z : (dim_z, 1): array_like\n measurement for this update. z can be a scalar if dim_z is 1,\n otherwise it must be convertible to a column vector.\n\n R : numpy.array(dim_z, dim_z), or float\n Measurement noise matrix\n\n H : numpy.array(dim_x, dim_x), or float, optional\n Measurement function. 
If not provided, a value of 1 is assumed.\n\n return_all : bool, default False\n If true, y, K, S, and log_likelihood are returned, otherwise\n only x and P are returned.\n\n Returns\n -------\n\n x : numpy.array\n Posterior state estimate vector\n\n P : numpy.array\n Posterior covariance matrix\n\n y : numpy.array or scalar\n Residua. Difference between measurement and state in measurement space\n\n K : numpy.array\n Kalman gain\n\n S : numpy.array\n System uncertainty in measurement space\n\n log_likelihood : float\n log likelihood of the measurement\n \"\"\"\n\n #pylint: disable=bare-except\n\n if z is None:\n if return_all:\n return x, P, None, None, None, None\n return x, P\n\n if H is None:\n H = np.array([1])\n\n if np.isscalar(H):\n H = np.array([H])\n\n Hx = np.atleast_1d(dot(H, x))\n z = reshape_z(z, Hx.shape[0], x.ndim)\n\n # error (residual) between measurement and prediction\n y = z - Hx\n\n # project system uncertainty into measurement space\n S = dot(dot(H, P), H.T) + R\n\n\n # map system uncertainty into kalman gain\n try:\n K = dot(dot(P, H.T), linalg.inv(S))\n except:\n # can't invert a 1D array, annoyingly\n K = dot(dot(P, H.T), 1./S)\n\n\n # predict new x with residual scaled by the kalman gain\n x = x + dot(K, y)\n\n # P = (I-KH)P(I-KH)' + KRK'\n KH = dot(K, H)\n\n try:\n I_KH = np.eye(KH.shape[0]) - KH\n except:\n I_KH = np.array([1 - KH])\n P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)\n\n\n if return_all:\n # compute log likelihood\n log_likelihood = logpdf(z, dot(H, x), S)\n return x, P, y, K, S, log_likelihood\n return x, P\n\n\ndef update_steadystate(x, z, K, H=None):\n \"\"\"\n Add a new measurement (z) to the Kalman filter. If z is None, nothing\n is changed.\n\n\n Parameters\n ----------\n\n x : numpy.array(dim_x, 1), or float\n State estimate vector\n\n\n z : (dim_z, 1): array_like\n measurement for this update. z can be a scalar if dim_z is 1,\n otherwise it must be convertible to a column vector.\n\n K : numpy.array, or float\n Kalman gain matrix\n\n H : numpy.array(dim_x, dim_x), or float, optional\n Measurement function. If not provided, a value of 1 is assumed.\n\n Returns\n -------\n\n x : numpy.array\n Posterior state estimate vector\n\n Examples\n --------\n\n This can handle either the multidimensional or unidimensional case. If\n all parameters are floats instead of arrays the filter will still work,\n and return floats for x, P as the result.\n\n >>> update_steadystate(1, 2, 1) # univariate\n >>> update_steadystate(x, P, z, H)\n \"\"\"\n\n\n if z is None:\n return x\n\n if H is None:\n H = np.array([1])\n\n if np.isscalar(H):\n H = np.array([H])\n\n Hx = np.atleast_1d(dot(H, x))\n z = reshape_z(z, Hx.shape[0], x.ndim)\n\n # error (residual) between measurement and prediction\n y = z - Hx\n\n # estimate new x with residual scaled by the kalman gain\n return x + dot(K, y)\n\n\ndef predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.):\n \"\"\"\n Predict next state (prior) using the Kalman filter state propagation\n equations.\n\n Parameters\n ----------\n\n x : numpy.array\n State estimate vector\n\n P : numpy.array\n Covariance matrix\n\n F : numpy.array()\n State Transition matrix\n\n Q : numpy.array, Optional\n Process noise matrix\n\n\n u : numpy.array, Optional, default 0.\n Control vector. If non-zero, it is multiplied by B\n to create the control input into the system.\n\n B : numpy.array, optional, default 0.\n Control transition matrix.\n\n alpha : float, Optional, default=1.0\n Fading memory setting. 
1.0 gives the normal Kalman filter, and\n values slightly larger than 1.0 (such as 1.02) give a fading\n memory effect - previous measurements have less influence on the\n filter's estimates. This formulation of the Fading memory filter\n (there are many) is due to Dan Simon\n\n Returns\n -------\n\n x : numpy.array\n Prior state estimate vector\n\n P : numpy.array\n Prior covariance matrix\n \"\"\"\n\n if np.isscalar(F):\n F = np.array(F)\n x = dot(F, x) + dot(B, u)\n P = (alpha * alpha) * dot(dot(F, P), F.T) + Q\n\n return x, P\n\n\ndef predict_steadystate(x, F=1, u=0, B=1):\n \"\"\"\n Predict next state (prior) using the Kalman filter state propagation\n equations. This steady state form only computes x, assuming that the\n covariance is constant.\n\n Parameters\n ----------\n\n x : numpy.array\n State estimate vector\n\n P : numpy.array\n Covariance matrix\n\n F : numpy.array()\n State Transition matrix\n\n u : numpy.array, Optional, default 0.\n Control vector. If non-zero, it is multiplied by B\n to create the control input into the system.\n\n B : numpy.array, optional, default 0.\n Control transition matrix.\n\n Returns\n -------\n\n x : numpy.array\n Prior state estimate vector\n \"\"\"\n\n if np.isscalar(F):\n F = np.array(F)\n x = dot(F, x) + dot(B, u)\n\n return x\n\n\n\ndef batch_filter(x, P, zs, Fs, Qs, Hs, Rs, Bs=None, us=None,\n update_first=False, saver=None):\n \"\"\"\n Batch processes a sequences of measurements.\n\n Parameters\n ----------\n\n zs : list-like\n list of measurements at each time step. Missing measurements must be\n represented by None.\n\n Fs : list-like\n list of values to use for the state transition matrix matrix.\n\n Qs : list-like\n list of values to use for the process error\n covariance.\n\n Hs : list-like\n list of values to use for the measurement matrix.\n\n Rs : list-like\n list of values to use for the measurement error\n covariance.\n\n Bs : list-like, optional\n list of values to use for the control transition matrix;\n a value of None in any position will cause the filter\n to use `self.B` for that time step.\n\n us : list-like, optional\n list of values to use for the control input vector;\n a value of None in any position will cause the filter to use\n 0 for that time step.\n\n update_first : bool, optional\n controls whether the order of operations is update followed by\n predict, or predict followed by update. Default is predict->update.\n\n saver : filterpy.common.Saver, optional\n filterpy.common.Saver object. If provided, saver.save() will be\n called after every epoch\n\n Returns\n -------\n\n means : np.array((n,dim_x,1))\n array of the state for each time step after the update. Each entry\n is an np.array. In other words `means[k,:]` is the state at step\n `k`.\n\n covariance : np.array((n,dim_x,dim_x))\n array of the covariances for each time step after the update.\n In other words `covariance[k,:,:]` is the covariance at step `k`.\n\n means_predictions : np.array((n,dim_x,1))\n array of the state for each time step after the predictions. Each\n entry is an np.array. In other words `means[k,:]` is the state at\n step `k`.\n\n covariance_predictions : np.array((n,dim_x,dim_x))\n array of the covariances for each time step after the prediction.\n In other words `covariance[k,:,:]` is the covariance at step `k`.\n\n Examples\n --------\n\n .. 
code-block:: Python\n\n zs = [t + random.randn()*4 for t in range (40)]\n Fs = [kf.F for t in range (40)]\n Hs = [kf.H for t in range (40)]\n\n (mu, cov, _, _) = kf.batch_filter(zs, Rs=R_list, Fs=Fs, Hs=Hs, Qs=None,\n Bs=None, us=None, update_first=False)\n (xs, Ps, Ks, Pps) = kf.rts_smoother(mu, cov, Fs=Fs, Qs=None)\n\n \"\"\"\n\n n = np.size(zs, 0)\n dim_x = x.shape[0]\n\n # mean estimates from Kalman Filter\n if x.ndim == 1:\n means = zeros((n, dim_x))\n means_p = zeros((n, dim_x))\n else:\n means = zeros((n, dim_x, 1))\n means_p = zeros((n, dim_x, 1))\n\n # state covariances from Kalman Filter\n covariances = zeros((n, dim_x, dim_x))\n covariances_p = zeros((n, dim_x, dim_x))\n\n if us is None:\n us = [0.] * n\n Bs = [0.] * n\n\n if update_first:\n for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):\n\n x, P = update(x, P, z, R=R, H=H)\n means[i, :] = x\n covariances[i, :, :] = P\n\n x, P = predict(x, P, u=u, B=B, F=F, Q=Q)\n means_p[i, :] = x\n covariances_p[i, :, :] = P\n if saver is not None:\n saver.save()\n else:\n for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):\n\n x, P = predict(x, P, u=u, B=B, F=F, Q=Q)\n means_p[i, :] = x\n covariances_p[i, :, :] = P\n\n x, P = update(x, P, z, R=R, H=H)\n means[i, :] = x\n covariances[i, :, :] = P\n if saver is not None:\n saver.save()\n\n return (means, covariances, means_p, covariances_p)\n\n\n\ndef rts_smoother(Xs, Ps, Fs, Qs):\n \"\"\"\n Runs the Rauch-Tung-Striebel Kalman smoother on a set of\n means and covariances computed by a Kalman filter. The usual input\n would come from the output of `KalmanFilter.batch_filter()`.\n\n Parameters\n ----------\n\n Xs : numpy.array\n array of the means (state variable x) of the output of a Kalman\n filter.\n\n Ps : numpy.array\n array of the covariances of the output of a kalman filter.\n\n Fs : list-like collection of numpy.array\n State transition matrix of the Kalman filter at each time step.\n\n Qs : list-like collection of numpy.array, optional\n Process noise of the Kalman filter at each time step.\n\n Returns\n -------\n\n x : numpy.ndarray\n smoothed means\n\n P : numpy.ndarray\n smoothed state covariances\n\n K : numpy.ndarray\n smoother gain at each step\n\n pP : numpy.ndarray\n predicted state covariances\n\n Examples\n --------\n\n .. code-block:: Python\n\n zs = [t + random.randn()*4 for t in range (40)]\n\n (mu, cov, _, _) = kalman.batch_filter(zs)\n (x, P, K, pP) = rts_smoother(mu, cov, kf.F, kf.Q)\n \"\"\"\n\n if len(Xs) != len(Ps):\n raise ValueError('length of Xs and Ps must be the same')\n\n n = Xs.shape[0]\n dim_x = Xs.shape[1]\n\n # smoother gain\n K = zeros((n, dim_x, dim_x))\n x, P, pP = Xs.copy(), Ps.copy(), Ps.copy()\n\n for k in range(n-2, -1, -1):\n pP[k] = dot(dot(Fs[k], P[k]), Fs[k].T) + Qs[k]\n\n #pylint: disable=bad-whitespace\n K[k] = dot(dot(P[k], Fs[k].T), linalg.inv(pP[k]))\n x[k] += dot(K[k], x[k+1] - dot(Fs[k], x[k]))\n P[k] += dot(dot(K[k], P[k+1] - pP[k]), K[k].T)\n\n return (x, P, K, pP)\n","sub_path":"filterpy/kalman/kalman_filter.py","file_name":"kalman_filter.py","file_ext":"py","file_size_in_byte":58739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"564265082","text":"from django.test import TestCase\nfrom ..models.products import Brand, Product\n\n\nclass ProductTestCase(TestCase):\n\n def setUp(self):\n self.brand = Brand.objects.create(\n name='Apple',\n brand_description='Apple product has the best hardware. 
The user '\n                              'experience and our designs are world class'\n        )\n        self.brand.save()\n\n    def test_product_can_be_added(self):\n        Product.objects.create(\n            name='MacBook Air',\n            product_description='This is a 14\\' inch machine. 16GB RAM and 256GB HDD',\n            warranty=2,\n            price=600,\n            tagline='laptop, 16GB, backlight keyboard',\n            brand=Brand.objects.get(name=self.brand.name)\n        )\n        created_product = Product.objects.get(name='MacBook Air')\n        self.assertEqual(created_product.name, 'MacBook Air')\n        self.assertEqual(int(created_product.warranty), 2)\n","sub_path":"core/tests/test_product_model.py","file_name":"test_product_model.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"525160381","text":"from django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMessage\nfrom django.core.validators import validate_email\nfrom django.template import Context, RequestContext\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import get_language, activate\nfrom pynliner import Pynliner\n\n\ndef send_html_mail(subject, template_name, dictionary, from_email=None,\n                   to=None, cc=None, bcc=None, fail_silently=False,\n                   request=None, css_files=None, lang=None):\n    \"\"\"Custom send_mail for sending HTML emails rendered from a template\"\"\"\n\n    if lang:\n        current_lang = get_language()\n        activate(lang)\n\n    if request:\n        context = RequestContext(request, dictionary)\n    else:\n        context = Context(dictionary)\n    message_body = render_to_string(template_name, context_instance=context)\n    message_body = Pynliner().from_string(message_body).run()\n\n    message = EmailMessage(subject, message_body, from_email, to, bcc, cc=cc)\n    message.content_subtype = 'html'\n    result = message.send(fail_silently)\n\n    if lang:\n        activate(current_lang)\n\n    return result\n\n\ndef valid_email_or_empty(email):\n    \"\"\"Validates that the email is correct, or return an empty string.\"\"\"\n    try:\n        validate_email(email)\n        return email\n    except ValidationError:\n        return \"\"\n","sub_path":"sgk/utils_sgk/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"484072471","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 27 16:48:52 2019\n\n@author: PetalSaya\n\"\"\"\nimport stats_word\nimport requests\nfrom pyquery import PyQuery\nfrom wxpy import *\n\nbot = Bot()\n\nmy_friend = bot.friends().search('Daybreak', sex=MALE, city=\"成都\")[0]\nmy_friend.send(\"Mind forwarding me an article?\")\n\n@bot.register(my_friend)\ndef step1(msg):\n    if msg.type == 'Sharing':\n        response = requests.get(msg.url)\n        document = PyQuery(response.text)\n        content = document('#js_content').text()\n        result = stats_word.stats_text_cn(content,100)\n        return result\n\nembed()","sub_path":"exercises/1901050027/Day 12/Mymodule/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"410419732","text":"#coding=utf8\n\nimport numpy as np\nimport math\n\ndef getHistogram(image,limit):\n    \n    array = image.flatten()\n    hist = np.zeros(limit,dtype='int')\n    \n    for i in range(0,len(array)):\n        hist[array[i]] = hist[array[i]] + 1\n    \n    return hist\n    \n#def getHistogramProb(image, limit):\n#    M = float(image.shape[0])\n#    N = float(image.shape[1])\n    \n#    histograma = getHistogram(image,limit)\n    \n#    hist = np.zeros(limit, 
dtype='float64')\n","sub_path":"Linear Transformation/lib/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"341579887","text":"from task.bug_task import Bug\nfrom task.feature_task import Feature\nfrom task.story_task import Story\nfrom utilities import SubTrack\nfrom enum import Enum\nfrom datetime import datetime\nfrom sprint import Sprint\nfrom utilities import BugStatus, FeatureStatus, StoryStatus\nclass TaskPlanner:\n \n def __init__(self):\n self.tasks = {} # title to task mapping\n self.sprints = {} # sprint name to list of tasks title mapping\n self.user_tasks = {}\n \n def create_task(self, task_type, assignee, title, creator, due_date,\n feature_summary, severity,\n story_summary, impact):\n\n task = self.tasks.get(title, None)\n if task is not None:\n raise Exception(f\"{title} task already added\")\n\n if task_type == 'bug_task':\n status = BugStatus()\n task = Bug(title, creator, due_date, severity, status,assignee)\n \n if task_type == 'feature_task':\n status = FeatureStatus()\n task = Feature(title, creator, due_date, feature_summary, impact, status, assignee)\n \n if task_type == 'story_task':\n status = StoryStatus()\n task = Story(title, creator, due_date, story_summary, status, assignee)\n\n self.tasks[title] = task\n if assignee is not None:\n tasks = self.user_tasks.get(assignee, [])\n tasks.append(task)\n self.user_tasks[assignee] = tasks\n \n \n def create_sub_task(self, title_subtask, title_task):\n task = self.tasks.get(title_task, None)\n if task is None:\n raise Exception(f\"{title_task} not present\")\n \n if task.task_type != 'story_task':\n raise Exception(f\"{title_task} is not story_task\")\n \n if task.is_completed():\n raise Exception(f\"{title_task} is completed\")\n status = StoryStatus()\n sub_task = SubTrack(title_subtask, status)\n task.add_subtrack(sub_task)\n \n\n \n def change_status_task(self, title_task, new_status):\n task = self.tasks.get(title_task, None)\n if task is None:\n raise Exception(f\"{title_task} not present\")\n\n task.change_status(new_status)\n\n\n \n def change_assignee_task(self, title_task, assignee):\n task = self.tasks.get(title_task, None)\n if task is None:\n raise Exception(f\"{title_task} not present\")\n task.assignee = assignee\n\n \n def display_tasks_user_type(self, assigned_user, task_type):\n user_tasks = self.user_tasks.get(assigned_user, [])\n delayed_tasks = []\n other_tasks = []\n for task in user_tasks:\n if task_type is not None and task.task_type != task_type:\n continue\n\n if task.is_overdue(datetime.now()):\n delayed_tasks.append(task.title)\n else:\n other_tasks.append(task.title)\n print(f\"Tasks for user {assigned_user} for type {task_type}\")\n print(\"on track tasks :\")\n print(\"\\n\".join(other_tasks))\n print(\"Delayed tasks :\")\n print(\"\\n\".join(delayed_tasks))\n\n def display_tasks_user_all(self, assigned_user):\n user_tasks = self.user_tasks.get(assigned_user, [])\n tasks_map = {}\n for task in user_tasks:\n tasks = tasks_map.get(task.task_type, [])\n tasks.append(task)\n tasks_map[task.task_type] = tasks\n\n print(f\"user=>{assigned_user}\")\n for key, value in tasks_map.items():\n print(f\"task_type: {key}\")\n for ts in value:\n print(str(ts))\n\n \n def create_sprint(self, sprint_name):\n if sprint_name in self.sprints:\n raise Exception(f'{sprint_name} already exsists')\n sprint = Sprint(sprint_name)\n self.sprints[sprint_name] = sprint\n\n def 
delete_sprint(self, sprint_name):\n if sprint_name not in self.sprints:\n raise Exception(f'{sprint_name} does not exsists')\n sprint = self.sprints[sprint_name]\n for task_title in sprint.tasks:\n task = self.tasks.get(task_title)\n task.sprint = None\n\n self.sprints[sprint_name] = None\n\n def add_task_to_sprint(self, title_task, sprint_name):\n task = self.tasks.get(title_task, None)\n if task is None:\n raise Exception(f\"{title_task} not present\")\n if sprint_name not in self.sprints:\n raise Exception(f'{sprint_name} does not exsists')\n \n sprint = self.sprints[sprint_name]\n sprint.add_task(task)\n \n def remove_task_from_sprint(self, title_task, sprint_name):\n task = self.tasks.get(title_task, None)\n if task is None:\n raise Exception(f\"{title_task} not present\")\n if sprint_name not in self.sprints:\n raise Exception(f'{sprint_name} does not exsists')\n task.sprint = None\n sprint = self.sprints[sprint_name]\n sprint.remove_task(task)\n \n \n def display_sprint(self, sprint_name):\n if sprint_name not in self.sprints:\n raise Exception(f'{sprint_name} does not exsists')\n\n sprint = self.sprints[sprint_name]\n if sprint is None:\n raise Exception(f'{sprint_name} does not exsists')\n sprint.display_sprint()\n\n def is_completed_task(self, title_task):\n task = self.tasks.get(title_task, None)\n if task is None:\n raise Exception(f\"{title_task} not present\")\n\n print(task.is_completed())\n\nclass TaskType(Enum):\n BUG_TYPE = \"bug_task\"\n FEATURE_TYPE = \"feature_task\"\n STORY_TYPE = \"story_task\"\n\n\nif __name__ == '__main__':\n\n task_planner = TaskPlanner()\n task_planner.create_task(TaskType.BUG_TYPE.value,\"sandeep\", \"bug_1\", \"rajat\",\n datetime(2019,5,19), \"testing\", \"P0\", \"test\", \"low\")\n\n task_planner.create_task(TaskType.STORY_TYPE.value, \"sandeep\", \"story_1\", \"rajat\",\n datetime(2019, 7, 19), \"testing\", \"P1\", \"test story\", \"high\")\n\n task_planner.create_task(TaskType.STORY_TYPE.value, \"sandeep\", \"story_4\", \"rajat\",\n datetime(2019, 10, 19), \"testing\", \"P0\", \"check module\", \"high\")\n\n task_planner.create_task(TaskType.FEATURE_TYPE.value, \"sandeep\", \"feature_1\", \"rajat\",\n datetime(2019, 11, 19), \"testing\", \"P1\", \"ch\", \"low\")\n\n task_planner.create_task(TaskType.STORY_TYPE.value, \"rahul\", \"story_2\", \"rajat\",\n datetime(2019, 8, 19), \"testing\", \"P1\", \"test\", \"medium\")\n\n task_planner.create_task(TaskType.FEATURE_TYPE.value, \"rahul\", \"feature_2\", \"rajat\",\n datetime(2019, 6, 19), \"testing\", \"P1\", \"test\", \"high\")\n \n task_planner.create_task(TaskType.FEATURE_TYPE.value, \"rahul\", \"feature_3\", \"rajat\",\n datetime(2019, 8, 19), \"testing\", \"P2\", \"check low\", \"\")\n\n task_planner.create_task(TaskType.STORY_TYPE.value, \"kuldeep\", \"story_3\", \"rajat\",\n datetime(2019, 8, 19), \"testing\", \"P2\", \"check\", \"high\")\n\n print(\"\\n###(1)####\\n\")\n task_planner.display_tasks_user_type(\"sandeep\", TaskType.STORY_TYPE.value)\n print(\"\\n####(2)###\\n\")\n task_planner.display_tasks_user_all(\"sandeep\")\n\n try:\n task_planner.create_sprint(\"sprint_1\")\n task_planner.create_sprint(\"sprint_2\")\n task_planner.create_sprint(\"sprint_1\")\n except Exception as e:\n print(\"\\n###(3)####\\n\")\n print(str(e))\n\n task_planner.add_task_to_sprint(\"story_1\", \"sprint_1\")\n task_planner.add_task_to_sprint(\"story_2\", \"sprint_1\")\n task_planner.add_task_to_sprint(\"feature_1\", \"sprint_1\")\n task_planner.add_task_to_sprint(\"feature_2\", \"sprint_1\")\n 
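    # NOTE: create_sub_task() only works for story tasks, so subtask4 on bug_1 below raises and is caught in block (4).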
task_planner.add_task_to_sprint(\"bug_1\", \"sprint_2\")\n\n try:\n task_planner.create_sub_task(\"subtask1\", \"story_1\")\n task_planner.create_sub_task(\"subtask2\", \"story_1\")\n task_planner.create_sub_task(\"subtask3\", \"story_1\")\n task_planner.create_sub_task(\"subtask4\", \"bug_1\")\n except Exception as e:\n print(\"\\n####(4)###\\n\")\n print(str(e))\n pass\n\n print(\"\\n###(5)####\\n\")\n task_planner.display_sprint(\"sprint_1\")\n print(\"\\n###(6)####\\n\")\n task_planner.display_sprint(\"sprint_2\")\n\n print(\"\\n####(7)###\\n\")\n task_planner.display_tasks_user_all(\"sandeep\")\n\n print(\"\\n####(8)###\\n\")\n try:\n task_planner.change_status_task(\"bug_1\", \"open\")\n except Exception as e:\n print(\"\\n####(9)###\\n\")\n print(str(e))\n try:\n task_planner.change_status_task(\"bug_1\", \"progress\")\n task_planner.change_status_task(\"bug_1\", \"fixed\")\n task_planner.change_status_task(\"bug_1\", \"open\")\n except Exception as e:\n print(\"\\n###(10)####\\n\")\n print(str(e))\n\n print(\"\\n####(11)###\\n\")\n task_planner.display_tasks_user_type(\"kuldeep\", TaskType.STORY_TYPE.value)\n\n task_planner.is_completed_task(\"bug_1\")\n task_planner.is_completed_task(\"feature_1\")\n\n\n task_planner.delete_sprint(\"sprint_2\")\n try:\n task_planner.display_sprint(\"sprint_2\")\n except Exception as e:\n print(\"\\n###(12)####\\n\")\n print(str(e))\n\n task_planner.remove_task_from_sprint(\"feature_2\", \"sprint_1\")\n print(\"\\n###(13)####\\n\")\n task_planner.display_sprint(\"sprint_1\")","sub_path":"utilities/task_planner/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"349326018","text":"\"\"\" Demo code for SE360 \"\"\"\n\nfrom setuptools import setup\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent\nlong_description = (this_directory / \"README.md\").read_text()\n\nVERSION = '0.1.2'\nDOCLINES = (__doc__ or '').split(\"\\n\")\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Manufacturing\",\n \"Intended Audience :: End Users/Desktop\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: OS Independent\",\n \"Natural Language :: English\",\n \"License :: Other/Proprietary License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n]\n\ndef run_setup():\n setup(\n name='se360demo',\n version=VERSION,\n description=DOCLINES[0],\n long_description=long_description,\n classifiers=CLASSIFIERS,\n author='Hank Anderson',\n author_email='hank@statease.com',\n license='Other/Proprietary License',\n url='https://github.com/statease/se360-python-demo',\n packages=['se360demo'],\n package_data={'se360demo': [ 'data/*.dxpx', 'data/*.csv', 'examples/*.py' ] },\n install_requires=['statease', 'requests', 'matplotlib', 'numpy', 'sklearn'],\n long_description_content_type='text/markdown',\n )\n\nrun_setup()\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"361931583","text":"txt = input(\"Digite algo: \")\ntxt2 = input(\"Digite novamente: \")\ntxt3 = input(\"Digite para o que vai ser mudado: \")\ndef 
replace(string,sub,m):\n palavra = \"\"\n cont1 = 0\n ver = True\n for i in range(len(string)):\n if string[i:i+len(sub)] == sub:\n palavra += m\n cont1 = i+len(sub)\n ver = False\n else:\n print(cont1)\n if i >= cont1 or ver: \n palavra += string[i] \n return palavra\n\nprint(replace(txt,txt2,txt3))","sub_path":"Ignorância Zero/050exercício3.py","file_name":"050exercício3.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"466113261","text":"class Node:\n def __init__(self, data):\n self._data = data\n self._nextNode = None\n\n @property\n def data(self):\n return self._data\n\n @property\n def nextNode(self):\n return self._nextNode\n\n @nextNode.setter\n def nextNode(self, value):\n self._nextNode = value\n\n\nclass LinkedList:\n def __init__(self):\n self._rootNode = None\n\n def add(self, data):\n tmp = Node(data)\n if self._rootNode == None:\n self._rootNode = tmp\n return\n v = self._rootNode\n while v.nextNode != None:\n v = v.nextNode\n v.nextNode = tmp\n\n def dump(self):\n if self._rootNode == None:\n return\n v = self._rootNode\n while v != None:\n print(v.data)\n v = v.nextNode\n\n def delete(self, data):\n if self._rootNode == None:\n return\n if self._rootNode.data == data:\n self._rootNode = self._rootNode.nextNode\n return\n v1 = self._rootNode\n v2 = v1.nextNode\n while v2 != None:\n if v2.data == data:\n v1.nextNode = v2.nextNode\n return\n v1 = v2\n v2 = v2.nextNode\n\n def findMid(self):\n if self._rootNode == None:\n print(\"LinkedList boştur.\")\n return\n if self._rootNode.nextNode == None:\n print(\"LinkedList tek elemanlıdır.\")\n return\n if self._rootNode.nextNode != None and self._rootNode.nextNode.nextNode == None:\n print(\"LinkedList 2 elemanlıdır orta elemanı yoktur.\")\n return\n slowptr = self._rootNode\n fastptr = slowptr.nextNode\n while fastptr != None:\n try:\n slowptr = slowptr\n fastptr = fastptr.nextNode\n slowptr = slowptr.nextNode\n fastptr = fastptr.nextNode\n except AttributeError:\n print(\"Linkedlist uzunluğu çift sayıdır. 
Ortada 2 eleman vardır.\")\n double = self._rootNode\n double2 = double.nextNode\n while double2 != slowptr:\n double = double2\n double2 = double2.nextNode\n print(\"Ortadaki Eleman : \", double.data)\n print(\"Ortadaki Eleman : \", slowptr.data)\n\n\nif __name__ == \"__main__\":\n import random\n\n print(\"--\" * 10)\n a = LinkedList()\n a.add(5)\n a.add(10)\n a.add(15)\n a.add(20)\n a.add(25)\n a.add(30)\n a.add(35)\n a.add(40)\n a.dump()\n print(\"--\" * 10)\n a.findMid()\n print(\"--\" * 10)\n b = LinkedList()\n x = random.randint(1, 100)\n print(\"Oluşturulan random linkedlist\", x, \"Elemanlıdır.\")\n for i in range(x):\n b.add(random.randint(1, 100))\n b.dump()\n print(\"--\" * 10)\n b.findMid()\n print(\"--\" * 10)","sub_path":"Week5/HW1.py","file_name":"HW1.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"607321323","text":"import tensorflow as tf\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# tensor1 = tf.constant(4.0)\n# tensor2 = tf.constant([1, 2, 3, 4])\n# x = tf.constant([[1,1],[1,1],[1,1]])\n# linear_squares = tf.constant([[4], [9], [16], [25]], dtype=tf.int32)\n#\n# print(tensor1.shape)\n# print(tensor2.shape)\n# print(x.shape)\n# print(linear_squares.shape)\n# # 0维:() 1维:(10, ) 2维:(3, 4) 3维:(3, 4, 5)\n# # tf.fill(dims,value,name=None)\n# y=tf.random_normal(shape=(3,4),mean=0,stddev=1.0,dtype=tf.float32,seed=1,name=\"random11\")\n# with tf.Session() as sess:\n#\n# print(sess.run(y))\n# print(y)\n# cast=tf.cast(x,dtype=tf.float32,name=None)\n# print(sess.run(cast))\ncon1 = tf.constant([[1,2],[3,4]])\nplt=tf.placeholder(dtype=tf.float32,shape=[None,4])\nprint(plt)\nplt.set_shape([5,4])\nprint(plt)\nplt_reshape=tf.reshape(plt,[4,5])\nprint(plt_reshape)","sub_path":"张量1.py","file_name":"张量1.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"406161483","text":"import bpy\r\nfrom mathutils import Matrix, Vector\r\n\r\nfrom ... 
utility import addon, view3d, screen\r\n\r\nLAST_BOOL_HIGHLIGHT = None\r\n\r\ndef update(ot, context, dots, mouse):\r\n obj = context.active_object\r\n\r\n ot.highlight_type = \"none\"\r\n ot.highlight = False\r\n ot.highlight_indices = []\r\n\r\n for index, point in enumerate(dots.points):\r\n location = view3d.location3d_to_location2d(point.location3d)\r\n point.location2d = location if location else (0, 0)\r\n point.highlight = False\r\n\r\n highlight = test_select(ot, point.location2d, mouse)\r\n\r\n if highlight:\r\n ot.highlight = True\r\n ot.highlight_indices.append(index)\r\n ot.highlight_location = obj.matrix_world.inverted() @ Vector((point.location3d[0], point.location3d[1], point.location3d[2]))\r\n\r\n if point.type == 'boolshape':\r\n location = view3d.location2d_intersect3d(ot.mouse.x, ot.mouse.y, point.location3d, Vector((0, 0, -1)))\r\n point_loc = Vector(point.location3d[:])\r\n if isinstance(location, Vector):\r\n distance = (location - point_loc).length\r\n else:\r\n distance = (Vector((0, 0, 0)) - point_loc).length\r\n fade_distance = addon.preference().display.dot_boolshape_fade_distance\r\n inverse = distance / fade_distance\r\n\r\n if inverse > 1:\r\n inverse = 1.0\r\n\r\n point.alpha = 1.0 - inverse\r\n point.display = inverse < 1.0\r\n\r\n if ot.highlight:\r\n closest = min([\r\n (mouse - Vector(dots.points[point].location2d[:]), point)\r\n for point in ot.highlight_indices])\r\n\r\n ot.active_point = dots.points[closest[1]]\r\n point = ot.active_point\r\n point.highlight = True\r\n dots.hit = True\r\n dots.location = point.location3d\r\n ot.highlight_type = point.type\r\n ot.highlight_modname = point.name\r\n\r\n\r\n # Show bool mesh wire on hover\r\n global LAST_BOOL_HIGHLIGHT\r\n if point.type == 'boolshape':\r\n for mod in context.active_object.modifiers:\r\n if mod.type == 'BOOLEAN':\r\n if mod.object:\r\n if mod.object.name == point.name:\r\n if mod.object != None:\r\n if LAST_BOOL_HIGHLIGHT != point.name:\r\n LAST_BOOL_HIGHLIGHT = point.name\r\n bpy.ops.hops.draw_wire_mesh_launcher(object_name=point.name)\r\n break\r\n else:\r\n LAST_BOOL_HIGHLIGHT = \"\"\r\n\r\n\r\n del point\r\n del closest\r\n\r\n else:\r\n dots.hit = False\r\n dots.location = (0.0, 0.0, 0.0)\r\n dots.normal = Vector()\r\n\r\n\r\ndef test_select(ot, location, mouse):\r\n size = addon.preference().display.dot_size * screen.dpi_factor() * addon.preference().display.dot_detect\r\n location = Vector(location[:])\r\n within_x = mouse.x > location[0] - size and mouse.x < location[0] + size\r\n within_y = mouse.y > location[1] - size and mouse.y < location[1] + size\r\n\r\n return within_x and within_y\r\n","sub_path":"addon/operator/tool/dots.py","file_name":"dots.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"596252453","text":"from django.db import models\nfrom wagtail.wagtailcore.models import Page\n\n\nfrom apps.contents.models import ContentSection\nfrom modelcluster.fields import ParentalKey\n\nfrom wagtail.wagtailadmin.edit_handlers import (\n FieldPanel,\n InlinePanel,\n)\n\n\nclass multiContents(ContentSection):\n\n page = ParentalKey(\n 'multiPage',\n related_name='multiContents',\n )\n\n\nclass multiPage(Page):\n\n class Meta:\n verbose_name = 'Multi Content Page'\n\n bodyId = models.CharField(\n max_length=50,\n default='defaultPage',\n blank=True\n )\n\n metaContent = models.TextField(\n default='Description is used by Search Engines to describe your page.',\n blank=True\n )\n\n 
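    # content_panels defines the Wagtail admin edit form; InlinePanel('multiContents', ...) renders the ParentalKey-linked contents inline.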
content_panels = Page.content_panels + [\n FieldPanel('bodyId', classname='full'),\n InlinePanel('multiContents', label='contents'),\n FieldPanel('metaContent', classname='full'),\n ]\n","sub_path":"cms/apps/multipage/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"180861895","text":"# Shreyas Poyrekar\n# 07/1/2020\n# problem: Largest palindrome product (Project Euler)\n# Find the largest palindrome made from the product of two 3-digit numbers.\nimport time\n\n#returns a list of all digits in of a integer\ndef int_list(n):\n ret = []\n while n!=0:\n ret.insert(0,n%10)\n n=int(n/10)\n return ret\n\n#recursive function to check if a number is palindrome or not\ndef is_palindrome(lst,start,end):\n if( start >= end):\n return True\n if(lst[start] == lst[end]):\n return is_palindrome(lst,start+1,end-1)\n else:\n return False\n\n\"\"\"\n i = 1000 - a\n j = 1000 - b\n num = i*j\n\n (a + 1) = 1001 - i\n (b + 1) = 1001 - j\n \n 1000000 - 1000(a+b) + ab\n\n 100000x + 10000y + 1000z + 100z +10y + x = 1000001x + 10010y + 1010z\n assumed ab is a 3 digit number equal to shown below\n 100z + 10y + x = ab\n 1000(100x + 10y + z) = 1000(1000 - (a+b))\n\n 99z - 99x = ab + (a+b) - 1000\n z - x = 1/99(ab + a + b - 10) - 990/99 \n z - x = 1/99(ab + a + b - 10) - 10\n \n 11(9p + 1) = (a+1)(b+1)\n 11(9p + 1) = (1001 - i) * (1001 - j)\n 1001 - 11/(1001 - i) = j\n 1001 - 110/(1001 - i) = j\n and so on\n assumed p goes from 0 to 9\n p = (z-x) + 10\n \n\"\"\"\n\n\ndef main():\n s_flag = False\n for p in range(0,10):\n if (s_flag):\n break\n n = (11*(9*p + 1))\n for i in range(999,n,-1):\n j = 1001 - n*(1001 - i)\n num = i*j\n lst = int_list(num)\n if (is_palindrome(lst,0,len(lst)-1)):\n print(\"The largest palindrome made from the product of two 3-digit numbers is \" + str(num))\n s_flag = True\n break\n\n \nif __name__==\"__main__\":\n start_time = time.time()\n main()\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n","sub_path":"4. 
Largest Palindrome Product/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"314111733","text":"def isRotation(str1,str2):\n if len(str1) != len(str2):\n return False\n str = str1 + str1\n if str.find(str2) > -1:\n return True\n else:\n return False\n\nstr1 = \"waterbottle\"\nstr2 = \"erbottlewat\"\nif isRotation(str1, str2):\n print (\"str2 is a rotation of str1.\")\nelse:\n print (\"str2 is not a rotation of str1.\")","sub_path":"Algorithms/rotation.py","file_name":"rotation.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"212965272","text":"def start():\n print(\"\"\"这是一个测试游戏。请根据提示输入字母,进行操作。\n ok let's start!\n \"\"\")\n usrnm()\n print(\"\"\"there are two doors,left and right.\n Which one do you want to choice?\n 这有两扇门,左边和右边,你会选择哪一边?\n \"\"\")\n choice = input(\"> \")\n if choice == \"left\":\n Cthulhuroom()\n elif choice ==\"right\":\n snakeroom()\n else:\n print(\"\"\"\n good job the best choice is choose nothing.\n 没错,最好的选择就是什么都不选。\n \"\"\")\ndef usrnm():\n print(\"Hi,I'm N,what's your name?\")\n name = input(\"> \")\n print(f\"Ok,your name is {name}?\")\ndef dead(why):\n print(why,\"sorry you are lose,you can try it again.\")\n exit(0)\ndef snakeroom():\n dead(\"\"\"You enter the snakeroom and bited by snake,finally you dead.\"\"\")\ndef Cthulhuroom():\n dead(\"\"\"Hi,Man you see the Cthulhu,you dead.\"\"\")\n\nstart()\n","sub_path":"ex36.py","file_name":"ex36.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"109184407","text":"import yaml\nfrom pathlib import Path\nimport traceback\nfrom tabulate import tabulate\n\nfiles = {}\n\n\ndef set_data(filename, translation, data):\n if translation not in data:\n raise ValueError(f\"{filename}.{translation}.yml must begin with `{translation}:`\")\n if filename not in files:\n files[filename] = {}\n files[filename][translation] = data[translation]\n\n\ndef get_translation_counts(content):\n if isinstance(content, str):\n return 1\n return sum(get_translation_counts(v) for v in content.values())\n\n\ndef get_stats(file):\n current_file = {}\n for tr, content in file.items():\n if tr not in current_file:\n current_file[tr] = 0\n count = get_translation_counts(content)\n current_file[tr] += count\n\n max_count = max(current_file.values())\n for k, v in current_file.items():\n current_file[k] = f\"{v} ({round(100*v/max_count)}%)\"\n\n return current_file\n\n\ndef main():\n base_file = Path(__file__).parent\n for yml_file in base_file.rglob(\"*.yml\"):\n print(f\"Loading {yml_file}\")\n with open(yml_file, \"r\", encoding=\"utf-8\") as yml:\n filename, translation, _ = \".\".join(yml_file.relative_to(base_file).parts).rsplit(\".\", 2)\n data = yaml.safe_load(yml)\n set_data(filename, translation, data)\n\n langs = set()\n for f in files.values():\n langs.update(f.keys())\n langs = list(sorted(langs))\n stats = []\n for file, data in files.items():\n f_stats = get_stats(data)\n row = [file]\n for header in langs:\n if header in f_stats:\n row.append(f_stats[header])\n else:\n row.append(\"-\")\n stats.append(row)\n print(tabulate(stats, headers=langs))\n\n print(\"All files loaded successfully!\")\n\n\ntry:\n main()\nexcept Exception as e:\n traceback.print_exc()\nfinally:\n input(\"Press Enter to 
exit\")\n","sub_path":"test_translations.py","file_name":"test_translations.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"534748171","text":"'''\n\tsolution.py\n\tDecided to do this in Python, since it appeals\n\tto perform list/array manipulation in Python \n\trather than Java.\n'''\n\nimport unittest\n\ndef solve(arrs):\n\tcommons = []\n\tfor i in arrs[0]:\n\t\tif i in arrs[1]:\n\t\t\tcommons.append(i)\n\treturn list(set(commons))\n\nclass Test(unittest.TestCase):\n\t\n\tdata = [(([9,8,1,4,0,2],[14,7,6,3,3,2,5,0,10,11,1]), [0,1,2]),\n\t\t\t(([0,0,0,0,0,0],[1,1,1,1,1,1,1,1,1,1]), [])]\n\t\n\t\n\tdef test_solution(self):\n\t\tfor [case, expected] in self.data:\n\t\t\tactual = solve(case)\n\t\t\tself.assertEqual(actual, expected)\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"Self_Challenges/RedditChallenges/CommonElementsOfTwo/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"598023479","text":"from matplotlib.pyplot import *\n\nT = 0.1\nx = 1\ny = [0, 0]\nt = [0, T]\n\nrng = 200\nfor n in range(rng):\n newy = (2 * y[n]) - (1 + T ** 2) * y[n - 1] + (T ** 2) * x\n y.append(newy)\n t.append(t[n] + T)\n\nplot(t, y, \"b-\",)\nshow()\n","sub_path":"homework2/spring-mass-ct-vs-dt/diff-eqn-ct-vs-dt.py","file_name":"diff-eqn-ct-vs-dt.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"372113692","text":"from django.views.generic.edit import UpdateView\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Post, Comment\nfrom .forms import PostForm\nfrom django.shortcuts import get_object_or_404\n\n# Create your views here.\n\ndef post_list(request):\n posts = Post.objects.all().order_by('-date')\n query = request.GET.get(\"q\")\n if query:\n posts = posts.filter(title=query)\n return render(request,'blog/blog.html',{'posts':posts})\n\ndef personal_post_list(request,pk):\n posts = Post.objects.filter(author_id=pk).order_by('-date')\n pk = pk\n if posts:\n return render(request,'blog/personal_page.html',{'posts':posts})\n else:\n return render(request,'blog/personal_page.html',{'pk':pk})\n \n\ndef post_detail(request,pk):\n post = Post.objects.get(pk=pk)\n items = [post]\n items.extend(list(Comment.objects.filter(post=post)))\n return render(request,'blog/post.html',{'items':items})\n \n@login_required(login_url = '/login/')\ndef PostCreate(request):\n if request.method == 'POST':\n form = PostForm(request.POST or None ,request.FILES or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.author = request.user\n instance.save()\n return redirect('blog:list')\n else:\n form = PostForm()\n return render(request,'blog/post_form.html',{'form':form})\n\nclass PostUpdate(UpdateView):\n model = Post\n fields = ['title','image','body']\n\ndef PostDelete(request,pk):\n if pk!=None:\n if request.method == \"POST\":\n pk = request.POST['post_id']\n try:\n post = Post.objects.get(pk=pk)\n post.delete()\n return redirect('blog:list')\n except:\n message = \"Reading Error\"\n return render(request,'blog/blog.html')\n\ndef PostLike(request, pk):\n post = Post.objects.get(pk=pk)\n next = request.GET.get('next', '/')\n if request.user not in post.likes.all():\n 
post.likes.add(request.user)\n return redirect('http://127.0.0.1:8000/blog/'+str(pk)+'/')\n else:\n post.likes.remove(request.user)\n return redirect('http://127.0.0.1:8000/blog/'+str(pk)+'/')\n \n return render(request,'blog/post.html',{'post':post})\n\ndef commentCreate(request, postId):\n comment = request.POST.get('comment')\n if comment:\n comment = comment.strip()\n if not comment:\n return redirect('http://127.0.0.1:8000/blog/'+str(postId)+'/')\n post = get_object_or_404(Post, id=postId)\n Comment.objects.create(post=post, user=request.user, content=comment)\n return redirect('http://127.0.0.1:8000/blog/'+str(postId)+'/')\n\ndef commentDelete(request,pk):\n if pk!=None:\n if request.method == \"POST\":\n pk = request.POST['post_id']\n try:\n comment = Comment.objects.get(pk=pk)\n postId = comment.post_id\n comment.delete()\n return redirect('http://127.0.0.1:8000/blog/'+str(postId)+'/')\n except:\n message = \"Reading Error\"\n return render(request,'blog/blog.html')","sub_path":"mysite/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"496022597","text":"# Library to provide some simple controller functions for BB-8\n\nimport pygame\nimport time\nimport BB8_driver\n\n# Define arbitrary speeds\nFASTEST = 255\nVERY_FAST = 210\nFAST = 160\nMEDIUM = 100\nSLOW = 50\nVERY_SLOW = 32\nSLOWEST = 8\n\n\nclass BB8Controller(object):\n \"\"\"Simplified controller for BB-8\"\"\"\n\n def __init__(self):\n pygame.mixer.init()\n #pygame.mixer.music.load('sounds/connect.mp3')\n #pygame.mixer.music.play()\n\n self.bb8 = BB8_driver.Sphero()\n self.bb8.connect()\n self.bb8.start()\n time.sleep(2)\n self.flash_green(ntimes=2)\n\n def flash_color(self, color=(255, 255, 255), ntimes=3):\n \"\"\"Flash light with given with 1 second intervals\"\"\"\n\n for _ in xrange(ntimes):\n self.bb8.set_rgb_led(color[0], color[1], color[2], 0, False)\n time.sleep(1)\n self.bb8.join()\n self.bb8.set_rgb_led(0, 0, 0, 0, False)\n time.sleep(1)\n self.bb8.join()\n\n def flash_red(self, ntimes=3):\n \"\"\"Flash red light\"\"\"\n\n self.flash_color(color=(255, 0, 0), ntimes=ntimes)\n\n def flash_green(self, ntimes=3):\n \"\"\"Flash green light\"\"\"\n\n self.flash_color(color=(0, 255, 0), ntimes=ntimes)\n\n def flash_blue(self, ntimes=3):\n \"\"\"Flash blue light\"\"\"\n\n self.flash_color(color=(0, 0, 255), ntimes=ntimes)\n\n def turn_right(self):\n \"\"\"Turn sphero right\"\"\"\n\n pygame.mixer.music.load('sounds/turn.mp3')\n pygame.mixer.music.play()\n time.sleep(1)\n \n for _ in xrange(5):\n self.bb8.roll(150, 90, 1, True)\n self.bb8.set_heading(90, True)\n time.sleep(.1)\n\n def turn_left(self):\n \"\"\"Turn sphero left\"\"\"\n\n pygame.mixer.music.load('sounds/turn.mp3')\n pygame.mixer.music.play()\n time.sleep(1)\n\n for _ in xrange(5):\n self.bb8.roll(150, 270, 1, True)\n self.bb8.set_heading(270, True)\n time.sleep(.1)\n\n def displace(self, speed, direction, duration):\n \"\"\"Move and stop\"\"\"\n\n pygame.mixer.music.load('sounds/roll.mp3')\n pygame.mixer.music.play()\n\n self.bb8.roll(speed, direction, 1, True)\n time.sleep(duration)\n self.bb8.join()\n self.stop()\n\n def stop(self):\n \"\"\"Stop moving\"\"\"\n\n self.bb8.roll(0, 0, 0, True)\n time.sleep(.5)\n self.bb8.join()\n\n def go_forward(self, speed=SLOW, duration=1.5):\n \"\"\"Move forward\"\"\"\n\n self.displace(speed, 0, duration)\n\n def go_back(self, speed=SLOW, duration=1.5):\n \"\"\"Move back\"\"\"\n\n 
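        # A heading of 180 degrees rolls opposite the calibrated forward direction (0 degrees); displace() plays the roll sound, rolls for the given duration, then stops.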
self.displace(speed, 180, duration)\n\n def go_right(self, speed=SLOW, duration=1.5):\n \"\"\"Move right\"\"\"\n\n self.displace(speed, 90, duration)\n\n def go_left(self, speed=SLOW, duration=1.5):\n \"\"\"Move left\"\"\"\n self.displace(speed, 270, duration)\n\n def disconnect(self):\n \"\"\"Disconnect from BB-8\"\"\"\n pygame.mixer.music.load('sounds/disconnect.mp3')\n pygame.mixer.music.play()\n\n self.bb8.disconnect()\n time.sleep(2)\n self.bb8.join()\n\n print(\"Disconnected from BB-8\")\n","sub_path":"BB8_simple.py","file_name":"BB8_simple.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"308333690","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# @Time : 2021/9/22 7:41 下午\n# @Author : Hanley\n# @File : dbClient.py\n# @Desc :\n\nimport copy\nimport time\nimport traceback\nfrom typing import Union\nfrom urllib.parse import quote_plus\n\nimport aioredis\nimport pymongo\nimport redis\nfrom aioredis import Redis\nfrom motor.motor_asyncio import AsyncIOMotorClient\nfrom peewee import PeeweeException, DoesNotExist, RawQuery\nfrom peewee_async import PooledMySQLDatabase, Manager\nfrom pymysql.err import Error\n\nfrom libs.log import logging\n\n\nclass AsyncMongodb:\n \"\"\"\n motor多连接\n peer_conn = host + port + user\n \"\"\"\n __slots__ = (\n \"config\",\n \"peer_conn\",\n \"client\"\n )\n __conn = {}\n\n def __init__(self, config: dict):\n self.config = dict(config)\n self.init_db()\n\n def init_db(self):\n config_client = {}\n self.peer_conn = \"_\".join([\n self.config[\"HOST\"], str(self.config[\"PORT\"])])\n if self.config[\"USER\"]:\n self.peer_conn = \"_\".join([self.peer_conn, self.config[\"USER\"]])\n if not self.__conn.get(self.peer_conn):\n url = self._connect_url()\n self.client = AsyncIOMotorClient(\n url, maxPoolSize=100, maxIdleTimeMS=300000,\n waitQueueMultiple=10, serverSelectionTimeoutMS=5000)\n config_client.setdefault(\"config\", self.config)\n config_client.setdefault(\"client\", self.client)\n self.__conn.setdefault(self.peer_conn, config_client)\n logging.debug(f\"connect mongodb {self.peer_conn} successful\")\n else:\n self.client = self.__conn[self.peer_conn][\"client\"]\n self.config = self.__conn[self.peer_conn][\"config\"]\n\n def _connect_url(self):\n url = \"mongodb://\"\n domain = \"{host}:{port}/\".format(\n host=self.config[\"HOST\"], port=self.config[\"PORT\"]\n )\n\n if self.config[\"USER\"] and self.config[\"PASSWORD\"] and self.config[\"AUTH_DB\"]:\n authentication = \"{username}:{password}@\".format(\n username=quote_plus(self.config[\"USER\"]),\n password=quote_plus(self.config[\"PASSWORD\"])\n )\n domain = \"{host}:{port}/\".format(\n host=self.config[\"HOST\"],\n port=self.config[\"PORT\"]\n )\n param = \"?authSource={auth_db}\".format(\n auth_db=self.config[\"AUTH_DB\"]\n )\n url = \"\".join([url, authentication, domain, param])\n else:\n url = \"\".join([url, domain])\n return url\n\n\nclass AsyncPeewee(PooledMySQLDatabase):\n \"\"\"\n 异步MySQL连接\n peer_conn: host + port + database\n \"\"\"\n __conn = {}\n\n @staticmethod\n def init_db(config: dict) -> PooledMySQLDatabase:\n _config = dict(config)\n peer_conn = \"_\".join([\n _config[\"HOST\"], str(_config[\"PORT\"]), _config[\"DATABASE\"]])\n if not AsyncPeewee.__conn.get(peer_conn):\n _database = AsyncPeewee(\n database=_config[\"DATABASE\"],\n max_connections=_config['MAX_CONNECTIONS'],\n host=_config['HOST'],\n user=_config['USER'],\n password=_config[\"PASSWORD\"],\n 
port=_config['PORT']\n )\n AsyncPeewee.__conn[peer_conn] = _database\n logging.debug(f\"connect mysql {peer_conn} successful\")\n return AsyncPeewee.__conn[peer_conn]\n\n def execute_sql(self, sql, params=None, commit=True):\n try:\n return super(AsyncPeewee, self).execute_sql(sql, params, commit)\n except Exception as exc:\n if not isinstance(exc, (PeeweeException, Error)):\n raise exc\n logging.warning(\"will retry connect mysql\")\n if not self.is_closed():\n self.close()\n self.connect()\n\n return super(AsyncPeewee, self).execute_sql(sql, params, commit)\n\n\nclass AsyncPeeweeManager(Manager):\n \"\"\"\n peewee_async高级API\n \"\"\"\n\n async def get_or_none(self, source_, *args, **kwargs):\n try:\n return await self.get(source_, *args, **kwargs)\n except DoesNotExist:\n return\n\n async def execute_sql(self, sql: str, params: Union[None, tuple, list] = None):\n \"\"\"\n 使用:\n sql = 'select * from user where name = %s and gender = %s'\n params = ['好帅的人', 'm']\n\n res = await AsyncManager().execute_sql(sql, params)\n for r in res:\n print(r['id'])\n :param sql:\n :param params:\n :return:\n \"\"\"\n query = RawQuery(sql, params, _database=self.database)\n\n # for r in await self.execute(query):\n # yield RawQueryResult(r)\n return await self.execute(query)\n\n\nclass AsyncRedis:\n \"\"\"\n 异步Redis连接\n peer_conn: address + db\n \"\"\"\n __slots__ = (\n \"config\",\n \"client\",\n )\n __conn = {}\n\n def __init__(self, config: dict):\n self.config = dict(config)\n\n async def init_db(self) -> Redis:\n peer_conn = \"_\".join([\n self.config[\"ADDRESS\"], str(self.config[\"DB\"])])\n if self.__conn.get(peer_conn):\n self.client = self.__conn[peer_conn]\n else:\n default_config = dict(address='', db=None, password=None, ssl=None,\n encoding=None, commands_factory=Redis,\n minsize=1, maxsize=10, parser=None,\n timeout=None, pool_cls=None,\n connection_cls=None, loop=None)\n connect_config = copy.copy(default_config)\n for key in connect_config:\n if key.upper() in self.config:\n connect_config[key] = self.config.pop(key.upper())\n self.client = await aioredis.create_redis_pool(**connect_config)\n self.__conn[peer_conn] = self.client\n logging.debug(f\"connect redis {peer_conn} successful\")\n return self.__conn[peer_conn]\n\n\nclass SyncMongodb:\n __slots__ = (\n \"config\",\n \"client\",\n )\n __conn = {}\n\n def __init__(self, config: dict):\n self.config = dict(config)\n self.init_db()\n\n def init_db(self):\n host = self.config[\"HOST\"]\n port = self.config[\"PORT\"]\n user = self.config[\"USER\"]\n password = self.config[\"PASSWORD\"]\n auth_db = self.config[\"AUTH_DB\"]\n\n peer_conn = \"_\".join([host, str(port)])\n if user:\n peer_conn += \"_\" + user\n if self.__conn.get(peer_conn):\n self.client = self.__conn[peer_conn]\n return self.client\n\n url = \"mongodb://\"\n domain = \"{host}:{port}/\".format(\n host=host, port=port\n )\n if user and password and auth_db:\n authentication = \"{username}:{password}@\".format(\n username=quote_plus(user),\n password=quote_plus(password)\n )\n domain = \"{host}:{port}/\".format(\n host=host,\n port=port\n )\n param = \"?authSource={auth_db}\".format(\n auth_db=auth_db\n )\n url = \"\".join([url, authentication, domain, param])\n else:\n url = \"\".join([url, domain])\n\n self.client = pymongo.MongoClient(url, serverSelectionTimeoutMS=5000)\n logging.debug(f\"mongodb connect successful\")\n\n\nclass SyncPeewee(PooledMySQLDatabase):\n __conn = {}\n\n @staticmethod\n def init_db(config: dict) -> PooledMySQLDatabase:\n peer_db = \"_\".join([\n 
config[\"HOST\"], str(config[\"PORT\"]), config[\"DATABASE\"]])\n if not SyncPeewee.__conn.get(peer_db):\n SyncPeewee.__conn[peer_db] = SyncPeewee(\n database=config[\"DATABASE\"],\n max_connections=config['MAX_CONNECTIONS'],\n stale_timeout=config['TIMEOUT'],\n timeout=config['TIMEOUT'],\n host=config['HOST'],\n user=config['USER'],\n password=config[\"PASSWORD\"],\n port=config['PORT']\n )\n return SyncPeewee.__conn[peer_db]\n\n def execute_sql(self, sql, params=None, commit=True):\n try:\n return super(SyncPeewee, self).execute_sql(sql, params, commit)\n except Exception as exc:\n if not isinstance(exc, (PeeweeException, Error)):\n raise exc\n logging.warning(\"will retry connect mysql\")\n if not self.is_closed():\n self.close()\n self.connect()\n\n return super(SyncPeewee, self).execute_sql(sql, params, commit)\n\n\nclass SyncRedis:\n __slots__ = (\n \"config\",\n \"client\"\n )\n __conn = {}\n\n def __init__(self, config: dict):\n self.config = dict(config)\n self.init_db()\n\n def init_db(self):\n peer_db = \"_\".join([\n self.config[\"HOST\"], str(self.config[\"PORT\"]), str(self.config[\"DB\"])])\n if self.__conn.get(peer_db):\n self.client = self.__conn[peer_db]\n return self.client\n retry = 3\n self.client, i = None, 0\n default_config = dict(\n host='',\n port='',\n db='',\n decode_responses=True)\n connect_config = copy.copy(default_config)\n for key in connect_config:\n if key in self.config:\n connect_config[key] = self.config.pop(key)\n while i < retry:\n try:\n pool = redis.ConnectionPool(**connect_config)\n self.client = redis.Redis(connection_pool=pool)\n if self.client:\n logging.debug(f\"redis connect successful\")\n break\n else:\n logging.warning(\"第[%d]连接失败,继续\" % i)\n except BaseException:\n logging.error(traceback.format_exc())\n time.sleep(1)\n i += 1\n self.__conn[peer_db] = self.client\n return self.client\n","sub_path":"utils/dbClient.py","file_name":"dbClient.py","file_ext":"py","file_size_in_byte":10138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"465342602","text":"#!/usr/bin/env python\n# -*- coding:UTF_8 -*-\nimport subprocess,os,time,datetime\nfrom module.Constant import dt_str\nfrom module.interface.ReqInterface import requestInterface\nfrom utils.Utils import getFileNameInDir\n\nclass hbaseReq(requestInterface):\n ''' describe mysql request object'''\n def __init__(self, content):\n requestInterface.__init__(self, content)\n self.file_tag = content['db']\n self.src_file_dir = content['srcfiledir']\n self.caculate_dir = content['hdfs_caculate_dir']\n self.business_type = content['business_type']\n self.dt_tag = self.initDtTag() \n self.is_del = True if self.delStep > 0 else False\n self.src_file_table = self.initSrcFile()\n\n def getDelStep(self):\n return self.delStep\n\n def getDB(self):\n return self.file_tag\n\n def getDtTag(self):\n return self.dt_tag\n\n def getSrcFileTable(self):\n return self.src_file_table\n\n def getCaculateDir(self):\n return self.caculate_dir\n\n def initDtTag(self):\n if self.condition != '' or self.condition:\n file_dt_tag = self.condition\n else:\n file_dt_tag = dt_str\n return file_dt_tag\n \n def initSrcFile(self):\n _src_file_table = {}\n hbase_file_tar = os.path.join(self.src_file_dir,\".\".join([self.dt_tag,'tar']));\n hbase_file_tar_gz = os.path.join(self.src_file_dir,\".\".join([self.dt_tag,'tar.gz']));\n delFile = 'unknown'\n unZipFileDir = os.path.join(self.local_dir,self.dt_tag)\n if os.path.exists(hbase_file_tar_gz):\n cmd = 'zxvf'\n zip = 
hbase_file_tar_gz\n elif os.path.exists(hbase_file_tar):\n cmd = 'xvf'\n zip = hbase_file_tar\n if self.is_del:\n _del_date = (datetime.datetime(int(self.dt_tag[0:4]),int(self.dt_tag[4:6]),int(self.dt_tag[6:8]),0,0) - datetime.timedelta(days=self.delStep)).strftime('%Y%m%d')\n hbase_del_tar = os.path.join(self.src_file_dir,\".\".join([_del_date,'tar']));\n hbase_del_tar_gz = os.path.join(self.src_file_dir,\".\".join([_del_date,'tar.gz']));\n if os.path.exists(hbase_del_tar_gz):\n cmd = 'zxvf'\n delFile = hbase_del_tar_gz\n elif os.path.exists(hbase_del_tar):\n cmd = 'xvf'\n delFile = hbase_del_tar\n for server in self.ser_list:\n host_key = '_'.join([server[0], str(server[1])])\n _src_file_table[host_key] = {}\n _src_file_table[host_key]['zip'] = zip\n _src_file_table[host_key]['unzip'] = unZipFileDir\n _src_file_table[host_key]['cmd'] = cmd\n _src_file_table[host_key]['del'] = delFile \n return _src_file_table\n\n def getDelFileName(self, key):\n if key in self.src_file_table:\n return self.src_file_table[key]['del']\n\n def getCmd(self, key):\n if key in self.src_file_table:\n return self.src_file_table[key]['cmd']\n\n def getZipFileName(self, key):\n if key in self.src_file_table:\n return self.src_file_table[key]['zip']\n\n def getUnZipFileName(self, key):\n if key in self.src_file_table:\n return self.src_file_table[key]['unzip']\n\n def getBusinessType(self):\n return self.business_type\n\n def iterSerList(self):\n for server in self.ser_list:\n yield server\n\n def getSrcFileDir(self):\n return self.src_file_dir\n\n def __str__(self):\n string = 'ser_type: %s, fields: %s, business_type: %s, condition: %s, local_dir: %s, hdfs_dir: %s, file_num: %s, src_file_dir: %s, src_file_table: %s, caculate_dir: %s, db: %s' % \\\n (self.ser_type, self.fields, self.business_type, self.condition, self.local_dir, self.hdfs_dir, self.file_num, self.src_file_dir, self.src_file_table, self.caculate_dir, self.file_tag)\n return string\n","sub_path":"utils_updates/data_extractor_new/module/hbase/HbaseReqImpl.py","file_name":"HbaseReqImpl.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"77668156","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nN = 5\nAs = (20, 35, 30, 35, 27)\nBs = (25, 32, 34, 20, 25)\nerrorBarsA = (1, 6, 1, 6, 1)\nerrorBarsB = (6, 1, 6, 1, 6)\nX = [0, 1, 2, 3, 4]\nbarWidth = 0.75\n\n# matplotlib.pyplot.bar(x, height, width=0.8, bottom=None, *, align='center', data=None, **kwargs)[source]\n\np1 = plt.bar(X, As, barWidth, yerr=errorBarsA) # returns all the artists\np2 = plt.bar(X, Bs, barWidth, bottom=As, yerr=errorBarsB)\n\nplt.xlabel('X Axis')\nplt.ylabel('Y Axis')\nplt.title('Bar Chart')\nplt.xticks(X, ('G1', 'G2', 'G3', 'G4', 'G5'))\nplt.yticks([0,10,20,30,40,50,60,70,80])\nplt.legend((p1[0], p2[0]), ('A', 'B'))\nplt.show()","sub_path":"Python3/src/13 Scientific Python/Matplotlib/basic/08_BarPlots.py","file_name":"08_BarPlots.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"399907427","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 1 02:24:36 2015\n\n@author: mjm\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN = 11\n\nmeans = (26.47, 34.69, 4.22, 33.59, 16.64, 44.19, 13.50, 3.52, 20.30, 26.46, 24.81)\n\nspot = np.arange(N)\nwidth = 0.3\n\nfig, ax = plt.subplots()\ncolumns = ax.bar(spot, means, width)\n\nax.set_ylabel('% Lexical 
Density')\nax.set_title('Lexical Density by Category (Revised)')\nax.set_xticks(spot + width)\nax.set_xticklabels(('acad', 'anime', 'beer', 'bitcn', 'budhsm', 'chem', 'chess', 'coffee', 'fit', 'phil', 'puzz'))\n\ndef autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n '%d' % int(height),\n ha='center', va='bottom')\n\nautolabel(columns)\n\nplt.show()\n","sub_path":"lexvar_plot.py","file_name":"lexvar_plot.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"18698292","text":"import numpy as np\n\ndef measure(state):\n \"\"\"Measures the qubit state in the z-basis. Returns the measurement outcome (1 or -1)\"\"\"\n prob_1 = abs(state[0])**2 #calculates probability of measurement outcome of one\n comparison_for_prob=np.random.rand()\n #compares a random number to the probability of outcome of one. Iff number is smaller, return one.\n if prob_1>comparison_for_prob: \n outcome=1\n else:\n outcome=-1\n return(outcome)","sub_path":"measurement.py","file_name":"measurement.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"540695296","text":"import wx\n \ndef onButton(event):\n print(\"Button pressed.\")\n \ndef openfile(string):\n\tapp = wx.App()\n \n\tframe = wx.Frame(None, -1, 'win.py')\n\tframe.SetDimensions(0,0,200,50)\n \n\t# Create open file dialog\n\topenFileDialog = wx.FileDialog(frame, string, \"\", \"\", \n \t \"CSV Files (*.csv)|*.csv\", \n \t wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\n \n\topenFileDialog.ShowModal()\n\tprint(\"\")\n\tprint(\"\")\n\tprint(\"Chosen\", string, \"location :\", openFileDialog.GetPath())\n\tprint(\"\")\n\tprint(\"\")\n\ts = openFileDialog.GetPath()\n\topenFileDialog.Destroy()\n\treturn s","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"518424936","text":"#!/usr/bin/env python\n\n# Copyright 2016 Nitor Creations Oy\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nimport sys\nimport aws_infra_util\nimport os\nimport tempfile\nimport collections\nimport time\nimport datetime\nimport json\n\ndef deploy(stack_name, yaml_template, region):\n\n # Disable buffering, from http://stackoverflow.com/questions/107705/disable-output-buffering\n sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)\n\n # Get AMI metadata\n ami_id = os.getenv('paramAmi', '')\n if ami_id != \"\":\n describe_ami_command = [ \"aws\", \"ec2\", \"describe-images\", \"--region\", region, \"--image-ids\", ami_id ]\n print(\"Checking AMI \" + ami_id + \" metadata: \" + str(describe_ami_command))\n p = subprocess.Popen(describe_ami_command,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n output = p.communicate()\n if p.returncode:\n sys.exit(\"Failed to 
retrieve ami metadata for \" + ami_id)\n\n ami_meta = aws_infra_util.json_load(output[0])\n print(\"Result: \" + aws_infra_util.json_save(ami_meta))\n os.environ[\"paramAmiName\"] = ami_meta['Images'][0]['Name']\n os.environ[\"paramAmiCreated\"] = ami_meta['Images'][0]['CreationDate']\n\n print(\"\\n\\n**** Deploying stack '\" + stack_name + \"' with template '\" + yaml_template + \"' and ami_id '\" + ami_id + \"'\")\n\n # Load yaml template and import scripts and patch userdata with metadata hash & params\n\n template_doc = aws_infra_util.yaml_load(open(yaml_template))\n template_doc = aws_infra_util.import_scripts(template_doc, yaml_template)\n aws_infra_util.patch_launchconf_userdata_with_metadata_hash_and_params(template_doc)\n\n if \"Parameters\" not in template_doc:\n template_doc['Parameters'] = []\n template_parameters = template_doc['Parameters']\n if (not \"paramAmiName\" in template_parameters):\n template_parameters['paramAmiName'] = collections.OrderedDict([(\"Description\", \"AMI Name\"), (\"Type\", \"String\"), (\"Default\", \"\")])\n if (not \"paramAmiCreated\" in template_parameters):\n template_parameters['paramAmiCreated'] = collections.OrderedDict([(\"Description\", \"AMI Creation Date\"), (\"Type\", \"String\"), (\"Default\", \"\")])\n\n json_template = aws_infra_util.json_save(template_doc)\n json_small = aws_infra_util.json_save_small(template_doc)\n\n # save result\n\n print(\"** Final template:\")\n print(json_template)\n print(\"\")\n\n tmp = tempfile.NamedTemporaryFile(delete=False)\n tmp.write(json_small)\n tmp.close()\n\n # Load previous stack information to see if it has been deployed before\n\n describe_stack_command = [ 'aws', 'cloudformation', 'describe-stacks', \"--region\", region, '--stack-name', stack_name ]\n print(\"Checking for previous stack info: \" + str(describe_stack_command))\n p = subprocess.Popen(describe_stack_command,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n output = p.communicate()\n if p.returncode:\n if (not output[1].endswith(\"does not exist\\n\")):\n sys.exit(\"Failed to retrieve old stack for \" + stack_name + \": \" + output[1])\n stack_oper = 'create-stack'\n else:\n stack_oper = 'update-stack'\n\n # Dump original status, for the record\n\n stack_info = aws_infra_util.json_load(output[0])\n status = stack_info['Stacks'][0]['StackStatus']\n print(\"Status: \" + status)\n\n # Create/update stack\n\n params_doc = []\n for key in template_parameters.keys():\n if (key in os.environ):\n val = os.environ[key]\n print(\"Parameter \" + key + \": using \\033[32;1mCUSTOM value \" + val + \"\\033[m\")\n params_doc.append({ 'ParameterKey': key, 'ParameterValue': val })\n else:\n val = template_parameters[key]['Default']\n print(\"Parameter \" + key + \": using default value \" + str(val))\n\n stack_command = \\\n ['aws', 'cloudformation', stack_oper, \"--region\", region, '--stack-name',\n stack_name,\n '--template-body',\n 'file://' + tmp.name,\n '--capabilities',\n 'CAPABILITY_IAM',\n '--parameters',\n aws_infra_util.json_save(params_doc)\n ]\n\n currentTimeInCloudWatchFormat = datetime.datetime.utcnow().strftime(\"%FT%H%%253A%M%%253A%SZ\")\n\n print(stack_oper + \": \" + str(stack_command))\n p = subprocess.Popen(stack_command,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n output = p.communicate()\n os.remove(tmp.name)\n if p.returncode:\n sys.exit(stack_oper + \" failed: \" + output[1])\n\n print(output[0])\n\n # Wait for create/update to complete\n\n cloudWatchNotice = 
\"\\nCloudWatch url: https://console.aws.amazon.com/cloudwatch/home#logEvent:group=instanceDeployment;stream=\" + stack_name + \";start=\" + currentTimeInCloudWatchFormat + \"\\n\"\n print(cloudWatchNotice)\n\n print(\"Waiting for \" + stack_oper + \" to complete:\")\n while (True):\n p = subprocess.Popen(describe_stack_command,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n output = p.communicate()\n if p.returncode:\n sys.exit(\"Describe stack failed: \" + output[1])\n\n stack_info = aws_infra_util.json_load(output[0])\n status = stack_info['Stacks'][0]['StackStatus']\n print(\"Status: \" + status)\n if (not status.endswith(\"_IN_PROGRESS\")):\n break\n\n time.sleep(5)\n\n print(cloudWatchNotice)\n\n if ((stack_oper == \"create-stack\" and status != \"CREATE_COMPLETE\") or (stack_oper == \"update-stack\" and status != \"UPDATE_COMPLETE\")):\n sys.exit(stack_oper + \" failed: end state \" + status)\n\n print(\"Done!\")\n\nif __name__ == '__main__':\n if len(sys.argv) != 4:\n sys.exit(\"Usage: deploy.py stack_name yaml_template region\\nParameters taken from environment as-is, missing parameters use defaults from template\")\n deploy(sys.argv[1], sys.argv[2], sys.argv[3])\n","sub_path":"opt/nitor/cloudformation-update-stack.py","file_name":"cloudformation-update-stack.py","file_ext":"py","file_size_in_byte":6562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"171810769","text":"# shapes.py\n#\n# MQTT message format: x,y,z,rotX,rotY,rotZ,rotW,scaleX,scaleY,scaleZ,#colorhex,on/off\n\nimport socket,threading,SocketServer,time,random,os,sys\nimport paho.mqtt.publish as publish\n\nHOST=\"oz.andrew.cmu.edu\"\nTOPIC=\"/topic/transCubes\"\n\ndef randmove():\n rando=random.random() * 10 - 5\n return rando\n\ndef rando(val):\n rando=random.random() * val\n return str(\"{0:0.3f}\".format(rando))\n\ndef randrot():\n return str(\"{0:0.3f}\".format(random.random() * 2 - 1))\n\ndef unhex(a):\n return int(a, 16)\n\ndef randgold():\n return \"FF\"+ format(random.randint(128, 208), 'x')+ \"00\"\n\ndef randblue():\n return \"0000\" + format(random.randint(128, 255), 'x')\n\ndef randred():\n return format(random.randint(128, 255), 'x') + \"0000\"\n\ndef randcolor():\n rando=random.random()\n if rando < 0.2:\n return randgold()\n if rando < 0.4:\n return randblue()\n if rando < 0.6:\n return randgold()\n if rando < 0.8:\n return randblue()\n return randred()\n\ndef randobj():\n rando=random.random()\n if rando < 0.2:\n return \"cylinder\"\n if rando < 0.4:\n return \"sphere\"\n if rando < 0.6:\n return \"cube\"\n if rando < 0.8:\n return \"quad\"\n return \"cube\"\n\ndef do(name, randx, randy, randz, scalex, scaley, scalez, color):\n MESSAGE= name+\",\"+\"{0:0.3f}\".format(randx)+','+\"{0:0.3f}\".format(randy)+','+\"{0:0.3f}\".format(randz)+\",0,0,0,0,\"+scalex+\",\"+scaley+\",\"+scalez+\",#\"+color\n messages.append(MESSAGE)\n print(MESSAGE)\n publish.single(TOPIC+'/'+name, MESSAGE+\",on\", hostname=HOST, retain=False)\n publish.single(TOPIC+'/'+name+\"/material\", \"transparent: true; opacity: 0.5\", hostname=HOST, retain=False)\n\nmessages = []\ncounter=0\nwhile (True):\n name = \"cube_\"+str(counter)\n counter+=1\n randx = randmove()\n randy = randmove()\n randz = randmove()\n scalex = rando(8)\n scaley = rando(1)\n scalez = rando(4)\n color = randcolor()\n do(name, randx, randy, randz, scalex, scaley, scalez, color)\n do(name+'a', -randx, randy, randz, scalex, scaley, scalez, color)\n\n randx = randmove()\n randy = 
randmove()\n randz = randmove()\n scalex = rando(1)\n scaley = rando(8)\n scalez = rando(4)\n color = randcolor()\n do(name+'b', randx, -randy, randz, scalex, scaley, scalez, color)\n do(name+'c', -randx, -randy, randz, scalex, scaley, scalez, color)\n\n #os.system(\"mosquitto_pub -h \" + HOST + \" -t \" + TOPIC + \"/\" + name + \" -m \" + MESSAGE + \" -r\");\n if (len(messages) >= 100):\n pop=messages.pop(0)\n splits=pop.split(',')\n name=splits[0]\n publish.single(TOPIC+'/'+name, \"\",hostname=HOST, retain=False)\n pop=messages.pop(0)\n splits=pop.split(',')\n name=splits[0]\n publish.single(TOPIC+'/'+name, \"\",hostname=HOST, retain=False)\n pop=messages.pop(0)\n splits=pop.split(',')\n name=splits[0]\n publish.single(TOPIC+'/'+name, \"\",hostname=HOST, retain=False)\n pop=messages.pop(0)\n splits=pop.split(',')\n name=splits[0]\n publish.single(TOPIC+'/'+name, \"\",hostname=HOST, retain=False)\n time.sleep(0.1)\n","sub_path":"transCubes.py","file_name":"transCubes.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"385560015","text":"import numpy as np\r\ndef plot_training(training_losses,\r\n validation_losses,\r\n learning_rate,\r\n gaussian=True,\r\n sigma=2,\r\n figsize=(8, 6)\r\n ):\r\n \"\"\"\r\n Returns a loss plot with training loss, validation loss and learning rate.\r\n \"\"\"\r\n\r\n import matplotlib.pyplot as plt\r\n from matplotlib import gridspec\r\n from scipy.ndimage import gaussian_filter\r\n\r\n list_len = len(training_losses)\r\n x_range = list(range(1, list_len + 1)) # number of x values\r\n\r\n fig = plt.figure(figsize=figsize)\r\n grid = gridspec.GridSpec(ncols=2, nrows=1, figure=fig)\r\n\r\n subfig1 = fig.add_subplot(grid[0, 0])\r\n subfig2 = fig.add_subplot(grid[0, 1])\r\n\r\n subfigures = fig.get_axes()\r\n\r\n for i, subfig in enumerate(subfigures, start=1):\r\n subfig.spines['top'].set_visible(False)\r\n subfig.spines['right'].set_visible(False)\r\n\r\n if gaussian:\r\n training_losses_gauss = gaussian_filter(training_losses, sigma=sigma)\r\n validation_losses_gauss = gaussian_filter(validation_losses, sigma=sigma)\r\n\r\n linestyle_original = '.'\r\n color_original_train = 'lightcoral'\r\n color_original_valid = 'lightgreen'\r\n color_smooth_train = 'red'\r\n color_smooth_valid = 'green'\r\n alpha = 0.25\r\n else:\r\n linestyle_original = '-'\r\n color_original_train = 'red'\r\n color_original_valid = 'green'\r\n alpha = 1.0\r\n\r\n # Subfig 1\r\n subfig1.plot(x_range, training_losses, linestyle_original, color=color_original_train, label='Training',\r\n alpha=alpha)\r\n subfig1.plot(x_range, validation_losses, linestyle_original, color=color_original_valid, label='Validation',\r\n alpha=alpha)\r\n if gaussian:\r\n subfig1.plot(x_range, training_losses_gauss, '-', color=color_smooth_train, label='Training', alpha=0.75)\r\n subfig1.plot(x_range, validation_losses_gauss, '-', color=color_smooth_valid, label='Validation', alpha=0.75)\r\n subfig1.title.set_text('Training & validation loss')\r\n subfig1.set_xlabel('Epoch')\r\n subfig1.set_ylabel('Loss')\r\n\r\n subfig1.legend(loc='upper right')\r\n\r\n # Subfig 2\r\n subfig2.plot(x_range, learning_rate, color='black')\r\n subfig2.title.set_text('Learning rate')\r\n subfig2.set_xlabel('Epoch')\r\n subfig2.set_ylabel('LR')\r\n\r\n return fig\r\n\r\ndef show_input_target_pair_napari(gen_training, gen_validation=None):\r\n \"\"\"\r\n Press 't' to get a random sample of the next training batch.\r\n Press 'v' to get a 
random sample of the next validation batch.\r\n \"\"\"\r\n # Batch\r\n x, y = next(iter(gen_training))\r\n\r\n # Napari\r\n import napari\r\n with napari.gui_qt():\r\n viewer = napari.Viewer()\r\n img = viewer.add_image(x, name='input_training')\r\n tar = viewer.add_labels(y, name='target_training')\r\n\r\n @viewer.bind_key('t')\r\n def next_batch_training(viewer):\r\n x, y = next(iter(gen_training))\r\n img.data = x\r\n tar.data = y\r\n img.name = 'input_training'\r\n tar.name = 'target_training'\r\n\r\n if gen_validation:\r\n @viewer.bind_key('v')\r\n def next_batch_validation(viewer):\r\n x, y = next(iter(gen_validation))\r\n img.data = x\r\n tar.data = y\r\n img.name = 'input_validation'\r\n tar.name = 'target_validation'\r\n\r\n return viewer\r\n\r\n\r\nclass Input_Target_Pair_Generator:\r\n def __init__(self,\r\n dataloader,\r\n re_normalize=True,\r\n rgb=False,\r\n ):\r\n self.dataloader = dataloader\r\n self.re_normalize = re_normalize\r\n self.rgb = rgb\r\n\r\n def __iter__(self):\r\n return self\r\n\r\n def __next__(self):\r\n x, y = next(iter(self.dataloader))\r\n x, y = x.cpu().numpy(), y.cpu().numpy() # make sure it's a numpy.ndarray on the cpu\r\n\r\n # Batch\r\n batch_size = x.shape[0]\r\n rand_num = np.random.randint(low=0, high=batch_size)\r\n x, y = x[rand_num], y[rand_num] # Pick a random image from the batch\r\n\r\n # RGB\r\n if self.rgb:\r\n x = np.moveaxis(x, source=0, destination=-1) # from [C, H, W] to [H, W, C]\r\n\r\n # Re-normalize\r\n if self.re_normalize:\r\n from transformations import re_normalize\r\n x = re_normalize(x)\r\n\r\n return x, y\r\n","sub_path":"unet/src/visual.py","file_name":"visual.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"220927049","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 9 22:02:34 2021\n\n@author: dinhm\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\n\nclass tsp():\n 'This method reads file, initializes city names and their corresponded latitudes and longitudes, the distance array, and the tour array'\n def __init__(self, fileName):\n self.fileName = open(fileName)\n \n self.numCities = int(self.fileName.readline()) #number of cities\n self.cityNames = [] #initalizing city names\n self.latitudes = [] #initalzing latitudes\n self.longitudes = []\n \n self.distances = np.zeros((self.numCities, self.numCities))\n self.tour = np.zeros(self.numCities + 1, dtype='int')\n \n for row in self.fileName:\n column = row.strip().split(',')\n \n self.cityNames.append(column[0])\n self.latitudes.append(float(column[1]))\n self.longitudes.append(float(column[2]))\n \n self.cityNames = np.array(self.cityNames)\n self.latitudes = np.array(self.latitudes)\n self.longitudes = np.array(self.longitudes)\n \n # print(self.cityNames)\n \n 'this method computes the distances between any cities and put them in a 2D numpy array'\n def computeDistances(self):\n self.radius = 6356.752\n self.convertedLat = np.radians(self.latitudes)\n self.convertedLon = np.radians(self.longitudes)\n \n for i in range(self.numCities):\n for j in range(self.numCities): \n self.distances[i,j] += 2 * self.radius * np.arcsin( np.sqrt( np.abs( (np.sin((self.convertedLat[i] - self.convertedLat[j])/2))**2 + ( np.cos(self.convertedLat[i]) * np.cos(self.convertedLat[j]) * (np.sin((self.convertedLon[i] - self.convertedLon[j])/2) )**2 ))))\n \n '''This method takes the number of a city that user wants to start in \n 
then from there compute the shortest distance that goes through every other city, marking each city off the list once it is visited;\n    finally the method adds up the total distance of the tour and returns it'''\n    def computeGreedyTour(self, startCityNumber):\n        self.startCityNumber = startCityNumber\n        self.tour[0] = startCityNumber #set a starting city\n        self.tour[self.numCities] = startCityNumber #coming back to the starting city\n        \n        self.unvisitedCity = [cityNumber for cityNumber in range(self.numCities)] #list of unvisited cities\n        \n        self.tourDist = 0 #initializing the total distance, accumulated below\n        \n        for i in range(self.numCities-1):\n            shortestDist = np.inf #sentinel value, guaranteed larger than any real distance\n            self.unvisitedCity.remove(self.tour[i]) #remove the visited city\n            \n            for city in self.unvisitedCity:\n                if self.distances[self.tour[i], city] < shortestDist:\n                    shortestDist = self.distances[self.tour[i], city]\n                    goToCity = city \n            \n            self.tourDist += shortestDist\n            self.tour[i+1] = goToCity \n        self.tourDist += self.distances[self.tour[self.numCities-1], self.tour[self.numCities]] #add the final leg back to the starting city\n        return self.tourDist\n    \n    'This method prints out the 2D distance array'\n    def printDistances(self):\n        print('Distance array: ')\n        for row in self.distances: \n            for i in range(len(row)): \n                print('{:7.2f}'.format(row[i]), end = ' ')\n            print('') \n        print('')\n        \n        #print(self.distances)\n    \"this method prints out the city's number, name, latitude, and longitude\" \n    def printCityInformation(self):\n        print('{} {:15} {:11} {}'.format('City', '# City', 'Latitude', 'Longitude')) \n        for i in range(self.numCities):\n            print('{:6} {:14}{:8.4f}{:12.4f}'.format(i, self.cityNames[i], self.latitudes[i], self.longitudes[i]))\n        \n        print('') \n        \n    'this method collects the names of the cities in the tour and returns them as a list' \n    def getTour(self): \n        getCityTour = []\n        for cityNum in self.tour:\n            getCityTour.append(self.cityNames[cityNum]) \n        return getCityTour\n    \n    'this method prints the tour with the city names and their corresponding numbers'\n    def printTour(self):\n        getCityTour = self.getTour()\n        print('Tour constructed: ') \n        i = 0\n        while i < len(getCityTour) - 1:\n            print('{}({})'.format(getCityTour[i], self.tour[i]), end = '-->')\n            i +=1\n        print('{}({})'.format(getCityTour[0], self.tour[0]))\n        print('')\n        tourLength = self.getTourDistance()\n        print('Length of tour constructed = {} km.'.format(tourLength))\n    \n    \n    'this method gets the total distance of the tour in kilometers' \n    def getTourDistance(self):\n        tourLen = '{:.2f}'.format(self.computeGreedyTour(self.startCityNumber))\n        return tourLen\n    \n    'this method plots the tour'\n    def plot(self):\n        figure(num=None, figsize=(9,6), dpi=80, facecolor='w', edgecolor='k')\n        # plt.plot(self.longitudes, self.latitudes,'bo-') #blue lines joining points\n        tourLon = []\n        tourLat = []\n        \n        for city in self.tour:\n            tourLon.append(self.longitudes[city])\n            tourLat.append(self.latitudes[city])\n        \n        plt.plot(tourLon, tourLat,'bo-') #blue lines joining points\n        \n        points = zip(self.longitudes,self.latitudes)\n        \n        i = 0 \n        for a,b in points:\n            plt.annotate((self.cityNames[i] + '({})'.format(i)), #label\n                         (a,b), #point\n                         textcoords=\"offset points\", # how to position the text\n                         xytext=(0,10), # distance from text to points (x,y)\n                         ha='center') # horizontal alignment can be left, right or center \n            i += 1\n        \n        plt.xlabel('Longitude')\n        plt.ylabel('Latitude')\n        plt.show()","sub_path":"Travelling salesman 
problem/TSP.py","file_name":"TSP.py","file_ext":"py","file_size_in_byte":6208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"355526259","text":"\"\"\"\n@author: li kunlun\n@time: 2020-10-20\n@description: handling missing data\n\"\"\"\nimport pandas as pd\nimport numpy as np\n\ndates = pd.date_range(\"20130101\", periods=3)\ndf = pd.DataFrame(np.arange(12).reshape(3, 4), index=dates, columns=['A', 'B', 'C', 'D'])\ndf.iloc[0, 0] = np.nan\ndf.iloc[0, 1] = np.nan\n# print(df)\n# Filter the missing data out of the DataFrame.\n# how='any' drops a row if it contains any NaN value\n# how='all' only drops a row when every value in it is NaN.\n# print(df.dropna(axis=0, how='all'))\n# Replace NaN values with 0\nprint(df.fillna(value=0))\n","sub_path":"src/com/xiaolun/chapter01/module02_pandas/python04_handle_missing_datas.py","file_name":"python04_handle_missing_datas.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"396733720","text":"\"\"\"# Functions | Assignment-1 - Paying Debt off in a Year\n\n#\"\"\"\n\ndef pay_debt_in_year(blc_1, annual_interest_rate, monthly_payment_rate):\n    \"\"\"Make the minimum payment each month for a year and return the remaining balance.\"\"\"\n    for i in range(1, 13):\n        m_int_rate = annual_interest_rate / 12.0\n        min_month_pay = monthly_payment_rate * blc_1\n        mnth_unpaid_bal = blc_1 - min_month_pay\n        up_bal_each_month = mnth_unpaid_bal + m_int_rate * mnth_unpaid_bal\n        blc_1 = up_bal_each_month\n    return round(blc_1, 2)\n\n\ndef main():\n    \"\"\"main\"\"\"\n    data = input()\n    data = data.split(' ')\n    data = list(map(float, data))\n    print(\"Remaining balance:\", pay_debt_in_year(data[0], data[1], data[2]))\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"m7/p1/Functions - Assignment-1/assignment1.py","file_name":"assignment1.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"305335263","text":"#!/usr/bin/env python3\n\nimport unittest\n\nfrom norlinter.rule.ReplaceNewWithAllocInit import ReplaceNewWithAllocInit\n\nclass TestReplaceNewWithAllocInit(unittest.TestCase):\n\n    def setUp(self):\n        self.subject = ReplaceNewWithAllocInit()\n\n    def test_when_object_by_itself(self):\n        subject = \"\"\"\n        MyObject *myvar = [MyObject new];\n        \"\"\"\n        expected = \"\"\"\n        MyObject *myvar = [[MyObject alloc] init];\n        \"\"\"\n        result = self.subject.execute(subject)\n        self.assertEqual(result.getSource(), expected)\n        self.assertEqual(result.getNumberOfExceptions(), 1)\n        exception = result.getExceptions()[0]\n        self.assertEqual(exception.getLineNumber(), 1)\n        self.assertEqual(exception.getDescription(), \"Always use 'alloc init'\")\n        self.assertEqual(exception.getLine(), \"        MyObject *myvar = [MyObject new];\")\n\n    def test_when_object_is_in_method(self):\n        subject = \"\"\"\n        MyObject *myvar = [[MyObject alloc] initWithAnother:[AnotherObject new] another2:[AnotherObject new]];\n        \"\"\"\n        expected = \"\"\"\n        MyObject *myvar = [[MyObject alloc] initWithAnother:[[AnotherObject alloc] init] another2:[[AnotherObject alloc] init]];\n        \"\"\"\n        result = self.subject.execute(subject)\n        self.assertEqual(result.getSource(), expected)\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"norlinter/specs/test_ReplaceNewWithAllocInit.py","file_name":"test_ReplaceNewWithAllocInit.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"423451218","text":"import base64\nimport json\nimport 
requests\nimport datetime\nimport logging\nimport os\nimport csv\nimport yaml\nimport sys\nimport argparse\nimport urllib3\nimport re\ntry:\n    from jira.client import JIRA\n    from jira.client import GreenHopper\nexcept ImportError:\n    print(\"Error: please install the jira package first\")\n    sys.exit(1)\n\n# Testrail variables\nPROJECT_FFV = 1\nTEST_SUITE_ATOM = 13\nTEST_SUITE_UEFI = 9\nTEST_SUITE_BMC = 2\nTEST_SUITE_DAE_ATOM = 1478\nTEST_SUITE_DAE_BMC = 1477\nCI_JIRA_URL = 'https://jira.cec.lab.emc.com:443'\ntestrail_url = r'https://psetrprd001.corp.emc.com/testrail'\n\n\"\"\"\n.testrail_jira.conf format\ninfrasim@infrasim:~$ cat .testrail_jira.conf\njira:\n    url: https://jira.cec.lab.emc.com:8443\n    user_name: 111111\n    user_key: xxxxxxxxx\n\"\"\"\nurllib3.disable_warnings()\n\n\ndef get_jira_config():\n    \"\"\"\n    get the jira config from the config file: url, user name, and password\n    \"\"\"\n    jira_config = {}\n    conf_path = '{}/.testrail_jira.conf'.format(os.path.expanduser('~'))\n    if os.path.isfile(conf_path):\n        with open(conf_path, 'r') as f:\n            config = yaml.safe_load(f)\n    else:\n        print(\"No testrail_jira config file.\")\n        sys.exit(1)\n\n    jira_config['url'] = config['jira'].get('url')\n    jira_config['user'] = config['jira'].get('user_name')\n    jira_config['pwd'] = config['jira'].get('user_key')\n    return jira_config\n\n\nfile_path = '{}/testrail_jira.log'.format(os.path.expanduser('~'))\nlogging.basicConfig(level=logging.DEBUG,\n                    filename=file_path)\n\njira_config = get_jira_config()\njira_pwd = (base64.b64decode(jira_config['pwd'])).decode('utf-8')\nmyjira = JIRA(\n    jira_config['url'],\n    basic_auth=(jira_config['user'], (base64.b64decode(jira_config['pwd'])).decode('utf-8')),\n    logging=True,\n    validate=True,\n    async_=True,\n    async_workers=20,\n    options={'verify': False},\n)\n\ngreenhopper = GreenHopper(\n    options={'server': CI_JIRA_URL, 'verify': False},\n    basic_auth=(jira_config['user'], jira_pwd)\n)\n\n\nclass Testclient(object):\n    \"\"\"\n    testrail client: sends requests and fetches info and data\n    \"\"\"\n    def __init__(self, base_url):\n        self.user = 'atom.dev@emc.com'\n        self.password = '111111'\n        if not base_url.endswith('/'):\n            base_url += '/'\n        self.__url = base_url + 'index.php?/api/v2/'\n\n    def send_get(self, uri, filepath=None):\n        \"\"\"Issue a GET request (read) against the API.\n\n        Args:\n            uri: The API method to call including parameters, e.g. get_case/1.\n            filepath: The path and file name for attachment download; used only\n                for 'get_attachment/:attachment_id'.\n\n        Returns:\n            A dict containing the result of the request.\n        \"\"\"\n        return self.__send_request('GET', uri, filepath)\n\n    def get_cases(self, project_id, case_filter=None):\n        rest_uri = 'get_cases/{}{}'.format(project_id, case_filter or '')\n        return self.send_get(rest_uri)\n\n    def get_case(self, case_id):\n        rest_uri = 'get_case/{}'.format(case_id)\n        return self.send_get(rest_uri)\n\n    def send_post(self, uri, data):\n        \"\"\"Issue a POST request (write) against the API.\n\n        Args:\n            uri: The API method to call, including parameters, e.g. add_case/1.\n            data: The data to submit as part of the request as a dict; strings\n                must be UTF-8 encoded. 
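The dict is serialized with json.dumps by __send_request before the request is sent.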
If adding an attachment, must be the\n path to the file.\n\n Returns:\n A dict containing the result of the request.\n \"\"\"\n return self.__send_request('POST', uri, data)\n\n def __send_request(self, method, uri, data):\n url = self.__url + uri\n logging.debug(url)\n if sys.version_info[0] < 3:\n auth = base64.b64encode('%s:%s' % (self.user, self.password))\n payload = bytes(json.dumps(data))\n else:\n auth = str(\n base64.b64encode(\n bytes('%s:%s' % (self.user, self.password), 'utf-8')\n ),\n 'ascii'\n ).strip()\n payload = bytes(json.dumps(data), 'utf-8')\n headers = {'Authorization': 'Basic ' + auth}\n\n if method == 'POST':\n if uri[:14] == 'add_attachment': # add_attachment API method\n files = {'attachment': (open(data, 'rb'))}\n response = requests.post(url, headers=headers, files=files, verify=False)\n files['attachment'].close()\n else:\n headers['Content-Type'] = 'application/json'\n response = requests.post(url, headers=headers, data=payload, verify=False)\n else:\n headers['Content-Type'] = 'application/json'\n response = requests.get(url, headers=headers, verify=False)\n\n if response.status_code > 201:\n try:\n error = response.json()\n except Exception: # response.content not formatted as JSON\n error = str(response.content)\n raise Exception('TestRail API returned HTTP %s (%s)' % (response.status_code, error))\n else:\n if uri[:15] == 'get_attachment/': # Expecting file, not JSON\n try:\n open(data, 'wb').write(response.content)\n return (data)\n except Exception:\n return (\"Error saving attachment.\")\n else:\n return response.json()\n\n\ndef same_time_check(test_createtime, expect_date):\n \"\"\"\n check if test_createtime is the same with expect date\n \"\"\"\n timestamp = datetime.datetime.fromtimestamp(test_createtime)\n if timestamp.date() == expect_date:\n return True\n else:\n return False\n\n\ndef get_suite_mapping_issue_info(suite, tag=None):\n \"\"\"\n bmc epic bmc :ATOM-3361\n uefi epic bios: ATOM-3360\n add dae_atom to epic ATOM-4496: Fornax Adaption\n add dae_bmc to epic ATOM-4581: DAE New Case\n DAE New Case - Redfish :ATOM-5157\n DAE New Case - SES: ATOM-5158\n tag: redfish:[6], Ses : [7]\n \"\"\"\n issue_info = {}\n if suite == TEST_SUITE_DAE_ATOM:\n issue_info['epic'] = 'ATOM-4496'\n issue_info['components'] = 'DAE script'\n elif suite == TEST_SUITE_DAE_BMC:\n issue_info['components'] = 'DAE script'\n if tag == [6]:\n issue_info['epic'] = 'ATOM-5157'\n elif tag == [7]:\n issue_info['epic'] = 'ATOM-5158'\n else:\n issue_info['epic'] = 'ATOM-4581'\n elif suite == TEST_SUITE_UEFI:\n issue_info['components'] = 'BIOS Script'\n issue_info['epic'] = 'ATOM-3360'\n elif suite == TEST_SUITE_BMC:\n issue_info['epic'] = 'ATOM-3466'\n issue_info['components'] = 'BMC Script'\n else:\n raise Exception('now we do not support suite :{}'.format(suite))\n return issue_info\n\n\ndef get_case_id_from_issue(issue_info):\n \"\"\"\n get case id from issue info\n \"\"\"\n line = issue_info.fields.summary\n searchObj = re.search('\\[C([0-9]{2,20})\\]', line, re.M|re.I)\n if searchObj:\n return searchObj.group(1)\n else:\n return None\n\n\ndef check_case_create_issue(case, timestamp, dayissues):\n \"\"\"\n 1: filter cases and find if there is case's created time match\n 2: check if case could automatable , automate value :\n unknown:1 No:2 Yes:3\n 3: check duplicate\n \"\"\"\n\n if not (same_time_check(case['created_on'], timestamp) or\n same_time_check(case['updated_on'], timestamp)):\n return False\n if not case['custom_ffv_automatable'] == 3:\n return False\n for issue in 
dayissues:\n if str(case['id']) == get_case_id_from_issue(issue):\n return False\n # csum = '[C{}]-{}'.format(case['id'], case['title'])\n dup_check = \"project = atom and summary ~ C{}\".format(case['id'])\n sameissue = get_issues_by_jql(dup_check)\n # import pdb\n # pdb.set_trace()\n for issue in sameissue:\n if str(case['id']) == get_case_id_from_issue(issue):\n return False\n return True\n\n\ndef check_new_case_create_issue(cases, timestamp, suite, issuesdata):\n \"\"\"\n filter cases and find if there is case's created time match\n add dae_atom to epic ATOM-4496: Fornax Adaption\n add dae_bmc to epic ATOM-4581: DAE New Case\n check if case could automatable , automate value :\n unknown:1 No:2 Yes:3\n \"\"\"\n for case in cases:\n case_tr_id = case['id']\n if check_case_create_issue(case, timestamp, issuesdata):\n try:\n summary = '[C{}]-{}'.format(case['id'], case['title'])\n logging.debug(summary)\n description = case.get('custom_preconds')\n if not description:\n description = 'test case script'\n case_tag = case.get('custom_ffv_cpu_specific')\n issue_info = get_suite_mapping_issue_info(suite, case_tag)\n issue_dict_info = {\n 'project': {'key': 'ATOM'},\n 'summary': summary,\n 'description': description,\n 'issuetype': {'name': 'Feature'},\n 'customfield_10006': 2,\n 'components': [{'name': issue_info['components']}],\n 'labels': ['atom', 'autogenerate'],\n }\n new_issue = myjira.create_issue(fields=issue_dict_info)\n logging.debug('case {} : create jira story success {}'.format(case_tr_id, new_issue.key))\n write_isssue_info_to_csv(new_issue, timestamp.strftime(\"%Y-%m-%d\"), case_tr_id)\n issue_list = []\n issue_list.append(new_issue.key)\n # ATOM-5159 EOL\n epic_link = issue_info['epic']\n greenhopper.add_issues_to_epic(epic_link, issue_list)\n except Exception as errorinfo:\n logging.error('case {} : create jira story fail on {}'.format(case_tr_id, timestamp))\n logging.error(errorinfo)\n\n\ndef filter_testrail_and_create_issue(day, issue_data):\n \"\"\"\n filter all suites and check if there is cases created on the day\n TODO: add TEST_SUITE_ATOM\n \"\"\"\n testrail_obj = Testclient(testrail_url)\n suites = [TEST_SUITE_BMC, TEST_SUITE_UEFI, TEST_SUITE_DAE_ATOM, TEST_SUITE_DAE_BMC]\n for suite in suites:\n case_filter = '&suite_id={}'.format(suite)\n cases = testrail_obj.get_cases(PROJECT_FFV, case_filter)\n check_new_case_create_issue(cases, day, suite, issue_data)\n\n\ndef write_isssue_info_to_csv(issuedata, dateday, case_id):\n \"\"\"\n write issue info to csv file\n \"\"\"\n filename = 'issue_create_by_tool.csv'\n logging.debug('add case {} to {}'.format(case_id, filename))\n if not os.path.isfile(filename):\n with open(filename, mode='w') as csv_file:\n field_names = ['case_id', 'issue_key', 'date', 'summary']\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n writer.writeheader()\n writer.writerow({'case_id': case_id,\n 'issue_key': issuedata.key,\n 'date': dateday,\n 'summary': issuedata.fields.summary})\n else:\n with open(filename, mode='a') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow([case_id, issuedata.key, dateday, issuedata.fields.summary])\n\n\ndef get_day_issue_info(date=None):\n \"\"\"\n get all issues create on 'date'\n \"\"\"\n if not date:\n timestamp = datetime.datetime.today()\n strdate_time = timestamp.strftime(\"%Y-%m-%d\")\n JQL = \"project = atom and created >= {} and Status != completed\".format(strdate_time)\n issuedata = myjira.search_issues(JQL)\n return 
issuedata\n\n\ndef get_cases_id_list(input, source='jira'):\n # source: jira or testrail\n # input: jira issues or testrail cases\n if source == 'jira':\n caseid_list = [int(get_case_id_from_issue(i)) for i in input if get_case_id_from_issue(i)]\n else:\n caseid_list = [i['id'] for i in input ]\n return caseid_list\n\n\n\ndef get_issues_by_jql(jql):\n if not jql:\n return None\n issuedata = myjira.search_issues(jql)\n return issuedata\n\nclass sync_cases_soluction(object):\n def get_jira_not_completed_cases_in_atomsuits(self):\n testrail_obj = Testclient(testrail_url)\n case_filter = '&suite_id={}'.format(TEST_SUITE_ATOM)\n automated_cases = testrail_obj.get_cases(PROJECT_FFV, case_filter)\n print (len(automated_cases))\n automated_cases_id_list = []\n for caseone in automated_cases:\n automated_cases_id_list.append(caseone['id'])\n #print(automated_cases_id_list)\n not_complete_case = {}\n for casetestrail in automated_cases_id_list:\n jql = 'project = ATOM and summary ~ C{} AND status != Completed AND \"Epic Link\" in (ATOM-4581,ATOM-4496,ATOM-5912)'.format(casetestrail)\n #print(jql)\n issues = get_issues_by_jql(jql)\n \n if len(issues) ==0:\n continue\n not_complete_case[str(issues[0].key)] = ['C{}'.format(casetestrail), str(issues[0].fields.status.name)]\n #issues[0].fields.status.name\n #print(issues)\n print (not_complete_case)\n print (len(not_complete_case))\n \n def get_need_auto_cases_not_created_in_jira(self):\n testrail_obj = Testclient(testrail_url)\n test_suites = [TEST_SUITE_BMC, TEST_SUITE_UEFI]\n automatable_cases = []\n automatable_cases_not_in_jira = []\n automatable_cases_in_jira_completed = {}\n for suite in test_suites:\n case_filter = '&suite_id={}'.format(suite)\n tr_cases = testrail_obj.get_cases(PROJECT_FFV, case_filter)\n print('all cases in suites {} total count is {}'.format(suite, len(tr_cases)))\n for tr_case in tr_cases:\n if tr_case['custom_ffv_need_physical_access'] == 2 and tr_case['custom_ffv_automatable'] == 3:\n automatable_cases.append(tr_case['id'])\n jql = 'project = ATOM and summary ~ C{} AND \"Epic Link\" in (ATOM-4581,ATOM-4496,ATOM-5912)'.format(tr_case['id'])\n issues = get_issues_by_jql(jql)\n if len(issues) == 0:\n automatable_cases_not_in_jira.append(tr_case['id'])\n else:\n for issue in issues:\n if str(issue.fields.status.name) == 'Completed':\n automatable_cases_in_jira_completed[issue.key] = tr_case['id']\n print('automatable cases in BMC and UEFI suites total count is {}'.format(len(automatable_cases)))\n print('automatable cases is ---------------------------------')\n print(automatable_cases)\n print('automatable cases in BMC and UEFI suites but not in jira count is {}'.format(len(automatable_cases_not_in_jira)))\n print('automatable cases in BMC and UEFI suites but not in jira---------------------')\n print(automatable_cases_not_in_jira)\n print('automatable cases in BMC and UEFI suites and in jira completed total count is {}'.format(len(automatable_cases_in_jira_completed)))\n print(automatable_cases_in_jira_completed)\n\n def sync_cases(self, suite, jira_epic):\n jql = \"project = atom and 'Epic Link' = {} and Status != 'completed'\".format(jira_epic)\n issues = get_issues_by_jql(jql)\n testrail_obj = Testclient(testrail_url)\n case_filter = '&suite_id={}'.format(suite)\n cases = testrail_obj.get_cases(PROJECT_FFV, case_filter)\n\n jira_cases = get_cases_id_list(issues)\n valid_cases = []\n for single_case in cases:\n if single_case['custom_ffv_need_physical_access'] == 2 and single_case['custom_ffv_automatable'] == 3:\n # six 
platforms we care about\n                if set(single_case['custom_ffvplatform']).intersection([12, 13, 15, 17, 18, 19]):\n                    valid_cases.append(single_case)\n        testrail_cases = get_cases_id_list(valid_cases, 'testrail')\n        diff_cases = (set(testrail_cases).difference(set(jira_cases)))\n        same_cases = (set(testrail_cases).intersection(set(jira_cases)))\n        print(diff_cases)\n        print(\"diff case length is {}\".format(len(diff_cases)))\n        print(\"----------------------\")\n        print(same_cases)\n        print(\"----------------------\")\n        print(jira_cases)\n        print(\"----------------------\")\n        print(set(jira_cases).difference(set(same_cases)))\n        print('33333333333333333333333333333333333')\n        return [i for i in valid_cases if i['id'] in diff_cases]\n\n    def create_cases_jira_issues(self, suite, cases):\n        # cases: a list of testrail case dicts\n        for case in cases:\n            case_tr_id = case['id']\n            try:\n                summary = '[C{}]-{}'.format(case['id'], case['title'])\n                logging.debug(summary)\n                description = case.get('custom_preconds')\n                if not description:\n                    description = 'test case script'\n                case_tag = case.get('custom_ffv_cpu_specific')\n                issue_info = get_suite_mapping_issue_info(suite, case_tag)\n                issue_dict_info = {\n                    'project': {'key': 'ATOM'},\n                    'summary': summary,\n                    'description': description,\n                    'issuetype': {'name': 'Feature'},\n                    'customfield_10006': 2,\n                    'components': [{'name': issue_info['components']}],\n                    'labels': ['atom', 'autogenerate'],\n                }\n                new_issue = myjira.create_issue(fields=issue_dict_info)\n                logging.debug('case {} : create jira story success {}'.format(case_tr_id, new_issue.key))\n                write_isssue_info_to_csv(new_issue, case['created_on'], case_tr_id)\n                issue_list = []\n                issue_list.append(new_issue.key)\n                # ATOM-5159 EOL\n                epic_link = issue_info['epic']\n                greenhopper.add_issues_to_epic(epic_link, issue_list)\n            except Exception as errorinfo:\n                logging.error('case {} : create jira story failed'.format(case_tr_id))\n                logging.error(errorinfo)\n\n\n\nif __name__ == '__main__':\n    #sync_cases_obj = sync_cases_soluction()\n    #sync_cases_obj.get_jira_not_completed_cases_in_atomsuits()\n    #sync_cases_obj.get_need_auto_cases_not_created_in_jira()\n    \n    parser = argparse.ArgumentParser(description='sync testrail cases to jira tool')\n\n    parser.add_argument(\"-d\", \"--date\", type=str,\n                        help=\"date of the testrail update time, for example 2020-3-22\")\n\n    commandList = parser.parse_args()\n    if not commandList.date:\n        timestamp = datetime.datetime.today()\n    else:\n        timestamp = datetime.datetime.strptime(commandList.date, '%Y-%m-%d')\n    timestamp = timestamp.date()\n    logging.debug('update date: {}'.format(timestamp))\n    issue_data = get_day_issue_info()\n    filter_testrail_and_create_issue(timestamp, issue_data)\n\n    # used to sync dpe cases with jira\n    \"\"\"\n    test123 = sync_cases_soluction()\n    bmc_diff_cases = test123.sync_cases(TEST_SUITE_BMC, 'ATOM-3466')\n    uefi_diff_cases = test123.sync_cases(TEST_SUITE_UEFI, 'ATOM-3360')\n    test123.create_cases_jira_issues(TEST_SUITE_UEFI, uefi_diff_cases)\n    test123.create_cases_jira_issues(TEST_SUITE_BMC, bmc_diff_cases)\n    \"\"\"\n","sub_path":"auto_analysis/testrail_jira.py","file_name":"testrail_jira.py","file_ext":"py","file_size_in_byte":19640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"499740252","text":"#Practice project. The card game Drunkard (War). 
THE PROJECT DOES NOT WORK!!! NEEDS DEBUGGING!!!!\nfrom random import shuffle\n\nclass Card:\n    \"\"\"Class modelling a playing card\"\"\"\n    suits = [\"of spades\",\"of hearts\",\"of diamonds\",\"of clubs\"]\n\n    \"\"\"The first two slots in the list are None so that\n    the strings in the list line up with the card values they represent\n    (so the string \"2\" in the values list sits at index 2)\"\"\"\n    values = [None, None, \"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"jack\",\"queen\",\"king\",\"ace\"]\n\n    def __init__(self, v, s):\n        \"\"\"suit and value are integers\"\"\"\n        self.value = v\n        self.suit = s\n\n    def __lt__(self, c2):\n        if self.value < c2.value:\n            return True\n        if self.value == c2.value:\n            if self.suit < c2.suit:\n                return True\n            else:\n                return False\n        return False\n\n    def __gt__(self,c2):\n        if self.value > c2.value:\n            return True\n        if self.value == c2.value:\n            if self.suit > c2.suit:\n                return True\n            else:\n                return False\n        return False\n\n    def __repr__(self):\n        v = self.values[self.value] + \" \" + self.suits[self.suit]\n        return v\n\nclass Deck:\n    \"\"\"Class representing a deck of cards\"\"\"\n    def __init__(self):\n        self.cards = []\n        for i in range(2,15): #build the 52-card deck in a loop\n            for j in range(4):\n                self.cards.append(Card(i,j))\n        shuffle(self.cards) #shuffle the deck\n\n    def rm_card(self): #this method removes and returns a card from the cards list, or returns None if the list is empty\n        if len(self.cards) == 0:\n            return\n        return self.cards.pop()\n\n    \nclass Player:\n    \"\"\"Class representing a player, tracking the player's current card and the number of rounds won\"\"\"\n    def __init__(self, name):\n        self.wins = 0\n        self.card = None\n        self.name = name\n\nclass Game:\n    \"\"\"Class representing the game\"\"\"\n    def __init__(self):\n        name1 = input(\"player 1 name: \")\n        name2 = input(\"player 2 name: \")\n        self.deck = Deck()\n        self.p1 = Player(name1)\n        self.p2 = Player(name2)\n\n    def wins(self, winner):\n        w = \"{} takes the cards\"\n        w = w.format(winner)\n        print(w)\n\n    def draw(self, p1n, p1c, p2n, p2c):\n        d = \"{} plays {}, and {} plays {}\"\n        d = d.format(p1n,p1c,p2n,p2c)\n        print(d)\n\n    def play_game(self):\n        cards = self.deck.cards\n        print(\"Let's go!\")\n        while len(cards) >= 2: #the game goes on while the deck holds 2 or more cards\n            m = \"Press X to quit. Press any other key to play a round \"\n            response = input(m)\n            if response == 'X':\n                break\n            p1c = self.deck.rm_card()\n            p2c = self.deck.rm_card()\n            p1n = self.p1.name\n            p2n = self.p2.name\n            self.draw(p1n,p1c,p2n,p2c)\n            if p1c > p2c:\n                self.p1.wins += 1\n                self.wins(self.p1.name)\n            else:\n                self.p2.wins += 1\n                self.wins(self.p2.name)\n        win = self.winner(self.p1,self.p2)\n\n        print(\"Game over. 
{} won!\".format(win))\n\n    def winner(self,p1,p2):\n        if p1.wins > p2.wins:\n            return p1.name\n        if p1.wins < p2.wins:\n            return p2.name\n        return \"It's a tie!\"\n\ngame = Game()\ngame.play_game()\n","sub_path":"drinker.py","file_name":"drinker.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"573631030","text":"from flask import Blueprint, request, jsonify\nfrom app.Models.SensorDataModel import SensorData\n\nsensorDataBluePrint = Blueprint('sensorDataBluePrint', __name__)\n\n\n# save the posted sensor data to a text file\n@sensorDataBluePrint.route(\"/api/save_text\", methods=['GET', 'POST'])\ndef save_data_text_file():\n    data = request.get_json()\n    print(data)\n\n    import os\n    cur_dir = os.getcwd()\n    save_file_path = cur_dir + ('/Data/{}.txt'.format(data['userId'] + data['motionCode']))\n    with open(save_file_path, 'a') as f:\n        for sensor_data in data['Muscledatas']:\n            f.write(str(sensor_data['1']) + '\\t' + str(sensor_data['2']) + '\\t' + str(sensor_data['3']) + '\\t'\n                    + str(sensor_data['4']) + '\\t' + str(sensor_data['5']) + '\\t' + str(sensor_data['6']))\n            f.write('\\n')\n            # f.write(sensor_data['data'])\n    return jsonify(result=\"success\")\n\n\n@sensorDataBluePrint.route(\"/api/save_mysql\", methods=['GET', 'POST'])\ndef save_data_mysql():\n    data = request.get_json()\n    print(data)\n\n    if request.method == 'GET':\n        sensor_data_list = SensorData.query.all()\n        print('---------------')\n        print(sensor_data_list)\n        print('---------------')\n        return str(sensor_data_list)  # a bare list of model objects is not a valid Flask response\n\n    elif request.method == 'POST':\n        result = request.get_json()\n        print(result)\n        user_id = result['userId']\n        motion_code = result['motionCode']\n        muscle_data = result['Muscledatas']\n        store_ins = SensorData(user_id, motion_code, muscle_data)\n        store_ins.add_database()\n        return 'post'\n\n\n\n\n\n","sub_path":"Resight-Server/app/Controller/SensorDataController.py","file_name":"SensorDataController.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"321063840","text":"#!/usr/bin/python\n# ########################################################################################################################\n# #\n# MIT License #\n# #\n# Copyright (c) 2018 Telefonica R&D #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated #\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation #\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and #\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions: #\n# #\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO #\n# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF #\n# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. 
#\n# #\n########################################################################################################################\nfrom __future__ import print_function\nfrom test import *\nfrom server_coap import *\nfrom server_udp import *\nfrom health_check import *\nimport threading\nimport traceback\n\nif __name__ == '__main__':\n\n try:\n config_file = read_config_file()\n logger.debug(config_file)\n config_cloud = cloud_configure(config_file)\n logger.debug(config_cloud)\n\n if config_file[\"cloudlog\"]:\n cloud_log(config_cloud)\n\n if config_cloud[\"code\"] == CODE_OK and test(config_file, config_cloud):\n\n if config_file[\"COAP\"][\"enable\"]:\n thread_coap = threading.Thread(name='CoAPserver', target=coap_loop, args=(config_file, config_cloud))\n thread_coap.setDaemon(True)\n thread_coap.start()\n\n if config_file[\"UDP\"][\"enable\"]:\n thread_udp = threading.Thread(name='UDP_socket', target=udp_loop, args=(config_file, config_cloud))\n thread_udp.setDaemon(True)\n thread_udp.start()\n\n thread_health = threading.Thread(name='healthtest', target=health_check)\n thread_health.start()\n else:\n logger.error(\"Failed tests\")\n\n except Exception as e:\n logger.error(\"exception main()\")\n logger.error(\"message:{}\".format(e.message))\n traceback.print_exc(file=sys.stdout)\n\n except (KeyboardInterrupt, SystemExit):\n logger.warning(\"KeyboardInterrupt main()\")\n\n\n\n\n","sub_path":"scripts/Data_Bridge/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"200648149","text":"import math\r\n\r\nclass NetworkDijkstra:\r\n\r\n def __init__(self, source_id, network_map):\r\n self._source_id = source_id\r\n self._network_map = network_map\r\n self._cost = None\r\n self._pred = None\r\n\r\n\r\n @property\r\n def source_id(self):\r\n return self._source_id\r\n\r\n\r\n @property\r\n def network_map(self):\r\n return self._network_map\r\n\r\n\r\n @network_map.setter\r\n def network_map(self, network_map):\r\n self._network_map = network_map\r\n\r\n\r\n @property\r\n def cost(self):\r\n return self._cost\r\n\r\n\r\n @cost.setter\r\n def cost(self, cost):\r\n self._cost = cost\r\n\r\n\r\n @property\r\n def pred(self):\r\n return self._pred\r\n\r\n\r\n @pred.setter\r\n def pred(self, pred):\r\n self._pred = pred\r\n\r\n\r\n # Get minimum cost node id from unvisited set\r\n def minCost(self, unvisited, cost):\r\n minCost = math.inf\r\n minCostNode = None\r\n\r\n for v in unvisited:\r\n if cost[v] < minCost:\r\n minCost = cost[v]\r\n minCostNode = v\r\n\r\n return minCostNode\r\n\r\n\r\n # Run Dijkstra's algorithm on network map and store cost and pred\r\n def run_dijkstra(self):\r\n # cost = dist, pred = prev, unvisted = Q \r\n # common alternatives in pseudo code\r\n cost = {}\r\n pred = {}\r\n unvisited = set()\r\n unvisited.add(self.source_id)\r\n\r\n # Set cost for routers to inf and pred to None then add ids to unvisited\r\n for key, val in self.network_map.items():\r\n cost[key] = math.inf\r\n pred[key] = None\r\n unvisited.add(key)\r\n\r\n # Cost for source is 0\r\n cost[self.source_id] = 0\r\n\r\n # Loop while there are still unvisited routers.\r\n while unvisited:\r\n # Set v to lowest cost router in unvisited, \r\n # at the start that is always the source.\r\n if self.source_id in unvisited:\r\n v = self.source_id\r\n else:\r\n v = self.minCost(unvisited, cost)\r\n\r\n # The router v is now visited remove it from unvisited.\r\n unvisited.remove(v)\r\n\r\n # 
Compare cost v-->u to cost[u] and choose the one that's lower\r\n for u, val in self.network_map[v].items():\r\n # Add the weight of the edge from v to u to the cost of v for path cost\r\n alt = cost[v] + val['weight']\r\n alt = round(alt, 1)\r\n\r\n # If new path is less that previous cost to get to u \r\n # change cost[u] and add to pred\r\n if alt < cost[u]:\r\n cost[u] = alt\r\n pred[u] = { 'id' : v, 'address' : val['address'] }\r\n\r\n # set cost and pred fields\r\n self.cost = cost\r\n self.pred = pred\r\n\r\n\r\n # Traverse through pred to get the lowest cost path for dest\r\n def getPath(self, dest):\r\n v = dest\r\n path = v\r\n\r\n while self.pred[v] is not None:\r\n path = self.pred[v]['id'] + path\r\n v = self.pred[v]['id']\r\n\r\n return path\r\n\r\n\r\n # Print lowest cost paths for source router\r\n def print_dijkstra(self):\r\n print(\"I am Router {0}\".format(self.source_id))\r\n\r\n for key, val in self.cost.items():\r\n if key != self.source_id:\r\n path = self.getPath(key)\r\n print(\"Least cost path to router {0}: {1} and the cost is {2}\".format(key, path, val))\r\n","sub_path":"NetworkDijkstra.py","file_name":"NetworkDijkstra.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"328871262","text":"import pyb\nimport utime as time\nfrom machine import I2C\nimport ssd1306\nimport BME280\nfrom time import sleep\n\nsw = pyb.Switch() # user push button\nled = pyb.LED(1) # on-board LED (blue), PC13 pin\n\nrtc = pyb.RTC()\n\ni2c = I2C(-1, scl=pyb.Pin.board.PB6, sda=pyb.Pin.board.PB7, freq=100000)\n\noled_width = 128\noled_height = 64\noled = ssd1306.SSD1306_I2C(oled_width, oled_height, i2c)\nbme = BME280.BME280(i2c=i2c)\nled.on()\n\ntry:\n while not sw.value():\n dt = rtc.datetime()\n temp = bme.temperature\n hum = bme.humidity\n pres = bme.pressure\n print('Temperature: ', temp)\n print('Humidity: ', hum)\n print('Pressure: ', pres)\n print(rtc.datetime()) \n print()\n \n oled.text('OLED BME280', 0, 0)\n oled.text(temp, 0, 20)\n oled.text(hum, 60, 20)\n oled.text(\"Time:{0:02}:{1:02}:{2:02}\".format(dt[4],dt[5],dt[6]),0,40)\n oled.text(\"Date:{0:02}/{1:02}/{2:02}\".format(dt[2],dt[1],dt[0]),0,50)\n oled.show()\n\n sleep(5)\n oled.fill(0)\nexcept KeyboardInterrupt:\n pass\nfinally:\n led.off() # turn off LED\n print('Done')\n","sub_path":"rtc.py","file_name":"rtc.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"376178942","text":"import os\nimport numpy as np\nimport json\nimport glob\nimport random\nfrom tqdm import tqdm\n\nsplit = 'train'\nbdd_labels_path = '/n/pana/scratch/ravi/bdd/bdd100k/labels/100k'\nbdd_labels_list = glob.glob(os.path.join(bdd_labels_path, split, '*.json'))\n\nimage_ids = []\nclasses = []\nannotations = []\n\nbdd_labels_list = random.sample(bdd_labels_list, int(len(bdd_labels_list) * 0.2))\n\nfor l in tqdm(bdd_labels_list):\n with open(l) as label_file:\n labels = json.load(label_file)\n image_id = l.split('/')[-1].split('.')[0]\n image_ids.append(image_id)\n\n for b in labels['frames'][0]['objects']:\n if 'box2d' in b:\n obj_dict= { 'bbox': [b['box2d']['x1'], b['box2d']['y1'],\n b['box2d']['x2'], b['box2d']['y2']],\n 'name': image_id,\n 'category': b['category']\n }\n annotations.append(obj_dict)\n\n if b['category'] not in classes:\n classes.append(b['category'])\n\nbdd_annotations = { 'image_ids' : image_ids,\n 'classes' : classes,\n 'annotations' : 
annotations\n }\nprint(classes)\nwith open('bdd_train_20.json', 'w') as fp:\n json.dump(bdd_annotations, fp)\n","sub_path":"tools/bdd/convert_bdd_labels_to_json.py","file_name":"convert_bdd_labels_to_json.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"219962758","text":"from random import randint\nfrom task import Task\nfrom individual import Individual\nfrom population import Population\nimport file_manager\n\nclass GA():\n\tdef __init__(self, filename, nTasks, nMachines, mutationFactor, crossoverFactor, elitismFactor, populationSize, crossoverOperator, mutationOperator, maxIterations):\n\t\tself.filename = filename\n\t\tself.nTasks = nTasks\n\t\tself.nMachines = nMachines\n\t\tself.mutationFactor = mutationFactor\n\t\tself.crossoverFactor = crossoverFactor\n\t\tself.elitismFactor = elitismFactor\n\t\tself.populationSize = populationSize\n\t\tself.maxIterations = maxIterations\n\t\tself.crossoverOperator = crossoverOperator\n\t\tself.mutationOperator = mutationOperator\n\t\tIndividual.nextIndividualIdentifier = 0\n\t\t# temporary log holders\n\t\tself.logs = []\n\t\tself.logs_makespan = []\n\t\tself.logs_average = []\n\t\tself.logs_flowtime = []\n\t\tself.logs_generations = []\n\n\tdef execute(self):\n\t\tIndividual.nMachines = self.nMachines\n\t\tIndividual.nTasks = self.nTasks\n\t\tIndividual.tasks = file_manager.load_tasks(self.filename, self.nTasks, self.nMachines)\n\t\tIndividual.crossoverMask = Individual.generate_crossover_mask(self.nTasks)\n\n\t\tmakespanPopulation = Population.generate(self.populationSize, Individual.tasks)\n\n\t\tfor i in range(self.maxIterations):\n\t\t\tcurrentMakespan = makespanPopulation.makespan_sum()\n\t\t\t# SELECTION\n\t\t\tparents = makespanPopulation.select_parents(self.crossoverFactor)\n\n\t\t\t# CROSSOVER\n\t\t\tchilds = []\n\t\t\tchilds = Individual.crossover_one_point(parents, i+1, currentMakespan)\n\t\t\t\n\t\t\t# MUTATION\n\t\t\tfor child in childs:\n\t\t\t\tchild.apply_mutation_simple(self.mutationFactor, currentMakespan)\n\t\t\t\tmakespanPopulation.insert_individual(child)\n\n\n\t\t\t# INSERTION\n\t\t\tnewPopulationIndividuals = makespanPopulation.update_population(self.elitismFactor)\n\n\t\t\t# LOGGING\n\t\t\tif(i % 100 == 0):\n\t\t\t\tself.save_log_information(i, makespanPopulation)\n\n\t\treturn makespanPopulation.best_individual(), self.logs_generations, self.logs_average, self.logs_makespan, self.logs_flowtime\n\n\n\tdef log_information(self):\n\t\tlogFileName = self.filename.replace('.txt', '_log.txt')\n\t\tfile = open(logFileName, \"w\")\n\t\tfor logLine in self.logs:\n\t\t\tfile.write(logLine)\n\t\tfile.close()\n\n\tdef save_log_information(self, generation, population):\n\t\taverage = population.average_fitness()\n\t\tdeviation = population.deviation(average)\n\t\tbest = population.bestIndividual\n\t\tbestMakespan = best.makespan\n\t\tbestFlowtime = best.flowtime\n\t\tbestGeneration = best.generation\n\t\tstringData = \"average: {0:5.12f} \\tdeviation: {1:.12f}\\tbest makespan: {2:.12f}\\tbest flowtime: {3:.12f}\\tgeneration: {4:d}\\n\".format(average, deviation, bestMakespan, bestFlowtime, bestGeneration) 
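# one formatted snapshot of the population statistics per logging interval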
\n\t\tself.logs.append(stringData)\n\t\tself.logs_makespan.append(bestMakespan)\n\t\tself.logs_flowtime.append(bestFlowtime)\n\t\tself.logs_average.append(average)\n\t\tself.logs_generations.append(generation)\n\t\n\n","sub_path":"python/genetic_algorithm11.py","file_name":"genetic_algorithm11.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"233944864","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 1 11:30:08 2017\n\n@author: Wanying\n\"\"\"\n\n#class Pcr() also works\nclass Pcr(): \n def __init__(self):\n self.name = ''\n self.ct = 0.\n self.target = ''\n \n def normCt(self, normVal):\n return self.ct - normVal\n \n \n \n \nfh = open('2016_0816.csv', 'rb')\nfor line in fh:\n print(line.strip().split(','))","sub_path":"sciCoder_class.py","file_name":"sciCoder_class.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"517127779","text":"# Word counting and formating a text over multiple lines \n\nimport time \n\n############ Text ##############\nTextString = \"Hallo mein Name ist Tim und ich Arbeite an einem Programm, das diverse Übungen zum erlernen von Speedreading beinhaltet. Ich habe es schon geschafft mir einen Generellen Überblick über das Thema zu verschaffen, allerdings ist es im großen und ganzen recht Komplex. Als nächstes möchte ich gerne das Testen von der Lesegeschwindigkeit automatisieren. Dazu brauche ich diverse Grundvorraussezungen. Das ganze sollte mit jedem beliebigen Text funktionieren. Dazu kann man zum Beispiel Texte aus dem Internet verwenden, oder diese sogar selbst verfassen. Der Text sollte in der konsole zeilenweise dargestellt werden. Das Programm sollte erkennen können, wie viele wörter gerade gelesen werden, und dementsprechend auch die Zeit stoppen in welcher dies geschehen ist. - Tim\"\n\nFormatedString = []\n\n#print(TextString)\n\n######Parameter\nLineLength = 50 #character\n\nDisplayMode = 'n' #possilbe values: 'n', 'm', 'ml', 'mp', 'wpm'\nwpm = 200\n\n####### count number of words sum \ndef NumberOfWords(Text):\n #1 split the text in chunks \"similar to words\" e.g by spaces\n \n NumberSpace = -1\n ListOfSpaces = [-1] #Startvalue\n SpaceString = ' '\n NoLetterList = ['-',':','_',',','.',''] \n LetterList = set('abcdefghijklmnopqrstuvwxyzäüöABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÜ1234567890')\n #Word = 'Hallo'\n #print(LetterList.intersection(Word))\n while True: #identify all whitespace\n NumberSpace = Text.find(SpaceString,NumberSpace+1)\n if not NumberSpace == -1: \n ListOfSpaces.append(NumberSpace)\n else:\n break\n ListOfSpaces.append(len(Text)+1) # Last Value\n \n WordList = []\n for indexS in range(len(ListOfSpaces)-1):\n WordList.append(Text[ListOfSpaces[indexS]+1:ListOfSpaces[indexS+1]])\n #print(WordList)\n #2 identify if chunk = word \n # Remove remaining whitespaces: for easy detection \n \n # Remove < single length items\n RemoveList = []\n \n for Word in WordList:\n if len(Word) <= 1:\n RemoveList.append(Word)\n elif not LetterList.intersection(Word):\n RemoveList.append(Word)\n\n for Word in RemoveList:\n WordList.remove(Word)\n #print(WordList)\n # Remove items only consisting of these characters\n \n \n #3 cound words\n WordNum = len(WordList)\n return WordNum\n \n\n####### Format text \n# cut string after specified length \n# Do not seperate a word e.g. 
cut where a space is \n# do not start a new line with a whitespace \n# if there is a new line character start a new line \n# For now: no word separation \nNumberSpace = -1\nListOfSpaces = []\nSpaceString = ' '\n\nwhile True:\n    NumberSpace = TextString.find(SpaceString,NumberSpace+1)\n    if not NumberSpace == -1: \n        ListOfSpaces.append(NumberSpace)\n    else:\n        break\n\nNewLineLocations = [-1] \nfor i in ListOfSpaces:\n    if i > LineLength + NewLineLocations[-1]:\n        ind = ListOfSpaces.index(i)\n        NewLineLocations.append(ListOfSpaces[ind-1])\n        \nNewLineLocations.append(len(TextString)) # The last value\n\nfor ii in range(len(NewLineLocations)-1):\n    FormatedString.append(TextString[NewLineLocations[ii]+1:NewLineLocations[ii+1]])\n\n\n#print(ListOfSpaces)\n#print(NewLineLocations)\n\n\n####### display text #######\n# Display modes:\n# Normal --> n\n# Measure time --> m\n# Measure time per line --> ml\n# Measure time per multiple lines (paragraph) --> mp\n# Display with wpm speed --> wpm\n\nif DisplayMode == 'n': #Ok works as intended\n    for line in FormatedString:\n        Wordcount = NumberOfWords(line)\n        dif = 4 - len(str(Wordcount))\n        CountStr = str(Wordcount) + ' '*dif\n        print(CountStr,line)\n    Number = NumberOfWords(TextString)\n    print('___')\n    print(Number)\n    \nelif DisplayMode == 'm':\n    i = 1\nelif DisplayMode == 'ml':\n    i = 1\nelif DisplayMode == 'mp':\n    i = 1\nelif DisplayMode == 'wpm':\n    for line in FormatedString:\n        start = time.time()\n        Wordcount = NumberOfWords(line)\n        dif = 4 - len(str(Wordcount))\n        CountStr = str(Wordcount) + ' '*dif\n        print(CountStr,line)\n        #calculate the waiting time depending on the number of words in the row\n        WaitingTime = Wordcount/wpm*60 #in seconds \n        endtime = time.time()\n        time.sleep(WaitingTime)\n        print(endtime-start)\n        \n        \n    Number = NumberOfWords(TextString)\n    print('___')\n    print(Number)\nelse:\n    print('Warning: Invalid DisplayMode - No output displayed!')\n    \n    \nstart = time.time()\nprint(1)\ntime.sleep(10)\n\nendtime = time.time()\nprint(endtime-start)\n\n\n\n\n#print('The End')\n","sub_path":"Scripts/ReadingSpeed.py","file_name":"ReadingSpeed.py","file_ext":"py","file_size_in_byte":4693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"158572797","text":"# discord.py library\nimport discord\n# command extension under discord\nfrom discord.ext import commands\n\n# discord app bot token\nTOKEN = \"MjcwNjgyNTMwMzM4ODMyMzk1.Diu1rw.JrpZ72K2jUjHQFlDaON6YBFb1dA\"\n\n# bot command prefix\nbot = commands.Bot(command_prefix= '!', description= 'A bot that greets the users!')\n\n@bot.event\n# on_ready() called when bot has successfully logged in\nasync def on_ready():\n    print ('Connected to server!')\n    print ('Name: ' + bot.user.name)\n    print ('ID: ' + bot.user.id)\n    print ('------------------------------')\n\n# bot commands\n@bot.command()\nasync def greet():\n    await bot.say(\":smiley: :wave: Hello, there!\")\n\nbot.run(TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"380952857","text":"# -*- coding: UTF-8 -*-\n# ==============================================================================\n# Created for the MVA course: Object Recognition winter 2016.\n# Project: lego assembly instructions\n# ==============================================================================\n\n\n# import the required modules\nimport cv2\nimport numpy as np\nimport logging\nimport os\nimport sys\nimport 
time\nimport threading\n\n# define the global constants\nPREVIEW_DIR = \"steps_preview\" # folder containing the step previews\nPREVIEW_ROW = 50 # preview offset in pixel row\nPREVIEW_COL = 50 # preview offset in pixel column\nPREVIEW_INTER = 25 # inter preview offset in pixel row\nCOLOR_LIST = [(204,0,0),(0,0,204),(0,204,0),(255,128,0),(204,0,204)]\n # color list in BGR format\n\ndef _load_preview():\n \"\"\"\n Load the step preview images.\n \"\"\"\n # setup the logger\n logging.basicConfig(level=logging.INFO,format='%(asctime)s %(message)s')\n\n # check that the step previews folder exists\n if not os.path.isdir(PREVIEW_DIR):\n logging.info(\"Directory '%s' not found\"%PREVIEW_DIR)\n sys.exit(1)\n\n # retrieve the paths\n paths = os.listdir(PREVIEW_DIR)\n paths = [l for l in paths if l.lower().endswith('.png')]\n logging.info(\"%d previews found.\"%len(paths))\n\n previews = {}\n for i in np.arange(0,len(paths)):\n fullpath = os.path.join(PREVIEW_DIR,paths[i])\n step_number = int(paths[i][4:-4])\n previews[step_number] = cv2.imread(fullpath,-1)\n\n return previews\n\ndef _dummy_compute():\n \"\"\"\n Dummy function.\n \"\"\"\n\n # Define global variables\n global frame_shared\n global step_number_shared\n global loc_shared\n\n # Wait for webcam initialisation\n time.sleep(2)\n\n while True:\n # Copy frame\n frame_copy = np.copy(frame_shared)\n\n # Simulate time delay\n time.sleep(0.5)\n\n # Random step number\n step_number = np.random.randint(36)\n\n # Random locations\n n_loc = np.random.randint(5)\n loc = np.random.randint(400,size=(n_loc,2))\n loc = np.concatenate((loc,loc),axis=1)\n loc[:,1] = loc[:,0] + 200\n loc[:,2:4] = loc[:,2:4]+50+np.random.randint(400,size=(n_loc,2))\n\n # Update shared variables\n loc_shared = loc\n step_number_shared = step_number\n\n return\n\ndef _display():\n \"\"\"\n Update the global frame variable from the webcam and display the results\n \"\"\"\n\n # Define global variables\n global frame_shared\n global step_number_shared\n global loc_shared\n\n # Initialize the video capture with the first camera\n cap = cv2.VideoCapture(0)\n\n # Load the previews\n previews = _load_preview()\n\n # First initialization of the frame\n _, frame = cap.read()\n frame_shared = np.copy(frame)\n time.sleep(2)\n\n while(True):\n # Retrieve the frame and store it in the global variable\n _, frame = cap.read()\n frame_shared = np.copy(frame)\n\n # Display the locations\n loc_copy = np.copy(loc_shared)\n for l in np.arange(0, loc_copy.shape[0]):\n color = COLOR_LIST[l]\n cv2.rectangle(frame, (loc_copy[l,0] + 300, loc_copy[l,1]),\n (loc_copy[l,2]+500, loc_copy[l,3]), color, 5)\n\n # Display the instructions\n step_number_copy = step_number_shared\n befIm = previews[step_number_copy]\n aftIm = previews[step_number_copy+1]\n row,col = befIm.shape[0:2]\n col1 = PREVIEW_COL\n col2 = col1 + col / 2\n row1 = PREVIEW_ROW\n row2 = row1 + row / 2\n row3 = row2 + PREVIEW_INTER\n row4 = row3 + row / 2\n frame[row1:row2,col1:col2,:] = befIm[::2,::2,0:3]\n frame[row3:row4,col1:col2,:] = aftIm[::2,::2,0:3]\n\n # Write the text\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(frame,\"before\",(col1+5,row1+30),font,0.5,(255,255,255),2)\n cv2.putText(frame,\"after\",(col1+5,row3+30),font,0.5,(255,255,255),2)\n\n # Display the frame\n cv2.imshow('frame',frame)\n\n # Exit if any key is pressed\n if cv2.waitKey(1) != -1:\n cap.release()\n cv2.destroyAllWindows()\n sys.exit(1)\n\nif __name__ == \"__main__\":\n t = threading.Thread(target=_dummy_compute)\n t.setDaemon(True)\n t.start()\n 
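# the compute thread is a daemon, so it will not outlive the display loop\n    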
_display()\n","sub_path":"demo/demo_webcam.py","file_name":"demo_webcam.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"173557118","text":"import asyncio\nimport argparse\nimport logging\nimport csv\nimport time\n# from io import StringIO\nfrom math import ceil\nfrom datetime import datetime, timedelta\nfrom collections import OrderedDict\n\nfrom .comm_center import ZeroCommServer, ZeroCommClient\nfrom .config_reader import ConfigReader\nfrom .subprocess_manager import ColonySpawner\nfrom .reports import Report\n\n_lg = logging.getLogger(\"zergswarm\")\n\n\nclass Overmind:\n # noinspection PyProtectedMember\n def __init__(self):\n _lg.debug(\"raising the overmind\")\n cmdline_ = Overmind._parse_cmdline()\n _lg.debug(\"command line parsed as %s\", cmdline_)\n # sanity check\n if cmdline_[\"log_level\"] not in logging._nameToLevel.keys():\n cmdline_[\"log_level\"] = \"INFO\"\n logging.basicConfig()\n logging.root.setLevel(logging._nameToLevel[cmdline_[\"log_level\"]])\n _lg.info(\"logging level is set to %s\", logging.getLevelName(logging.root.getEffectiveLevel()))\n if isinstance(cmdline_[\"central_server\"], str):\n self._comm_sender = ZeroCommClient(cmdline_[\"central_server\"])\n else:\n self._comm_sender = None\n self._stats_accumulator = Report()\n self._config_reader = ConfigReader(cmdline_[\"settings_file\"])\n self._configs = list(self._config_reader.get_hatchling_config())\n n_ = len(self._configs)\n self._comm_listener = ZeroCommServer(cmdline_[\"bind_address\"])\n _lg.debug(\"initializing 0MQ listener on %s\", self._comm_listener.server_address)\n self._comm_listener.register_callback(\"satellite_action\", self._satellite_action)\n self._comm_listener.register_callback(\"stats\", self._stats_accumulator_callback)\n self._comm_listener.register_callback(\"get_colony_config\", self._colony_config)\n self._comm_listener.register_callback(\"get_hatchlings_config\", self._hatchlings_config)\n req_cols_ = self.required_colony_count(n_, ColonySpawner.available_colony_slots())\n self._hatchery_file = cmdline_[\"hatchery_file\"]\n self._spawner = ColonySpawner(req_cols_)\n _lg.info(\"spawning %s colonies with %s hatchlings\", req_cols_, n_)\n n1_ = n_ % req_cols_\n x_ = n_ // req_cols_\n self._hatchlings_per_col = [x_ for _ in range(req_cols_ - n1_)] + [(x_ + 1) for _ in range(n1_)]\n self._colonies = dict()\n self._satellites = set()\n self._start_time = datetime.utcnow()\n\n async def _send_to_central(self, data: dict) -> dict:\n ans_ = {}\n if not self._comm_sender:\n return ans_\n async with self._comm_sender as sender: # type: ZeroCommClient\n ans_ = await sender.call(message_type=\"stats\", data=data)\n return ans_\n\n async def _notify_central(self, action: str) -> None:\n \"\"\"\n notify a central overmind, if any\n :param action: str, either \"register\" or \"unregister\"\n :return: None\n \"\"\"\n if not self._comm_sender:\n return\n async with self._comm_sender as sender: # type: ZeroCommClient\n _ = await sender.call(message_type=\"satellite_action\", data={\"action\": action})\n\n def _satellite_action(self, data: dict) -> dict:\n if not isinstance(data, dict) or 0 == len(data):\n return {}\n if not data.get(\"client_id\", None):\n id_ = \"unknown\"\n else:\n id_ = data[\"client_id\"]\n if id_ in self._satellites:\n if \"unregister\" == data[\"data\"].get(\"action\", \"register\"):\n self._satellites.remove(id_)\n return {\"client_id\": id_, \"data\": {\"result\": \"ok\"}}\n 
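# a registered satellite may only unregister; anything else is invalid\n            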
return {\"client_id\": id_, \"data\": {\"result\": \"error\", \"error\": \"invalid request\"}}\n if \"register\" == data[\"data\"].get(\"action\", \"register\"):\n self._satellites.add(id_)\n return {\"client_id\": id_, \"data\": {\"result\": \"ok\"}}\n return {\"client_id\": id_, \"data\": {\"result\": \"error\", \"error\": \"invalid request\"}}\n\n async def _stats_accumulator_callback(self, data: dict) -> dict:\n if not isinstance(data, dict) or 0 == len(data):\n return {}\n if self._comm_sender is not None:\n # pass this along to the central overmind\n try:\n ans_ = await asyncio.wait_for(self._send_to_central(data), 2)\n except asyncio.TimeoutError:\n err_ = \"timeout while attempting to send stats to central overmind\"\n _lg.error(err_)\n return {\"client_id\": data.get(\"client_id\", \"unknown\"),\n \"data\": {\"stats\": \"error\", \"error\": err_}}\n except Exception as e_:\n _lg.error(\"exception caught while attempting to send stats to central overmind: %s\", str(e_))\n return {\"client_id\": data.get(\"client_id\", \"unknown\"),\n \"data\": {\"stats\": \"error\", \"error\": str(e_)}}\n try:\n rdata_ = Report.from_dict(data[\"data\"])\n except Exception as e_:\n err_ = \"failed to construct report from stat message: [%s] %s\", e_.__class__.__name__, e_\n _lg.error(err_)\n return {\"client_id\": data.get(\"client_id\", \"unknown\"),\n \"data\": {\"stats\": \"error\", \"error\": err_}}\n if not data.get(\"client_id\", None):\n id_ = \"unknown\"\n else:\n id_ = data[\"client_id\"]\n now_ = datetime.utcnow()\n if not((now_ - self._start_time).seconds % (10 * 60)):\n # about once every ~ X minutes\n _lg.info(\"[%s] received data from %s: %s\", now_, id_, rdata_)\n\n try:\n self._stats_accumulator += rdata_\n return {\"data\": {\"result\": \"ok\"}}\n except Exception as e_:\n _lg.error(\"caught exception while adding stats: %s\", e_)\n return {\"client_id\": data.get(\"client_id\", \"unknown\"),\n \"data\": {\"stats\": \"error\", \"error\": str(e_)}}\n\n def _colony_config(self, data: dict):\n if not isinstance(data, dict) or data.get(\"client_id\", None) is None:\n id_ = \"unknown_{}\".format(len(self._colonies))\n else:\n id_ = data[\"client_id\"]\n _lg.debug(\"colony config requested for %s\", id_)\n\n if id_ not in self._colonies.keys():\n if len(self._colonies) >= len(self._hatchlings_per_col):\n self._colonies[id_] = 0\n else:\n self._colonies[id_] = self._hatchlings_per_col[len(self._colonies)]\n _lg.debug(\"returning colony config: %s\", self._colonies[id_])\n return {\"client_id\": data.get(\"client_id\", \"unknown\"), \"data\": {\"hatchlings\": self._colonies[id_]}}\n\n def _hatchlings_config(self, data: dict):\n if not self._configs or not isinstance(data, dict): # zero length array takes this branch\n ans_ = []\n else:\n id_ = data.get(\"client_id\", None)\n if not id_ or id_ not in self._colonies.keys():\n ans_ = []\n else:\n n_ = self._colonies[id_]\n cnf_ = self._configs[:n_]\n self._configs = self._configs[n_:]\n ans_ = cnf_\n _lg.debug(\"returning %s hatchling configs\", len(ans_))\n return {\"client_id\": data.get(\"client_id\", \"unknown\"), \"data\": {\"configs\": ans_}}\n\n @staticmethod\n def _parse_cmdline() -> dict:\n ap_ = argparse.ArgumentParser()\n default_bind_ = \"tcp://127.0.0.1:23176\"\n default_settings_ = \"settings.ini\"\n default_hatchery_ = \"hatchling.py\"\n ap_.add_argument(\"--bind_address\", \"-a\",\n help=(\"URI (tcp://IP:port) the overmind's 0MQ should bind to for listening \"\n \"(default is {})\").format(default_bind_),\n type=str, 
default=default_bind_, required=False)\n        ap_.add_argument(\"--central_server\", \"-c\",\n            help=\"full URI of the central server coordinating all overminds, if any (tcp://IP:port) (default is None)\",\n            type=str, default=None, required=False)\n        ap_.add_argument(\"--settings_file\", \"-s\", help=\"custom settings file (default is {})\".format(default_settings_),\n            type=str, default=default_settings_, required=False)\n        ap_.add_argument(\"--hatchery_file\", \"-x\", help=\"custom hatchery file (default is {})\".format(default_hatchery_),\n            type=str, default=default_hatchery_, required=False)\n        ap_.add_argument(\"--log_level\", \"-l\", type=str, choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\"],\n            default=\"INFO\", required=False, help=\"log level for the overmind coordinator\")\n        return ap_.parse_known_args()[0].__dict__\n\n    def required_colony_count(self, hatchlings: int, colony_slots: int) -> int:\n        max_hatchlings_per_colony = self._config_reader.max_hatchlings_per_colony\n        min_hatchlings_per_colony = self._config_reader.min_hatchlings_per_colony\n        if hatchlings / colony_slots > max_hatchlings_per_colony:\n            ans_ = colony_slots\n        elif hatchlings < colony_slots * min_hatchlings_per_colony:\n            ans_ = ceil(hatchlings / min_hatchlings_per_colony)\n        else:\n            ans_ = ceil(hatchlings / max_hatchlings_per_colony)\n        return ans_ if ans_ > 1 else 1\n\n    async def _run_async(self):\n        async with self._comm_listener as srv:\n            # have to put this message out for external connections\n            _lg.info(\"overmind listening to 0MQ connections on {}\".format(srv.server_address))\n            if self._comm_sender:\n                await self._notify_central(\"register\")\n            await self._spawner.run_colonies(server_address=srv.server_address, hatchery_file=self._hatchery_file)\n            if self._comm_sender:\n                await self._notify_central(\"unregister\")\n            else:\n                if len(self._satellites) > 0:\n                    # wait a finite amount of time, to avoid cases where the satellites crash or\n                    # otherwise stop communicating\n                    wait_mins_ = 5\n                    _lg.info(\"waiting %d minutes for %d satellites\", wait_mins_, len(self._satellites))\n                    timeout_end_ = datetime.utcnow() + timedelta(minutes=wait_mins_)\n                    while datetime.utcnow() < timeout_end_:\n                        await asyncio.sleep(1)\n                        if len(self._satellites) == 0:\n                            break\n        self._stop_time = datetime.utcnow()\n\n    def run(self):\n        loop_ = asyncio.get_event_loop()\n        loop_.run_until_complete(self._run_async())\n        self.print_stats()\n\n    def print_stats(self):\n        # buff_ = StringIO()\n        # for k_, v_ in self._stats_accumulator.items():\n        #     print(\"{}: {}\".format(k_, v_), file=buff_)\n        # _lg.info(\"reported stats:\\n%s\", buff_.getvalue())\n        # buff_.close()\n        if len(self._stats_accumulator) == 0:\n            _lg.warning(\"empty accumulator, no stats printed\\n%s\", self._stats_accumulator)\n            return\n\n        time_ = self._stop_time - self._start_time # type: timedelta\n        _lg.info(\"reported stats over %d minutes:\\n%s\", time_.seconds / 60, self._stats_accumulator)\n\n        # output_ = \"stats_output.csv\"\n        # fields = set()\n        # for v_ in self._stats_accumulator.values():\n        #     fields.update(v_[\"success\"].keys())\n        # fields = [\"time\"] + sorted(fields)\n        # with open(output_, \"wt\") as outf_:\n        #     dwrt_ = csv.DictWriter(outf_, fieldnames=list(fields), restval=0)\n        #     dwrt_.writeheader()\n        #     for k_, v_ in self._stats_accumulator.items():\n        #         row_ = {\"time\": k_.time()}\n        #         row_.update({k1_: v1_[\"count\"] for k1_, v1_ in v_[\"success\"].items()})\n        #         
dwrt_.writerow(row_)\n","sub_path":"zergswarm/overmind.py","file_name":"overmind.py","file_ext":"py","file_size_in_byte":11753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"221046624","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 27 14:25:29 2018\n\n@author: owen\n\"\"\"\n\n# https://leetcode.com/problems/design-linked-list/discuss/139689/Python-solution\nclass Node(object): # singly linked node\n    \n    def __init__(self, val):\n        self.val=val\n        self.next=None\n    \n    \nclass MyLinkedList(object):\n\n    def __init__(self):\n        \"\"\"\n        Initialize your data structure here.\n        \"\"\"\n        self.head=None\n        self.size=0 # count of nodes in the list\n\n    def get(self, index):\n        \"\"\"\n        Get the value of the index-th node in the linked list. If the index is invalid, return -1.\n        :type index: int\n        :rtype: int\n        \"\"\"\n        if index<0 or index>=self.size:\n            return -1\n        curr=self.head\n        while index>0:\n            curr=curr.next\n            index-=1\n        return curr.val\n\n    def addAtHead(self, val):\n        \"\"\"\n        Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.\n        :type val: int\n        :rtype: void\n        \"\"\"\n        self.addAtIndex(0, val)\n\n    def addAtTail(self, val):\n        \"\"\"\n        Append a node of value val to the last element of the linked list.\n        :type val: int\n        :rtype: void\n        \"\"\"\n        self.addAtIndex(self.size, val)\n\n    def addAtIndex(self, index, val):\n        \"\"\"\n        Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. If index is greater than the length, the node will not be inserted.\n        :type index: int\n        :type val: int\n        :rtype: void\n        \"\"\"\n        if index<0 or index>self.size: # when index==self.size, equals to insert after the last node\n            return\n        new=Node(val)\n        if index==0:\n            new.next=self.head\n            self.head=new\n        else:\n            curr=self.head\n            while index-1>0: # locate previous node\n                curr=curr.next\n                index-=1\n            new.next=curr.next\n            curr.next=new\n        self.size+=1\n\n    def deleteAtIndex(self, index):\n        \"\"\"\n        Delete the index-th node in the linked list, if the index is valid.\n        :type index: int\n        :rtype: void\n        \"\"\"\n        if index<0 or index>=self.size:\n            return\n        curr=self.head\n        if index==0:\n            self.head=curr.next\n        else:\n            while index-1>0:\n                curr=curr.next\n                index-=1\n            curr.next=curr.next.next\n        self.size-=1\n    \nif __name__==\"__main__\":\n    linkedList = MyLinkedList()\n    linkedList.addAtHead(1)\n    linkedList.addAtTail(3)\n    linkedList.addAtIndex(1, 2)\n    print(linkedList.get(1))\n    linkedList.deleteAtIndex(1)\n    print(linkedList.get(1))\n    ","sub_path":"707. Design Linked List.py","file_name":"707. Design Linked List.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"572043974","text":"\"\"\"1. Write a function `arithmetic` that takes 3 arguments: the first 2 are numbers, the third is the operation to be\nperformed on them. If the third argument is +, add them; if -, subtract; * - multiply; / - divide (the first by the\nsecond). In all other cases return the string `\"Unknown operation\"`.\n\"\"\"\n\n\ndef arithmetic(a, b, action):\n    if action == '/':\n        return a/b\n    elif action == '*':\n        return a*b\n    elif action == '+':\n        return a+b\n    elif action == '-':\n        return a-b\n    else:\n        return \"Unknown operation\"\n\n\nv1 = int(input(\"Enter the first value: \"))\nv2 = int(input(\"Enter the second value: \"))\naction = input(\"Enter + or - or / or *\")\nresult = arithmetic(v1, v2, action)\nprint(result)\n\n","sub_path":"hillel8/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"546097118","text":"\"\"\"\nThis file contains all the functions used to make the recognition with OCR\n\n@authors: BARTH Werner, BRUNET Julien, THOMAS Morgan\n\"\"\"\n\n##### IMPORTS #####\nimport pytesseract\nfrom PIL import Image\nfrom sys import platform as _platform\n\n\n##### FUNCTIONS #####\ndef detect_number(image):\n    \"\"\"\n    This function is used to make the recognition\n    :param image: the processed image\n    :return: the number recognized by OCR\n    \"\"\"\n    # Depending on the platform, the tesseract executable is not located at the same place\n    if _platform == \"linux\" or _platform == \"linux2\":\n        pytesseract.pytesseract.tesseract_cmd = r\"/usr/local/bin/tesseract\"\n    elif _platform == \"darwin\":\n        pytesseract.pytesseract.tesseract_cmd = r\"/usr/local/Cellar/tesseract/4.1.0/bin/tesseract\"\n    elif _platform == \"win32\":\n        pytesseract.pytesseract.tesseract_cmd = r\"C:\Program Files\Tesseract-OCR\tesseract.exe\"\n    elif _platform == \"win64\":\n        pytesseract.pytesseract.tesseract_cmd = r\"C:\Program Files\Tesseract-OCR\tesseract.exe\"\n\n    ready = Image.fromarray(image)\n    config = r'--dpi 300 --psm 11 -c tessedit_char_whitelist=0123456789'\n    text = pytesseract.image_to_string(ready, config = config)\n    # --psm 11 : Sparse text. 
Find as much text as possible in no particular order (see the Tesseract documentation)\n    # -c tessedit_char_whitelist=0123456789 : detect only digits\n    return text","sub_path":"Recognition.py","file_name":"Recognition.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"134366292","text":"# This script contains some basic methods to shorten code\n\nimport numpy as np\n\ndef rebin(cross, xshape, yshape):\n    \"\"\"Rebins a 2-Dimensional array into an array with shape (xshape, yshape)\"\"\"\n    copy = cross.reshape(cross.shape[0], yshape, cross.shape[1]//yshape).mean(axis=2)\n    copy = copy.reshape(xshape, copy.shape[0]//xshape, copy.shape[1]).mean(axis=1)\n    return copy\n\ndef get_gate(profile, minphase, maxphase, scale=True, flip_axis=True):\n    \"\"\"Given a three dimensional pulse profile, returns a two dimensional dynamic spectrum\"\"\"\n    total_phase_bins = profile.shape[-1]\n    gate = profile[:,:,minphase:maxphase].mean(axis=-1)\n    if scale:\n        gate *=(total_phase_bins/(maxphase-minphase))\n    if flip_axis:\n        gate = gate.T\n    \n    return gate\n\ndef get_on_gate(profile, min_on, max_on, min_off, max_off):\n    \"\"\"Returns the scaled on gate\"\"\"\n    on = get_gate(profile, min_on, max_on)\n    off = get_gate(profile, min_off, max_off)\n    gate = on/off - 1\n    return gate\n\n\n","sub_path":"Basic_Functions.py","file_name":"Basic_Functions.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"296215373","text":"NAME = 'name'\nLATITUDE = 'latitude'\nLONGITUDE = 'longitude'\nPOPULATION = 'population'\n\n\ndef file_to_node_list(file_name, r):\n    node_list = []\n    flowfile = open(file_name)\n    for line in flowfile:\n        the_data = line.split(',')\n        node_list.append({NAME: the_data[2],\n                          LATITUDE: float(the_data[3]),\n                          LONGITUDE: float(the_data[4]),\n                          POPULATION: int(the_data[5])})\n    flowfile.close()\n    return node_list\n\n\nnode_list_base_100 = file_to_node_list(\"base_100.csv\", 1)\n\n\nfrom matplotlib import pyplot\nfrom math import *\n\ndef distance(dic_1,dic_2):\n    return sqrt(((dic_1[LATITUDE]-dic_2[LATITUDE])*40000/360)**2 + ((dic_1[LONGITUDE]-dic_2[LONGITUDE])*40000/360)**2)\n\ndef dictionary_neighbours(node_list,i,length_list,distance_max) :\n    neighbours = {}\n    for j in range(length_list):\n        if j != i :\n            dist = distance(node_list[j],node_list[i])\n            if dist <= distance_max :\n                neighbours[node_list[j][NAME]] = dist\n    return neighbours\n\ndef aretes_neighbours(node_list,length_list,distance_max) :\n    x1=[]\n    x2=[]\n    y1=[]\n    y2=[]\n    for i in range (length_list):\n        for j in range(length_list):\n            if j != i :\n                dist = distance(node_list[j],node_list[i])\n                if dist <= distance_max :\n                    x1.append(node_list[j][LONGITUDE])\n                    x2.append(node_list[i][LONGITUDE])\n                    y1.append(node_list[j][LATITUDE])\n                    y2.append(node_list[i][LATITUDE])\n    return ([x1,x2],[y1,y2])\n\ndef node_list_to_dictionary (node_list,distance_max):\n    length_list = len(node_list)\n    mon_graphe = {}\n    for i in range(length_list) :\n        mon_graphe[node_list[i][NAME]]={\"latitude\":node_list[i][LATITUDE],\"population\":node_list[i][POPULATION],\"longitude\":node_list[i][LONGITUDE],\"neighbours\":dictionary_neighbours(node_list,i,length_list,distance_max)}\n    return mon_graphe\n\nimport time\n\nbeginning = time.clock()\n\nmon_graphe = node_list_to_dictionary(node_list_base_100, 230)\n\nthe_end = time.clock()\nprint(the_end - beginning)\n\n\n#for i in mon_graphe :\n#    
print(i,mon_graphe[i]['neighbours'])\n\ndef lists_lat_and_long (mon_graphe):\n latitudes = []\n longitudes = []\n for i in mon_graphe :\n latitudes.append(mon_graphe[i][\"latitude\"])\n longitudes.append(mon_graphe[i][\"longitude\"])\n return latitudes,longitudes\n\n(latitudes,longitudes) = lists_lat_and_long(mon_graphe)\n([x1,x2],[y1,y2])= aretes_neighbours(node_list_base_100, len(node_list_base_100), 230)\n\npyplot.scatter(longitudes, latitudes)\npyplot.plot([x1,x2],[y1,y2])\npyplot.show()\n\ndef trouver_min(non_traite,distances_to_start):\n deb=non_traite.pop()\n non_traite.add(deb)\n for ville in non_traite :\n if distances_to_start[ville] < distances_to_start[deb] :\n deb = ville\n return deb\n\n\ndef dijkstra (mon_graphe,start,end) :\n infini = inf\n distances_to_start = {}\n non_traite = set([])\n predecesseur = {}\n for ville in mon_graphe:\n non_traite.add(ville)\n distances_to_start[ville] = infini\n predecesseur[ville] = \" \"\n distances_to_start[start] = 0\n predecesseur[start] = start\n while non_traite != set([]) :\n sommet_min = trouver_min(non_traite,distances_to_start)\n non_traite.discard(sommet_min)\n for neighbour in mon_graphe[sommet_min][\"neighbours\"] :\n if distances_to_start[neighbour] > distances_to_start[sommet_min] + mon_graphe[sommet_min][\"neighbours\"][neighbour]:\n distances_to_start[neighbour] = distances_to_start[sommet_min] + mon_graphe[sommet_min][\"neighbours\"][neighbour]\n predecesseur[neighbour] = sommet_min\n chemin = [end]\n if predecesseur[end] != \" \" :\n while end != start :\n print(chemin)\n chemin.append(predecesseur[end])\n end=predecesseur[end]\n chemin.reverse()\n return chemin\n\nbeginning = time.clock()\n\nchemin = dijkstra(mon_graphe,\"Paris\",\"Bordeaux\")\nprint(chemin)\n\nthe_end = time.clock()\nprint(the_end - beginning)\n\ndef chemins_de_Marseille (mon_graphe) :\n x1 = []\n x2 = []\n y1 = []\n y2 = []\n for ville in mon_graphe :\n if ville != \"Marseille\" :\n chemin1 = dijkstra(mon_graphe, \"Marseille\", ville)\n for i in range (len(chemin1)-1):\n x1.append(mon_graphe[chemin1[i]]['longitude'])\n x2.append(mon_graphe[chemin1[i+1]]['longitude'])\n y1.append(mon_graphe[chemin1[i]]['latitude'])\n y2.append(mon_graphe[chemin1[i+1]]['latitude'])\n return ([x1, x2], [y1, y2])\n\n([x1, x2], [y1, y2]) = chemins_de_Marseille (mon_graphe)\n\npyplot.scatter(longitudes, latitudes)\npyplot.plot([x1,x2],[y1,y2])\npyplot.show()\n","sub_path":"projetMappy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"380189080","text":"# -*- coding: utf-8 -*-\n\n# external imports\nimport math\nimport collections\nimport re\nimport copy\nimport click\nimport flask\nimport werkzeug.routing\nimport peewee\n\n# import peewee error classes so projects can match these errors without importing peewee\nfrom peewee import fn, CompositeKey, IntegrityError, DoesNotExist, DataError, DatabaseError, OperationalError, ProgrammingError\n\n# parent imports\n#import poobrains\nfrom poobrains import app\nimport poobrains.helpers\nimport poobrains.rendering\nimport poobrains.form\n# internal imports\nfrom . 
import fields\n\nif isinstance(app.db, peewee.SqliteDatabase):\n\n @app.db.func('regexp')\n def nonretardedsqliteregexp(regexp, value):\n return re.search(regexp, value) is not None\n\n\ndef RegexpConstraint(field_name, regexp):\n\n operation = app.db._operations['REGEXP'] # peewee.OP.REGEXP used to always hold the correct value, what happen?\n\n if 'sqlite' in app.db.__class__.__name__.lower():\n regexp_compat = '\"%s\"' % regexp\n else:\n regexp_compat = \"'%s'\" % regexp\n\n return peewee.Check('\"%s\" %s %s' % (field_name, operation, regexp_compat))\n\n\nclass OrderableMetadata(peewee.Metadata):\n\n \"\"\"\n This class ports over peewee _meta.order_by functionality, which was dropped in 3.0\n \"\"\"\n\n order_by = None\n\n def prepared(self):\n\n if self.order_by:\n\n norm_order_by = []\n\n for item in self.order_by:\n\n if isinstance(item, peewee.Ordering):\n\n # Orderings .node references a field specific to an upstream model.\n # Therefore, we can't just adopt them.\n if item.direction == 'DESC':\n item = '-' + item.node.name\n else:\n item = item.node.name\n\n desc = False\n if item.startswith('-'):\n desc = True\n item = item[1:]\n\n field = self.fields[item]\n\n if desc:\n norm_order_by.append(field.desc())\n else:\n norm_order_by.append(field.asc())\n\n self.order_by = norm_order_by\n\n\nclass ModelBase(poobrains.helpers.MetaCompatibility, peewee.ModelBase):\n\n def __new__(cls, name, bases, attrs):\n\n cls = super(ModelBase, cls).__new__(cls, name, bases, attrs)\n cls._meta.prepared()\n\n return cls\n\n\nclass Model(peewee.Model, poobrains.helpers.ChildAware, metaclass=ModelBase):\n\n __metaclass__ = ModelBase\n\n class Meta:\n\n database = app.db\n model_metadata_class = OrderableMetadata # port of peewees dropped _meta.order_by feature\n order_by = ['-id']\n\n\n @classmethod\n def load(cls, handle):\n\n q = cls.select()\n\n if isinstance(handle, str):\n handle = cls.string_handle(handle)\n\n elif type(handle) not in (tuple, list):\n handle = [handle]\n\n assert len(handle) == len(cls._meta.handle_fields), \"Handle length mismatch for %s, expected %d but got %d!\" % (cls.__name__, len(cls._meta.handle_fields), len(handle))\n\n for field_name in cls._meta.handle_fields:\n field = getattr(cls, field_name)\n idx = cls._meta.handle_fields.index(field_name)\n q = q.where(field == handle[idx])\n\n return q.get()\n\n\n @property\n def handle_string(self):\n\n segments = []\n\n for field_name in self._meta.handle_fields:\n try:\n segment = getattr(self, field_name)\n except peewee.DoesNotExist: # Means we have a ForeignKey without assigned/valid value.\n segment = None\n\n if isinstance(segment, Model):\n segment = str(segment._pk)\n else:\n segment = str(segment)\n\n #segment = segment.replace('.', ',') # since dots are used in FormDataParser to split data into a hierarchy, dots in field names will fuck shit up\n\n segments.append(segment)\n\n return ':'.join(segments)\n\n\n @classmethod\n def string_handle(cls, string):\n \n if string.find(':'):\n return tuple(string.split(':'))\n\n return string\n\n\n def validate(self):\n pass\n\n\n @classmethod\n def ordered(cls, *fields):\n\n query = cls.select(*fields)\n\n if cls._meta.order_by:\n query = query.order_by(*cls._meta.order_by)\n\n return query\n\n\n def save(self, *args, **kwargs):\n\n self.validate()\n return super(Model, self).save(*args, **kwargs)\n\n\n def __repr__(self):\n try:\n return \"<%s[%s]>\" % (self.__class__.__name__, self._pk)\n except Exception:\n return \"<%s, unsaved/no primary key>\" % 
self.__class__.__name__\n\n\nclass Storable(Model, poobrains.rendering.Renderable):\n\n \"\"\"\n A `Renderable` `Model` associated to a single table in the database.\n \"\"\"\n\n class Meta:\n abstract = True\n modes = collections.OrderedDict([('full', 'read')])\n\n def __init__(self, *args, **kwargs):\n\n super(Storable, self).__init__(*args, **kwargs)\n self.url = self.instance_url # make .url callable for class and instances\n\n \n @property\n def title(self):\n if self.name:\n return self.name\n\n elif self._pk:\n return \"%s %s\" % (self.__class__.__name__, str(self._pk))\n\n return \"New %s\" % self.__class__.__name__\n\n\n def instance_url(self, mode='full', quiet=False, **url_params):\n\n if quiet:\n try:\n return app.get_url(self.__class__, handle=self.handle_string, mode=mode, **url_params)\n except:\n return False\n\n return app.get_url(self.__class__, handle=self.handle_string, mode=mode, **url_params)\n\n\n @classmethod\n def class_view(cls, mode='teaser', handle=None, **kwargs):\n\n instance = cls.load(handle)\n return instance.view(handle=handle, mode=mode, **kwargs)\n\n\n @classmethod\n def list(cls, op, user, handles=None, ordered=True, fields=[]):\n\n if ordered: # whether to use the default ordering for this model. mostly here because doing this *always* would break using this in UNIONs\n query = cls.ordered(*fields)\n else:\n query = cls.select(*fields)\n\n if handles:\n \n keyed_handles = collections.OrderedDict()\n for field_name in cls._meta.handle_fields:\n keyed_handles[field_name] = []\n\n for handle in handles:\n \n for field_name in cls._meta.handle_fields:\n idx = cls._meta.handle_fields.index(field_name) \n keyed_handles[field_name].append(handle[idx])\n\n for field_name in cls._meta.handle_fields:\n field = getattr(cls, field_name)\n query = query.where(field.in_(keyed_handles[field_name]))\n \n return query\n\n\nclass Named(Storable):\n\n class Meta:\n handle_fields = ['name']\n\n name = fields.CharField(index=True, unique=True, null=False, constraints=[RegexpConstraint('name', '^[a-z0-9_\\-]+$')])\n\n def __init__(self, *args, **kwargs):\n\n super(Named, self).__init__(*args, **kwargs)\n\n\n def instance_url(self, mode='full', quiet=False, **url_params):\n\n if quiet:\n try:\n return app.get_url(self.__class__, handle=self.name, mode=mode, **url_params)\n except:\n return False\n\n return app.get_url(self.__class__, handle=self.name, mode=mode, **url_params)\n\n @property\n def ref_id(self):\n\n return \"%s-%s\" % (self.__class__.__name__.lower(), self.name)\n\n\nclass Listing(poobrains.rendering.Renderable):\n\n #TODO: Make a Listing class that works with non-Storable Renderables?\n\n cls = None\n mode = None\n title = None\n offset = None\n limit = None\n items = None\n pagecount = None\n count = None\n pagination = None\n current_page = None\n menu_actions = None\n\n def __init__(self, cls, mode='teaser', title=None, query=None, offset=0, limit=None, menu_actions=None, menu_related=None, pagination_options=None, **kwargs):\n\n super(Listing, self).__init__(**kwargs)\n self.cls = cls\n self.mode = mode\n self.offset = offset\n self.menu_actions = menu_actions\n self.menu_related = menu_related\n\n if title is not None:\n self.title = title\n else:\n self.title = cls.__name__\n\n if limit is None:\n self.limit = app.config['PAGINATION_COUNT']\n else:\n self.limit = limit\n\n if query is None:\n op = cls._meta.modes[mode]\n query = cls.list(op, flask.g.user)\n\n if pagination_options is None:\n pagination_options = {}\n\n endpoint = flask.request.endpoint\n 
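# make sure pagination links point at the '_offset' variant of the current endpoint\n        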
if not endpoint.endswith('_offset'):\n endpoint = '%s_offset' % (endpoint,)\n \n self.pagination = Pagination([query], offset, endpoint, **pagination_options)\n self.items = self.pagination.results\n\n\n def templates(self, mode=None):\n\n tpls = []\n\n for x in [self.__class__] + self.__class__.ancestors():\n\n name = x.__name__.lower()\n \n if mode:\n if issubclass(x, Listing):\n tpls.append('%s-%s-%s.jinja' % (name, mode, self.cls.__name__))\n tpls.append('%s-%s.jinja' % (name, mode))\n\n tpls.append('%s.jinja' % name)\n\n return tpls\n\n\nclass Pagination(object):\n\n menu = None # the actual pagination menu\n options = None # optional parameters for flask.url_for\n limit = None\n offset = None\n queries = None\n counts = None\n results = None\n page_info = None\n num_results = None\n num_pages = None\n current_page = None\n\n\n def __init__(self, queries, offset, endpoint, limit=None, **options):\n \n self.queries = queries\n self.offset = offset\n self.endpoint = endpoint\n self.options = options\n\n if limit is not None:\n self.limit = limit\n else:\n self.limit = app.config['PAGINATION_COUNT']\n\n self.menu = False\n self.counts = [(q, q.count()) for q in self.queries]\n self.results = []\n self.page_info = collections.OrderedDict()\n self.num_results = sum([x[1] for x in self.counts])\n self.num_pages = int(math.ceil(float(self.num_results) / self.limit))\n self.current_page = int(math.floor(self.offset / float(self.limit))) + 1\n\n position = 0\n\n range_lower = self.offset\n range_upper = self.offset + self.limit - 1\n\n for query, count in self.counts:\n\n if count > 0:\n\n first_position = position\n last_position = first_position + count - 1\n\n on_current_page = first_position <= range_upper and last_position >= range_lower\n\n if on_current_page:\n \n self.page_info[query] = {}\n\n starts_before_page = first_position < range_lower\n starts_within_page = first_position >= range_lower and first_position <= range_upper\n ends_after_page = last_position > range_upper\n\n if starts_before_page:\n query = query.offset(range_lower - first_position)\n else:\n query = query.offset(0)\n\n if starts_within_page and ends_after_page:\n query = query.limit(self.limit - (first_position - range_lower))\n else:\n query = query.limit(self.limit)\n\n for result in query:\n self.results.append(result)\n\n position += count\n\n if self.num_pages > 1:\n\n self.menu = poobrains.rendering.Menu('pagination')\n\n for i in range(0, self.num_pages):\n\n page_num = i + 1\n active = page_num == self.current_page\n kw = copy.copy(self.options)\n kw['offset'] = i * self.limit\n\n self.menu.append(\n flask.url_for(self.endpoint, **kw),\n page_num,\n active\n )\n\n\nclass StorableParamType(poobrains.form.types.ParamType):\n\n baseclass = None\n\n def __init__(self, baseclass=Storable):\n\n super(StorableParamType, self).__init__()\n self.baseclass = baseclass\n\n def convert(self, value, param, ctx):\n\n if value == '':\n return None\n\n if isinstance(value, self.baseclass):\n return value # apparently we need this function to be idempotent? Didn't even knew that was a real word.\n\n storables = self.baseclass.class_children_keyed(lower=True)\n\n if value.lower() in storables:\n return storables[value.lower()] # holy shit it's lined up! D:\n\n self.fail(u'Not a valid storable: %s. 
Try one of %s' % (value, storables.keys()))\n\npoobrains.form.types.StorableParamType = StorableParamType\npoobrains.form.types.STORABLE = StorableParamType()\n","sub_path":"poobrains/storage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"89983610","text":"import os\nimport numpy as np\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV\nfrom sklearn.svm import SVC, LinearSVC\n\ndef read_embeddings(fname):\n with open(fname) as f:\n f.readline()\n names = []\n data = []\n for line in f:\n line = line.strip().split()\n names.append(line[0])\n data.append([float(v) for v in line[1:]])\n return names, data\n \ndef read_labels(basedir):\n classes = os.listdir(basedir)\n graph2label = {}\n for clss in classes:\n path = os.path.join(basedir, clss)\n graphs = os.listdir(path)\n for graph in graphs:\n graph2label[graph.split('.')[0]] = clss\n return graph2label\n\ndef organize_data(graph2label, embed_vnames, embed_vecs):\n N = len(graph2label)\n data = [0 for _ in range(N)]\n labels = [0 for _ in range(N)]\n for i in range(len(embed_vnames)):\n if \"dt\" in embed_vnames[i]:\n _, gname = embed_vnames[i].split(\"_\")\n if gname == '0': continue\n data[int(gname)-1] = embed_vecs[i]\n labels[int(gname)-1] = int(graph2label[gname])\n return np.array(data), np.array(labels)\n\ndef load_data(folder, emb_fname):\n label_dir = \"{f}/graphs\".format(f=folder)\n names, data = read_embeddings(emb_fname)\n g2l = read_labels(label_dir)\n X, y = organize_data(g2l, names, data)\n return X, y\n\ndef learn(folder, emb):\n X, y = load_data(folder, emb)\n print(\"Dataset {}\".format(folder))\n print(\"Shapes {} {}\".format(X.shape, y.shape))\n\n params = {'C':[0.01,0.1,1,10,100,1000]}\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n classifier = GridSearchCV(LinearSVC(), params, cv=5, scoring='f1')\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n score = accuracy_score(y_test, y_pred)\n print(\"Score {}\".format(score))\n print(\"Best params {}\".format(classifier.best_params_))\n print(classification_report(y_test, y_pred))\n return score, classifier\n\nimport argparse\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Run classifier\")\n parser.add_argument(\"dataset\", help=\"The dataset folder to use\")\n parser.add_argument(\"embedding\", help=\"Which embedding file to use\")\n args = parser.parse_args()\n\n score, classifier = learn(args.dataset, args.embedding)\n","sub_path":"graph2vec/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"148124141","text":"from django.urls import path\nfrom .views import (TeacherLoginAPI, LogoutAPI, TeacherSemesterCourseAPI, TeacherNextSemesterCourseAPI,\n TeacherCourseStudentListAPI, TeacherCourseGradesAPI)\n\n\nurlpatterns = [\n path('login/', TeacherLoginAPI.as_view()),\n path('logout/', LogoutAPI.as_view()),\n path('course/', TeacherSemesterCourseAPI.as_view()),\n path('course/next/', TeacherNextSemesterCourseAPI.as_view()),\n path('course/students-list//', TeacherCourseStudentListAPI.as_view()),\n path('course/grades//', 
TeacherCourseGradesAPI.as_view()),\n]\n","sub_path":"jwc/teacher_urls.py","file_name":"teacher_urls.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"322166685","text":"#Enumerate: extract values from a list together with their index numbers\r\nfor i, v in enumerate(['tic','tac','tok']):\r\n    print(i,v)\r\n\r\nmylist = [\"a\",\"b\",\"c\",\"d\"]\r\na = list(enumerate(mylist))\r\nfor lst in a:\r\n    print(lst)\r\n\r\nb={i:j for i,j in enumerate('Gacheon University is an academic institute located in South Korea'.split()) }\r\nfor i,j in b.items():\r\n    print(i,j)\r\n\r\n#zip : extract two lists in parallel\r\nalist = ['a1','a2','a3']\r\nblist = ['b1','b2','b3']\r\n\r\nfor a,b in zip(alist, blist):\r\n    print(a,b)\r\n","sub_path":"1)pythonic_code/2.EnumerateandZip.py","file_name":"2.EnumerateandZip.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"360784460","text":"\"\"\"\nThis module contains the classes and methods for creating iso-surface structures\nfrom Pymatgen bandstructure objects. The iso-surfaces are found using the\nScikit-image package.\n\"\"\"\nimport itertools\nimport warnings\nfrom copy import deepcopy\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\nfrom monty.json import MSONable\nfrom skimage import measure\nfrom skimage.measure import marching_cubes_lewiner\nfrom trimesh.intersections import slice_faces_plane\n\nfrom ifermi.brillouin_zone import WignerSeitzCell, ReciprocalCell, ReciprocalSpace\nfrom pymatgen import Spin, Structure\nfrom pymatgen.electronic_structure.bandstructure import BandStructure\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n\n\nclass FermiSurface(MSONable):\n    \"\"\"An object containing Fermi Surface data.\n\n    Only stores information at k-points where energy(k) == Fermi energy.\n    \"\"\"\n\n    def __init__(\n        self,\n        isosurfaces: List[Tuple[np.ndarray, np.ndarray]],\n        reciprocal_space: ReciprocalSpace,\n        structure: Structure,\n    ):\n        \"\"\"\n        Get a Fermi Surface object.\n\n        Args:\n            isosurfaces: The isosurfaces as a List of ``(vertices, faces)``.\n            reciprocal_space: The reciprocal space associated with the Fermi surface.\n            structure: The structure.\n        \"\"\"\n        self.isosurfaces = isosurfaces\n        self.reciprocal_space = reciprocal_space\n        self.structure = structure\n        self.n_surfaces = len(self.isosurfaces)\n\n    @classmethod\n    def from_band_structure(\n        cls,\n        band_structure: BandStructure,\n        kpoint_dim: np.ndarray,\n        mu: float = 0.0,\n        spin: Optional[Spin] = None,\n        wigner_seitz: bool = False,\n        symprec: float = 0.001,\n    ) -> \"FermiSurface\":\n        \"\"\"\n        Args:\n            band_structure: A band structure. The k-points must cover the full\n                Brillouin zone (i.e., not just be the irreducible mesh). Use\n                the ``ifermi.interpolator.Interpolator`` class to expand the k-points to\n                the full Brillouin zone if required.\n            kpoint_dim: The dimension of the grid in reciprocal space on which the\n                energy eigenvalues are defined.\n            mu: Energy offset from the Fermi energy at which the iso-surface is\n                defined. Useful for visualising the effect of dopants on the\n                shape of the resulting iso-surface.\n            spin: The spin channel to plot. 
By default plots both spin channels.\n wigner_seitz: Controls whether the cell is the Wigner-Seitz cell\n or the reciprocal unit cell parallelepiped.\n symprec: Symmetry precision for determining whether the structure is the\n standard primitive unit cell.\n \"\"\"\n if np.product(kpoint_dim) != len(band_structure.kpoints):\n raise ValueError(\n \"Number of k-points ({}) in band structure does not match number of \"\n \"k-points expected from mesh dimensions ({})\".format(\n len(band_structure.kpoints), np.product(kpoint_dim)\n )\n )\n\n band_structure = deepcopy(band_structure) # prevent data getting overwritten\n\n structure = band_structure.structure\n fermi_level = band_structure.efermi + mu\n bands = band_structure.bands\n frac_kpoints = [k.frac_coords for k in band_structure.kpoints]\n frac_kpoints = np.array(frac_kpoints)\n\n if wigner_seitz:\n prim = get_prim_structure(structure, symprec=symprec)\n if not np.allclose(prim.lattice.matrix, structure.lattice.matrix, 1e-5):\n warnings.warn(\"Structure does not match expected primitive cell\")\n\n reciprocal_space = WignerSeitzCell.from_structure(structure)\n bands, frac_kpoints, kpoint_dim = _expand_bands(\n bands, frac_kpoints, kpoint_dim\n )\n\n else:\n reciprocal_space = ReciprocalCell.from_structure(structure)\n\n kpoint_dim = tuple(kpoint_dim.astype(int))\n isosurfaces = compute_isosurfaces(\n bands,\n kpoint_dim,\n fermi_level,\n reciprocal_space,\n spin=spin,\n )\n\n return cls(isosurfaces, reciprocal_space, structure)\n\n def project_data(self, proj_plane: tuple):\n projected_band = []\n\n for i, band in enumerate(self.isosurfaces):\n verts = band[0]\n faces = band[1]\n projected_verts = []\n\n for vertex in verts:\n projected_verts.append(project(vertex, proj_plane))\n\n projected_band.append([projected_verts, faces])\n\n return projected_band\n\n\ndef compute_isosurfaces(\n bands: Dict[Spin, np.ndarray],\n kpoint_dim: Tuple[int, int, int],\n fermi_level: float,\n reciprocal_space: ReciprocalSpace,\n spin: Optional[Spin] = None,\n) -> List[Tuple[np.ndarray, np.ndarray]]:\n \"\"\"\n Compute the isosurfaces at a particular energy level.\n\n Args:\n bands: The band energies, given as a dictionary of ``{spin: energies}``, where\n energies has the shape (nbands, nkpoints).\n kpoint_dim: The k-point mesh dimensions.\n fermi_level: The energy at which to calculate the Fermi surface.\n reciprocal_space: The reciprocal space representation.\n spin: Which spin channel to calculate isosurfaces for. 
By default use both\n spin channels if available.\n\n Returns:\n A list of isosurfaces given as ``(vertices, faces)``.\n \"\"\"\n rlat = reciprocal_space.reciprocal_lattice\n\n if not spin:\n spin = list(bands.keys())\n elif isinstance(spin, Spin):\n spin = [spin]\n\n spacing = 1 / (np.array(kpoint_dim) - 1)\n\n isosurface = []\n for s in spin:\n ebands = bands[s]\n ebands -= fermi_level\n\n for band in ebands:\n # check if band crosses fermi level\n if np.nanmax(band) > 0 > np.nanmin(band):\n band_data = band.reshape(kpoint_dim)\n verts, faces, _, _ = marching_cubes_lewiner(band_data, 0, spacing)\n\n if isinstance(reciprocal_space, WignerSeitzCell):\n verts = np.dot(verts - 0.5, rlat) * 3\n verts, faces = _trim_surface(reciprocal_space, verts, faces)\n else:\n # convert coords to cartesian\n verts = np.dot(verts - 0.5, rlat)\n\n isosurface.append((verts, faces))\n\n return isosurface\n\n\ndef _trim_surface(\n wigner_seitz_cell: WignerSeitzCell,\n vertices: np.ndarray,\n faces: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Trim the surface to remove parts outside the cell boundaries.\n\n Will add new triangles at the boundary edges as necessary to produce a smooth\n surface.\n\n Args:\n wigner_seitz_cell: The reciprocal space object.\n vertices: The surface vertices.\n faces: The surface faces.\n\n Returns:\n The trimmed surface as a tuple of ``(vertices, faces)``.\n \"\"\"\n for center, normal in zip(wigner_seitz_cell.centers, wigner_seitz_cell.normals):\n vertices, faces = slice_faces_plane(vertices, faces, -normal, center)\n return vertices, faces\n\n\ndef _expand_bands(\n bands: Dict[Spin, np.ndarray], frac_kpoints: np.ndarray, kpoint_dim: np.ndarray\n) -> Tuple[Dict[Spin, np.ndarray], np.ndarray, np.ndarray]:\n \"\"\"\n Expand the band energies and k-points with periodic boundary conditions to form a\n 3x3x3 supercell.\n\n Args:\n bands: The band energies, given as a dictionary of ``{spin: energies}``, where\n energies has the shape (nbands, nkpoints).\n frac_kpoints: The fractional k-point coordinates.\n kpoint_dim: The k-point mesh dimensions.\n\n Returns:\n The expanded band energies, k-points, and k-point mesh dimensions.\n \"\"\"\n final_ebands = {}\n for spin, ebands in bands.items():\n super_ebands = []\n images = (-1, 0, 1)\n\n super_kpoints = np.array([], dtype=np.int64).reshape(0, 3)\n for i, j, k in itertools.product(images, images, images):\n k_image = frac_kpoints + [i, j, k]\n super_kpoints = np.concatenate((super_kpoints, k_image), axis=0)\n\n sort_idx = np.lexsort(\n (super_kpoints[:, 2], super_kpoints[:, 1], super_kpoints[:, 0])\n )\n final_kpoints = super_kpoints[sort_idx]\n\n for band in ebands:\n super_band = np.array([], dtype=np.int64)\n for _ in range(27):\n super_band = np.concatenate((super_band, band), axis=0)\n super_ebands.append(super_band[sort_idx])\n\n final_ebands[spin] = np.array(super_ebands)\n\n return final_ebands, final_kpoints, kpoint_dim * 3\n\n\ndef get_prim_structure(structure, symprec=0.01) -> Structure:\n \"\"\"\n Get the primitive structure.\n\n Args:\n structure: The structure.\n symprec: The symmetry precision in Angstrom.\n\n Returns:\n The primitive cell as a pymatgen Structure object.\n \"\"\"\n analyzer = SpacegroupAnalyzer(structure, symprec=symprec)\n return analyzer.get_primitive_standard_structure()\n\n\nclass FermiSurface2D(FermiSurface):\n def __init__(\n self,\n bs: BandStructure,\n hdims: list,\n rlattvec,\n slice_plane: tuple,\n contour,\n mu: float = 0.0,\n soc: bool = False,\n ) -> None:\n \"\"\"\n Args:\n bs 
(BandStructure): A Pymatgen bandstructure object\n            hdims (list): The dimension of the grid in reciprocal space on which the energy eigenvalues\n                are defined.\n            rlattvec (np.array): The reciprocal space lattice vectors. See\n                pymatgen.electronic_structure.bandstructure.lattice_rec._matrix for format.\n            slice_plane (tuple): The plane along which the surface is to be sliced. Only (0,0,1), (0,1,0)\n                or (1,0,0) are currently supported.\n            mu (float, optional): Energy offset from the Fermi energy at which\n                the iso-surface is defined. Useful for visualising the effect of\n                dopants on the shape of the resulting iso-surface.\n            kpoints (np.array): A numpy list of the kpoints in fractional coordinates\n            soc (bool, optional): Set to True if the up and down spins are both to be plotted.\n                Otherwise, spins will be treated as degenerate and only one component will be\n                plotted.\n            is_spin_polarised (bool, optional): set to True if spin polarised.\n            n_surfaces (int): Number of bands which cross the Fermi-Surface\n        \"\"\"\n\n        self._mu = mu\n\n        self._fermi_level = bs.efermi + mu\n\n        self._kpoints = np.array([k.frac_coords for k in bs.kpoints])\n\n        # band energies keyed by spin and the slicing contour, both used by slice_data below\n        self._bands = bs.bands\n\n        self._contour = contour\n\n        self._hdims = hdims\n\n        dims = 2 * hdims + 1\n\n        self._dims = dims\n\n        self._k_dim = (dims[0], dims[1], dims[2])\n\n        self._rlattvec = rlattvec\n\n        self._slice_plane = slice_plane\n\n        self.slice_data(bs, self._slice_plane)\n\n        self.compute_isosurfaces(self._energies, self._fermi_level)\n\n        self._n_bands = len(self._contours)\n\n        self._soc = soc\n\n        self._structure = bs.structure\n\n    def slice_data(self, bs, slice_plane: tuple):\n\n        for spin in self._bands.keys():\n\n            ebands = self._bands[spin]\n\n            plane_bands = []\n\n            dis_array = [\n                plane_dist(np.append(slice_plane, -self._contour), i) for i in self._kpoints\n            ]\n\n            for i, j in enumerate(slice_plane):\n                if not j == 0:\n                    if i == 0:\n                        sort_indx = np.argsort(dis_array)\n                        plane_mesh = [1, self._dims[1], self._dims[2]]\n                    if i == 1:\n                        sort_indx = np.argsort(dis_array)\n                        plane_mesh = [self._dims[0], 1, self._dims[2]]\n\n                    if i == 2:\n                        sort_indx = np.argsort(dis_array)\n                        plane_mesh = [self._dims[0], self._dims[1], 1]\n\n            sorted_dist = np.asarray(dis_array)[sort_indx]\n            sorted_energies = ebands[:, sort_indx]\n            sorted_kpoints = self._kpoints[sort_indx]\n\n            for index, dist in enumerate(sorted_dist):\n                if np.abs(dist - np.min(sorted_dist)) < 0.001:\n                    plane_bands.append(sorted_energies[:, index])\n\n            sort_idx = np.lexsort(\n                (sorted_kpoints[:, 2], sorted_kpoints[:, 1], sorted_kpoints[:, 0],)\n            )\n            energies_sorted = sorted_energies[:, sort_idx]\n\n            self._energies = energies_sorted.reshape(plane_mesh)\n\n    def compute_isosurfaces(self, energies, contour: float):\n\n        contours = measure.find_contours(energies, contour)\n\n        self._contours = contours\n\n\ndef project(vector, plane):\n    theta = np.array([0, 0, np.pi])\n    a = np.array(\n        [\n            [1, 0, 0],\n            [0, np.cos(theta[0]), np.sin(theta[0])],\n            [0, -np.sin(theta[0]), np.cos(theta[0])],\n        ]\n    )\n\n    b = np.array(\n        [\n            [np.cos(theta[1]), 0, -np.sin(theta[1])],\n            [0, 1, 0],\n            [np.sin(theta[1]), 0, np.cos(theta[1])],\n        ]\n    )\n\n    c = np.array(\n        [\n            [np.cos(theta[2]), np.sin(theta[2]), 0],\n            [-np.sin(theta[2]), np.cos(theta[2]), 0],\n            [0, 0, 1],\n        ]\n    )\n\n    d = np.matmul(np.matmul(a, np.matmul(b, c)), vector)\n\n    e = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n\n    f = np.matmul(e, np.append(d, 1))\n\n    b_x = f[0] / f[3]\n    b_y = f[1] / f[3]\n\n    return [b_x, b_y, 0]\n\n\ndef plane_dist(slice_plane, vertex):\n    return (\n        np.linalg.norm(\n            slice_plane[0] * 
vertex[0]\n + slice_plane[1] * vertex[1]\n + slice_plane[2] * vertex[2]\n + slice_plane[3]\n )\n ) / (np.sqrt(slice_plane[0] ** 2 + slice_plane[1] ** 2 + slice_plane[2] ** 2))\n","sub_path":"ifermi/fermi_surface.py","file_name":"fermi_surface.py","file_ext":"py","file_size_in_byte":14009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"437914520","text":"\"\"\"\nDefinition of Interval.\nclass Interval(object):\n def __init__(self, start, end):\n self.start = start\n self.end = end\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param intervals: Sorted interval list.\n @param newInterval: new interval.\n @return: A new interval list.\n \"\"\"\n\n def insert(self, intervals, newInterval):\n # write your code here\n s, e = newInterval.start, newInterval.end\n\n parts = merge, left, right = [], [], []\n\n for i in intervals:\n parts[(i.end < s) - (i.start > e)].append(i)\n\n if merge:\n s, e = min(s, merge[0].start), max(e, merge[-1].end)\n\n return left + [Interval(s, e)] + right\n","sub_path":"lintcode/30-insert-interval.py","file_name":"30-insert-interval.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"342940158","text":"import unittest\nimport wx\nfrom pyparsing import ParseException\n\nfrom svg.css.transform import *\n\n#list of tuples: parser, string, result\ntransformTestsGood = [\n (skewY, \"skewY(10)\", [\"skewY\", [10]]),\n (skewX, \"skewX(10)\", [\"skewX\", [10]]),\n (rotate, \"rotate(90)\", [\"rotate\", [90]]),\n (rotate, \"rotate(90, 10 10)\", [\"rotate\", [90,10,10]]),\n (scale, 'scale(.2, .2)', [\"scale\", [0.2, 0.2]])\n]\n\n#parse, string - exception is always ParseException\ntransformTestsError = [\n (skewY, \"skewY 10\"),\n (skewX, \"skewX (45\"),\n (rotate, \"rotate\"),\n]\n\nclass TestTransformParser(unittest.TestCase):\n def testTransformList(self):\n self.assertEqual(\n transformList.parseString(\n \"matrix(1,2,3,4,5,6) translate(-10), scale(23, 45.9)\"\n ).asList(),\n [\n [\"matrix\", [1,2,3,4,5,6]],\n [\"translate\", [-10]],\n [\"scale\", [23, 45.9]]\n ]\n )\n def testTransformGood(self):\n for parser, string, result in transformTestsGood:\n self.assertEqual(\n transform.parseString(string).asList(),\n result\n )\n def testTransformError(self):\n for parser, string in transformTestsError:\n self.assertRaises(\n ParseException,\n transform.parseString,\n string\n )\n def testPartsGood(self):\n for parser, string, result in transformTestsGood:\n self.assertEqual(\n parser.parseString(string).asList(),\n result\n )\n def testPartsError(self):\n for parser, string in transformTestsError:\n self.assertRaises(\n ParseException,\n parser.parseString,\n string\n )\n def testMatrixTransform(self):\n src = \"matrix(0.966764,0.000000,0.000000,1.062970,-8.322865,-4.427016)\"\n expected = [[\n 'matrix',\n [0.966764, 0.0, 0.0, 1.062970, -8.322865, -4.427016]\n ]]\n self.assertEqual(\n transformList.parseString(src).asList(),\n expected\n )\n\n\n\n","sub_path":"svg/tests/css/test_transform.py","file_name":"test_transform.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"349024259","text":"\"\"\"\n\nRu: Сортировка Пузырьком \n\nEng: Bubble sort\n\n\"\"\"\n\noldlist = [10, 5, 92, 85, 24, 23, 8, 203, 14, 67]\n\ndef bubble_sort(mylist):\n last_item = len(mylist) - 1\n for z in range(0, last_item):\n for x in range(0, last_item-z):\n if 
mylist[x] > mylist[x+1]:\n                mylist[x], mylist[x+1] = mylist[x+1], mylist[x]\n    return mylist\n\nprint('Oldlist =', oldlist)\nnewlist = bubble_sort(oldlist).copy()\nprint('Newlist =', newlist)","sub_path":"task-pro-3.py","file_name":"task-pro-3.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"116209926","text":"# Guess Number Game\n# Created by Kelvinzhao\n# This is a simple game, player has 5 chances to guess a random integer number\n# between 1 and 9. system will catch ValueError with 'try...except' statement.\n\nimport random\n\nsecret_num = random.randrange(1, 10)\nguess_count = 0\nguess_limit = 5\n\nwhile guess_count < guess_limit:\n    try:\n        user_num = input(f\"guess a number from 1 to 9, \"\n                         f\"you still have {guess_limit-guess_count}\"\n                         f\"{' chance' if guess_count == guess_limit-1 else ' chances'} : \")\n        user_num = int(user_num)\n    except ValueError:\n        print(\"invalid input, try again.\")\n    else:\n        if user_num == secret_num:\n            print('You Win, the secret number is ', secret_num)\n            break\n        else:\n            print('Oops, not correct.')\n            guess_count += 1\nelse:\n    print('You Lose !')\n","sub_path":"Scripts/practise/Guessnumber.py","file_name":"Guessnumber.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"449476675","text":"# COPYRIGHT: see https://github.com/x19290/py.lisp4u/blob/master/0rights.md\n\nfrom __future__ import unicode_literals\n\nfrom unittest import TestCase\n\nfrom ....main import user_global as env\nfrom ....misc.progn import progn\nfrom ....reader.lisp_src import lisp\n\n_FEED = r'''\n(define a 0)\n(define b 1)\n(define x (list a b))\n(define _0 (eq? x x))\n(define y (copy x))\n(define c (car y))\n(define d (car (cdr y)))\n(define _1 (eq? x y))\n(define _2 (eq? a c))\n(define _3 (eq? b d))\n(define _4 (equal? x y))\n(define _5 (equal? a c))\n(define _6 (equal? 
b d))\n(list _0 _1 _2 _3 _4 _5 _6)\n'''\n\n\nclass T(TestCase):\n    def test(self):\n        expected = lisp(r'(#t #f #f #f #t #t #t)')\n        actual = progn(env, _FEED)\n        self.assertEqual(expected, actual)\n","sub_path":"lisp4u/zztest/t4/t40/t4010copy.py","file_name":"t4010copy.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"384883069","text":"from django.test import TestCase\n\nfrom apps.contact.models import Contact, RequestEntry, Signal\n\n\nclass MainTesterModels(TestCase):\n\n    def test_main(self):\n        \"\"\"\n        Test that the single seeded contact entry exists in the database.\n        \"\"\"\n        contact = Contact.objects.all()\n        self.assertEqual(len(contact), 1)\n        self.assertEqual(contact[0].first_name, 'Sergii')\n\n\nclass SpyTesterModels(TestCase):\n\n    def test_request_spy_for_creating(self):\n        \"\"\"\n        Check the status code of the page and that the number of unwatched\n        request entries grows by one after visiting a link.\n        \"\"\"\n        first_watched = len(RequestEntry.objects.filter(watched=False))\n        response = self.client.get('/')\n        self.assertEqual(response.status_code, 200)\n        second_watched = RequestEntry.objects.filter(watched=False)\n        self.assertEqual(len(second_watched), first_watched+1)\n\n\nclass SignalTester(TestCase):\n\n    def test_signal_create(self):\n        \"\"\"\n        Test the signal recorded on create.\n        \"\"\"\n        create_other_user()\n        count = len(Signal.objects.all())\n        self.assertNotEqual(count, 0)\n        latest = Signal.objects.last()\n        self.assertEqual(latest.action, 'create')\n\n    def test_signal_delete(self):\n        \"\"\"\n        Test the signal recorded on delete.\n        \"\"\"\n        Contact.objects.all().delete()\n        latest = Signal.objects.last()\n        self.assertEqual(latest.action, 'delete')\n\n    def test_signal_save(self):\n        \"\"\"\n        Test the signal recorded on save.\n        \"\"\"\n        sergii = Contact.objects.all()[0]\n        sergii.first_name = 'Andrii'\n        sergii.save()\n        latest = Signal.objects.last()\n        self.assertEqual(latest.action, 'save')\n\n\ndef create_other_user():\n    Contact.objects.create(first_name='Andrii', last_name='Vanzha',\n                           email='andrii@mail.ru',\n                           contacts='+380662453012',\n                           bio='His little story!')\n","sub_path":"apps/contact/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"523247287","text":"# -*- coding: utf-8 -*-\n\nSUIT_CONFIG = {\n    # header\n    'LIST_PER_PAGE': 200,\n    'ADMIN_NAME': 'Stocking',\n    'HEADER_DATE_FORMAT': 'l, j. 
F Y',\n 'HEADER_TIME_FORMAT': 'H:i',\n\n # menu\n 'SEARCH_URL': '/admin/auth/user/',\n 'MENU_ICONS': {\n 'sites': 'icon-leaf',\n 'auth': 'icon-lock',\n },\n 'MENU_OPEN_FIRST_CHILD': True,\n\n 'MENU': (\n '-',\n {'label': 'Money', 'icon': 'icon-user', 'models': (\n 'main.cardp2ptransfers',\n 'main.cryptotransfers',\n 'main.p24transin',\n 'main.liqpaytrans',\n \n )},\n {'label': 'Stock', 'icon': 'icon-user', 'models': (\n 'main.ordersmem',\n 'main.transmem',\n 'main.dealsmemory',\n )},\n {'label': 'Dashboard', 'icon': 'icon-user', 'models': (\n 'main.volatileconsts',\n 'main.stockstat',\n {'label': 'Баланс системы', 'url': '/admin/main/whole_balance'},\n )}, \n \n \n )\n \n}\n\n\n#'-',\n #{'label': 'Menu', 'icon': 'icon-user', \n #'models': (\n #'cwist.role',\n #'cwist.family',\n #'cwist.kidprofile',\n #'cwist.parentprofile',\n #{'label': 'Innovators', 'url': '/admin/innovators/'},\n #'cwist.shippingaddress',\n #{'label': 'Whole Balance', 'url': 'main/whole_balance'},\n #)},\n\n\n\n\n\n\n\n","sub_path":"crypton/suit.py","file_name":"suit.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"494229541","text":"\"\"\"Test the module easy ensemble.\"\"\"\n# Authors: Guillaume Lemaitre \n# Christos Aridas\n# License: MIT\n\n\nfrom __future__ import print_function\n\nfrom collections import Counter\n\nimport numpy as np\nfrom sklearn.utils.testing import assert_true\nfrom numpy.testing import assert_equal, assert_raises_regex\n\nfrom imblearn.datasets import make_imbalance\n\n# Generate a global dataset to use\nX = np.random.random((1000, 2))\nY = np.zeros(1000)\nY[500:] = 1\n\n\ndef test_make_imbalance_bad_ratio():\n min_c_ = 1\n\n # Define a zero ratio\n ratio = 0.0\n assert_raises_regex(ValueError, \"Ratio have to be strictly positive\",\n make_imbalance, X, Y, ratio, min_c_)\n\n # Define a negative ratio\n ratio = -2.0\n assert_raises_regex(ValueError, \"Ratio have to be strictly positive\",\n make_imbalance, X, Y, ratio, min_c_)\n\n # Define a ratio greater than 1\n ratio = 2.0\n assert_raises_regex(ValueError, \"Ratio cannot be greater than one\",\n make_imbalance, X, Y, ratio, min_c_)\n\n # Define ratio as a list which is not supported\n ratio = [.5, .5]\n assert_raises_regex(ValueError, \"Ratio must be a float between\",\n make_imbalance, X, Y, ratio, min_c_)\n\n\ndef test_make_imbalance_invalid_ratio():\n y_ = np.zeros((X.shape[0], ))\n y_[0] = 1\n\n ratio = 0.5\n assert_raises_regex(ValueError, \"Current imbalance ratio of data\",\n make_imbalance, X, y_, ratio)\n\n\ndef test_make_imbalance_single_class():\n y_ = np.zeros((X.shape[0], ))\n ratio = 0.5\n assert_raises_regex(ValueError, \"Not enough samples for desired ratio!\",\n make_imbalance, X, y_, ratio)\n\n\ndef test_make_imbalance_1():\n X_, y_ = make_imbalance(X, Y, ratio=0.5, min_c_=1)\n counter = Counter(y_)\n assert_equal(counter[0], 500)\n assert_equal(counter[1], 250)\n assert_true(np.all([X_i in X for X_i in X_]))\n\n\ndef test_make_imbalance_2():\n X_, y_ = make_imbalance(X, Y, ratio=0.25, min_c_=1)\n counter = Counter(y_)\n assert_equal(counter[0], 500)\n assert_equal(counter[1], 125)\n assert_true(np.all([X_i in X for X_i in X_]))\n\n\ndef test_make_imbalance_3():\n X_, y_ = make_imbalance(X, Y, ratio=0.1, min_c_=1)\n counter = Counter(y_)\n assert_equal(counter[0], 500)\n assert_equal(counter[1], 50)\n assert_true(np.all([X_i in X for X_i in X_]))\n\n\ndef test_make_imbalance_4():\n X_, y_ = make_imbalance(X, Y, ratio=0.01, 
min_c_=1)\n counter = Counter(y_)\n assert_equal(counter[0], 500)\n assert_equal(counter[1], 5)\n assert_true(np.all([X_i in X for X_i in X_]))\n\n\ndef test_make_imbalance_5():\n X_, y_ = make_imbalance(X, Y, ratio=0.01, min_c_=0)\n counter = Counter(y_)\n assert_equal(counter[1], 500)\n assert_equal(counter[0], 5)\n assert_true(np.all([X_i in X for X_i in X_]))\n\n\ndef test_make_imbalance_multiclass():\n # Make y to be multiclass\n y_ = np.zeros(1000)\n y_[100:500] = 1\n y_[500:] = 2\n\n # Resample the data\n X_, y_ = make_imbalance(X, y_, ratio=0.1, min_c_=0)\n counter = Counter(y_)\n assert_equal(counter[0], 90)\n assert_equal(counter[1], 400)\n assert_equal(counter[2], 500)\n assert_true(np.all([X_i in X for X_i in X_]))\n","sub_path":"imblearn/datasets/tests/test_make_imbalance.py","file_name":"test_make_imbalance.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"535535288","text":"from OpenGL.GL import *\nfrom OpenGL.GLUT import *\nimport time\nfrom math import pi\nw = 200\nh = 200\n\ncolor = [[244 / 255, 164 / 255, 96 / 255], [244 / 255, 164 / 255, 96 / 255], [244 / 255, 164 / 255, 96 / 255]]\n\npointdata = []\npointcolor = []\n\ndef create_data():\n\tglobal pointdata\n\tglobal pointcolor\n\timport random\n\n\trandom.seed(5)\n\tfor i in range(h): # по высоте\n\t\tfor j in range(w): # по ширине\n\t\t\tpointdata.append([i/h-0.5, 0, j/w-0.5])\n\t\t\tpointdata.append([(i+1)/h-0.5, 0, j/w-0.5])\n\t\t\tpointdata.append([(i+1)/h-0.5, 0, (j+1)/w-0.5])\n\t\t\tpointdata.append([i/h-0.5, 0, j/w-0.5])\n\t\t\tpointdata.append([i/h-0.5, 0, (j+1)/w-0.5])\n\t\t\tpointdata.append([(i+1)/h-0.5, 0, (j+1)/w-0.5])\n\t\t\tpointcolor.append(color)\n\t\t\tpointcolor.append(color)\n\n\ndef specialkeys(key, x, y):\n\tif key == GLUT_KEY_UP:\t\t # Клавиша вверх\n\t\tglRotatef(5, 1, 0, 0)\t # Вращаем на 5 градусов по оси X\n\tif key == GLUT_KEY_DOWN:\t\t# Клавиша вниз\n\t\tglRotatef(-5, 1, 0, 0)\t # Вращаем на -5 градусов по оси X\n\tif key == GLUT_KEY_LEFT:\t\t# Клавиша влево\n\t\tglRotatef(5, 0, 1, 0)\t # Вращаем на 5 градусов по оси Y\n\tif key == GLUT_KEY_RIGHT:\t # Клавиша вправо\n\t\tglRotatef(-5, 0, 1, 0)\t # Вращаем на -5 градусов по оси Y\n\tglutPostRedisplay()\n\n\ndef create_shader(shader_type, source):\n\tshader = glCreateShader(shader_type) # Создаем пустой объект шейдера\n\tglShaderSource(shader, source) # Привязываем текст шейдера к пустому объекту шейдера\n\tglCompileShader(shader) # Компилируем шейдер\n\treturn shader # Возвращаем созданный шейдер\n\ndef draw():\n\tglobal program\n\tvar = glGetUniformLocation(program, 'time')\n\tglUniform1f(var, time.time() % (2 * pi))\n\tglClear(GL_COLOR_BUFFER_BIT) # Очищаем экран и заливаем серым цветом\n\tglDrawArrays(GL_TRIANGLES, 0, 6*w*h)\n\tglutSwapBuffers() # Выводим все нарисованное в памяти на экран\n\n\nglutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB) # Использовать двойную буферезацию и цвета в формате RGB (Красный Синий Зеленый)\nglutInitWindowSize(500, 500) # Указываем начальный размер окна (ширина, высота)\nglutInitWindowPosition(500, 200) # Указываем начальное положение окна относительно левого верхнего угла экрана\nglutInit(sys.argv) # Инициализация OpenGl\nglutCreateWindow(\"lab_5\") # Создаем окно с заголовком\nglutDisplayFunc(draw) # Определяем процедуру, отвечающую за перерисовку\nglutIdleFunc(draw) # Определяем процедуру, выполняющуюся при \"простое\" программы\nglutSpecialFunc(specialkeys) # Определяем процедуру, отвечающую за обработку 
клавиш\nglClearColor(1., 1., 1., 1) # Задаем серый цвет для очистки экрана\ncreate_data()\nvertex = create_shader(GL_VERTEX_SHADER, \"\"\"\nuniform float time;\nvarying vec4 vertex_color;\n\t\t\tvoid main(){\n\t\t\t\tvec4 point = gl_Vertex;\n\t\t\t\tpoint.y = sin(point.x * 25.0 + time) / 20.0;\n\t\t\t\tgl_Position = gl_ModelViewProjectionMatrix * point;\n\t\t\t\tvec4 color = gl_Color;\n\t\t\t\tcolor = color + 1.0 * (point.y + 0.05);\n\t\t\t\tvertex_color = color;\n\t\t\t}\"\"\")\n\nfragment = create_shader(GL_FRAGMENT_SHADER, \"\"\"\nvarying vec4 vertex_color;\n\t\t\tvoid main() {\n\t\t\t\tgl_FragColor = vertex_color;\n}\"\"\")\n\nprogram = glCreateProgram() # Создаем пустой объект шейдерной программы\nglAttachShader(program, vertex) # Приcоединяем вершинный шейдер к программе\nglAttachShader(program, fragment) # Присоединяем фрагментный шейдер к программе\nglLinkProgram(program) # \"Собираем\" шейдерную программу\nglUseProgram(program) # Сообщаем OpenGL о необходимости использовать данную шейдерну программу при отрисовке объект\nglEnableClientState(GL_VERTEX_ARRAY) # Включаем использование массива вершин\nglEnableClientState(GL_COLOR_ARRAY)\t# Включаем использование массива цветов\nglVertexPointer(3, GL_FLOAT, 0, pointdata)\nglColorPointer(3, GL_FLOAT, 0, pointcolor)\nglutMainLoop() # Запускаем основной цикл\n","sub_path":"lab5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"208861585","text":"import sys\nimport time\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom configs.configs import Configs\nfrom utils import get_data, get_words, create_vocab, essay_to_ids_flat, convert_original_scores_to_new_scores, \\\n pad_flat_text_sequences, scale_down_scores, load_word_embedding_dict, \\\n build_embedd_table, create_id_dict, convert_to_ids_array\nfrom models.word_hi_flat_model import build_word_flat_att\nfrom evaluators.evaluator import Evaluator\n\n\ndef main():\n configs = Configs()\n\n data_path = configs.DATA_PATH\n data = get_data(data_path)\n random.shuffle(data)\n if configs.DEBUG:\n data, longest_essay, longest_title = get_words(data[:300], configs)\n else:\n data, longest_essay, longest_title = get_words(data, configs)\n train_data, test_data = train_test_split(data, test_size=0.1, random_state=42)\n train_data, dev_data = train_test_split(train_data, test_size=0.1, random_state=42)\n\n word_vocab = create_vocab(train_data, configs)\n train_titles, train_texts, train_scores, train_grades, train_lengths = essay_to_ids_flat(train_data, word_vocab)\n dev_titles, dev_texts, dev_scores, dev_grades, dev_lengths = essay_to_ids_flat(dev_data, word_vocab)\n test_titles, test_texts, test_scores, test_grades, test_lengths = essay_to_ids_flat(test_data, word_vocab)\n\n lengths_id_dict = create_id_dict(train_lengths)\n lengths_count = len(lengths_id_dict)\n train_lengths_X = convert_to_ids_array(train_lengths, lengths_id_dict)\n dev_lengths_X = convert_to_ids_array(dev_lengths, lengths_id_dict)\n test_lengths_X = convert_to_ids_array(test_lengths, lengths_id_dict)\n\n train_scores_y = convert_original_scores_to_new_scores(train_scores)\n dev_scores_y = convert_original_scores_to_new_scores(dev_scores)\n test_scores_y = convert_original_scores_to_new_scores(test_scores)\n\n train_titles_X = pad_flat_text_sequences(train_titles, longest_title)\n dev_titles_X = pad_flat_text_sequences(dev_titles, longest_title)\n test_titles_X = 
pad_flat_text_sequences(test_titles, longest_title)\n\n train_texts_X = pad_flat_text_sequences(train_texts, longest_essay)\n dev_texts_X = pad_flat_text_sequences(dev_texts, longest_essay)\n test_texts_X = pad_flat_text_sequences(test_texts, longest_essay)\n\n train_scores_y_scaled = scale_down_scores(train_scores_y)\n dev_scores_y_scaled = scale_down_scores(dev_scores_y)\n test_scores_y_scaled = scale_down_scores(test_scores_y)\n\n embedding_path = configs.EMBEDDING_PATH\n embedd_dict, embedd_dim, _ = load_word_embedding_dict(embedding_path)\n print('embedd_dict complete')\n embedd_matrix = build_embedd_table(word_vocab, embedd_dict, embedd_dim, caseless=True)\n embed_table = [embedd_matrix]\n\n train_inputs = [train_texts_X, train_titles_X, train_lengths_X]\n dev_inputs = [dev_texts_X, dev_titles_X, dev_lengths_X]\n test_inputs = [test_texts_X, test_titles_X, test_lengths_X]\n\n model = build_word_flat_att(len(word_vocab), longest_essay, lengths_count, longest_title,\n configs, embedding_weights=embed_table)\n evaluator = Evaluator(dev_inputs, dev_scores_y_scaled, test_inputs, test_scores_y_scaled)\n evaluator.evaluate(model, -1, print_info=True)\n epochs = configs.EPOCHS\n batch_size = configs.BATCH_SIZE\n for ii in range(epochs):\n print('Epoch %s/%s' % (str(ii + 1), epochs))\n start_time = time.time()\n model.fit(train_inputs,\n train_scores_y_scaled, batch_size=batch_size, epochs=1, verbose=0, shuffle=True)\n tt_time = time.time() - start_time\n print(\"Training one epoch in %.3f s\" % tt_time)\n evaluator.evaluate(model, ii + 1)\n\n evaluator.print_final_info()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"train_word_flat_att.py","file_name":"train_word_flat_att.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"509796740","text":"# Solution to Codility Lesson #4: FrogRiverOne\n# Copyright (c) MarcinSkrobczynski\n\n\ndef solution(x: int, a: list) -> int:\n leaves = [1] + [0] * x\n count = 0\n\n for i, value in enumerate(a):\n if value <= x and leaves[value] == 0:\n leaves[value] = 1\n count += 1\n\n if count == x:\n return i\n\n return -1\n","sub_path":"solutions/lesson_4/FrogRiverOne.py","file_name":"FrogRiverOne.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"160110645","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nwith open('requirements.txt', 'r') as f:\n INSTALL_REQUIRES = [x for x in list(f) if x[:2] not in [\"-i\", \"-r\"]]\n\nwith open('test-requirements.txt', 'r') as f:\n TEST_REQUIRES = [x for x in list(f) if x[:2] not in ['-i', '-r']]\n\n# needed for setup.py develop\n# import multiprocessing\n\n\nsetup(\n name=\"potplex\",\n description=\"play with kivy\",\n author=\"Albert Kurucz\",\n packages=find_packages(),\n setup_requires=['vcversioner'],\n vcversioner={\n 'version_module_paths': ['potplex/_version.py'],\n },\n install_requires=INSTALL_REQUIRES,\n test_requires=TEST_REQUIRES,\n test_suite='nose.collector',\n entry_points={'console_scripts': [\n 'potplex = potplex.potplex:_main',\n ]},\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: System Administrators\",\n \"License :: Other/Proprietary License\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2.7\",\n \"Topic :: System :: Networking\",\n \"Natural Language :: English\"\n ],\n 
#data_files=[('xrc', ['resources/starter_gui.xrc'])]\n #package_data={'kivyplex': ['xrc/*.xrc']},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"363719186","text":"\"\"\"Builda sample language model with LSTM and text captions.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport os\nfrom os.path import join, exists, isfile, isdir\nimport sys\nimport math\nimport shutil\nimport tensorflow as tf\nimport numpy as np\nfrom pprint import pformat\nimport cPickle as pickle\nimport importlib\nfrom data_loader import load_dataset\n\nfrom im_cap_model import ImCapModel\nfrom utils import CONFIG\n\nconfig = CONFIG.Trainer\nlogger = None\n\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.flags.DEFINE_string(\"model_config_name\", \"\",\n \"Which model config to use.\")\ntf.flags.DEFINE_string(\"save_model_dir\", \"\",\n \"Path to dir to save model checkpoint.\")\ntf.flags.DEFINE_string(\"model_path\", \"\",\n \"Path to model for inference or eval mode.\")\ntf.flags.DEFINE_string(\"out_caption_path\", \"\",\n \"Path to output the predicted captions.\")\ntf.flags.DEFINE_string(\"resume_from_model_path\", None,\n \"Path to model checkpoint to resume training.\")\ntf.flags.DEFINE_string(\"mode\", \"train\",\n \"Run mode.\")\ntf.flags.DEFINE_string(\"dataset_name\", \"flickr8k\",\n \"flickr8k, flickr30k or coco\")\n\n\ndef unpickle(path):\n with open(path, 'rb') as fp:\n res = pickle.load(fp)\n return res\n\ndef _strip_name(paths):\n return [(os.path.basename(p), i) for i, p in enumerate(paths)]\n\n\ndef load_cnn_features(path):\n feats_dir = os.path.splitext(os.path.basename(path))[0]\n img_name_list_path = join(\n os.path.dirname(path),\n '{}_list.txt'.format('_'.join(feats_dir.split('_')[:-3])))\n feats_shape = tuple([int(i) for i in feats_dir.split('_')[-1].split('X')])\n feats_mmap = np.memmap(path, mode='r', # read-only\n shape=feats_shape, dtype=np.float32)\n img_to_idx = {}\n with open(img_name_list_path, 'r') as fp:\n img_to_idx = dict(_strip_name(fp.read().split('\\n')))\n\n return (img_to_idx, feats_mmap)\n\n\ndef stringify(ls, sep=', '):\n return sep.join([str(i) for i in ls])\n\n\ndef restore_model(sess, path):\n if isfile(path):\n restore_path = path\n else:\n ckpt = tf.train.get_checkpoint_state(path)\n if ckpt and exists(ckpt.model_checkpoint_path):\n restore_path = ckpt.model_checkpoint_path\n # restored_step = int(ckpt.model_checkpoint_path.split('-')[-1])\n else:\n logger.fatal('No restorable checkpoint model found.')\n if restore_path:\n restorer = tf.train.Saver()\n restorer.restore(sess, restore_path)\n logger.info('Restoring model from %s', restore_path)\n\ndef print_msg(*args):\n return \"epoch: {} niter: {} batch_loss: {} curr_epoch_loss: {}\".format(*args)\n\ndef decode_samples_to_captions(samples, id_to_word):\n captions = []\n for sample in samples:\n cap = []\n for idx in sample:\n w = id_to_word.get(idx, None)\n if w is None:\n continue\n elif w == '':\n break\n cap.append(w)\n captions.append(cap)\n return captions\n\n\ndef main(_, conf={}):\n global logger\n try:\n mymodel = importlib.import_module(FLAGS.model_config_name)\n except:\n raise AttributeError(\"No model named %s\" % FLAGS.model_config_name)\n solver_config = mymodel.solver\n solver_config.log_fname = solver_config.log_fname.replace(\n \"expts\", \"expts/{}\".format(FLAGS.dataset_name))\n logger = config.log.getLogger(flag=3, 
fname=solver_config.log_fname)\n # print the experiment flags for logging purpose\n logger.info(\"python %s\", stringify(sys.argv, ' '))\n\n ###########################################################################\n # load train image captions\n ###########################################################################\n # assert exists(FLAGS.cnn_features_path)\n # assert exists(FLAGS.raw_captions_dir)\n # cnn_feats_path = FLAGS.cnn_features_path\n # raw_captions_dir = FLAGS.raw_captions_dir\n # train_cap_path = join(raw_captions_dir, 'Flickr8k.train.annotation.kl')\n # train_image_ids, train_raw_captions = unpickle(train_cap_path)\n\n # ###########################################################################\n # # load dev image captions\n # ###########################################################################\n # dev_cap_path = join(raw_captions_dir, 'Flickr8k.dev.annotation.kl')\n # dev_image_ids, dev_raw_captions = unpickle(dev_cap_path)\n\n # ###########################################################################\n # # load vocab\n # ###########################################################################\n # vocab_path = join(raw_captions_dir, 'vocab.kl')\n # word_to_ids = unpickle(vocab_path)\n # id_to_word = dict([(v, k) for k, v in word_to_ids.iteritems()])\n\n # ###########################################################################\n # # load cnn features\n # ###########################################################################\n # (train_img_to_idx, train_cnn_features) = load_cnn_features(cnn_feats_path)\n # dev_img_to_idx = train_img_to_idx\n\n ###########################################################################\n # new way of loading dataset\n ###########################################################################\n dataset_dir = 'data/%s' % FLAGS.dataset_name\n logger.info(\"Dataset path: %s\", dataset_dir)\n ret = load_dataset(dataset_dir, split='train')\n (train_raw_captions, train_image_ids,\n train_cnn_features, train_img_to_idx, word_to_ids) = ret\n\n ret = load_dataset(dataset_dir, word_to_id=word_to_ids, split='val')\n (dev_raw_captions, dev_image_ids, dev_cnn_features, dev_img_to_idx, _) = ret\n\n id_to_word = dict([(v, k) for k, v in word_to_ids.iteritems()])\n ###########################################################################\n # load the model config\n ###########################################################################\n model_config = mymodel.model\n num_samples = model_config.num_samples = len(train_image_ids)\n model_config.vocab_size = len(word_to_ids.keys()) + 1\n model_config.log_fname = model_config.log_fname.replace(\n \"expts\", \"expts/{}\".format(FLAGS.dataset_name))\n logger.info('Solver configuration: %s', pformat(solver_config))\n\n batch_size = model_config.batch_size\n num_epochs = solver_config.num_epochs\n solver_config.save_model_dir = solver_config.save_model_dir.replace(\n \"expts\", \"expts/{}\".format(FLAGS.dataset_name))\n if FLAGS.save_model_dir:\n save_model_dir = FLAGS.save_model_dir\n solver_config.save_model_dir = save_model_dir\n else:\n save_model_dir = solver_config.save_model_dir\n bool_save_model = True if save_model_dir else False\n if bool_save_model and not exists(save_model_dir):\n os.makedirs(save_model_dir)\n\n ###########################################################################\n # create the model\n ###########################################################################\n model = ImCapModel(model_config, word_to_ids)\n loss = model.build_model()\n 
tf.get_variable_scope().reuse_variables()\n generated_captions = model.build_generator()\n\n ###########################################################################\n # training related variables\n ###########################################################################\n global_step = tf.Variable(\n initial_value=0,\n name=\"global_step\",\n trainable=False,\n collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.VARIABLES])\n\n learning_rate = tf.constant(solver_config.learning_rate)\n train_op = tf.contrib.layers.optimize_loss(\n loss=loss,\n global_step=global_step,\n learning_rate=learning_rate,\n optimizer=solver_config.optimizer,\n clip_gradients=solver_config.train_clip_gradients,\n learning_rate_decay_fn=None)\n\n ###########################################################################\n # setup saver\n ###########################################################################\n saver = tf.train.Saver(max_to_keep=solver_config.max_to_keep)\n\n ###########################################################################\n # setup Session and begin training\n ###########################################################################\n sess = tf.Session()\n coord = tf.train.Coordinator()\n\n init = tf.initialize_all_variables()\n sess.run(init)\n\n ###########################################################################\n # restore previously saved model to resume training\n ###########################################################################\n bool_resume_path = True if model_config.resume_from_model_path else False\n if bool_resume_path and exists(model_config.resume_from_model_path):\n restore_model(sess, model_config.resume_from_model_path)\n\n tf.get_default_graph().finalize()\n\n # compute the number of iterations reqd for training\n niters_per_epoch = num_samples // batch_size + 1\n num_iters_to_run = num_epochs * niters_per_epoch\n\n # placeholders\n image_feature = model_config.img_input_feed\n caption_feature = model_config.cap_input_feed\n\n # setup 10 random dev set images to check the generated captions\n num_gen_samples = model_config.batch_size\n samp_idx = np.random.randint(0, dev_raw_captions.shape[0], num_gen_samples)\n samp_captions = dev_raw_captions[samp_idx, :]\n samp_img_ids = dev_image_ids[samp_idx]\n im_ids = [dev_img_to_idx[ind] for ind in samp_img_ids]\n samp_cnn = train_cnn_features[im_ids, ...]\n logger.info('Sampling captions for: [%s]', ','.join(samp_img_ids[:10]))\n\n if bool_save_model:\n logger.info('Model save path: {}'.format(save_model_dir))\n model_save_freq = solver_config.ckpt_epoch_freq\n logger.info('Save model per iters: {}'.format(model_save_freq))\n logger.info('Initial learning rate: {}'.format(solver_config.learning_rate))\n logger.info('Num of samples: {}'.format(num_samples))\n logger.info('Num of iters: {}'.format(num_iters_to_run))\n\n ###########################################################################\n # start training\n ###########################################################################\n for i in range(num_epochs):\n epoch_loss = 0.\n idxs = np.random.permutation(num_samples)\n # caption features: train_image_ids, train_raw_captions\n # image features: img_to_idx, train_cnn_features\n epoch_captions = train_raw_captions[idxs, :]\n epoch_image_ids = train_image_ids[idxs]\n\n for start, end in zip(range(0, num_samples, batch_size),\n range(batch_size, num_samples, batch_size)):\n batch_caps = epoch_captions[start:end, :]\n batch_img_ids = epoch_image_ids[start:end]\n im_ids = 
[train_img_to_idx[ind] for ind in batch_img_ids]\n batch_cnn = train_cnn_features[im_ids, ...]\n\n feeder = {model.images: batch_cnn, model.input_seqs: batch_caps}\n _, batch_loss, niters = sess.run(\n [train_op, loss, global_step], feed_dict=feeder)\n epoch_loss += batch_loss\n\n if niters % 20 == 0:\n logger.info(print_msg(i, niters, batch_loss, epoch_loss))\n\n if start == 0:\n # generate some sample captions\n gen_samples = sess.run(generated_captions,\n feed_dict={model.images: samp_cnn})\n sampled_captions = decode_samples_to_captions(gen_samples, id_to_word)\n for j, (idx, cap) in enumerate(zip(samp_img_ids, sampled_captions)):\n logger.info('Generated caption: epoch %d, %s - %s',\n i, idx, ' '.join(cap))\n if j == 10:\n break\n\n if bool_save_model and i % (model_save_freq) == 0:\n # completed epoch, save model snapshot\n _path = saver.save(sess, solver_config.save_model_dir, global_step=i)\n if bool_save_model:\n # save the final model\n saver.save(sess, solver_config.save_model_dir, global_step=num_epochs)\n\n coord.request_stop()\n sess.close()\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"173549250","text":"import vertica_python, re, json, time, pandas as pd, numpy as np\nimport datetime\nfrom time import time\nclass Vertica:\n def __init__(self,**conn_info):\n \"\"\"传入配置信息\"\"\"\n self._conn_info = conn_info\n\n def __get_connect(self):\n \"\"\"获取连接\"\"\"\n self.connection = vertica_python.connect(**self._conn_info)\n cur = self.connection.cursor()\n if not cur:\n raise(NameError,\"连接数据库失败\")\n else:\n return cur\n\n def __sqltodf(self,sql_code = ''):\n \"\"\"处理查询语句\"\"\"\n sql_code = re.sub('--.*?\\n', '\\n', sql_code) \n # 删除SQL代码里的注释行\n sql_list = ''.join(sql_code.replace('\\n','').split()).split(';')\n return sql_list\n # cur.execute(sql_list[s_i]+\";\")\n # e = time.time()\n # print(sql_list[s_i])\n # print(\"execute time: \"+str(round((e-s)/60, 2))+\" min\")\n # del sql_list, s_i\n # print('SQL Done...')\n # return pd.Dataframe(cur.fetchall())\n\n def __exec_query(self,sql):\n \"\"\"执行查询语句\"\"\"\n cur = self.__get_connect()\n cur.execute(sql)\n resList = cur.fetchall()\n #查询完毕后必须关闭连接\n self.conn.close()\n return resList\n\n def exec_query_dict(self, sql):\n \"\"\"把查询的数据变成dict返回\"\"\"\n result = []\n for row in self.__exec_query(sql):\n result.append( dict([(desc[0], row[index]) for index, desc in enumerate(row.cursor_description)]) )\n \n return result\n def exec_nonquery(self,sql):\n \"\"\"\n 执行非查询语句\n 调用示例:\n cur = self.__GetConnect()\n cur.execute(sql)\n self.conn.commit()\n self.conn.close()\n \"\"\"\n cur = self.__get_connect()\n cur.execute(sql)\n self.conn.commit()\n self.conn.close()\n \n def exce_copy(self,copysql,path):\n \"\"\"使用copy的语法\"\"\"\n cur = self.__get_connect()\n with open(path, \"rb\") as fs:\n cur.copy(copysql,fs, buffer_size=65536)\n\n","sub_path":"study_python/python_连接数据库/Demo_04封装py_vertica.py","file_name":"Demo_04封装py_vertica.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"334304375","text":"import json, uuid, os\nfrom unittest import mock\n\nfrom app.extensions import cache\nfrom app.api.constants import DOWNLOAD_TOKEN\nfrom tests.factories import DocumentManagerFactory\n\n\ndef test_download_file_happy_path(test_client, db_session, auth_headers, 
tmp_path):\n document = DocumentManagerFactory(path_root=tmp_path, file_display_name='testfile.pdf')\n\n test_data = 'Contents of file'\n file_path = document.full_storage_path\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n with open(file_path, \"w\") as f:\n f.write(test_data)\n\n token_resp = test_client.get(\n f'/document-manager/{document.document_guid}/token',\n headers=auth_headers['full_auth_header'])\n token_data = json.loads(token_resp.data.decode())\n token_guid = token_data['token_guid']\n assert token_resp.status_code == 200\n assert token_guid\n\n with mock.patch.object(cache, 'get') as mock_cache_get:\n mock_cache_get.return_value = document.document_guid\n\n get_resp = test_client.get(f'/document-manager?token={token_guid}')\n assert get_resp.status_code == 200\n assert get_resp.data.decode() == test_data\n mock_cache_get.assert_called_with(DOWNLOAD_TOKEN(token_guid))\n\n\ndef test_download_file_no_token(test_client, db_session):\n get_resp = test_client.get(f'/document-manager')\n get_data = json.loads(get_resp.data.decode())\n\n assert get_resp.status_code == 400\n assert get_data['status'] == 400\n assert get_data['message'] is not ''\n\n\ndef test_download_file_invalid_token(test_client, db_session):\n get_resp = test_client.get(f'/document-manager?token={uuid.uuid4()}')\n get_data = json.loads(get_resp.data.decode())\n\n assert get_resp.status_code == 400\n assert get_data['status'] == 400\n assert get_data['message'] is not ''\n","sub_path":"python-backend/tests/document_manager/resources/test_document_manager_resource.py","file_name":"test_document_manager_resource.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"170758207","text":"from flask import request, redirect, url_for, render_template\nfrom flaskr import app\nfrom flaskr import mongo\nfrom pymongo import DESCENDING\n\n@app.route('/')\ndef show():\n print('ergerbnrwjig')\n return render_template('layout.html')\n\n@app.route('/name', methods=['POST'])\ndef name_search():\n name = request.form[\"name\"]\n results = mongo.db.artist.find({'$or':\n [{'name': name}, {'aliases.name': name}]\n }).sort('rating.count', DESCENDING)\n return render_template('show_entries.html', results=results)\n\n@app.route('/tag', methods=['POST'])\ndef tag_search():\n tag = request.form[\"tag\"]\n results = mongo.db.artist.find({'tags.value': tag}).sort('rating.count', DESCENDING)\n return render_template('show_entries.html', results=results)\n","sub_path":"flaskr/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"125423366","text":"from django.urls import path\nfrom . 
import views\n\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\napp_name = \"Pizza\"\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"pizzas\", views.pizzas, name=\"pizzas\"),\n path(\"pizzas//\", views.pizza, name=\"pizza\"),\n path(\"new_comment//\", views.new_comment, name=\"new_comment\"),\n]\nurlpatterns += staticfiles_urlpatterns()\n","sub_path":"Pizza/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"83011446","text":"from django.db import models\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.utils.module_loading import import_string\nfrom django.core import validators\n\nfrom polymorphic.models import PolymorphicModel\nfrom autoslug import AutoSlugField\nfrom filebrowser.fields import FileBrowseField\n\nfrom cms.common.utils import served_langs\nfrom cms.common.mixins import SeoMixin\nfrom cms.content.models import Article, Category\nfrom cms.content import views as ContentViews\nfrom cms.galleries.models import Gallery\nfrom cms.galleries import views as GalleryViews\n\n\nTITLE_CLICK_ACTION_CHOICES = (\n ('go_to_object', _('Go to object')),\n ('nothing', _('Nothing')),\n)\n\nIMAGE_CLICK_ACTION_CHOICES = (\n ('go_to_object', _('Go to object')),\n ('enlarge', _('Enlarge image')),\n ('nothing', _('Nothing')),\n)\n\nORDER_BY_CHOICES = (\n ('category,order', _('Order')),\n ('category,-order', _('Reverse order')),\n ('-created', _('Newest first')),\n ('created', _('Oldest first')),\n ('?', _('Random')),\n)\n\n\nclass Page(PolymorphicModel):\n title = models.CharField(_('title'), max_length=255)\n published = models.BooleanField(_('published'), default=True)\n slug = AutoSlugField(\n _('slug'), populate_from='title',\n blank=True, editable=True, unique_with='language'\n )\n homepage = models.BooleanField(_('homepage'), default=False)\n language = models.CharField(\n _('language'), max_length=255,\n choices=settings.LANGUAGE_CHOICES, default='all'\n )\n\n class Meta:\n app_label = 'pages'\n verbose_name = _('page')\n verbose_name_plural = _('pages')\n\n def __str__(self):\n return self.title\n\n def save(self, *args, **kwargs):\n # prevent duplicated homepage (within same language)\n if self.homepage:\n if self.language == 'all':\n other = Page.objects.filter(homepage=True) \\\n .exclude(pk=self.pk).first()\n else:\n other = Page.objects.filter(\n homepage=True,\n language__in=('all', self.language)\n ).exclude(pk=self.pk).first()\n if other:\n other.homepage = False\n other.save()\n\n super(Page, self).save(*args, **kwargs)\n\n # prevent duplicated slug (within same language)\n # it's done after actual save() because of AutoSlugField\n if self.language == 'all':\n other = Page.objects.filter(slug=self.slug).exclude(pk=self.pk) \\\n .exists()\n else:\n other = Page.objects.filter(\n slug=self.slug,\n language__in=('all', self.language)\n ).exclude(pk=self.pk).exists()\n\n if other:\n self.slug += '-2'\n self.save()\n\n def get_url(self):\n if self.homepage:\n return reverse('pages:homepage')\n else:\n return reverse('pages:page', kwargs={'slug': self.slug})\n\n\nclass PageArticle(Page):\n article = models.ForeignKey(\n Article, verbose_name=_('article'), related_name='pages'\n )\n\n class Meta:\n app_label = 'pages'\n verbose_name = _('article')\n verbose_name_plural = _('articles')\n\n 
def get_view(self, request):\n return ContentViews.article(\n request,\n article=self.article,\n page=self\n )\n\n\nclass PageArticlesCategory(Page):\n category = models.ForeignKey(\n Category, verbose_name=_('category'), related_name='pages'\n )\n\n class Meta:\n app_label = 'pages'\n verbose_name = _('category of articles')\n verbose_name_plural = _('categories of articles')\n\n def get_view(self, request):\n return ContentViews.category(\n request,\n category=self.category,\n page=self\n )\n\n\nclass PageArticlesCategories(Page, SeoMixin):\n categories = models.ManyToManyField(Category, verbose_name=_('categories'))\n\n description = models.TextField(_('description'), null=True, blank=True)\n image = FileBrowseField(\n _('image'), max_length=255, blank=True, null=True,\n extensions=['.jpg', '.jpeg', '.gif', '.png']\n )\n\n order_by = models.CharField(\n _('order by'), max_length=255,\n choices=ORDER_BY_CHOICES, default='order'\n )\n pagination = models.BooleanField(_('pagination'), default=True)\n articles_on_page = models.PositiveIntegerField(\n _('articles on page'), default=5,\n validators=[validators.MinValueValidator(1)]\n )\n\n title_click_action = models.CharField(\n _('title click action'), max_length=255,\n choices=TITLE_CLICK_ACTION_CHOICES, default='go_to_object'\n )\n image_click_action = models.CharField(\n _('image click action'), max_length=255,\n choices=IMAGE_CLICK_ACTION_CHOICES, default='go_to_object'\n )\n\n show_articles_created = models.BooleanField(\n _('show articles\\' creation date'), default=True\n )\n truncate_intro = models.PositiveIntegerField(\n _('truncate intro'),\n help_text='Truncate articles\\' intro to given number of words. Leave '\n 'this field blank to show entire short descriptions.',\n null=True, blank=True, default=60,\n validators=[validators.MinValueValidator(1)]\n )\n\n html_class = models.CharField(\n _('HTML class'), max_length=255, blank=True, default=''\n )\n\n template = models.CharField(\n _('template'),\n help_text='Specifies custom template for listing. Leave this field '\n 'blank to use default. Using unsecure template can '\n 'potencially escalate security issue.
Refer to '\n 'Django documentation for '\n 'more info.',\n max_length=255, blank=True, default=''\n )\n\n class Meta:\n app_label = 'pages'\n verbose_name = _('multiple categories of articles')\n verbose_name_plural = _('multiple categories of articles')\n\n def get_view(self, request):\n return ContentViews.render_multiple_categories(\n request,\n categories=self.categories,\n page=self\n )\n\n\nclass PageFeaturedArticles(Page):\n class Meta:\n app_label = 'pages'\n verbose_name = _('featured articles')\n verbose_name_plural = _('featured articles')\n\n def get_view(self, request):\n return ContentViews.render_featured_articles(request, page=self)\n\n\nclass PageGallery(Page):\n gallery = models.ForeignKey(\n Gallery, verbose_name=_('gallery'), related_name='pages'\n )\n\n class Meta:\n app_label = 'pages'\n verbose_name = _('gallery')\n verbose_name_plural = _('galleries')\n\n def get_view(self, request):\n return GalleryViews.render_gallery(\n request,\n gallery=self.gallery,\n page=self\n )\n\n\nclass PageCustomView(Page):\n custom_view = models.CharField(\n _('custom view'),\n help_text=_('Should be valid Python path pointing to Django view.'),\n max_length=255\n )\n pass_page_obj = models.BooleanField(\n _('pass Page object to view function'), default=True\n )\n\n class Meta:\n app_label = 'pages'\n verbose_name = _('custom view')\n verbose_name_plural = _('custom views')\n\n def get_view(self, request):\n if self.pass_page_obj:\n return import_string(self.custom_view)(request, page=self)\n else:\n return import_string(self.custom_view)(request)\n\n\nclass Menu(models.Model):\n title = models.CharField(_('title'), max_length=255)\n published = models.BooleanField(_('published'), default=True)\n template_position = AutoSlugField(\n _('template position'), populate_from='title',\n unique_with='language', blank=True, editable=True\n )\n html_class = models.CharField(\n _('HTML class'), max_length=255, blank=True, default=''\n )\n language = models.CharField(\n _('language'), max_length=255,\n choices=settings.LANGUAGE_CHOICES, default='all'\n )\n pages = models.ManyToManyField(Page, through='PageInMenu', blank=True)\n\n class Meta:\n app_label = 'pages'\n verbose_name = _('menu')\n verbose_name_plural = _('menus')\n\n def __str__(self):\n return self.title\n\n def get_items(self):\n return PageInMenu.objects.filter(\n Q(page__language__in=served_langs()) | Q(page=None),\n menu=self, published=True\n )\n\n def save(self, *args, **kwargs):\n items = self.pageinmenu_set.all()\n for i in items:\n if i.parent and i.pageinmenu_set.exists():\n i.parent = None\n i.save()\n super(Menu, self).save(*args, **kwargs)\n\n\nclass PageInMenu(models.Model):\n menu = models.ForeignKey(Menu)\n page = models.ForeignKey(Page, null=True, blank=True)\n order = models.PositiveSmallIntegerField(_('order'), default=0)\n title = models.CharField(\n _('title'),\n help_text=_('If given overrides Page title.'),\n max_length=255, blank=True, default=''\n )\n link = models.CharField(\n _('link'), help_text=_('Overrides Page.'),\n max_length=255, blank=True, default=''\n )\n parent = models.ForeignKey(\n 'self', verbose_name=_('parent'), null=True, blank=True\n )\n published = models.BooleanField(_('published'), default=True)\n login_required = models.BooleanField(\n _('login required'),\n help_text=_('This controls visibility in menu only. 
'\n 'Page can still be accessible, despite '\n 'not logged in.'),\n default=False\n )\n open_in_new_tab = models.BooleanField(_('open in new tab'), default=False)\n html_class = models.CharField(\n _('HTML class'), max_length=255, blank=True, default=''\n )\n html_attrs = models.CharField(\n _('HTML attributes'), max_length=255, blank=True, default=''\n )\n\n class Meta:\n app_label = 'pages'\n ordering = ('order',)\n verbose_name = ''\n verbose_name_plural = _('pages')\n\n def __str__(self):\n parent = self.parent\n if parent:\n prefix = '({}) > '.format(parent.get_title())\n else:\n prefix = ''\n return prefix + self.get_title()\n\n def get_title(self):\n return self.title or self.page.title\n\n def get_children(self):\n return self.pageinmenu_set.filter(published=True)\n","sub_path":"cms/pages/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"394191995","text":"#-*-coding:utf-8 -*\nimport os\nfrom random import randrange\n\n# Ecran de présentation du jeu\nprint(\"*************************\")\nprint(\"* Le juste prix !!! *\")\nprint(\"*************************\")\n\n# initialisation des variables de départ\ncontinuer_partie = True #Booléen vrai tant qu'on dois continuer la partie\ncompteur = 0\nprint (\"Vous devez deviner, quelle est le prix du schmilblick !!!\")\n\nprix_objet = randrange(100)\n\n\nwhile continuer_partie:\n\tprix = input(\"Quel est le juste prix en Euros ? (entre 0 et 99)\")\n\tprix = int(prix)\n\tif prix < prix_objet:\n\t\tprint(\"c'est plus !!!\")\n\t\tcompteur += 1\n\telif prix > prix_objet:\n\t\tprint(\"C'est moins !!!\")\n\t\tcompteur += 1\n\telse:\n\t\tprint(\"C'est gagne\")\n\t\tcontinuer_partie = False\n\n\t\n\t\nprint (\"Vous avez saisis : \",prix, \"Euros\")\nprint(\"Nombre d'essai : \", compteur)\n# On met en pause le systeme windows\nos.system(\"pause\")","sub_path":"Partie_1/TP Juste prix/juste_prix.py","file_name":"juste_prix.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"649916964","text":"\"\"\"OpenAPI core schemas module\"\"\"\nimport logging\nfrom collections import defaultdict\nimport warnings\n\nfrom distutils.util import strtobool\nfrom functools import lru_cache\n\nfrom json import loads\nfrom six import iteritems\n\nfrom openapi_core.enums import SchemaType, SchemaFormat\nfrom openapi_core.exceptions import (\n InvalidValueType, UndefinedSchemaProperty, MissingProperty, InvalidValue,\n)\nfrom openapi_core.models import ModelFactory\n\nlog = logging.getLogger(__name__)\n\n\ndef forcebool(val):\n if isinstance(val, str):\n val = strtobool(val)\n\n return bool(val)\n\n\nDEFAULT_CAST_CALLABLE_GETTER = {\n SchemaType.INTEGER: int,\n SchemaType.NUMBER: float,\n SchemaType.BOOLEAN: forcebool,\n}\n\n\nclass Schema(object):\n \"\"\"Represents an OpenAPI Schema.\"\"\"\n\n def __init__(\n self, schema_type=None, model=None, properties=None, items=None,\n schema_format=None, required=None, default=None, nullable=False,\n enum=None, deprecated=False, all_of=None):\n self.type = schema_type and SchemaType(schema_type)\n self.model = model\n self.properties = properties and dict(properties) or {}\n self.items = items\n self.format = SchemaFormat(schema_format)\n self.required = required or []\n self.default = default\n self.nullable = nullable\n self.enum = enum\n self.deprecated = deprecated\n self.all_of = all_of and list(all_of) 
or []\n\n def __getitem__(self, name):\n return self.properties[name]\n\n def get_all_properties(self):\n properties = self.properties.copy()\n\n for subschema in self.all_of:\n subschema_props = subschema.get_all_properties()\n properties.update(subschema_props)\n\n return properties\n\n def get_all_required_properties(self):\n required = self.required.copy()\n\n for subschema in self.all_of:\n subschema_req = subschema.get_all_required_properties()\n required += subschema_req\n\n return required\n\n def get_cast_mapping(self):\n mapping = DEFAULT_CAST_CALLABLE_GETTER.copy()\n mapping.update({\n SchemaType.ARRAY: self._unmarshal_collection,\n SchemaType.OBJECT: self._unmarshal_object,\n })\n\n return defaultdict(lambda: lambda x: x, mapping)\n\n def cast(self, value):\n \"\"\"Cast value to schema type\"\"\"\n if value is None:\n if not self.nullable:\n raise InvalidValueType(\"Null value for non-nullable schema\")\n return self.default\n\n if self.type is None:\n return value\n\n cast_mapping = self.get_cast_mapping()\n\n if self.type in cast_mapping and value == '':\n return None\n\n cast_callable = cast_mapping[self.type]\n try:\n return cast_callable(value)\n except ValueError:\n raise InvalidValueType(\n \"Failed to cast value of {0} to {1}\".format(value, self.type)\n )\n\n def unmarshal(self, value):\n \"\"\"Unmarshal parameter from the value.\"\"\"\n if self.deprecated:\n warnings.warn(\n \"The schema is deprecated\", DeprecationWarning)\n casted = self.cast(value)\n\n if casted is None and not self.required:\n return None\n\n if self.enum and casted not in self.enum:\n raise InvalidValue(\n \"Value of {0} not in enum choices: {1}\".format(\n value, self.enum)\n )\n\n return casted\n\n def _unmarshal_collection(self, value):\n return list(map(self.items.unmarshal, value))\n\n def _unmarshal_object(self, value):\n if isinstance(value, (str, bytes)):\n value = loads(value)\n\n all_properties = self.get_all_properties()\n all_required_properties = self.get_all_required_properties()\n all_properties_keys = all_properties.keys()\n value_keys = value.keys()\n\n extra_props = set(value_keys) - set(all_properties_keys)\n\n if extra_props:\n raise UndefinedSchemaProperty(\n \"Undefined properties in schema: {0}\".format(extra_props))\n\n properties = {}\n for prop_name, prop in iteritems(all_properties):\n try:\n prop_value = value[prop_name]\n except KeyError:\n if prop_name in all_required_properties:\n raise MissingProperty(\n \"Missing schema property {0}\".format(prop_name))\n if not prop.nullable and not prop.default:\n continue\n prop_value = prop.default\n properties[prop_name] = prop.unmarshal(prop_value)\n return ModelFactory().create(properties, name=self.model)\n\n\nclass PropertiesGenerator(object):\n\n def __init__(self, dereferencer):\n self.dereferencer = dereferencer\n\n def generate(self, properties):\n for property_name, schema_spec in iteritems(properties):\n schema = self._create_schema(schema_spec)\n yield property_name, schema\n\n def _create_schema(self, schema_spec):\n return SchemaFactory(self.dereferencer).create(schema_spec)\n\n\nclass SchemaFactory(object):\n\n def __init__(self, dereferencer):\n self.dereferencer = dereferencer\n\n def create(self, schema_spec):\n schema_deref = self.dereferencer.dereference(schema_spec)\n\n schema_type = schema_deref.get('type', 'object')\n schema_format = schema_deref.get('format')\n model = schema_deref.get('x-model', None)\n required = schema_deref.get('required', False)\n default = schema_deref.get('default', None)\n 
properties_spec = schema_deref.get('properties', None)\n items_spec = schema_deref.get('items', None)\n nullable = schema_deref.get('nullable', False)\n enum = schema_deref.get('enum', None)\n deprecated = schema_deref.get('deprecated', False)\n all_of_spec = schema_deref.get('allOf', None)\n\n properties = None\n if properties_spec:\n properties = self.properties_generator.generate(properties_spec)\n\n all_of = []\n if all_of_spec:\n all_of = map(self.create, all_of_spec)\n\n items = None\n if items_spec:\n items = self._create_items(items_spec)\n\n return Schema(\n schema_type=schema_type, model=model, properties=properties,\n items=items, schema_format=schema_format, required=required,\n default=default, nullable=nullable, enum=enum,\n deprecated=deprecated, all_of=all_of,\n )\n\n @property\n @lru_cache()\n def properties_generator(self):\n return PropertiesGenerator(self.dereferencer)\n\n def _create_items(self, items_spec):\n return self.create(items_spec)\n\n\nclass SchemaRegistry(SchemaFactory):\n\n def __init__(self, dereferencer):\n super(SchemaRegistry, self).__init__(dereferencer)\n self._schemas = {}\n\n def get_or_create(self, schema_spec):\n schema_deref = self.dereferencer.dereference(schema_spec)\n model = schema_deref.get('x-model', None)\n\n if model and model in self._schemas:\n return self._schemas[model], False\n\n return self.create(schema_deref), True\n\n\nclass SchemasGenerator(object):\n\n def __init__(self, dereferencer, schemas_registry):\n self.dereferencer = dereferencer\n self.schemas_registry = schemas_registry\n\n def generate(self, schemas_spec):\n schemas_deref = self.dereferencer.dereference(schemas_spec)\n\n for schema_name, schema_spec in iteritems(schemas_deref):\n schema, _ = self.schemas_registry.get_or_create(schema_spec)\n yield schema_name, schema\n","sub_path":"openapi_core/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":7747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"579271085","text":"from typing import List\n\nimport numpy as np\nimport neuralcoref\nimport spacy\nimport pytextrank\nimport string\nimport pickle\nimport tensorflow_hub as hub\nfrom bert_embedding import BertEmbedding\nfrom flair.data import Sentence\nfrom flair.models import SequenceTagger\nfrom scipy.spatial.distance import cosine\n\n\nclass PreProcess:\n def __init__(self):\n self.nlp = spacy.load(\"en_core_web_lg\")\n\n @staticmethod\n def normalize_case(text: str):\n \"\"\"\n Normalizes the text to the lower case\n :param text: string\n :return: string\n Returns the string in the lower case\n \"\"\"\n return text.lower()\n\n def tokenize(self, text: str):\n \"\"\"\n Tokenize the string of text to the list of SpaCy tokens\n :param text: string\n Input text\n :return: List[str]\n Returns the list of tokens of the input text\n \"\"\"\n doc = self.nlp(text.lower())\n tokens = [token.text for token in doc if not token.is_punct]\n return tokens\n\n def remove_stopwords(self, tokens: List[str]):\n \"\"\"\n Remove stopwords in the tokens of text\n :param tokens: List[str]\n List of string of tokens\n :return: List[str]\n Returns the list of tokens with removing stopwords\n\n # Source:\n # https://www.analyticsvidhya.com/blog/2019/08/how-to-remove-stopwords-text-normalization-nltk-spacy-gensim-python/\n \"\"\"\n filtered_tokens = []\n\n for token in tokens:\n lexeme = self.nlp.vocab[token]\n if not lexeme.is_stop:\n filtered_tokens.append(token)\n return filtered_tokens\n\n def demote_ques(self, 
question: str, answer: str):\n \"\"\"\n Removes the tokens of the answer repeated from the question\n :param question: string\n :param answer: string\n :return: string\n Returns string of answer with removing the words present in the question\n \"\"\"\n\n question_tokens = self.tokenize(question)\n answer_tokens = self.tokenize(answer)\n answer_tokens = [token for token in answer_tokens if token not in question_tokens]\n\n demoted_answer = ''\n\n for i in range(len(answer_tokens)):\n if i == len(answer_tokens) - 1:\n demoted_answer += answer_tokens[i]\n return demoted_answer\n demoted_answer += answer_tokens[i] + ' '\n\n\nclass Utilities:\n def __init__(self):\n self.nlp = spacy.load(\"en_core_web_lg\")\n\n\n @staticmethod\n def get_use_embed(tokens):\n\n module_url = \"https://tfhub.dev/google/universal-sentence-encoder/4\"\n embed = hub.load(module_url)\n embeddings = embed(tokens)\n word_array = []\n for i in range(len(embeddings)):\n word_array.append(embeddings[i].numpy())\n return word_array\n\n @staticmethod\n def _get_bert_embed(tokens):\n\n embedding = BertEmbedding().embedding(sentences=tokens)\n\n word_array = []\n for i in range(len(embedding)):\n word_array.append(embedding[i][1][0])\n return word_array\n\n @staticmethod\n def _get_embed_list(tokens):\n with open(\"dataset/embeddings/phrases_use_stu_answers.pickle\", \"rb\") as handle:\n phrases_embed = pickle.load(handle)\n\n embed_list = []\n for chunk in tokens:\n embed_list.append(phrases_embed[chunk])\n\n return embed_list\n\n @staticmethod\n def get_cosine_similarity(array_1, array_2):\n return cosine(array_1, array_2)\n\n def cosine_similarity_matrix(self, des_tokens, stu_tokens):\n\n des_tokens_array = self.get_use_embed(des_tokens)\n stu_tokens_array = self.get_use_embed(stu_tokens)\n\n matrix = np.zeros((len(stu_tokens_array), len(des_tokens_array)))\n\n for i in range(0, len(stu_tokens_array)):\n for j in range(0, len(des_tokens_array)):\n matrix[i][j] = 1 - self.get_cosine_similarity(stu_tokens_array[i], des_tokens_array[j])\n return matrix\n\n @staticmethod\n def get_frequency(desired_words, total_tokens):\n word_freq = {}\n\n for word in desired_words:\n count = 0\n for answer in total_tokens:\n if word in answer:\n count += 1\n\n word_freq[word] = count\n\n return word_freq\n\n def corefer_resolution(self, text):\n neuralcoref.add_to_pipe(self.nlp)\n doc = self.nlp(text.lower())\n return doc._.coref_resolved\n\n def extract_phrases(self, text: str):\n \"\"\"\n Extracts the phrases of the text extracted from Flair package\n :param text: string\n :return: List[str]\n Returns the extracted list of phrases from the input text\n \"\"\"\n\n sentence = Sentence(text)\n tagger = SequenceTagger.load('chunk')\n tagger.predict(sentence)\n\n token_list: List[str] = []\n token_tags: List[str] = []\n\n for token in sentence:\n token_list.append(token.text)\n\n for label_type in token.annotation_layers.keys():\n # if token.get_labels(label_type)[0].value == \"O\":\n # token_tags.append('O')\n # if token.get_labels(label_type)[0].value == \"_\":\n # token_tags.append('_')\n token_tags.append(token.get_labels(label_type)[0].value) # Append token tags for each token\n\n phrases: List[str] = self._get_flair_phrases(token_list, token_tags)\n\n return phrases\n\n @staticmethod\n def _get_flair_phrases(token_list: List[str], token_tags: List[str]):\n \"\"\"\n Generate the phrases from the extracted tokens and their corresponding tags, by merging the relevant tokens\n :param token_list: List[str]\n List of strings of tokens\n :param 
token_tags: List[str]\n List of tags in order with tokens, extracted by Flair package\n :return: List[str]\n Returns the list of phrases merging the relevant tokens\n \"\"\"\n\n assert len(token_tags) == len(token_list)\n\n phrases = []\n phrase = ''\n\n # Creating the list of outside phrases and '_' phrases\n for token, tag in zip(token_list, token_tags):\n if token in string.punctuation:\n continue\n\n if '-' not in tag: # '-' do not occur for the single token tags.\n phrases.append(token)\n\n else:\n state, phrase_pos = tag.split('-')\n if state == 'B':\n phrase = ''\n phrase += token\n elif state == 'I':\n phrase += ' ' + token\n elif state == 'E':\n phrase += ' ' + token\n phrases.append(phrase)\n elif state == 'S':\n phrases.append(token)\n\n return phrases\n\n def extract_phrases_tr(self, text: str):\n \"\"\"\n Returns only noun key phrases\n Source: https://spacy.io/universe/project/spacy-pytextrank\n\n :param text: The text in which the key phrases should be extracted\n :return: List[str]\n Returns list of strings of key phrases in the text\n \"\"\"\n tr = pytextrank.TextRank()\n self.nlp.add_pipe(tr.PipelineComponent, name='textrank', last=True)\n\n doc = self.nlp(text)\n\n phrases = []\n\n for p in doc._.phrases:\n phrases.append(p.text)\n\n return phrases\n","sub_path":"formative_assessment/utilities/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"206741459","text":"# testes com input\n\nprint('TESTE COM ENTRADA DE INTEIROS')\nvalor = input('Entre com um valor inteiro: ')\nvalor = int(valor)\n\nvalor_2 = input('Entre com outro valor inteiro: ')\nvalor_2 = int(valor_2)\n\nsoma = valor + valor_2\n\nprint('A soma dos valores: ',str(soma))\n\n\nprint('TESTE COM ENTRADA DE PONTOS FLUTUANTES')\nvalor = input('Entre com um valor de ponto flutuante: ')\nvalor = float(valor)\n\nvalor_2 = input('Entre com outro valor de ponto flutuante: ')\nvalor_2 = float(valor_2)\n\nsoma = valor + valor_2\n\nprint('A soma dos valores: ',str(soma))\n\nprint('MEXENDO COM LISTAS DE NUMEROS: ')\n# Atributos\nnumeros = []\ncontador_pos = 0\ncontador_for = 0\nbooleano = True\n\n# While\nwhile booleano:\n print('Diga um numero inteiro para a posicao [',contador_pos,']')\n entrada = input('Diga o valor: ')\n entrada = int(entrada)\n numeros.append(entrada)\n restart = input('Deseja adicionar mais um valor? 
S/N : ')\n if restart.upper() == 'S' or restart.lower() == 's':\n contador_pos += 1\n booleano = True\n elif restart.upper() == 'N' or restart.lower() == 'n':\n booleano = False\n else:\n print('Entrada invalida, finalizando...')\n break\n\n# for de leitura\nfor numero in numeros:\n print('Posicao [',contador_for,']: ',numero)\n contador_for += 1\n\n\n","sub_path":"Basics/Input/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"515487742","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport json\nimport pymongo\n\n\nclass YouboyPipeline(object):\n def process_item(self, item, spider):\n return item\n\nclass JsonWriterPipeline(object):\n\n def __init__(self):\n self.file = open('./youboy_items.json', 'a',encoding='utf-8')\n\n def process_item(self, item, spider):\n if item['name']:\n line = json.dumps(dict(item)) + \"\\n\"\n self.file.write(line)\n return item\n\nclass MongoPipeline(object):\n def __init__(self, mongo_uri, mongo_port,mongo_db):\n self.mongo_uri = mongo_uri\n self.mongo_port = mongo_port\n self.mongo_db = mongo_db\n self.i = 0\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(mongo_uri=crawler.settings.get('MONGO_URI'), mongo_port=crawler.settings.get('MONGO_PORT'),mongo_db=crawler.settings.get('MONGO_DB'))\n\n def open_spider(self, spider):\n self.client = pymongo.MongoClient(\"\", 27017,connect=True)\n self.db = self.client['scrapy_hc']\n\n def process_item(self,item,spider):\n # if isinstance(item,HcItemItem):\n collection = 'Youboy'\n # if self.db[collection].update({'name': item['name']}, {'$set': dict(item)}, True):\n if item['name']:\n # if self.db[collection].update({'name':item['name']},{'$set': dict(item)},True):\n if self.db[collection].insert(dict(item)):\n print('Sueecss saved to Mongodb',item['name'])\n self.i +=1\n print(self.i)\n else:\n print('Not Mongodb ')\n\n return item\n","sub_path":"youboy/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"445486500","text":"class Solution:\n def countTexts(self, pressedKeys: str) -> int:\n counter = [3] * 10\n counter[0], counter[1], counter[7], counter[9] = 0, 0, 4, 4\n mod = 10 ** 9 + 7\n n = len(pressedKeys)\n dp = [0] * (n + 1)\n dp[0], dp[1] = 1, 1\n for i in range(2, n + 1):\n x = int(pressedKeys[i - 1])\n j = 0\n while j < counter[x] and i - j >= 1 and pressedKeys[i - j - 1] == pressedKeys[i - 1]:\n dp[i] += dp[i - j - 1]\n j += 1\n dp[i] %= mod\n return dp[n]\n\n\ns = Solution()\nprint(s.countTexts(\"222222222222222222222222222222222222\"))\nprint(s.countTexts(\"22233\"))\n","sub_path":"leetcode/2022/contest/weekly-292/Contest3.py","file_name":"Contest3.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"247105394","text":"import csv\nimport io\nfrom collections import OrderedDict\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _, ugettext_lazy\n\nfrom pretix.base.exporter import BaseExporter\nfrom pretix.base.models import Order, OrderPosition, Question\n\n\nclass BaseCheckinList(BaseExporter):\n pass\n\n\nclass CSVCheckinList(BaseCheckinList):\n name = 
\"overview\"\n identifier = 'checkinlistcsv'\n verbose_name = ugettext_lazy('Check-in list (CSV)')\n\n @property\n def export_form_fields(self):\n return OrderedDict(\n [\n ('items',\n forms.ModelMultipleChoiceField(\n queryset=self.event.items.all(),\n label=_('Limit to products'),\n widget=forms.CheckboxSelectMultiple,\n initial=self.event.items.filter(admission=True)\n )),\n ('secrets',\n forms.BooleanField(\n label=_('Include QR-code secret'),\n required=False\n )),\n ('paid_only',\n forms.BooleanField(\n label=_('Only paid orders'),\n initial=True,\n required=False\n )),\n ('sort',\n forms.ChoiceField(\n label=_('Sort by'),\n initial='name',\n choices=(\n ('name', _('Attendee name')),\n ('code', _('Order code')),\n ),\n widget=forms.RadioSelect,\n required=False\n )),\n ('questions',\n forms.ModelMultipleChoiceField(\n queryset=self.event.questions.all(),\n label=_('Include questions'),\n widget=forms.CheckboxSelectMultiple,\n required=False\n )),\n ]\n )\n\n def render(self, form_data: dict):\n output = io.StringIO()\n writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=\",\")\n\n questions = list(Question.objects.filter(event=self.event, id__in=form_data['questions']))\n qs = OrderPosition.objects.filter(\n order__event=self.event, item_id__in=form_data['items']\n ).prefetch_related(\n 'answers', 'answers__question'\n ).select_related('order', 'item', 'variation')\n\n if form_data['sort'] == 'name':\n qs = qs.order_by('attendee_name')\n elif form_data['sort'] == 'code':\n qs = qs.order_by('order__code')\n\n headers = [\n _('Order code'), _('Attendee name'), _('Product'), _('Price')\n ]\n if form_data['paid_only']:\n qs = qs.filter(order__status=Order.STATUS_PAID)\n else:\n qs = qs.filter(order__status__in=(Order.STATUS_PAID, Order.STATUS_PENDING))\n headers.append(_('Paid'))\n\n if form_data['secrets']:\n headers.append(_('Secret'))\n\n for q in questions:\n headers.append(str(q.question))\n\n writer.writerow(headers)\n\n for op in qs:\n row = [\n op.order.code,\n op.attendee_name,\n str(op.item.name) + (\" – \" + str(op.variation.value) if op.variation else \"\"),\n op.price,\n ]\n if not form_data['paid_only']:\n row.append(_('Yes') if op.order.status == Order.STATUS_PAID else _('No'))\n if form_data['secrets']:\n row.append(op.secret)\n acache = {}\n for a in op.answers.all():\n acache[a.question_id] = str(a)\n for q in questions:\n row.append(acache.get(q.pk, ''))\n\n writer.writerow(row)\n\n return 'checkin.csv', 'text/csv', output.getvalue().encode(\"utf-8\")\n","sub_path":"src/pretix/plugins/checkinlists/exporters.py","file_name":"exporters.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"116111032","text":"import os\nfrom pathlib import Path\nimport shutil\nimport stat\nfrom setuptools import setup\nfrom setuptools.command.build_py import build_py\n\nproject_path = Path(__file__).parent\n\n\ndef on_rm_error(func, path, exc_info):\n os.chmod(path, stat.S_IWRITE)\n os.unlink(path)\n\n\ndef remove_dirs_and_files(rm_dist=False):\n remove_dirs = []\n remove_dirs = [Path(project_path, \"build\")]\n remove_dirs += [str(f) for f in Path(project_path, \"src\").glob(\"**/*.exp\")]\n remove_dirs += [str(f) for f in Path(project_path, \"src\").glob(\"**/*.pyd\")]\n remove_dirs += [str(f) for f in Path(project_path, \"src\").glob(\"**/*.lib\")]\n remove_dirs += [str(f) for f in Path(project_path, \"src\").glob(\"**/*.so\")]\n if rm_dist:\n remove_dirs += [str(f) for f in 
Path(project_path).glob(\"*{0}\".format(\"dist\"))]\n remove_dirs += [str(f) for f in Path(project_path).glob(\"*{0}\".format(\"egg_info\"))]\n\n for d in remove_dirs:\n if os.path.isdir(d):\n shutil.rmtree(d, onerror=on_rm_error)\n elif os.path.isfile(d):\n os.remove(d)\n\n\nremove_dirs_and_files(True)\n\nVERSION = \"0.0.3\"\n\nREQUIREMENTS = [\"sobol-seq>=0.1.2\", \"numpy>=1.18.1\"]\nSETUP_REQUIREMENTS = []\n\nsetup(\n name=\"csampling\",\n version=VERSION,\n description=\"This package implements sampling methods, useful for simulation analysis.\",\n long_description=Path(\"README.md\").read_text(),\n long_description_content_type=\"text/markdown\",\n author=\"paarhaam\",\n author_email=\"paarhaam@yahoo.com\",\n url=\"https://github.com/paarhaam/csampling\",\n packages=[\"csampling\"],\n package_dir={\"\": \"src\"},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\"Programming Language :: Python :: 3.7\"],\n install_requires=REQUIREMENTS,\n setup_requires=SETUP_REQUIREMENTS,\n cmdclass={\"build_py\": build_py},\n python_requires=\">=3.7\",\n license='MIT'\n)\n\nremove_dirs_and_files(False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"444065304","text":"# coding: utf-8\n\n\"\"\"\nCreated: __.__.____\nAuthor: surkovss\n\nDescription: ______\n\"\"\"\n\n\nimport json\nfrom sqlalchemy import inspect\nfrom sqlalchemy.orm import reconstructor\nfrom sqlalchemy.ext.declarative import DeclarativeMeta\n\n\nclass Model(object):\n arguments_transformations = {}\n\n def __init__(self, *args, **kwargs):\n super(Model, self).__init__(*args, **kwargs)\n\n self._class_name = type(self).__name__\n\n self.column_names = self.prepare_column_names()\n\n @reconstructor\n def init_on_load(self):\n self._class_name = type(self).__name__\n\n self.column_names = self.prepare_column_names()\n\n @classmethod\n def prepare_column_names(cls):\n mapper = inspect(cls)\n\n # Returned in the order the columns are defined in the class; attributes\n # whose column name is set explicitly come first.\n column_names = tuple(attr.key for attr in mapper.column_attrs)\n\n return column_names\n\n def __setattr__(self, key, value):\n if isinstance(value, bytes):\n try:\n value = value.decode('utf-8')\n except UnicodeDecodeError:\n pass\n\n if key in self.arguments_transformations:\n value = self.arguments_transformations[key](value)\n\n super(Model, self).__setattr__(key, value)\n\n def __unicode__(self):\n begin = '{'\n\n middle = \\\n ', '.join('\"{column_name}\": \"{column_value}\"'.\n format(column_name=str(column_name),\n column_value=str(getattr(self, column_name)))\n for column_name in self.column_names)\n\n end = '}'\n\n return begin + middle + end\n\n def to_dict(self):\n\n new_dict = {}\n for col_name in self.__table__.columns.keys():\n new_dict.setdefault(col_name, getattr(self, col_name))\n\n return new_dict","sub_path":"utils/modelling.py","file_name":"modelling.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"388555513","text":"'''\r\ntime - O(N)\r\nspace - O(1)\r\nApproach:\r\n1. Iterate through the n elements; in the first iteration, find the celebrity candidate.\r\n2. 
In the second iteration verify if the found celebrity is actual celebrity or not.\r\n'''\r\nclass Solution():\r\n def findCelebrity(self, n):\r\n celeb = 0\r\n\r\n for people in range(n):\r\n if knows(celeb, people):\r\n celeb = people\r\n\r\n for people in range(n):\r\n if people == celeb:\r\n continue;\r\n\r\n if (knows(celeb,people) or not knows(people, celeb)):\r\n return -1\r\n\r\n return celeb\r\n\r\n","sub_path":"186_FindTheCelebrity.py","file_name":"186_FindTheCelebrity.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"2528008","text":"\nfrom common import vha_excel_common\n\nimport time\nimport re\nfrom dateutil import parser\n\nimport openpyxl\nfrom openpyxl import load_workbook\nfrom openpyxl import Workbook\nfrom openpyxl.drawing.image import Image\nfrom openpyxl.utils import coordinate_from_string\n\ndef is_vha_time_in_scope(trigger_time, warning_time):\n cell_time = time.strftime(\"%Y-%m-%d \", time.localtime()) + warning_time + ':59'\n\n time_struct = time.strptime(cell_time, \"%Y-%m-%d %H:%M:%S\")\n\n time1 = time.mktime(time_struct)\n time2 = time.mktime(trigger_time)\n\n diff = time1 - time2\n\n print(str(diff))\n if diff > 0:\n return True\n else:\n return False\n\n\ndef vha_equals_without_punctuation(target, real):\n is_equals = False\n pattern = re.compile(r'[\\u4e00-\\u9fa5a-zA-Z0-9]')\n target_no_punctuation = pattern.findall(target)\n real_no_punctuation = pattern.findall(real)\n\n target_len = len(target_no_punctuation)\n real_len = len(real_no_punctuation)\n equals_count = 0\n not_equals_str = 'target=' + str(target_no_punctuation) + ',real=' + str(real_no_punctuation)\n\n from difflib import SequenceMatcher\n\n similar = SequenceMatcher(None, target_no_punctuation, real_no_punctuation).ratio()\n\n print(str(similar))\n\n if similar > 0.9:\n is_equals = True\n else:\n is_equals = False\n print(not_equals_str)\n return is_equals, not_equals_str\n\n\nif __name__ == \"__main__\":\n target = '检查轮胎压力,以确保1轮胎已适当充气。警告:轮胎压力监视系统不能取代手动检查轮胎压力。应使用胎压计定期检查轮胎压力(至少每月一次)。有关详细信息,请参阅《车主手册》中“车轮和轮胎”章节的“轮胎充气”内容。未能正确保持轮胎压力可能增加轮胎故障、失控、翻车和人员伤害的风险。'\n real = '检查轮胎压力,以确保2轮胎已适当充气。警告:轮胎压力监视系统不能取代手动检查轮胎压力。应使用胎压计定期检查轮胎压力(至少每月一次)。有关详细信息,请参阅《车主手册》中“车轮和轮胎”章节的“轮胎充气”內容。未能正确保持轮胎压力可能增加轮胎故障、失控、翻车和人员伤害的风险。'\n\n is_equals, not_equals_str = vha_equals_without_punctuation(target, real)\n print(str(is_equals))\n print(not_equals_str)\n #cell_time = time.localtime()\n\n #print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n # warning_time = '16:38'\n #\n # timeStr = '2018-06-20 16:38:56'\n # trigger_time = time.mktime(time.strptime(timeStr, '%Y-%m-%d %H:%M:%S'))\n #\n # trigger_time = time.localtime(trigger_time)\n # is_vha_time_in_scope(trigger_time, warning_time)\n\n \"\"\"\n print(vha_excel_common.get_vha_title_by_warning_code('600E11'))\n print(vha_excel_common.get_vha_what_shall_i_do_what_happen('600E11', 'NONE', 'NONE'))\n print(vha_excel_common.get_vha_all('600E11', 'NONE', 'NONE'))\n \"\"\"","sub_path":"VHA2.0/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"335434776","text":"import os\nfrom flask import Flask\n\nfrom analytics.views import page\n\n__all__ = [\n 'create_app'\n]\n\n\ndef create_app(config_filename):\n app = Flask(__name__, template_folder=os.path.join('..', 'templates'))\n\n config_path = os.path.join('..', 'instance', config_filename)\n 
app.config.from_pyfile(config_path)\n\n app.register_blueprint(page)\n\n return app\n","sub_path":"analytics/analytics/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"184108254","text":"import networkx as nx\nimport numpy as np\nfrom create_matrix import create_matrix\n\n\nG=nx.read_edgelist('../pr.edgelist',create_using=nx.DiGraph(),nodetype=int)\nn=nx.number_of_nodes(G)\n\n\ndef update(G,v,d,k):\n A=create_matrix(G) # matrix A\n\n Mg=d*A+(1-d)*v\n \n for i in range(0,k):\n v=Mg*v\n print('Iteration', i+1, ':\\n', v)\n return v\n\nv = np.ones((n,1))/n\nd=0.85\nk=10\nupdate(G,v,d,k)\n","sub_path":"前期/algorism/10/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"60569842","text":"#!/usr/bin/env python\n#\n# test_performance.py -\n#\n# Author: Paul McCarthy \n#\n\n\nfrom . import run_cli_tests, discretise\n\n\ncli_tests = \"\"\"\n3d.nii.gz\n3d.nii.gz -ot mask -t 4000 10000\n3d.nii.gz -ot mip\n{{discretise('3d.nii.gz', 500)}} -ot label\ndti\ndti/dti_V1 -ot rgbvector\ndti/dti_V1 -ot linevector\nsh -ot sh\nmesh_l_thal.vtk -mc 1 0 0\n\"\"\"\n\n\nextras = {'discretise' : discretise}\n\n\ndef add_prefix(prefix):\n tests = list(cli_tests.strip().split('\\n'))\n tests = [prefix + t for t in tests]\n return '\\n'.join(tests)\n\n\ndef test_performance_p1_ortho():\n tests = add_prefix('-p 1 -s ortho ')\n run_cli_tests('test_performance', tests, extras=extras)\n\n\ndef test_performance_p2_ortho():\n tests = add_prefix('-p 2 -s ortho ')\n run_cli_tests('test_performance', tests, extras=extras)\n\n\ndef test_performance_p1_lightbox():\n tests = add_prefix('-p 1 -s lightbox ')\n run_cli_tests('test_performance', tests, extras=extras)\n\n\ndef test_performance_p2_lightbox():\n tests = add_prefix('-p 2 -s lightbox ')\n run_cli_tests('test_performance', tests, extras=extras)\n","sub_path":"tests/test_performance.py","file_name":"test_performance.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"475861629","text":"import werkzeug\n# This next line prevents ImportErrors - flask-restplus currently gets broken by \n# Werkzeug 1.0.0. 
Alternatively, you could also downgrade werkzeug \n# (the package Python uses to interface with an HTTP web server)\n# Note: line 6 must happen BEFORE you import Flask to be able to work!\nwerkzeug.cached_property = werkzeug.utils.cached_property\nfrom flask import Flask\nfrom flask_restplus import Api, fields, Resource\n\n\n# instantiate Flask app and API - in same file for now\napplication = app = Flask(__name__)\napi = Api(app)\n\n# languages for the HTTP responses\npython = {'language': 'Python'}\nlanguages = [python]\n\n\n# Outlining a model - Swagger will make docs for this\nlanguage_model = api.model('Language', {\n 'language': fields.String('human-readable description')\n } # key value pair passed in above \n)\n\n# routes for the API\n@api.route('/language')\nclass Language(Resource):\n \"\"\"\n This is a class for the API route, we use these rather than functions \n (which is more like vanilla Flask).\n\n For now, the base route will be created automatically using\n a really sleek Swagger documentation - will help us document \n information about our API, and try out the endpoints in the browser.\n\n \"\"\"\n def get(self):\n '''Method for the Language class to handle GET requests.'''\n return languages # jsonify not required like in vanilla Flask\n\n # route where users can add new languages\n @api.expect(language_model) # has to be in form of model above\n def post(self):\n '''Data is sent with multiple methods. One is shown below'''\n languages.append(api.payload) # param for model field above\n return {'result': 'Language added'}, 201 # HTTP 201 = \"something created\" \n\n\n# run the app in debug mode locally\nif __name__ == '__main__':\n app.run(debug=True)\n ","sub_path":"Activities/restplus_intro/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"431041448","text":"\"\"\"create table user topic\n\nRevision ID: a7c056e10dd8\nRevises: 239ed15d4c2c\nCreate Date: 2017-10-09 15:12:46.431026\n\n\"\"\"\nimport os\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\nschema_name = os.getenv('SCHEMA_NAME')\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a7c056e10dd8'\ndown_revision = '239ed15d4c2c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n 'user_topic',\n sa.Column('_id', sa.Text, nullable=False, unique=True),\n sa.Column('_database_id', sa.Text, nullable=False),\n sa.Column('_owner_id', sa.Text, nullable=False),\n sa.Column('_access', sa.dialects.postgresql.JSONB),\n sa.Column('_created_by', sa.Text, nullable=False),\n sa.Column('_created_at', sa.DateTime),\n sa.Column('_updated_by', sa.Text, nullable=False),\n sa.Column('_updated_at', sa.DateTime),\n sa.Column('user_id', sa.Text, nullable=False),\n sa.Column('topic_id', sa.Text, nullable=False),\n sa.PrimaryKeyConstraint('_id', '_database_id', '_owner_id'),\n sa.UniqueConstraint('user_id', 'topic_id'),\n sa.ForeignKeyConstraint(['user_id'], ['%s.user._id' % schema_name]),\n sa.ForeignKeyConstraint(['topic_id'], ['%s.topic._id' % schema_name]),\n schema=schema_name\n )\n\n op.execute(\n \"\"\"\n CREATE TRIGGER trigger_notify_record_change\n AFTER INSERT OR UPDATE OR DELETE\n ON %s.user_topic\n FOR EACH ROW\n EXECUTE PROCEDURE public.notify_record_change();\n \"\"\" % schema_name\n )\n\n\ndef downgrade():\n op.drop_table('user_topic', 
schema=schema_name)\n","sub_path":"alembic/versions/a7c056e10dd8_create_table_user_topic.py","file_name":"a7c056e10dd8_create_table_user_topic.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"311689651","text":"from os.path import basename\n\n\ndef is_txt_file(file_name):\n base = basename(file_name)\n table = base.split('.')\n if table.__len__() <= 1 or table[1] != \"txt\":\n print(\"This is not a txt file, please enter .txt file.\")\n exit(84)\n\n\ndef open_file(file_name):\n try:\n file = open(file_name, \"r\")\n except:\n print(\"Failed to open file, check name or permission.\")\n exit(84)\n return file\n\n\ndef read_file(file_name):\n is_txt_file(file_name)\n file = open_file(file_name)\n content = file.read()\n file.close()\n return content\n","sub_path":"src/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"445009914","text":"from dataset import Dataset\nfrom kernelevaluater import KernelEvaluater, OutputFormat\nfrom kernels import NGramsStringKernel, SubSequenceStringKernel, WordStringKernel\nfrom reuters import DataType\n\n__author__ = 'Daniel Schlaug'\n\nlengths = [3, 4, 5, 6, 7, 8, 10, 12, 14]\nweight_decays = [0.01, 0.03, 0.05, 0.07, 0.09, 0.1, 0.3, 0.5, 0.7, .9]\n\nlengths = 14\nweight_decays = 0.5\n\nkernels = [SubSequenceStringKernel, NGramsStringKernel, WordStringKernel]\n\ndataset = Dataset()\ntraining_data = dataset.get_data(topic=None, data_type=DataType.training)\ntest_data = dataset.get_data(topic=None, data_type=DataType.testing)\n\nevaluator = KernelEvaluater(training_data=training_data, test_data=test_data, kernels=kernels)\n\nprint(evaluator.evaluation(kernel_kwargs={'length': lengths, 'weight_decay': 0.5}, output_format=OutputFormat.latex))\nprint(evaluator.evaluation(kernel_kwargs={'length': 5, 'weight_decay': weight_decays}, output_format=OutputFormat.latex))","sub_path":"tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"361568184","text":"# -*- coding: utf-8 -*-\n\"\"\" login_view.py - presenter for the login prompt\"\"\"\n__author__ = \"topseli\"\n__license__ = \"0BSD\"\n\n\nimport os\nimport sys\n\nfrom PyQt5 import QtWidgets, uic\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot\nfrom PyQt5.QtWidgets import QMessageBox\n\n\nclass LoginView(QtWidgets.QWidget):\n\n login_signal = pyqtSignal(dict)\n\n def __init__(self):\n super(LoginView, self).__init__()\n self.init_ui()\n\n def init_ui(self):\n path = os.path.dirname(os.path.abspath(__file__)) + '/login_view.ui'\n uic.loadUi(path, self)\n\n def show_warning(self, e):\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Warning)\n msg.setText(\"The server is not responding\")\n msg.setWindowTitle(\"Connection error\")\n msg.setDetailedText(str(e))\n msg.setStandardButtons(QMessageBox.Ok)\n msg.exec_()\n\n @pyqtSlot()\n def on_login_button_clicked(self):\n login_info = {\n \"address\": self.address_input.text(),\n \"port\": int(self.port_input.text()),\n \"username\": self.username_input.text()\n }\n self.login_signal.emit(login_info)\n\n\ndef run():\n APP = QtWidgets.QApplication(sys.argv)\n APP_WINDOW = LoginView()\n APP_WINDOW.exit_button.clicked.connect(sys.exit)\n APP_WINDOW.show()\n APP.exec_()\n\n\nif __name__ == '__main__':\n 
run()\n","sub_path":"login_view.py","file_name":"login_view.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"563271198","text":"#!/usr/bin/env python-mr\n\nfrom collections import defaultdict\nfrom _.data.formatting.blocks import Block\n\ndef pair_map(lines):\n m = {}\n for line in lines:\n pair, insert = line.split(\" -> \")\n if pair in m:\n raise Exception(\"Duplicate pairs?\")\n m[pair] = insert\n\n return m\n\n\nLOAD = \"groups\"\ndef REWRITE(lines):\n return (lines[0][0], pair_map(lines[1]))\n\n\ndef insert(string, insertions):\n out = \"\"\n for first, second in zip(string, string[1:]):\n i = insertions[first + second]\n out = out + first + i\n\n out += string[-1]\n return out\n\n\ndef count(string):\n c = defaultdict(int)\n for s in string:\n c[s] += 1\n\n return c\n\n\ndef PART1(inputs):\n string, mapping = inputs\n print(inputs)\n\n s = string\n for _i in range(10):\n s = insert(s, mapping)\n\n c = count(s)\n return c[max(c, key=lambda x: c[x])] - c[min(c, key=lambda x: c[x])]\n\n\nclass CountingDict(defaultdict):\n def __add__(self, other):\n if type(self) != type(other):\n return NotImplemented\n\n keys = set(self.keys()).union(other.keys())\n out = CountingDict(int)\n for k in keys:\n out[k] += self.get(k, 0)\n out[k] += other.get(k, 0)\n\n return out\n\n\ndef inserts(string, mapping, count=1):\n # The string ~doubles in size every insertion pass, an iterated solution is\n # much too slow. Instead this recursive memoization is much much faster.\n\n memo = {}\n def compute_pair(p1, p2, count):\n key = (p1, p2, count)\n #print(\"compute_pair\", *key)\n\n if key in memo:\n return memo[key]\n\n c = CountingDict(int)\n if count == 0:\n c[p1] += 1\n c[p2] += 1\n else:\n i = mapping[p1 + p2]\n c = compute_pair(p1, i, count - 1) + compute_pair(i, p2, count - 1)\n # i is counted in both.\n c[i] -= 1\n\n memo[key] = c\n return c\n\n c = CountingDict(int)\n for first, second in zip(string, string[1:]):\n #print(\"zip\", first, second, c)\n c = c + compute_pair(first, second, count)\n c[second] -= 1\n\n c[string[-1]] += 1 # removed because it was the last pair\n return c\n\n\ndef PART2(inputs):\n string, mapping = inputs\n count = 40\n c = inserts(string, mapping, count=count)\n #print(\"after\", count, \"we see\", c)\n return c[max(c, key=lambda x: c[x])] - c[min(c, key=lambda x: c[x])]\n","sub_path":"2021/day-14/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"583519824","text":"import json\n\nclass Sentence(object):\n def __init__(self,strang, log):\n self.strang = strang\n self.log = log\n self.word_sequence = strang.split(\" \")\n\n def rebuild(self):\n return \" \".join(self.word_sequence)\n\n def rebuildHTML(self):\n \ts = \"\"\n \tvecs = self.word_vectors()\n \tweights = [ w['weight'] for w in vecs ]\n \twmin = min(weights)\n \twmax = max(weights)\n \tscale = wmax-wmin\n \tscale = scale if scale else 1\n \tfor w in vecs:\n \t\ts += \"\" + \\\n \t\t\t\tw['word'] + \" \"\n \treturn s\n\n\n def topics(self):\n \tTrue\n\n def word_vectors(self):\n \tvecs = []\n \tfor w in self.word_sequence:\n \t\tif w in self.log.words:\n \t\t\tvecs.append({ 'word': w, 'weight': self.log.words[w] })\n \t\telse:\n \t\t\tvecs.append({ 'word': w, 'weight': 0 })\n \treturn vecs\n\n def dict(self):\n # d = self.__dict__.copy()\n # del d['strang']\n return [ w['word'] + \" \" + 
str(w['weight']) for w in self.word_vectors() ]\n # return self.word_vectors()\n\n","sub_path":"sentence.py","file_name":"sentence.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"43889690","text":"'''\n作者:zlq\n日期:18-4-18\n'''\nimport pygame\n\nfrom random import *\n\npygame.init()\nclass MyPlane(pygame.sprite.Sprite):\n def __init__(self,bg_size):\n pygame.sprite.Sprite.__init__(self)\n self.image1 = pygame.image.load(\"../images/plane1.png\").convert_alpha()\n self.image2 = pygame.image.load(\"../images/plane2.png\").convert_alpha()\n self.destroy_images = [pygame.image.load(\"../images/me_destroy_1.png\").convert_alpha(),\\\n pygame.image.load(\"../images/me_destroy_2.png\").convert_alpha(),\\\n pygame.image.load(\"../images/me_destroy_3.png\").convert_alpha(),\\\n pygame.image.load(\"../images/me_destroy_4.png\").convert_alpha()]\n self.rect = self.image1.get_rect()\n self.rect.left, self.rect.top = ((bg_size.width - self.rect.width) / 2, \\\n bg_size.height - self.rect.height - 50)\n # 状态\n self.alive = True\n\n # 速度\n self.speed = 10\n # 背景大小\n self.bg_size = bg_size\n #获取非透明区域\n self.mask = pygame.mask.from_surface(self.image1)\n\n def move_up(self):\n if self.rect.top <=0:\n self.rect.top = 0\n else:\n self.rect.top -= self.speed\n\n def move_down(self):\n if self.rect.bottom >= self.bg_size.height - 50:\n self.rect.bottom = self.bg_size.height - 50\n else:\n self.rect.bottom += self.speed\n\n def move_left(self):\n if self.rect.left <= -150:\n self.rect.left = -150\n else:\n self.rect.left -= self.speed\n\n def move_right(self):\n if self.rect.right >= self.bg_size.width:\n self.rect.right = self.bg_size.width\n else:\n self.rect.right += self.speed\n\n\n","sub_path":"zhangliqiang/feijidazhan/fjdz/project/plane.py","file_name":"plane.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"353313820","text":"import gc\nimport os\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\n\n\ndef data_augmentation(image, mode):\n if mode == 0:\n # original\n return image\n elif mode == 1:\n # flip up and down\n return np.flipud(image)\n elif mode == 2:\n # rotate counterwise 90 degree\n return np.rot90(image)\n elif mode == 3:\n # rotate 90 degree and flip up and down\n image = np.rot90(image)\n return np.flipud(image)\n elif mode == 4:\n # rotate 180 degree\n return np.rot90(image, k=2)\n elif mode == 5:\n # rotate 180 degree and flip\n image = np.rot90(image, k=2)\n return np.flipud(image)\n elif mode == 6:\n # rotate 270 degree\n return np.rot90(image, k=3)\n elif mode == 7:\n # rotate 270 degree and flip\n image = np.rot90(image, k=3)\n return np.flipud(image)\n\n\nclass train_data():\n def __init__(self, filepath='./data/image_clean_pat.npy'):\n self.filepath = filepath\n assert '.npy' in filepath\n if not os.path.exists(filepath):\n print(\"[!] 
Data file not exists\")\n sys.exit(1)\n\n def __enter__(self):\n print(\"[*] Loading data...\")\n self.data = np.load(self.filepath)\n np.random.shuffle(self.data)\n print(\"[*] Load successfully...\")\n return self.data\n\n def __exit__(self, type, value, trace):\n del self.data\n gc.collect()\n print(\"In __exit__()\")\n\n\ndef load_data(filepath='./data/image_clean_pat.npy'):\n return train_data(filepath=filepath)\n\n\ndef load_images(filelist):\n # pixel value range 0-255\n if not isinstance(filelist, list):\n im = Image.open(filelist).convert('L')\n return np.array(im).reshape(1, im.size[1], im.size[0], 1)\n data = []\n for file in filelist:\n im = Image.open(file).convert('L')\n data.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))\n return data\n\ndef load_images_RGB(filelist):\n # pixel value range 0-255\n if not isinstance(filelist, list):\n im = Image.open(filelist).convert('RGB')\n return np.array(im).reshape(1, im.size[1], im.size[0], 3)\n data = []\n for file in filelist:\n im = Image.open(file).convert('RGB')\n data.append(np.array(im).reshape(1, im.size[1], im.size[0], 3))\n return data\n\ndef save_images(filepath, ground_truth, noisy_image=None, clean_image=None):\n # assert the pixel value range is 0-255\n ground_truth = np.squeeze(ground_truth)\n noisy_image = np.squeeze(noisy_image)\n clean_image = np.squeeze(clean_image)\n if not clean_image.any():\n cat_image = ground_truth\n else:\n cat_image = np.concatenate([ground_truth, noisy_image, clean_image], axis=1)\n im = Image.fromarray(cat_image.astype('uint8')).convert('L')\n im.save(filepath, 'png')\n\ndef save_images_RGB(filepath, ground_truth, noisy_image=None, clean_image=None):\n # assert the pixel value range is 0-255\n ground_truth = np.squeeze(ground_truth)\n noisy_image = np.squeeze(noisy_image)\n clean_image = np.squeeze(clean_image)\n if not clean_image.any():\n cat_image = ground_truth\n else:\n cat_image = np.concatenate([ground_truth, noisy_image, clean_image], axis=1)\n im = Image.fromarray(cat_image.astype('uint8')).convert('RGB')\n im.save(filepath, 'png')\n\n\ndef save_images1(filepath, ground_truth, noisy_image=None, clean_image=None):\n # assert the pixel value range is 0-255\n print(np.shape(ground_truth), np.shape(noisy_image), np.shape(clean_image))\n ground_truth = np.squeeze(ground_truth)\n noisy_image = np.squeeze(noisy_image)\n clean_image = np.squeeze(clean_image)\n\n\n\ndef cal_psnr(im1, im2):\n # assert pixel value range is 0-255 and type is uint8\n mse = ((im1.astype(np.float) - im2.astype(np.float)) ** 2).mean()\n psnr = 10 * np.log10(255 ** 2 / mse)\n return psnr\n\n\ndef tf_psnr(im1, im2):\n # assert pixel value range is 0-1\n mse = tf.losses.mean_squared_error(labels=im2 * 255.0, predictions=im1 * 255.0)\n return 10.0 * (tf.log(255.0 ** 2 / mse) / tf.log(10.0))\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"71420322","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport pdb\nimport traceback\nfrom typing import Dict, List, Set, Tuple\n\n\ndef unique_planets(orbits: List[Tuple[str, str]]) -> Set[str]:\n \"\"\"Get the unique list of planets from all of the orbits.\"\"\"\n planets: Set[str] = set()\n planets.update([x[0] for x in orbits])\n planets.update([x[1] for x in orbits])\n return planets\n\n\ndef to_dot(orbits: List[Tuple[str, str]]) -> None:\n print(\"digraph {\")\n\n planets = unique_planets(orbits)\n planet_nodes: 
Dict[str, str] = {}\n for planet in planets:\n node = f\"node_{planet}\"\n planet_nodes[planet] = node\n print(f' {node} [label = \"{planet}\"];')\n print(\"\")\n\n for planet, child in orbits:\n print(f\" {planet_nodes[planet]} -> {planet_nodes[child]};\")\n\n print(\"}\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Advent of Code - 2019 - Day 6 - Universal Orbit Map - To Dot.\"\n )\n parser.add_argument(\n \"input\",\n type=str,\n default=\"input.txt\",\n nargs=\"?\",\n help=\"The puzzle input. (Default %(default)s)\",\n )\n args = parser.parse_args()\n\n orbits: List[Tuple[str, str]] = []\n with open(args.input) as inf:\n for line in inf:\n orbits.append(tuple(line.strip().split(\")\"))) # type: ignore\n\n try:\n to_dot(orbits)\n except Exception:\n traceback.print_exc()\n pdb.post_mortem()\n","sub_path":"2019/06-universal_orbit/to_dot.py","file_name":"to_dot.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"360995173","text":"import numpy as np\n\n\ndef get_related_docs_vec(tag_lst,num_words,tag_inv_idx):\n new = np.zeros(num_words)\n for tag in tag_lst:\n if tag in tag_inv_idx:\n new+=tag_inv_idx[tag]\n return new\n\ndef input_vec(words, word_to_int_dict):\n vec = np.zeros(len(word_to_int_dict))\n for w in words:\n if w in word_to_int_dict:\n vec[word_to_int_dict[w]] = 1\n return vec\n\ndef rocchio(original_input, list_of_liked_hashtags, word_to_int_dict, tag_inv_idx):\n a=.8\n b=.2\n num_words = len(word_to_int_dict)\n original_query_vec = input_vec(original_input.split(' '), word_to_int_dict)\n good_docs_sum_vec = get_related_docs_vec(list_of_liked_hashtags,num_words, tag_inv_idx)\n newQ = a*original_query_vec+ b*good_docs_sum_vec\n\n return newQ #in the flask you will feed this back to the input_to_tags func\n","sub_path":"app/irsystem/models/rocchio.py","file_name":"rocchio.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"224913787","text":"# coding:utf-8\nimport unittest\n\nimport urllib.request\n\n__author__ = \"Charles François Rey\"\n__copyright__ = \"Copyright (c) 2017 Charles François Rey\"\n__license__ = \"MIT\"\n__version__ = \"0.1.0\"\n__status__ = \"Prototype\"\n\n\nclass TestTemplate(unittest.TestCase):\n __point_table = ([0] + ([255] * 255))\n\n def __init__(self, **kwargs):\n super(TestTemplate, self).__init__('test_compare_domains')\n self._test_kwargs = kwargs\n\n @staticmethod\n def get(url):\n return urllib.request.urlopen(url).read()\n\n def test_compare_domains(self):\n domains = self._test_kwargs['domains']\n if len(domains) != 2:\n raise self.failureException('there must be 2 domains')\n\n path = self._test_kwargs['path']\n\n url1 = domains[0] + path\n url2 = domains[1] + path\n\n res1 = self.get(url1)\n res2 = self.get(url2)\n\n self.assertEqual(res1, res2, msg='%s and %s are different' % (url1, url2))\n\nif __name__ == '__main__':\n suite = unittest.TestSuite()\n\n suite.addTest(TestTemplate(domains=('http://example.com', 'http://www.example.com'), path='/'))\n # ... 
call addTest(TestTemplate(...)) with as many paths as needed ...\n\n unittest.TextTestRunner().run(suite)\n","sub_path":"compare_websites.py","file_name":"compare_websites.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"531728637","text":"\n\nfrom xai.brain.wordbase.nouns._scream import _SCREAM\n\n#calss header\nclass _SCREAMING(_SCREAM, ):\n\tdef __init__(self,): \n\t\t_SCREAM.__init__(self)\n\t\tself.name = \"SCREAMING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"scream\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_screaming.py","file_name":"_screaming.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"388006470","text":"from odoo import api, fields, models, _\nfrom datetime import datetime, timedelta\nfrom odoo.exceptions import UserError\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass SalesCostRevenue(models.Model):\n _inherit='sale.order'\n\n \n branch_id = fields.Many2one('branches.cost.center', string=\"Branch\", related='analytic_account_id.branch_id')\n company_name = fields.Char(related=\"company_id.name\", string=\"Company\")\n is_associated_with_project = fields.Boolean(string=\"Is associated with a project?\")\n project_id = fields.Many2one('project.project',string=\"Project\")\n \n\n @api.multi\n def _prepare_invoice(self):\n res = super(SalesCostRevenue, self)._prepare_invoice()\n res.update({'analytic_account_id': self.analytic_account_id.id})\n return res\n\n @api.onchange('template_id')\n def onchange_template_id(self):\n if not self.template_id:\n return\n template = self.template_id.with_context(lang=self.partner_id.lang)\n\n order_lines = [(5, 0, 0)]\n for line in template.quote_line:\n discount = 0\n if self.pricelist_id:\n price = self.pricelist_id.with_context(uom=line.product_uom_id.id).get_product_price(line.product_id, 1, False)\n if self.pricelist_id.discount_policy == 'without_discount' and line.price_unit:\n discount = (line.price_unit - price) / line.price_unit * 100\n price = line.price_unit\n\n else:\n price = line.price_unit\n\n data = {\n 'name': line.name,\n 'date_of_pickup': line.date_of_pickup,\n 'shift': line.shift,\n 'delivery_date': line.delivery_date,\n 'department_id': line.department_id.id,\n 'price_unit': price,\n 'discount': 100 - ((100 - discount) * (100 - line.discount)/100),\n 'product_uom_qty': line.product_uom_qty,\n 'product_id': line.product_id.id,\n 'layout_category_id': line.layout_category_id,\n 'product_uom': line.product_uom_id.id,\n 'website_description': line.website_description,\n 'state': 'draft',\n 'customer_lead': self._get_customer_lead(line.product_id.product_tmpl_id),\n }\n if self.pricelist_id:\n data.update(self.env['sale.order.line']._get_purchase_price(self.pricelist_id, line.product_id, line.product_uom_id, fields.Date.context_today(self)))\n order_lines.append((0, 0, data))\n\n self.order_line = order_lines\n self.order_line._compute_tax_id()\n\n option_lines = []\n for option in template.options:\n if self.pricelist_id:\n price = self.pricelist_id.with_context(uom=option.uom_id.id).get_product_price(option.product_id, 1, False)\n else:\n price = option.price_unit\n data = {\n\n 'product_id': option.product_id.id,\n 'layout_category_id': option.layout_category_id,\n 'name': option.name,\n 'quantity': option.quantity,\n 'uom_id': option.uom_id.id,\n 'price_unit': price,\n 
'discount': option.discount,\n 'website_description': option.website_description,\n }\n option_lines.append((0, 0, data))\n self.options = option_lines\n\n if template.number_of_days > 0:\n self.validity_date = fields.Date.to_string(datetime.now() + timedelta(template.number_of_days))\n\n self.website_description = template.website_description\n self.require_payment = template.require_payment\n\n if template.note:\n self.note = template.note\n\nclass SalesOrderLineAquion(models.Model):\n _inherit='sale.order.line'\n\n date_of_pickup = fields.Date(string=\"Date of Pickup\", store=True)\n shift = fields.Selection(selection=[\n ('AM', 'AM'),\n ('PM', 'PM')\n ],string=\"Shift\", store=True)\n delivery_date = fields.Date(String=\"Delivery Date\", store=True)\n department_id = fields.Many2one('aquion.department', string=\"Department\", store=True)\n\n @api.multi\n def _prepare_invoice_line(self, qty):\n res = super(SalesOrderLineAquion, self)._prepare_invoice_line(qty)\n res.update({'date_of_pickup': self.date_of_pickup,\n 'shift': self.shift,\n 'delivery_date': self.delivery_date,\n 'department_id': self.department_id.id})\n return res\n\n \n\nclass QuoteTemplateAquion(models.Model):\n _inherit='sale.quote.template'\n\n delivery_date = fields.Date(string=\"Delivery Date\")\n date_of_pickup = fields.Date(string=\"Date of Pickup\")\n shift = fields.Selection(selection=[\n ('AM', 'AM'),\n ('PM', 'PM')\n ],string=\"Shift\")\n\nclass QuoteTemplateLine(models.Model):\n _inherit='sale.quote.line'\n\n delivery_date = fields.Date(string=\"Delivery Date\")\n department_id = fields.Many2one('aquion.department', string=\"Department\")\n date_of_pickup = fields.Date(string=\"Date of Pickup\")\n shift = fields.Selection(selection=[\n ('AM', 'AM'),\n ('PM', 'PM')\n ],string=\"Shift\")\n layout_category_id = fields.Many2one('sale.layout_category', string=\"Section\", compute='_create_section')\n\n @api.depends('date_of_pickup', 'shift')\n def _create_section(self):\n for i in self:\n if i.date_of_pickup and i.shift:\n section_shift_exists = self.env['sale.layout_category'].search([('name', '=', str(i.date_of_pickup) + ' ' + i.shift)])\n if section_shift_exists:\n i.layout_category_id = section_shift_exists.id\n else:\n section_shift = self.env['sale.layout_category'].create({'name': str(i.date_of_pickup) + ' ' + i.shift})\n i.layout_category_id = section_shift.id\n\n\nclass AquionDepartment(models.Model):\n _name='aquion.department'\n\n name = fields.Char(string=\"Name\", required=True)\n\nclass SalesInvoiceCostRevenue(models.Model):\n _inherit='account.invoice'\n\n analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic Account\")\n branch_id = fields.Many2one('branches.cost.center', string=\"Branch\", related='analytic_account_id.branch_id')\n company_name = fields.Char(related=\"company_id.name\", string=\"Company\")\n is_royalty_fee = fields.Boolean()\n received_by = fields.Char(string=\"Received By\")\n\n @api.multi\n def _get_dr_report_name(self):\n self.ensure_one()\n return self.type == 'out_invoice' and self.state == 'draft' and _('Delivery Receipt') or \\\n self.type == 'out_invoice' and self.state in ('open','paid') and _('Delivery Receipt - %s') % (self.number)\n \n @api.multi\n def _get_service_invoice(self):\n self.ensure_one()\n return self.type == 'out_invoice' and self.state == 'draft' and _('Service Invoice') or \\\n self.type == 'out_invoice' and self.state in ('open','paid') and _('Service Invoice - %s') % (self.number)\n \n \n\n\n @api.onchange('purchase_id')\n 
def purchase_order_change(self):\n if not self.purchase_id:\n return {}\n if not self.partner_id:\n self.partner_id = self.purchase_id.partner_id.id\n self.analytic_account_id = self.purchase_id.account_analytic_id.id\n\n new_lines = self.env['account.invoice.line']\n for line in self.purchase_id.order_line - self.invoice_line_ids.mapped('purchase_line_id'):\n data = self._prepare_invoice_line_from_po_line(line)\n new_line = new_lines.new(data)\n new_line._set_additional_fields(self)\n new_lines += new_line\n\n self.invoice_line_ids += new_lines\n self.payment_term_id = self.purchase_id.payment_term_id\n self.env.context = dict(self.env.context, from_purchase_order_change=True)\n self.purchase_id = False\n return {}\n\nclass InvoiceRoyaltyFee(models.Model):\n _inherit='account.invoice.line'\n\n month = fields.Many2one('royalty.fee.month', string=\"Month\")\n date_of_pickup = fields.Date(string=\"Date of Pickup\")\n shift = fields.Selection(selection=[\n ('AM', 'AM'),\n ('PM', 'PM')\n ],string=\"Shift\")\n delivery_date = fields.Date(String=\"Delivery Date\")\n department_id = fields.Many2one('aquion.department', string=\"Department\")\n company_name = fields.Char(related=\"company_id.name\", string=\"Company\")\n\nclass ProductAnalyticAccount(models.Model):\n _inherit='product.template'\n\n analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic Account\", domain=\"[('company_id', '=', company_id)]\")\n company_name = fields.Char(related=\"company_id.name\", string=\"Company\")","sub_path":"suds/.history/models/sales_20190923155232.py","file_name":"sales_20190923155232.py","file_ext":"py","file_size_in_byte":8861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"349433968","text":"import numpy as np\nimport unittest\nimport similaritymeasures\nfrom scipy.spatial.distance import cdist\n\n# let's just define some data\nx1 = np.linspace(0.0, 1.0, 100)\ny1 = np.ones(100)*2\nx2 = np.linspace(0.0, 1.0, 50)\ny2 = np.ones(50)\n\ncurve1 = np.array((x1, y1)).T\ncurve2 = np.array((x2, y2)).T\n\nx1 = np.linspace(0.0, 1.0, 100)\ny1 = x1\nx2 = np.linspace(0.0, 1.0, 50)\ny2 = x2+1.0\n\ncurve3 = np.array((x1, y1)).T\ncurve4 = np.array((x2, y2)).T\n\nP = np.array([[0, 0], [1, 1], [2, 2]])\nQ = P.copy()\nQ[:, 1] = Q[:, 1] + 1\n\nr1 = 10\nr2 = 100\ntheta = np.linspace(0.0, 2.0*np.pi, 100)\nx1 = np.cos(theta)*r1\nx2 = np.cos(theta)*r2\ny1 = np.sin(theta)*r1\ny2 = np.sin(theta)*r2\ncurve5 = np.array((x1, y1)).T\ncurve6 = np.array((x2, y2)).T\n\n\nclass TestEverything(unittest.TestCase):\n\n def test_c1_c2_area(self):\n area = similaritymeasures.area_between_two_curves(curve1, curve2)\n self.assertTrue(area, 1.0)\n\n def test_c3_c4_area(self):\n area = similaritymeasures.area_between_two_curves(curve3, curve4)\n self.assertTrue(area, 1.0)\n\n def test_c1_c2_pcm(self):\n pcm = similaritymeasures.pcm(curve1, curve2)\n self.assertTrue(pcm, np.nan)\n\n def test_c3_c4_pcm(self):\n pcm = similaritymeasures.pcm(curve3, curve4)\n self.assertTrue(pcm, 50.0)\n\n def test_c1_c2_df(self):\n df = similaritymeasures.frechet_dist(curve1, curve2)\n self.assertTrue(df, 1.0)\n\n def test_c3_c4_df(self):\n df = similaritymeasures.frechet_dist(curve3, curve4)\n self.assertTrue(df, 1.0)\n\n def test_c1_c2_cl(self):\n cl = similaritymeasures.curve_length_measure(curve1, curve2)\n self.assertTrue(cl, 4.054651081081643)\n\n def test_c3_c4_cl(self):\n cl = similaritymeasures.curve_length_measure(curve3, curve4)\n self.assertTrue(cl, 
10.986122886681098)\n\n def test_P_Q_dtw(self):\n r, _ = similaritymeasures.dtw(P, Q)\n self.assertTrue(np.isclose(r, 3.0))\n\n def test_c5_c6_dtw(self):\n r, _ = similaritymeasures.dtw(curve5, curve6)\n self.assertTrue(np.isclose(r, 9000.0))\n\n def test_c5_c6_df(self):\n df = similaritymeasures.frechet_dist(curve5, curve6)\n self.assertTrue(np.isclose(df, 90.0))\n\n def test_P_Q_dtw_path(self):\n r, d = similaritymeasures.dtw(P, Q)\n path = similaritymeasures.dtw_path(d)\n c = cdist(P, Q)\n cost = sum(c[path[:, 0], path[:, 1]])\n self.assertTrue(np.isclose(r, cost))\n\n def test_c5_c6_dtw_path(self):\n r, d = similaritymeasures.dtw(curve5, curve6)\n path = similaritymeasures.dtw_path(d)\n c = cdist(curve5, curve6)\n cost = sum(c[path[:, 0], path[:, 1]])\n self.assertTrue(np.isclose(r, cost))\n\n def test_P_Q_dtw_cityblock(self):\n r, _ = similaritymeasures.dtw(P, Q, metric='cityblock')\n self.assertTrue(np.isclose(r, 3.0))\n\n def test_P_Q_dtw_minkowski_p1(self):\n r, _ = similaritymeasures.dtw(P, Q, metric='minkowski', p=1)\n self.assertTrue(np.isclose(r, 3.0))\n\n def test_P_Q_dtw_minkowski_p3(self):\n r, _ = similaritymeasures.dtw(P, Q, metric='minkowski', p=3)\n self.assertTrue(np.isclose(r, 3.0))\n\n\nif __name__ == '__main__':\n\n unittest.main()\n","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"292587966","text":"############# clever uses of decorators ###############\ndef print_run_time(func):\n def new_func(*args, **kwargs):\n import datetime\n before = datetime.datetime.now()\n print(\"%s start at {}\".format(before)%func.__name__)\n x = func(*args, **kwargs)\n after = datetime.datetime.now()\n print(\"Elapsed Time = {}\".format(after-before))\n return x\n return new_func \n\n\n#@print_run_time\n\n###########################\n############## display real-time images #################\nimport cv2\nimport numpy as np\n\n\ndef prep_image(img, inp_dim):\n \"\"\"\n Prepare image for input to the neural network. 
\n Preprocess the image.\n Returns a Variable \n \"\"\"\n\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0] #shape->(800,645,3)\n img = cv2.resize(orig_im, (inp_dim, inp_dim))#\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\ndef write(x, img):\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n\n label = \"{0}\".format(classes[cls])\n color = random.choice(colors)\n cv2.rectangle(img, c1, c2,color, 1)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]\n c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4\n cv2.rectangle(img, c1, c2,color, -1)\n cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1)\n return img\n\n\n\nimport torch \nimport random \nfrom util import *\nimport pickle as pkl\nimport time\n\nclasses = load_classes('D:\\extend_source\\pytorch\\pytorch-yolo-v3-master\\data\\coco.names')\ncolors = pkl.load(open(\"D:\\extend_source\\pytorch\\pytorch-yolo-v3-master\\pallete\", \"rb\"))\n \n # select the camera\ncap = cv2.VideoCapture(0)\nassert cap.isOpened(), 'Cannot capture source'\n\n\nwhile cap.isOpened():\n\n\n start = time.time()\n\n # get a frame\n ret, frame = cap.read()\n # show a frame\n if ret == 0:\n break\n\n img, orig_im, dim = prep_image(frame, 225)# orig_im is for display, img is for inference\n\n \n #output = model(Variable(img), CUDA)# plug the model in here\n #output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)#\n #output[:,1:5] = torch.clamp(output[:,1:5], 0.0, float(inp_dim))/inp_dim\n \n# im_dim = im_dim.repeat(output.size(0), 1)\n #output[:,[1,3]] *= frame.shape[1]\n #output[:,[2,4]] *= frame.shape[0]\n #list(map(lambda x: write(x, orig_im), output))# draw multiple detected objects\n a = torch.Tensor([ 0.0000, 55.8204, 136.4974, 622.0961, 477.4839, 0.9925, 0.0000, 0.0000])\n # add the per-frame processing here\n orig_im=write(a,orig_im)\n\n cv2.imshow(\"test\", orig_im)\n\n\n\n\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n time.sleep(0.01)\n end = time.time()\n second = end-start\n fps = 1/second\n print(\"FPS of the video is {0}\".format(fps))\n\n\ncap.release()\ncv2.destroyAllWindows()\n\n############################################################\n\n#############################################\n\n","sub_path":"常用代码.py","file_name":"常用代码.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"286061212","text":"\"\"\"\nImplementation of Harris Detector\n\nReference:\nC. Harris and M. Stephens, “A Combined Corner and Edge Detector,”\nin Proceedings of Alvey Vision Conference 1988, Manchester, 1988, pp. 
23.1-23.6.\n\"\"\"\n\nimport cv2 as cv\nimport numpy as np\nfrom scipy.signal import convolve2d\n\n# Read original image and get gaussian kernel\nimg = cv.imread('example.png')\nimg_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\nG = cv.getGaussianKernel(3, 1)\n\n# Begin Harris Detector Computation\n\nI = img_gray.astype('float32')\nX = convolve2d(I, [[-1, 0, 1]], mode='same')\nY = convolve2d(I, [[-1], [0], [1]], mode='same')\nA = convolve2d(X*X, G, mode='same')\nB = convolve2d(Y*Y, G, mode='same')\nC = convolve2d(X*Y, G, mode='same')\nR = A*B - C*C - 0.04 * (A + B)\n\n# Finished Harris Detector Computation\n\n# Classify all points in R and show corners in image\nmaxima = np.max(R)\nimg[R > maxima*0.1] = [255, 0, 0]\ncv.imshow('harris_detector', img)\ncv.waitKey(0)\n","sub_path":"digital_image_processing/feature_detectors/harris.py","file_name":"harris.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"257625903","text":"from ast import NodeTransformer\nfrom types import ClassType\nfrom compiler import parse as ast24_parse\n\nfrom OFS.interfaces import ITraversable\nfrom zExceptions import NotFound, Unauthorized\n\nfrom zope.component import queryMultiAdapter\nfrom zope.contentprovider.interfaces import IContentProvider\nfrom zope.contentprovider.interfaces import ContentProviderLookupError\nfrom zope.contentprovider.tales import addTALNamespaceData\n\ntry:\n from zope.contentprovider.interfaces import BeforeUpdateEvent\nexcept ImportError:\n BeforeUpdateEvent = None\n\nfrom zope.event import notify\nfrom zope.location.interfaces import ILocation\nfrom zope.traversing.adapters import traversePathElement\nfrom zope.traversing.interfaces import TraversalError\n\nfrom RestrictedPython.RestrictionMutator import RestrictionMutator\nfrom RestrictedPython.Utilities import utility_builtins\nfrom RestrictedPython import MutatingWalker\n\nfrom Products.PageTemplates.Expressions import render\n\nfrom AccessControl.ZopeGuards import guarded_getattr\nfrom AccessControl.ZopeGuards import guarded_getitem\nfrom AccessControl.ZopeGuards import guarded_apply\nfrom AccessControl.ZopeGuards import guarded_iter\nfrom AccessControl.ZopeGuards import protected_inplacevar\n\nfrom chameleon.astutil import Symbol\nfrom chameleon.astutil import Static\nfrom chameleon.codegen import template\nfrom sourcecodegen import generate_code\n\nfrom z3c.pt import expressions\n\n_marker = object()\n\ntry:\n # If this import succeeds, we need to use a content provider\n # renderer which acquisition-wraps the provider component\n from Products.Five.browser import providerexpression\n providerexpression\n\nexcept ImportError:\n ProviderExpr = expressions.ProviderExpr\nelse:\n def render_content_provider(econtext, name):\n name = name.strip()\n\n context = econtext.get('context')\n request = econtext.get('request')\n view = econtext.get('view')\n\n cp = queryMultiAdapter(\n (context, request, view), IContentProvider, name=name)\n\n # provide a useful error message, if the provider was not found.\n if cp is None:\n raise ContentProviderLookupError(name)\n\n # add the __name__ attribute if it implements ILocation\n if ILocation.providedBy(cp):\n cp.__name__ = name\n\n # Insert the data gotten from the context\n addTALNamespaceData(cp, econtext)\n\n # BBB: This is where we're different:\n if getattr(cp, '__of__', None) is not None:\n cp = cp.__of__(context)\n\n # Stage 1: Do the state update.\n if BeforeUpdateEvent is not None:\n 
notify(BeforeUpdateEvent(cp, request))\n cp.update()\n\n # Stage 2: Render the HTML content.\n return cp.render()\n\n class ProviderExpr(expressions.ProviderExpr):\n transform = Symbol(render_content_provider)\n\n\nzope2_exceptions = NameError, \\\n ValueError, \\\n AttributeError, \\\n LookupError, \\\n TypeError, \\\n NotFound, \\\n Unauthorized, \\\n TraversalError\n\n\ndef static(obj):\n return Static(template(\"obj\", obj=Symbol(obj), mode=\"eval\"))\n\n\nclass BoboAwareZopeTraverse(object):\n traverse_method = 'restrictedTraverse'\n\n __slots__ = ()\n\n @classmethod\n def traverse(cls, base, request, path_items):\n \"\"\"See ``zope.app.pagetemplate.engine``.\"\"\"\n\n length = len(path_items)\n if length:\n i = 0\n method = cls.traverse_method\n while i < length:\n name = path_items[i]\n i += 1\n\n if ITraversable.providedBy(base):\n traverser = getattr(base, method)\n base = traverser(name)\n else:\n base = traversePathElement(\n base, name, path_items[i:], request=request\n )\n\n return base\n\n def __call__(self, base, econtext, call, path_items):\n request = econtext.get('request')\n\n if path_items:\n base = self.traverse(base, request, path_items)\n\n if call is False:\n return base\n\n if getattr(base, '__call__', _marker) is not _marker or callable(base):\n base = render(base, econtext)\n\n return base\n\n\nclass TrustedBoboAwareZopeTraverse(BoboAwareZopeTraverse):\n traverse_method = 'unrestrictedTraverse'\n\n __slots__ = ()\n\n def __call__(self, base, econtext, call, path_items):\n request = econtext.get('request')\n\n base = self.traverse(base, request, path_items)\n\n if call is False:\n return base\n\n if (getattr(base, '__call__', _marker) is not _marker \\\n or isinstance(base, ClassType)):\n return base()\n\n return base\n\n\nclass PathExpr(expressions.PathExpr):\n exceptions = zope2_exceptions\n\n traverser = Static(template(\n \"cls()\", cls=Symbol(BoboAwareZopeTraverse), mode=\"eval\"\n ))\n\n\nclass TrustedPathExpr(PathExpr):\n traverser = Static(template(\n \"cls()\", cls=Symbol(TrustedBoboAwareZopeTraverse), mode=\"eval\"\n ))\n\n\nclass NocallExpr(expressions.NocallExpr, PathExpr):\n pass\n\n\nclass ExistsExpr(expressions.ExistsExpr):\n exceptions = zope2_exceptions\n\n\nclass RestrictionTransform(NodeTransformer):\n secured = {\n '_getattr_': guarded_getattr,\n '_getitem_': guarded_getitem,\n '_apply_': guarded_apply,\n '_getiter_': guarded_iter,\n '_inplacevar_': protected_inplacevar,\n }\n\n def visit_Name(self, node):\n value = self.secured.get(node.id)\n if value is not None:\n return Symbol(value)\n\n return node\n\n\nclass UntrustedPythonExpr(expressions.PythonExpr):\n rm = RestrictionMutator()\n rt = RestrictionTransform()\n\n # Make copy of parent expression builtins\n builtins = expressions.PythonExpr.builtins.copy()\n\n # Update builtins with Restricted Python utility builtins\n builtins.update(dict(\n (name, static(builtin)) for (name, builtin) in utility_builtins.items()\n ))\n\n def rewrite(self, node):\n if node.id == 'repeat':\n node.id = 'wrapped_repeat'\n else:\n node = super(UntrustedPythonExpr, self).rewrite(node)\n\n return node\n\n def parse(self, string):\n encoded = string.encode('utf-8')\n node = ast24_parse(encoded, 'eval').node\n MutatingWalker.walk(node, self.rm)\n string = generate_code(node)\n decoded = string.decode('utf-8')\n value = super(UntrustedPythonExpr, self).parse(decoded)\n\n # Run restricted python transform\n self.rt.visit(value)\n\n return 
value\n","sub_path":"buildout-cache--/eggs/five.pt-2.2.2-py2.7.egg/five/pt/expressions.py","file_name":"expressions.py","file_ext":"py","file_size_in_byte":6725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"550967797","text":"name=input(\"Please enter your name: \")\nproduct=input(\"Please enter product you want to buy: \")\nunitprice = input(\"Please enter unit price of product: \")\ntotalMoney = input(\"Please enter how much money you have: \")\nquantity = int(totalMoney)//int(unitprice)\nmoney_left=int(totalMoney)%int(unitprice)\n\nprint(\"Hi {}.Your product is {}.\"\n \"Price of each product is {}.\"\n \"You have total money {}.\"\n \"Hence you can bring {} number of product {}.You will be left with money {} \".format(name,product,unitprice,totalMoney,quantity,product,money_left))","sub_path":"string_formating.py","file_name":"string_formating.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"358562721","text":"import bpy\r\nfrom bpy.props import BoolProperty, FloatProperty\r\n\r\n\r\nclass HOPS_OT_BevelMultiplier(bpy.types.Operator):\r\n bl_idname = \"view3d.bevel_multiplier\"\r\n bl_label = \"Hops Bevel Multiplier\"\r\n bl_options = {'REGISTER', 'UNDO'}\r\n bl_description = \"Multiplies / Divides bevel width of selected objects\"\r\n\r\n multiply: BoolProperty(name=\"Multiply/Divide\",\r\n description=\"multiply or devide bevel value for modifier\",\r\n default=True)\r\n\r\n multiply_amount: FloatProperty(name=\"Multiply by\", description=\"Multiply by\", default=2.0, min=0.0)\r\n divide_amount: FloatProperty(name=\"Divide by\", description=\"Divide by\", default=2.0, min=0.1)\r\n\r\n use_active: BoolProperty(name=\"Unify Bevel To Active\",\r\n description=\"Unify bevel value for modifiers To Active\",\r\n default=False)\r\n\r\n active_value: FloatProperty(name=\"Active Bevel Value\", description=\"active object bevel value\", default=0,)\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n object = context.active_object\r\n if object is None: return False\r\n return object.type == \"MESH\" and object.mode == \"OBJECT\"\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n box = layout.box()\r\n box.prop(self, \"multiply\")\r\n box = layout.box()\r\n box.prop(self, \"multiply_amount\")\r\n box.prop(self, \"divide_amount\")\r\n box = layout.box()\r\n box.prop(self, \"use_active\")\r\n\r\n def execute(self, context):\r\n\r\n object = bpy.context.active_object\r\n for modifieractive in object.modifiers:\r\n if modifieractive.type == \"BEVEL\":\r\n self.active_value = modifieractive.width\r\n\r\n for obj in bpy.context.selected_objects:\r\n for modifier in obj.modifiers:\r\n if modifier.type == \"BEVEL\":\r\n if self.use_active:\r\n if self.multiply:\r\n modifier.width = self.active_value * self.multiply_amount\r\n else:\r\n modifier.width = self.active_value / self.divide_amount\r\n else:\r\n if self.multiply:\r\n modifier.width = modifier.width * self.multiply_amount\r\n else:\r\n modifier.width = modifier.width / self.divide_amount\r\n return {'FINISHED'}\r\n","sub_path":"All_In_One/addons/HOps/operators/misc/bevel_multiplier.py","file_name":"bevel_multiplier.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"67429123","text":"import os,sys\r\n\r\n\r\ndef main():\r\n file_path = 
'C:\\\\Users\\\\User_2\\\\Desktop\\\\lista.txt'\r\n    folder_path = 'C:\\\\Users\\\\User_2\\\\Desktop\\\\testni_folder\\\\'\r\n\r\n    file_list = [] # in a form of a tuple (name, absolute_path)\r\n    for dirName, subdirList, fileList in os.walk(folder_path):\r\n        for fname in fileList:\r\n            path = os.path.join(dirName,fname)\r\n            name = fname\r\n            file_list.append((name,path))\r\n\r\n    textfile = open(file_path, 'r')\r\n    filetext = textfile.read()\r\n    textfile.close()\r\n\r\n    # list of all files in a given folder:\r\n    # file_list\r\n    # list of files in a \"list.txt\":\r\n    txt_list = []\r\n    for file_name in filetext.split():\r\n        txt_list.append(file_name)\r\n    txt_list = list(set(txt_list))\r\n\r\n    result = []\r\n    # txt_list <------> file_list\r\n    for t in txt_list:\r\n        dont_copy = False\r\n        for f in file_list:\r\n            if f[0] == t:\r\n                dont_copy = True\r\n                break\r\n\r\n\r\n\r\ndef remove_extension(path):\r\n    # build the path character by character and stop at the first dot;\r\n    # strings are immutable, so concatenate instead of calling append\r\n    new_path = ''\r\n    for ch in path:\r\n        if ch != '.':\r\n            new_path += ch\r\n        else:\r\n            break\r\n    return new_path\r\n\r\nif __name__ == \"__main__\":\r\n    main() \r\n","sub_path":"remove_listed_files.py","file_name":"remove_listed_files.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"401170890","text":"from ....tools.decorators import metric\n\nimport anndata\nimport numpy as np\nimport scanpy as sc\nimport sklearn.metrics\n\n\n@metric(metric_name=\"Mean-squared error\", maximize=False)\ndef mse(adata):\n\n    test_data = anndata.AnnData(X=adata.obsm[\"test\"], obs=adata.obs, var=adata.var)\n    denoised_data = anndata.AnnData(\n        X=adata.obsm[\"denoised\"], obs=adata.obs, var=adata.var\n    )\n\n    # scaling and transformation\n    target_sum = np.median(test_data.X.sum(axis=1))\n\n    sc.pp.normalize_total(test_data, target_sum)\n    sc.pp.log1p(test_data)\n\n    sc.pp.normalize_total(denoised_data, target_sum)\n    sc.pp.log1p(denoised_data)\n\n    error = sklearn.metrics.mean_squared_error(test_data.X, denoised_data.X)\n    return error\n","sub_path":"openproblems/tasks/denoising/metrics/mse.py","file_name":"mse.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"572748320","text":"## Author: Krzysztof Oblak\n## Date: 28/11/2014\n\nfrom random import randrange\nfrom flask import *\n##from threading import Thread\nfrom itertools import islice\nimport linecache\nimport time\nimport natsort\n\napp = Flask(__name__)\na = \"words.txt\"\nb = \"containers.txt\"\nc = \"score.txt\"\nbaseWord = \"\"\ninputList = [\"\"]*7\nflagList = [\"\"]*7\nprocessedList = []\nresultList = [False]*7\nindex = 0\nvalid = True\nstartTime = -1.0\nendTime = -2.0\nresultTime = -3.0\n\ndef getHighScore():\n    temp = []\n    with open(c, 'r') as infile:\n        lines_gen = islice(infile, 10)\n        for line in lines_gen:\n            temp.append(line)\n    return temp\n\ndef fileLength():\n    with open(b) as file:\n        for i, l in enumerate(file):\n            pass\n    return i + 1\n\ndef getWord():\n    lineNum = randrange(fileLength())\n    source = linecache.getline(b,lineNum)\n    return source.strip()\n\ndef checkLetters(inp,bs):\n    index = session['index']\n    flagList = session['fList']\n    inList = sorted(inp)\n    bsList = sorted(bs)\n\n    for elem in inList:\n        if elem in bsList:\n            bsList.remove(elem)\n        else:\n            flagList[index] = \"No letter '\" + elem + \"' in word \" + bs + \" or used too many times\"\n            session['fList'] = flagList\n            return False\n    session['fList'] = flagList\n    return True\n\ndef 
checkWord(wordInput,flagInput,base):\n datafile = open(a)\n resultList = session['rList']\n index = session['index']\n flagList = session['fList']\n for line in datafile:\n if wordInput in line.upper().strip() and len(wordInput) == len(line.upper().strip()) and checkLetters(wordInput,base) == True and wordInput != base and flagInput == \"\":\n resultList[index] = True\n flagList[index] = \"Correct\"\n session['fList'] = flagList\n if wordInput == base:\n flagList[index] = \"Duplicate of the source word\"\n session['fList'] = flagList\n datafile.close()\n session['index'] = index+1\n session['rList'] = resultList\n\ndef startGame():\n session['base'] = getWord()\n session['sTime'] = round(time.time(),3)\n\ndef storeInput(a,b,c,d,e,f,g):\n inputList = [\"\"]*7\n inputList[0] = a.strip()\n inputList[1] = b.strip()\n inputList[2] = c.strip()\n inputList[3] = d.strip()\n inputList[4] = e.strip()\n inputList[5] = f.strip()\n inputList[6] = g.strip()\n session['inList'] = inputList\n\ndef processInput():\n inputList = session['inList']\n flagList = session['fList']\n baseWord = session['base']\n valid = session['valid']\n resultTime = -3\n startTime = session['sTime']\n endTime = session['eTime']\n\n for x in range(0,7):\n for y in range(x,7):\n if x == y:\n pass\n else:\n if inputList[x].upper() == inputList[y].upper():\n flagList[y] = \"Duplicate of \" + inputList[x]\n session['fList'] = flagList\n\n for x in range(0,7):\n checkWord(inputList[x].upper(),flagList[x],baseWord.upper())\n\n flagList = session['fList']\n\n for x in range(0,7):\n if flagList[x] == \"\":\n flagList[x] = \"Not in dictionary\"\n\n for x in range(0,7):\n if flagList[x] == \"Correct\" and valid == True:\n valid = True;\n else:\n valid = False\n\n if valid == True:\n resultTime = endTime - startTime\n session['rTime'] = round(resultTime,3)\n else:\n resultTime = -1\n session['rTime'] = resultTime\n\n session['fList'] = flagList\n session['index'] = index\n session['valid'] = valid\n session['sTime'] = startTime\n session['eTime'] = endTime\n\ndef getRank(nm,tm):\n rank = -1\n query = str(tm)+\"^DA&^*\"+nm\n with open(c) as search:\n for i,line in enumerate(search):\n line = line.strip() # remove '\\n' at end of line\n if query == line:\n val = str(i+1)\n if val.endswith('1') and val != \"11\":\n return val+'st'\n elif val.endswith('2') and val != \"12\":\n return val+'nd'\n elif val.endswith('3') and val != \"13\":\n return val+'rd'\n else:\n return val+'th'\n else:\n pass\n return str(rank)\n\ndef storeScore():\n with open(c,'a') as file:\n file.write(str(session['rTime'])+\"^DA&^*\"+session['name']+\"\\n\")\n file = open(c, \"r\")\n toSort = file.read()\n listToSort = toSort.splitlines()\n sortedList = natsort.natsorted(listToSort, key=lambda y: y.lower())\n # now write the output file\n with open(c, 'w') as file:\n for elem in sortedList:\n file.write(elem+\"\\n\")\n file.close()\n\n@app.route(\"/\")\ndef display_intro():\n session['base'] = ''\n return render_template(\"intro.html\",the_title=\"WordGame\")\n\n@app.route(\"/game\")\ndef display_home():\n if session['base'] == '':\n startGame()\n baseWord = session['base']\n return render_template(\"home.html\",the_title=\"WordGame\", the_source_word=baseWord)\n\n@app.route(\"/result\", methods=['POST'])\ndef display_result():\n all_ok = True\n if request.form[\"first\"].strip() == '' or request.form[\"second\"].strip() == '' or \\\n request.form[\"third\"].strip() == '' or request.form[\"fourth\"].strip() == '' or \\\n request.form[\"fifth\"].strip() == '' or 
request.form[\"sixth\"].strip() == '' or \\\n request.form[\"seventh\"].strip() == '':\n all_ok = False\n flash(\"Missing word input.\")\n if all_ok:\n session['index'] = 0\n session['eTime'] = round(time.time(),3)\n session['valid'] = True\n session['fList'] = [\"\"]*7\n session['rList'] = [False]*7\n storeInput(request.form[\"first\"],request.form[\"second\"],\n request.form[\"third\"],request.form[\"fourth\"],\n request.form[\"fifth\"],request.form[\"sixth\"],\n request.form[\"seventh\"])\n processInput()\n inputList = session['inList']\n flagList = session['fList']\n resultTime = session['rTime']\n baseWord = session['base']\n session['base']=''\n return render_template(\"result.html\",stored_input_flags=zip(inputList,flagList),time_result=resultTime,base=baseWord)\n else:\n return redirect(url_for('display_home'))\n\n@app.route(\"/scores\", methods=['POST'])\ndef display_score():\n session['name'] = request.form['name']\n if session['rTime'] != -1:\n storeScore()\n times = []\n names = []\n top = getHighScore()\n for elem in top:\n temp = elem.split('^DA&^*')\n times.append(temp[0])\n names.append(temp[1])\n \n rank = getRank(session['name'],session['rTime'])\n session['rTime'] = -1\n return render_template(\"highscore.html\",top_ten = zip(times,names), the_rank=rank)\n else:\n return display_top()\n@app.route(\"/topscores\")\ndef display_top():\n times = []\n names = []\n top = getHighScore()\n for elem in top:\n temp = elem.split('^DA&^*')\n times.append(temp[0])\n names.append(temp[1])\n return render_template(\"highscore.html\",top_ten = zip(times,names),the_rank=str(-1))\n\n@app.route(\"/about\")\ndef display_about():\n return render_template(\"about.html\",the_title=\"WordGame\")\napp.secret_key = 'IGI&AD^DAD6*AUIA14819Duiad&^DA&^*6768'\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","sub_path":"wordgame.py","file_name":"wordgame.py","file_ext":"py","file_size_in_byte":7416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"235356502","text":"#! /usr/bin/env python3\n\nimport unicodedata as ud\n\nfrom testCases import *\nfrom tools.ngram import NGram as NG\n\n\n\n################ Testing code from here on ################\n## (correction) = (correct(wrong))\n## When (correction) != (target) : P(expected) < P(correction)\n## When (correction) != (target) : (bad increases)\n## When (target) not in NWORDS : unknown word\n\ndef enzymetest(tests, ng, bias=None, verbose=True):\n\timport time\n\tn, bad, unknown, start = 0, 0, 0, time.clock()\n\tif bias:\n\t\tfor target in tests:\n\t\t\tng.NWORDS[target] += bias\n\tfor target,wrongs in tests.items():\n\t\tfor wrong in wrongs.split():\n\t\t\tn += 1\n##\t\t\tw = correct(wrong)\n\t\t\tcandidates = ng.correctSet(wrong)\n\t\t\tw=max(candidates, key=ng.NWORDS.get)\n\t\t\tdiff= ng.NWORDS[w] - ng.NWORDS[target]\n\t\t\tif w!=target:\n\t\t\t\tbad += 1\n\t\t\t\tunknown += (target not in ng.NWORDS)\n\t\t\t\tif verbose:\n\t\t\t\t\tif ((diff > 0) and (diff < 10) and (ng.NWORDS[target]==1)):\n\t\t\t\t\t\t\n\t\t\t\t\t\tprint('for set [[%s]], Diff bw (correct(%s): %s(%d)) & (expected: %s(%d)) is %d'%(candidates, wrong, w, ng.NWORDS[w], target, ng.NWORDS[target], diff))\n##\t\t\t\t\tprint('correct(%r) => %r (%d); expected %r (%d)' % (wrong, w, NWORDS[w], target, NWORDS[target]))\n\t\n\treturn dict(bad=bad, n=n, bias=bias, pct=int(100. 
- 100.*bad/n), unknown=unknown, secs=int(time.clock()-start))\n\n\n\n## Will be adding unknown words into NWORDS for bad cases;\n## otherwise increase P(target) by half the difference between P(target) and P(correction),\n## which raises the chance that the correction algorithm reaches the target next time.\ndef learned(tests, ng, bias=None, verbose=True):\n\timport time\n\tn, bad, unknown, start = 0, 0, 0, time.clock()\n\tprint(\"********** Learned *************\")\n\tif bias:\n\t\tfor target in tests:\n\t\t\tng.NWORDS[target] += bias\n\tfor target,wrongs in tests.items():\n\t\tfor wrong in wrongs.split():\n\t\t\tn += 1\n\t\t\t## This is for multiple runs: we already know the expected word for this\n\t\t\t## wrong spelling, hence no need to call correct(wrong) again.\n\t\t\tif(wrong in ng.EXPECTENCE):\n\t\t\t\tw = ng.EXPECTENCE[wrong]\n\t\t\telse:\n\t\t\t\tw = ng.correct(wrong)\n\t\t\tif w!=target:\n\t\t\t\tbad += 1\n\t\t\t\tif (target not in ng.NWORDS):\n\t\t\t\t\tunknown += 1\n\t\t\t\t\t## This is creating the error model\n\t\t\t\t\tng.EXPECTENCE[wrong] = target\n\n\t\t\t\telse:\n\t\t\t\t\tng.NWORDS[target] += int(abs(ng.NWORDS[target] - ng.NWORDS[w])/2)\n\n\t\t\t\tif verbose:\n\t\t\t\t\tprint('correct(%r) => %r (%d); expected %r (%d)' % (wrong, w, ng.NWORDS[w], target, ng.NWORDS[target]))\n\t\n\treturn dict(bad=bad, n=n, bias=bias, pct=int(100. - 100.*bad/n), unknown=unknown, secs=int(time.clock()-start))\n\n\n\n\n\nif __name__ == '__main__':\n#def main():\n\n########## Change these as needed #############\n\tlang = \"english\"\n\tbias, verbose, timesrun = None, True, 1\n###############################################\n\n\tdataset=[]\n\tng = NG(lang)\n\n\tfor i in range(timesrun):\n\t\tdataset.append((\"Ran \"+str(i+1), enzymetest(test2.test(), ng, bias, verbose)))\n\t\tif bias:\n\t\t\tbias = None\n\n\tfor item in dataset: print(item)\n#\tprint(EXPECTENCE)\n\n#if __name__ == '__main__':\n#\tmain()\n","sub_path":"enzyme.py","file_name":"enzyme.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"491799544","text":"from ._employee import Pracownik\nfrom ._subject import Przedmiot\nfrom ._utils import timestamp_to_datetime, concat_hours_and_minutes\n\n\nclass PoraLekcji:\n    \"\"\"\n    Lesson time slot\n\n    Attributes:\n        id (:class:`int`): ID of the lesson time slot\n        numer (:class:`int`): Sequential number of the lesson time slot\n        od (:class:`datetime.datetime`): Hour and minute when the lesson starts\n        do (:class:`datetime.datetime`): Hour and minute when the lesson ends\n    \"\"\"\n\n    def __init__(self, id=None, numer=None, od=None, do=None):\n        
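# `od` and `do` are datetime objects built from the API's Unix timestamps\n        # in from_json below; they mark when this time slot starts and ends.\n        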
self.id = id\n        self.numer = numer\n        self.od = od\n        self.do = do\n\n    def __repr__(self):\n        return \"<PoraLekcji {}: {}:{}-{}:{}>\".format(\n            self.numer, self.od.hour, self.od.minute, self.do.hour, self.do.minute\n        )\n\n    @classmethod\n    def from_json(cls, j):\n        id = j.get(\"Id\")\n        numer = j.get(\"Numer\")\n        od = timestamp_to_datetime(j.get(\"Poczatek\"))\n        do = timestamp_to_datetime(j.get(\"Koniec\"))\n        return cls(id=id, numer=numer, od=od, do=do)\n\n\nclass Lekcja:\n    \"\"\"\n    Lesson\n\n    Attributes:\n        numer (:class:`int`): Lesson number\n        pora (:class:`vulcan.models.PoraLekcji`): Information about the lesson time slot\n        przedmiot (:class:`vulcan.models.Przedmiot`): Subject of the lesson\n        dzien (:class:`datetime.date`): Date of the lesson\n        od (:class:`datetime.datetime`): Date and time when the lesson starts\n        do (:class:`datetime.datetime`): Date and time when the lesson ends\n    \"\"\"\n\n    def __init__(\n        self,\n        numer=None,\n        pora=None,\n        przedmiot=None,\n        pracownik=None,\n        dzien=None,\n        od=None,\n        do=None,\n        LekcjaUcznia=None,\n        sala=None,\n        adnotacja=None,\n        przekreslonaNazwa=None,\n        pogrubionaNazwa=None,\n    ):\n        self.numer = numer\n        self.pora = pora\n        self.przedmiot = przedmiot\n        self.pracownik = pracownik\n        self.dzien = dzien\n        self.od = od\n        self.do = do\n        self.LekcjaUcznia = LekcjaUcznia\n        self.sala = sala\n        self.adnotacja = adnotacja\n        self.przekreslonaNazwa = przekreslonaNazwa\n        self.pogrubionaNazwa = pogrubionaNazwa\n\n    def __repr__(self):\n        return \"<Lekcja {}: {} - {}>\".format(\n            self.numer, self.przedmiot.nazwa, self.pracownik.nazwa\n        )\n\n    @classmethod\n    def from_json(cls, j):\n        numer = j.get(\"NumerLekcji\")\n        pora = PoraLekcji.from_json(j.get(\"PoraLekcji\"))\n        przedmiot = Przedmiot.from_json(j.get(\"Przedmiot\"))\n        pracownik = Pracownik.from_json(j.get(\"Pracownik\"))\n        dzien_datetime = timestamp_to_datetime(j.get(\"Dzien\"))\n        dzien = dzien_datetime.date()\n        od = concat_hours_and_minutes(dzien_datetime, j[\"PoraLekcji\"][\"Poczatek\"])\n        do = concat_hours_and_minutes(dzien_datetime, j[\"PoraLekcji\"][\"Koniec\"])\n        LekcjaUcznia = j.get(\"PlanUcznia\")\n        sala = j.get(\"Sala\")\n        adnotacja = j.get(\"AdnotacjaOZmianie\")\n        przekreslonaNazwa = j.get(\"PrzekreslonaNazwa\")\n        pogrubionaNazwa = j.get(\"PogrubionaNazwa\")\n        return cls(\n            numer=numer,\n            pora=pora,\n            przedmiot=przedmiot,\n            pracownik=pracownik,\n            dzien=dzien,\n            od=od,\n            do=do,\n            LekcjaUcznia=LekcjaUcznia,\n            sala=sala,\n            adnotacja=adnotacja,\n            przekreslonaNazwa=przekreslonaNazwa,\n            pogrubionaNazwa=pogrubionaNazwa,\n        )\n","sub_path":"vulcan/_lesson.py","file_name":"_lesson.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"105041960","text":"import argparse\nimport yaml\nimport json\nimport os\nfrom global_settings import settings\nfrom typing import List\n\n\ndef createFolder(name, logfile = None):\n    name = name.strip().rstrip(\"/\")\n    exist = os.path.exists(name)\n    if exist:\n        # print(name + \" already here.\")\n        pass\n    else:\n        # print(name + \" created.\")\n        os.makedirs(name)\n\n\ndef get_config(args):\n    \"\"\"\n    @return: A dict containing configurations\n    \"\"\"\n    cfg_file = args.cfg\n    with open(cfg_file, 'r') as f:\n        settings['cfg'] = yaml.safe_load(f)\n    return settings['cfg']\n\n\ndef args_build_index():\n    \"\"\"\n    Set command line arguments for index builder\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--cfg\",\n                        default = 'config.yaml', required = False)\n    \n    return parser.parse_args()\n\n\ndef get_sp_term(term, sp = \"#\"):\n    return sp + term\n\n\ndef 
get_doc_year(doc_id: str) -> str:\n if '/' in doc_id: # format cat/doct_id:\n _, doc_id_ = doc_id.split('/')\n year = doc_id_[:2]\n else:\n year = doc_id[:2]\n return year\n\n\ndef get_cat_tag(cat, sp = \"#\"):\n return get_sp_term(cat.upper(), sp)\n\n\ndef get_cat_fullname(cat):\n \"\"\"Convert category abbreviation to full name\n\n Arguments:\n cat {[type]} -- [description]\n \"\"\"\n if not \"settings['cat_abbr_to_full']\" in globals():\n with open(settings['cfg']['CAT_ABBR_DICT'], 'r') as f:\n con = f.read()\n lines = [line.split() for line in con.strip().split('\\n')]\n settings['cat_abbr_to_full'] = {abbr: full for abbr, full in lines}\n return settings['cat_abbr_to_full'][cat]\n\n\ndef get_int_doc_id(doc_id: str):\n if not len(settings['doc_id_2_doc_no']):\n if os.path.exists(settings['cfg']['DOC_ID_2_DOC_NO']):\n with open(settings['cfg']['DOC_ID_2_DOC_NO'], 'r') as f:\n settings['doc_id_2_doc_no'] = json.load(f)\n else:\n # initialize\n settings['doc_id_2_doc_no']['NEXT'] = 0\n return settings['doc_id_2_doc_no'][doc_id]\n\n\ndef get_str_doc_id(doc_id: int) -> str:\n if not len(settings['doc_no_2_doc_id']):\n if not len(settings['doc_id_2_doc_no']) and os.path.exists(\n settings['cfg']['DOC_ID_2_DOC_NO']): # read from disk\n with open(settings['cfg']['DOC_ID_2_DOC_NO'], 'r') as f:\n settings['doc_id_2_doc_no'] = json.load(f)\n settings['doc_no_2_doc_id'] = {\n v: k for k, v in settings['doc_id_2_doc_no'].items()}\n \n return settings['doc_no_2_doc_id'][doc_id]\n\n\ndef get_doc_numbers():\n \"\"\"Get the total number of documents\n \"\"\"\n if not len(settings['doc_id2length']):\n with open(settings['cfg']['DOC_ID_2_DOC_LEN'], 'r') as f:\n settings['doc_id2length'] = json.load(f)\n return len(settings['doc_id2length']) - 2\n\n\ndef get_average_word_count():\n \"\"\"Get average number of words in documents\n \"\"\"\n if not len(settings['doc_id2length']):\n with open(settings['cfg']['DOC_ID_2_DOC_LEN'], 'r') as f:\n settings['doc_id2length'] = json.load(f)\n return settings['doc_id2length']['avg']\n\n\ndef get_doc_word_count(doc_id):\n \"\"\"\n\n Returns:\n [type] -- [description]\n \"\"\"\n if not len(settings['doc_id2length']):\n with open(settings['cfg']['DOC_ID_2_DOC_LEN'], 'r') as f:\n settings['doc_id2length'] = json.load(f)\n return settings['doc_id2length'][doc_id]\n\n\ndef get_index_file_path(key):\n \"\"\"A key -> a file\n\n Arguments:\n key {[type]} -- [description]\n \"\"\"\n return os.path.join(settings['cfg']['INDEX_DIR'],\n '_'.join([settings['cfg']['INDEX_PREFIX'], '{:03d}'.format(key)]) + '.pkl')\n\n\ndef get_word_occurences(word):\n if not len(settings['unigram']):\n with open(settings['cfg']['UNIGRAM_FILE'], 'r') as f:\n settings['unigram'] = json.load(f)\n if word in settings['unigram']:\n return settings['unigram'][word][2]\n else:\n return 0\n\n\ndef v_byte_encode(n: int) -> bytearray:\n \"\"\"Apply variable byte length compression to a number\n\n Arguments:\n n {int} -- [description]\n \"\"\"\n b = bytearray()\n while True:\n b.insert(0, n % 128)\n if n < 128:\n break\n n = int(n / 128)\n b[-1] += 128\n return b\n\n\ndef v_byte_decode(bytestream: bytearray) -> List[int]:\n nums = []\n n = 0\n for i, b in enumerate(bytestream):\n if b < 128:\n n = 128 * n + b\n else:\n n = 128 * n + (b - 128)\n nums.append(n)\n n = 0\n return nums\n\n\nif __name__ == \"__main__\":\n # Test\n answer1 = [b'\\x81',\n b'\\x86',\n b'\\xff',\n b'\\x01\\x80',\n b'\\x01\\x82',\n b'\\x01\\x1c\\xa0', ]\n encode_seq = [1, 6, 127, 128, 130, 20000]\n for i, n in enumerate(encode_seq):\n 
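# Worked example of the check below (values taken from encode_seq above):\n        # for 130, 130 % 128 = 2 with quotient 1, giving bytes [1, 2]; adding 128\n        # to the final byte yields b'\\x01\\x82' == answer1[4], and\n        # v_byte_decode(b'\\x01\\x82') recovers [130].\n        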
print(v_byte_encode(n) == answer1[i], v_byte_encode(n), answer1[i])\n    # Test Decode\n    decoded = v_byte_decode(b''.join(answer1))\n    print(decoded == encode_seq, decoded, encode_seq)\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"36441298","text":"from selenium import webdriver\nimport time\nurl='https://image.baidu.com/'\nbrowser=webdriver.Chrome()\nbrowser.get(url)\ninput=browser.find_element_by_xpath('//*[@id=\"kw\"]')\ninput.send_keys('T-ara')\ntime.sleep(2)\nbutton=browser.find_element_by_xpath('//*[@id=\"homeSearchForm\"]/span[2]')\nbutton.click()\nwhile True:\n    browser.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\n# Drag the scrollbar all the way to the bottom of the page?\n# browser.execute_script('alert(\"To Bottom\")')?","sub_path":"PythonProjects/爬虫/selenium/访问百度图片并搜索操作下拉.py","file_name":"访问百度图片并搜索操作下拉.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"180859800","text":"__author__ = 'Einsiedler'\n\n\nimport subprocess\nimport platform\nfrom robot.api import logger\n\n\nclass Uploader(object):\n\n    def __init__(self, server_url=\"ubuntu@jenkins.geocomply.net\"):\n        self.url = server_url\n        pass\n\n    def upload_via_scp(self, src_folder, remote_path):\n        if platform.system() == \"Darwin\":\n            r = subprocess.Popen('rsync -r --delete %s:%s/*' % (self.url, remote_path),\n                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n            error, output = r.communicate()\n            logger.info(\"Remove if files exist... Error:[%s], Output:[%s]\" % (error, output))\n\n            p = subprocess.Popen('scp -r \\\"%s\\\" %s:%s' % (src_folder, self.url, remote_path),\n                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n            error, output = p.communicate()\n            logger.info(\"Upload folder... Error:[%s], Output:[%s]\" % (error, output))\n        elif platform.system() == \"Windows\":\n            raise Exception(\"Unimplemented.\")\n        else:\n            raise Exception(\"Unsupported platform detected.\")","sub_path":"src/remote/Uploader.py","file_name":"Uploader.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"452050641","text":"#-*- coding: utf-8 -*-\n# Copyright (C) 2015-2016 by Brendt Wohlberg \n# All rights reserved. BSD 3-clause License.\n# This file is part of the SPORCO package. 
Details of the copyright\n# and user license can be found in the 'LICENSE.txt' file distributed\n# with the package.\n\n\"\"\"ADMM algorithm for the CCMOD problem\"\"\"\n\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom builtins import range\n\nimport numpy as np\nfrom scipy import linalg\nimport copy\nimport pprint\n\nfrom sporco.admm import admm\nimport sporco.linalg as sl\n\n__author__ = \"\"\"Brendt Wohlberg \"\"\"\n\n\nclass DictionarySize(object):\n \"\"\"Compute dictionary size parameters from a dictionary size\n specification tuple as in the dsz argument of :func:`bcrop`.\"\"\"\n\n def __init__(self, dsz, dimN=2):\n \"\"\"Initialise a DictionarySize object.\n\n Parameters\n ----------\n dsz : tuple\n Dictionary size specification (using the same format as the\n `dsz` argument of :func:`bcrop`)\n dimN : int, optional (default 2)\n Number of spatial dimensions\n \"\"\"\n\n self.dsz = dsz\n if isinstance(dsz[0], tuple):\n # Multi-scale dictionary specification\n if isinstance(dsz[0][0], tuple):\n self.ndim = len(dsz[0][0])\n self.nchn = 0\n for c in range(0, len(dsz[0])):\n self.nchn += dsz[0][c][-2]\n else:\n self.ndim = len(dsz[0])\n if self.ndim == dimN + 1:\n self.nchn = 1\n else:\n self.nchn = dsz[0][-2]\n mxsz = np.zeros((dimN,), dtype=int)\n self.nflt = 0\n for m in range(0, len(dsz)):\n if isinstance(dsz[m][0], tuple):\n # Separate channel specification\n for c in range(0, len(dsz[m])):\n mxsz = np.maximum(mxsz, dsz[m][c][0:dimN])\n self.nflt += dsz[m][0][-1]\n else:\n # Combined channel specification\n mxsz = np.maximum(mxsz, dsz[m][0:dimN])\n self.nflt += dsz[m][-1]\n self.mxsz = tuple(mxsz)\n else:\n # Single scale dictionary specification\n self.ndim = len(dsz)\n self.mxsz = dsz[0:dimN]\n self.nflt = dsz[-1]\n if self.ndim == dimN + 1:\n self.nchn = 1\n else:\n self.nchn = dsz[-2]\n\n\n\n def __str__(self):\n \"\"\"Return string representation of object.\"\"\"\n\n return pprint.pformat(vars(self))\n\n\n\n\n\nclass ConvRepIndexing(object):\n \"\"\"Manage the inference of problem dimensions and the roles of\n :class:`numpy.ndarray` indices for convolutional representations\n as in :class:`.ConvBPDN` and related classes.\n \"\"\"\n\n def __init__(self, dsz, S, dimK=None, dimN=2):\n \"\"\"Initialise a ConvRepIndexing object representing dimensions of S\n (input signal), D (dictionary), and X (coefficient array) in a\n convolutional representation. These dimensions are inferred\n from the input `dsz` and `S` as well as from parameters `dimN`\n and `dimK`. Management and inferrence of these problem\n dimensions is not entirely straightforward because\n :class:`.ConvCnstrMOD` and related classes make use\n *internally* of S, D, and X arrays with a standard layout\n (described below), but *input* `S` and `dsz` are allowed to\n deviate from this layout for the convenience of the user. Note\n that S, D, and X refers to the names of signal, dictionary,\n and coefficient map arrays in :class:`.ConvBPDN`; the\n corresponding variable names in :class:`.ConvCnstrMOD` are S,\n X, and A.\n\n The most fundamental parameter is `dimN`, which specifies the\n dimensionality of the spatial/temporal samples being\n represented (e.g. `dimN` = 2 for representations of 2D\n images). This should be common to *input* `S` and `dsz`, and is also\n common to *internal* S, D, and X. The remaining dimensions of\n input `S` can correspond to multiple channels (e.g. for RGB\n images) and/or multiple signals (e.g. the array contains\n multiple independent images). 
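For example, a set of K RGB images of\n        size N0 x N1 would be passed as an array of shape (N0, N1, 3, K),\n        giving dimC = 1 and dimK = 1. 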
If input `S` contains two\n additional dimensions (in addition to the `dimN` spatial\n dimensions), then those are considered to correspond, in\n order, to channel and signal indices. If there is only a\n single additional dimension, then determination whether it\n represents a channel or signal index is more complicated. The\n rule for making this determination is as follows:\n\n * if `dimK` is set to 0 or 1 instead of the default ``None``, then\n that value is taken as the number of signal indices in input `S`\n and any remaining indices are taken as channel indices (i.e. if\n `dimK` = 0 then dimC = 1 and if `dimK` = 1 then dimC = 0).\n * if `dimK` is ``None`` then the number of channel dimensions\n is determined from the number of dimensions specified in the\n input dictionary size `dsz`. Input `dsz` should specify at\n least `dimN` + 1 dimensions, with the final dimension\n indexing dictionary filters. If it has exactly `dimN` + 1\n dimensions then it is a single-channel dictionary, and input\n `S` is also assumed to be single-channel, with the\n additional index in `S` assigned as a signal index\n (i.e. `dimK` = 1). Conversely, if input `dsz` specified\n `dimN` + 2 dimensions it is a multi-channel dictionary, and\n the additional index in `S` is assigned as a channel index\n (i.e. dimC = 1).\n\n Note that it is an error to specify `dimK` = 1 if input `S`\n has `dimN` + 1 dimensions and input `dsz` specified `dimN` + 2\n dimensions since a multi-channel dictionary requires a\n multi-channel signal. (The converse is not true: a\n multi-channel signal can be decomposed using a single-channel\n dictionary.)\n\n The *internal* data layout for S (signal), D (dictionary), and\n X (coefficient array) is (multi-channel dictionary)\n ::\n\n sptl. chn sig flt\n S(N0, N1, ..., C, K, 1)\n D(N0, N1, ..., C, 1, M)\n X(N0, N1, ..., 1, K, M)\n\n or (single-channel dictionary)\n\n ::\n\n sptl. chn sig flt\n S(N0, N1, ..., C, K, 1)\n D(N0, N1, ..., 1, 1, M)\n X(N0, N1, ..., C, K, M)\n\n where\n\n * Nv = [N0, N1, ...] and N = N0 x N1 x ... are the vector of sizes\n of the spatial/temporal indices and the total number of\n spatial/temporal samples respectively\n * C is the number of channels in S\n * K is the number of signals in S\n * M is the number of filters in D\n\n It should be emphasised that dimC and `dimK` may take on values\n 0 or 1, and represent the number of channel and signal\n dimensions respectively *in input S*. In the internal layout\n of S there is always a dimension allocated for channels and\n signals. The number of channel dimensions in input `D` and the\n corresponding size of that index are represented by dimCd\n and Cd respectively.\n\n Parameters\n ----------\n dsz : tuple\n Dictionary size specification (using the same format as the\n `dsz` argument of :func:`bcrop`)\n S : array_like\n Input signal\n dimK : 0, 1, or None, optional (default None)\n Number of dimensions in input signal corresponding to multiple\n independent signals\n dimN : int, optional (default 2)\n Number of spatial/temporal dimensions of signal samples\n \"\"\"\n\n # Extract properties of dictionary size specification tuple\n ds = DictionarySize(dsz, dimN)\n self.dimCd = ds.ndim - dimN - 1\n self.Cd = ds.nchn\n self.M = ds.nflt\n self.dsz = dsz\n\n # Numbers of spatial, channel, and signal dimensions in\n # external S are dimN, dimC, and dimK respectively. These need\n # to be calculated since inputs D and S do not already have\n # the standard data layout above, i.e. 
singleton dimensions\n # will not be present\n if dimK is None:\n rdim = S.ndim - dimN\n if rdim == 0:\n (dimC, dimK) = (0, 0)\n elif rdim == 1:\n dimC = self.dimCd # Assume S has same number of channels as D\n dimK = S.ndim - dimN - dimC # Assign remaining channels to K\n else:\n (dimC, dimK) = (1, 1)\n else:\n dimC = S.ndim - dimN - dimK # Assign remaining channels to C\n\n self.dimN = dimN # Number of spatial dimensions\n self.dimC = dimC # Number of channel dimensions in S\n self.dimK = dimK # Number of signal dimensions in S\n\n # Number of channels in S\n if self.dimC == 1:\n self.C = S.shape[dimN]\n else:\n self.C = 1\n self.Cx = self.C - self.Cd + 1\n\n # Ensure that multi-channel dictionaries used with a signal with a\n # matching number of channels\n if self.Cd > 1 and self.C != self.Cd:\n raise ValueError(\"Multi-channel dictionary with signal with \"\n \"mismatched number of channels (Cd=%d, C=%d)\" %\n (self.Cd, self.C))\n\n # Number of signals in S\n if self.dimK == 1:\n self.K = S.shape[self.dimN+self.dimC]\n else:\n self.K = 1\n\n # Shape of spatial indices and number of spatial samples\n self.Nv = S.shape[0:dimN]\n self.N = np.prod(np.array(self.Nv))\n\n # Axis indices for each component of X and internal S and D\n self.axisN = tuple(range(0, dimN))\n self.axisC = dimN\n self.axisK = dimN + 1\n self.axisM = dimN + 2\n\n # Shapes of internal S, D, and X\n self.shpD = self.Nv + (self.Cd,) + (1,) + (self.M,)\n self.shpS = self.Nv + (self.C,) + (self.K,) + (1,)\n self.shpX = self.Nv + (self.Cx,) + (self.K,) + (self.M,)\n\n\n\n def __str__(self):\n \"\"\"Return string representation of object.\"\"\"\n\n return pprint.pformat(vars(self))\n\n\n\n\n\nclass ConvCnstrMOD(admm.ADMMEqual):\n \"\"\"ADMM algorithm for Convolutional Constrained MOD problem\n :cite:`wohlberg-2016-efficient` :cite:`wohlberg-2016-convolutional`.\n\n Solve the optimisation problem\n\n .. math::\n \\mathrm{argmin}_\\mathbf{d} \\;\n (1/2) \\sum_k \\\\left\\| \\sum_m \\mathbf{d}_m * \\mathbf{x}_{k,m} -\n \\mathbf{s}_k \\\\right\\|_2^2 \\quad \\\\text{such that} \\quad\n \\mathbf{d}_m \\in C\n\n via the ADMM problem\n\n .. math::\n \\mathrm{argmin}_\\mathbf{d} \\;\n (1/2) \\sum_k \\\\left\\| \\sum_m \\mathbf{d}_m * \\mathbf{x}_{k,m} -\n \\mathbf{s}_k \\\\right\\|_2^2 + \\sum_m \\iota_C(\\mathbf{g}_m) \\quad\n \\\\text{such that} \\quad \\mathbf{d}_m = \\mathbf{g}_m \\;\\;,\n\n where :math:`\\iota_C(\\cdot)` is the indicator function of feasible\n set :math:`C` consisting of filters with unit norm and constrained\n support. Multi-channel problems with input image channels\n :math:`\\mathbf{s}_{c,k}` are also supported, either as\n\n .. math::\n \\mathrm{argmin}_\\mathbf{d} \\;\n (1/2) \\sum_c \\sum_k \\\\left\\| \\sum_m \\mathbf{d}_m * \\mathbf{x}_{c,k,m} -\n \\mathbf{s}_{c,k} \\\\right\\|_2^2 \\quad \\\\text{such that} \\quad\n \\mathbf{d}_m \\in C\n\n with single-channel dictionary filters :math:`\\mathbf{d}_m` and\n multi-channel coefficient maps :math:`\\mathbf{x}_{c,k,m}`, or\n\n .. math::\n \\mathrm{argmin}_\\mathbf{d} \\;\n (1/2) \\sum_c \\sum_k \\\\left\\| \\sum_m \\mathbf{d}_{c,m} * \\mathbf{x}_{k,m} -\n \\mathbf{s}_{c,k} \\\\right\\|_2^2 \\quad \\\\text{such that} \\quad\n \\mathbf{d}_{c,m} \\in C\n\n with multi-channel dictionary filters :math:`\\mathbf{d}_{c,m}` and\n single-channel coefficient maps :math:`\\mathbf{x}_{k,m}`. 
In this\n latter case, normalisation of filters :math:`\\mathbf{d}_{c,m}` is\n performed jointly over index :math:`c` for each filter :math:`m`.\n\n After termination of the :meth:`solve` method, attribute :attr:`itstat` is\n a list of tuples representing statistics of each iteration. The\n fields of the named tuple ``IterationStats`` are:\n\n ``Iter`` : Iteration number\n\n ``DFid`` : Value of data fidelity term \\\n :math:`(1/2) \\sum_k \\| \\sum_m \\mathbf{d}_m * \\mathbf{x}_{k,m} -\n \\mathbf{s}_k \\|_2^2`\n\n ``Cnstr`` : Constraint violation measure\n\n ``PrimalRsdl`` : Norm of primal residual\n\n ``DualRsdl`` : Norm of dual residual\n\n ``EpsPrimal`` : Primal residual stopping tolerance \\\n :math:`\\epsilon_{\\mathrm{pri}}`\n\n ``EpsDual`` : Dual residual stopping tolerance \\\n :math:`\\epsilon_{\\mathrm{dua}}`\n\n ``Rho`` : Penalty parameter\n\n ``XSlvRelRes`` : Relative residual of X step solver\n\n ``XSlvCGIt`` : CG iterations used in X step solver\n\n ``Time`` : Cumulative run time\n \"\"\"\n\n\n\n class Options(admm.ADMMEqual.Options):\n \"\"\"CCMOD algorithm options\n\n Options include all of those defined in\n :class:`sporco.admm.admm.ADMMEqual.Options`, together with\n additional options:\n\n ``AuxVarObj`` : Flag indicating whether the objective function \\\n should be evaluated using variable X (``False``) or Y (``True``) \\\n as its argument\n\n ``LinSolveCheck`` : If ``True``, compute relative residual of \\\n X step solver\n\n ``ZeroMean`` : Flag indicating whether the solution dictionary \\\n :math:`\\{\\mathbf{d}_m\\}` should have zero-mean components\n\n ``LinSolve`` : Select linear solver for x step. Options are \\\n ``SM`` (Sherman-Morrison) or ``CG`` (Conjugate Gradient)\n\n ``CG`` : CG solver options\n\n ``MaxIter`` : Maximum iterations\n\n ``StopTol`` : Stopping tolerance\n \"\"\"\n\n defaults = copy.deepcopy(admm.ADMMEqual.Options.defaults)\n defaults.update({'AuxVarObj' : False, 'ReturnX' : False,\n 'RelaxParam' : 1.8, 'ZeroMean' : False,\n 'LinSolve' : 'SM', 'LinSolveCheck' : False,\n 'CG' : {'MaxIter' : 1000, 'StopTol' : 1e-3}})\n defaults['AutoRho'].update({'Enabled' : True, 'Period' : 1,\n 'AutoScaling' : True, 'Scaling' : 1000,\n 'RsdlRatio' : 1.2})\n\n\n def __init__(self, opt=None):\n \"\"\"Initialise CCMOD algorithm options object.\"\"\"\n\n if opt is None:\n opt = {}\n admm.ADMMEqual.Options.__init__(self, opt)\n\n if self['AuxVarObj']:\n self['fEvalX'] = False\n self['gEvalY'] = True\n else:\n self['fEvalX'] = True\n self['gEvalY'] = False\n\n if self['AutoRho','RsdlTarget'] is None:\n self['AutoRho','RsdlTarget'] = 1.0\n\n\n\n itstat_fields_objfn = ('DFid', 'Cnstr')\n itstat_fields_extra = ('XSlvRelRes', 'CGIt')\n hdrtxt_objfn = ('DFid', 'Cnstr')\n hdrval_objfun = {'DFid' : 'DFid', 'Cnstr' : 'Cnstr'}\n\n\n\n def __init__(self, A, S, dsz, opt=None, dimK=1, dimN=2):\n \"\"\"Initialise a ConvCnstrMOD object with problem parameters.\n\n This class supports an arbitrary number of spatial dimensions,\n `dimN`, with a default of 2. The input coefficient map array `A`\n (usually labelled X, but renamed here to avoid confusion with\n the X and Y variables in the ADMM base class) is expected to\n be in standard form as computed by the ConvBPDN class.\n\n The input signal set `S` is either `dimN` dimensional (no\n channels, only one signal), `dimN` +1 dimensional (either\n multiple channels or multiple signals), or `dimN` +2 dimensional\n (multiple channels and multiple signals). 
Parameter `dimK`, with\n        a default value of 1, indicates the number of multiple-signal\n        dimensions in `S`:\n\n        ::\n\n          Default dimK = 1, i.e. assume input S is of form\n            S(N0, N1, C, K)  or  S(N0, N1, K)\n          If dimK = 0 then input S is of form\n            S(N0, N1, C, K)  or  S(N0, N1, C)\n\n        The internal data layout for S, D (X here), and X (A here) is:\n        ::\n\n          dim<0> - dim<dimN-1> : Spatial dimensions, product of N0,N1,... is N\n          dim<dimN>   : C number of channels in S and D\n          dim<dimN+1> : K number of signals in S\n          dim<dimN+2> : M number of filters in D\n\n          sptl.     chn  sig  flt\n          S(N0, N1,  C,   K,   1)\n          D(N0, N1,  C,   1,   M)   (X here)\n          X(N0, N1,  1,   K,   M)   (A here)\n\n        The `dsz` parameter indicates the desired filter supports in the\n        output dictionary, since this cannot be inferred from the\n        input variables. The format is the same as the `dsz` parameter\n        of :func:`bcrop`.\n\n        Parameters\n        ----------\n        A : array_like\n          Coefficient map array\n        S : array_like\n          Signal array\n        dsz : tuple\n          Filter support size(s)\n        opt : ccmod.Options object\n          Algorithm options\n        dimK : int, optional (default 1)\n          Number of dimensions for multiple signals in input S\n        dimN : int, optional (default 2)\n          Number of spatial dimensions\n        \"\"\"\n\n        # Set default options if none specified\n        if opt is None:\n            opt = ConvCnstrMOD.Options()\n\n        # Infer problem dimensions and set relevant attributes of self\n        self.cri = ConvRepIndexing(dsz, S, dimK=dimK, dimN=dimN)\n\n        # Call parent class __init__\n        super(ConvCnstrMOD, self).__init__(self.cri.shpD, S.dtype, opt)\n\n        # Set penalty parameter\n        self.set_attr('rho', opt['rho'], dval=self.cri.K, dtype=self.dtype)\n\n        # Reshape S to standard layout (A, i.e. X in cbpdn, is assumed\n        # to be taken from cbpdn, and therefore already in standard\n        # form). If the dictionary has a single channel but the input\n        # (and therefore also the coefficient map array) has multiple\n        # channels, the channel index and multiple image index have\n        # the same behaviour in the dictionary update equation: the\n        # simplest way to handle this is to just reshape so that the\n        # channels also appear on the multiple image index.\n        if self.cri.Cd == 1 and self.cri.C > 1:\n            self.S = S.reshape(self.cri.Nv + (1,) +\n                               (self.cri.C*self.cri.K,) + (1,))\n        else:\n            self.S = S.reshape(self.cri.shpS)\n        self.S = np.asarray(self.S, dtype=self.dtype)\n\n        # Compute signal S in DFT domain\n        self.Sf = sl.rfftn(self.S, None, self.cri.axisN)\n\n        # Create constraint set projection function\n        self.Pcn = getPcn(opt['ZeroMean'], dsz, self.cri.Nv, self.cri.dimN)\n\n        # Create byte aligned arrays for FFT calls\n        self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)\n        xfshp = list(self.Y.shape)\n        xfshp[dimN-1] = xfshp[dimN-1]//2 + 1\n        self.Xf = sl.pyfftw_empty_aligned(xfshp,\n                                          dtype=sl.complex_dtype(self.dtype))\n\n        if A is not None:\n            self.setcoef(A)\n\n        # Increment `runtime` to reflect object initialisation\n        # time. 
The timer object is reset to avoid double-counting of\n # elapsed time if a similar increment is applied in a derived\n # class __init__.\n self.runtime += self.timer.elapsed(reset=True)\n\n\n\n def uinit(self, ushape):\n \"\"\"Return initialiser for working variable U\"\"\"\n\n if self.opt['Y0'] is None:\n return np.zeros(ushape, dtype=self.dtype)\n else:\n # If initial Y is non-zero, initial U is chosen so that\n # the relevant dual optimality criterion (see (3.10) in\n # boyd-2010-distributed) is satisfied.\n return self.Y\n\n\n\n def setcoef(self, A):\n \"\"\"Set coefficient array.\"\"\"\n\n # If the dictionary has a single channel but the input (and\n # therefore also the coefficient map array) has multiple\n # channels, the channel index and multiple image index have\n # the same behaviour in the dictionary update equation: the\n # simplest way to handle this is to just reshape so that the\n # channels also appear on the multiple image index.\n if self.cri.Cd == 1 and self.cri.C > 1:\n A = A.reshape(self.cri.Nv + (1,) + (self.cri.Cx*self.cri.K,) +\n (self.cri.M,))\n self.A = np.asarray(A, dtype=self.dtype)\n\n self.Af = sl.rfftn(self.A, self.cri.Nv, self.cri.axisN)\n # Compute X^H S\n self.ASf = np.sum(np.conj(self.Af) * self.Sf, self.cri.axisK,\n keepdims=True)\n\n\n\n def getdict(self):\n \"\"\"Get final dictionary.\"\"\"\n\n return bcrop(self.Y, self.cri.dsz)\n\n\n\n def xstep(self):\n \"\"\"Minimise Augmented Lagrangian with respect to :math:`\\mathbf{x}`.\"\"\"\n\n self.cgit = None\n\n self.YU[:] = self.Y - self.U\n\n b = self.ASf + self.rho*sl.rfftn(self.YU, None, self.cri.axisN)\n if self.opt['LinSolve'] == 'SM':\n self.Xf[:] = sl.solvemdbi_ism(self.Af, self.rho, b, self.cri.axisM,\n self.cri.axisK)\n else:\n self.Xf[:], cgit = sl.solvemdbi_cg(self.Af, self.rho, b,\n self.cri.axisM, self.cri.axisK,\n self.opt['CG', 'StopTol'],\n self.opt['CG', 'MaxIter'], self.Xf)\n self.cgit = cgit\n\n self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)\n\n if self.opt['LinSolveCheck']:\n Aop = lambda x: np.sum(self.Af * x, axis=self.cri.axisM,\n keepdims=True)\n AHop = lambda x: np.sum(np.conj(self.Af) * x, axis=self.cri.axisK,\n keepdims=True)\n ax = AHop(Aop(self.Xf)) + self.rho*self.Xf\n self.xrrs = sl.rrs(ax, b)\n else:\n self.xrrs = None\n\n\n\n def ystep(self):\n \"\"\"Minimise Augmented Lagrangian with respect to :math:`\\mathbf{y}`.\n \"\"\"\n\n self.Y = self.Pcn(self.AX + self.U)\n\n\n\n def obfn_fvarf(self):\n \"\"\"Variable to be evaluated in computing data fidelity term,\n depending on 'fEvalX' option value.\n \"\"\"\n\n return self.Xf if self.opt['fEvalX'] else \\\n sl.rfftn(self.Y, None, self.cri.axisN)\n\n\n\n def eval_objfn(self):\n \"\"\"Compute components of objective function as well as total\n contribution to objective function.\n \"\"\"\n\n dfd = self.obfn_dfd()\n cns = self.obfn_cns()\n return (dfd, cns)\n\n\n\n def obfn_dfd(self):\n \"\"\"Compute data fidelity term :math:`(1/2) \\| \\sum_m \\mathbf{d}_m *\n \\mathbf{x}_m - \\mathbf{s} \\|_2^2`.\n \"\"\"\n\n Ef = np.sum(self.Af * self.obfn_fvarf(), axis=self.cri.axisM,\n keepdims=True) - self.Sf\n return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0\n\n\n\n def obfn_cns(self):\n \"\"\"Compute constraint violation measure :math:`\\| P(\\mathbf{y}) -\n \\mathbf{y}\\|_2`.\n \"\"\"\n\n return linalg.norm((self.Pcn(self.obfn_gvar()) - self.obfn_gvar()))\n\n\n\n def itstat_extra(self):\n \"\"\"Non-standard entries for the iteration stats record tuple.\"\"\"\n\n return (self.xrrs, self.cgit)\n\n\n\n\n\ndef 
stdformD(D, Cd, M, dimN=2):\n \"\"\"Reshape dictionary array (X here, D in cbpdn module) to internal\n standard form.\n\n Parameters\n ----------\n D : array_like\n Dictionary array\n Cd : int\n Size of dictionary channel index\n M : int\n Number of filters in dictionary\n dimN : int, optional (default 2)\n Number of problem spatial indices\n\n Returns\n -------\n Dr : ndarray\n Reshaped dictionary array\n \"\"\"\n\n return D.reshape(D.shape[0:dimN] + (Cd,) + (1,) + (M,))\n\n\n\ndef getPcn0(zm, dsz, dimN=2, dimC=1):\n \"\"\"Construct constraint set projection function without support\n projection. The `dsz` parameter specifies the support sizes of each\n filter using the same format as the `dsz` parameter of :func:`bcrop`.\n\n Parameters\n ----------\n zm : bool\n Flag indicating whether the projection function should include\n filter mean subtraction\n dsz : tuple\n Filter support size(s)\n dimN : int, optional (default 2)\n Number of problem spatial indices\n dimC : int, optional (default 1)\n Number of problem channel indices\n\n Returns\n -------\n fn : function\n Constraint set projection function\n \"\"\"\n\n if zm:\n return lambda x: normalise(zeromean(bcrop(x, dsz), dsz, dimN),\n dimN+dimC)\n else:\n return lambda x: normalise(bcrop(x, dsz, dimN), dimN+dimC)\n\n\n\ndef getPcn(zm, dsz, Nv, dimN=2, dimC=1):\n \"\"\"Construct the constraint set projection function utilised by\n ystep. The `dsz` parameter specifies the support sizes of each\n filter using the same format as the `dsz` parameter of :func:`bcrop`.\n\n Parameters\n ----------\n zm : bool\n Flag indicating whether the projection function should include\n filter mean subtraction\n dsz : tuple\n Filter support size(s)\n Nv : tuple\n Sizes of problem spatial indices\n dimN : int, optional (default 2)\n Number of problem spatial indices\n dimC : int, optional (default 1)\n Number of problem channel indices\n\n Returns\n -------\n fn : function\n Constraint set projection function\n \"\"\"\n\n if zm:\n return lambda x: normalise(zeromean(zpad(bcrop(x, dsz, dimN), Nv),\n dsz), dimN+dimC)\n else:\n return lambda x: normalise(zpad(bcrop(x, dsz, dimN), Nv), dimN+dimC)\n\n\n\ndef zeromean(v, dsz, dimN=2):\n \"\"\"Subtract mean value from each filter in the input array v. The `dsz`\n parameter specifies the support sizes of each filter using the\n same format as the `dsz` parameter of :func:`bcrop`. Support sizes\n must be taken into account to ensure that the mean values are\n computed over the correct number of samples, ignoring the\n zero-padded region in which the filter is embedded.\n\n Parameters\n ----------\n v : array_like\n Input dictionary array\n dsz : tuple\n Filter support size(s)\n dimN : int, optional (default 2)\n Number of spatial dimensions\n\n Returns\n -------\n vz : ndarray\n Dictionary array with filter means subtracted\n \"\"\"\n\n vz = v.copy()\n if isinstance(dsz[0], tuple):\n # Multi-scale dictionary specification\n axisN = tuple(range(0, dimN))\n m0 = 0 # Initial index of current block of equi-sized filters\n # Iterate over distinct filter sizes\n for mb in range(0, len(dsz)):\n # Determine end index of current block of filters\n if isinstance(dsz[mb][0], tuple):\n m1 = m0 + dsz[mb][0][-1]\n c0 = 0 # Init. idx. 
of current channel-block of equi-sized flt.\n for cb in range(0, len(dsz[mb])):\n c1 = c0 + dsz[mb][cb][-2]\n # Construct slice corresponding to cropped part of\n # current block of filters in output array and set from\n # input array\n cbslc = tuple([slice(0, x) for x in dsz[mb][cb][0:dimN]]) \\\n + (slice(c0, c1),) + (Ellipsis,) + (slice(m0, m1),)\n vz[cbslc] -= np.mean(v[cbslc], axisN)\n c0 = c1 # Update initial index for start of next block\n else:\n m1 = m0 + dsz[mb][-1]\n # Construct slice corresponding to cropped part of\n # current block of filters in output array and set from\n # input array\n mbslc = tuple([slice(0, x) for x in dsz[mb][0:-1]]) + \\\n (Ellipsis,) + (slice(m0, m1),)\n vz[mbslc] -= np.mean(v[mbslc], axisN)\n m0 = m1 # Update initial index for start of next block\n else:\n # Single scale dictionary specification\n axisN = tuple(range(0, dimN))\n axnslc = tuple([slice(0, x) for x in dsz[0:dimN]])\n vz[axnslc] -= np.mean(v[axnslc], axisN)\n\n return vz\n\n\n\ndef normalise(v, dimN=2):\n \"\"\"Normalise vectors, corresponding to slices along specified number\n of initial spatial dimensions of an array, to have unit\n :math:`\\ell_2` norm. The remaining axes enumerate the distinct\n vectors to be normalised.\n\n Parameters\n ----------\n v : array_like\n Array with components to be normalised\n dimN : int, optional (default 2)\n Number of initial dimensions over which norm should be computed\n\n Returns\n -------\n vnrm : ndarray\n Normalised array\n \"\"\"\n\n axisN = tuple(range(0,dimN))\n vn = np.sqrt(np.sum(v**2, axisN, keepdims=True))\n vn[vn == 0] = 1.0\n return np.asarray(v / vn, dtype=v.dtype)\n\n\n\ndef zpad(v, Nv):\n \"\"\"Zero-pad initial axes of array to specified size. Padding is\n applied to the right, top, etc. of the array indices.\n\n Parameters\n ----------\n v : array_like\n Array to be padded\n Nv : tuple\n Sizes to which each of initial indices should be padded\n\n Returns\n -------\n vp : ndarray\n Padded array\n \"\"\"\n\n vp = np.zeros(Nv + v.shape[len(Nv):], dtype=v.dtype)\n axnslc = tuple([slice(0, x) for x in v.shape])\n vp[axnslc] = v\n return vp\n\n\n\ndef bcrop(v, dsz, dimN=2):\n \"\"\"Crop specified number of initial spatial dimensions of dictionary\n array to specified size. Parameter `dsz` must be a tuple having one\n of the following forms (the examples assume two spatial/temporal\n dimensions). If all filters are of the same size, then\n\n ::\n\n (flt_rows, filt_cols, num_filt)\n\n may be used when the dictionary has a single channel, and\n\n ::\n\n (flt_rows, filt_cols, num_chan, num_filt)\n\n should be used for a multi-channel dictionary. If the filters are\n not all of the same size, then\n\n ::\n\n (\n (flt_rows1, filt_cols1, num_filt1),\n (flt_rows2, filt_cols2, num_filt2),\n ...\n )\n\n may be used for a single-channel dictionary. A multi-channel\n dictionary may be specified in the form\n\n ::\n\n (\n (flt_rows1, filt_cols1, num_chan, num_filt1),\n (flt_rows2, filt_cols2, num_chan, num_filt2),\n ...\n )\n\n or\n\n ::\n\n (\n (\n (flt_rows11, filt_cols11, num_chan11, num_filt1),\n (flt_rows21, filt_cols21, num_chan21, num_filt1),\n ...\n )\n (\n (flt_rows12, filt_cols12, num_chan12, num_filt2),\n (flt_rows22, filt_cols22, num_chan22, num_filt2),\n ...\n )\n ...\n )\n\n depending on whether the filters for each channel are of the same\n size or not. The total number of dictionary filters, is either\n num_filt in the first two forms, or the sum of num_filt1,\n num_filt2, etc. in the other form. 
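As a concrete\n    example, dsz = ((8, 8, 32), (12, 12, 16)) specifies 32 single-channel\n    filters of support 8x8 plus 16 filters of support 12x12, i.e. 48 filters\n    in total. 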
If the filters are not\n    two-dimensional, then the dimensions above vary accordingly, i.e.,\n    there may be fewer or more filter spatial dimensions than\n    flt_rows, filt_cols, e.g.\n\n    ::\n\n      (flt_rows, num_filt)\n\n    for one-dimensional signals, or\n\n    ::\n\n      (flt_rows, filt_cols, filt_planes, num_filt)\n\n    for three-dimensional signals.\n\n    Parameters\n    ----------\n    v : array_like\n      Dictionary array to be cropped\n    dsz : tuple\n      Filter support size(s)\n    dimN : int, optional (default 2)\n      Number of spatial dimensions\n\n    Returns\n    -------\n    vc : ndarray\n      Cropped dictionary array\n    \"\"\"\n\n    if isinstance(dsz[0], tuple):\n        # Multi-scale dictionary specification\n        maxsz = np.zeros((dimN,), dtype=int) # Max. support size\n        # Iterate over dsz to determine max. support size\n        for mb in range(0, len(dsz)):\n            if isinstance(dsz[mb][0], tuple):\n                for cb in range(0, len(dsz[mb])):\n                    maxsz = np.maximum(maxsz, dsz[mb][cb][0:dimN])\n            else:\n                maxsz = np.maximum(maxsz, dsz[mb][0:dimN])\n        # Init. cropped array\n        vc = np.zeros(tuple(maxsz) + v.shape[dimN:], dtype=v.dtype)\n        m0 = 0 # Initial index of current block of equi-sized filters\n        # Iterate over distinct filter sizes\n        for mb in range(0, len(dsz)):\n            # Determine end index of current block of filters\n            if isinstance(dsz[mb][0], tuple):\n                m1 = m0 + dsz[mb][0][-1]\n                c0 = 0 # Init. idx. of current channel-block of equi-sized flt.\n                for cb in range(0, len(dsz[mb])):\n                    c1 = c0 + dsz[mb][cb][-2]\n                    # Construct slice corresponding to cropped part of\n                    # current block of filters in output array and set from\n                    # input array\n                    cbslc = tuple([slice(0, x) for x in dsz[mb][cb][0:dimN]]) \\\n                        + (slice(c0, c1),) + (Ellipsis,) + (slice(m0, m1),)\n                    vc[cbslc] = v[cbslc]\n                    c0 = c1 # Update initial index for start of next block\n            else:\n                m1 = m0 + dsz[mb][-1]\n                # Construct slice corresponding to cropped part of\n                # current block of filters in output array and set from\n                # input array\n                mbslc = tuple([slice(0, x) for x in dsz[mb][0:-1]]) + \\\n                    (Ellipsis,) + (slice(m0, m1),)\n                vc[mbslc] = v[mbslc]\n            m0 = m1 # Update initial index for start of next block\n        return vc\n    else:\n        # Single scale dictionary specification\n        axnslc = tuple([slice(0, x) for x in dsz[0:dimN]])\n        return v[axnslc]\n","sub_path":"sporco/admm/ccmod.py","file_name":"ccmod.py","file_ext":"py","file_size_in_byte":34244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"651066523","text":"\n\nfrom xai.brain.wordbase.nouns._playhouse import _PLAYHOUSE\n\n#class header\nclass _PLAYHOUSES(_PLAYHOUSE, ):\n\tdef __init__(self,): \n\t\t_PLAYHOUSE.__init__(self)\n\t\tself.name = \"PLAYHOUSES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"playhouse\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_playhouses.py","file_name":"_playhouses.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"395820510","text":"#-*-coding:utf-8-*- \n# Given an array of integers, find out whether there are two distinct indices\n# i and j in the array such that the absolute difference between nums[i] and\n# nums[j] is at most t and the absolute difference between i and j is at\n# most k.\n#\n# Example 1:\n#\n# Input: nums = [1,2,3,1], k = 3, t = 0\n# Output: true\n# Example 2:\n#\n# Input: nums = [1,0,1,1], k = 1, t = 2\n# Output: true\n# Example 3:\n#\n# Input: nums = [1,5,9,1,5,9], k = 2, t = 3\n# Output: false\n\nclass Solution(object):\n    def 
containsNearbyAlmostDuplicate(self, nums, k, t):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :type t: int\n :rtype: bool\n \"\"\"\n if k <= 0 or t < 0:\n return False\n key_to_val = {}\n for i, num in enumerate(nums):\n key = num // (t + 1)\n print(key, key_to_val)\n if key in key_to_val \\\n or key + 1 in key_to_val and key_to_val[key + 1] - num <= t \\\n or key - 1 in key_to_val and num - key_to_val[key - 1] <= t:\n return True\n if i >= k:\n del key_to_val[nums[i - k] // (t + 1)]\n key_to_val[key] = num\n return False\n\ns = Solution()\nprint(s.containsNearbyAlmostDuplicate([1,2,3,4], 3, 0))\nprint(s.containsNearbyAlmostDuplicate([1,0,2,1], 1, 2))\nprint(s.containsNearbyAlmostDuplicate([1,5,9,1, 5,9], 2, 3))\n","sub_path":"src/leetcode/LC_220_contains_duplicate3.py","file_name":"LC_220_contains_duplicate3.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"426048567","text":"import pygame\n\nfrom .color import COLORS\nfrom .sprites.core import Base\n\nclass Player(Base):\n\n MOVE_SPEED = 5\n\n def __init__(self):\n super(Player, self).__init__()\n\n self.image = pygame.Surface((25, 50))\n self.image.fill(COLORS.white)\n self.rect = self.image.get_rect()\n","sub_path":"01-dodger/dodger1/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"69600386","text":"#!/usr/bin/env python\nimport os\n\ndef GenSecureRandomNumber ():\n randstr = os.urandom (4)\n num = ord(randstr[0])\n num = (num << 8) | ord(randstr[1])\n num = (num << 8) | ord(randstr[2])\n num = (num << 8) | ord(randstr[3])\n\n return num\n\ndef GetListOfUniqueRandomNums (pCount, pMinVal, pMaxVal):\n randlist = []\n while len(randlist) < pCount:\n rnum = GenSecureRandomNumber ()\n rnum = (rnum % (pMaxVal - pMinVal)) + pMinVal\n if not rnum in randlist:\n randlist.append (rnum)\n\n return randlist\n\ndef SplitOrigTrainFile (pSrcFile, pDestTrain, pDestTest, pSepList):\n sfile = open (pSrcFile, 'r')\n trfile = open (pDestTrain, 'w')\n tsfile = open (pDestTest, 'w')\n\n ndx = 1\n\n for lines in sfile:\n if ndx in pSepList:\n tsfile.writelines(lines)\n else:\n trfile.writelines (lines)\n ndx = ndx + 1\n\ndef CreateFileNames (pPath, pNdx):\n fnamearr = []\n\n buf = \"%s/train_data_%d.txt\" % (pPath, pNdx)\n fnamearr.append(buf)\n buf = \"%s/train_label_%d.txt\" % (pPath, pNdx)\n fnamearr.append(buf)\n buf = \"%s/test_data_%d.txt\" % (pPath, pNdx)\n fnamearr.append(buf)\n buf = \"%s/test_label_%d.txt\" % (pPath, pNdx)\n fnamearr.append(buf)\n\n return fnamearr\n\ndef SeparateTrain (pTrainData, pTrainLabel, pFileNames):\n testselect = GetListOfUniqueRandomNums(50, 1, 488) + GetListOfUniqueRandomNums (45, 489, 920)\n SplitOrigTrainFile (pTrainData, pFileNames[0], pFileNames[2], testselect)\n SplitOrigTrainFile (pTrainLabel, pFileNames[1], pFileNames[3], testselect)\n\ndef GenerateMultipleTrainTestSet (pTrainData, pTrainLabel, pDestPath, pCount):\n\n for ndx in range(1, pCount + 1):\n fnames = CreateFileNames (pDestPath, ndx)\n SeparateTrain (pTrainData, pTrainLabel, fnames)\n\ndef SeparateLabels ():\n sfile = open ('train_data.txt', 'r')\n trfile = open ('train_data_label_1.txt', 'w')\n tsfile = open ('train_data_label_0.txt', 'w')\n\n ndx = 1\n\n for lines in sfile:\n if ndx > 488:\n tsfile.writelines(lines)\n else:\n trfile.writelines (lines)\n ndx = ndx + 
1\n\n#GenerateMultipleTrainTestSet('../input_data/train_data.txt', '../input_data/train_labels.txt', '../bin', 10)\nSeparateLabels()\n","sub_path":"python_scripts/split_train.py","file_name":"split_train.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"10418095","text":"from django.conf.urls import patterns, url\nfrom views import RegistrarUnoPorDiez, UnoPorDiezView, Reporte1x10, Lista1x10Report, Listar1X10,Lista1x10ubchReport, RegistrarUnoPorJefeInst\nurlpatterns = patterns('',\n url(r'^registro/$', RegistrarUnoPorDiez.as_view(),\n name=\"registro_uno\"),\n url(r'^lista/cedula_patrullero=(?P\\d+)$',\n UnoPorDiezView.as_view()\n ),\n url(r'^unoordiez/$', Reporte1x10.as_view(),\n name='reporte_unodiez'),\n url(r'^reporte/(?P\\d+)$', Lista1x10Report.as_view()),\n url(r'^listar/(?P\\d+)$', Listar1X10.as_view()),\n url(r'^reporte/(?P\\w+)$', Lista1x10ubchReport.as_view()), # Url para la consulta de listado de 1x10 por ubch\n url(r'^registro_unojefes/$', RegistrarUnoPorJefeInst.as_view(),\n name=\"registro_unojefes\"),\n )\n \n","sub_path":"apps/registro_ubch/urls1.py","file_name":"urls1.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"491799544","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\n\nGPIO.setup(16, GPIO.OUT)\nGPIO.setup(18, GPIO.OUT)\nGPIO.setup(22, GPIO.OUT)\n\niUltimoOutputComporta1 = 0\niUltimoOutputComporta2 = 0\niUltimoOutputComporta3 = 0\n\nwhile(True):\n\t\n\tbChangedState = False\n\t\n\ttry:\n\t\tarq = open('/tmp/barragem_input_control.txt', 'r')\n\texcept FileNotFoundError:\n\t\tarq = open('/tmp/barragem_input_control.txt', 'w')\n\t\tarq.write('0|0|0')\n\t\tarq = open('/tmp/barragem_input_control.txt', 'r')\n\t\tbChangedState = True\n\n\tcomando = arq.read().split(\"|\")\n\t\n\tif len(comando) < 3:\n\t\tcontinue\n\t\n\tiOutputComporta1 = int(comando[0])\n\tiOutputComporta2 = int(comando[1])\n\tiOutputComporta3 = int(comando[2])\n\t\n\tif iUltimoOutputComporta1 != iOutputComporta1:\n\t\tiUltimoOutputComporta1 = iOutputComporta1\n\t\tbChangedState = True\n\t\t\n\tif iUltimoOutputComporta2 != iOutputComporta2:\n\t\tiUltimoOutputComporta2 = iOutputComporta2\n\t\tbChangedState = True\n\t\t\n\tif iUltimoOutputComporta3 != iOutputComporta3:\n\t\tiUltimoOutputComporta3 = iOutputComporta3\n\t\tbChangedState = True\n\t\n\tbDeuErro = False\n\tfor i in range(0,3):\n\t\tif comando[i] != \"0\" and comando[i] != \"1\":\n\t\t\tbDeuErro = True\n\t\t\tbreak\n\t\t\t\n\tif bDeuErro:\n\t\tcontinue\n\t\n\tif bChangedState == True:\n\t\tprint('')\n\t\tprint('Comporta 1: ', comando[0])\n\t\tprint('Comporta 2: ', comando[1])\n\t\tprint('Comporta 3: ', comando[2])\n\n\t#controle das saidas do rasp\n\tGPIO.output(16, int(comando[0]))\n\tGPIO.output(18, int(comando[1]))\n\tGPIO.output(22, int(comando[2]))\n\t\t\n\ttime.sleep(0.1)\n\tarq.close()\n\n\t\n","sub_path":"barragem_program.py","file_name":"barragem_program.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"519791711","text":"from django.test import TestCase, client\n\n# Create your tests here.\nfrom rest_framework.test import APITestCase, APIClient\nfrom rest_framework import status\nfrom .factory import OrderDetailFactory, OrderFactory, ProductFactory, UserFactory\nfrom ecommerce.api.models 
import Product, Order, OrderDetail\nimport json\n\ndef login(client, admin):\n token_request = client.post(\"/api/token/\", {'username': admin.username, 'password': 'admin'})\n token = token_request.data.get(\"access\")\n client.credentials(HTTP_AUTHORIZATION='Bearer {}'.format(token))\n\nclass EcommerceApiTestCase(APITestCase):\n @classmethod\n def setUpTestData(cls):\n cls.client = APIClient()\n cls.admin = UserFactory.create()\n product_1 = ProductFactory.create(price=3.00, stock=30)\n product_2 = ProductFactory.create(price=5.00, stock=5)\n product_3 = ProductFactory.create(price=1.00, stock=10)\n order = OrderFactory.create()\n OrderDetailFactory.create(order=order, product=product_1, quantity=15)\n\n def test_non_authenticated_request(self):\n request = self.client.get(\"/api/products/\")\n self.assertEqual(request.status_code, status.HTTP_401_UNAUTHORIZED)\n \n def test_authenticated_request(self):\n login(self.client, self.admin)\n request = self.client.get(\"/api/products/\")\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n\n def test_get_all_products(self):\n login(self.client, self.admin)\n request = self.client.get(\"/api/products/\")\n self.assertEqual(request.data.get(\"count\"), 3)\n\n def test_get_specific_product(self):\n login(self.client, self.admin)\n request = self.client.get(\"/api/products/1/\")\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n\n def test_delete_product(self):\n login(self.client, self.admin)\n new_name = 'edited product'\n request = self.client.delete(\"/api/products/1/\")\n self.assertEqual(request.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Product.objects.count(), 2)\n\n def test_create_new_product(self):\n login(self.client, self.admin)\n request = self.client.post(\"/api/products/\", {'name': 'new product', 'stock': 17, 'price': 7.50})\n self.assertEqual(request.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Product.objects.count(), 4)\n \n def test_edit_new_product(self):\n login(self.client, self.admin)\n new_name = 'edited product'\n request = self.client.put(\"/api/products/1/\", {'name': new_name, 'stock': 1, 'price': 15.50})\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n self.assertEqual(Product.objects.get(pk=1).name, new_name)\n\n def test_edit_product_stock(self):\n login(self.client, self.admin)\n new_stock = 100\n request = self.client.post(\"/api/products/1/update_stock/\", {'stock': new_stock})\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n self.assertEqual(Product.objects.get(pk=1).stock, new_stock)\n\n def test_edit_product_negative_stock(self):\n login(self.client, self.admin)\n new_stock = -100\n request = self.client.post(\"/api/products/1/update_stock/\", {'stock': new_stock})\n self.assertEqual(request.status_code, status.HTTP_400_BAD_REQUEST)\n \n def test_get_all_orders(self):\n login(self.client, self.admin)\n request = self.client.get(\"/api/orders/\")\n self.assertEqual(request.data.get(\"count\"), 1)\n \n def test_get_specific_order(self):\n login(self.client, self.admin)\n request = self.client.get(\"/api/orders/1/\")\n self.assertEqual(len(request.data.get(\"details\")), 1)\n self.assertEqual(request.data.get(\"total\"), 45.00)\n\n def test_create_new_order(self):\n login(self.client, self.admin)\n data= {\n \"details\":\n [\n {\n \"product\": 1,\n \"quantity\": 2\n },\n {\n \"product\": 3,\n \"quantity\": 1\n }\n ]\n }\n product_1_previous_stock = Product.objects.get(pk=1).stock\n product_3_previous_stock = 
Product.objects.get(pk=3).stock\n \n request = self.client.post('/api/orders/', data=data, format=\"json\")\n\n self.assertEqual(request.status_code, status.HTTP_201_CREATED)\n self.assertEqual(product_1_previous_stock, Product.objects.get(pk=1).stock + 2)\n self.assertEqual(product_3_previous_stock, Product.objects.get(pk=3).stock + 1)\n\n\n def test_create_order_duplicated_product(self):\n login(self.client, self.admin)\n data= {\n \"details\":\n [\n {\n \"product\": 3,\n \"quantity\": 1\n },\n {\n \"product\": 3,\n \"quantity\": 2\n }\n ]\n }\n request = self.client.post(\"/api/orders/\", data=data, format=\"json\")\n self.assertEqual(request.status_code, status.HTTP_400_BAD_REQUEST)\n message_error = json.loads(request.content).get(\"non_field_errors\")[0]\n self.assertIn(\"is duplicated\", message_error)\n\n\n def test_create_order_not_enough_stock_of_product(self):\n login(self.client, self.admin)\n data= {\n \"details\":\n [\n {\n \"product\": 3,\n \"quantity\": 1000,\n },\n ]\n }\n request = self.client.post(\"/api/orders/\", data=data, format=\"json\")\n self.assertEqual(request.status_code, status.HTTP_400_BAD_REQUEST)\n message_error = json.loads(request.content).get(\"non_field_errors\")[0]\n self.assertIn(\"There's not enough stock\", message_error)\n\n def test_delete_order_and_restore_stock(self):\n login(self.client, self.admin)\n order_detail = Order.objects.get(pk=1).details.first()\n product_to_restore = order_detail.product.id\n product_previous_stock = order_detail.product.stock\n stock_to_restore = order_detail.quantity\n request = self.client.delete(\"/api/orders/1/\")\n self.assertEqual(request.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Product.objects.get(id=product_to_restore).stock, product_previous_stock+stock_to_restore)\n\n def test_update_order_and_product_stock(self):\n login(self.client, self.admin)\n order_detail = Order.objects.get(pk=1).details.first()\n product_to_restore = order_detail.product.id\n product_1_previous_quantity = order_detail.quantity\n\n product_1_previous_stock = Product.objects.get(pk=1).stock\n product_2_previous_stock = Product.objects.get(pk=2).stock\n \n new_quantity = 10\n data= {\n \"details\":\n [\n {\n \"product\": 1,\n \"quantity\": new_quantity,\n },\n {\n \"product\": 2,\n \"quantity\": 2,\n },\n ]\n }\n stock_difference = new_quantity - product_1_previous_quantity\n request = self.client.put(\"/api/orders/1/\", data=data, format=\"json\")\n\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n self.assertEqual(Product.objects.get(id=product_to_restore).stock, product_1_previous_stock-stock_difference)\n\n self.assertEqual(Product.objects.get(id=2).stock, product_2_previous_stock-2)\n self.assertEqual(OrderDetail.objects.count(), 2)","sub_path":"ecommerce/api/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"417529992","text":"from src.ModelMogelijkheden import ModelMogelijkheden\nfrom src.WeaponsDict import WeaponsDict\nfrom src.ModelMogelijkheden import ModelMogelijkheden\n\nclass ModelMogelijkhedenList(list):\n def __init__(self, unitMogelijkhedenFile = \"\", weaponsFile = \"\"):\n wd = WeaponsDict(weaponsFile)\n with open(unitMogelijkhedenFile, 'r') as file:\n print(file.readline())\n rows = file.read().splitlines()\n for row in rows:\n print(row)\n unitMogelijkheden = ModelMogelijkheden(row, wd)\n self.append(unitMogelijkheden)\n print(\"units regels gelezen: 
\",len(rows))\n print(\"units in dict: \",len(self))\n\n def createBuildList(self):\n modelBuildList = []\n for modelModelijkheden in self:\n modelBuildList.extend(modelModelijkheden.createBuildList())\n return modelBuildList\n","sub_path":"src/ModelMogelijkhedenList.py","file_name":"ModelMogelijkhedenList.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"361205345","text":"import torch\nimport torch.nn as nn\nimport torchvision.models as models\n\n\nclass EncoderCNN(nn.Module):\n def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet50(pretrained=True)\n for param in resnet.parameters():\n param.requires_grad_(False)\n \n modules = list(resnet.children())[:-1]\n self.resnet = nn.Sequential(*modules)\n self.embed = nn.Linear(resnet.fc.in_features, embed_size)\n\n def forward(self, images):\n features = self.resnet(images)\n features = features.view(features.size(0), -1)\n features = self.embed(features)\n return features\n \n\nclass DecoderRNN(nn.Module):\n def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):\n super(DecoderRNN, self).__init__()\n self.embed_size = embed_size\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.num_layers = num_layers\n resnet = models.resnet50(pretrained=True)\n \n self.lstm = nn.LSTM(input_size=embed_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True)\n self.fc = nn.Linear(hidden_size, vocab_size)\n self.embed = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_size)\n \n def forward(self, features, captions):\n captions = captions[:, :-1]\n embedded = self.embed(captions)\n embedded = torch.cat((features.unsqueeze(dim = 1), embedded), dim = 1)\n lstm_out, state = self.lstm(embedded)\n \n output = self.fc(lstm_out)\n \n return output\n\n def sample(self, inputs, states=None, max_len=20):\n \" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) \"\n predictions = []\n embeddings = inputs\n \n for l in range(max_len):\n hidden, states = self.lstm(inputs, states)\n output = self.fc(hidden.squeeze(1))\n _, prediction = torch.max(output, 1)\n predictions.append(prediction.item())\n inputs = self.embed(prediction).unsqueeze(1)\n \n \n return predictions\n \n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"276742977","text":"\"\"\"\n This examples shows how cleaned data can be read for further use\n\"\"\"\n\n\nimport numpy\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# BSGIP specific tools\nfrom c3x.data_loaders import configfileparser, nextgen_loaders, tariff_loaders\nfrom c3x.data_statistics import figure_of_merit\nfrom c3x.enomo.models import EnergyStorage, EnergySystem, Demand, Generation, LocalTariff\nfrom c3x.enomo.energy_optimiser import OptimiserObjectiveSet, LocalEnergyOptimiser\n\n# set up seaborn the way you like\nsns.set_style({'axes.linewidth': 1, 'axes.edgecolor': 'black', 'xtick.direction': \\\n 'out', 'xtick.major.size': 4.0, 'ytick.direction': 'out', 'ytick.major.size': 4.0, \\\n 'axes.facecolor': 'white', 'grid.color': '.8', 'grid.linestyle': u'-', 'grid.linewidth': 0.5})\n\nconfig = configfileparser.ConfigFileParser(\"config/example_for_FoMs.ini\")\n\nmeasurement_types = config.read_data_usage()\ndata_paths = 
config.read_data_path()\ndata_files = []\n\n# Create a nextGen data object that has working paths and can be sliced using batches\nnext_gen = nextgen_loaders.NextGenData('FoM', source=data_paths['source'],\n batteries=data_paths[\"batteries\"],\n solar=data_paths[\"solar\"],\n loads=data_paths[\"loads\"],\n node=data_paths[\"node\"])\n\ncleaned_data = next_gen.to_measurement_data()\n\n# Tariffs are in $ / kwh\ndata_location = '../tests/tariff_database/'\nlocal_tz = 'Australia/Sydney'\n\ntariff_dict = {}\nfor node in cleaned_data:\n load = cleaned_data[node][\"loads_\" + str(node)]\n tariff_info = {}\n tariff_info['import_tariff'] = tariff_loaders.link_tariffs(load, data_location, 'test_tou_tariff.json')\n tariff_info['export_tariff'] = tariff_loaders.link_tariffs(load, data_location, 'test_feed_in_tariff.json')\n tariff_info['le_export_tariff'] = tariff_loaders.link_tariffs(load, data_location, 'test_le_export_tariff.json')\n tariff_info['le_import_tariff'] = tariff_loaders.link_tariffs(load, data_location, 'test_le_import_tariff.json')\n tariff_info['lt_export_tariff'] = tariff_loaders.link_tariffs(load, data_location, 'test_lt_export_tariff.json')\n tariff_info['lt_import_tariff'] = tariff_loaders.link_tariffs(load, data_location, 'test_lt_import_tariff.json')\n tariff_info['re_export_tariff'] = tariff_loaders.link_tariffs(load, data_location, 'test_re_export_tariff.json')\n tariff_info['re_import_tariff'] = tariff_loaders.link_tariffs(load, data_location, 'test_re_import_tariff.json')\n tariff_info['rt_export_tariff'] = tariff_loaders.link_tariffs(load, data_location, 'test_rt_export_tariff.json')\n tariff_info['rt_import_tariff'] = tariff_loaders.link_tariffs(load, data_location, 'test_rt_import_tariff.json')\n tariff_dict[node] = tariff_info\n\ncustomer = next(iter(cleaned_data))\ndata_customer = cleaned_data[customer]\nc_load = data_customer[\"loads_\" + customer]\nsolar = data_customer[\"solar_\" + customer]\nnet_load = c_load[\"PLG\"] + solar[\"PLG\"]\n\ncolors = sns.color_palette()\nhrs = numpy.arange(0, len(c_load[\"PLG\"])) / 4\nfig = plt.figure(figsize=(14, 4))\nax1 = fig.add_subplot(1, 1, 1)\nl1, = ax1.plot(hrs, 4 * c_load[\"PLG\"], color=colors[0])\nl2, = ax1.plot(hrs, 4 * solar, color=colors[1])\nl3, = ax1.plot(hrs, 4 * net_load, color=colors[2])\nax1.set_xlabel('hour'), ax1.set_ylabel('kW')\nax1.legend([l1, l2, l3], ['Load', 'PV', 'Connection Point'], ncol=2)\nax1.set_xlim([0, len(c_load[\"PLG\"]) / 4])\nfig.tight_layout()\n\nfig.show()\n\n# ENOMO optimisation battery\nbattery = EnergyStorage(max_capacity=15.0,\n depth_of_discharge_limit=0,\n charging_power_limit=5.0,\n discharging_power_limit=-5.0,\n charging_efficiency=1,\n discharging_efficiency=1,\n throughput_cost=0.018)\n\nnet_load = figure_of_merit.network_net_power(cleaned_data, node_keys=[customer])\nimport_net_load = net_load[\"net_import\"]\nexport_net_load = net_load[\"net_export\"]\n\n# add the energy system e.G. Battery\n# demand will reject negative values\nenergy_system = EnergySystem()\nenergy_system.add_energy_storage(battery)\n\n# add a demand for the system e.G. imported energy\nload = Demand()\nload.add_demand_profile(import_net_load)\n\n# add a generation for the system e.G. 
exported energy\n# generation will reject positive values\npv = Generation()\npv.add_generation_profile(export_net_load)\n\n# add local tariff data for cost\nlocal_tariff = LocalTariff()\nlocal_tariff.add_local_energy_tariff_profile_export(dict(enumerate(tariff_info['le_export_tariff'])))\nlocal_tariff.add_local_energy_tariff_profile_import(dict(enumerate(tariff_info['le_import_tariff'])))\nlocal_tariff.add_local_transport_tariff_profile_export(dict(enumerate(tariff_info['lt_export_tariff'])))\nlocal_tariff.add_local_transport_tariff_profile_import(dict(enumerate(tariff_info['lt_import_tariff'])))\nlocal_tariff.add_remote_energy_tariff_profile_export(dict(enumerate(tariff_info['re_export_tariff'])))\nlocal_tariff.add_remote_energy_tariff_profile_import(dict(enumerate(tariff_info['re_import_tariff'])))\nlocal_tariff.add_remote_transport_tariff_profile_export(dict(enumerate(tariff_info['rt_export_tariff'])))\nlocal_tariff.add_remote_transport_tariff_profile_import(dict(enumerate(tariff_info['rt_import_tariff'])))\n\n# add the demand and generation profiles to the energy system\nenergy_system.add_demand(load)\nenergy_system.add_generation(pv)\nenergy_system.add_local_tariff(local_tariff)\n\n# Invoke the optimiser and optimise\nlocal_energy_models = True\noptimiser = LocalEnergyOptimiser(15, 324, energy_system, OptimiserObjectiveSet.LocalModelsThirdParty + OptimiserObjectiveSet.LocalPeakOptimisation)\n\n############################ Analyse the Optimisation ########################################\nstorage_energy_delta = optimiser.values('storage_charge_grid') +\\\n optimiser.values('storage_charge_generation') +\\\n optimiser.values('storage_discharge_demand') +\\\n optimiser.values('storage_discharge_grid')\n\ncolors = sns.color_palette()\nhrs = numpy.arange(0, len(c_load[\"PLG\"])) / 4\nfig = plt.figure(figsize=(14, 7))\nax1 = fig.add_subplot(2, 1, 1)\nl1, = ax1.plot(hrs, 4 * c_load[\"PLG\"], color=colors[0])\nl2, = ax1.plot(hrs, 4 * solar, color=colors[1])\nl4, = ax1.plot(hrs, 4 * storage_energy_delta, color=colors[3])\nax1.set_xlabel('hour'), ax1.set_ylabel('kW')\nax1.legend([l1, l2, l4], ['Load', 'PV', 'Storage'], ncol=3)\nax1.set_xlim([0, len(c_load[\"PLG\"]) / 4])\nax3 = fig.add_subplot(2, 1, 2)\nl1, = ax3.plot(hrs, storage_energy_delta * 4, color=colors[5])\nl2, = ax3.plot(hrs, optimiser.values('storage_state_of_charge'), color=colors[4])\nax3.set_xlabel('hour'), ax3.set_ylabel('action')\nax3.legend([l1, l2], ['battery action (kW)', 'SOC (kWh)'], ncol=2)\nax3.set_xlim([0, len(c_load[\"PLG\"]) / 4])\nfig.tight_layout()\nplt.show()\n\nnet_grid_flow = 4 * optimiser.values('storage_charge_grid') + 4 * optimiser.values('storage_discharge_grid') + 4 * optimiser.values('local_net_import') + 4 * optimiser.values('local_net_export')\n\nfig = plt.figure(figsize=(14, 7))\nax11 = fig.add_subplot(2, 1, 1)\nl1, = ax11.plot(hrs, 4 * net_load[\"net_load\"], color=colors[0])\nl2, = ax11.plot(hrs, 4 * optimiser.values('storage_charge_grid'), color=colors[1])\nl3, = ax11.plot(hrs, 4 * optimiser.values('storage_charge_generation'), color=colors[2])\nl4, = ax11.plot(hrs, 4 * optimiser.values('storage_discharge_demand'), color=colors[3])\nl5, = ax11.plot(hrs, 4 * optimiser.values('storage_discharge_grid'), color=colors[4])\nl6, = ax11.plot(hrs, 4 * optimiser.values('local_net_import'), color=colors[5])\nl7, = ax11.plot(hrs, 4 * optimiser.values('local_net_export'), color=colors[6])\nl8, = ax11.plot(hrs, 4 * optimiser.values('local_demand_transfer'), color=colors[8])\nax11.set_xlabel('hour'), 
ax1.set_ylabel('kW')\nax11.legend([l1, l2, l3, l4, l5, l6, l7, l8], ['Net', 'storage_charge_grid', 'storage_charge_generation', 'storage_discharge_load', 'storage_discharge_grid', 'Net Customer Import', 'Net Customer Export', 'Local Transfer'], ncol=3)\nax11.set_xlim([0, len(c_load[\"PLG\"]) / 4])\nax33 = fig.add_subplot(2, 1, 2)\nl33, = ax33.plot(hrs, net_grid_flow, color=colors[0])\nax33.set_xlabel('hour'), ax3.set_ylabel('kW')\nax33.legend([l33], ['Net Grid Flows'], ncol=2)\nax33.set_xlim([0, len(c_load[\"PLG\"]) / 4])\nplt.show()\n","sub_path":"scripts/demonstrate_ENOMO.py","file_name":"demonstrate_ENOMO.py","file_ext":"py","file_size_in_byte":8426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"96540188","text":"import gc\nimport test\nimport time\nfrom machine import SPI, Pin\n\ntime.sleep(1)\n\nspi = SPI(2, sck=Pin(18), mosi=Pin(23), miso=Pin(19), polarity=0, phase=0)\n\ni = 0\nlast_free = 0\nwhile True:\n test.test(spi.write)\n free = gc.mem_free()\n if free > last_free:\n print('gc cleaned up')\n time.sleep(1)\n print('free: {} allocated: {}'.format(free, gc.mem_alloc()))\n i += 1\n last_free = free","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"440338471","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\n# @CreationDate: 2020-12-26 16:55:28\n# @Author: xiaochuan\n# @Description: 两数之和(亚马逊、字节跳动、谷歌、Facebook、苹果、微软、腾讯在半年内面试中常考)\nfrom typing import List\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n # 暴力 O(n^2)\n # for i in range(len(nums)-1):\n # for j in range(i+1,len(nums)):\n # if nums[i] + nums[j] == target:\n # return [i,j]\n # return []\n\n # hashtable one/two times time:O(n) space:O(n)\n # a = target - b dict[nums[i]] = i judge target-b in dict? 
\n # 延申: 3Sums : -c = a + b 4Sums...NSums\n d = {}\n for i in range(len(nums)):\n if target - nums[i] in d:\n return [i,d[target-nums[i]]]\n d[nums[i]] = i\n return []\n\n\nif __name__ == \"__main__\":\n nums = [2, 7, 11, 15]\n target = 9 \n print(Solution().twoSum(nums,target))","sub_path":"Week_02/2.5.2 哈希映射集合 两数之和.py","file_name":"2.5.2 哈希映射集合 两数之和.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"511644481","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 30 14:56:39 2018\n\n@author: sebastianorbell\n\"\"\"\n\nimport numpy as np\nimport scipy.linalg as la\nfrom scipy.linalg import inv as inv\nfrom scipy.optimize import differential_evolution\nfrom scipy.optimize import minimize\nimport scipy.stats as sts\nfrom scipy import interpolate\nimport matplotlib.pyplot as plt\n\nclass rotational_relaxation:\n \n\n def __init__(self,aniso_dipolar,g1_iso,g2_iso,aniso_g1,aniso_g2,iso_h1,iso_h2,aniso_hyperfine_1,aniso_hyperfine_2,spin_numbers_1,spin_numbers_2,field,J,ks,kt,lamb,temp,kstd):\n # declare constants and identities\n\n self.r_perp = 16.60409997886e-10 \n self.r_parr = 4.9062966e-10\n \n self.prefact = 3.92904692e-03\n self.beta = 9.96973104e+01\n self.c = -4.92846450e-03\n \n #self.visc = 1.0e-3*(-0.00625*temp+2.425)\n self.visc = self.prefact*np.exp(self.beta/temp)+self.c\n \n self.convert = 1.76e8\n\n self.d_perp = self.convert*1.38064852e-23*temp/(8.0*np.pi*self.visc*(self.r_perp**3))\n self.d_parr = self.convert*1.38064852e-23*temp/(8.0*np.pi*self.visc*(self.r_parr**3))\n \n \n self.iden2 = np.eye(2)\n self.iden4 = np.eye(4)\n self.iden16 = np.eye(16)\n \n #declare matrices\n self.sx = np.array([[0,0.5],[0.5,0]])\n self.sy = np.array([[0,-0.5j],[0.5j,0]])\n self.sz = np.array([[0.5,0],[0,-0.5]])\n \n self.s1_x = la.kron(self.sx,self.iden2)\n self.s1_y = la.kron(self.sy,self.iden2)\n self.s1_z = la.kron(self.sz,self.iden2)\n \n self.s2_x = la.kron(self.iden2,self.sx)\n self.s2_y = la.kron(self.iden2,self.sy)\n self.s2_z = la.kron(self.iden2,self.sz)\n \n self.s1_s2 = la.kron(self.sx,self.sx) + la.kron(self.sy,self.sy) + la.kron(self.sz,self.sz)\n self.pro_trip = 0.75 * np.eye(4) + self.s1_s2\n self.pro_sing = 0.25 * np.eye(4) - self.s1_s2\n \n # Declare Liouvillian density operators\n self.lamb = lamb\n \n self.p0_lou = 0.5 * ((1.0-self.lamb)*np.reshape(self.pro_sing,(16,1))+(self.lamb/3.0)*np.reshape(self.pro_trip,(16,1))) \n self.pt_lou = np.reshape(self.pro_trip,(1,16)) \n self.ps_lou = np.reshape(self.pro_sing,(1,16))\n \n #Declare Hamiltonian and Louivillian\n self.ltot = np.zeros([16,16], dtype = complex)\n self.h0 = np.zeros([4,4], dtype = complex)\n \n # Declare redfield and components\n self.redfield = np.zeros([16,16], dtype = complex)\n self.B = np.zeros([16,16], dtype = complex)\n \n # Tensor terms\n self.elec = np.zeros([5,5,4,4],dtype = complex)\n self.uq = np.zeros([5,4,4],dtype = complex)\n \n # declare class variable\n \n self.aniso_g1 = aniso_g1\n self.aniso_g2 = aniso_g2\n self.g_mat = np.zeros([5],dtype = complex)\n self.g1_iso = g1_iso\n self.g2_iso = g2_iso\n \n self.hyperfine_1 = aniso_hyperfine_1\n self.h1_size = np.size(self.hyperfine_1[:,0,0])\n self.h1 = np.zeros([self.h1_size,5], dtype = complex)\n self.iso_h1 = iso_h1\n \n self.hyperfine_2 = aniso_hyperfine_2\n self.h2_size = np.size(self.hyperfine_2[:,0,0])\n self.h2 = np.zeros([self.h2_size,5], dtype = complex)\n self.iso_h2 = iso_h2\n \n self.dipolar = aniso_dipolar\n 
self.d_rank_2 = np.zeros([5],dtype = complex)\n \n self.ks = ks\n self.kt = kt\n \n self.kstd =kstd\n \n self.J_couple = J\n \n self.omega1 = [0.0,0.0,field]\n self.omega2 = [0.0,0.0,field]\n \n self.n1_nuclear_spins = self.h1_size\n self.n2_nuclear_spins = self.h2_size\n \n self.spin_numbers_1 = spin_numbers_1\n self.spin_numbers_2 = spin_numbers_2\n \n # Variables for sampling SW vectors\n self.angles1 = np.zeros([2,self.n1_nuclear_spins])\n self.angles2 = np.zeros([2,self.n2_nuclear_spins])\n \n self.nuc_vecs_1 = np.zeros([3,self.n1_nuclear_spins])\n self.nuc_vecs_2 = np.zeros([3,self.n2_nuclear_spins])\n \n self.omegatot_1 = np.zeros_like(self.omega1)\n self.omegatot_2 = np.zeros_like(self.omega2)\n \n self.vec_len_1 = np.sqrt(np.multiply(self.spin_numbers_1,(self.spin_numbers_1+1.0)))\n self.vec_len_2 = np.sqrt(np.multiply(self.spin_numbers_2,(self.spin_numbers_2+1.0)))\n \n \n # Haberkorn\n self.Haberkorn_Matrix()\n \n return\n \n # Construct Haberkorn\n def Haberkorn_Matrix(self):\n self.haberkorn = 0.5 * self.kt *self.pro_trip + 0.5 * self.ks * self.pro_sing\n return\n \n # Sample random angles on a sphere with constant radius\n def sample_angles(self):\n self.angles1 = np.random.rand(2,self.n1_nuclear_spins)\n self.angles2 = np.random.rand(2,self.n2_nuclear_spins)\n \n self.angles1[0,:] = np.arccos(2.0 * self.angles1[0,:]-1.0)\n self.angles2[0,:] = np.arccos(2.0 * self.angles2[0,:]-1.0)\n \n self.angles1[1,:] = (2.0 * np.pi) * self.angles1[1,:]\n self.angles2[1,:] = (2.0 * np.pi) * self.angles2[1,:]\n \n return\n \n # Sample a random vector on a sphere for each nuclear spin\n def Vectors(self):\n \n self.nuc_vecs_1[0,:] = np.multiply(self.vec_len_1, np.multiply( np.cos(self.angles1[1,:]), np.sin(self.angles1[0,:])))\n self.nuc_vecs_1[1,:] = np.multiply(self.vec_len_1, np.multiply( np.sin(self.angles1[1,:]), np.sin(self.angles1[0,:])))\n self.nuc_vecs_1[2,:] = np.multiply( self.vec_len_1, np.cos(self.angles1[0,:]))\n \n self.nuc_vecs_2[0,:] = np.multiply(self.vec_len_2, np.multiply( np.cos(self.angles2[1,:]), np.sin(self.angles2[0,:])))\n self.nuc_vecs_2[1,:] = np.multiply(self.vec_len_2, np.multiply( np.sin(self.angles2[1,:]), np.sin(self.angles2[0,:])))\n self.nuc_vecs_2[2,:] = np.multiply(self.vec_len_2, np.cos(self.angles2[0,:]))\n \n return\n \n # Construct the total, time independent, zeeman field.\n def Tot_zeeman_field(self):\n \n self.omegatot_1 = self.omega1 + np.sum(np.multiply(self.iso_h1, self.nuc_vecs_1),1)\n self.omegatot_2 = self.omega2 + np.sum(np.multiply(self.iso_h2, self.nuc_vecs_2),1)\n \n return\n \n # Construct the Hamiltonian matrix for each conformation\n def Hamiltonian_Matrix(self):\n \n self.hamiltonian = self.h0\n self.hamiltonian = la.kron(self.omegatot_1[0] * self.sx + self.omegatot_1[1] * self.sy + self.omegatot_1[2] * self.sz, self.iden2) + la.kron(self.iden2, self.omegatot_2[0] * self.sx + self.omegatot_2[1] * self.sy + self.omegatot_2[2] * self.sz)-2.0*(self.J_couple)*self.s1_s2\n \n return\n \n # Define the reference Liouvillian ltot and its inverse linv\n def liouville(self):\n\n self.ltot = la.kron((-1j*self.hamiltonian-self.haberkorn),self.iden4) + la.kron(self.iden4,np.transpose(+1j*self.hamiltonian-self.haberkorn)) - self.kstd*(la.kron(self.pro_trip,np.transpose(self.pro_sing))+la.kron(self.pro_sing,np.transpose(self.pro_trip)))\n\n return\n \n # Define rank 2 g-tensor component\n def rank_2_g_tensor(self):\n \n # g1\n self.g1 = self.g_mat\n #self.g1_plus_2 \n self.g1[4] = 
0.5*(self.aniso_g1[0,0]-self.aniso_g1[1,1]-1.0j*(self.aniso_g1[0,1]+self.aniso_g1[1,0]))\n #self.g1_plus_1 \n self.g1[3] = -0.5*(self.aniso_g1[0,2]+self.aniso_g1[2,0]-1.0j*(self.aniso_g1[1,2]+self.aniso_g1[2,1]))\n #self.g1_zero \n self.g1[2] = (1.0/np.sqrt(6.0))*(2.0*self.aniso_g1[2,2]-(self.aniso_g1[0,0]+self.aniso_g1[1,1]))\n #self.g1_minus_1 \n self.g1[1] = 0.5*(self.aniso_g1[0,2]+self.aniso_g1[2,0]+1.0j*(self.aniso_g1[1,2]+self.aniso_g1[2,1]))\n #self.g1_minus_2 \n self.g1[0] = 0.5*(self.aniso_g1[0,0]-self.aniso_g1[1,1]+1.0j*(self.aniso_g1[0,1]+self.aniso_g1[1,0]))\n \n # g2\n self.g2 = np.zeros_like(self.g_mat) # fresh array: aliasing self.g_mat here made g1 and g2 share storage, so g2 silently overwrote g1\n #self.g2_plus_2 \n self.g2[4] = 0.5*(self.aniso_g2[0,0]-self.aniso_g2[1,1]-1.0j*(self.aniso_g2[0,1]+self.aniso_g2[1,0]))\n #self.g2_plus_1 \n self.g2[3] = -0.5*(self.aniso_g2[0,2]+self.aniso_g2[2,0]-1.0j*(self.aniso_g2[1,2]+self.aniso_g2[2,1]))\n #self.g2_zero \n self.g2[2] = (1.0/np.sqrt(6.0))*(2.0*self.aniso_g2[2,2]-(self.aniso_g2[0,0]+self.aniso_g2[1,1]))\n #self.g2_minus_1 \n self.g2[1] = 0.5*(self.aniso_g2[0,2]+self.aniso_g2[2,0]+1.0j*(self.aniso_g2[1,2]+self.aniso_g2[2,1]))\n #self.g2_minus_2 \n self.g2[0] = 0.5*(self.aniso_g2[0,0]-self.aniso_g2[1,1]+1.0j*(self.aniso_g2[0,1]+self.aniso_g2[1,0]))\n \n return\n \n # Define rank 2 hyperfine-tensor components\n def rank_2_hyperfine(self):\n \n # hyperfine tensors electron 1 \n #self.h1_plus_2 \n self.h1[:,4] = 0.5*(self.hyperfine_1[:,0,0]-self.hyperfine_1[:,1,1]-1.0j*(self.hyperfine_1[:,0,1]+self.hyperfine_1[:,1,0]))\n #self.h1_plus_1 \n self.h1[:,3] = -0.5*(self.hyperfine_1[:,0,2]+self.hyperfine_1[:,2,0]-1.0j*(self.hyperfine_1[:,1,2]+self.hyperfine_1[:,2,1]))\n #self.h1_zero \n self.h1[:,2] = (1.0/np.sqrt(6.0))*(2.0*self.hyperfine_1[:,2,2]-(self.hyperfine_1[:,0,0]+self.hyperfine_1[:,1,1]))\n #self.h1_minus_1 \n self.h1[:,1] = 0.5*(self.hyperfine_1[:,0,2]+self.hyperfine_1[:,2,0]+1.0j*(self.hyperfine_1[:,1,2]+self.hyperfine_1[:,2,1]))\n #self.h1_minus_2 \n self.h1[:,0] = 0.5*(self.hyperfine_1[:,0,0]-self.hyperfine_1[:,1,1]+1.0j*(self.hyperfine_1[:,0,1]+self.hyperfine_1[:,1,0]))\n \n # hyperfine tensors electron 2\n #self.h2_plus_2 \n self.h2[:,4] = 0.5*(self.hyperfine_2[:,0,0]-self.hyperfine_2[:,1,1]-1.0j*(self.hyperfine_2[:,0,1]+self.hyperfine_2[:,1,0]))\n #self.h2_plus_1 \n self.h2[:,3] = -0.5*(self.hyperfine_2[:,0,2]+self.hyperfine_2[:,2,0]-1.0j*(self.hyperfine_2[:,1,2]+self.hyperfine_2[:,2,1]))\n #self.h2_zero \n self.h2[:,2] = (1.0/np.sqrt(6.0))*(2.0*self.hyperfine_2[:,2,2]-(self.hyperfine_2[:,0,0]+self.hyperfine_2[:,1,1]))\n #self.h2_minus_1 \n self.h2[:,1] = 0.5*(self.hyperfine_2[:,0,2]+self.hyperfine_2[:,2,0]+1.0j*(self.hyperfine_2[:,1,2]+self.hyperfine_2[:,2,1]))\n #self.h2_minus_2 \n self.h2[:,0] = 0.5*(self.hyperfine_2[:,0,0]-self.hyperfine_2[:,1,1]+1.0j*(self.hyperfine_2[:,0,1]+self.hyperfine_2[:,1,0]))\n \n \n return\n \n # Define rank 2 dipolar tensor component\n def rank_2_dipolar(self):\n\n #self.d_rank_2_plus_2 \n self.d_rank_2[4] = 0.5*(self.dipolar[0,0]-self.dipolar[1,1]-1.0j*(self.dipolar[0,1]+self.dipolar[1,0]))\n #self.d_rank_2_plus_1 \n self.d_rank_2[3] = -0.5*(self.dipolar[0,2]+self.dipolar[2,0]-1.0j*(self.dipolar[1,2]+self.dipolar[2,1]))\n #self.d_rank_2_zero \n self.d_rank_2[2] = (1.0/np.sqrt(6.0))*(2.0*self.dipolar[2,2]-(self.dipolar[0,0]+self.dipolar[1,1]))\n #self.d_rank_2_minus_1 \n self.d_rank_2[1] = 0.5*(self.dipolar[0,2]+self.dipolar[2,0]+1.0j*(self.dipolar[1,2]+self.dipolar[2,1]))\n #self.d_rank_2_minus_2 \n self.d_rank_2[0] = 0.5*(self.dipolar[0,0]-self.dipolar[1,1]+1.0j*(self.dipolar[0,1]+self.dipolar[1,0]))\n \n return\n \n # Define rank 2 tensor product components\n def rank_two_component(self):\n \n # Electron 1\n \n self.u1x = self.g1 * self.omega1[0] + np.sum(self.h1[:,:] * self.nuc_vecs_1[0,:,None],0)\n self.u1y = self.g1 * self.omega1[1] + np.sum(self.h1[:,:] * self.nuc_vecs_1[1,:,None],0)\n self.u1z = self.g1 * self.omega1[2] + np.sum(self.h1[:,:] * self.nuc_vecs_1[2,:,None],0)\n \n self.elec_1 = np.zeros_like(self.elec) # fresh array: do not alias the shared self.elec buffer\n \n for i in range(0,5):\n #self.elec1_t_plus_2 \n self.elec_1[i,4,:,:] = 0.5 * (self.u1x[i] + 1.0j*self.u1y[i])*(self.s1_x + 1.0j*self.s1_y)\n #self.elec1_t_plus_1 \n self.elec_1[i,3,:,:] = 0.5*((self.u1x[i] + 1.0j*self.u1y[i])*self.s1_z + (self.s1_x + 1.0j*self.s1_y)*self.u1z[i])\n #self.elec1_t_zero \n self.elec_1[i,2,:,:]= (-1.0/2*np.sqrt(6))*((self.u1x[i] + 1.0j*self.u1y[i])*(self.s1_x - 1.0j*self.s1_y) + (self.u1x[i] - 1.0j*self.u1y[i])*(self.s1_x + 1.0j*self.s1_y) + 4.0*self.s1_z*self.u1z[i])\n #self.elec1_t_minus_1 \n self.elec_1[i,1,:,:] = -0.5*((self.u1x[i] - 1.0j*self.u1y[i])*self.s1_z + (self.s1_x - 1.0j*self.s1_y)*self.u1z[i])\n #self.elec1_t_minus_2 \n self.elec_1[i,0,:,:]= 0.5 * (self.u1x[i] - 1.0j*self.u1y[i])*(self.s1_x - 1.0j*self.s1_y)\n \n # Electron 2\n \n self.u2x = self.g2 * self.omega2[0] + np.sum(self.h2[:,:] * self.nuc_vecs_2[0,:,None],0)\n self.u2y = self.g2 * self.omega2[1] + np.sum(self.h2[:,:] * self.nuc_vecs_2[1,:,None],0)\n self.u2z = self.g2 * self.omega2[2] + np.sum(self.h2[:,:] * self.nuc_vecs_2[2,:,None],0)\n \n self.elec_2 = np.zeros_like(self.elec) # fresh array: aliasing self.elec here overwrote elec_1 in place\n \n for i in range(0,5):\n #self.elec2_t_plus_2 \n self.elec_2[i,4,:,:] = 0.5 * (self.u2x[i] + 1.0j*self.u2y[i])*(self.s2_x + 1.0j*self.s2_y)\n #self.elec2_t_plus_1 \n self.elec_2[i,3,:,:] = 0.5*((self.u2x[i] + 1.0j*self.u2y[i])*self.s2_z + (self.s2_x + 1.0j*self.s2_y)*self.u2z[i])\n #self.elec2_t_zero \n self.elec_2[i,2,:,:]= (-1.0/2*np.sqrt(6))*((self.u2x[i] + 1.0j*self.u2y[i])*(self.s2_x - 1.0j*self.s2_y) + (self.u2x[i] - 1.0j*self.u2y[i])*(self.s2_x + 1.0j*self.s2_y) + 4.0*self.s2_z*self.u2z[i])\n #self.elec2_t_minus_1 \n self.elec_2[i,1,:,:] = -0.5*((self.u2x[i] - 1.0j*self.u2y[i])*self.s2_z + (self.s2_x - 1.0j*self.s2_y)*self.u2z[i])\n #self.elec2_t_minus_2 \n self.elec_2[i,0,:,:]= 0.5 * (self.u2x[i] - 1.0j*self.u2y[i])*(self.s2_x - 1.0j*self.s2_y)\n \n \n # Dipolar coupling\n \n self.dipolar_tensor_product = np.zeros_like(self.elec) # fresh array for the same reason\n # fresh arrays: ux, uy and uz previously all aliased self.uq, so the x and y\n # components were overwritten by the z component before they were ever used\n self.ux = np.zeros_like(self.uq)\n self.uy = np.zeros_like(self.uq)\n self.uz = np.zeros_like(self.uq)\n \n for i in range(0,5):\n self.ux[i,:,:] = self.d_rank_2[i] * self.s1_x\n self.uy[i,:,:] = self.d_rank_2[i] * self.s1_y\n self.uz[i,:,:] = self.d_rank_2[i] * self.s1_z\n \n for i in range(0,5):\n #self.dipolar_tensor_product_t_plus_2 \n self.dipolar_tensor_product[i,4,:,:] = 0.5 * (self.ux[i,:,:] + 1.0j*self.uy[i,:,:])*(self.s2_x + 1.0j*self.s2_y)\n #self.dipolar_tensor_product_t_plus_1 \n self.dipolar_tensor_product[i,3,:,:] = 0.5*((self.ux[i,:,:] + 1.0j*self.uy[i,:,:])*self.s2_z + (self.s2_x + 1.0j*self.s2_y)*self.uz[i,:,:])\n #self.dipolar_tensor_product_t_zero \n self.dipolar_tensor_product[i,2,:,:]= (-1.0/2*np.sqrt(6))*((self.ux[i,:,:] + 1.0j*self.uy[i,:,:])*(self.s2_x - 1.0j*self.s2_y) + (self.ux[i,:,:] - 1.0j*self.uy[i,:,:])*(self.s2_x + 1.0j*self.s2_y) + 4.0*self.s2_z*self.uz[i,:,:])\n #self.dipolar_tensor_product_t_minus_1 \n self.dipolar_tensor_product[i,1,:,:] = -0.5*((self.ux[i,:,:] - 1.0j*self.uy[i,:,:])*self.s2_z + (self.s2_x - 1.0j*self.s2_y)*self.uz[i,:,:])\n #self.dipolar_tensor_product_t_minus_2 \n self.dipolar_tensor_product[i,0,:,:]= 0.5 * (self.ux[i,:,:] - 1.0j*self.uy[i,:,:])*(self.s2_x - 1.0j*self.s2_y)\n \n return \n \n # Construct the redfield superoperator matrix\n def Redfield_Matrix(self):\n \n # Calculate eigenvalues and eigenvectors of reference Liouvillian\n self.lam, self.p = la.eig(self.ltot)\n self.pinv = inv(self.p)\n\n self.red = np.zeros_like(self.redfield) # reset each call: aliasing self.redfield accumulated terms across successive calls\n\n # i = n and j = m\n for i in range(0,5):\n for j in range(0,5):\n # Define qmn = mu_b*(g1_m*t1_n + g2_m*t2_n)\n self.qmn = self.elec_1[i,j,:,:]+self.elec_2[i,j,:,:]+self.dipolar_tensor_product[i,j,:,:]\n self.Bmn = la.kron(self.qmn,self.iden4) - la.kron(self.iden4,np.transpose(self.qmn))\n \n self.tauc_n = 1.0/(6.0*self.d_perp +(self.d_parr-self.d_perp)*(np.float(i)-2.0)*(np.float(i)-2.0))\n self.jw_n = (1.0)/((1.0/self.tauc_n) - 1.0j*(np.tile((self.lam),(16,1))-np.tile(np.reshape(self.lam,(16,1)),(1,16))))\n \n self.elem_wise = np.multiply(self.jw_n, np.matmul(self.pinv,np.matmul(self.Bmn,self.p)))\n self.red += - (np.matmul(np.transpose(np.conj(self.Bmn)),np.matmul(self.p,np.matmul(self.elem_wise,self.pinv))))\n \n return\n \n def lifetime(self):\n lifetime = 0.0\n self.sample_angles()\n self.Vectors()\n self.Tot_zeeman_field()\n self.Hamiltonian_Matrix()\n self.liouville()\n self.rank_2_g_tensor()\n self.rank_2_dipolar()\n self.rank_2_hyperfine()\n self.rank_two_component()\n self.Redfield_Matrix()\n lifetime = np.matmul((self.pt_lou+self.ps_lou),np.matmul(inv(self.ltot+self.red),self.p0_lou))\n return np.real(-lifetime)\n \n def triplet_yield(self):\n trip_yield = 0.0\n self.sample_angles()\n self.Vectors()\n self.Tot_zeeman_field()\n self.Hamiltonian_Matrix()\n self.liouville()\n self.rank_2_g_tensor()\n self.rank_2_dipolar()\n self.rank_2_hyperfine()\n self.rank_two_component()\n self.Redfield_Matrix()\n \n trip_yield = np.matmul(self.pt_lou,np.matmul(inv(self.ltot+self.red),self.p0_lou))\n return np.real(-self.kt*trip_yield)\n \n#-----------------------------------------------------------------\n# -----------------------------------------------------------------\n# -----------------------------------------------------------------\n# Main, units of mT\n \ndef transform(N,T):\n T_prime = np.matmul(np.transpose(N),np.matmul(T,N))\n return T_prime\n \ndef array_construct(axx,ayy,azz,axy,axz,ayz):\n A = np.array([[axx,axy,axz],[axy,ayy,ayz],[axz,ayz,azz]])\n return A\n\n\ndef inertia_tensor(data):\n \n c_of_m = np.zeros(3)\n total_m = 0.0\n \n for i in range(0,len(data[:,0])):\n total_m += data[i,0]\n c_of_m +=data[i,1:4]*data[i,0]\n \n c_of_m = c_of_m/total_m\n # Convert coordinates such that they are centred at the centre of mass\n com_dat = np.zeros_like(data)\n \n com_dat[:,0] = data[:,0]\n com_dat[:,1:4] = data[:,1:4]-c_of_m\n \n inertia = np.zeros([3,3])\n \n \n for i in range(0,len(com_dat[:,0])):\n inertia[0,0] += com_dat[i,0]*(com_dat[i,2]*com_dat[i,2]+com_dat[i,3]*com_dat[i,3])\n inertia[1,1] += com_dat[i,0]*(com_dat[i,1]*com_dat[i,1]+com_dat[i,3]*com_dat[i,3])\n inertia[2,2] += com_dat[i,0]*(com_dat[i,1]*com_dat[i,1]+com_dat[i,2]*com_dat[i,2])\n \n inertia[0,1] += -com_dat[i,0]*(com_dat[i,1]*com_dat[i,2])\n 
inertia[1,0] += -com_dat[i,0]*(com_dat[i,1]*com_dat[i,2])\n \n inertia[0,2] += -com_dat[i,0]*(com_dat[i,1]*com_dat[i,3])\n inertia[2,0] += -com_dat[i,0]*(com_dat[i,1]*com_dat[i,3])\n \n inertia[2,1] += -com_dat[i,0]*(com_dat[i,3]*com_dat[i,2])\n inertia[1,2] += -com_dat[i,0]*(com_dat[i,3]*com_dat[i,2])\n \n \n val, vec = la.eig(inertia)\n a = np.copy(vec[:,0])\n vec[:,0] = vec[:,2]\n vec[:,2] = a\n return vec\n\ndef rad_tensor_mol_axis(transform_mol,transform_dmj,tensor):\n return transform(transform_mol,(transform(inv(transform_dmj),tensor)))\n\ndef calc_yield(lamb,ks,kt,kstd,temp,temp_dat,lifetime_exp_zero,lifetime_exp_res,lifetime_exp_high,J):\n\n\n # Define variables, initial frame\n rad_fram_aniso_g1 = np.array([[0.0006,0.0,0.0],[0.0,0.0001,0.0],[0.0,0.0,-0.0009]])\n rad_fram_aniso_g2 = np.array([[0.0010,0.0,0.0],[0.0,0.0007,0.0],[0.0,0.0,-0.0020]])\n \n rad_fram_aniso_hyperfine_1 = np.zeros([19,3,3])\n rad_fram_aniso_hyperfine_1[0] = array_construct(0.018394,0.00575,-0.024144,0.119167,-0.090257,-0.105530)\n rad_fram_aniso_hyperfine_1[1] = array_construct(-0.030255,0.134767,-0.104512,0.111178,0.03952,0.065691)\n rad_fram_aniso_hyperfine_1[2] = array_construct(0.041327,-0.039294,0.002033,0.017961,0.78922,0.025615)\n rad_fram_aniso_hyperfine_1[3] = array_construct(0.065617,-0.016154,-0.049462,0.036655,0.014217,0.004047)\n rad_fram_aniso_hyperfine_1[4] = array_construct(0.069089,-0.054902,-0.014187,0.013749,-0.075976,-0.006477)\n rad_fram_aniso_hyperfine_1[5] = array_construct(0.098308,-0.041108,-0.0572,-0.024641,0.013959,0.002803)\n rad_fram_aniso_hyperfine_1[6] = array_construct(0.017844,0.006183,-0.024028,-00.119099,-0.090068,0.105661)\n rad_fram_aniso_hyperfine_1[7] = array_construct(-0.030775,0.135406,-0.104631,-0.110876,0.039322,-0.065607)\n rad_fram_aniso_hyperfine_1[8] = array_construct(0.041235,-0.039174,-0.002061,-0.018150,0.078901,-0.025838)\n rad_fram_aniso_hyperfine_1[9] = array_construct(0.065415,-0.015957,-0.049358,-0.036874,0.014222,-0.004080)\n rad_fram_aniso_hyperfine_1[10] = array_construct(0.069102,-0.054901,-0.014201,-0.014035,-0.075981,0.006618)\n rad_fram_aniso_hyperfine_1[11] = array_construct(0.098464,-0.041245,-0.0571219,0.024346,0.014054,-0.002814)\n rad_fram_aniso_hyperfine_1[12] = array_construct(0.036159,-0.00026,-0.035899,0.038259,-0.007026,-0.004047)\n rad_fram_aniso_hyperfine_1[13] = array_construct(0.036159,-0.00026,-0.035899,0.038259,-0.007026,-0.004047)\n rad_fram_aniso_hyperfine_1[14] = array_construct(0.036159,-0.00026,-0.035899,0.038259,-0.007026,-0.004047)\n rad_fram_aniso_hyperfine_1[15] = array_construct(0.035983,-0.000104,-0.035879,-0.038338,-0.007021,0.004066)\n rad_fram_aniso_hyperfine_1[16] = array_construct(0.035983,-0.000104,-0.035879,-0.038338,-0.007021,0.004066)\n rad_fram_aniso_hyperfine_1[17] = array_construct(0.035983,-0.000104,-0.035879,-0.038338,-0.007021,0.004066)\n rad_fram_aniso_hyperfine_1[18] = array_construct(-0.772676,-0.7811,1.553776,0.000000,-0.061480,0.000443)\n\n rad_fram_aniso_hyperfine_2 = np.zeros([6,3,3])\n rad_fram_aniso_hyperfine_2[0] = array_construct(0.011586,0.032114,-0.0437,-0.101834,-0.000008,0.000014)\n rad_fram_aniso_hyperfine_2[1] = array_construct(0.011586,0.032114,-0.0437,-0.101834,0.000014,0.000008)\n rad_fram_aniso_hyperfine_2[2] = array_construct(0.011586,0.032114,-0.0437,-0.101834,0.000014,0.000008)\n rad_fram_aniso_hyperfine_2[3] = array_construct(0.011586,0.032114,-0.0437,-0.101834,-0.000008,0.000014)\n rad_fram_aniso_hyperfine_2[4] = array_construct(0.0352,0.034,-0.0692,0.0,0.0,0.0)\n 
rad_fram_aniso_hyperfine_2[5] = array_construct(0.0352,0.034,-0.0692,0.0,0.0,0.0)\n\n # axis frames\n data_xyz = np.loadtxt('dmj-an-fn1-ndi-opt.txt',delimiter=',')\n transform_mol = inertia_tensor(data_xyz)\n \n dmj_xyz = np.loadtxt('dmj_in_fn1.txt',delimiter=',')\n transform_dmj = inertia_tensor(dmj_xyz)\n \n ndi_xyz = np.loadtxt('NDI_in_fn1.txt',delimiter=',')\n transform_ndi = inertia_tensor(ndi_xyz)\n \n # Convert to molecular frame\n aniso_g1 = rad_tensor_mol_axis(transform_mol,transform_dmj,rad_fram_aniso_g1)\n aniso_g2 = rad_tensor_mol_axis(transform_mol,transform_ndi,rad_fram_aniso_g2)\n\n aniso_hyperfine_1 = rad_tensor_mol_axis(transform_mol,transform_dmj,rad_fram_aniso_hyperfine_1)\n aniso_hyperfine_2 = rad_tensor_mol_axis(transform_mol,transform_ndi,rad_fram_aniso_hyperfine_2)\n \n \n # for n=1 \n radius = 20.986e-10 \n \n cnst = (1.0e3*1.25663706e-6*1.054e-34*1.766086e11)/(4.0*np.pi*radius**3)\n aniso_dipolar = np.array([[1.0,0.0,0.0],[0.0,1.0,0.0],[0.0,0.0,-2.0]])*cnst\n \n # Isotropic components\n g1_iso = 2.0031\n g2_iso = 2.0040\n \n # ISO h1 for the anti conformation\n iso_h1 = np.array([[2.308839,0.903770,-0.034042,-0.077575,1.071863,0.258828,2.308288,0.0902293,-0.034202,0.077648,1.073569,0.259878,-0.166563,-0.166563,-0.166563,-0.166487,-0.166487,-0.166487,0.831260]])\n \n iso_h2 = np.array([[-0.1927,-0.1927,-0.1927,-0.1927,-0.0963,-0.0963]])\n \n spin_numbers_1 = np.array([[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,1.0]])\n spin_numbers_2 = np.array([[0.5,0.5,0.5,0.5,1.0,1.0]]) \n\n field = np.reshape(temp_dat[:,0],(len(temp_dat[:,0])))\n data_y = np.reshape(temp_dat[:,1],(len(temp_dat[:,1])))\n \n sampled_field = np.linspace(0.0,120.0,40)\n triplet_yield = np.zeros_like(sampled_field)\n standard_error = np.zeros_like(sampled_field) \n compound_error = np.zeros_like(sampled_field) \n\n\n num_samples = 20\n samples = np.arange(1.0,np.float(num_samples))\n trip = np.zeros_like(samples)\n w =5.0 \n \n#--------------------------------------------------------------------------------------------------------------------------------------\n#zero field lifetime\n \n lifetime_zero = 0.0\n zero = np.zeros_like(samples)\n # zero field lifetime\n for index, item in enumerate(samples):\n relaxation_0 = rotational_relaxation(aniso_dipolar,g1_iso,g2_iso,aniso_g1,aniso_g2,iso_h1,iso_h2,aniso_hyperfine_1,aniso_hyperfine_2,spin_numbers_1,spin_numbers_2,0.0,J,ks,kt,lamb,temp,kstd)\n zero[index] = relaxation_0.lifetime()\n lifetime_zero += zero[index]\n zero_error = sts.sem(zero) \n lifetime_zero = np.float(lifetime_zero)/np.float(num_samples)\n lifetime_dif_zero = lifetime_zero - lifetime_exp_zero\n w_0 = w/lifetime_exp_zero\n \n \n#--------------------------------------------------------------------------------------------------------------------------------------\n#resonance field lifetime (B=2J)\n \n lifetime_res = 0.0\n res = np.zeros_like(samples)\n # zero field lifetime\n for index, item in enumerate(samples):\n relaxation_0 = rotational_relaxation(aniso_dipolar,g1_iso,g2_iso,aniso_g1,aniso_g2,iso_h1,iso_h2,aniso_hyperfine_1,aniso_hyperfine_2,spin_numbers_1,spin_numbers_2,2.0*J,J,ks,kt,lamb,temp,kstd)\n res[index] = relaxation_0.lifetime()\n lifetime_res += res[index]\n res_error = sts.sem(res) \n lifetime_res = np.float(lifetime_res)/np.float(num_samples)\n lifetime_dif_res = lifetime_res - lifetime_exp_res\n w_res = w/lifetime_exp_res\n 
\n#--------------------------------------------------------------------------------------------------------------------------------------\n# High field lifetime \n \n lifetime_high = 0.0\n high = np.zeros_like(samples)\n # zero field lifetime\n for index, item in enumerate(samples):\n relaxation_0 = rotational_relaxation(aniso_dipolar,g1_iso,g2_iso,aniso_g1,aniso_g2,iso_h1,iso_h2,aniso_hyperfine_1,aniso_hyperfine_2,spin_numbers_1,spin_numbers_2,120.0,J,ks,kt,lamb,temp,kstd)\n high[index] = relaxation_0.lifetime()\n lifetime_high += high[index]\n high_error = sts.sem(high) \n lifetime_high = np.float(lifetime_high)/np.float(num_samples)\n lifetime_dif_high = lifetime_high - lifetime_exp_high\n w_h = w/lifetime_exp_high\n \n#--------------------------------------------------------------------------------------------------------------------------------------\n \n \n for index_field,item_field in enumerate(sampled_field):\n total_t = 0.0\n print(\"%\",100.0*(np.float(index_field))/(np.float(len(sampled_field))))\n for index, item in enumerate(samples):\n np.random.seed(index)\n \n # Define class \n relaxation = rotational_relaxation(aniso_dipolar,g1_iso,g2_iso,aniso_g1,aniso_g2,iso_h1,iso_h2,aniso_hyperfine_1,aniso_hyperfine_2,spin_numbers_1,spin_numbers_2,item_field,J,ks,kt,lamb,temp,kstd)\n # Calculate triplet yield\n trip[index] = relaxation.triplet_yield()\n total_t += trip[index]\n \n triplet_yield[index_field] = total_t\n standard_error[index_field] = sts.sem(trip)\n compound_error[index_field] = np.sqrt(standard_error[0]*standard_error[0]*((1.0/triplet_yield[0])**2 + (standard_error[index_field]*standard_error[index_field]*(triplet_yield[index_field]/triplet_yield[0])**2)))\n \n compound_error[0] = 0.0\n triplet_yield = triplet_yield /(triplet_yield[0])\n \n\n\n tck = interpolate.splrep(sampled_field, triplet_yield, s=0)\n xnew = field\n ynew = interpolate.splev(xnew, tck, der=0)\n # lagrange type terms to ensure that the experimental lifetime is correctly calculated and that Kt is greater than Ks\n val = np.float(5.0*np.sum(((ynew)-(data_y-data_y[0]+1.0))*((ynew)-(data_y-data_y[0]+1.0))) + (lifetime_dif_zero*w_0)**2 + (lifetime_dif_res*w_res)**2 + (lifetime_dif_high*w_h)**2)\n \n plt.clf()\n #plt.plot(field,ynew,'o')\n plt.plot(sampled_field,triplet_yield,'o--')\n plt.plot(field,(data_y-data_y[0]+1.0),'o')\n plt.fill_between(sampled_field, triplet_yield - 2.0*compound_error, triplet_yield + 2.0*compound_error,\n color='salmon', alpha=0.4)\n plt.ylabel('Relative Triplet Yield')\n plt.title('FN1 at (K) '+str(temp))\n plt.xlabel('field (mT)')\n plt.savefig(\"fn1\"+str(temp)+\".pdf\") \n plt.show()\n \n plt.clf()\n plt.plot(np.array([0.0,2.0*J,120.0]),np.array([lifetime_zero,lifetime_res,lifetime_high]), label = 'Calculated')\n plt.plot(np.array([0.0,2.0*J,120.0]),np.array([lifetime_exp_zero,lifetime_exp_res,lifetime_exp_high]),label = 'Experimental')\n plt.fill_between([0.0,2.0*J,120.0], np.array([lifetime_zero,lifetime_res,lifetime_high]) - 2.0*np.array([zero_error,res_error,high_error]), np.array([lifetime_zero,lifetime_res,lifetime_high]) + 2.0*np.array([zero_error,res_error,high_error]),color='g', alpha=0.4)\n plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=2,\n ncol=2, mode=\"expand\", borderaxespad=-1.)\n plt.xlabel('Field (mT)')\n plt.ylabel('Lifetime')\n plt.title('FN1 extreme narrowing limit lifetime at (K) '+str(temp))\n plt.savefig(\"FN1_lifetimes_\"+str(temp)+\".pdf\")\n plt.show()\n \n with open(\"fn1_\"+str(temp)+\"_no_lam_yield.txt\",\"w+\") as ff:\n for index in 
range(len(sampled_field)):\n ff.write(str(sampled_field[index])+',')\n ff.write(str(triplet_yield[index])+',')\n ff.write(str(compound_error[index]))\n ff.write('\\n')\n\n \n with open(\"fn1_\"+str(temp)+\"_no_lam_lifetime.txt\",\"w+\") as fl:\n fl.write(str(lifetime_zero)+',')\n fl.write(str(lifetime_res)+',')\n fl.write(str(lifetime_high))\n \n print('lamb,ks,kt,kstd')\n print(lamb,ks,kt,kstd)\n print(\"val\",val)\n return val\n\nnp.random.seed()\n# x0 = lamb,ks,kt,kstd\nbnds = [(1.0e-4, 0.3),(1.0e-3, 1.0e0),(1e-3, 1.0e1)]\n\ntemp = 273.0\n\n\"\"\"\nwith open(str(temp)+\"_dat_fn1.txt\",\"w+\") as p:\n with open(str(temp)+\"_results_fn1.txt\",\"w+\") as f:\n f.write(\"x0 = lamb,ks,kt,kstd\\n\") \n #---------------------------------------------------------------------------------------------------------------------------\n\n #x0 = [ 0.09572984547277354,0.15727827693717875,4.087967107400256,3.5534241548732792]\n x0 = [ 0.15,0.15727827693717875,4.087967107400256,2.6] # guess\n \n temp_dat = np.loadtxt('t_273.txt',delimiter=',')\n\n lifetime_exp_zero = 2.69043554319234\n lifetime_exp_res = 1.1276501297107735\n lifetime_exp_high = 2.631178193792446\n \n kstd = 0.0\n J = 20.25\n\n res = differential_evolution(lambda x1,x2,x3,x4,x5,x6,x7,x8: calc_yield(*x1,x2,x3,x4,x5,x6,x7,x8),bounds=bnds,args=(kstd,temp,temp_dat,lifetime_exp_zero,lifetime_exp_res,lifetime_exp_high,J),maxiter=20)\n \n f.write(\"\\n\")\n f.write(\"x0 for T=273k\\n\")\n f.write(str(res)+\"\\n\")\n for i in range(0,len(res.x)):\n p.write(str(res.x[i])+\",\")\n p.write(str(temp)+\"\\n\")\n\"\"\"\ntemp_dat = np.loadtxt('t_273.txt',delimiter=',')\n\nlifetime_exp_zero = 2.69043554319234\nlifetime_exp_res = 1.1276501297107735\nlifetime_exp_high = 2.631178193792446\n\nJ = 20.25\n\n# No initital triplet pop\nlamb = 0.0\nks = 0.17479786307762618\nkt = 7.0850755446767195\nkstd = 5.417873139773988\n\n#experimentalrates\n#kt = 0.268224\n#ks = 0.37736\n#lamb = 0.05480984\n#kstd = 7.3996459\n\n#\n#lamb = 0.1499999998918708\n#ks = 0.15727827765561803\n#kt = 4.087967244484629 \n#kstd = 2.599999947530394\n\ncalc_yield(0.18172880558263446,0.13963356743066382,10.0,0.0,temp,temp_dat,lifetime_exp_zero,lifetime_exp_res,lifetime_exp_high,J) \n\n\n","sub_path":"Extreme Narrowing limit/FN1/initial triplet pop/273/ex_nrw_lim_fn1_273.py","file_name":"ex_nrw_lim_fn1_273.py","file_ext":"py","file_size_in_byte":32891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"602186657","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\n@author: zhaogao\n@license: (C) Copyright 2013-2018.\n@contact: gaozhao89@qq.com\n@software: learn-py\n@file: len210_启动一个WEB浏览器.py\n@time: 28/05/2018 10:34 PM\n'''\n\n# 你想通过脚本启动浏览器并打开指定的 URL 网页\n# webbrowser 模块能被用来启动一个浏览器,并且与平台无关。例如:\n\nimport webbrowser\n\nwebbrowser.open('http://www.python.org')\n\n# 它会使用默认浏览器打开指定网页。如果你还想对网页打开方式做更多控制,还可以使用下面这些函数\n# Open the page in a new browser window\nwebbrowser.open_new('http://www.python.org')\n\n# Open the page in a new browser tab\nwebbrowser.open_new_tab('http://www.python.org')\n\n# 如果你想指定浏览器类型,可以使用 webbrowser.get() 函数来指定某个特定浏览 器\nc = webbrowser.get('firefox')\nc.open('http://www.python.org')\nc.open_new_tab('http://docs.python.org')\n\n# 浏览器的支持,可以查阅 https://docs.python.org/3/library/webbrowser.html\n# 在脚本中打开浏览器有时候会很有用。例如,某个脚本执行某个服务器发布任务, 你想快速打开一个浏览器来确保它已经正常运行了。或者是某个程序以 HTML 网页格 
式输出数据,你想打开浏览器查看结果","sub_path":"cook/len210_启动一个WEB浏览器.py","file_name":"len210_启动一个WEB浏览器.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"56742842","text":"from decimal import Decimal\n\nfrom django.db import models\n\nfrom simple_history.models import HistoricalRecords\n\nimport accountifie.gl.bmo\n\nDZERO = Decimal('0')\n\nclass CashflowAllocation(models.Model):\n cashflow = models.ForeignKey('base.Cashflow')\n amount = models.DecimalField(max_digits=11, decimal_places=2)\n counterparty = models.ForeignKey('gl.Counterparty', null=True, blank=True, help_text=\"We need to match this up\")\n trans_type = models.ForeignKey('gl.Account', null=True, blank=True, help_text=\"We need to match this up\")\n project = models.ForeignKey('gl.Project', null=True, blank=True)\n tag = models.CharField(max_length=30, null=True, blank=True)\n\n \n history = HistoricalRecords()\n\n class Meta:\n app_label = 'base'\n db_table = 'base_cashflowallocation'\n\n def __unicode__(self):\n return '%.2f, C/P %s, project %s, tag %s' %(self.amount, self.counterparty, self.project, self.tag)\n\n\n\nclass Cashflow(models.Model, accountifie.gl.bmo.BusinessModelObject):\n ext_account = models.ForeignKey('gl.ExternalAccount')\n post_date = models.DateField()\n amount = models.DecimalField(max_digits=11, decimal_places=2)\n description = models.TextField(max_length=200, null=True)\n external_id = models.CharField(max_length=20, null=True)\n trans_type = models.ForeignKey('gl.Account', null=True, blank=True, help_text=\"We need to match this up\")\n counterparty = models.ForeignKey('gl.Counterparty', null=True, blank=True, help_text=\"We need to match this up\")\n tag = models.CharField(max_length=30, null=True, blank=True)\n \n history = HistoricalRecords()\n\n class Meta:\n app_label = 'base'\n db_table = 'base_cashflow'\n\n def __unicode__(self):\n return '%.2f: %s: %s' % (self.amount, self.external_id, self.post_date.strftime('%d-%b-%y'))\n\n def save(self):\n models.Model.save(self)\n self.update_gl()\n\n def _get_alloc_lines(self):\n allocations = self.cashflowallocation_set.all()\n \n alloc_lines = []\n running_total = DZERO\n if len(allocations) > 0:\n for allocation in allocations:\n if allocation.project is None:\n tags = []\n else:\n tags = ['project_%s' % allocation.project.id]\n\n alloc_lines.append((allocation.trans_type, DZERO - Decimal(allocation.amount), allocation.counterparty, tags))\n running_total += Decimal(allocation.amount)\n\n if abs(Decimal(self.amount) - running_total) >= Decimal('0.005'):\n alloc_lines.append((self.ext_account.gl_account, DZERO - (Decimal(self.amount) - running_total), None, []))\n \n return alloc_lines\n\n\n def get_gl_transactions(self):\n\n cf_acct = self.ext_account.gl_account\n depositary = self.ext_account.counterparty\n\n tran = []\n\n \n tran = dict(company=self.ext_account.company,\n date=self.post_date,\n comment= \"%s: %s\" % (self.id, self.description[:75]),\n trans_id='%s.%s.%s' % (self.ext_account.label, self.id, 'CFLOW'),\n lines=[(cf_acct, Decimal(self.amount), depositary, []),\n (self.trans_type, -Decimal(self.amount), self.counterparty, [self.tag] if self.tag else [])]\n )\n\n #tran['lines'] += self._get_alloc_lines()\n trans = [tran]\n \n return trans","sub_path":"cpd/base/models/bank_accounts.py","file_name":"bank_accounts.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} 
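The get_gl_transactions method in the bank_accounts.py record above posts each cashflow as a pair of offsetting GL lines, so the amounts in any transaction it emits should net to zero. A minimal sketch of that invariant as a standalone check (the assert_balanced helper and the account labels are illustrative assumptions, not part of the record):

    from decimal import Decimal

    def assert_balanced(tran):
        # Each GL line is an (account, amount, counterparty, tags) tuple,
        # mirroring the structure built in get_gl_transactions; double-entry
        # bookkeeping requires the line amounts to sum to zero.
        total = sum(line[1] for line in tran['lines'])
        assert total == Decimal('0'), 'unbalanced transaction: %s' % total

    # Hypothetical two-line posting in the same shape as the record above.
    assert_balanced({'lines': [('cash', Decimal('25.00'), None, []),
                               ('fees', Decimal('-25.00'), None, ['tag'])]})

A check like this would also catch an allocation remainder that _get_alloc_lines failed to offset against the external account.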
+{"seq_id":"527723548","text":"from redis import Redis\nfrom redis_natives import Set, ZSet\n\n\nclass RedisWrapper(object):\n method_calls = []\n\n def __init__(self, redis):\n self._redis = redis\n\n def __getattr__(self, name):\n self.method_calls.append(name)\n return getattr(self._redis, name)\n\n\nclass RedisNativesTestCase(object):\n def setup_method(self, method):\n self.redis = RedisWrapper(Redis())\n self.redis.flushdb()\n self.test_key = 'test_key'\n self.other_key = 'other_key'\n\n\nclass SetTestCase(RedisNativesTestCase):\n def setup_method(self, method):\n super(SetTestCase, self).setup_method(method)\n self.set = Set(self.redis, self.test_key)\n self.other_set = Set(self.redis, self.other_key)\n\n\nclass IntegerSetTestCase(RedisNativesTestCase):\n def setup_method(self, method):\n super(IntegerSetTestCase, self).setup_method(method)\n self.set = Set(self.redis, self.test_key, type=int)\n self.other_set = Set(self.redis, self.other_key, type=int)\n\n\nclass ZSetTestCase(RedisNativesTestCase):\n def setup_method(self, method):\n super(ZSetTestCase, self).setup_method(method)\n self.zset = ZSet(self.redis, self.test_key)\n self.other_zset = ZSet(self.redis, self.other_key)\n self.redis.flushdb()\n self.redis.method_calls = []\n\n\nclass IntegerZSetTestCase(RedisNativesTestCase):\n def setup_method(self, method):\n super(IntegerZSetTestCase, self).setup_method(method)\n self.zset = ZSet(self.redis, self.test_key, type=int)\n self.other_zset = ZSet(self.redis, self.other_key, type=int)\n self.redis.flushdb()\n self.redis.method_calls = []\n","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"284804150","text":"import os, re\nfrom datetime import datetime\nfrom flask import Flask, request, flash, url_for, redirect, \\\n render_template, abort, send_from_directory, Response\nfrom pymongo import MongoClient\nimport json\n\napp = Flask(__name__)\napp.config.from_pyfile('flaskapp.cfg')\n\nmongo = MongoClient(app.config['MONGODB_URI'])\ndb_tvshow = mongo.tvshow.tvshow\ndb_channels = mongo.tvshow.channel\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/<path:resource>')\ndef serveStaticResource(resource):\n return send_from_directory('static/', resource)\n\n@app.route(\"/api/hour\")\ndef hour():\n date = request.args.get('date')\n if date is None or date == \"\":\n date = datetime.now()\n else:\n date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\n\n query = {\n \"start\": {\"$lte\": date},\n \"end\": {\"$gte\": date},\n }\n\n channel = request.args.get('channel')\n if channel is not None and channel != \"\":\n query['channel.title'] = re.compile(channel, re.IGNORECASE)\n\n title = request.args.get('title')\n if title is not None and title != \"\":\n query['title'] = re.compile(title, re.IGNORECASE)\n\n result = db_tvshow.find(query)\n ret = {\n 'count': result.count(),\n 'items': [to_json(r) for r in result]\n }\n return Response(json.dumps(ret), content_type='text/json')\n\n@app.route(\"/api/channel\")\ndef channel():\n query = {}\n name = request.args.get('name')\n if name is not None and name != \"\":\n query['title'] = re.compile(name, re.IGNORECASE)\n\n result = db_channels.find(query, {'_id': 0})\n ret = {\n 'count': result.count(),\n 'items': [r for r in result]\n }\n return Response(json.dumps(ret), content_type='text/json')\n\ndef to_json(obj):\n return {\n 'title': obj['title'],\n 'start': 
obj['start'].isoformat(),\n 'end': obj['end'].isoformat(),\n 'duraction': obj['duraction'],\n 'desc': obj['desc'],\n 'channel': {\n 'title': obj['channel']['title'],\n 'numbers': obj['channel']['numbers']\n }\n }\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"641320349","text":"import os\nimport sys\nimport logging\nimport gzip\nfrom codecs import open, getreader\n\nlogger = logging.getLogger(__name__)\n\ndef get_vcf_handle(fsock=None, infile=None):\n \"\"\"Open the vcf file and return a handle\"\"\"\n\n vcf = None\n if (fsock or infile):\n \n if fsock:\n # if not infile and hasattr(fsock, 'name'):\n logger.info(\"Reading vcf form stdin\")\n if sys.version_info < (3, 0):\n logger.info(\"Using codecs to read stdin\")\n sys.stdin = getreader('utf-8')(fsock)\n \n vcf = sys.stdin\n \n else:\n logger.info(\"Reading vcf from file {0}\".format(infile))\n file_name, file_extension = os.path.splitext(infile)\n if file_extension == '.gz':\n logger.debug(\"Vcf is zipped\")\n vcf = getreader('utf-8')(gzip.open(infile), errors='replace')\n elif file_extension == '.vcf':\n vcf = open(infile, mode='r', encoding='utf-8', errors='replace')\n else:\n raise IOError(\"File is not in a supported format!\\n\"\n \" Or use correct ending(.vcf or .vcf.gz)\")\n else:\n raise IOError(\"Please provide a fsock or infile\")\n \n return vcf","sub_path":"vcftoolbox/get_file_handle.py","file_name":"get_file_handle.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"553450426","text":"# Constants.py\n# This file holds all Constants which span multiple files\n\n##\n##--------------------SWITCH BOARD ----------------#\n##\nPACKET_HEADER = \"borat\"\n##### MODULE SWITCHES ####\nUSE_VISION = True # Disable if you want faster processing\n\n#### DEBUG SWITCHES ####\nUSE_SNAPSHOT = True # Enable if you want to take pictures with spacebar\n\n\n# Localization Related\nODOMETRY_DEBUG = False\nDEBUG_LOC_INFO = False\nDEBUG_LINES = False\nDEBUG_CORNERS = False\nDEBUG_FIELD_OBJECTS = False\n\n##\n##--------------------WALK TIME CONSTANTS -------------#\n##\nTIME_STEP = 40\nTIME_PER_STEP = TIME_STEP/1000. 
\n\n##\n##--------------------TEAM STUFF-----------------------#\n##\nNUM_PLAYERS_PER_TEAM = 4\n\n# Setup colors\nNUM_GAME_TEAM_COLORS = 2\nteamColorDict = dict(zip(range(NUM_GAME_TEAM_COLORS),\\\n\t\t (\"TEAM_BLUE\",\"TEAM_RED\")))\n(TEAM_BLUE,TEAM_RED) = range(NUM_GAME_TEAM_COLORS)\n\n##\n##--------------------VISION CONNECTION CONSTANTS------#\n##\nCAMERA_FPS = 10\n# image constants, used for angle x,y setters\nIMAGE_WIDTH = 640\nIMAGE_HEIGHT = 480\nIMAGE_CENTER_X = IMAGE_WIDTH / 2.0\nFOV_X_DEG = 46.4\nFOV_Y_DEG = 34.8\nIMAGE_ANGLE_X = IMAGE_WIDTH / FOV_X_DEG\nIMAGE_ANGLE_Y = IMAGE_HEIGHT / FOV_Y_DEG\n\nNUM_TOTAL_BALL_VALUES = 40\nNUM_VISION_BALL_VALUES = 9\nNUM_VISION_FIELD_OBJECT_VALUES = 9\nNUM_VISION_BACKSTOP_VALUES = 10\n\n# confidence system for landmark values\nNUM_FIELD_OBJECT_CERTAINTIES = 3\n(NOTSURE,\n MILDLYSURE,\n SURE) = range(NUM_FIELD_OBJECT_CERTAINTIES)\n\n# confidence system for distances to landmark values\nNUM_FIELD_OBJECT_DIST_CERTAINTIES = 4\n(BOTH_UNSURE,\n WIDTH_UNSURE,\n HEIGHT_UNSURE,\n BOTH_SURE) = range(NUM_FIELD_OBJECT_DIST_CERTAINTIES)\n\n##\n##--------------------LOCALIZATION CONSTANTS-----------#\n##\n\n# Set FIELD_SCALE to 1. for Official and 0.9 for Lab\n# For more information on field values see Rules_2008 Section 1\nOFFICIAL_SCALE = 1.\nFIELD_SCALE = OFFICIAL_SCALE\n\n#---Landmark Constants------#\n# Notes:\n# FIELD GREEN -- this relates to measurements along the outer edge of the field\n# (ie , out of bounds, the very outside green part)\n# FIELD WHITE -- this relates to the 'infield' or 'inbounds'\n\nFIELD_WHITE_WIDTH = 400. * FIELD_SCALE\nFIELD_WHITE_HEIGHT = 600. * FIELD_SCALE\nFIELD_GREEN_WIDTH = 440. * FIELD_SCALE\nFIELD_GREEN_HEIGHT = 680. * FIELD_SCALE\nFIELD_WIDTH = FIELD_GREEN_WIDTH\nFIELD_HEIGHT = FIELD_GREEN_HEIGHT\n\nCENTER_FIELD_X = FIELD_GREEN_WIDTH / 2\nCENTER_FIELD_Y = FIELD_GREEN_HEIGHT / 2\n\nFIELD_GREEN_LEFT_SIDELINE_X = 0\nFIELD_GREEN_RIGHT_SIDELINE_X = FIELD_GREEN_WIDTH\nFIELD_GREEN_BOTTOM_SIDELINE_Y = 0\nFIELD_GREEN_TOP_SIDELINE_Y = FIELD_GREEN_HEIGHT\n\nGREEN_PAD_Y = 40. * FIELD_SCALE\nGREEN_PAD_X = 20. * FIELD_SCALE\n\nFIELD_WHITE_BOTTOM_SIDELINE_Y = GREEN_PAD_Y\nFIELD_WHITE_TOP_SIDELINE_Y = FIELD_WHITE_HEIGHT + GREEN_PAD_Y\nFIELD_WHITE_LEFT_SIDELINE_X = GREEN_PAD_X\nFIELD_WHITE_RIGHT_SIDELINE_X = FIELD_WHITE_WIDTH + GREEN_PAD_X\n\nOPP_GOAL_HEADING = 0.\nMY_GOAL_HEADING = 180.\n\n# BEACON CONSTANTS\nLANDMARK_RIGHT_BEACON_X = FIELD_GREEN_WIDTH - 5.\nLANDMARK_RIGHT_BEACON_Y = FIELD_GREEN_HEIGHT / 2.\nLANDMARK_LEFT_BEACON_X = 5.\nLANDMARK_LEFT_BEACON_Y = FIELD_GREEN_HEIGHT / 2.\n\n# GOAL CONSTANTS\n# my left post is left of goalie defending my goal facing the opponent\nLANDMARK_MY_GOAL_LEFT_POST_X = CENTER_FIELD_X - 70\nLANDMARK_MY_GOAL_RIGHT_POST_X = CENTER_FIELD_X + 70\nLANDMARK_OPP_GOAL_LEFT_POST_X = CENTER_FIELD_X - 70\nLANDMARK_OPP_GOAL_RIGHT_POST_X = CENTER_FIELD_X + 70\n\n# measure to the center of the posts, 5 cm off the line\nLANDMARK_MY_GOAL_LEFT_POST_Y = FIELD_WHITE_BOTTOM_SIDELINE_Y-5.0\nLANDMARK_MY_GOAL_RIGHT_POST_Y = FIELD_WHITE_BOTTOM_SIDELINE_Y-5.0\nLANDMARK_OPP_GOAL_LEFT_POST_Y = FIELD_WHITE_TOP_SIDELINE_Y+5.0\nLANDMARK_OPP_GOAL_RIGHT_POST_Y = FIELD_WHITE_TOP_SIDELINE_Y+5.0\n\nCENTER_CIRCLE_RADIUS = 65. # not scaled\n\nGOALBOX_HEIGHT = 60. # not scaled\nGOALBOX_WIDTH = 200. # not scaled\n\nMIDFIELD_X = FIELD_GREEN_WIDTH / 2. 
\nMIDFIELD_Y = FIELD_GREEN_HEIGHT / 2.\n\nGOALBOX_TOP_Y = GOALBOX_HEIGHT + GREEN_PAD_Y\nGOALBOX_LEFT_X = MIDFIELD_X - GOALBOX_WIDTH / 2.\nGOALBOX_RIGHT_X = MIDFIELD_X + GOALBOX_WIDTH / 2.\n\n# my goal box constants relative to (0,0) on my team\nMY_GOALBOX_LEFT_X = MIDFIELD_X - GOALBOX_WIDTH / 2.\nMY_GOALBOX_RIGHT_X = MIDFIELD_X + GOALBOX_WIDTH / 2.\nMY_GOALBOX_BOTTOM_Y = GREEN_PAD_Y # bottom as in closest to (0,0)\nMY_GOALBOX_TOP_Y = GREEN_PAD_Y + GOALBOX_HEIGHT\n\n# opp goal box constants relative to (0,0) on my team\nOPP_GOALBOX_LEFT_X = MIDFIELD_X - GOALBOX_WIDTH / 2.\nOPP_GOALBOX_RIGHT_X = MIDFIELD_X + GOALBOX_WIDTH / 2.\nOPP_GOALBOX_BOTTOM_Y = FIELD_GREEN_HEIGHT-GREEN_PAD_Y-GOALBOX_HEIGHT\nOPP_GOALBOX_TOP_Y = FIELD_GREEN_HEIGHT-GREEN_PAD_Y\n\n#LANDMARK TUPLE\nNUM_LANDMARKS = 21\n\n(LANDMARK_MY_GOAL_LEFT_POST_ID,\n LANDMARK_MY_GOAL_RIGHT_POST_ID,\n LANDMARK_OPP_GOAL_LEFT_POST_ID,\n LANDMARK_OPP_GOAL_RIGHT_POST_ID,\n LANDMARK_LEFT_BEACON_ID,\n LANDMARK_RIGHT_BEACON_ID,\n LANDMARK_BALL_ID,\n LANDMARK_MY_CORNER_LEFT_L_ID,\n LANDMARK_MY_CORNER_RIGHT_L_ID,\n LANDMARK_MY_GOAL_LEFT_T_ID,\n LANDMARK_MY_GOAL_RIGHT_T_ID,\n LANDMARK_MY_GOAL_LEFT_L_ID,\n LANDMARK_MY_GOAL_RIGHT_L_ID,\n LANDMARK_CENTER_LEFT_T_ID,\n LANDMARK_CENTER_RIGHT_T_ID,\n LANDMARK_OPP_CORNER_LEFT_L_ID,\n LANDMARK_OPP_CORNER_RIGHT_L_ID,\n LANDMARK_OPP_GOAL_LEFT_T_ID,\n LANDMARK_OPP_GOAL_RIGHT_T_ID,\n LANDMARK_OPP_GOAL_LEFT_L_ID,\n LANDMARK_OPP_GOAL_RIGHT_L_ID) = range(NUM_LANDMARKS)\n\nlandmarkTuple = (\n \"LANDMARK_MY_GOAL_LEFT_POST\",\n \"LANDMARK_MY_GOAL_RIGHT_POST\",\n \"LANDMARK_OPP_GOAL_LEFT_POST\",\n \"LANDMARK_OPP_GOAL_RIGHT_POST\",\n \"LANDMARK_LEFT_BEACON\",\n \"LANDMARK_RIGHT_BEACON\",\n \"LANDMARK_BALL\",\n \"LANDMARK_MY_CORNER_LEFT_L\",\n \"LANDMARK_MY_CORNER_RIGHT_L\",\n \"LANDMARK_MY_GOAL_LEFT_T\",\n \"LANDMARK_MY_GOAL_RIGHT_T\",\n \"LANDMARK_MY_GOAL_LEFT_L\",\n \"LANDMARK_MY_GOAL_RIGHT_L\",\n \"LANDMARK_CENTER_LEFT_T\",\n \"LANDMARK_CENTER_RIGHT_T\",\n \"LANDMARK_OPP_CORNER_LEFT_L\",\n \"LANDMARK_OPP_CORNER_RIGHT_L\",\n \"LANDMARK_OPP_GOAL_LEFT_T\",\n \"LANDMARK_OPP_GOAL_RIGHT_T\",\n \"LANDMARK_OPP_GOAL_LEFT_L\",\n \"LANDMARK_OPP_GOAL_RIGHT_L\")\n\n# Landmark Lists for localization\nLANDMARK_MY_GOAL_LEFT_POST = [LANDMARK_MY_GOAL_LEFT_POST_X,\n LANDMARK_MY_GOAL_LEFT_POST_Y,\n LANDMARK_MY_GOAL_LEFT_POST_ID]\nLANDMARK_MY_GOAL_RIGHT_POST = [LANDMARK_MY_GOAL_RIGHT_POST_X,\n LANDMARK_MY_GOAL_RIGHT_POST_Y,\n LANDMARK_MY_GOAL_RIGHT_POST_ID]\nLANDMARK_OPP_GOAL_LEFT_POST = [LANDMARK_OPP_GOAL_LEFT_POST_X,\n LANDMARK_OPP_GOAL_LEFT_POST_Y,\n LANDMARK_OPP_GOAL_LEFT_POST_ID]\nLANDMARK_OPP_GOAL_RIGHT_POST = [LANDMARK_OPP_GOAL_RIGHT_POST_X,\n LANDMARK_OPP_GOAL_RIGHT_POST_Y,\n LANDMARK_OPP_GOAL_RIGHT_POST_ID]\nLANDMARK_LEFT_BEACON = [LANDMARK_LEFT_BEACON_X,\n LANDMARK_LEFT_BEACON_Y,\n LANDMARK_LEFT_BEACON_ID]\nLANDMARK_RIGHT_BEACON = [LANDMARK_RIGHT_BEACON_X,\n LANDMARK_RIGHT_BEACON_Y,\n LANDMARK_RIGHT_BEACON_ID]\n\n# Vision IDs for landmarks\nNUM_VIS_LANDMARKS = 8\n(VISION_YGLP,\n VISION_YGRP,\n VISION_BGLP,\n VISION_BGRP,\n VISION_YB,\n VISION_BY,\n VISION_BG_BACKSTOP,\n VISION_YG_BACKSTOP,\n ) = range(NUM_VIS_LANDMARKS)\n\nvisionObjectTuple = (\"YGLP\",\n \"YGRP\",\n \"BGLP\",\n \"BGRP\",\n \"YB\",\n \"BY\")\n\n\n##\n##-------------CORNER CONSTANTS-----------------##\n##\n\n#---Corner Information---#\nNUM_CORNER_IDS = 28\nNUM_CORNER_VALUES = 5\nNUM_CORNER_VISION_VALUES = 3\nNUM_LINE_VALUES = 6\nCOMPLETELY_ABSTRACT_CORNER = 0\nNARROWLY_ABSTRACT_CORNER = 1\nSPECIFIC_CORNER = 2\nSPECIFIC_CORNER_INDEX = 14\n\n# CORNER IDS AS THE 
VISION SYSTEM DEFINES THEM\n(L_INNER_CORNER, # Completely Abstract Corner Types\n L_OUTER_CORNER,\n T_CORNER,\n CENTER_CIRCLE,\n BLUE_GOAL_T, # Narrowly Abstract Corner Types\n YELLOW_GOAL_T,\n BLUE_GOAL_RIGHT_L_OR_YELLOW_GOAL_LEFT_L,\n BLUE_GOAL_LEFT_L_OR_YELLOW_GOAL_RIGHT_L,\n CORNER_INNER_L,\n GOAL_BOX_INNER_L,\n CORNER_OR_GOAL_INNER_L,\n BLUE_GOAL_OUTER_L,\n YELLOW_GOAL_OUTER_L,\n CENTER_T,\n BLUE_CORNER_LEFT_L, # Specific Corners\n BLUE_CORNER_RIGHT_L,\n BLUE_GOAL_LEFT_T,\n BLUE_GOAL_RIGHT_T,\n BLUE_GOAL_LEFT_L,\n BLUE_GOAL_RIGHT_L,\n CENTER_BY_T,\n CENTER_YB_T,\n YELLOW_CORNER_LEFT_L,\n YELLOW_CORNER_RIGHT_L,\n YELLOW_GOAL_LEFT_T,\n YELLOW_GOAL_RIGHT_T,\n YELLOW_GOAL_LEFT_L,\n YELLOW_GOAL_RIGHT_L) = range(NUM_CORNER_IDS)\n\n# Narrowly Abstract Corner Types but the way python likes them\n#Commented out corners are identical in c and python,\n#They are included for easy reference only\n#L_INNER_CORNER = 0\n#L_OUTER_CORNER = 1\n#T_CORNER = 2\n#CENTER_CIRCLE = 3\nMY_GOAL_T = 4\nOPP_GOAL_T = 5\nMY_GOAL_RIGHT_L_OR_OPP_GOAL_LEFT_L = 6\nMY_GOAL_LEFT_L_OR_OPP_GOAL_RIGHT_L = 7\n#CORNER_INNER_L = 8\n#GOAL_BOX_INNER_L = 9\n#CORNER_OR_GOAL_INNER_L = 10\nMY_GOAL_OUTER_L = 11\nOPP_GOAL_OUTER_L = 12\n\n# Specific corner types the way python likes them\n#CENTER_T = 13\nMY_CORNER_LEFT_L = 14\nMY_CORNER_RIGHT_L = 15\nMY_GOAL_LEFT_T = 16\nMY_GOAL_RIGHT_T = 17\nMY_GOAL_LEFT_L = 18\nMY_GOAL_RIGHT_L = 19\nCENTER_LEFT_T = 20\nCENTER_RIGHT_T = 21\nOPP_CORNER_LEFT_L = 22\nOPP_CORNER_RIGHT_L = 23\nOPP_GOAL_LEFT_T = 24\nOPP_GOAL_RIGHT_T = 25\nOPP_GOAL_LEFT_L = 26\nOPP_GOAL_RIGHT_L = 27\n\nvisionCornerTuple = (\"L_INNER_CORNER\",\"L_OUTER_CORNER\",\"T_CORNER\",\"CENTER_CIRCLE\",\"BLUE_GOAL_T\",\"YELLOW_GOAL_T\",\"BLUE_GOAL_RIGHT_L_OR_YELLOW_GOAL_LEFT_L\",\"BLUE_GOAL_LEFT_L_OR_YELLOW_GOAL_RIGHT_L\",\"CORNER_INNER_L\",\"GOAL_BOX_INNER_L\",\"CORNER_OR_GOAL_INNER_L\",\"BLUE_GOAL_OUTER_L\",\"YELLOW_GOAL_OUTER_L\",\"CENTER_T\",\"BLUE_CORNER_LEFT_L\",\"BLUE_CORNER_RIGHT_L\",\"BLUE_GOAL_LEFT_T\",\"BLUE_GOAL_RIGHT_T\",\"BLUE_GOAL_LEFT_L\",\"BLUE_GOAL_RIGHT_L\",\"CENTER_BY_T\",\"CENTER_YB_T\",\"YELLOW_CORNER_LEFT_L\",\"YELLOW_CORNER_RIGHT_L\",\"YELLOW_GOAL_LEFT_T\",\"YELLOW_GOAL_RIGHT_T\",\"YELLOW_GOAL_LEFT_L\",\"YELLOW_GOAL_RIGHT_L\")\n\ncornerTuple = (\"L_INNER_CORNER\",\"L_OUTER_CORNER\",\"T_CORNER\",\"CENTER_CIRCLE\",\"MY_GOAL_T\",\"OPP_GOAL_T\",\"MY_GOAL_RIGHT_L_OR_OPP_GOAL_LEFT_L\",\"MY_GOAL_LEFT_L_OR_OPP_GOAL_RIGHT_L\",\"CORNER_INNER_L\",\"GOAL_BOX_INNER_L\",\"CORNER_OR_GOAL_INNER_L\",\"MY_GOAL_OUTER_L\",\"OPP_GOAL_OUTER_L\",\"CENTER_T\",\"MY_CORNER_LEFT_L\",\"MY_CORNER_RIGHT_L\",\"MY_GOAL_LEFT_T\",\"MY_GOAL_RIGHT_T\",\"MY_GOAL_LEFT_L\",\"MY_GOAL_RIGHT_L\",\"CENTER_LEFT_T\",\"CENTER_RIGHT_T\",\"OPP_CORNER_LEFT_L\",\"OPP_CORNER_RIGHT_L\",\"OPP_GOAL_LEFT_T\",\"OPP_GOAL_RIGHT_T\",\"OPP_GOAL_LEFT_L\",\"OPP_GOAL_RIGHT_L\")\n\n# Corner Info Tuple Dict\n# Indexed by corner constants, valued by tuples of info about that corner\n# The tuples for non-specifc corners don't include x and y coordinates\nCORNER_DESCRIPTION_STRING, CORNER_ABSTRACTION_LEVEL_INFO, CORNER_X_CORD, CORNER_Y_CORD = range(4)\nCORNER_TYPE_INDEX_DICT = { L_INNER_CORNER : 0, L_OUTER_CORNER:1, T_CORNER:2,\n CENTER_CIRCLE:3, MY_GOAL_T:4, OPP_GOAL_T:5,\n MY_GOAL_RIGHT_L_OR_OPP_GOAL_LEFT_L:6,\n MY_GOAL_LEFT_L_OR_OPP_GOAL_RIGHT_L:7,\n CORNER_INNER_L:8,\n GOAL_BOX_INNER_L:9,\n CORNER_OR_GOAL_INNER_L:10,\n MY_GOAL_OUTER_L:11,\n OPP_GOAL_OUTER_L:12,\n CENTER_T:13 }\n\nCORNER_INFO_DICT = {\n\n# completely abstract corners\nL_INNER_CORNER: ('L Inner Corner', 
COMPLETELY_ABSTRACT_CORNER),\\\nL_OUTER_CORNER: ('L Outer Corner', COMPLETELY_ABSTRACT_CORNER),\\\nT_CORNER: ('T Corner', COMPLETELY_ABSTRACT_CORNER),\\\nCENTER_CIRCLE: ('Center Circle', COMPLETELY_ABSTRACT_CORNER),\\\n\n# narrowly abstract corners\nMY_GOAL_T: ('My Goal T', NARROWLY_ABSTRACT_CORNER),\\\nOPP_GOAL_T: ('Opp Goal T', NARROWLY_ABSTRACT_CORNER),\\\nMY_GOAL_RIGHT_L_OR_OPP_GOAL_LEFT_L: ('My Goal Right L or Opp Goal Left L', NARROWLY_ABSTRACT_CORNER),\\\nMY_GOAL_LEFT_L_OR_OPP_GOAL_RIGHT_L: ('My Goal Left L or Opp Goal Right L', NARROWLY_ABSTRACT_CORNER),\\\nCORNER_INNER_L: ('Corner Inner L', NARROWLY_ABSTRACT_CORNER),\\\nGOAL_BOX_INNER_L: ('Goal Box Inner L', NARROWLY_ABSTRACT_CORNER),\\\nCORNER_OR_GOAL_INNER_L: ('Corner or Goal Inner L', NARROWLY_ABSTRACT_CORNER),\\\nMY_GOAL_OUTER_L: ('My Goal Outer L', NARROWLY_ABSTRACT_CORNER),\\\nOPP_GOAL_OUTER_L: ('Opp Goal Outer L', NARROWLY_ABSTRACT_CORNER),\\\nCENTER_T: ('Center T', NARROWLY_ABSTRACT_CORNER),\\\n\n# specific corners\nMY_CORNER_LEFT_L: ('My Corner Left L', SPECIFIC_CORNER,FIELD_WHITE_LEFT_SIDELINE_X, FIELD_WHITE_BOTTOM_SIDELINE_Y),\\\nMY_CORNER_RIGHT_L: ('My Corner Right L', SPECIFIC_CORNER,FIELD_WHITE_RIGHT_SIDELINE_X, FIELD_WHITE_BOTTOM_SIDELINE_Y),\\\n\nMY_GOAL_LEFT_T: ('My Goal Left T', SPECIFIC_CORNER, GOALBOX_LEFT_X, FIELD_WHITE_BOTTOM_SIDELINE_Y),\nMY_GOAL_RIGHT_T: ('My Goal Right T', SPECIFIC_CORNER, GOALBOX_RIGHT_X, FIELD_WHITE_BOTTOM_SIDELINE_Y),\\\n\nMY_GOAL_LEFT_L: ('My Goal Left L', SPECIFIC_CORNER, GOALBOX_LEFT_X, GOALBOX_HEIGHT+FIELD_WHITE_BOTTOM_SIDELINE_Y),\\\nMY_GOAL_RIGHT_L: ('My Goal Right L', SPECIFIC_CORNER, GOALBOX_RIGHT_X, GOALBOX_HEIGHT+FIELD_WHITE_BOTTOM_SIDELINE_Y),\\\n\nCENTER_LEFT_T: ('Center Left T', SPECIFIC_CORNER, FIELD_WHITE_LEFT_SIDELINE_X, MIDFIELD_Y),\\\nCENTER_RIGHT_T: ('Center Right T', SPECIFIC_CORNER, FIELD_WHITE_RIGHT_SIDELINE_X, MIDFIELD_Y),\\\n\nOPP_CORNER_LEFT_L: ('Opp Corner Left L', SPECIFIC_CORNER, FIELD_WHITE_LEFT_SIDELINE_X, FIELD_WHITE_TOP_SIDELINE_Y),\\\nOPP_CORNER_RIGHT_L: ('Opp Corner Right L', SPECIFIC_CORNER, FIELD_WHITE_RIGHT_SIDELINE_X, FIELD_WHITE_TOP_SIDELINE_Y),\\\n\nOPP_GOAL_LEFT_T: ('Opp Goal Left T', SPECIFIC_CORNER, GOALBOX_LEFT_X, FIELD_WHITE_TOP_SIDELINE_Y),\\\nOPP_GOAL_RIGHT_T: ('Opp Goal Right T', SPECIFIC_CORNER, GOALBOX_RIGHT_X, FIELD_WHITE_TOP_SIDELINE_Y),\\\n\nOPP_GOAL_LEFT_L: ('Opp Goal Left L', SPECIFIC_CORNER, GOALBOX_LEFT_X, FIELD_WHITE_TOP_SIDELINE_Y - GOALBOX_HEIGHT),\\\nOPP_GOAL_RIGHT_L: ('Opp Goal Right L', SPECIFIC_CORNER, GOALBOX_RIGHT_X, FIELD_WHITE_TOP_SIDELINE_Y - GOALBOX_HEIGHT)\n\n} #fin\n\n# Specific Corners for Type Dict\n# Indexed by abstract corner constans, valued by lists of specific corners that fall under that type\n# DOH - whether some corners are inner or outer depends on if dog is in the goal box \n# FOR NOW - return all possible corners for a given type. 
It will be the responsibility of the localization engine to filter this list of possible corners based on its current location\nSPECIFIC_CORNERS_FOR_TYPE_DICT = {\\\nL_INNER_CORNER : [MY_CORNER_LEFT_L, MY_CORNER_RIGHT_L, MY_GOAL_LEFT_L, MY_GOAL_RIGHT_L, OPP_CORNER_LEFT_L, OPP_CORNER_RIGHT_L, OPP_GOAL_LEFT_L, OPP_GOAL_RIGHT_L,],\\\nL_OUTER_CORNER : [MY_GOAL_LEFT_L, MY_GOAL_RIGHT_L, OPP_GOAL_LEFT_L, OPP_GOAL_RIGHT_L],\\\nT_CORNER : [MY_GOAL_LEFT_T, MY_GOAL_RIGHT_T, CENTER_BY_T, CENTER_YB_T, OPP_GOAL_RIGHT_T, OPP_GOAL_LEFT_T],\\\nCENTER_CIRCLE : [CENTER_BY_T, CENTER_YB_T],\\\nMY_GOAL_T: [MY_GOAL_LEFT_T, MY_GOAL_RIGHT_T],\\\nOPP_GOAL_T: [OPP_GOAL_LEFT_T, OPP_GOAL_RIGHT_T,],\\\nMY_GOAL_RIGHT_L_OR_OPP_GOAL_LEFT_L: [MY_GOAL_RIGHT_L, OPP_GOAL_LEFT_L],\\\nMY_GOAL_LEFT_L_OR_OPP_GOAL_RIGHT_L: [MY_GOAL_LEFT_L, OPP_GOAL_RIGHT_L],\\\nCORNER_INNER_L: [MY_CORNER_LEFT_L, MY_CORNER_RIGHT_L, OPP_CORNER_LEFT_L, OPP_CORNER_RIGHT_L], \\\nGOAL_BOX_INNER_L: [MY_GOAL_LEFT_L, MY_GOAL_RIGHT_L, OPP_GOAL_LEFT_L, OPP_GOAL_RIGHT_L], \\\nCORNER_OR_GOAL_INNER_L: [MY_CORNER_LEFT_L, MY_CORNER_RIGHT_L, MY_GOAL_LEFT_L, MY_GOAL_RIGHT_L, OPP_CORNER_LEFT_L, OPP_CORNER_RIGHT_L, OPP_GOAL_LEFT_L, OPP_GOAL_RIGHT_L],\\\nMY_GOAL_OUTER_L: [MY_GOAL_LEFT_L, MY_GOAL_RIGHT_L],\\\nOPP_GOAL_OUTER_L: [OPP_GOAL_LEFT_L, OPP_GOAL_RIGHT_L],\\\nCENTER_T: [CENTER_BY_T, CENTER_YB_T]}\n\n################################################################################\n##### END LOCALIZATION DATA\n################################################################################\n\n","sub_path":"skull/noggin/noggin/NogginConstants.py","file_name":"NogginConstants.py","file_ext":"py","file_size_in_byte":15610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"464844989","text":"import os, datetime\nimport apache_beam as beam\nfrom apache_beam.io import ReadFromText\nfrom apache_beam.io import WriteToText\n\nclass removeCountryCode(beam.DoFn):\n def process(self, element):\n record = element\n country_code = record.get('country_code')\n country_name = record.get('country_name')\n region_code = record.get('region_code')\n\n #Create new record by removing the country code\n new_record = {'country_name': country_name, 'region_code': region_code}\n\n return [new_record]\n\nPROJECT_ID = os.environ['PROJECT_ID']\nBUCKET = os.environ['BUCKET']\nDIR_PATH = BUCKET + '/output/' + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '/'\n\n# run pipeline on Dataflow \noptions = {\n 'runner': 'DataflowRunner',\n 'project': PROJECT_ID,\n 'temp_location': BUCKET + '/temp',\n 'staging_location': BUCKET + '/staging',\n 'machine_type': 'n1-standard-1', # machine types listed here: https://cloud.google.com/compute/docs/machine-types\n 'num_workers': 1\n}\nopts = beam.pipeline.PipelineOptions(flags=[], **options)\n\nwith beam.Pipeline('DataflowRunner', options=opts) as p:\n\n # Query Countries table on big query selecting all attributes\n query_results_Countries= p | 'Read from Countries table BigQuery' >> beam.io.Read(beam.io.BigQuerySource(query='select * from Aggriculture.Countries')) \n \n # Write input PCollection to local file input.txt\n query_results_Countries | 'Write querried raw data to input.txt' >> WriteToText('input.txt')\n\n # Apply DoFn through ParDo\n out_pcoll = query_results_Countries | 'Replace instances of country_code where it is numeric with string country_code' >> beam.ParDo(removeCountryCode())\n\n # Write output PCollection to local file output.txt\n out_pcoll | 'Write transformed data to 
output.txt' >> WriteToText('output.txt')\n\n #Create new table\n qualified_table_name = PROJECT_ID + ':Aggriculture.CountriesMS6'\n table_schema = 'country_name:STRING,region_code:INTEGER'\n\n # Write output PCollection to new BigQuery table in main dataset\n out_pcoll | 'Write transformed data to BigQuery' >> beam.io.Write(beam.io.BigQuerySink(qualified_table_name, schema=table_schema, create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED, write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE))","sub_path":"Countries_cluster.py","file_name":"Countries_cluster.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"261834166","text":"def twoSum(nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n r = []\n N = len(nums)\n for i in range(N):\n for j in range(i + 1, N):\n if nums[i] + nums[j] == target:\n r.append([nums[i], nums[j]])\n return r\nprint(twoSum([1,3,2,2],4))\nprint(twoSum([1,9,2,8,3,7,4,6,5,5,13,14,11,13,-1],10))\n\n\n# def subpairs(nums):\n# \"\"\"\n# :type nums: List[int]\n# :type target: int\n# :rtype: List[int]\n# \"\"\"\n# s = []\n# r = set()\n# N = len(nums)\n# for i in range(N):\n# for j in range(i + 1, N):\n# s.append(sorted([nums[i], nums[j]]))\n# r = set(s)\n# return r\n# print(subpairs([1,3,2,2]))\n# print(subpairs([1,9,2,8,3,7,4,6,5,5,13,14,11,13,-1]))","sub_path":"BigO_examples/Sumpairs.py","file_name":"Sumpairs.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"128866945","text":"#!/usr/bin/env python\n\nfrom subprocess import Popen, PIPE, STDOUT\nimport tarfile\nimport gzip\n\nfile = gzip.open(\"!{bootFile.baseName}.nw.gz\", \"wb\")\ntar = tarfile.open(\"!{bootFile}\", \"r:gz\")\nfor tarinfo in tar:\n f=tar.extractfile(tarinfo)\n content=f.read()\n p = Popen(['FastTree','-nopr','-nosupport','-wag','-gamma'], stdout=PIPE, stdin=PIPE, stderr=PIPE)\n stdout_data = p.communicate(input=content)[0]\n file.write(stdout_data)\ntar.close()\nfile.close()\n","sub_path":"mammals_COI5P/templates/bootstrapfastree.py","file_name":"bootstrapfastree.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"234397480","text":"import matplotlib\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nimport math\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torch.utils.data as data\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nfrom torch import nn\nimport torch\nfrom random import randint, sample\nfrom PIL import Image\nimport numpy as np\nimport struct\nimport gzip\nimport os\n\n# from tensorboardX import SummaryWriter\n\n\ntorch.cuda.set_device(5)\n\n\nclass MoireCNN(nn.Module):\n\n def conv(self, channels):\n x=nn.Sequential(\n nn.Conv2d(channels, channels, 3, 1, 1),\n nn.ReLU(True),\n nn.Conv2d(channels, channels, 3, 1, 1),\n nn.ReLU(True),\n nn.Conv2d(channels, channels, 3, 1, 1),\n nn.ReLU(True),\n nn.Conv2d(channels, channels, 3, 1, 1),\n nn.ReLU(True),\n nn.Conv2d(channels, channels, 3, 1, 1),\n nn.ReLU(True)\n )\n return x\n\n def __init__(self):\n\n super().__init__()\n \n self.s11=nn.Sequential(\n nn.Conv2d(3, 32, 3, 1, 1),\n nn.ReLU(True),\n nn.Conv2d(32, 32, 3, 1, 1)\n )\n self.s12=nn.Conv2d(32, 3, 3, 1, 1)\n self.s13=self.conv(32)\n \n 
self.s21=nn.Sequential(\n nn.Conv2d(32, 32, 3, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(32, 64, 3, 1, 1)\n )\n self.s22=nn.Sequential(\n nn.ConvTranspose2d(64, 32, 4, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(32, 3, 3, 1, 1)\n )\n self.s23=self.conv(64)\n \n self.s31=nn.Sequential(\n nn.Conv2d(64, 64, 3, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(64, 64, 3, 1, 1)\n )\n self.s32=nn.Sequential(\n nn.ConvTranspose2d(64, 64, 4, 2, 1),\n nn.ReLU(True),\n nn.ConvTranspose2d(64, 32, 4, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(32, 3, 3, 1, 1)\n )\n self.s33=self.conv(64)\n \n self.s41=nn.Sequential(\n nn.Conv2d(64, 64, 3, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(64, 64, 3, 1, 1)\n )\n self.s42=nn.Sequential(\n nn.ConvTranspose2d(64, 64, 4, 2, 1),\n nn.ReLU(True),\n nn.ConvTranspose2d(64, 32, 4, 2, 1),\n nn.ReLU(True),\n nn.ConvTranspose2d(32, 32, 4, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(32, 3, 3, 1, 1)\n )\n self.s43=self.conv(64)\n \n self.s51=nn.Sequential(\n nn.Conv2d(64, 64, 3, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(64, 64, 3, 1, 1)\n )\n self.s52=nn.Sequential(\n nn.ConvTranspose2d(64, 64, 4, 2, 1),\n nn.ReLU(True),\n nn.ConvTranspose2d(64, 32, 4, 2, 1),\n nn.ReLU(True),\n nn.ConvTranspose2d(32, 32, 4, 2, 1),\n nn.ReLU(True),\n nn.ConvTranspose2d(32, 32, 4, 2, 1),\n nn.ReLU(True),\n nn.Conv2d(32, 3, 3, 1, 1)\n )\n self.s53=self.conv(64)\n \n def forward(self, x):\n x1=self.s11(x)\n x2=self.s21(x1)\n x3=self.s31(x2)\n x4=self.s41(x3)\n x5=self.s51(x4)\n \n x1=self.s12(self.s13(x1))\n x2=self.s22(self.s23(x2))\n x3=self.s32(self.s33(x3))\n x4=self.s42(self.s43(x4))\n x5=self.s52(self.s53(x5))\n\n x=x1+x2+x3+x4+x5\n \n return x\n\n\ndef train(epoch, lr):\n model.train()\n\n for batch_idx, (data, target) in enumerate(train_loader):\n if use_gpu:\n data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)\n data, target = Variable(data), Variable(target)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=0.00001)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % 5000 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), loss.data))\n\n\ndef test(epoch):\n model.eval()\n\n idx = 0\n loss_sum = 0.0\n for (data, target) in test_loader:\n if use_gpu:\n data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)\n data, target = Variable(data), Variable(target)\n\n with torch.no_grad():\n output = model(data)\n loss = criterion(output, target)\n\n loss_sum += loss.data\n idx += 1\n loss_sum /= idx\n\n print('Test Epoch: {} \\tLoss: {:.6f}'.format(\n epoch, loss_sum))\n\n global pre_loss, lr, best_loss\n if loss_sum > pre_loss:\n lr *= 0.9\n \n if loss_sum < best_loss:\n best_loss = loss_sum\n torch.save(model, \"moire-1.pth\")\n\n pre_loss = loss_sum\n\n\nclass MoirePic(data.Dataset):\n def __init__(self, rootX, rootY, training=True):\n self.picX=[rootX+i for i in os.listdir(rootX)]\n self.picY=[rootY+i for i in os.listdir(rootY)]\n self.picX.sort()\n self.picY.sort()\n # self.picX=self.picX[:40]\n # self.picY=self.picY[:40]\n self.Len=len(self.picX)\n\n if not training:\n L = sample(range(self.Len), self.Len//10)\n tempX = [self.picX[i] for i in L]\n tempY = [self.picY[i] for i in L]\n self.picX=tempX\n self.picY=tempY\n self.Len=len(L)\n \n def __getitem__(self, index):\n tf=transforms.ToTensor()\n\n def rand_crop(data,label):\n img_w, img_h = 256, 256\n\n width1 = randint(0, data.shape[1] - img_w )\n height1 = randint(0, data.shape[2] - img_h)\n width2 = width1 + img_w\n height2 = height1 + img_h \n\n return (data[:,width1:width2,height1:height2],\n label[:,width1:width2,height1:height2])\n\n pathX, pathY=self.picX[index], self.picY[index]\n imgX, imgY=Image.open(pathX), Image.open(pathY)\n return rand_crop(tf(imgX), tf(imgY))\n \n def __len__(self):\n return self.Len\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(mean=0.0, std=0.01)\n m.bias.data.fill_(0)\n\n\ndataset = MoirePic(\"/data_new/moire/trainData/source/\",\n \"/data_new/moire/trainData/target/\")\ntestdataset = MoirePic(\"/data_new/moire/trainData/source/\",\n \"/data_new/moire/trainData/target/\", False)\nuse_gpu = torch.cuda.is_available()\nbatch_size = 8\nkwargs = {'num_workers': 14, 'pin_memory': True}\ntrain_loader = DataLoader(dataset=dataset, shuffle=True,\n batch_size=batch_size, **kwargs)\ntest_loader = DataLoader(dataset=testdataset, shuffle=True,\n batch_size=batch_size, **kwargs) \n\n# model = MoireCNN()\nmodel = torch.load(\"moire-1.pth\")\n# model.apply(weights_init)\n\n# with SummaryWriter(comment='MoireCNN') as w:\n# w.add_graph(model, (x, ))\n\nif use_gpu:\n model = model.cuda()\n # model = nn.DataParallel(model)\n print('USE GPU')\nelse:\n print('USE CPU')\n\ncriterion = nn.MSELoss()\nlr = 0.00004\npre_loss = 100.0\nbest_loss = 100.0\n\nfor epoch in range(100):\n train(epoch, lr)\n test(epoch)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"190391651","text":"import json\nimport logging\nimport re\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect\nfrom django.urls import (\n reverse,\n)\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.edit import FormView\n\nfrom core.utils.generic_helpers import get_current_financial_year\n\nfrom 
costcentre.forms import MyCostCentresForm\nfrom costcentre.models import CostCentre\n\nfrom forecast.forms import (\n AddForecastRowForm,\n EditForecastFigureForm,\n PasteForecastForm,\n PublishForm,\n)\nfrom forecast.models import (\n FinancialCode,\n FinancialPeriod,\n ForecastMonthlyFigure,\n)\nfrom forecast.serialisers import FinancialCodeSerializer\nfrom forecast.utils.access_helpers import (\n can_edit_at_least_one_cost_centre,\n can_forecast_be_edited,\n get_user_cost_centres,\n)\nfrom forecast.utils.edit_helpers import (\n BadFormatException,\n CannotFindForecastMonthlyFigureException,\n CannotFindMonthlyFigureException,\n IncorrectDecimalFormatException,\n NoFinancialCodeForEditedValue,\n NotEnoughColumnsException,\n NotEnoughMatchException,\n RowMatchException,\n TooManyMatchException,\n check_cols_match,\n check_row_match,\n set_monthly_figure_amount,\n)\nfrom forecast.utils.query_fields import edit_forecast_order\nfrom forecast.views.base import (\n CostCentrePermissionTest,\n NoCostCentreCodeInURLError,\n)\n\n\ndef get_financial_code_serialiser(cost_centre_code):\n financial_codes = (\n FinancialCode.objects.filter(cost_centre_id=cost_centre_code, )\n .prefetch_related(\n \"forecast_forecastmonthlyfigures\",\n \"forecast_forecastmonthlyfigures__financial_period\",\n ).order_by(*edit_forecast_order())\n )\n\n return FinancialCodeSerializer(financial_codes, many=True, )\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ChooseCostCentreView(\n UserPassesTestMixin,\n FormView,\n):\n template_name = \"forecast/edit/choose_cost_centre.html\"\n form_class = MyCostCentresForm\n cost_centre = None\n\n def test_func(self):\n can_edit = can_edit_at_least_one_cost_centre(\n self.request.user\n )\n\n if not can_edit:\n raise PermissionDenied()\n\n return True\n\n def get_user_cost_centres(self):\n user_cost_centres = get_user_cost_centres(\n self.request.user,\n )\n\n cost_centres = []\n\n for (cost_centre) in user_cost_centres:\n cost_centres.append({\n \"name\": cost_centre.cost_centre_name,\n \"code\": cost_centre.cost_centre_code,\n })\n\n return json.dumps(cost_centres)\n\n def get_form_kwargs(self):\n kwargs = super(ChooseCostCentreView, self).get_form_kwargs()\n kwargs[\"user\"] = self.request.user\n return kwargs\n\n def form_valid(self, form):\n self.cost_centre = form.cleaned_data[\"cost_centre\"]\n return super(ChooseCostCentreView, self).form_valid(form)\n\n def get_success_url(self):\n return reverse(\n \"edit_forecast\",\n kwargs={\"cost_centre_code\": self.cost_centre.cost_centre_code},\n )\n\n\nclass AddRowView(\n CostCentrePermissionTest, FormView,\n):\n template_name = \"forecast/edit/add.html\"\n form_class = AddForecastRowForm\n cost_centre_code = None\n\n def get_cost_centre(self):\n if self.cost_centre_code is not None:\n return\n\n if \"cost_centre_code\" not in self.kwargs:\n raise NoCostCentreCodeInURLError(\"No cost centre code provided in URL\")\n\n self.cost_centre_code = self.kwargs[\"cost_centre_code\"]\n\n def get_success_url(self):\n self.get_cost_centre()\n\n return reverse(\n \"edit_forecast\", kwargs={\"cost_centre_code\": self.cost_centre_code}\n )\n\n def cost_centre_details(self):\n self.get_cost_centre()\n\n cost_centre = CostCentre.objects.get(cost_centre_code=self.cost_centre_code,)\n return {\n \"group\": cost_centre.directorate.group.group_name,\n \"group_code\": cost_centre.directorate.group.group_code,\n \"directorate\": cost_centre.directorate.directorate_name,\n \"directorate_code\": cost_centre.directorate.directorate_code,\n 
\"cost_centre_name\": cost_centre.cost_centre_name,\n \"cost_centre_code\": cost_centre.cost_centre_code,\n }\n\n def get_form_kwargs(self):\n self.get_cost_centre()\n\n kwargs = super(AddRowView, self).get_form_kwargs()\n kwargs['cost_centre_code'] = self.cost_centre_code\n return kwargs\n\n def form_valid(self, form):\n data = form.cleaned_data\n\n financial_code = FinancialCode.objects.filter(\n cost_centre_id=self.cost_centre_code,\n programme=data[\"programme\"],\n natural_account_code=data[\"natural_account_code\"],\n analysis1_code=data[\"analysis1_code\"],\n analysis2_code=data[\"analysis2_code\"],\n project_code=data[\"project_code\"],\n ).first()\n\n if not financial_code:\n financial_code = FinancialCode.objects.create(\n cost_centre_id=self.cost_centre_code,\n programme=data[\"programme\"],\n natural_account_code=data[\"natural_account_code\"],\n analysis1_code=data[\"analysis1_code\"],\n analysis2_code=data[\"analysis2_code\"],\n project_code=data[\"project_code\"],\n )\n\n # Create \"actual\" monthly figures for past months\n actual_months = FinancialPeriod.financial_period_info.actual_period_code_list()\n\n if len(actual_months) > 0:\n financial_year = get_current_financial_year()\n\n for actual_month in actual_months:\n ForecastMonthlyFigure.objects.create(\n financial_code=financial_code,\n financial_year_id=financial_year,\n financial_period_id=actual_month,\n )\n\n return super().form_valid(form)\n\n\nclass PasteForecastRowsView(\n CostCentrePermissionTest, FormView,\n):\n form_class = PasteForecastForm\n\n @transaction.atomic\n def form_valid(self, form): # noqa: C901\n if \"cost_centre_code\" not in self.kwargs:\n raise NoCostCentreCodeInURLError(\"No cost centre code provided in URL\")\n\n try:\n cost_centre_code = self.kwargs[\"cost_centre_code\"]\n\n paste_content = form.cleaned_data[\"paste_content\"]\n pasted_at_row = form.cleaned_data.get(\"pasted_at_row\", None)\n all_selected = form.cleaned_data.get(\"all_selected\", False)\n\n financial_codes = FinancialCode.objects.filter(\n cost_centre_id=self.cost_centre_code,\n )\n\n # TODO - introduce a way of checking for\n # active financial periods (see previously used logic below)\n\n # Get number of active financial periods\n # active_periods = FinancialPeriod.objects.filter(\n # display_figure=True\n # ).count()\n\n row_count = financial_codes.count()\n rows = paste_content.splitlines()\n\n # Remove any rows that start with empty cells (to account for totals etc)\n rows = [row for row in rows if not row[0].strip() == \"\"]\n\n pasted_row_count = len(rows)\n\n if len(rows) == 0:\n return JsonResponse(\n {\"error\": \"Your pasted data is not formatted correctly.\"},\n status=400,\n )\n\n # Check for header row\n has_start_row = False\n if rows[0].lower().startswith(\"programme\"):\n has_start_row = True\n\n # Account for header row in paste\n if has_start_row:\n pasted_row_count -= 1\n\n if all_selected and row_count < pasted_row_count:\n return JsonResponse(\n {\n \"error\": (\n \"You have selected all forecast rows \"\n \"but the pasted data has too many rows.\"\n )\n },\n status=400,\n )\n\n if all_selected and row_count > pasted_row_count:\n return JsonResponse(\n {\n \"error\": (\n \"You have selected all forecast rows \"\n \"but the pasted data has too few rows.\"\n )\n },\n status=400,\n )\n\n try:\n for index, row in enumerate(rows):\n if index == 0 and has_start_row:\n continue\n\n cell_data = re.split(r\"\\t\", row.rstrip(\"\\t\"))\n\n # Check that pasted at content and desired first row match\n 
check_row_match(\n index, pasted_at_row, cell_data,\n )\n\n # Check cell data length against expected number of cols\n check_cols_match(cell_data)\n\n set_monthly_figure_amount(\n cost_centre_code, cell_data,\n )\n except (\n BadFormatException,\n TooManyMatchException,\n NotEnoughColumnsException,\n NotEnoughMatchException,\n RowMatchException,\n CannotFindMonthlyFigureException,\n CannotFindForecastMonthlyFigureException,\n IncorrectDecimalFormatException,\n ) as ex:\n return JsonResponse({\"error\": str(ex)}, status=400,)\n\n financial_code_serialiser = get_financial_code_serialiser(\n self.cost_centre_code,\n )\n\n return JsonResponse(\n financial_code_serialiser.data,\n safe=False,\n )\n except Exception:\n logger.fatal(\n \"Error when pasting forecast data\",\n exc_info=True,\n )\n return JsonResponse(\n {\"error\": \"There was an error when attempting to paste \"\n \"your data, please make sure you have selected \"\n \"all columns when you copy from the spreadsheet. \"\n \"Some of the forecast data may have been updated. \"\n \"If the error persists, please contact the Live \"\n \"Services Team\"\n },\n status=400,\n )\n\n def form_invalid(self, form):\n return JsonResponse(\n {\n \"error\": \"There was a problem with your \"\n \"submission, please contact support\"\n },\n status=400,\n )\n\n\nclass EditForecastFigureView(\n CostCentrePermissionTest, FormView,\n):\n form_class = EditForecastFigureForm\n\n def form_valid(self, form):\n if \"cost_centre_code\" not in self.kwargs:\n raise NoCostCentreCodeInURLError(\"No cost centre code provided in URL\")\n\n cost_centre_code = self.kwargs[\"cost_centre_code\"]\n\n cost_centre = CostCentre.objects.filter(\n cost_centre_code=cost_centre_code,\n ).first()\n\n financial_year = get_current_financial_year()\n\n financial_code = FinancialCode.objects.filter(\n cost_centre=cost_centre,\n natural_account_code=form.cleaned_data[\"natural_account_code\"],\n programme__programme_code=form.cleaned_data[\"programme_code\"],\n analysis1_code__analysis1_code=form.cleaned_data.get(\n \"analysis1_code\", None,\n ),\n analysis2_code__analysis2_code=form.cleaned_data.get(\n \"analysis2_code\", None,\n ),\n project_code__project_code=form.cleaned_data.get(\"project_code\", None,),\n )\n\n month = form.cleaned_data[\"month\"]\n\n if not financial_code.first():\n raise NoFinancialCodeForEditedValue()\n\n monthly_figure = ForecastMonthlyFigure.objects.filter(\n financial_year_id=financial_year,\n financial_code=financial_code.first(),\n financial_period__financial_period_code=month,\n archived_status=None,\n ).first()\n\n amount = form.cleaned_data[\"amount\"]\n\n if amount > settings.MAX_FORECAST_FIGURE:\n amount = settings.MAX_FORECAST_FIGURE\n\n if amount < settings.MIN_FORECAST_FIGURE:\n amount = settings.MIN_FORECAST_FIGURE\n\n if monthly_figure:\n monthly_figure.amount = amount\n else:\n financial_period = FinancialPeriod.objects.filter(\n financial_period_code=month\n ).first()\n monthly_figure = ForecastMonthlyFigure(\n financial_year_id=financial_year,\n financial_code=financial_code.first(),\n financial_period=financial_period,\n amount=amount,\n )\n\n monthly_figure.save()\n\n financial_code_serialiser = get_financial_code_serialiser(self.cost_centre_code)\n\n return JsonResponse(financial_code_serialiser.data, safe=False)\n\n def form_invalid(self, form):\n return JsonResponse(\n {\n \"error\": \"There was a problem with your \"\n \"submission, please contact support\"\n },\n status=400,\n )\n\n\nclass EditForecastView(\n 
CostCentrePermissionTest, TemplateView,\n):\n template_name = \"forecast/edit/edit.html\"\n\n def class_name(self):\n return \"wide-table\"\n\n def cost_centre_details(self):\n cost_centre = CostCentre.objects.get(cost_centre_code=self.cost_centre_code,)\n return {\n \"group\": cost_centre.directorate.group.group_name,\n \"group_code\": cost_centre.directorate.group.group_code,\n \"directorate\": cost_centre.directorate.directorate_name,\n \"directorate_code\": cost_centre.directorate.directorate_code,\n \"cost_centre_name\": cost_centre.cost_centre_name,\n \"cost_centre_code\": cost_centre.cost_centre_code,\n }\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n form = PublishForm(initial={\"cost_centre_code\": self.cost_centre_code, })\n\n financial_code_serialiser = get_financial_code_serialiser(\n self.cost_centre_code,\n )\n\n serialiser_data = financial_code_serialiser.data\n forecast_dump = json.dumps(serialiser_data)\n\n actual_data = FinancialPeriod.financial_period_info.actual_period_code_list()\n period_display = (\n FinancialPeriod.financial_period_info.period_display_code_list()\n ) # noqa\n paste_form = PasteForecastForm()\n\n context[\"form\"] = form\n context[\"paste_form\"] = paste_form\n context[\"forecast_dump\"] = forecast_dump\n context[\"actuals\"] = actual_data\n context[\"period_display\"] = period_display\n\n return context\n\n\nclass EditUnavailableView(\n TemplateView,\n):\n template_name = \"forecast/edit/edit_locked.html\"\n\n def dispatch(self, request, *args, **kwargs):\n # If edit is open, redirect to choose CC page\n if can_forecast_be_edited(request.user):\n return redirect(reverse(\"choose_cost_centre\"))\n\n return super(EditUnavailableView, self).dispatch(\n request,\n *args,\n **kwargs,\n )\n\n\nclass ErrorView(\n TemplateView,\n):\n def dispatch(self, request, *args, **kwargs):\n 1 / 0\n","sub_path":"forecast/views/edit_forecast.py","file_name":"edit_forecast.py","file_ext":"py","file_size_in_byte":15854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"92976334","text":"import os, io, re\nfrom modes import InputMode, CSSMode, RenderMode, ReplyType\nfrom communicator import Communicator, QueriesExecutionAbort\nfrom songparser import SongParser\nimport Lang\n#from song import Song\n\n\nclass MusicSheetMakerError(Exception):\n def __init__(self, explanation):\n self.explanation = explanation\n\n def __str__(self):\n return str(self.explanation)\n \n pass\n\n\nclass MusicSheetMaker:\n\n def __init__(self, locale='en_US', songs_in='test_songs', songs_out='songs_out'):\n self.name = 'music-sheet-maker'\n self.locale=self.set_locale(locale)\n self.communicator = Communicator(owner=self, locale=self.locale)\n self.song = None\n self.song_parser = None\n self.directory_base = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..'))\n self.song_dir_in = os.path.join(self.directory_base,songs_in)\n self.song_dir_out = os.path.join(self.directory_base,songs_out)\n self.css_path = os.path.normpath(os.path.join(self.directory_base, \"css/main.css\"))#TODO: move that into Renderer\n self.rel_css_path = os.path.relpath(self.css_path, start=self.song_dir_out)\n self.css_mode = CSSMode.EMBED#TODO: move that into Renderer\n self.render_modes_enabled = [mode for mode in RenderMode]\n # self.render_modes_disabled = [RenderMode.JIANPUASCII, RenderMode.DOREMIASCII]\n self.render_modes_disabled = []\n self.render_modes_enabled = [mode for mode in 
self.render_modes_enabled if\n mode not in self.render_modes_disabled]\n self.botcog_render_modes = [RenderMode.PNG]\n self.website_render_modes = [RenderMode.HTML]\n \n def __getattr__(self, attr_name):\n \"\"\"\n Default method, called when no other attribute is found.\n \"\"\"\n if 'communicator' in self.__dict__.keys():\n return getattr(self.communicator, attr_name)\n else:\n raise AttributeError(\"type object \" + repr(type(self).__name__) + \" has no attribute 'communicator'\")\n\n def get_name(self):\n return self.name\n \n def get_locale(self): \n return self.locale\n \n def set_locale(self, locale):\n \n self.locale = Lang.check_locale(locale)\n if self.locale is None:\n self.locale = Lang.guess_locale()\n print(\"**WARNING: bad locale type %s passed to MusicSheetMaker. Reverting to %s\"%(locale, self.locale))\n \n return self.locale\n\n \n def is_botcog(self, recipient):\n try:\n is_bot = recipient.get_name() == \"music-cog\"\n except AttributeError:\n try: # Guesses harder\n recipient.bot\n is_bot = True\n except AttributeError:\n is_bot = False\n return is_bot\n\n\n def is_website(self, recipient):\n try:\n is_website = recipient.get_name() == \"sky-music-website\"\n except AttributeError:\n try: # Guesses harder\n recipient.session_ID\n is_website = True\n except AttributeError:\n is_website = False\n\n return is_website\n\n def is_commandline(self, recipient): \n try:\n return recipient.get_name() == \"command-line\"\n except AttributeError: # Guesses harder\n return not (self.is_botcog(recipient) or self.is_website(recipient))\n\n def get_song(self):\n return self.song\n\n def set_song(self, song):\n self.song = song\n\n def get_song_parser(self):\n return self.song_parser\n\n def set_song_parser(self, song_parser=None):\n if song_parser is None:\n song_parser = SongParser(self)\n self.song_parser = song_parser\n\n def get_directory_base(self):\n return self.directory_base\n \n def get_render_modes_enabled(self):\n\n return self.render_modes_enabled\n\n def execute_queries(self, queries=None):\n\n if queries is None:\n self.communicator.memory.clean()\n queries = self.communicator.memory.recall_unsatisfied(filters=('to_me'))\n else:\n if not isinstance(queries, (list, set)):\n queries = [queries]\n #2 lines for debugging:\n #print('\\n%%%%I AM MAKER, THE UNSATISFIED QUERIES ARE:%%%%')\n #self.communicator.memory.print_out(filters=('unsatisfied'))\n\n \"\"\"\n The query satisfaction loop:\n runs until all queries are satisfied\n \"\"\"\n reply_valid = False\n while not reply_valid:\n reply_valid = True #To break the loop if no query\n for q in queries:\n #Fetching the stock Query name and arguments \n query_name = q.get_name()\n try: \n stock_query = self.communicator.query_stock[query_name]\n handler_args = ', '.join(('sender=q.get_sender()','query=q'))\n expression = 'self.' 
+ str(stock_query['handler']) + '(' + handler_args + ')'\n except KeyError as err:\n #TODO: handle non-stock queries???\n raise MusicSheetMakerError('Cannot create stock query ' + repr(query_name) + ', because of ' + repr(err))\n pass\n #Actual evaluation of the stock query\n try:\n answer = eval(expression)\n q.reply_to(answer)\n reply_valid = q.get_reply_validity()\n \n except QueriesExecutionAbort as qExecAbort:\n raise qExecAbort\n\n '''\n def next_step(self, recipient, step_number=0):\n \"\"\"\n Starts the next step of song creation for the given recipient\n Requires a current_step dictionary\n \"\"\"\n steps = [self.ask_instructions, self.ask_render_modes, self.ask_notes_or_file, self.ask_input_mode,\n self.set_parser_input_mode, self.ask_song_key, self.ask_octave_shift, self.parse_song,\n self.display_error_ratio, self.ask_song_metadata, self.set_song_metadata]\n \n if step_number == 0:\n res = self.set_song_parser()\n elif step_number > 1 and step_number < len(steps) + 1:\n res = steps[step_number](recipient)\n else:\n res = self.render_song(recipient)\n\n if isinstance(res, tuple):\n (q, r) = res\n try:\n q.expect_reply()\n return q\n except AttributeError:\n pass\n try:\n q[0].expect_reply()\n return q\n except (IndexError, AttributeError):\n pass\n \n return None\n '''\n \n def create_song(self, **kwargs):\n \"\"\"\n A very linear, sequential way of building a song from user inputs\n Returns a list of tuples (buffers, types) where buffers is a list of IOString/IOBytes buffers, and types the list of their types\n \"\"\"\n try:\n recipient = kwargs['sender']\n except KeyError:\n raise MusicSheetMakerError('No recipient specified for the Song')\n\n #Actually the following is not used but it may be useful to have the triggering query as an argument\n try:\n q_create_song = kwargs['query']\n except KeyError:\n raise MusicSheetMakerError('No Query passed to create_song') \n \n #======= NEW SONG =======\n \n # 1. Set Song Parser\n self.set_song_parser()\n\n # 2. Display instructions\n (i_instr, res) = self.ask_instructions(recipient=recipient)\n \n # 2.b Ask for render modes (query created for website only)\n (q_render, render_modes) = self.ask_render_modes(recipient=recipient)\n \n # 3. Ask for notes\n #TODO: allow the player to enter the notes using several messages??? or maybe not\n (q_notes, notes) = self.ask_notes_or_file(recipient=recipient, prerequisites=[i_instr])\n \n # 4. Ask for input mode (or display the one found)\n (q_mode, input_mode) = self.ask_input_mode(recipient=recipient, notes=notes, prerequisites=[q_notes])\n #(q_mode, input_mode) = self.ask_input_mode(recipient=recipient, prerequisites=[q_notes]) #TODO EXPERIMENTAL\n \n # 5. Set input_mode\n self.get_song_parser().set_input_mode(input_mode)\n #self.set_parser_input_mode(recipient) #TODO EXPERIMENTAL\n \n # 6. Ask for song key (or display the only one possible)\n (q_key, song_key) = self.ask_song_key(recipient=recipient, notes=notes, input_mode=input_mode, prerequisites=[q_notes, q_mode])\n #(q_key, song_key) = self.ask_song_key(recipient=recipient, prerequisites=[q_notes, q_mode]) #TODO EXPERIMENTAL\n \n # 7. Asks for octave shift\n (q_shift, octave_shift) = self.ask_octave_shift(recipient=recipient)\n \n # 8. Parse song\n self.parse_song(recipient, notes=notes, song_key=song_key, octave_shift=octave_shift)\n #self.parse_song(recipient) #TODO EXPERIMENTAL\n \n # 9. Displays error ratio\n (i_error, res) = self.display_error_ratio(recipient=recipient, prerequisites=[q_notes, q_mode, q_shift])\n \n # 10. 
Asks for song metadata\n (qs_meta, (title, artist, transcript)) = self.ask_song_metadata(recipient=recipient)\n \n self.set_song_metadata(recipient=recipient, meta=(title, artist, transcript), song_key=song_key)\n #self.set_song_metadata(recipient=recipient) #TODO EXPERIMENTAL\n \n # 11. Renders Song\n song_bundle = self.render_song(recipient, render_modes)\n \n # 12. Sends result back (required for website)\n return song_bundle\n \n \n def ask_instructions(self, recipient, prerequisites=None, execute=True):\n \n question_rep = ('\\n'.join(['\\n* ' + input_mode.get_long_desc(self.locale) for input_mode in InputMode]),\n self.get_song_parser().get_icon_delimiter(), self.get_song_parser().get_pause(),\n self.get_song_parser().get_quaver_delimiter(), self.get_song_parser().get_quaver_delimiter().join(['A1','B1','C1']),\n self.get_song_parser().get_repeat_indicator()+'2'\n )\n \n if self.is_commandline(recipient): \n i_instr = self.communicator.send_stock_query('instructions_stdout', recipient=recipient, question_rep=question_rep, prerequisites=prerequisites)\n elif self.is_website(recipient):\n i_instr = self.communicator.send_stock_query('instructions_website', recipient=recipient, question_rep=question_rep, prerequisites=prerequisites)\n else:\n i_instr = self.communicator.send_stock_query('instructions_botcog', recipient=recipient, question_rep=question_rep, prerequisites=prerequisites) \n \n if execute:\n recipient.execute_queries(i_instr)\n instructions = i_instr.get_reply().get_result()\n return (i_instr, instructions)\n else:\n return (i_instr, None)\n \n \n def ask_song_metadata(self, recipient, prerequisites=None, execute=True):\n\n queries = []\n\n queries += [self.communicator.send_stock_query('song_title', recipient=recipient, prerequisites=prerequisites)]\n queries += [self.communicator.send_stock_query('original_artist', recipient=recipient, prerequisites=prerequisites)]\n queries += [self.communicator.send_stock_query('transcript_writer', recipient=recipient, prerequisites=prerequisites)]\n\n if execute:\n recipient.execute_queries(queries)\n meta_data = [q.get_reply().get_result() for q in queries]\n return (queries, tuple(meta_data))\n else:\n return (queries, None)\n\n\n def retrieve_song_metadata(self, recipient):\n '''\n Experimental\n '''\n (title, artist, transcript) = (None, None, None)\n \n q_title = self.communicator.recall_by_recipient(recipient, criterion=\"song_title\", filters=[\"valid_reply\"], sort_by=\"date\")\n q_artist = self.communicator.recall_by_recipient(recipient, criterion=\"original_artist\", filters=[\"valid_reply\"], sort_by=\"date\")\n q_transcript = self.communicator.recall_by_recipient(recipient, criterion=\"transcript_writer\", filters=[\"valid_reply\"], sort_by=\"date\")\n\n if len(q_title) != 0:\n title = q_title[-1].get_reply().get_result()\n if len(q_artist) != 0:\n artist = q_artist[-1].get_reply().get_result()\n if len(q_transcript) != 0:\n transcript = q_transcript[-1].get_reply().get_result()\n \n return (title, artist, transcript)\n\n\n def ask_notes(self, recipient, prerequisites=None, execute=True):\n \n q_notes = self.communicator.send_stock_query('notes', recipient=recipient, prerequisites=prerequisites) \n \n if execute:\n recipient.execute_queries(q_notes)\n notes = q_notes.get_reply().get_result()\n return (q_notes, notes)\n else:\n return (q_notes, None)\n\n \n def ask_file(self, recipient, prerequisites=None, execute=True):\n \n q_file = self.communicator.send_stock_query('file', recipient=recipient, 
question_rep=(os.path.relpath(os.path.normpath(self.song_dir_in)),),\n prerequisites=prerequisites, limits=(os.path.normpath(self.song_dir_in),))\n \n if execute:\n recipient.execute_queries(q_file)\n file_name = q_file.get_reply().get_result()\n file_path = os.path.join(self.song_dir_in, os.path.normpath(file_name))\n return (q_file, file_path) #should return file name\n else:\n return (q_file, None)\n \n \n def read_file(self, file_path):\n \n isfile = os.path.isfile(file_path)\n \n if not isfile:\n raise MusicSheetMakerError('File does not exist: ' + os.path.abspath(file_path))\n else:\n #load file\n try:\n with open(file_path, mode='r', encoding='utf-8', errors='ignore') as fp:\n lines = fp.readlines() #Returns a list of strings\n except (OSError, IOError) as err:\n raise err\n \n return lines\n\n\n def ask_notes_or_file(self, recipient, prerequisites=None, execute=True):\n \"\"\"\n Asks for notes (all recipients) or a file name (command-line only)\n If a file name is detected but the file does not exist, sends a query to ask for a valid file path\n If notes are detected, return the notes as a list of strings split by the OS line separator\n \"\"\"\n \n if not self.is_commandline(recipient):\n \n return self.ask_notes(recipient=recipient, prerequisites=prerequisites, execute=execute)\n \n else:\n \n q_notes = self.communicator.send_stock_query('notes_file', question_rep=(os.path.relpath(os.path.normpath(self.song_dir_in)),),\n recipient=recipient, prerequisites=prerequisites)\n \n if not execute: \n return (q_notes, None) \n else: \n recipient.execute_queries(q_notes)\n \n result = q_notes.get_reply().get_result()\n \n if self.is_commandline(recipient):\n #Detects if the result is a file path\n file_path = os.path.join(self.song_dir_in, os.path.normpath(result))\n isfile = os.path.isfile(file_path)\n \n if not isfile:\n splitted = os.path.splitext(result)\n if len(splitted[0]) > 0 and 2 < len(splitted[1]) <= 5 and re.search('\\\\.', splitted[0]) is None:\n # then certainly a file name\n self.communicator.memory.erase(q_notes)\n \n q_notes, file_path = self.ask_file(recipient=recipient, prerequisites=prerequisites, execute=execute)\n isfile = True #ask_file only returns when a valid file path is found\n else:\n isfile = False #Don't allow reading files on the website or music-cog\n \n if isfile and self.is_commandline(recipient):\n notes = self.read_file(file_path)\n print('(Song imported from %s)'%os.path.abspath(file_path))\n else: \n notes = result.split(os.linesep) # Returns a list of strings in any case\n \n if self.is_commandline(recipient): #Loop to ask for several lines in the standard input interface \n while result: \n q_notes = self.communicator.send_stock_query('notes', recipient=recipient, prerequisites=prerequisites)\n recipient.execute_queries(q_notes)\n result = q_notes.get_reply().get_result()\n \n result = result.split(os.linesep)\n for result in result:\n notes.append(result)\n \n return (q_notes, notes)\n\n \n def retrieve_notes(self, recipient):\n '''\n Experimental\n ''' \n q_notes_file = self.communicator.recall_by_recipient(recipient, criterion=\"file|notes_file\", filters=[\"valid_reply\"], sort_by=\"date\")\n if len(q_notes_file) != 0:\n result = q_notes_file[-1].get_reply().get_result()\n file_path = os.path.join(self.song_dir_in, os.path.normpath(result))\n isfile = os.path.isfile(file_path)\n \n if isfile and self.is_commandline(recipient):\n notes = self.read_file(file_path)\n else:\n notes = result.split(os.linesep) \n return notes\n \n \n q_notes = 
    def retrieve_notes(self, recipient):\n        '''\n        Experimental\n        '''\n        q_notes_file = self.communicator.recall_by_recipient(recipient, criterion=\"file|notes_file\", filters=[\"valid_reply\"], sort_by=\"date\")\n        if len(q_notes_file) != 0:\n            result = q_notes_file[-1].get_reply().get_result()\n            file_path = os.path.join(self.song_dir_in, os.path.normpath(result))\n            isfile = os.path.isfile(file_path)\n\n            if isfile and self.is_commandline(recipient):\n                notes = self.read_file(file_path)\n            else:\n                notes = result.split(os.linesep)\n            return notes\n\n        q_notes = self.communicator.recall_by_recipient(recipient, criterion=\"notes\", filters=[\"valid_reply\"], sort_by=\"date\")\n        if len(q_notes) != 0:\n            notes = q_notes[-1].get_reply().get_result().split(os.linesep)\n            return notes\n\n\n    def ask_render_modes(self, recipient, prerequisites=None, execute=True):\n        \"\"\"\n        Asks for the desired render modes for the Song\n        \"\"\"\n\n        render_modes = self.render_modes_enabled\n\n        if len(render_modes) == 1:\n            return (None, render_modes)\n\n        if self.is_commandline(recipient):\n\n            return (None, render_modes)\n\n        elif self.is_botcog(recipient):\n\n            return (None, self.botcog_render_modes)\n\n        else:\n\n            q_render = self.communicator.send_stock_query('render_modes', recipient=recipient, limits=render_modes, prerequisites=prerequisites)\n\n            if execute:\n                recipient.execute_queries(q_render)\n                render_modes = q_render.get_reply().get_result()\n                return (q_render, render_modes)\n            else:\n                return (q_render, None)\n\n\n    def ask_input_mode(self, recipient, notes=None, prerequisites=None, execute=True):\n        \"\"\"\n        Tries to guess the musical notation and asks the player to confirm\n        \"\"\"\n\n        if notes is None:\n            notes = self.retrieve_notes(recipient)\n\n        possible_modes = self.get_song_parser().get_possible_modes(notes)\n\n        if len(possible_modes) == 0:\n            #To avoid loopholes. This case may never be reached, because get_possible_modes should return all modes if none is found.\n            all_input_modes = [mode for mode in InputMode]\n\n            q_mode = self.communicator.send_stock_query('musical_notation', recipient=recipient,\n                                                        limits=all_input_modes, prerequisites=prerequisites)\n\n        elif len(possible_modes) == 1:\n            q_mode = self.communicator.send_stock_query('one_input_mode', recipient=recipient,\n                                                        question_rep=(possible_modes[0].get_short_desc(self.locale),), prerequisites=prerequisites)\n\n        else:\n            q_mode = self.communicator.send_stock_query('musical_notation', recipient=recipient, limits=possible_modes, prerequisites=prerequisites)\n\n        mode = possible_modes[0] if len(possible_modes) == 1 else None\n\n        if execute:\n            recipient.execute_queries(q_mode)\n            if len(possible_modes) != 1:\n                mode = q_mode.get_reply().get_result()\n\n        return (q_mode, mode)\n\n\n    def retrieve_input_mode(self, recipient, notes=None):\n        '''\n        Experimental\n        '''\n        if notes is None:\n            notes = self.retrieve_notes(recipient)\n\n        try:\n            input_mode = self.get_song_parser().get_input_mode()\n        except Exception:\n            input_mode = None\n\n        if input_mode is None:\n            q_mode = self.communicator.recall_by_recipient(recipient, criterion=ReplyType.INPUTMODE, filters=[\"valid_reply\"], sort_by=\"date\")\n            if len(q_mode) == 0:\n                input_mode = self.get_song_parser().get_possible_modes(notes)[0]\n            else:\n                input_mode = q_mode[-1].get_reply().get_result()\n\n        return input_mode\n\n
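# A hedged sketch of the "most recent valid reply" recall pattern shared by
# the retrieve_* helpers above and below; the function name and the default
# argument are illustrative, while the Communicator calls mirror the original.
def last_valid_reply(communicator, recipient, criterion, default=None):
    queries = communicator.recall_by_recipient(recipient, criterion=criterion,
                                               filters=["valid_reply"], sort_by="date")
    # queries are sorted by date, so the last entry is the most recent answer
    return queries[-1].get_reply().get_result() if queries else default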
    def ask_song_key(self, recipient, notes=None, input_mode=None, prerequisites=None, execute=True):\n        \"\"\"\n        Attempts to detect the key for input written in absolute musical scales (western, Jianpu)\n        \"\"\"\n        #EXPERIMENTAL\n        if notes is None:\n            notes = self.retrieve_notes(recipient)\n\n        if input_mode is None:\n            input_mode = self.retrieve_input_mode(recipient, notes)\n\n        song_key = None\n        possible_keys = self.get_song_parser().find_key(notes)\n\n        if possible_keys is None:\n            #Asks for any text string\n            q_key = self.communicator.send_stock_query('recommended_key', recipient=recipient, prerequisites=prerequisites)\n\n        elif len(possible_keys) == 0:\n            #Sends information that there is no possible key\n            q_key = self.communicator.send_stock_query('no_possible_key', recipient=recipient, prerequisites=prerequisites)\n            possible_keys = ['C']\n\n        elif len(possible_keys) == 1:\n            #Sends information that there is only 1 possible key\n            q_key = self.communicator.send_stock_query('one_possible_key', recipient=recipient,\n                                                       question_rep=(str(possible_keys[0]),), prerequisites=prerequisites)\n        else:\n            #Asks to choose a key within a list\n            q_key = self.communicator.send_stock_query('possible_keys', recipient=recipient,\n                                                       foreword_rep=(', '.join(possible_keys),), limits=possible_keys, prerequisites=prerequisites)\n\n        if execute:\n\n            recipient.execute_queries(q_key)\n            if possible_keys is None:\n                song_key = q_key.get_reply().get_result()\n                if len(song_key.strip()) == 0:\n                    song_key = 'C'\n            elif len(possible_keys) == 1:\n                song_key = possible_keys[0]\n            elif len(possible_keys) > 1:\n                song_key = q_key.get_reply().get_result()\n            else:\n                raise MusicSheetMakerError('Possible keys is an empty list.')\n\n            return (q_key, song_key)\n\n        else: #not execute\n\n            if possible_keys is not None and len(possible_keys) == 1:\n                song_key = possible_keys[0]\n            else:\n                song_key = None\n\n            return (q_key, song_key)\n\n\n    def retrieve_song_key(self, recipient, notes=None, input_mode=None):\n        '''\n        Experimental\n        '''\n        if notes is None:\n            notes = self.retrieve_notes(recipient)\n\n        if input_mode is None:\n            input_mode = self.retrieve_input_mode(recipient, notes=notes)\n\n        q_key = self.communicator.recall_by_recipient(recipient, criterion=\"possible_keys\", filters=[\"valid_reply\"], sort_by=\"date\")\n        if len(q_key) != 0:\n            return q_key[-1].get_reply().get_result()\n\n        q_key = self.communicator.recall_by_recipient(recipient, criterion=\"recommended_key\", filters=[\"valid_reply\"], sort_by=\"date\")\n        if len(q_key) != 0:\n            song_key = q_key[-1].get_reply().get_result()\n            if len(song_key.strip()) == 0:\n                song_key = 'C'\n            return song_key\n\n        q_key = self.communicator.recall_by_recipient(recipient, criterion=\"one_possible_key\", filters=[\"valid_reply\"], sort_by=\"date\")\n        if len(q_key) != 0:\n            try:\n                return self.get_song_parser().find_key(notes)[0]\n            except TypeError:\n                return 'C'\n\n        q_key = self.communicator.recall_by_recipient(recipient, criterion=\"no_possible_key\", filters=[\"valid_reply\"], sort_by=\"date\")\n        if len(q_key) != 0:\n            return 'C'\n\n        return 'C'\n\n\n    def ask_octave_shift(self, recipient, prerequisites=None, execute=True):\n\n        q_shift = self.communicator.send_stock_query('octave_shift', recipient=recipient, prerequisites=prerequisites)\n\n        if execute:\n            recipient.execute_queries(q_shift)\n            octave_shift = q_shift.get_reply().get_result()\n            return (q_shift, octave_shift)\n        else:\n            return (q_shift, None)\n\n    def retrieve_octave_shift(self, recipient):\n        '''\n        Experimental\n        '''\n        q_shift = self.communicator.recall_by_recipient(recipient, criterion=\"octave_shift\", filters=[\"valid_reply\"], sort_by=\"date\")\n        if len(q_shift) == 0:\n            octave_shift = 0\n        else:\n            octave_shift = q_shift[-1].get_reply().get_result()\n        return octave_shift\n\n
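# A small sketch of the broken-instrument ratio check implemented by
# display_error_ratio below; the 0 and 5% thresholds are taken from that
# method, while the function name is hypothetical.
def error_bucket(num_broken, num_instruments):
    ratio = num_broken / max(1, num_instruments)  # guards against division by zero
    if ratio == 0:
        return None  # nothing worth reporting
    return 'few_errors' if ratio < 0.05 else 'many_errors'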
    def send_buffers_to_botcog(self, buffers, recipient, prerequisites=None, execute=True):\n        '''\n        Discord only\n        TODO: fill this method, or if very short, put it inside create_song directly, or delete it if unused\n        '''\n        return buffers\n\n\n    def display_error_ratio(self, recipient, prerequisites=None, execute=True):\n\n        error_ratio = self.get_song().get_num_broken() / max(1, self.get_song().get_num_instruments())\n\n        if error_ratio == 0:\n            i_error = None\n        elif error_ratio < 0.05:\n            i_error = self.communicator.send_stock_query('few_errors', recipient=recipient, prerequisites=prerequisites)\n        else:\n            i_error = self.communicator.send_stock_query('many_errors', recipient=recipient, prerequisites=prerequisites)\n\n        if execute and i_error is not None:\n            recipient.execute_queries(i_error)\n            error_message = i_error.get_reply().get_result()\n            return (i_error, error_message)\n        else:\n            return (i_error, None)\n\n\n    def set_parser_input_mode(self, recipient, notes=None, input_mode=None):\n\n        if input_mode is None:\n            if notes is None:\n                notes = self.retrieve_notes(recipient)\n            if not notes:\n                raise MusicSheetMakerError('Could not retrieve the input mode because no notes were given.')\n            input_mode = self.retrieve_input_mode(recipient, notes)\n\n        self.get_song_parser().set_input_mode(input_mode)\n\n\n    def set_song_metadata(self, recipient, meta=None, song_key=None, notes=None):\n\n        if meta is None:\n            (title, artist, transcript) = self.retrieve_song_metadata(recipient)\n        else:\n            (title, artist, transcript) = meta\n\n        if song_key is None:\n            song_key = self.retrieve_song_key(recipient)\n\n        self.get_song().set_meta(title=title, artist=artist, transcript=transcript, song_key=song_key)\n\n\n    def parse_song(self, recipient, notes=None, song_key=None, octave_shift=None):\n\n        if notes is None:\n            notes = self.retrieve_notes(recipient)\n\n        if octave_shift is None:\n            octave_shift = self.retrieve_octave_shift(recipient)\n\n        if song_key is None:\n            song_key = self.retrieve_song_key(recipient, notes=notes)\n\n        self.set_song(self.get_song_parser().parse_song(song_lines=notes, song_key=song_key, octave_shift=octave_shift))\n\n        return\n\n    '''\n    def retrieve_render_modes(self, recipient):\n\n        q_render = self.communicator.recall_by_recipient(recipient, criterion=\"render_modes\", filters=[\"valid_reply\"], sort_by=\"date\")\n        if len(q_render) != 0:\n            render_modes = q_render[-1].get_reply().get_result()\n            return render_modes\n    '''\n\n    def render_song(self, recipient, render_modes=None):\n\n        if render_modes is None:\n            if self.is_botcog(recipient):\n                render_modes = self.botcog_render_modes\n            elif self.is_website(recipient):\n                render_modes = self.website_render_modes\n            else:\n                render_modes = self.render_modes_enabled\n\n        if self.is_commandline(recipient):\n\n            print(\"=\"*40)\n            song_bundle = []\n            for render_mode in render_modes:\n                buffers = self.write_song_to_buffers(render_mode) # A list of StringIO or BytesIO buffers\n                file_paths = self.build_file_paths(render_mode, len(buffers))\n                self.send_buffers_to_files(render_mode, buffers, file_paths, recipient=recipient)\n                song_bundle.append((buffers, [render_mode]*len(buffers)))\n\n        else: #website or botcog or...\n\n            self.css_mode = CSSMode.EMBED #Prevents the HTML/SVG from depending on an auxiliary .css file\n            song_bundle = [] # A list of tuples\n            for render_mode in render_modes:\n                buffers = self.write_song_to_buffers(render_mode) # A list of StringIO or BytesIO buffers\n                song_bundle.append((buffers, [render_mode]*len(buffers)))\n\n        return song_bundle\n\n
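# A hedged sketch of the buffer-to-file pattern used by send_buffers_to_files
# below, written with context managers so the files are always closed; the
# function name is illustrative, not part of the original API.
import io

def save_buffers(buffers, file_paths):
    for buf, path in zip(buffers, file_paths):
        if buf is None:
            continue
        if isinstance(buf, io.StringIO):
            with open(path, 'w', encoding='utf-8') as f:  # text buffer
                f.write(buf.getvalue())
        else:  # assumed to be io.BytesIO
            with open(path, 'wb') as f:  # binary buffer
                f.write(buf.getvalue())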
    def send_buffers_to_files(self, render_mode, buffers, file_paths, recipient, prerequisites=None, execute=True):\n        \"\"\"\n        Writes the content of a StringIO or BytesIO buffer list to one or several files.\n        Command line only\n        \"\"\"\n        #TODO: Move this method to Renderer?\n        try:\n            numfiles = len(buffers)\n        except (TypeError, AttributeError):\n            buffers = [buffers]\n            numfiles = 1\n\n        #Creates the output directory if it does not exist\n        if not os.path.isdir(self.song_dir_out):\n            os.mkdir(self.song_dir_out)\n\n        if len(buffers) != len(file_paths):\n            raise MusicSheetMakerError('inconsistent lengths of buffers and file_paths')\n\n        for i, buffer in enumerate(buffers):\n\n            if isinstance(buffer, io.StringIO):\n                output_file = open(file_paths[i], 'w+', encoding='utf-8', errors='ignore')\n            elif isinstance(buffer, io.BytesIO):\n                output_file = open(file_paths[i], 'bw+')\n            elif buffer is None:\n                output_file = None\n            else:\n                raise MusicSheetMakerError('Unknown buffer type in ' + str(self))\n\n            if output_file is not None:\n                output_file.write(buffer.getvalue())\n                output_file.close()\n\n            if numfiles == 1:\n\n                question_rep = (render_mode.get_short_desc(self.locale), str(os.path.relpath(file_paths[0])))\n\n                i_song_files = self.communicator.send_stock_query('one_song_file', recipient=recipient, question_rep=question_rep, prerequisites=prerequisites)\n\n            elif numfiles > 1 and i == 0:\n\n                question_rep = (render_mode.get_short_desc(self.locale), str(os.path.relpath(self.song_dir_out)))\n                afterword_rep = (str(numfiles), str(os.path.split(file_paths[0])[1]), str(os.path.split(file_paths[-1])[1]))\n                i_song_files = self.communicator.send_stock_query('several_song_files', recipient=recipient, question_rep=question_rep, afterword_rep=afterword_rep, prerequisites=prerequisites)\n            else:\n                question_rep = (render_mode.get_short_desc(self.locale),)\n                i_song_files = self.communicator.send_stock_query('no_song_file', recipient=recipient, question_rep=question_rep, prerequisites=prerequisites)\n\n        if execute:\n            recipient.execute_queries(i_song_files)\n            result = i_song_files.get_reply().get_result()\n            return (i_song_files, result)\n        else:\n            return (i_song_files, None)\n\n\n    def write_song_to_buffers(self, render_mode):\n        \"\"\"\n        Renders the song to in-memory buffers in the formats defined in RenderMode.\n        Returns a list [], even if it has only 1 element\n        \"\"\"\n        #TODO: Move this method to Renderer?\n        if render_mode in self.get_render_modes_enabled():\n\n            if render_mode == RenderMode.HTML:\n                buffers = [self.get_song().write_html(self.css_mode, self.css_path, self.rel_css_path)]\n            elif render_mode == RenderMode.SVG:\n                buffers = self.get_song().write_svg(self.css_mode, self.css_path, self.rel_css_path)\n            elif render_mode == RenderMode.PNG:\n                buffers = self.get_song().write_png()\n            elif render_mode == RenderMode.MIDI:\n                buffers = [self.get_song().write_midi()]\n            else: # Ascii\n                buffers = [self.get_song().write_ascii(render_mode)]\n\n        else:\n            buffers = []\n\n        for buffer in buffers:\n            if buffer is not None:\n                buffer.seek(0)\n\n        return buffers\n\n    def build_file_paths(self, render_mode, numfiles):\n        '''\n        Command line only: generates a list of file paths for a given render mode.\n        '''\n        #TODO: Move this method to Renderer?\n        if numfiles == 0:\n            return None\n\n        file_base = os.path.join(self.song_dir_out, self.get_song().get_title())\n        file_ext = render_mode.extension\n\n        file_paths = []\n        if numfiles > 1:\n            for i in range(numfiles):\n                file_paths += [file_base + str(i) + file_ext]\n        else:\n            file_paths = [file_base + file_ext]\n\n        return file_paths\n\n","sub_path":"python/music_sheet_maker.py","file_name":"music_sheet_maker.py","file_ext":"py","file_size_in_byte":35726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
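# A toy sketch of the numbered-output-path scheme from build_file_paths above;
# the function name and arguments are hypothetical.
import os

def numbered_paths(out_dir, title, ext, numfiles):
    if numfiles <= 1:
        return [os.path.join(out_dir, title + ext)]
    return [os.path.join(out_dir, '%s%d%s' % (title, i, ext)) for i in range(numfiles)]

# numbered_paths('songs_out', 'My Song', '.svg', 2)
# -> ['songs_out/My Song0.svg', 'songs_out/My Song1.svg']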
{"seq_id":"635678934","text":"import os\nimport yaml\nimport json\nimport validators\nfrom slugify import slugify\nfrom urllib.parse import urlparse\n\nclass RiskAssessment:\n\n    _validation_errors = []\n\n    employer = None\n    employer_website_url = None\n    employer_address = None\n    workplace = None\n    workplace_website_url = None\n    workplace_address = None\n    workplace_approximate_number_of_workers = None\n    public_visit = None\n    risk_assessment_status = None\n    risk_assessment_url = None\n    risk_assessment_title = None\n    risk_assessment_date = None\n    additional_information_url = None\n    union_presence = None\n    sic_codes = []\n    company_number = None\n\n    def to_json(self):\n\n        data = {\n            \"employer\": self.employer,\n            \"employer_website_url\": self.employer_website_url,\n            \"employer_address\": self.employer_address,\n            \"workplace\": self.workplace,\n            \"workplace_website_url\": self.workplace_website_url,\n            \"workplace_address\": self.workplace_address,\n            \"workplace_approximate_number_of_workers\": self.workplace_approximate_number_of_workers,\n            \"public_visit\": self.public_visit,\n            \"risk_assessment_status\": self.risk_assessment_status,\n            \"risk_assessment_url\": self.risk_assessment_url,\n            \"risk_assessment_title\": self.risk_assessment_title,\n            \"risk_assessment_date\": self.risk_assessment_date.strftime(\"%Y-%m-%d\") if self.risk_assessment_date else None,\n            \"additional_information_url\": self.additional_information_url,\n            \"union_presence\": self.union_presence,\n            \"sic_codes\": self.sic_codes,\n            \"company_number\": self.company_number,\n        }\n        return json.dumps(data)\n\n    def get_slug(self):\n        return slugify(\"%s %s\" % (self.employer, self.workplace))\n\n    def get_risk_assessment_domain(self):\n        return urlparse(self.risk_assessment_url).netloc\n\n    def is_valid(self):\n        self._validation_errors = []\n        if self.employer is None:\n            self._validation_errors.append(\"Employer is a required field\")\n        if self.employer_website_url is not None and not validators.url(self.employer_website_url):\n            self._validation_errors.append(\"Employer website url must be a url or None\")\n        if self.workplace is None:\n            self._validation_errors.append(\"Workplace is a required field\")\n        if self.workplace_website_url is not None and not validators.url(self.workplace_website_url):\n            self._validation_errors.append(\"Workplace website url must be a url or None\")\n        if not isinstance(self.workplace_approximate_number_of_workers, int) and self.workplace_approximate_number_of_workers is not None:\n            self._validation_errors.append(\"Number of workers must be an integer or None\")\n        if self.public_visit not in (True, False, None):\n            self._validation_errors.append(\"Public visit must be True, False or None\")\n        if self.risk_assessment_status not in (\"public\", \"private\", \"on request\", None):\n            self._validation_errors.append(\"Risk assessment status must be public, private, on request or None\")\n        if self.risk_assessment_url is not None and not validators.url(self.risk_assessment_url):\n            self._validation_errors.append(\"Risk assessment URL must be a url or None\")\n        if not isinstance(self.risk_assessment_title, str) and self.risk_assessment_title is not None:\n            self._validation_errors.append(\"Risk assessment title must be a string or None\")\n        if self.additional_information_url is not None and not validators.url(self.additional_information_url):\n            self._validation_errors.append(\"Additional information URL must be a url or None\")\n        if self.union_presence not in (True, False, None):\n            self._validation_errors.append(\"Union presence must be True, False or None\")\n        if not isinstance(self.sic_codes, list):\n            self._validation_errors.append(\"Sic codes must be a list\")\n\n        return self._validation_errors == []\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
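# A hedged usage sketch for the RiskAssessment model above; the field values
# are invented for illustration only.
ra = RiskAssessment()
ra.employer = 'Acme Ltd'
ra.workplace = 'Acme Depot'
if ra.is_valid():
    print(ra.to_json())  # serializes all fields, dates as YYYY-MM-DD
else:
    print(ra._validation_errors)  # populated by the is_valid() call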
{"seq_id":"249934151","text":"\"\"\"Testing Multiplication class\"\"\"\nfrom calc.calculations.multiplication import Multiplication\n\n\ndef test_calculator_multiply_static():\n    \"\"\"testing that our calculator correctly multiplies a sequence of numbers\"\"\"\n    #Arrange\n    mynumbers = (1.0,2.0,3.0)\n    multiplication = Multiplication(mynumbers)\n    #Act\n    #Assert\n    assert multiplication.get_result() == 6.0\n","sub_path":"tests/multiplication_test.py","file_name":"multiplication_test.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"581831668","text":"# -*- coding: utf-8 -*-\nimport requests\nimport json\nimport csv\nimport pyproj\nimport modules.mapper as mapper\nimport geojson\nimport datetime\nimport folium\n\n#-----------------------------\n# CSV module\ndef read_csv(filename):\n    with open(filename, \"r\") as csv_file:\n        csv_reader = csv.reader(csv_file)\n        next(csv_reader)\n\n        data = []\n        for line in csv_reader:\n            data.append(line)\n        return data\n\ndef dump_file(filename, data):\n    with open(filename, \"w\") as file:\n        file.write(data)\n\ndef create_geojson(fruitname, infile, outfile):\n    rows = read_csv(infile)\n    data = []\n    collection = {'features': {}}\n    for line in rows:\n        if line is not None:\n            coord = [0, 0]\n            try:\n                # Define the grids for the coordinate conversion\n                utm = pyproj.Proj(proj=\"utm\", zone=line[1])\n                dec = pyproj.Proj(init=\"epsg:4326\")\n                # Convert the UTM coordinates to lat/long\n                coord[1], coord[0] = pyproj.transform(utm, dec, line[2], line[3])\n            except Exception:\n                print(fruitname + \": error while converting the coordinates from UTM to lat/long: \", line[2], line[3] + \"\\n\")\n\n            # Build the GeoJSON feature\n            properties = {\n                \"UTM Region\": line[1],\n                \"X_Coord\": line[2],\n                \"Y_Coord\": line[3],\n                #\"Strschl\": line[12],\n                #\"Straße\": line[13],\n                #\"Hausnummer\": line[14],\n                #\"Kennung\": line[18],\n                \"Pflanzjahr\": line[4],\n                \"Höhe\": int(line[12]) if line[12] != \"0\" else \"keine Angabe\",\n                \"Baumumfang\": int(line[30]) if line[30] != \"0\" else \"keine Angabe\",\n                \"Alter\": int(datetime.datetime.now().year) - int(line[4]),\n                \"Name\": fruitname,\n                \"Deutsch\": line[10],\n                \"Gattung\": line[7],\n                \"Art\": line[8],\n                \"Sorte\": line[9],\n                #\"Kürzel\": line[6],\n                \"Reifezeit\": line[11],\n                #\"Artenschutz\": line[44],\n            }\n            print(str(properties) + \"\\n\")\n\n            point = geojson.Point((coord[1], coord[0]))\n            feature = geojson.Feature(geometry=point, id=line[0], properties=properties)\n            data.append(feature)\n\n    collection = geojson.FeatureCollection(data)\n\n    # save to file\n    dump_file(outfile, str(collection))\n    return collection\n\n
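# A hedged alternative sketch of the UTM -> lat/long conversion performed in
# create_geojson above, using pyproj's newer Transformer API (pyproj.transform
# is deprecated in pyproj >= 2); the function name is hypothetical.
from pyproj import Transformer

def utm_to_latlon(easting, northing, zone):
    transformer = Transformer.from_crs("+proj=utm +zone=%s +ellps=WGS84" % zone,
                                       "EPSG:4326", always_xy=True)
    # always_xy=True makes the output (lon, lat) order explicit
    lon, lat = transformer.transform(float(easting), float(northing))
    return lat, lon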
def create_geojson_markers(geojson, map, feature_group, prefix=None, icon=None, color=\"blue\", tooltip=None, popup=False, recipelink=\"\", infolink=\"\", infofruit=\"\", season=\"Sommer\", show=True):\n    data = geojson\n\n    # Create the layer\n    map.addFeatureSubGroup(feature_group, season, show=show)\n\n    for feature in data[\"features\"]:\n        lon, lat = feature[\"geometry\"][\"coordinates\"]\n\n        # Popup (HTML)\n        popup = \"~ \" + feature[\"properties\"][\"Name\"] + \" ( \" + feature[\"properties\"][\"Gattung\"] + \" \" + feature[\"properties\"][\"Art\"] + \" \" + feature[\"properties\"][\"Sorte\"] + \") ~\" + \"<br><br>Pflanzjahr: \" + str(feature[\"properties\"][\"Pflanzjahr\"]) + \"<br>Alter: \" + str(feature[\"properties\"][\"Alter\"]) + \"<br>Höhe: \" + str(feature[\"properties\"][\"Höhe\"]) + \"<br>Baumumfang: \" + str(feature[\"properties\"][\"Baumumfang\"]) + \"<br>Reifezeit: \" + season + \"<br>___________________________________<br>\" + \"Weitere Infos\" + \" | \" + \"Rezept\" + \"\"\n\n        try:\n            map.addMarkerToFeatureGroup(feature_group, [lat, lon], prefix=prefix, icon=icon, popup=popup, tooltip=feature[\"properties\"][\"Name\"], color=color)\n        except Exception:\n            print(\"Error while placing the marker for:\\n\", feature)\n","sub_path":"Karte/modules/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
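# An illustrative sketch of building an HTML popup like the one above with
# folium; folium.Popup is a documented folium class, and the property keys
# mirror those written by create_geojson.
import folium

def make_popup(props, season):
    html = "<br>".join([
        "~ %s (%s %s %s) ~" % (props["Name"], props["Gattung"], props["Art"], props["Sorte"]),
        "Pflanzjahr: %s" % props["Pflanzjahr"],
        "Reifezeit: %s" % season,
    ])
    return folium.Popup(html, max_width=300)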
{"seq_id":"597712864","text":"\nimport sys\nimport pygame\nfrom foto import Foto\n\nclass Blue:\n    \"\"\"Class that manages the game's resources and behavior.\"\"\"\n\n    def __init__(self):\n        \"\"\"Initializes the game and creates the game resources.\"\"\"\n        pygame.init()\n        # Screen size\n        self.screen = pygame.display.set_mode((1200, 700))\n        pygame.display.set_caption(\"Alien\")\n        # Set the background color.\n        self.bg_color = (0, 0, 230)\n\n        self.foto = Foto(self)\n\n    def run_game(self):\n        \"\"\"Starts the main game loop.\"\"\"\n        while True:\n            # Watch for keyboard and mouse events.\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    sys.exit()\n\n            # Redraw the screen on every pass of the loop.\n            self.screen.fill(self.bg_color)\n            self.foto.blitme()\n            # Make the most recently drawn screen visible.\n            pygame.display.flip()\n\nif __name__ == '__main__':\n    # Create an instance and run the game.\n    b = Blue()\n    b.run_game()","sub_path":"14/homework/blue.py","file_name":"blue.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"530425098","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nPyMzn can be configured with custom executable paths and other variables.\nConfiguration is done via the module ``pymzn.config``. 
For instance::\n\n import pymzn.config\n\n pymzn.config.set('mzn2fzn', 'path/to/mzn2fzn')\n pymzn.config.set('solns2out', 'path/to/solns2out')\n\nThe configurable properties used by PyMzn are the following:\n\n * **mzn2fzn**: Path to the *mzn2fzn* executable;\n * **solns2out**: Path to the *solns2out* executable;\n * **solver**: Solver instance to use when calling pymzn.minizinc;\n * **solver_args**: Arguments to pass to the solver when calling pymzn.minizinc;\n * **all_solutions**: Overrides the all_solutions flag for minizinc calls;\n * **num_solutions**: Overrides the num_solutions value for minizinc calls;\n * **statistics**: Overrides the statistics flag for all calls to minizinc;\n * **timeout**: Overrides the timeout for all calls to minizinc;\n * **args**: Additional arguments to pass to the template engine;\n * **include**: List of search paths to include in all mzn2fzn calls;\n * **keep**: Overrides the keep flag of all minizinc and mzn2fzn calls;\n * **output_dir**: Set a default output directory for generated files;\n * **force_flatten**: Overrides the force_flatten flag of all minizinc calls;\n * **no_output_annotations**: Overrides the no_output_annotation flag of all\n minizinc calls;\n * **dzn_width**: The horizontal character limit for dzn files;\n This property is used to wrap long dzn statements when writing dzn files.\n This property is also used in the ``pymzn.minizinc`` function as a limit to\n decide whether to write the inline data into a file.\n\nOne can also set custom properties to be used for custom solvers.\n\n\nDebug\n-----\n\nPyMzn can also be set to print debugging messages on standard output via::\n\n pymzn.debug()\n\nThis function is meant to be used in interactive sessions or in\napplications that do not configure the ``logging`` library. If you configure the\n``logging`` library in your application, then PyMzn will print logging messages\nas well. The logging level in PyMzn is always ``DEBUG``. 
To disable\nmessages you can then call::\n\n    pymzn.debug(False)\n\n\"\"\"\nimport os\n\n\n_modified = False\n_config = None\n_defaults = {\n    'mzn2fzn': 'mzn2fzn',\n    'solns2out': 'solns2out',\n    'dzn_width': 70\n}\n\n\ndef _cfg_file():\n    try:\n        import appdirs\n        return os.path.join(appdirs.user_config_dir(__name__), 'config.yml')\n    except ImportError:\n        return None\n\n\ndef get(key, default=None):\n    \"\"\"Get the value of a configuration variable.\n\n    Parameters\n    ----------\n    key : str\n        The key of the variable to retrieve.\n    default\n        The default value to return if the key does not exist.\n\n    Returns\n    -------\n    The value associated with the key if the key exists, otherwise the default\n    if provided.\n    \"\"\"\n    global _config\n    if _config is None:\n        _config = {}\n        try:\n            import yaml\n            cfg_file = _cfg_file()\n            if cfg_file and os.path.isfile(cfg_file):\n                with open(cfg_file) as f:\n                    _config = yaml.safe_load(f)\n        except ImportError:\n            pass\n    if default is None:\n        default = _defaults.get(key)\n    return _config.get(key, default)\n\n\ndef set(key, value):\n    \"\"\"Set the value of a configuration variable.\n\n    Parameters\n    ----------\n    key : str\n        The key of the variable to set.\n    value\n        The value to assign to the variable.\n    \"\"\"\n    global _config\n    global _modified\n    if get(key) != value:\n        _config[key] = value\n        _modified = True\n\n\ndef dump():\n    \"\"\"Writes the changes to the configuration file.\"\"\"\n    global _config\n    global _modified\n    if _modified:\n        try:\n            import yaml\n            cfg_file = _cfg_file()\n            cfg_dir, __ = os.path.split(cfg_file)\n            os.makedirs(cfg_dir, exist_ok=True)\n            with open(cfg_file, 'w') as f:\n                yaml.dump(_config, f)\n            _modified = False\n        except ImportError as err:\n            raise RuntimeError(\n                'Cannot dump the configuration settings to file. 
You need to '\n 'install the necessary dependencies (pyyaml, appdirs).'\n ) from err\n\n","sub_path":"pymzn/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"531608240","text":"from __future__ import (absolute_import, division, print_function)\n\nimport unittest\n\nfrom fitbenchmarking.resproc.misc import display_name_for_minimizers\nfrom fitbenchmarking.resproc.misc import weighted_suffix_string\nfrom fitbenchmarking.resproc.misc import build_items_links\n\n\nclass MiscTests(unittest.TestCase):\n def test_displayNameForMinimizers_return_minimizer_mock_names(self):\n names = ['Minimizer1', 'Minimizer2', 'Minimizer3', 'Minimizer4',\n 'Minimizer5', 'Minimizer6', 'Minimizer7', 'Minimizer8',\n 'Minimizer9', 'DTRS']\n\n display_names = display_name_for_minimizers(names)\n display_names_expected = ['Minimizer1', 'Minimizer2', 'Minimizer3',\n 'Minimizer4', 'Minimizer5',\n 'Minimizer6', 'Minimizer7', 'Minimizer8',\n 'Minimizer9', 'Trust Region']\n\n self.assertListEqual(display_names_expected, display_names)\n\n def test_weightedSuffixString_return_string_value_weighted(self):\n value = weighted_suffix_string(True)\n self.assertEqual(value, 'weighted')\n\n def test_weightedSuffixString_return_string_value_unweighted(self):\n value = weighted_suffix_string(False)\n self.assertEqual(value, 'unweighted')\n\n def test_buildItemsLinks_return_summary_links(self):\n comparison_type = 'summary'\n comparison_dim = 'accuracy'\n using_errors = True\n\n items_link = \\\n build_items_links(comparison_type, comparison_dim, using_errors)\n items_link_expected = ['Minimizers_weighted_comparison_in_terms_of'\n '_accuracy_nist_lower',\n 'Minimizers_weighted_comparison_in_terms_of'\n '_accuracy_nist_average',\n 'Minimizers_weighted_comparison_in_terms_of'\n '_accuracy_nist_higher',\n 'Minimizers_weighted_comparison_in_terms_of'\n '_accuracy_cutest',\n 'Minimizers_weighted_comparison_in_terms_of'\n '_accuracy_neutron_data']\n\n self.assertListEqual(items_link_expected, items_link)\n\n def test_buildItemsLinks_return_accuracy_links(self):\n\n comparison_type = 'accuracy'\n comparison_dim = ''\n using_errors = True\n\n items_link = \\\n build_items_links(comparison_type, comparison_dim, using_errors)\n items_link_expected = 'FittingMinimizersComparisonDetailedWithWeights'\n\n self.assertEqual(items_link_expected, items_link)\n\n def test_buildItemsLinks_return_runtime_links(self):\n\n comparison_type = 'runtime'\n comparison_dim = ''\n using_errors = False\n\n items_link = \\\n build_items_links(comparison_type, comparison_dim, using_errors)\n items_link_expected = 'FittingMinimizersComparisonDetailed'\n\n self.assertEqual(items_link_expected, items_link)\n\n def test_buildItemsLinks_return_empty_itemsLinks_invalid_comparison(self):\n\n comparison_type = 'pasta'\n comparison_dim = ''\n using_errors = False\n\n items_link = \\\n build_items_links(comparison_type, comparison_dim, using_errors)\n items_link_expected = ''\n\n self.assertEqual(items_link_expected, items_link)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"fitbenchmarking/resproc/tests/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"148268520","text":"import pandas as pd\nimport os\n#from sodapy import Socrata\n\n#_airflow_root = os.path.join('Intervals', 
'airflow')\n#_pipe_root = os.path.join(_airflow_root, 'us-central1-transit-pipe-en-af986b93-bucket')\n#_dags_root = os.path.join(_pipe_root, 'dags')\n_pipe_root = os.path.dirname(__file__)\n_pipe_root = _pipe_root.split(\"/\")\n_data_root = os.path.join(_pipe_root[-2], 'data')\n\ndef import_data(): #check arguments\n\n    #new data\n    #client = Socrata(\"data.cityofnewyork.us\", \"TwSER6tYF74jBi5yYFZAj9HYy\")\n    #results = client.get(\"i4gi-tjb9\", limit=2000)\n    #df_new = pd.DataFrame.from_records(results)\n\n    df_new = pd.read_json(\"https://data.cityofnewyork.us/resource/i4gi-tjb9.json\")\n\n    df_new.to_csv(os.path.join(_data_root, \"01_orig_data.csv\"))\n    return\n\ndef pre_proc_calc_medias():\n    df = pd.read_csv(os.path.join(_data_root, \"01_orig_data.csv\"), index_col=0)\n\n    link_points = df.link_points\n    media_lat = []\n    media_long = []\n\n    for x in link_points: #for each row\n        points = x.split(' ') #split the coordinate pairs\n        lat = []\n        long = []\n        for y in points: #for each (lat, long) point in points\n            point = y.split(',') #separate lat from long\n\n            if(point[0] == ''):\n                lat.append(None)\n            else:\n                lat.append(float(point[0]))\n\n            if(len(point) == 2 and point[1] != ''): #some are empty(?)\n                if(point[1] != \"-\"):\n                    long.append(float(point[1]))\n                else:\n                    long.append(None)\n            else:\n                long.append(None)\n\n        long = list(filter(None, long))\n        lat = list(filter(None, lat))\n\n        media_lat.append(sum(lat)/len(lat))\n        media_long.append(sum(long)/len(long))\n\n    #new dataset with the averages and without the columns that will not be used\n    obj = {'data_as_of': df.data_as_of, 'link_points': df.link_points, 'media_lat': media_lat, 'media_long': media_long, 'speed': df.speed, 'travel_time': df.travel_time}\n    data_frame = pd.DataFrame(data=obj)\n    data_frame.to_csv(os.path.join(_data_root, \"02_pre_data_medias.csv\"))\n\n\ndef pre_proc_clean_data():\n    df = pd.read_csv(os.path.join(_data_root, \"02_pre_data_medias.csv\"), index_col=0)\n\n    df = df.drop(df[df.speed == 0].index) #remove rows where speed = 0\n    df.to_csv(os.path.join(_data_root, \"03_pre_data_clean.csv\"))\n\ndef pre_proc_merge_new_data():\n    df_old = pd.read_csv(os.path.join(_data_root, \"04_data.csv\"), index_col=0) #data generated by the previous request\n    df_new = pd.read_csv(os.path.join(_data_root, \"03_pre_data_clean.csv\"), index_col=0) #data generated by the current request\n\n    #conversions needed to merge the data frames\n    df_new['data_as_of'] = pd.to_datetime(df_new.data_as_of)\n    df_old['data_as_of'] = pd.to_datetime(df_old.data_as_of)\n\n    frames = [df_old, df_new]\n    df = pd.concat(frames)\n    #df = pd.merge(df_old, df_new)\n    df.to_csv(os.path.join(_data_root, \"04_data.csv\"))","sub_path":"dags/traffic_utils.py","file_name":"traffic_utils.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"292099170","text":"import os\nimport json\nimport numpy as np\nimport warnings\nimport random\nimport torch\nimport argparse\nfrom datasets import *\nfrom models import *\nfrom train import *\nfrom pathlib import Path\n\nwarnings.filterwarnings(\"ignore\")\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', default=1000, type=int, help='Number of epochs to train.')\nparser.add_argument('--lr_G', default=5e-5)\nparser.add_argument('--lr_D', default=3e-4)\nparser.add_argument('--generate_every', default=100)\nparser.add_argument('--train_type', default='WGAN')\nparser.add_argument('--print_every', default=20)\nparser.add_argument('--dataset', default='Wine', choices=['random_condition', 'Yeast', 'Wine'])\nargs = parser.parse_args()\n\nnum_gen = 200\nlr_gen = args.lr_G\nlr_dis = args.lr_D\nepochs = args.epochs\nprint_every = args.print_every\ngenerate_every = args.generate_every\nstep_per_epoch = 32\nrandom.seed(1)\ntorch.manual_seed(1)\ndataset = args.dataset\nX, Y = load_dataset(dataset)\n\nif __name__ == '__main__':\n    plt.switch_backend('agg')\n    GPU = torch.cuda.is_available()\n    print('GPU:{}'.format(GPU))\n    os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n    X1 = PCA(n_components=2).fit_transform(X)\n    plt.scatter(X1[:, 0], X1[:, 1], c=Y)\n    plt.savefig('pictures/init_{}.png'.format(dataset))\n    # plt.show()\n    plt.cla()\n    params = LoadParams(dataset)\n    G = VCGAN_generator(z_dim=params[\"z_dim\"], hidden_dim=params['hidden_dim'], x_dim=params['x_dim'],\n                        num_layer=params['num_layer'],\n                        col_types=params['col_types'],\n                        col_idxes=params['col_idxes'], c_dim=params['c_dim'])\n    D = VCGAN_discriminator(hidden_dim=params['hidden_dim'], x_dim=params['x_dim'], num_layer=params['num_layer'],\n                            c_dim=params['c_dim'], wgan=True)\n    G.apply(init_weights)\n    D.apply(init_weights)\n\n    WGAN_train(X, Y, G, D, dataset=dataset, epochs=epochs, lr_gen=lr_gen, lr_dis=lr_dis,\n               col_types=params['col_types'],\n               col_idxes=params['col_idxes'],\n               z_dim=params['z_dim'], print_every=print_every,\n               generate_every=generate_every,\n               num_gen=num_gen, step_per_epoch=step_per_epoch, GPU=GPU,\n               )\n\n    G = G.cpu()\n    torch.save(G, 'models/G_{}_{}_{}.pkl'.format(dataset, epochs, args.train_type))\n    CGAN_Generate(G, z_dim=params['z_dim'], num_c=params['c_dim'], dataset=dataset, num_gen=num_gen)\n    # plt.scatter(X[:, 0], X[:, 1], c=Y)\n    # plt.show()\n","sub_path":"train_WGAN.py","file_name":"train_WGAN.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
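# A minimal sketch of the chunked-persistence pattern used by add_one below,
# based on the paginate library's 1-based pages; store() is a hypothetical
# callback standing in for the repository.
import paginate

def store_in_pages(items, store, page_size=10):
    page_number = 1
    while True:
        page = paginate.Page(items, page=page_number, items_per_page=page_size)
        store(page.items)  # persist the current chunk, including the last one
        if not page.next_page:  # next_page is None on the last page
            break
        page_number = page.next_page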
{"seq_id":"327195339","text":"from typing import Dict\nfrom yaak import inject\nimport paginate\n\nfrom Src.BioAnalyzer.CrossCutting.DTOs.GenePrioritization.DataIntegrationDto import DataIntegrationDto\nfrom Src.BioAnalyzer.CrossCutting.Filters.GenePrioritization.FeSingleDataIntegration import FeSingleDataIntegration\nfrom Src.BioAnalyzer.DataAccess.Entities.GenePrioritization.DataIntegration import DataIntegration\nfrom Src.Core.Manager.ManagerBase import ManagerBase\n\n\nclass DataIntegratorManager(ManagerBase):\n    \"\"\"description of class\"\"\"\n\n    @inject.Param(repository='DataIntegrationRepositoryBase')\n    def __init__(self, repository):\n        \"\"\"\n\n        :param repository: \n        \"\"\"\n        super().__init__(repository)\n        self.__page_size = 10\n\n    def add_one(self, data_integration_dto: DataIntegrationDto):\n        fe_data_integration = self._repository.get_one(FeSingleDataIntegration(data_type=data_integration_dto.data_type,\n                                                                               conditional=data_integration_dto.conditional),\n                                                       DataIntegration,\n                                                       {'data_type': 1})\n\n        if fe_data_integration.result:\n            self._repository.delete_one(fe_data_integration)\n\n        adj_list = data_integration_dto.adjacency_list\n\n        page = paginate.Page(adj_list, page=0, items_per_page=self.__page_size)\n\n        while True:\n\n            data_integration_dto.adjacency_list = page.items\n            self._repository.add_one(data_integration_dto)\n\n            if not page.next_page:\n                break\n\n            page = paginate.Page(adj_list, page=page.next_page, items_per_page=self.__page_size)\n\n    def get_one(self, fe_data_integration: FeSingleDataIntegration,\n                include_or_exclude_fields: Dict[str, int] = None) -> FeSingleDataIntegration:\n        \"\"\"\n\n        :param fe_data_integration: \n        :param include_or_exclude_fields: \n        :return: \n        \"\"\"\n        return self._repository.get_one(fe_data_integration,\n                                        DataIntegration,\n                                        include_or_exclude_fields)","sub_path":"Src/BioAnalyzer/Managers/GenePrioritization/DataIntegratorManager.py","file_name":"DataIntegratorManager.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"654242218","text":"'''\n1. You are given a partially filled 9*9 2-D array (arr) which represents an incomplete sudoku state.\n2. You are required to assign the digits from 1 to 9 to the empty cells following some rules.\n\nRule 1 -> Digits from 1-9 must occur exactly once in each row.\nRule 2 -> Digits from 1-9 must occur exactly once in each column.\nRule 3 -> Digits from 1-9 must occur exactly once in each 3x3 sub-array of the given 2D array.\n\nSample Input:\n\n3 0 6 5 0 8 4 0 0\n5 2 0 0 0 0 0 0 0\n0 8 7 0 0 0 0 3 1\n0 0 3 0 1 0 0 8 0\n9 0 0 8 6 3 0 0 5\n0 5 0 0 9 0 6 0 0\n1 3 0 0 0 0 2 5 0\n0 0 0 0 0 0 0 7 4\n0 0 5 2 0 6 3 0 0\n\nSample Output:\n\n3 1 6 5 7 8 4 9 2 \n5 2 9 1 3 4 7 6 8 \n4 8 7 6 2 9 5 3 1 \n2 6 3 4 1 5 9 8 7 \n9 7 4 8 6 3 1 2 5 \n8 5 1 7 9 2 6 4 3 \n1 3 8 9 4 7 2 5 6 \n6 9 2 3 5 1 8 7 4 \n7 4 5 2 8 6 3 1 9 \n\nhttps://www.youtube.com/watch?v=uyetDh-DyDg&list=PL-Jc9J83PIiHO9SQ6lxGuDsZNt2mkHEn0&index=1&t=1s\n\n'''\n\ndef isValid(board, x, y, val):\n    \n    n = len(board)\n    \n    # check that val does not already appear in column y\n    for i in range(n): \n        if(board[i][y] == val):\n            return False\n    \n    # check that val does not already appear in row x\n    for j in range(n):\n        if(board[x][j] == val):\n            return False\n    \n    smi = x//3 * 3 # formula to get starting row of sub matrix\n    smj = y//3 * 3 # formula to get starting col of sub matrix\n    \n    # check the 3x3 sub matrix\n    for i in range(3):\n        for j in range(3):\n            if(board[i+smi][j+smj] == val):\n                return False\n    \n    return True\n    \n\ndef solveSudoku(board, i, j):\n    \n    if(i == len(board)):\n        displayBoard(board)\n        return\n    \n    ni = 0 # next i\n    nj = 0 # next j\n    \n    if(j == len(board[0]) - 1):\n        ni = i + 1\n        nj = 0\n    else:\n        ni = i\n        nj = j + 1\n    \n    if(board[i][j] != 0):\n        solveSudoku(board, ni, nj)\n    \n    else:\n        for po in range(1, 10):\n            if(isValid(board, i, j, po)):\n                board[i][j] = po # possible option\n                solveSudoku(board, ni, nj)\n                board[i][j] = 0\n        \ndef displayBoard(board):\n    \n    for i in range(len(board)):\n        for j in range(len(board[0])):\n            print(board[i][j], end = \" \")\n        print()\n\n\ngrid =[\n    [3, 0, 6, 5, 0, 8, 4, 0, 0], \n    [5, 2, 0, 0, 0, 0, 0, 0, 0], \n    [0, 8, 7, 0, 0, 0, 0, 3, 1], \n    [0, 0, 3, 0, 1, 0, 0, 8, 0], \n    [9, 0, 0, 8, 6, 3, 0, 0, 5], \n    [0, 5, 0, 0, 9, 0, 6, 0, 0], \n    [1, 3, 0, 0, 0, 0, 2, 5, 0], \n    [0, 0, 0, 0, 0, 0, 0, 7, 4], \n    [0, 0, 5, 2, 0, 6, 3, 0, 0]\n    ]\n\nsolveSudoku(grid, 0, 0)","sub_path":"pepcoding/backtracking/1_solve_sudoku.py","file_name":"1_solve_sudoku.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
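# A variant sketch: solveSudoku above prints every solution it finds; if only
# the first solution is wanted, propagating True up the recursion stops the
# search early. Reuses isValid from the file above.
def solve_first(board, i=0, j=0):
    if i == len(board):
        return True
    ni, nj = (i, j + 1) if j < len(board[0]) - 1 else (i + 1, 0)
    if board[i][j] != 0:
        return solve_first(board, ni, nj)
    for po in range(1, 10):
        if isValid(board, i, j, po):
            board[i][j] = po
            if solve_first(board, ni, nj):
                return True
            board[i][j] = 0  # backtrack
    return False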
{"seq_id":"342777367","text":"#!/usr/bin/env python\n# ----------------------------------------------------------------------------\n# Copyright 2015-2016 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n\nimport os\nimport numpy as np\nimport pickle as pkl\n\nfrom aeon.dataloader import DataLoader\nfrom neon.backends import gen_backend\nfrom neon.util.argparser import NeonArgparser, extract_valid_args\nfrom neon.models import Model\nfrom neon.data.dataloader_transformers import TypeCast, Retuple\n\nfrom decoder import ArgMaxDecoder\nfrom utils import get_wer\n\n\ndef data_transform(dl):\n    \"\"\" Data is loaded from Aeon as a 4-tuple. We need to cast the audio\n    (index 0) from int8 to float32 and repack the data into (audio, 3-tuple).\n    \"\"\"\n\n    dl = TypeCast(dl, index=0, dtype=np.float32)\n    dl = Retuple(dl, data=(0,), target=(1, 2, 3))\n    return dl\n\n# Parse the command line arguments\narg_defaults = {'batch_size': 32}\nparser = NeonArgparser(__doc__, default_overrides=arg_defaults)\nparser.add_argument('--use_wer', action=\"store_true\",\n                    help='compute wer instead of cer.')\nparser.add_argument('--inference_file', default=None,\n                    help='saves results in inference_file.')\nparser.add_argument('--print_examples', action=\"store_true\",\n                    help='print an example transcript for each batch')\nargs = parser.parse_args()\n\nif args.model_file is None:\n    raise ValueError(\"A model file is required for evaluation\")\n\nif \"val\" not in args.manifest:\n    raise ValueError(\"Please provide an argument of the form:\\n\" +\n                     \"--manifest val:/path/to/validation/manifest.csv\")\n\n# Setup parameters for argmax decoder\nalphabet = \"_'ABCDEFGHIJKLMNOPQRSTUVWXYZ \"\nnout = len(alphabet)\nargmax_decoder = ArgMaxDecoder(alphabet, space_index=alphabet.index(\" \"))\n\n# Initialize our backend\nbe = gen_backend(**extract_valid_args(args, gen_backend))\n\n# Setup dataloader\neval_manifest = args.manifest['val']\nif not os.path.exists(eval_manifest):\n    raise IOError(\"Manifest file {} not found\".format(eval_manifest))\n\n# Setup required dataloader parameters\nnbands = 13\nmax_utt_len = 30\nmax_tscrpt_len = 1300\n\n# Audio transformation parameters\nfeats_config = dict(sample_freq_hz=16000,\n                    max_duration=\"{} seconds\".format(max_utt_len),\n                    frame_length=\".025 seconds\",\n                    frame_stride=\".01 seconds\",\n                    feature_type=\"mfsc\",\n                    num_filters=nbands)\n\n# Transcript transformation parameters\ntranscripts_config = dict(\n    alphabet=alphabet,\n    max_length=max_tscrpt_len,\n    pack_for_ctc=True)\n\n# Initialize dataloader\neval_cfg_dict = dict(type=\"audio,transcription\",\n                     audio=feats_config,\n                     transcription=transcripts_config,\n                     manifest_filename=eval_manifest,\n                     macrobatch_size=be.bsz,\n                     minibatch_size=be.bsz)\neval_set = DataLoader(backend=be, config=eval_cfg_dict)\neval_set = data_transform(eval_set)\n\n# Load the model\nmodel = Model(args.model_file)\n\n# Process data and compute stats\nwer, sample_size, results = get_wer(model, be, eval_set, argmax_decoder, nout,\n                                    use_wer=args.use_wer, print_examples=args.print_examples)\n\nprint(\"\\n\" + \"-\" * 80)\nif args.use_wer:\n    print(\"wer = {}\".format(wer))\nelse:\n    print(\"cer = {}\".format(wer))\nprint(\"-\" * 80 + \"\\n\")\n\nif args.inference_file:\n    # Save results in args.inference_file\n    with open(args.inference_file, 'wb') as f:\n        pkl.dump((results, wer), f)\n    print(\"Saved inference results to 
{}\".format(args.inference_file))\n","sub_path":"speech/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"569701071","text":"#encoding:utf-8\n\nimport csv\nimport importlib\nimport random\nimport datetime\n\nimport pymongo\nimport yaml\n\nfrom utils import SupplyResult\n\n\nsubreddit = 'all'\nt_channel = '@r_channels'\n\n\ndef get_active_period(r2t, channel_name):\n min_cursor = r2t.stats.find({'channel' : channel_name.lower()}).sort([('ts', pymongo.ASCENDING)]).limit(1)\n min_ts = min_cursor.next()['ts']\n max_cursor = r2t.stats.find({'channel' : channel_name.lower()}).sort([('ts', pymongo.DESCENDING)]).limit(1)\n max_ts = max_cursor.next()['ts']\n diff = max_ts - min_ts\n return diff.days\n\n\ndef get_newly_active(r2t, channels_list):\n newly_active = list()\n for channel in channels_list:\n days_active = get_active_period(r2t, channel)\n if days_active <= 31:\n newly_active.append(channel)\n return newly_active\n\n\ndef get_top_growers_for_last_week(r2t, channels_list):\n top_growers = dict()\n now = datetime.datetime.now()\n for channel in channels_list:\n week_ago_cursor = r2t.stats.find({\n 'channel': channel.lower(),\n 'ts': {'$gte': now - datetime.timedelta(days=7)}\n }).sort([('ts', pymongo.ASCENDING)]).limit(100)\n for stat_record in week_ago_cursor:\n if 'members_cnt' in stat_record:\n week_ago_members_cnt = stat_record['members_cnt']\n break\n current_cursor = r2t.stats.find({'channel': channel.lower()}).sort([('ts', pymongo.DESCENDING)]).limit(100)\n for stat_record in current_cursor:\n if 'members_cnt' in stat_record:\n current_members_cnt = stat_record['members_cnt']\n break\n grow = current_members_cnt - week_ago_members_cnt\n if grow >= 10:\n top_growers[channel] = grow\n return sorted(top_growers, key=top_growers.get, reverse=True)[:3]\n\n\ndef send_post(submission, r2t):\n config_filename = 'configs/prod.yml'\n with open(config_filename) as config_file:\n config = yaml.load(config_file.read())\n channels_list = list()\n with open(config['cron_file']) as tsv_file:\n tsv_reader = csv.DictReader(tsv_file, delimiter='\\t')\n for row in tsv_reader:\n submodule_name = row['submodule_name']\n submodule = importlib.import_module('channels.{}.app'.format(submodule_name))\n channel_name = submodule.t_channel\n \n if ('@' in channel_name) and (channel_name not in ['@r_channels_test', '@r_channels']):\n channels_list.append(channel_name)\n newly_active = get_newly_active(r2t, channels_list)\n text_to_send = 'Weekend news\\n\\n'\n if len(newly_active) > 0:\n text_to_send += '🎉 Welcome to newly active channels: {channels_list}. 🎈🎈\\n\\n'.format(channels_list=', '.join(newly_active))\n text_to_send += '🏆 Channel of the week: {channel_name}. Join and enjoy!\\n\\n'.format(channel_name=random.choice(channels_list))\n top_growers = get_top_growers_for_last_week(r2t, channels_list)\n if len(top_growers) > 0:\n text_to_send += '🔥 Hottest channels of the week: {channels}.\\n\\n'.format(channels=', '.join(top_growers))\n list_of_channels = ['{n}. 
{channel}'.format(n=str(i + 1).zfill(2), channel=channel)\n for i, channel in enumerate(random.sample(channels_list, k=len(channels_list)))]\n text_to_send += '⬇️ All active channels:\\n{list_of_channels}\\n\\n'.format(list_of_channels='\\n'.join(list_of_channels))\n text_to_send += '🙋\\nQ: How can I help?\\nA: Promote your favorite channels!\\n\\n'\n text_to_send += 'Q: How to make similar channels?\\nA: Ask here or use manual at https://github.com/Fillll/reddit2telegram.\\n\\n'\n text_to_send += 'Q: Where to donate?\\nA: http://bit.ly/r2t_donate'\n r2t.send_text(text_to_send, parse_mode='HTML')\n # It's not a proper supply, so just stop.\n return SupplyResult.STOP_THIS_SUPPLY\n","sub_path":"channels/tech_weekly_radar/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"648514840","text":"import shelve\nimport random\n\ndef BlackJack():\n print(\"->---<$>----<$>---<-\")\n choiceLoop = 0\n while choiceLoop<1:\n print(\"\\n-Rules(A)\\n-I already know how to play(B)\\n-Back(Back)\")\n choice = input(\": \").upper()\n if choice == \"A\":\n introToBlackJack()\n elif choice == \"B\":\n results = playBlackJack()\n if results == True:\n return 5\n if results == False:\n return -5\n if results == None:\n return 0\n elif choice == \"BACK\":\n return\n else:\n print(choice.capitalize() , \" is not an option. Try again.\")\n\ndef introToBlackJack():\n print(\"Noooooooooooooo!\")\n return\n\ndef playBlackJack():\n dealerHasMissed = False\n playerHasMissed = False\n score = 0\n dealerScore = 0\n cardTaken = False\n while True:\n if playerHasMissed == False:\n Rank = (\"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\")\n Set = (\" of Diamonds\", \" of Spades\", \" of Clubs\", \" of Hearts\")\n rank = random.choice(Rank)\n if rank == \"Ace\":\n score += 1\n elif rank == \"Jack\":\n score += 10\n elif rank == \"Queen\":\n score += 10\n elif rank == \"King\":\n score += 10\n else:\n score += int(rank)\n cards = (rank + random.choice(Set))\n if cardTaken == False:\n print(\"your first card is \" + cards)\n print(\"Your current score is \" + str(score))\n cardTaken = True\n break\n elif cardTaken == True:\n print(\"Your next card is \" + cards)\n print(\"Your current score is \" + str(score))\n break\n if score == dealerScore and score == 21:\n print(\"We drew!\\n\")\n return None\n if score == 21:\n print(\"You win!\\n\")\n if dealerScore > 21:\n print(\"The dealer is bust!\\n\")\n return True\n else:\n return True\n if score > 21:\n print(\"Your bust!\\n\")\n return False\n if dealerScore > 21:\n print(\"The dealer is bust!\\n\")\n return True\n if playerHasMissed == False:\n while True:\n askForMiss = input(\"do you want to hit(H) or miss(M)?\\n: \")\n if askForMiss == \"H\":\n print(\"O.K.\")\n break\n elif askForMiss == \"M\":\n playerHasMissed = True\n print(\"You have missed.\\nYou had a score of \" + str(score))\n break\n else:\n print(askForMiss.capitalize() + \" is not an option.\")\n\n if dealerHasMissed == False:\n dealerScore += random.randint(1, 12)\n dealerMiss = random.randint(0, 3)\n if dealerScore == 21:\n dealerMiss = 0\n if dealerMiss == 0:\n print(\"The dealer has missed.\\nHe had a score of \" + str(dealerScore))\n dealerHasMissed = True\n if dealerScore == score and playerHasMissed == True:\n print(\"You Drew!\")\n return None\n elif dealerScore < score and playerHasMissed == True:\n print(\"You win!\")\n return 
True\n elif dealerScore > score and playerHasMissed == True:\n print(\"The dealer has won\")\n return False\n else:\n print(\"The dealer has hit\")\n\n\ndef apps(score):\n while True:\n while True:\n with open(\"boughtApps.txt\") as f:\n Applications = f.readlines()\n Applications = [x.strip() for x in Applications]\n Applications.sort()\n Applications.sort()\n print(\"Applications\\n\")\n appNumber = 1\n for A in Applications:\n print(\"-\" + A + \"(\" + str(appNumber) + \")\")\n appNumber += 1\n print(\"-Back(Quit)\")\n chooseApp = input(\": \").upper()\n if chooseApp == \"QUIT\":\n return 0\n try:\n chooseAppNumber = int(chooseApp)\n if chooseAppNumber == 0:\n print(\"0 is not on the list\\n\")\n else:\n break\n except ValueError:\n print(chooseApp.capitalize() + \" is not a number.\")\n\n appNumber -= 1\n\n if chooseAppNumber > appNumber or chooseAppNumber < 0:\n print(str(chooseApp) + \" is not on the list.\")\n else:\n chooseAppNumber -= 1\n chosenApp = Applications[chooseAppNumber]\n if chosenApp == \"Number guessing game\":\n\n while True:\n score = 0\n gameScore = numberGuessingGame()\n\n if gameScore == -1:\n print(\"-1 point!\")\n score -= 1\n elif gameScore == 1:\n print(\"+1 point\")\n score += 1\n else:\n print(\"+\" + str(gameScore) + \" points\")\n score += gameScore\n\n choice = input(\"Again?\\n(Y or N)\\n: \").upper()\n\n if choice == \"Y\":\n input(\"Great!\")\n elif choice == \"N\":\n print(\"good bye!\")\n return score\n else:\n print(choice + \" is not an option.\")\n elif chosenApp == \"Blackjack\":\n\n while True:\n score = 0\n gameScore = BlackJack()\n if gameScore == -5:\n print(\"-5 points\")\n score -= 5\n elif gameScore == 5:\n print(\"+5 points\")\n score += 5\n else:\n print(\"+0 points\")\n\n choice = input(\"Again?\\n(Y or N)\\n: \").upper()\n\n if choice == \"Y\":\n input(\"Great!\")\n elif choice == \"N\":\n print(\"good bye!\")\n return score\n else:\n print(choice + \" is not an option.\")\n\n\ndef shop(score):\n boughtMessage = \"Great! Your purchase will be ready in your apps.\"\n while True:\n with open(\"shopStock.txt\") as f:\n stock = f.readlines()\n stock = [x.strip() for x in stock]\n stock.sort()\n while True:\n\n appNumber = 1\n\n for S in stock:\n print(\"-\" + S + \"(\" + str(appNumber) + \")\")\n appNumber += 1\n print(\"-Back(Quit)\")\n chooseApp = input(\": \").upper()\n if chooseApp == \"QUIT\":\n return 0\n try:\n chooseAppNumber = int(chooseApp)\n if chooseAppNumber == 0:\n print(\"0 is not on the list\\n\")\n else:\n break\n except ValueError:\n print(chooseApp.capitalize() + \" is not a number.\")\n\n appNumber -= 1\n\n if chooseAppNumber > appNumber or chooseAppNumber < 0:\n print(str(chooseApp) + \" is not on the list.\")\n else:\n chooseAppNumber -= 1\n chosenApp = stock[chooseAppNumber]\n if chosenApp == \"Blackjack: 20 points\":\n if score >= 20:\n print(boughtMessage)\n boughtApps = open(\"boughtApps.txt\", \"a\")\n\n boughtApps.write(\"Blackjack\\n\")\n\n readShopFiles = open(\"shopStock.txt\", \"r\")\n linesInFile = readShopFiles.readlines()\n readShopFiles.close()\n writeShopFiles = open(\"shopStock.txt\", \"w\")\n for line in linesInFile:\n if line != \"Blackjack: 20 points\" + \"\\n\":\n writeShopFiles.write(line)\n writeShopFiles.close()\n\n return 20\n else:\n print(\"Sorry but Blackjack is too expensive for you to afford. 
try some other games to save up points.\")\n if chosenApp == \"Hangman: 20 points\":\n if score >= 20:\n print(boughtMessage)\n boughtApps = open(\"boughtApps.txt\", \"a\")\n\n boughtApps.write(\"Hangman\\n\")\n\n readShopFiles = open(\"shopStock.txt\", \"r\")\n linesInFile = readShopFiles.readlines()\n readShopFiles.close()\n writeShopFiles = open(\"shopStock.txt\", \"w\")\n for line in linesInFile:\n if line != \"Hangman: 20 points\" + \"\\n\":\n writeShopFiles.write(line)\n writeShopFiles.close()\n\n return 20\n else:\n print(\"Sorry but Hangman is too expensive for you to afford. try some other games to save up points.\")\n if chosenApp == \"Noughts and crosses: 30 points\":\n if score >= 30:\n print(boughtMessage)\n boughtApps = open(\"boughtApps.txt\", \"a\")\n\n boughtApps.write(\"Noughts and crosses\\n\")\n readShopFiles = open(\"shopStock.txt\", \"r\")\n linesInFile = readShopFiles.readlines()\n readShopFiles.close()\n writeShopFiles = open(\"shopStock.txt\", \"w\")\n for line in linesInFile:\n if line != \"Noughts and crosses: 30 points\" + \"\\n\":\n writeShopFiles.write(line)\n writeShopFiles.close()\n\n return 30\n else:\n print(\"Sorry but Noughts and crosses is too expensive for you to afford. try some other games to save up points.\")\n if chosenApp == \"Mine blower: 50 points\":\n if score >= 50:\n print(boughtMessage)\n boughtApps = open(\"boughtApps.txt\", \"a\")\n\n boughtApps.write(\"Mine blower\\n\")\n readShopFiles = open(\"shopStock.txt\", \"r\")\n linesInFile = readShopFiles.readlines()\n readShopFiles.close()\n writeShopFiles = open(\"shopStock.txt\", \"w\")\n for line in linesInFile:\n if line != \"Mine blower: 50 points\" + \"\\n\":\n writeShopFiles.write(line)\n writeShopFiles.close()\n\n return 50\n else:\n print(\"Sorry but Mine Blower is too expensive for you to afford. try some other games to save up points.\")\n\n print(\"The app you have chosen is not in stock and will probably never be. Sorry.\\n\")\n\n\n\ndef numberGuessingGame():\n randomNumber = random.randint(1000,9999)\n\n while True:\n difficulty = input(\"Number guessing game!\\nChoose your difficulty\\n-Easy(A)-20 tries to guess the 4 digit number\\n-Hard(B)-10 tries to guess the 4 digit number. 4x all points.\\n: \").upper()\n\n if difficulty == \"A\":\n guessesLeft = 20\n elif difficulty == \"B\":\n guessesLeft = 10\n else:\n (difficulty.capitalize() + \" is not an option.\")\n print()\n\n while True:\n\n while True:\n userFriendly = str(guessesLeft)\n choice = input(\"Guess my four digit number in \" + userFriendly + \" guesses!\\n: \")\n print(\"\")\n\n try:\n value = float(choice)\n break\n except ValueError:\n print(\"That's not a number\\n\")\n\n if difficulty == \"A\":\n gameScore = guessesLeft\n else:\n gameScore = guessesLeft * 4\n \n if difficulty == \"A\":\n guessesTaken = 20 - guessesLeft\n else:\n guessesTaken = 10 - guessesLeft\n \n if value == randomNumber and gameScore == 20 or value == randomNumber and gameScore == 40 and difficulty == \"B\":\n print(\"Well done! You got it first try!\")\n return gameScore * 2\n \n if value == randomNumber:\n print(\"Well done! You got it in \" + str(guessesTaken) + \" tries!\")\n return gameScore\n \n if guessesLeft == 0:\n print(\"Oh no! You lost! 
better luck next time!\")\n return -1\n \n if value > randomNumber:\n print(\"This number is too high.\")\n \n if value < randomNumber:\n print(\"This number is too low.\")\n\n guessesLeft -= 1\n\n\ndef main(score):\n\n while True:\n userFriendlyScore = str(score)\n\n print(\"Menu:\\n\\n-Apps(A)\\n-Shop(B)\\n-Reset all save data(C)\\n-Save and quit(Quit)\\n\\nYou have \" + userFriendlyScore + \" points.\")\n menu = input(\"\\n: \").upper()\n\n if menu == \"A\":\n score += apps(score)\n \n elif menu == \"B\":\n score -= shop(score)\n d = shelve.open('score.txt')\n d['score'] = score\n d.close()\n\n elif menu == \"C\":\n\n while True:\n reset = input(\"Are you sure you want to reset?\\n(Y or N)\\n: \").upper()\n\n if reset == \"N\":\n break\n elif reset == \"Y\":\n password = input(\"\\nEnter password\\n: \")\n\n if password == \"FEAT\":\n print(\"Resetting all data...\")\n score = 0\n resetShopStock = open(\"shopStock.txt\", \"w\")\n resetShopStock.write(\"Blackjack: 20 points\\nHangman: 20 points\\nNoughts and crosses: 30 points\\nMine blower: 50 points\\n\")\n resetShopStock.close()\n resetBoughtApps = open(\"boughtApps.txt\", \"w\")\n resetBoughtApps.write(\"Number guessing game\\n\")\n resetBoughtApps.close()\n d = shelve.open('score.txt')\n d['score'] = score\n d.close()\n print(\"All data reset\")\n break\n else:\n quit()\n else:\n print(reset.capitalize() + \" is not an option.\")\n \n elif menu == \"QUIT\":\n print(\"Saving...\")\n d = shelve.open('score.txt') \n d['score'] = score \n d.close()\n print(\"Points saved!\")\n quit()\n else:\n print(menu.capitalize() + \" is not an option.\")\n \n \nprint(\"Opening save files...\")\nd = shelve.open('score.txt')\nscore = d['score']\nprint(\"Save files open'd.\")\n\nscore += 200\n\nmain(score)\n","sub_path":"Python/Mini Games/Mini Games.py","file_name":"Mini Games.py","file_ext":"py","file_size_in_byte":15582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"344384836","text":"\"\"\"Implements ADVI approximations.\"\"\"\nfrom typing import Optional, Union\nfrom collections import namedtuple\n\nimport arviz as az\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom pymc4 import flow\nfrom pymc4.coroutine_model import Model\nfrom pymc4.distributions.transforms import JacobianPreference\nfrom pymc4.inference.utils import initialize_sampling_state\nfrom pymc4.utils import NameParts\nfrom pymc4.variational import updates\n\ntfd = tfp.distributions\ntfb = tfp.bijectors\nADVIFit = namedtuple(\"ADVIFit\", \"approximation, losses\")\n\n\nclass Approximation(tf.Module):\n \"\"\"Base Approximation class.\"\"\"\n\n def __init__(self, model: Optional[Model] = None, random_seed: Optional[int] = None):\n if not isinstance(model, Model):\n raise TypeError(\n \"`fit` function only supports `pymc4.Model` objects, but you've passed `{}`\".format(\n type(model)\n )\n )\n\n self.model = model\n self._seed = random_seed\n self.state, self.deterministic_names = initialize_sampling_state(model)\n if not self.state.all_unobserved_values:\n raise ValueError(\n f\"Can not calculate a log probability: the model {model.name or ''} has no unobserved values.\"\n )\n\n self.unobserved_keys = self.state.all_unobserved_values.keys()\n self.target_log_prob = self._build_logfn()\n self.approx = self._build_posterior()\n\n def _build_logfn(self):\n \"\"\"Build vectorized logp function.\"\"\"\n\n @tf.function(autograph=False)\n def logpfn(*values, **kwargs):\n if kwargs and values:\n 
raise TypeError(\"Either list state should be passed or a dict one\")\n elif values:\n kwargs = dict(zip(self.unobserved_keys, values))\n st = flow.SamplingState.from_values(kwargs)\n _, st = flow.evaluate_model_transformed(self.model, state=st)\n return st.collect_log_prob()\n\n def vectorize_logp_function(logpfn):\n def vectorized_logpfn(*q_samples):\n return tf.vectorized_map(lambda samples: logpfn(*samples), q_samples)\n\n return vectorized_logpfn\n\n return vectorize_logp_function(logpfn)\n\n def _build_posterior(self):\n raise NotImplementedError\n\n def flatten_view(self):\n \"\"\"Flattened view of the variational parameters.\"\"\"\n pass\n\n def sample(self, n):\n \"\"\"Generate samples from posterior distribution.\"\"\"\n q_samples = dict(zip(self.unobserved_keys, self.approx.sample(n)))\n\n # TODO - Account for deterministics as well.\n # For all transformed_variables, apply inverse of bijector to sampled values to match support in constraint space.\n _, st = flow.evaluate_model(self.model)\n for transformed_name in self.state.transformed_values:\n untransformed_name = NameParts.from_name(transformed_name).full_untransformed_name\n transform = st.distributions[untransformed_name].transform\n if transform.JacobianPreference == JacobianPreference.Forward:\n q_samples[untransformed_name] = transform.forward(q_samples[transformed_name])\n else:\n q_samples[untransformed_name] = transform.inverse(q_samples[transformed_name])\n\n # Add a new axis so as n_chains=1 for InferenceData: handles shape issues\n trace = {k: v.numpy()[np.newaxis] for k, v in q_samples.items()}\n trace = az.from_dict(trace, observed_data=self.state.observed_values)\n return trace\n\n\nclass MeanField(Approximation):\n \"\"\"\n Mean Field ADVI.\n\n This class implements Mean Field Automatic Differentiation Variational Inference. It posits spherical \n Gaussian family to fit posterior. And assumes the parameters to be uncorrelated.\n\n References\n ----------\n - Kucukelbir, A., Tran, D., Ranganath, R., Gelman, A.,\n and Blei, D. M. (2016). Automatic Differentiation Variational\n Inference. 
arXiv preprint arXiv:1603.00788.\n \"\"\"\n\n def _build_loc(self, shape, dtype, name):\n loc = tf.Variable(tf.random.normal(shape, seed=self._seed), name=f\"{name}/mu\", dtype=dtype)\n return loc\n\n def _build_cov_matrix(self, shape, dtype, name):\n # As per `tfp.vi.fit_surrogate_posterior` docs, use `TransformedVariable` or `DeferredTensor`\n # to ensure all ops invoke gradients while applying transformation.\n scale = tfp.util.TransformedVariable(\n tf.fill(shape, value=tf.constant(1, dtype=dtype)),\n tfb.Softplus(), # For positive values of scale\n name=f\"{name}/sigma\",\n )\n return scale\n\n def _build_posterior(self):\n def apply_normal(dist_name):\n unobserved_value = self.state.all_unobserved_values[dist_name]\n shape = unobserved_value.shape\n dtype = unobserved_value.dtype\n return tfd.Normal(\n self._build_loc(shape, dtype, dist_name),\n self._build_cov_matrix(shape, dtype, dist_name),\n )\n\n # Should we use `tf.nest.map_structure` or `pm.utils.map_structure`?\n variational_params = tf.nest.map_structure(apply_normal, self.unobserved_keys)\n return tfd.JointDistributionSequential(variational_params)\n\n\nclass FullRank(Approximation):\n \"\"\"Full Rank Automatic Differential Variational Inference(Full Rank ADVI).\"\"\"\n\n def _build_loc(self):\n raise NotImplementedError\n\n def _build_cov_matrix(self):\n raise NotImplementedError\n\n def _build_posterior(self):\n raise NotImplementedError\n\n\nclass LowRank(Approximation):\n \"\"\"Low Rank Automatic Differential Variational Inference(Low Rank ADVI).\"\"\"\n\n def _build_loc(self):\n raise NotImplementedError\n\n def _build_cov_matrix(self):\n raise NotImplementedError\n\n def _build_posterior(self):\n raise NotImplementedError\n\n\ndef fit(\n model: Optional[Model] = None,\n method: Union[str, MeanField] = \"advi\",\n num_steps: int = 10000,\n sample_size: int = 1,\n random_seed: Optional[int] = None,\n optimizer=None,\n **kwargs,\n):\n \"\"\"\n Fit an approximating distribution to log_prob of the model.\n\n Parameters\n ----------\n model : Optional[:class:`Model`]\n Model to fit posterior against\n method : Union[str, :class:`Approximation`]\n Method to fit model using VI\n\n - 'advi' for :class:`MeanField`\n - 'fullrank_advi' for :class:`FullRank`\n - 'lowrank_advi' for :class:`LowRank`\n - or directly pass in :class:`Approximation` instance\n num_steps : int\n Number of iterations to run the optimizer\n sample_size : int\n Number of Monte Carlo samples used for approximation\n random_seed : Optional[int]\n Seed for tensorflow random number generator\n optimizer : TF1-style | TF2-style | from pymc4/variational/updates\n Tensorflow optimizer to use\n kwargs : Optional[Dict[str, Any]]\n Pass extra non-default arguments to\n ``tensorflow_probability.vi.fit_surrogate_posterior``\n\n Returns\n -------\n ADVIFit : collections.namedtuple\n Named tuple, including approximation, ELBO losses depending on the `trace_fn`\n \"\"\"\n _select = dict(advi=MeanField,)\n\n if isinstance(method, str):\n # Here we assume that `model` parameter is provided by the user.\n try:\n inference = _select[method.lower()](model, random_seed)\n except KeyError:\n raise KeyError(\n \"method should be one of %s or Approximation instance\" % set(_select.keys())\n )\n\n elif isinstance(method, Approximation):\n # Here we assume that `model` parameter is not provided by the user\n # as the :class:`Approximation` itself contains :class:`Model` instance.\n inference = method\n\n else:\n raise TypeError(\n \"method should be one of %s or Approximation 
instance\" % set(_select.keys())\n )\n\n # Defining `opt = optimizer or updates.adam()`\n # leads to optimizer initialization issues from tf.\n if optimizer:\n opt = optimizer\n else:\n opt = updates.adam()\n\n @tf.function(autograph=False)\n def run_approximation():\n losses = tfp.vi.fit_surrogate_posterior(\n target_log_prob_fn=inference.target_log_prob,\n surrogate_posterior=inference.approx,\n num_steps=num_steps,\n sample_size=sample_size,\n seed=random_seed,\n optimizer=opt,\n **kwargs,\n )\n return losses\n\n return ADVIFit(inference, run_approximation())\n","sub_path":"pymc4/variational/approximations.py","file_name":"approximations.py","file_ext":"py","file_size_in_byte":8664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"115201914","text":"import data_handler\nimport re\nfrom random import randint\nimport random\nimport time\nfrom datetime import date\nfrom datetime import datetime\nfrom datetime import timedelta\nimport sys\nimport os\nfrom django.utils import timezone\nfrom django.db.models import Max\nfrom django.db.models import Sum\nsys.path.append('../web') # needed for sibling import\nimport django\nos.environ.setdefault(\n \"DJANGO_SETTINGS_MODULE\",\n \"web.settings\"\n)\nfrom django.conf import settings\ndjango.setup()\nfrom halloffame.models import *\n\n\n# Adds the user to the db\ndef update_user_db(msg):\n # Check if the chat exists alredy or not:\n if Chat.objects.filter(id=msg['chat']['id']).count() > 0:\n pass\n else:\n chat = Chat(id=msg['chat']['id'])\n if int(msg['chat']['id']) < 0:\n chat.title = msg['chat']['title']\n chat.save()\n # TODO: Make update instead of replacing old user, just like for the chats\n # Telegram user\n mario = TelegramUser(id=str(msg['from']['id']))\n if 'first_name' in msg['from']:\n mario.firstName = msg['from']['first_name']\n if 'last_name' in msg['from']:\n mario.lastName = msg['from']['last_name']\n if 'username' in msg['from']:\n mario.nickname = msg['from']['username']\n mario.save()\n\n # The relation between tg user and chat\n if ChatMember.objects.filter(chat=str(msg['chat']['id']),\n tg_user=str(msg['from']['id']))\\\n .exists():\n pass\n else:\n chat_member = ChatMember(chat=Chat.objects.get(id=str(msg['chat']['id'])),\n tg_user=TelegramUser.objects.get(id=str(msg['from']['id'])))\n chat_member.save()\n return mario\n\n\ndef debug_handler(msg, bot):\n \"\"\"\n if msg['text'] == \"moi\":\n sunglasses = u\"\\U0001F60E\"\n reply = \"I'm back! \" + sunglasses + \" #hype\"\n bot.sendMessage(str(msg['chat']['id']), reply)\n \"\"\"\n if msg['text'] == \"TelegramUser.objects.all()\":\n reply = TelegramUser.objects.all()\n bot.sendMessage(str(msg['chat']['id']), str(reply))\n if msg['text'] == 'timezone.localtime(timezone.now())':\n reply = str(timezone.localtime(timezone.now()))\n bot.sendMessage(str(msg['chat']['id']), str(reply))\n \"\"\"\n if msg['text'] == \"ChatMember_test\":\n sender = ChatMember.objects.get(chat=str(msg['chat']['id']),\n tg_user=str(msg['from']['id']))\n sender.rank = sender.rank + 1\n sender.save()\n reply = ChatMember.objects.all()\n bot.sendMessage(str(msg['chat']['id']), str(reply))\n \"\"\"\n pass\n\n# If leet is missed [höh kukaan ei sanonut leet, voi rähmä 2 leetitöntä päivää putkeen :(,\n# eikö kukaan taaskaan? 
Lopun ajat ovat koittaneet,\n# yrittäkää nyt, harmittaa, masentaa,\n# mitä iloa on elää jos kukaan ei sano 1337?]\ndef bob_handler(msg, bot):\n bob_chat = Chat.objects.get(id=str(msg['chat']['id']))\n sender = ChatMember.objects.get(chat=str(msg['chat']['id']),\n tg_user=str(msg['from']['id']))\n if msg['text'] == '1337':\n print('[INFO] ' + time.strftime(\"%H:%M:%S\") + ' Received 1337 message. ')\n print('[INFO] ' + time.strftime(\"%H:%M:%S\") + ' bob_chat.latestLeet: ' + str(bob_chat.latestLeet))\n print('[INFO] ' + time.strftime(\"%H:%M:%S\") + ' date.today(): ' + str(date.today()))\n print('[INFO] ' + time.strftime(\"%H:%M:%S\") + ' Sender rank before: ' + str(sender.rank))\n ranks = data_handler.read_ranks_file()\n if bob_chat.latestLeet != date.today() and \\\n int(time.strftime(\"%H\")) == 13 and \\\n int(time.strftime(\"%M\")) == 37:\n print('[INFO] ' + time.strftime(\"%H:%M:%S\") + ' Time and date correct! ')\n bob_chat.latestLeet = date.today()\n bob_chat.save()\n if sender._rank < len(ranks) - 1:\n sender._rank += 1\n up = u\"\\U0001F53C\"\n reply = \"Asento! \" + str(sender.tg_user) + \" ansaitsi ylennyksen arvoon \" + \\\n ranks[sender._rank] + \"! \" + up + \" Lepo. \"\n else:\n sender.prestige += 1\n reply = \"Asento! \" + str(sender.tg_user) + \\\n \" on saavuttanut jo korkeimman mahdollisen sotilasarvon! Näin ollen \" + str(sender.tg_user) + \\\n \" lähtee uudelle kierrokselle. Onneksi olkoon! \" + \\\n \"Juuri päättynyt kierros oli hänen \" + str(sender.prestige) + \". Lepo. \"\n sender._rank = 0\n print('[SEND] ' + time.strftime(\"%H:%M:%S\") + \" \" + reply)\n bot.sendMessage(msg['chat']['id'], reply)\n\n # 33% chance for demotes\n elif randint(0, 1) == 0:\n print('[INFO] ' + time.strftime(\"%H:%M:%S\") + ' Incorrect time, removing points. ')\n if sender._rank > 0:\n sender._rank -= 1\n down = u\"\\U0001F53D\"\n reply = \"Alokasvirhe! \" + str(sender.tg_user) + \" alennettiin arvoon \" + \\\n ranks[sender._rank] + \". \" + down\n print('[SEND] ' + time.strftime(\"%H:%M:%S\") + ' ' + reply)\n bot.sendMessage(msg['chat']['id'], reply)\n else:\n print('[INFO] ' + time.strftime(\"%H:%M:%S\") + ' Incorrect time, but the user got lucky. 
')\n print('[-END] ' + time.strftime(\"%H:%M:%S\") + ' Sender rank after: ' + str(sender.rank))\n sender.save()\n\n\ndef random_proverb():\n max_id = Proverb.objects.all().aggregate(max_id=Max(\"id\"))['max_id']\n for i in range(0, 100):\n pk = random.randint(1, max_id)\n proverb = Proverb.objects.filter(pk=pk).first()\n if proverb:\n return proverb\n # If it takes over 100 tries, return empty\n return None\ndef rare_proverb():\n proverb = Proverb.objects.all().first()\n proverb.save()\n return proverb\ndef semi_rare_proverb():\n proverbs = Proverb.objects.all()\n for i in range(0, proverbs.count()):\n if 0.9 < random.random():\n return proverbs[i]\n return proverbs.last()\n\n\ndef set_reminder(msg, bot):\n # if fails, send error message describing usage and return\n chat = Chat.objects.get(id=str(msg['chat']['id']))\n # TODO: make also float number possible\n # TODO: make also possible to simply put the date in to this\n # TODO: add months also\n # Extract times 1 2 3 4 5\n expr = re.match(r'muistuta ([0-9]+y )?([0-9]+d )?([0-9]+h )?([0-9]+m )?(.+)', msg['text'])\n if expr.group(1) or expr.group(2) or expr.group(3) or expr.group(4):\n remind_date = datetime.now()\n if expr.group(1):\n year = float(expr.group(1)[:-2])\n remind_date = remind_date + timedelta(days=year*365)\n if expr.group(2):\n day = float(expr.group(2)[:-2])\n remind_date = remind_date + timedelta(days=day)\n if expr.group(3):\n hour = float(expr.group(3)[:-2])\n remind_date = remind_date + timedelta(hours=hour)\n if expr.group(4):\n minute = float(expr.group(4)[:-2])\n remind_date = remind_date + timedelta(minutes=minute)\n remember_this = expr.group(5)\n reminder = Reminder(remember_this=remember_this, chat=chat, date=remind_date)\n reminder.save()\n reply = 'Muistutetaan ' + str(remind_date.strftime('%d.%m.%Y klo %H:%M'))\n bot.sendMessage(msg['chat']['id'], reply)\n else:\n reply = 'Muistutus oli väärää muotoa. '\n bot.sendMessage(msg['chat']['id'], reply)\n print('[ERRO] Something went wrong')\n\n\n# Shitposting features here\ndef spammer(msg, bot):\n # Post random proverb\n if msg['text'].lower() == 'viisaus':\n proverb = semi_rare_proverb()\n proverb.send_count += 1\n proverb.save()\n if proverb.author:\n author = ' - ' + proverb.author\n else:\n author = ''\n if proverb.date:\n year = str(proverb.date.year)\n else:\n year = ''\n reply = proverb.proverb + author + ' ' + year\n bot.sendMessage(msg['chat']['id'], reply)\n # Add new proverb\n elif msg['text'][:14].lower() == 'uusi viisaus: ':\n sender_name = str(TelegramUser.objects.get(id=str(msg['from']['id'])))\n proverb = Proverb(proverb=msg['text'][14:], author=sender_name, date=date.today())\n proverb.save()\n reply = 'Viisaus tallennettu. '\n bot.sendMessage(msg['chat']['id'], reply)\n elif msg['text'].lower() == \"bob, kuinka viisas olet?\":\n reply = str(Proverb.objects.all().count())\n bot.sendMessage(msg['chat']['id'], reply)\n # Reminder\n elif msg['text'][:9].lower() == 'muistuta ':\n set_reminder(msg, bot)\n # If string \"_* vai _*\" is found, make split and post random\n elif re.search(r'..*\\svai\\s..*', msg['text']) is not None:\n options = re.split(r'\\svai\\s', msg['text'])\n reply = (random.choice(options))\n print('[SEND] ' + time.strftime(\"%H:%M:%S\") + \" \" + reply)\n bot.sendMessage(msg['chat']['id'], reply)\n elif msg['text'].lower() == \"huutista\":\n reply = '...joka tuutista! 😂'\n bot.sendMessage(msg['chat']['id'], reply)\n\n\ndef msg_handler(msg, bot, settings_data):\n # print('Received message. 
' + str(msg))\n update_user_db(msg)\n if True: # str(msg['chat']['id']) == settings_data['bob_ID']:\n bob_handler(msg, bot)\n\n if str(msg['chat']['id']) != settings_data['bob_ID']:\n spammer(msg, bot)\n\n if str(msg['from']['id']) == settings_data['dev_ID']:\n debug_handler(msg, bot)\n\n\n\n","sub_path":"bob/message_handler.py","file_name":"message_handler.py","file_ext":"py","file_size_in_byte":9665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"580013422","text":"import pyautogui\n\ndef move_and_click(x,y,sleeptime):\n pyautogui.moveTo(x=x, y=y,duration=0, tween=pyautogui.linear)\n pyautogui.click(x=x, y=y,clicks=1, button='left')\n pyautogui.sleep(sleeptime)\ndef move_and_click_input_enter(x,y,sleeptime,input_str,press,select_all=False):\n pyautogui.moveTo(x=x, y=y,duration=0, tween=pyautogui.linear)\n pyautogui.click(x=x, y=y,clicks=1, button='left')\n if select_all:\n pyautogui.hotkey(\"ctrl\", \"a\")\n pyautogui.typewrite(input_str)\n pyautogui.press(press)\n pyautogui.sleep(sleeptime)\ndef move_and_click_select_copy(x,y,sleeptime):\n pyautogui.moveTo(x=x, y=y,duration=0, tween=pyautogui.linear)\n pyautogui.click(x=x, y=y,clicks=1, button='left')\n pyautogui.hotkey(\"ctrl\", \"a\")\n pyautogui.hotkey(\"ctrl\", \"c\")\n pyautogui.sleep(sleeptime)\ndef move_and_click_paste(x,y,sleeptime):\n pyautogui.moveTo(x=x, y=y,duration=0, tween=pyautogui.linear)\n pyautogui.click(x=x, y=y,clicks=1, button='left')\n pyautogui.hotkey(\"ctrl\", \"v\")\n pyautogui.sleep(sleeptime)\ndef click_enter(x,y):\n pyautogui.moveTo(x=x, y=y,duration=0, tween=pyautogui.linear)\n pyautogui.click(x=x, y=y,clicks=1, button='left')\n pyautogui.press('enter')\ndef start_page(num):\n #点击console\n move_and_click(1150,184,2)\n #清除console里面的内容\n move_and_click(990,217,2)\n move_and_click_input_enter(1005,248,1,'$(\"input.corona_input[data-v-5f5cbc17]\").value=%s'%num,'enter')\n click_enter(808,701)\n pyautogui.sleep(3)\nnum=295\nend_num = 364\n#网页地点730.4 and 704.8\n#刷新第一页\n#清除blogger/v2\nmove_and_click(1153,250,2)\n#清除response\nmove_and_click(1230,216,2)\nstart_page(num)\nfor i in range(num,end_num):\n print(\"执行第%s页\"%(i))\n #跳转到network\n move_and_click(1314,183,1)\n #移动到fetch\n move_and_click(1252,280,1)\n #enter blogger/v2\n move_and_click_input_enter(1053,250,3,\"/api/solar/cooperator/blogger/v2\",'enter',True)\n #点击 blogger/v2\n move_and_click(1060,321,1)\n #点击response\n move_and_click(1461,443,1)\n #移动到data,并点击,复制\n move_and_click_select_copy(1430,473,1)\n #移除response\n move_and_click(1230,217,1)\n #点击粘贴界面\n move_and_click(420,22,1)\n #刷新粘贴界面\n move_and_click(107,65,2)\n #找到粘贴地方,并粘贴\n move_and_click_paste(450,220,1)\n #点击提交\n move_and_click(455,323,2)\n #点击回原小红书\n move_and_click(126,21,1)\n #点击console\n move_and_click(1150,184,2)\n #清除console里面的内容\n move_and_click(990,217,2)\n #点击console,并跳转下一页\n move_and_click_input_enter(1005,248,1,'$(\".corona-pagination_pagination\").children[$(\".corona-pagination_pagination\").children.length-1].click()','enter')\n #清除console里面的内容\n move_and_click(990,217,2)","sub_path":"彩妆.py","file_name":"彩妆.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"119531182","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nimport admin_app.core.utils.models\nimport django_extensions.db.fields\n\n\nclass Migration(migrations.Migration):\n\n 
dependencies = [\n ('feeds', '0001_initial'),\n ('core', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Article',\n fields=[\n ('content_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Content')),\n ('age_from', models.PositiveIntegerField(default=0, verbose_name='\\u0432\\u043e\\u0437\\u0440\\u0430\\u0441\\u0442 \\u043e\\u0442')),\n ('age_till', models.PositiveIntegerField(null=True, verbose_name='\\u0432\\u043e\\u0437\\u0440\\u0430\\u0441\\u0442 \\u0434\\u043e', blank=True)),\n ('link', models.TextField(default=b'', verbose_name='\\u043e\\u0440\\u0438\\u0433\\u0438\\u043d\\u0430\\u043b\\u044c\\u043d\\u0430\\u044f \\u0441\\u0441\\u044b\\u043b\\u043a\\u0430', blank=True)),\n ('original_author', models.CharField(default=b'', help_text='\\u0410\\u0432\\u0442\\u043e\\u0440 \\u0441\\u0442\\u0430\\u0442\\u044c\\u0438 \\u0438\\u0437 \\u0438\\u0441\\u0442\\u043e\\u0447\\u043d\\u0438\\u043a\\u0430', max_length=255, verbose_name='\\u043e\\u0440\\u0438\\u0433\\u0438\\u043d\\u0430\\u043b\\u044c\\u043d\\u044b\\u0439 \\u0430\\u0432\\u0442\\u043e\\u0440', blank=True)),\n ('video_code', models.TextField(default=b'', verbose_name='\\u043a\\u043e\\u0434 \\u0432\\u0438\\u0434\\u0435\\u043e', blank=True)),\n ('feed', models.ForeignKey(related_name='articles', verbose_name='\\u043b\\u0435\\u043d\\u0442\\u0430', blank=True, to='feeds.Feed', null=True)),\n ],\n options={\n 'verbose_name': '\\u0421\\u0442\\u0430\\u0442\\u044c\\u044f',\n 'verbose_name_plural': '\\u0421\\u0442\\u0430\\u0442\\u044c\\u0438',\n },\n bases=('core.content', models.Model),\n ),\n migrations.CreateModel(\n name='ArticleAuthor',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),\n ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),\n ('slug', django_extensions.db.fields.AutoSlugField(populate_from=b'title', editable=False, blank=True, unique=True, verbose_name='c\\u043b\\u0430\\u0433')),\n ('title', admin_app.core.utils.models.CapitalizeCharField(unique=True, max_length=255, verbose_name='\\u0424.\\u0418.\\u041e.')),\n ('post', admin_app.core.utils.models.CapitalizeCharField(default='', max_length=255, verbose_name='\\u0434\\u043e\\u043b\\u0436\\u043d\\u043e\\u0441\\u0442\\u044c', blank=True)),\n ('is_active', models.BooleanField(default=True, verbose_name='\\u0430\\u043a\\u0442\\u0438\\u0432\\u043d\\u043e\\u0441\\u0442\\u044c')),\n ],\n options={\n 'ordering': ('title',),\n 'verbose_name': '\\u0410\\u0432\\u0442\\u043e\\u0440',\n 'verbose_name_plural': '\\u0410\\u0432\\u0442\\u043e\\u0440\\u044b',\n },\n bases=(admin_app.core.utils.models.CommonModelMixin, models.Model),\n ),\n migrations.AddField(\n model_name='article',\n name='main_author',\n field=models.ForeignKey(related_name='articles', blank=True, to='articles.ArticleAuthor', help_text='\\u0410\\u0432\\u0442\\u043e\\u0440 \\u0441\\u0442\\u0430\\u0442\\u044c\\u0438', null=True, verbose_name='\\u0430\\u0432\\u0442\\u043e\\u0440'),\n ),\n ]\n","sub_path":"src/admin_app/articles/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} 
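The migration record above encodes the model schema directly in its CreateModel/AddField operations; a rough sketch of the models module that would generate such a migration is shown below. All field names and options are read off the migration itself, but this is a hypothetical reconstruction: the import paths for the project-specific bases (Content, CommonModelMixin, CapitalizeCharField) are assumptions, verbose_name strings and the django_extensions created/modified/slug fields are elided for brevity, and the FK style matches the Django 1.x era of the record (no on_delete).

    # Hypothetical reconstruction -- not the original source behind 0001_initial.py.
    from django.db import models
    from core.models import Content  # assumed location of the core.Content base
    from admin_app.core.utils.models import CapitalizeCharField, CommonModelMixin

    class ArticleAuthor(CommonModelMixin, models.Model):
        title = CapitalizeCharField(max_length=255, unique=True)
        post = CapitalizeCharField(max_length=255, blank=True, default='')
        is_active = models.BooleanField(default=True)

        class Meta:
            ordering = ('title',)

    class Article(Content):
        age_from = models.PositiveIntegerField(default=0)
        age_till = models.PositiveIntegerField(null=True, blank=True)
        link = models.TextField(blank=True, default='')
        original_author = models.CharField(max_length=255, blank=True, default='')
        video_code = models.TextField(blank=True, default='')
        feed = models.ForeignKey('feeds.Feed', related_name='articles',
                                 null=True, blank=True)
        main_author = models.ForeignKey(ArticleAuthor, related_name='articles',
                                        null=True, blank=True)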
+{"seq_id":"539768321","text":"\"\"\"\nThis file defines the combination of behaviors the robots will assume for each condition/run\n\"\"\"\nfrom enum_state import gen_enum_class\nProfileBase = gen_enum_class(\"BehaviorProfile\", \"disabled\", \"easy\", \"help\", \"dead\")\nclass BehaviorProfile(ProfileBase):\n \"\"\" Experiment Robot Behavior Profile \"\"\"\n __settings = { \n # CONDITION 1\n (1,1,\"dirtdog\"): ProfileBase.EASY,\n (1,1,\"eva\"): ProfileBase.EASY,\n (1,1,\"discovery\"): ProfileBase.DEAD,\n (1,1,\"bender\"): ProfileBase.HELP,\n (1,1,\"roomba500\"): ProfileBase.DISABLED,\n (1,1,\"neato\"): ProfileBase.DISABLED,\n\n (1,2,\"dirtdog\"): ProfileBase.HELP,\n (1,2,\"eva\"): ProfileBase.HELP,\n (1,2,\"discovery\"): ProfileBase.DISABLED,\n (1,2,\"bender\"): ProfileBase.DISABLED,\n (1,2,\"roomba500\"): ProfileBase.EASY,\n (1,2,\"neato\"): ProfileBase.DEAD,\n\n\n # CONDITION 2\n (2,1,\"dirtdog\"): ProfileBase.EASY,\n (2,1,\"eva\"): ProfileBase.EASY,\n (2,1,\"discovery\"): ProfileBase.DISABLED,\n (2,1,\"bender\"): ProfileBase.DISABLED,\n (2,1,\"roomba500\"): ProfileBase.HELP,\n (2,1,\"neato\"): ProfileBase.DEAD,\n\n (2,2,\"dirtdog\"): ProfileBase.HELP,\n (2,2,\"eva\"): ProfileBase.HELP,\n (2,2,\"discovery\"): ProfileBase.DEAD,\n (2,2,\"bender\"): ProfileBase.EASY,\n (2,2,\"roomba500\"): ProfileBase.DISABLED,\n (2,2,\"neato\"): ProfileBase.DISABLED,\n\n\n # CONDITION 3\n (3,1,\"dirtdog\"): ProfileBase.EASY,\n (3,1,\"eva\"): ProfileBase.EASY,\n (3,1,\"discovery\"): ProfileBase.DEAD,\n (3,1,\"bender\"): ProfileBase.HELP,\n (3,1,\"roomba500\"): ProfileBase.DISABLED,\n (3,1,\"neato\"): ProfileBase.DISABLED,\n\n (3,2,\"dirtdog\"): ProfileBase.HELP,\n (3,2,\"eva\"): ProfileBase.HELP,\n (3,2,\"discovery\"): ProfileBase.DISABLED,\n (3,2,\"bender\"): ProfileBase.DISABLED,\n (3,2,\"roomba500\"): ProfileBase.EASY,\n (3,2,\"neato\"): ProfileBase.DEAD,\n\n\n # CONDITION 4\n (4,1,\"dirtdog\"): ProfileBase.EASY,\n (4,1,\"eva\"): ProfileBase.EASY,\n (4,1,\"discovery\"): ProfileBase.DISABLED,\n (4,1,\"bender\"): ProfileBase.DISABLED,\n (4,1,\"roomba500\"): ProfileBase.HELP,\n (4,1,\"neato\"): ProfileBase.DEAD,\n\n (4,2,\"dirtdog\"): ProfileBase.HELP,\n (4,2,\"eva\"): ProfileBase.HELP,\n (4,2,\"discovery\"): ProfileBase.DEAD,\n (4,2,\"bender\"): ProfileBase.EASY,\n (4,2,\"roomba500\"): ProfileBase.DISABLED,\n (4,2,\"neato\"): ProfileBase.DISABLED,\n\n }\n\n def get_robot_config(self, robot, cond_id, run_id):\n \"\"\" Internally sets, and also returns the string value, of the behavior profile for this robot \"\"\"\n self.set(self.__settings[(cond_id,run_id,robot)])\n return self.get()\n\n\n","sub_path":"vacuum_experiment_msgs/src/vacuum_experiment_msgs/behavior_profile.py","file_name":"behavior_profile.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"529757406","text":"#!/usr/bin/env python3\n\n# ######################################################################\n\nfrom matplotlib import rc\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# ######################################################################\n\n# Inner and outer radius of the colored donuts.\nr_in = 0.12\nr_out = 0.15\n\n# Padding between donut and arrow.\ndr = 0.01\n\n# Border thickness around donuts and arrows.\nline_width = 2\n\n# Type names and corresponding colors. Order matters -- they are laid\n# out clockwise from the top. 
In order to try to get the colors to look\n# nice together, the sum of the color channels is uniform.\nnames_colors = (\n ('Heart', '#ff4444'),\n ('Fire', '#ff8800'),\n ('Lightning', '#bbbb00'),\n ('Plant', '#44ff44'),\n ('Wind', '#00bbbb'),\n ('Water', '#4444ff'),\n ('Shadow', '#8800ff'),\n ('Rock', '#888888'),\n)\n\n# ######################################################################\n\ndef main():\n # Set up the figure. White background, LaTeX font. Return the Axes.\n ax = get_ax()\n\n draw_circles(ax)\n\n cycle(ax, 'Fire', 'Plant', 'Water')\n cycle(ax, 'Lightning', 'Water', 'Rock')\n cycle(ax, 'Fire', 'Shadow', 'Wind')\n cycle(ax, 'Wind', 'Heart', 'Lightning')\n cycle(ax, 'Shadow', 'Plant', 'Rock', 'Heart')\n\n# plt.savefig('type_graph.png', transparent=True)\n\n return plt.show()\n\n# ######################################################################\n\ndef get_ax(size=8):\n rc( 'font', **{ 'family':'sans-serif', 'sans-serif':['Helvetica'], 'size':'14' } )\n rc('text', usetex=True)\n rc('text.latex', preamble='\\\\usepackage{amsmath}, \\\\usepackage{amssymb}, \\\\usepackage{color}')\n fig = plt.figure( figsize=(size, size), facecolor='w')\n # Remove padding around plot.\n plt.subplots_adjust(bottom=0., left=0., right=1., top=1.)\n # No axis frames.\n plt.axes(frameon=False)\n # Set the axis limits.\n ax = fig.gca()\n ax.set_xlim( [-1, 1] )\n ax.set_ylim( [-1, 1] )\n # No ticks.\n ax.set_xticks( [] )\n ax.set_yticks( [] )\n return ax\n\n# ======================================================================\n\ndef draw_circles(ax):\n global names_colors\n for i, (name, color) in enumerate(names_colors):\n add_circle(ax, name, loc(i), color)\n return\n\n# ----------------------------------------------------------------------\n\ndef add_circle(ax, name, location, color):\n global r_in, r_out, line_width\n c_out = plt.Circle(location, r_out, ec='k', fc=color, lw=line_width)\n c_in = plt.Circle(location, r_in, ec='k', fc='w', lw=line_width)\n ax.add_artist(c_out)\n ax.add_artist(c_in)\n x, y = location\n return ax.text(x, y, tex(name), verticalalignment='center', horizontalalignment='center')\n\n# ======================================================================\n\ndef loc(i):\n r = 1 - r_out\n q = 2*np.pi*(i + 0.5)/8.\n return np.array( [ r*np.sin(q), r*np.cos(q) ] )\n\n# ======================================================================\n\ndef cycle(ax, *args):\n rgsa = args[1:] + args[:1]\n return [ beats(ax, a, r) for a, r in zip(args, rgsa) ]\n\n# ----------------------------------------------------------------------\n\ndef beats(ax, water, fire):\n global r_in, r_out, dr, names_colors\n for i, (name, color) in enumerate(names_colors):\n if name == water:\n# i_water = i\n loc_water = loc(i)\n color_water = color\n if name == fire:\n# i_fire = i\n loc_fire = loc(i)\n\n# di = (i_water - i_fire) % 8\n# di = min(di, 8 - di)\n# print(water, fire, di)\n\n # We don't want to start the arrow at the center of the circle --\n # it'll overwrite the text! 
Let's start at the edge of the outer\n # circle.\n dxy = loc_fire - loc_water\n udxy = dxy / np.sqrt( np.dot(dxy, dxy) )\n new_loc_fire = loc_fire - udxy*(r_out + dr)\n new_loc_water = loc_water + udxy*(r_out + dr)\n return ax.annotate(\n \"\",\n xy=new_loc_fire,\n xytext=new_loc_water,\n arrowprops=dict(\n connectionstyle='arc3',\n ec='k',\n fc=color_water,\n lw=line_width,\n width=7,\n ),\n )\n\n# ######################################################################\n\ndef notex(x):\n \"\"\"Format a chunk of text to be non-math LaTeX.\"\"\"\n if '\\n' in x:\n # If there are multiple lines, handle each individually.\n return ' $ \\n $ '.join( notex(y) for y in x.split('\\n') )\n else:\n return '\\\\operatorname{' + x.replace(' ', '\\\\;') + '}'\n\n# ----------------------------------------------------------------------\n\ndef tex(x):\n \"\"\"Split a string into math and non-math chunks, by dollar signs.\"\"\"\n nomath = x.split('$')[::2]\n ret = [None]*( len( x.split('$') ) )\n ret[1::2] = x.split('$')[1::2]\n ret[::2] = [ notex(n) for n in nomath ]\n return ' $ ' + ''.join(ret) + ' $ '\n\n# ######################################################################\n\nif __name__ == '__main__':\n main()\n","sub_path":"assets/images/pokemon/pokeplot.py","file_name":"pokeplot.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"386257820","text":"\"\"\"\nCreate a program that asks the user for a number\nand then prints out a list of all the divisors of that number.\n(If you don’t know what a divisor is, it is a number that divides evenly into another number.\nFor example, 13 is a divisor of 26 because 26 / 13 has no remainder.)\n\"\"\"\n\nuser_number = int(input(\"Please enter a number : \"))\n\nlist_range = list(range(1, user_number+1))\n\ndivisorList = []\n\n# Solution 1\nfor n in list_range:\n if user_number % n == 0:\n divisorList.append(n)\n\nprint(divisorList)\n\n# Solution 2\nd = [x for x in list_range if user_number % x == 0]\nprint(d)","sub_path":"basic_exercises/4_divisors.py","file_name":"4_divisors.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"211953427","text":"\"\"\"added column\n\nRevision ID: 28598bfaf3ab\nRevises: 20b5d75db930\nCreate Date: 2015-07-21 18:27:28.594491\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '28598bfaf3ab'\ndown_revision = '20b5d75db930'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column(u'users_user', sa.Column('phone_number', sa.String(length=200), nullable=True))\n op.create_index(op.f('ix_users_user_phone_number'), 'users_user', ['phone_number'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_users_user_phone_number'), table_name='users_user')\n op.drop_column(u'users_user', 'phone_number')\n ### end Alembic commands ###\n","sub_path":"alembic/versions/28598bfaf3ab_added_column.py","file_name":"28598bfaf3ab_added_column.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"249372131","text":"import time\nimport random\nfrom .database.mysql import MysqlDatabase\nfrom .logger.filelogger import FileLogger\nfrom .logger.consolelogger import ConsoleLogger\n\ndef randSleep():\n rand = random.uniform(0.2, 3.5) \n time.sleep(rand)\n\ndef loggerFactory(str, config):\n if str == \"console\":\n return ConsoleLogger(config)\n \n if str == \"file\":\n return FileLogger(config)\n\n return ConsoleLogger()\n\ndef databaseFactory(str, config):\n if str == \"mysql\":\n return MysqlDatabase(config)\n","sub_path":"app/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"105406619","text":"\"\"\"\n This Python script runs CGI scripts without an webserver.\n Created by Maarten Plieger - 20151104\n\"\"\"\n\nimport sys\nfrom subprocess import PIPE, Popen, STDOUT\nfrom threading import Thread\nimport os\nimport io\nimport errno\nimport time\n\nclass CGIRunner:\n \n def __init__(self):\n self.headers = \"\"\n self.headersSent = False\n self.foundLF = False\n \n def startProcess(self,cmds,callback=None,env = None,bufsize=0):\n try:\n from Queue import Queue, Empty\n except ImportError:\n from queue import Queue, Empty # python 3.x\n\n ON_POSIX = 'posix' in sys.builtin_module_names\n\n def enqueue_output(out, queue):\n for line in iter(out.readline, b''):\n queue.put(line)\n out.close()\n\n #print cmds\n #print env\n \n \n p = Popen(cmds, stdout=PIPE, stderr=STDOUT,bufsize=bufsize, close_fds=ON_POSIX,env=env)\n q = Queue()\n t = Thread(target=enqueue_output, args=(p.stdout, q))\n t.daemon = True # thread dies with the program\n t.start()\n \n #http://stackoverflow.com/questions/156360/get-all-items-from-thread-queue\n # read line without blocking\n while True:\n try:\n #line = q.get_nowait() #<-- Causes a lot of CPU usage!\n line = q.get(timeout=.01)\n if(callback != None):\n callback(line)\n except Empty:\n if(t.isAlive() == False):\n break;\n \n \"\"\" Somehow sometimes stuff is still in que \"\"\"\n while True:\n try: \n line = q.get(timeout=.1)\n if(callback != None):\n callback(line)\n except Empty:\n if(t.isAlive() == False):\n break;\n \n return p.wait() \n\n def _filterHeader(self,_message,writefunction):\n \n \n if self.headersSent == False:\n self.headers = self.headers + _message\n message = bytearray(_message)\n endHeaderIndex = 0\n for j in range(len(message)):\n if message[j] == 10 :\n if self.foundLF == False:\n self.foundLF = True\n #print \"LF Found\"\n continue\n elif self.foundLF == True and message[j] != 13:\n self.foundLF = False\n #print \"Sorry, not LF Found\"\n continue\n \n if(self.foundLF == True): \n if message[j] == 10 :\n #print \"Second LF Found\"\n self.headersSent = True;\n endHeaderIndex = j+2;\n #print \"HEADER FOUND\"\n #print message[:endHeaderIndex]\n writefunction(message[endHeaderIndex:])\n \n break;\n else:\n writefunction(_message)\n \n \"\"\"\n Run the CGI script with specified URL and environment. 
Stdout is captured and put in a StringIO object provided in output\n \"\"\"\n def run(self,cmds,url,output,env = [], path = None, isCGI = True):\n #output = subprocess.Popen([\"../../bin/adagucserver\", \"myarg\"], stdout=subprocess.PIPE, env=adagucenv).communicate()[0]\n self.headersSent = False\n self.foundLF = False\n self.headers = \"\"\n \n if isCGI is False:\n self.headersSent = True\n \n def writefunction(data):\n output.write(data)\n \n def monitor1(_message):\n self._filterHeader(_message,writefunction)\n \n localenv = {}#os.environ.copy()\n if url != None:\n localenv['QUERY_STRING']=url\n else :\n localenv['QUERY_STRING']=\"\"\n \n \n if path!=None:\n localenv['SCRIPT_NAME']=\"/myscriptname\";\n #SCRIPT_NAME [/cgi-bin/autoresource.cgi], REQUEST_URI [/cgi-bin/autoresource.cgi/opendap/clipc/combinetest/wcs_nc2.nc.das]\n localenv['REQUEST_URI']=\"/myscriptname/\" + path\n \n localenv.update(env) \n status = self.startProcess(cmds,monitor1,localenv,bufsize=8192)\n \n output.flush()\n \n return status, self.headers\n","sub_path":"data/python/adaguc/CGIRunner.py","file_name":"CGIRunner.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"83458313","text":"import pygame\nimport sys\nfrom platform import *\n\npygame.init()\nwin = pygame.display.set_mode((1050, 500))\npygame.display.set_caption('Pygame template')\nclock = pygame.time.Clock()\n\nbg = pygame.image.load('img\\TileSet_08.png')\n\npl = Platform()\n\nwhile True:\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n win.blit(bg, (0, 0))\n\n pygame.display.update()\n clock.tick(30)\n","sub_path":"platform_game.py","file_name":"platform_game.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"166041465","text":"\n\"\"\"\nTest for BAM file and AlignmentSet support.\n\"\"\"\n\nimport logging\nimport os\nimport platform\nimport unittest\nfrom pbcore.io import AlignmentSet\nfrom kineticsTools.KineticWorker import KineticWorker\nfrom kineticsTools.ipdModel import IpdModel\nfrom kineticsTools.ReferenceUtils import ReferenceUtils, ReferenceWindow\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n# FIXME\ndata_dir = \"/pbi/dept/secondary/siv/testdata/kineticsTools\"\n\nclass _TestBase(object):\n MAX_ALIGNMENTS = 1500\n\n \"\"\"\n Common test functionality. 
All input type tests should inherit from this,\n and yield identical results.\n \"\"\"\n\n def getOpts(self):\n \"\"\"Derived tests can override this to customize behaviour\"\"\"\n return self.basicOpts()\n\n def basicOpts(self):\n \"\"\"Mock up some options for the kinetic worker\"\"\"\n self_ = self\n class opts:\n def __init__(self):\n self.mapQvThreshold = -1\n self.cap_percentile = 99.0\n self.minCoverage = 3\n self.subread_norm = True\n self.maxCoverage = 200\n self.identify = True\n self.methylFraction = False\n self.pvalue = 0.01\n self.modsToCall = ['H', 'J', 'K']\n # Bug 23546: need to set values for these two new flags:\n self.identifyMinCov = 5\n self.methylMinCov = 10\n self.useLDA = False\n self.maxAlignments = self_.MAX_ALIGNMENTS\n self.randomSeed = None\n return opts()\n\n def getAlignments (self):\n raise NotImplementedError()\n\n def getReference (self):\n refDir = \"/pbi/dept/secondary/siv/references\"\n return os.path.join(refDir, \"Helicobacter_pylori_J99\", \"sequence\",\n \"Helicobacter_pylori_J99.fasta\")\n\n def setUp(self):\n self.cmpH5 = None\n resourcesDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../kineticsTools/resources')\n ref = self.getReference()\n alnFile = self.getAlignments()\n assert os.path.exists(alnFile) and os.path.exists(ref)\n\n self.ds = AlignmentSet(alnFile, referenceFastaFname=ref)\n self.contigs = ReferenceUtils.loadReferenceContigs(ref, self.ds)\n self.ipdModel = IpdModel(self.contigs, os.path.join(resourcesDir, \"P6-C4.h5\"))\n # Create a functional KineticWorker object that can be poked at\n self.kw = KineticWorker(self.ipdModel)\n # Put in our cmp.h5 - this is normally supplied by the Worker\n self.kw.caseCmpH5 = self.ds\n self.kw.controlCmpH5 = None\n\n self.kw.options = self.getOpts()\n\n def test_private_api (self):\n start = 50\n end = 100\n REF_GROUP_ID = \"gi|12057207|gb|AE001439.1|\"\n referenceWindow = ReferenceWindow(0, REF_GROUP_ID, start, end)\n bounds = (start, end)\n rir = list(self.kw.caseCmpH5.readsInRange(referenceWindow.refName,\n referenceWindow.start, referenceWindow.end))\n self.assertEqual(len(rir), 301)\n chunks = self.kw._fetchChunks(REF_GROUP_ID, (start, end),\n self.kw.caseCmpH5)\n factor = 1.0 / self.ds.readGroupTable[0].FrameRate\n rawIpds = self.kw._loadRawIpds(rir, start, end, factor)\n logging.critical(len(rawIpds))\n # XXX note that this is very dependent on the exact order of reads\n # found by readsInRange(), which may be altered by changes to the\n # implementation of the dataset API. It should, however, remain\n # consistent across equivalent input types.\n # XXX 2015-08-28 disabling this for now because it will change if the\n # dataset contains multiple .bam files\n #self.assertEqual(\"%.4f\" % rawIpds[0][2], \"0.2665\")\n log.info(rawIpds)\n chunks = self.kw._chunkRawIpds(rawIpds)\n #log.critical(chunks)\n\n def test_small_decode (self):\n \"\"\"Test for known modifications near the start of H. 
pylori genome\"\"\"\n # XXX should have mods on 60- (m4C), 89+ (m6A), 91- (m6A)\n start = 50\n end = 100\n REF_GROUP_ID = \"gi|12057207|gb|AE001439.1|\"\n referenceWindow = ReferenceWindow(0, REF_GROUP_ID, start, end)\n bounds = (start, end)\n\n self.kw._prepForReferenceWindow(referenceWindow)\n kinetics = self.kw._summarizeReferenceRegion(bounds, False, True)\n mods = self.kw._decodePositiveControl(kinetics, bounds)\n log.info(mods)\n\n # Verify that we detect m6A mods at 14982 and 14991\n m6AMods = [x for x in mods if x['modification'] == 'm6A' and x['tpl'] in (88, 90) ]\n self.assertEqual(len(m6AMods), 2)\n m4CMods = [x for x in mods if x['modification'] == 'm4C' and x['tpl'] in (59,) ]\n self.assertEqual(len(m4CMods), 1)\n for x in mods:\n if x['strand'] == 0:\n self.assertEqual(x['tpl'], 88)\n else:\n self.assertTrue(x['tpl'] in [59,90])\n\n@unittest.skipUnless(os.path.isdir(data_dir), \"Missing test data directory\")\nclass TestBam(_TestBase, unittest.TestCase):\n def getAlignments (self):\n return os.path.join(data_dir, \"Hpyl_1_5000.bam\")\n\n\n@unittest.skipUnless(os.path.isdir(data_dir), \"Missing test data directory\")\nclass TestDataset (TestBam, unittest.TestCase):\n def getAlignments (self):\n return os.path.join(data_dir, \"Hpyl_1_5000.xml\")\n\n\n@unittest.skipUnless(os.path.isdir(data_dir), \"Missing test data directory\")\nclass TestSplitDataset(_TestBase, unittest.TestCase):\n def getAlignments (self):\n return os.path.join(data_dir, \"Hpyl_1_5000_split.xml\")\n\n\n@unittest.skipUnless(os.path.isdir(data_dir), \"Missing test data directory\")\nclass TestChunkedDataset(_TestBase, unittest.TestCase):\n\n def getAlignments(self):\n return os.path.join(data_dir, \"Hpyl_1_5000_chunk.xml\")\n\n @unittest.skip\n def test_private_api(self):\n pass\n\n def test_small_decode(self):\n start = 985\n end = 1065\n REF_GROUP_ID = \"gi|12057207|gb|AE001439.1|\"\n referenceWindow = ReferenceWindow(0, REF_GROUP_ID, start, end)\n bounds = (start, end)\n\n self.kw._prepForReferenceWindow(referenceWindow)\n kinetics = self.kw._summarizeReferenceRegion(bounds, False, True)\n mods = self.kw._decodePositiveControl(kinetics, bounds)\n self.assertEqual(len(mods), 4)\n\n\n@unittest.skipUnless(os.path.isdir(data_dir), \"Missing test data directory\")\nclass TestNonStochastic(TestBam): #_TestBase, unittest.TestCase):\n # XXX force this down to trigger RNG\n MAX_ALIGNMENTS = 150\n\n @unittest.skip\n def test_private_api(self):\n pass\n\n def test_small_decode(self):\n start = 50\n end = 100\n REF_GROUP_ID = \"gi|12057207|gb|AE001439.1|\"\n referenceWindow = ReferenceWindow(0, REF_GROUP_ID, start, end)\n bounds = (start, end)\n self.kw._prepForReferenceWindow(referenceWindow)\n kinetics = self.kw._summarizeReferenceRegion(bounds, False, True)\n # XXX note that this is very dependent on the exact order of reads\n # found by readsInRange(), which may be altered by changes to the\n # implementation of the dataset API. 
It should be immune to stochastic\n        # effects, however.\n        self.assertEqual(\"%.5f\" % kinetics[0]['ipdRatio'], \"1.06460\")\n        mods = self.kw._decodePositiveControl(kinetics, bounds)\n        self.assertEqual(len(mods), 3)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test/test_inputs.py","file_name":"test_inputs.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"592893020","text":"import os\nfrom core import execute\nfrom core import utils\n\nclass SSLScan(object):\n    \"\"\"docstring for SslScan\"\"\"\n    def __init__(self, options):\n        utils.print_banner(\"SSL Scanning\")\n        utils.make_directory(options['env']['WORKSPACE'] + '/ssl/')\n        self.options = options\n        self.initial()\n\n\n    def initial(self):\n        self.testssl()\n\n    def testssl(self):\n        utils.print_good('Starting testssl')\n        cmd = 'bash $PLUGINS_PATH/testssl.sh/testssl.sh --parallel --logfile $WORKSPACE/ssl/$TARGET-testssl.txt $TARGET'\n        cmd = utils.replace_argument(self.options, cmd)\n        utils.print_info(\"Execute: {0} \".format(cmd))\n        execute.run(cmd)\n        utils.check_output(self.options, '$WORKSPACE/ssl/$TARGET-testssl.txt')\n","sub_path":"modules/sslscan.py","file_name":"sslscan.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"136015410","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\nimport os\n\nrelease_info = {}\ninfopath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n                                        \"modopt\", \"info.py\"))\nwith open(infopath) as open_file:\n    exec(open_file.read(), release_info)\n\nsetup(\n    name='modopt',\n    author='sfarrens',\n    author_email='samuel.farrens@cea.fr',\n    version=release_info[\"__version__\"],\n    url='https://github.com/cea-cosmic/ModOpt',\n    download_url='https://github.com/cea-cosmic/ModOpt',\n    packages=find_packages(),\n    install_requires=['numpy>=1.16.4', 'scipy>=1.3.0', 'progressbar2>=3.34.3'],\n    license='MIT',\n    description='Modular Optimisation tools for solving inverse problems.',\n    long_description=release_info[\"__about__\"],\n    setup_requires=['pytest-runner', ],\n    tests_require=['pytest>=5.0.1', 'pytest-cov>=2.7.1', 'pytest-pep8'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"174565201","text":"#!/usr/bin/env python3\n\ndef get_desc():\n    return 5, 'Day 5: A Maze of Twisty Trampolines, All Alike'\n\n\ndef calc(log, values, mode):\n    ip = 0\n    steps = 0\n    values = [int(x) for x in values]\n\n    if mode == 0:\n        while ip < len(values):\n            next_ip = values[ip] + ip\n            values[ip] += 1\n            ip = next_ip\n            steps += 1\n    else:\n        while ip < len(values):\n            next_ip = values[ip] + ip\n            if values[ip] >= 3:\n                values[ip] -= 1\n            else:\n                values[ip] += 1\n            ip = next_ip\n            steps += 1\n\n    return steps\n\n\ndef test(log):\n    values = [\n        \"0\",\n        \"3\",\n        \"0\",\n        \"1\",\n        \"-3\",\n    ]\n\n    if calc(log, values, 0) == 5:\n        if calc(log, values, 1) == 10:\n            return True\n        else:\n            return False\n    else:\n        return False\n\n\ndef run(log, values):\n    log.show(calc(log, values, 0))\n    log.show(calc(log, values, 1))\n","sub_path":"2017/Helpers/day_05.py","file_name":"day_05.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
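The day_05 record just above implements the two puzzle variants as separate loops inside calc(); they differ only in how the jump offset is mutated after it is used. A single parameterised version, checked against the sample values that the record's own test() asserts (5 steps for part one, 10 for part two), might look like the sketch below; the function name is illustrative, not taken from the record.

    def steps_to_exit(offsets, part2=False):
        # Count jumps until the instruction pointer leaves the list.
        jumps = [int(x) for x in offsets]
        ip = steps = 0
        while 0 <= ip < len(jumps):
            move = jumps[ip]
            # Part one always increments the used offset; part two
            # decrements it instead when it was three or more.
            jumps[ip] += -1 if (part2 and move >= 3) else 1
            ip += move
            steps += 1
        return steps

    assert steps_to_exit([0, 3, 0, 1, -3]) == 5
    assert steps_to_exit([0, 3, 0, 1, -3], part2=True) == 10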
+{"seq_id":"150585945","text":"\nn=eval(input())\nnames=[]\nfor i in range(n):\n names.append(input())\nm=eval(input())\nrec=[]\nfor i in range(m):\n rec.append(input())\n if rec[len(rec)-1] not in names:\n print('WRONG')\n elif rec.count(rec[len(rec)-1])>1:\n print('REPEAT')\n else:\n print('OK')","sub_path":"Code/CodeRecords/2391/60590/310743.py","file_name":"310743.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"527878101","text":"import random\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Function to show an image from a tensor\ndef ShowImage(image, title='Figure'):\n npimage = image.detach().numpy()\n\n # Transpose for the correct shape\n if npimage.shape[0] == 3:\n npimage = np.transpose(npimage, axes=(1, 2, 0))\n \n if npimage.shape[0] == 2:\n npimage = np.transpose(npimage)\n \n plt.imshow(npimage)\n plt.title(title)\n plt.show()\n\n# Load rendered images from .png files in folders \"/data/Testset/GroundTruth/\" and \"/data/Testset/Input/\"\ntestset = torchvision.datasets.ImageFolder(root='./data/Testset/', transform=torchvision.transforms.ToTensor())\ntestloader = torch.utils.data.DataLoader(testset, batch_size=3, shuffle=False, num_workers=0)\n\n# Show the images by using an iterator\niterator = iter(testloader)\n\nsplats = iterator.next()\n#ShowImage(splats[0][0], 'Splats Color')\n#ShowImage(splats[0][1], 'Splats Depth')\n#ShowImage(splats[0][2], 'Splats Normals')\n\nsparsePoints = iterator.next()\n#ShowImage(sparsePoints[0][0], 'Sparse Points Color')\n#ShowImage(sparsePoints[0][1], 'Sparse Points Depth')\n#ShowImage(sparsePoints[0][2], 'Sparse Points Normals')\n\n# Load and evaluate neural network\nmodel = torch.jit.load('./data/Pytorch_Jit_Model_Lucy.pt')\nmodel = model.cuda()\nprint('Loaded model')\n\n# Evaluate model with input from files\n# Input: 1 Channel Color (R, G or B), 1 Channel Depth\n# Output: 1 Channel Color, 1 Channel Depth, 1 Channel Visibility Mask\ninput = torch.zeros(1, 2, 1024, 1024)\n\n# First channel is color\ninput[0][0] = sparsePoints[0][0][0]\n\n# Second channel is depth (average from RGB values)\ninput[0][1] = torch.mean(sparsePoints[0][1], 0)\n\n# Show input images\nShowImage(input[0][0], 'Input Color R')\nShowImage(input[0][1], 'Input Depth')\n\n# Evaluate model on gpu\ninput = input.cuda()\noutput = model(input)\nprint('Evaluated model')\n\n# Show output image from the neural network\noutput = output.cpu()\nShowImage(output[0][0], 'Output Color R')\nShowImage(output[0][1], 'Output Depth')\nShowImage(output[0][2], 'Output Visibility Mask')","sub_path":"NeuralNetwork/NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"279886279","text":"#If the bill was $150.00, split between 5 people, with 12% tip. \n#Each person should pay (150.00 / 5) * 1.12 = 33.6\n#Format the result to 2 decimal places = 33.60\n#Tip: There are 2 ways to round a number. 
You might have to do some Googling to solve this.💪\n#HINT 1: https://www.google.com/search?q=how+to+round+number+to+2+decimal+places+python&oq=how+to+round+number+to+2+decimal\n#HINT 2: https://www.kite.com/python/answers/how-to-limit-a-float-to-two-decimal-places-in-python\nbill_amt = int(input(\"Enter Bill Amount: \"))\nsplit_bw = int(input(\"Enter the total number of people to split between: \"))\ntip_percentage = (int(input(\"Enter Tip Percentage: \"))/100)\nnew_bill = (bill_amt * tip_percentage) + bill_amt\n\nresult = new_bill / split_bw\nprint(\"{:.2f}\".format(result))\n","sub_path":"Day_2/Project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"107317102","text":"__author__ = 'zhangxulong'\n# use pca for dimension reduction\nfrom numpy import *\nimport numpy\n\n\ndef pca(mata, length):\n meanVal = mean(mata, axis=0)\n stdVal = std(mata)\n rmmeanMat = (mata - meanVal) / stdVal\n covMat = cov(rmmeanMat, rowvar=0)\n eigval, eigvec = linalg.eig(covMat)\n maxnum = argsort(-eigval, axis=0) # sort descend\n tfMat = eigvec[:, maxnum[0:length]] # top length\n finalData = dot(rmmeanMat, tfMat) #\n recoMat = finalData * tfMat.T * stdVal + meanVal\n return finalData, recoMat\n\n\ndef test():\n return 0\n\n\ntest()\n","sub_path":"SID-Ratanpara-realize/dimension_reduction.py","file_name":"dimension_reduction.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"73300286","text":"# _*_ coding:utf-8 _*_\r\n\r\nimport socket\r\nimport time\r\nfrom kodec import msg_type_pb2, logical_pb2\r\nfrom public import IPConver\r\nimport struct\r\nimport random\r\n\r\n# 公开课学生上台\r\nclass PublicStageupClass(object):\r\n def __init__(self, userId):\r\n self.userId = userId\r\n self.stage_id = \"\"\r\n\r\n # 公开课连麦逻辑函数\r\n def publicClassStageupLogic(self, recData):\r\n if recData.result_frame.code == 0:\r\n if recData.head_frame.msg_type == msg_type_pb2.PUBLIC_CLASS_START_STAGE_UP_BROADCAST:\r\n start_handsup = recData.logical_frame.public_class_start_stage_up_broadcast\r\n print(\"收到连麦上台广播:\", start_handsup.stage_id)\r\n self.stage_id = start_handsup.stage_id\r\n return 1042\r\n elif recData.head_frame.msg_type == msg_type_pb2.PUBLIC_CLASS_HANDS_UP_RES:\r\n print(\"学生已举手成功!\")\r\n return 1051\r\n elif recData.head_frame.msg_type == msg_type_pb2.PUBLIC_CLASS_STOP_STAGE_UP_BROADCAST:\r\n print(\"连麦结束!\")\r\n return 1102\r\n elif recData.head_frame.msg_type == msg_type_pb2.PUBLIC_CLASS_PICK_HANDS_UP_USER_TO_STAGE_P2P:\r\n print(\"收到老师指定上台\")\r\n p2p_user_stageup = recData.logical_frame.public_class_pick_hands_up_user_to_stage_p2p\r\n self.teacher_id = p2p_user_stageup.teacher_id\r\n self.channel_teacher_id = p2p_user_stageup.channel_teacher_id\r\n return 1072\r\n elif recData.head_frame.msg_type == msg_type_pb2.PUBLIC_CLASS_USER_STAGE_UP_RES:\r\n print(\"学生上台成功!\")\r\n return 1081\r\n elif recData.head_frame.msg_type == msg_type_pb2.PUBLIC_CLASS_USER_STAGE_UP_BROADCAST:\r\n print(\"收到学生上台广播\")\r\n user_stage_up_broadcast = recData.logical_frame.public_class_user_stage_up_broadcast\r\n print(\"本次Stage_id:\", user_stage_up_broadcast.stage_id)\r\n print(\"用户频道id:\", user_stage_up_broadcast.channel_user_id)\r\n print(\"用户信息:\", user_stage_up_broadcast.user_info)\r\n return 1082\r\n elif recData.head_frame.msg_type == msg_type_pb2.PUBLIC_CLASS_LET_USER_STAGE_DOWN_BROADCAST:\r\n user_stage_down_broadcast 
= recData.logical_frame.public_class_let_user_stage_down_broadcast\r\n                if user_stage_down_broadcast.user_id == self.userId:\r\n                    print(\"老师让我下台:\", user_stage_down_broadcast.user_id)\r\n                    return 1092\r\n                else:\r\n                    pass\r\n\r\n    # 公开课学生举手封包函数\r\n    def pack_publicClassHandsup(self, token):\r\n        reqPack = logical_pb2.RequestPackage()\r\n        reqCommFrame = reqPack.head_frame\r\n        reqCommFrame.msg_type = msg_type_pb2.PUBLIC_CLASS_HANDS_UP_REQ\r\n        reqCommFrame.msg_no = 'wk_tt_' + str(random.randint(1, 999999)) # 采用随机数\r\n        reqCommFrame.msg_from_user_id = self.userId\r\n        reqCommFrame.msg_to_user_id = \"\"\r\n        reqCommFrame.device_type = 4 ## 设备类型,0 pc 1 ios 2 android 3 手机网页 4 pc网页\r\n        reqCommFrame.version = 102000017\r\n        # reqCommFrame.timestamp = int(time.time() * 1000)\r\n        reqCommFrame.ip = IPConver.ip2int(socket.gethostbyname(socket.gethostname()))\r\n        reqCommFrame.client_info.os_name = \"windows\"\r\n        reqCommFrame.client_info.client_version = \"wkai2133\"\r\n        reqCommFrame.extended_fields['from'] = 'multiuser_test'\r\n\r\n        # 构造上报奖励请求逻辑帧\r\n        req_message = logical_pb2.RequestMessage()\r\n        req_message.token = token\r\n        reqBody = req_message.public_class_hands_up_req\r\n        reqBody.stage_id = self.stage_id\r\n\r\n        # 对请求数据包进行序列化\r\n        reqPack.logical_frame = req_message.SerializeToString()\r\n        handsupMessage = reqPack.SerializeToString()\r\n\r\n        Msg_flag = int('0x0000', 16)\r\n        # 计算请求封包的长度\r\n        Msg_len = reqPack.ByteSize() + 2\r\n        finalMessage = struct.pack('!IH', Msg_len, Msg_flag) + handsupMessage\r\n        return finalMessage\r\n\r\n    # 公开课学生上台封包函数\r\n    def pack_publicUserStageup(self, token):\r\n        reqPack = logical_pb2.RequestPackage()\r\n        reqCommFrame = reqPack.head_frame\r\n        reqCommFrame.msg_type = msg_type_pb2.PUBLIC_CLASS_USER_STAGE_UP_REQ\r\n        reqCommFrame.msg_no = 'wk_tt_' + str(random.randint(1, 999999)) # 采用随机数\r\n        reqCommFrame.msg_from_user_id = self.userId\r\n        reqCommFrame.msg_to_user_id = \"\"\r\n        reqCommFrame.device_type = 4 ## 设备类型,0 pc 1 ios 2 android 3 手机网页 4 pc网页\r\n        reqCommFrame.version = 102000017\r\n        # reqCommFrame.timestamp = int(time.time() * 1000)\r\n        reqCommFrame.ip = IPConver.ip2int(socket.gethostbyname(socket.gethostname()))\r\n        reqCommFrame.client_info.os_name = \"windows\"\r\n        reqCommFrame.client_info.client_version = \"wkai2133\"\r\n        reqCommFrame.extended_fields['from'] = 'multiuser_test'\r\n\r\n        # 构造上报奖励请求逻辑帧\r\n        req_message = logical_pb2.RequestMessage()\r\n        req_message.token = token\r\n        reqBody = req_message.public_class_user_stage_up_req\r\n        reqBody.stage_id = self.stage_id\r\n        reqBody.channel_user_id = random.randint(12345678,87654321)\r\n        # reqBody.channel_user_id = self.userId\r\n        reqBody.teacher_id = self.teacher_id\r\n\r\n        # 对请求数据包进行序列化\r\n        reqPack.logical_frame = req_message.SerializeToString()\r\n        stageupMessage = reqPack.SerializeToString()\r\n\r\n        Msg_flag = int('0x0000', 16)\r\n        # 计算请求封包的长度\r\n        Msg_len = reqPack.ByteSize() + 2\r\n        finalMessage = struct.pack('!IH', Msg_len, Msg_flag) + stageupMessage\r\n        return finalMessage","sub_path":"liveTest/simulateSever/liveServiceMonitor/logical/PublicStageupClass.py","file_name":"PublicStageupClass.py","file_ext":"py","file_size_in_byte":5950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"343532337","text":"import matplotlib.pyplot as plt\nfrom sklearn.datasets import load_digits\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\n\ndigits = load_digits()\n\nfig, axes = plt.subplots(2, 5, figsize=(9, 4),\n                         subplot_kw={'xticks':(), 'yticks': ()})\nfor ax, img 
in zip(axes.ravel(), digits.images):\n ax.imshow(img)\n\nplt.tight_layout()\n\npca = PCA(n_components=2)\npca.fit(digits.data)\n# 将 digits 数据变换到前两个主成分的方向上\ndigits_pca = pca.transform(digits.data)\ncolors = [\"#476A2A\", \"#7851B8\", \"#BD3430\", \"#4A2D4E\", \"#875525\",\n \"#A83683\", \"#4E655E\", \"#853541\", \"#3A3120\", \"#535D8E\"]\nplt.figure(figsize=(8, 7))\nplt.xlim(digits_pca[:, 0].min(), digits_pca[:, 0].max())\nplt.ylim(digits_pca[:, 1].min(), digits_pca[:, 1].max())\nfor i in range(len(digits.data)):\n # 将数据实际绘制成文本,而不是散点\n plt.text(digits_pca[i, 0], digits_pca[i, 1], str(digits.target[i]),\n color=colors[digits.target[i]], fontdict={'weight': 'bold', 'size': 9})\nplt.xlabel(\"First principal component\")\nplt.ylabel(\"Second principal component\")\nplt.tight_layout()\n\ntsne = TSNE(random_state=42)\n# 使用 fit_transform 而不是 fit,因为 t-SNE 没有 transform 方法\ndigits_tsne = tsne.fit_transform(digits.data)\n\nplt.figure(figsize=(8, 7))\nplt.xlim(digits_tsne[:, 0].min(), digits_tsne[:, 0].max() + 1)\nplt.ylim(digits_tsne[:, 1].min(), digits_tsne[:, 1].max() + 1)\nfor i in range(len(digits.data)):\n # 将数据实际绘制成文本,而不是散点\n plt.text(digits_tsne[i, 0], digits_tsne[i, 1], str(digits.target[i]),\n color=colors[digits.target[i]],\n fontdict={'weight': 'bold', 'size': 9})\nplt.xlabel(\"t-SNE feature 0\")\nplt.ylabel(\"t-SNE feature 1\")\n\nplt.tight_layout()\nplt.show()\n","sub_path":"src/PyUnsuprised/t-sne-digits.py","file_name":"t-sne-digits.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"353373926","text":"import socket\r\n\r\ndef encrypt(n,e,text):\r\n\tcipher = [(ord(c) ** e) % n for c in text]\r\n\tc2 = [str(c) for c in cipher]\r\n\tif len(text) == 1:\r\n\t\tc2 += str((ord(\" \") ** e) % n)\r\n\treturn ','.join(c2)\r\n\t\r\n\t\r\n\t\r\ns = socket.socket()\r\ns.connect((\"127.0.0.1\",8000))\r\ns.send(\"e and n\")\r\ne , n = s.recv(1024).split(\" \")\r\ne , n = (int(e),int(n))\r\ntext = \"\"\r\nwhile text != \"stop\":\r\n\ttext = raw_input('enter text: ')\r\n\tenc = encrypt(n,e,text)\r\n\ts.send(enc)\r\ns.close()\r\n\r\n\r\n","sub_path":"rsaClient.py","file_name":"rsaClient.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"511731744","text":"from kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.properties import ObjectProperty\nfrom kivy.uix.popup import Popup\nimport string, random, sqlite3\n\nconn = sqlite3.connect(\"./Assets/db_productivity.sqlite3\")\nconn.execute('pragma foreign_keys=on')\nc = conn.cursor()\n\nclass Queries:\n\n @staticmethod\n def remove_fc_from_deck(fc_id):\n gd = App.get_running_app()\n c.execute(\"\"\"\n DELETE\n FROM `tbl_learning_map_fc_fc_decks`\n WHERE `fc_id` = '{fcid}'\n AND `fc_deck_id` = '{fdid}'\n \"\"\".format(fcid=fc_id, fdid=gd.glob_dict['fc_deck_id']))\n Navigation.page_nav(dest='empty')\n Navigation.page_nav(dest='flash_card_deck', orig='flash_card_deck')\n\n @staticmethod\n def get_fc_deck_data(fc_deck_id):\n c.execute(\"\"\"\n SELECT `fc_deck_name`,`fc_deck_excerpt`\n FROM `tbl_learning_flash_cards_decks`\n WHERE `fc_deck_id` = '{fdid}'\n \"\"\".format(fdid=fc_deck_id))\n fc_deck_data = c.fetchone()\n return fc_deck_data\n\n @staticmethod\n def get_fc_deck_list():\n c.execute(\"\"\"\n SELECT `fc_deck_id`,`fc_deck_name`,`fc_deck_excerpt`\n FROM `tbl_learning_flash_cards_decks`\n \"\"\")\n fc_deck_list = c.fetchall()\n return 
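# ----------------------------------------------------------------------
# Editor's aside on the toy RSA client above (a sketch, not a change to
# the record): computing (ord(c) ** e) % n builds a huge intermediate
# integer before reducing. Python's three-argument pow performs modular
# exponentiation (square-and-multiply) directly:
def encrypt_char(c, e, n):
    return pow(ord(c), e, n)

# Same result as the naive form, checked with hypothetical demo values
# for a textbook-sized key (n = 61 * 53 = 3233, e = 17):
assert encrypt_char('A', 17, 3233) == (ord('A') ** 17) % 3233
# ----------------------------------------------------------------------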
fc_deck_list\n\n @staticmethod\n def get_fc_decks_from_fc(fc_id):\n c.execute(\"\"\"\n SELECT `tbl_learning_flash_cards_decks`.`fc_deck_name`\n FROM `tbl_learning_flash_cards_decks`, `tbl_learning_map_fc_fc_decks`\n WHERE `tbl_learning_map_fc_fc_decks`.`fc_id` = '{fcid}'\n AND `tbl_learning_flash_cards_decks`.`fc_deck_id` = `tbl_learning_map_fc_fc_decks`.`fc_deck_id`\n ORDER BY `tbl_learning_flash_cards_decks`.`fc_deck_name`\n \"\"\".format(fcid=fc_id))\n fc_tags = c.fetchall()\n return fc_tags\n\n @staticmethod\n def get_fc_list_from_deck(fc_deck_id):\n c.execute(\"\"\"\n SELECT `tbl_learning_map_fc_fc_decks`.`fc_id`,`tbl_learning_flash_cards`.`fc_title`\n FROM `tbl_learning_map_fc_fc_decks`,`tbl_learning_flash_cards`\n WHERE `tbl_learning_map_fc_fc_decks`.`fc_deck_id` = '{fdid}'\n AND `tbl_learning_map_fc_fc_decks`.`fc_id` = `tbl_learning_flash_cards`.`fc_id`\n \"\"\".format(fdid=fc_deck_id))\n fc_list = c.fetchall()\n return fc_list\n\n @staticmethod\n def get_fc_data(fc_id):\n c.execute(\"\"\"\n SELECT `fc_title`,`fc_front`,`fc_back`,`fc_difficulty`\n FROM `tbl_learning_flash_cards`\n WHERE `fc_id` = '{fcid}'\n \"\"\".format(fcid=fc_id))\n fc_data = c.fetchone()\n return fc_data\n\n @staticmethod\n def get_fc_tag_list():\n c.execute(\"\"\"\n SELECT `fc_tag_id`,`fc_tag_name`\n FROM `tbl_learning_flash_cards_tags`\n \"\"\")\n fc_tag_list = c.fetchall()\n return fc_tag_list\n\n @staticmethod\n def get_fc_tags_from_fc(fc_id):\n c.execute(\"\"\"\n SELECT `tbl_learning_flash_cards_tags`.`fc_tag_name`\n FROM `tbl_learning_flash_cards_tags`, `tbl_learning_map_fc_fc_tags`\n WHERE `tbl_learning_map_fc_fc_tags`.`fc_id` = '{fcid}'\n AND `tbl_learning_flash_cards_tags`.`fc_tag_id` = `tbl_learning_map_fc_fc_tags`.`fc_tag_id`\n ORDER BY `tbl_learning_flash_cards_tags`.`fc_tag_name`\n \"\"\".format(fcid=fc_id))\n fc_tags = c.fetchall()\n return fc_tags\n\n @staticmethod\n def get_fc_tag_data(fc_tag_id):\n c.execute(\"\"\"\n SELECT `fc_tag_name`,`fc_tag_excerpt`\n FROM `tbl_learning_flash_cards_tags`\n WHERE `fc_tag_name` = '{fcid}'\n \"\"\".format(fcid=fc_tag_id))\n fc_tag_data = c.fetchone()\n return fc_tag_data\n\n @staticmethod\n def get_book_data(book_id):\n c.execute(\"\"\"\n SELECT `tbl_books`.`book_id`,`tbl_books`.`book_title`,`tbl_books`.`isbn`,`tbl_books`.`publisher`,\n `tbl_books`.`year_published`,`tbl_books`.`original_year_published`,\n `tbl_books`.`primary_genre`,`tbl_books`.`secondary_genre`,`tbl_books`.`cover_front`,\n `tbl_books`.`cover_back`,`tbl_books`.`read_status`,`tbl_books`.`rating`,`tbl_books`.`notes`,\n `tbl_books`.`textbook`,`tbl_books`.`timestamp`,\n `tbl_book_publishers`.`publisher_name`\n FROM `tbl_books`,`tbl_book_publishers`\n WHERE `tbl_books`.`book_id` = '{bid}'\n \"\"\".format(bid=book_id))\n book_data = c.fetchone()\n return book_data\n\n @staticmethod\n def get_book_author_data(book_id):\n c.execute(\"\"\"\n SELECT `tbl_book_authors`.`author_id`,`tbl_book_authors`.`author_first`,\n `tbl_book_authors`.`author_middle`,`tbl_book_authors`.`author_last`\n FROM `tbl_book_authors`,`tbl_map_author_book`\n WHERE `tbl_map_author_book`.`tmab_book_id` = '{bid}'\n AND `tbl_book_authors`.`author_id` = `tbl_map_author_book`.`tmab_author_id`\n \"\"\".format(bid=book_id))\n book_author = c.fetchall()\n return book_author\n\n @staticmethod\n def get_book_list():\n c.execute(\"\"\"\n SELECT `book_id`,`book_title`,`primary_genre`,`read_status`\n FROM `tbl_books`\n ORDER BY `tbl_books`.`book_title`\n \"\"\")\n book_list = c.fetchall()\n return book_list\n\n @staticmethod\n def 
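# ----------------------------------------------------------------------
# Editor's sketch on the Queries class above: every method interpolates
# values straight into the SQL string, which breaks on quotes (e.g. a
# title containing ') and is injection-prone. sqlite3 placeholders
# avoid both; table/column names below are borrowed from the queries
# above, and the in-memory database is just for the demo:
import sqlite3

demo_conn = sqlite3.connect(':memory:')
demo_cur = demo_conn.cursor()
demo_cur.execute("CREATE TABLE tbl_learning_flash_cards (fc_id TEXT, fc_title TEXT)")
demo_cur.execute("INSERT INTO tbl_learning_flash_cards VALUES (?, ?)",
                 ('42', "O'Brien's deck"))
demo_cur.execute("SELECT fc_title FROM tbl_learning_flash_cards WHERE fc_id = ?",
                 ('42',))
assert demo_cur.fetchone()[0] == "O'Brien's deck"
demo_conn.close()
# ----------------------------------------------------------------------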
get_book_user_notes(book_id):\n c.execute(\"\"\"\n SELECT `note_id`,`tbn_note_title`,`tbn_note_excerpt`,`tbn_note_full`\n FROM `tbl_book_notes`\n WHERE `tbn_book_id` = '{bid}'\n \"\"\".format(bid=book_id))\n book_user_notes = c.fetchall()\n return book_user_notes\n\n @staticmethod\n def get_book_note_data(note_id):\n c.execute(\"\"\"\n SELECT `tbn_note_title`,`tbn_note_excerpt`,`tbn_note_full`\n FROM `tbl_book_notes`\n WHERE `note_id` = '{nid}'\n \"\"\".format(nid=note_id))\n return c.fetchone()\n\n @staticmethod\n def get_author_list():\n c.execute(\"\"\"\n SELECT `author_id`,`author_first`, `author_middle`,`author_last`\n FROM `tbl_book_authors`\n ORDER BY `author_last`\n \"\"\")\n author_list = c.fetchall()\n return author_list\n\n @staticmethod\n def get_author_data(author_id):\n c.execute(\"\"\"\n SELECT `author_id`,`author_first`, `author_middle`,`author_last`\n FROM `tbl_book_authors`\n WHERE `author_id` = '{aid}'\n ORDER BY `author_last`\n \"\"\".format(aid=author_id))\n author_list = c.fetchall()\n return author_list\n\n @staticmethod\n def get_publisher_list():\n c.execute(\"\"\"\n SELECT `publisher_id`,`publisher_name`\n FROM `tbl_book_publishers`\n ORDER BY `publisher_name`\n \"\"\")\n publisher_list = c.fetchall()\n return publisher_list\n\n @staticmethod\n def get_publisher_data(publisher_name):\n c.execute(\"\"\"\n SELECT `publisher_name`\n FROM `tbl_book_publishers`\n WHERE `publisher_name` = '{pn}'\n \"\"\".format(pn=publisher_name))\n publisher_data = c.fetchone()\n return publisher_data\n\n @staticmethod\n def get_publisher_data_by_id(publisher_id):\n c.execute(\"\"\"\n SELECT `publisher_name`\n FROM `tbl_book_publishers`\n WHERE `publisher_id` = '{pid}'\n \"\"\".format(pid=publisher_id))\n publisher_data = c.fetchone()\n return publisher_data\nclass Navigation:\n\n @staticmethod\n def page_nav(**kwargs):\n gd = App.get_running_app()\n if 'dest' in kwargs and kwargs['dest'] == 'reset':\n gd.glob_dict['reset_page'] = kwargs['orig'] if 'orig' in kwargs else 'home'\n Navigation.page_nav(dest='empty')\n elif 'dest' in kwargs:\n if kwargs['dest'] == 'prev_page':\n dest = gd.glob_dict['orig']\n else:\n dest = kwargs['dest']\n gd.glob_dict['orig'] = gd.glob_dict['cur_page']\n gd.glob_dict['cur_page'] = dest\n else:\n dest = 'home'\n gd.glob_dict['edit'] = kwargs['edit'] if 'edit' in kwargs else False\n if 'book_id' in kwargs:\n gd.glob_dict['book_id'] = kwargs['book_id']\n if 'fc_deck_id' in kwargs:\n gd.glob_dict['fc_deck_id'] = kwargs['fc_deck_id']\n if 'note_id' in kwargs:\n gd.glob_dict['note_id'] = kwargs['note_id']\n if 'fc_id' in kwargs:\n gd.glob_dict['fc_id'] = kwargs['fc_id']\n gd.sm.current = dest\n\n @staticmethod\n def book_view_pressed(book_id, orig):\n gd = App.get_running_app()\n gd.glob_dict['book_id'] = book_id\n gd.glob_dict['orig'] = orig\n gd.sm.current = 'empty'\n gd.sm.current = 'book'\n\n @staticmethod\n def edit_book_pressed(book_id, orig):\n gd = App.get_running_app()\n gd.glob_dict['book_id'] = book_id\n gd.glob_dict['orig'] = orig\n gd.glob_dict['edit'] = True\n gd.sm.current = 'new_book'\n\n @staticmethod\n def book_note_pressed(note_id):\n gd = App.get_running_app()\n gd.glob_dict['edit'] = False\n gd.glob_dict['note_id'] = note_id\n popup = BookNotePopup()\n popup.open()\n\n @staticmethod\n def confirm_flash_card_deck_delete(fc_deck_id):\n gd = App.get_running_app()\n gd.glob_dict['fc_deck_id'] = fc_deck_id\n popup = DeleteFlashCardDeckConfirmationPopup()\n popup.open()\n\n @staticmethod\n def confirm_flash_card_delete(fc_id):\n gd = 
App.get_running_app()\n gd.glob_dict['fc_id'] = fc_id\n popup = DeleteFlashCardConfirmationPopup()\n popup.open()\n\n @staticmethod\n def edit_book_note_pressed(note_id, orig):\n gd = App.get_running_app()\n gd.glob_dict['note_id'] = note_id\n gd.glob_dict['orig'] = orig\n gd.glob_dict['edit'] = True\n gd.sm.current = 'new_book_note'\n\n @staticmethod\n def edit_book_author_pressed(author_id):\n gd = App.get_running_app()\n gd.glob_dict['author_id'] = author_id\n gd.glob_dict['edit'] = True\n gd.sm.current = 'new_book_author'\n\n\nclass DeleteFlashCardDeckConfirmationPopup(Popup):\n\n def on_parent(self, widget, parent):\n gd = App.get_running_app()\n self.fc_deck_id = gd.glob_dict['fc_deck_id']\n c.execute(\"\"\"\n SELECT `fc_deck_name`\n FROM `tbl_learning_flash_cards_decks`\n WHERE `fc_deck_id` = '{fdid}'\n \"\"\".format(fdid=self.fc_deck_id))\n try:\n self.fc_deck_name = c.fetchone()[0]\n except:\n self.gd = App.get_running_app()\n self.gd.sm.current = 'flash_cards'\n\n def on_confirm(self):\n gd = App.get_running_app()\n c.execute(\"\"\"\n DELETE\n FROM `tbl_learning_flash_cards_decks`\n WHERE `fc_deck_id` = '{fdid}'\n \"\"\".format(fdid=self.fc_deck_id))\n conn.commit()\n try:\n c.execute(\"\"\"\n SELECT `fc_deck_id`\n FROM `tbl_learning_flash_cards_decks`\n \"\"\")\n self.fc_deck_id = c.fetchone()[0]\n self.gd.glob_dict['fc_deck_id'] = self.fc_deck_id\n except:\n pass\n gd.glob_dict['edit'] = False\n if gd.glob_dict['cur_page'] != 'flash_card_deck':\n Navigation.page_nav(dest='flash_cards')\n Navigation.page_nav(dest=gd.glob_dict['cur_page'], orig=gd.glob_dict['orig'])\n self.dismiss()\n\n def on_close(self):\n self.dismiss()\n\n\nclass DeleteFlashCardConfirmationPopup(Popup):\n\n def on_parent(self, widget, parent):\n gd = App.get_running_app()\n self.fc_id = gd.glob_dict['fc_id']\n c.execute(\"\"\"\n SELECT `fc_title`\n FROM `tbl_learning_flash_cards`\n WHERE `fc_id` = '{fcid}'\n \"\"\".format(fcid=self.fc_id))\n try:\n self.fc_title = c.fetchone()[0]\n except:\n Navigation.page_nav(dest='flash_card_deck', orig='flash_card_deck', edit=False)\n\n def on_confirm(self):\n gd = App.get_running_app()\n c.execute(\"\"\"\n DELETE\n FROM `tbl_learning_flash_cards`\n WHERE `fc_id` = '{fcid}'\n \"\"\".format(fcid=self.fc_id))\n conn.commit()\n if gd.glob_dict['cur_page'] != 'flash_card_deck':\n Navigation.page_nav(dest='flash_cards', edit=False)\n Navigation.page_nav(dest='empty')\n Navigation.page_nav(dest=gd.glob_dict['cur_page'], orig=gd.glob_dict['orig'], edit=False)\n self.dismiss()\n\n def on_close(self):\n self.dismiss()\nclass BookNotePopup(Popup):\n\n def on_parent(self, widget, parent):\n gd = App.get_running_app()\n self.note_id = gd.glob_dict['note_id']\n note_data = Queries.get_book_note_data((self.note_id))\n self.note_title = note_data[0] if note_data[0] else \"\"\n self.note_excerpt = note_data[1] if note_data[1] else \"\"\n self.note_full = note_data[2] if note_data[2] else \"\"\n\n def on_close(self):\n self.dismiss()\nclass MiscFuns:\n\n def get_id(n):\n new_id = ''.join(random.SystemRandom().choice(string.digits) for _ in range(n))\n return new_id\nclass LoadDialog(FloatLayout):\n load = ObjectProperty(None)\n cancel = ObjectProperty(None)","sub_path":"lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":14493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"136186980","text":"#-*- coding:utf-8 -*-\n\nfrom sys import 
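# ----------------------------------------------------------------------
# Editor's aside on MiscFuns.get_id above (illustrative only):
# random.SystemRandom over string.digits works, but the stdlib secrets
# module states the intent directly. Note also that get_id lacks
# @staticmethod, so calling it on an instance would bind 'n' to the
# instance; calling it on the class (MiscFuns.get_id(6)) is what works.
import secrets
import string

def get_id(n):
    return ''.join(secrets.choice(string.digits) for _ in range(n))

assert len(get_id(8)) == 8 and get_id(8).isdigit()
# ----------------------------------------------------------------------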
getsizeof\n\n#http://blog.csdn.net/liyjupc/article/details/52679528\n#[TODO]位运算\n#位运算优势:\n# 在计算机中所有数据都是以二进制的形式储存的。\n# 位运算其实就是直接对在内存中的二进制数据进行操作,因此处理数据的速度非常快。\n#一. 位操作基础:\n# | 符号 | 描述 | 运算规则 |\n# -----------------------------------\n# | & | 与 | 两个位都为1时,结果才为1|\n# | | | 或 | 两个位都为0时,结果才为0|\n# | ^ | 异或 | 两个位相同为0,相异为1 |\n# | ~ | 取反 | 0变1,1变0 |\n# | << | 左移 |各二进位全部左移若干位,高位丢弃,低位补0|\n# | >> | 右移 |各二进位全部右移若干位,对无符号数,高位补0,有符号数,\n# 各编译器处理方法不一样,有的补符号位(算术右移),有的补0(逻辑右移)|\n# 注意以下几点:\n# 1. 在这6种操作符,只有~取反是单目操作符,其它5种都是双目操作符。\n# 2. 位操作只能用于整形数据,对float和double类型进行位操作会被编译器报错。\n# 3. 位操作符的运算优先级比较低,因为尽量使用括号来确保运算顺序,否则很可能会得到莫明其妙的结果。\n# 4. 另外位操作还有一些复合操作符,如&=、|=、 ^=、<<=、>>=。\n#二. 常用位操作小技巧\n# 1. 判断奇偶\n# 只要根据最未位是0还是1来决定,为0就是偶数,为1就是奇数。\n# 因此可以用if (a & 1 == 0)代替if (a % 2 == 0)来判断a是不是偶数。\n# 2. 交换两数\n# 一般的写法是:利用临时变量才处理。\n# 利用位运算:自身与自身亦或为0,与0异或不改变数据\n# a = (a ^ b)\n# b = (b ^ a) = (b ^ (a ^ b)) = a\n# a = a ^ b = (a ^ (b ^ (a ^ b))) = b\n# 3. 变换符号\n# 对二进制数,进行取反 + 1\n# 以通过下面的变换方法将-11变成11\n# 1111 0101(二进制) –取反-> 0000 1010(二进制) –加1-> 0000 1011(二进制)\n# 可以这样的将11变成-11\n# 0000 1011(二进制) –取反-> 0000 1010(二进制) –加1-> 1111 0101(二进制)\n# 代码:\n# (~a) + 1\n# 4. 求绝对值\n# 先对a >> 31位,则剩下的1位是符号位,1表示负数,0表示正数:\n# 负数进行取反+1\n# i = a >> 31\n# return ~a + 1 if i == 1 else a\n# 由于i的取值为:0 -1,a与0的亦或为自己,与-1的亦或,就是取反。再-(-1),或者-0\n# return (a ^ i) - 1\n#\n#三、位操作与空间压缩\n# 本文着重对筛素数法所使用的素数表进行优化来减小其空间占用。\n# 要压缩素数表的空间占用,可以使用位操作。\n# 在上面程序是用bool数组来作标记的,bool型数据占1个字节(8位),\n# 因此用位操作来压缩下空间占用将会使空间的占用减少八分之一。\n#\n#四、位操作的趣味应用\n# 1. 高低位交换\n# 给出一个16位的无符号整数。称这个二进制数的前8位为“高位”,后8位为“低位”。\n# 例如,数34520用二进制表示为:10000110 11011000\n# 将它的高低位进行交换,我们得到了一个新的二进制数:11011000 10000110\n# 由于x是无符号数,右移时,会高位补0,因此右移 00000000 10000110\n# 左移时,低位补0,因此左移 11011000 00000000\n# x >> 8 与 x << 8 就可以得到了将它的高低位进行交换,我们得到了一个新的二进制数:\n# 11011000 10000110\n#\n# 2. 高低位交换\n# 回顾下字符串的逆序,可以从字符串的首尾开始,依次交换两端的数据。\n# 在二进制逆序我们也可以用这种方法,但运用位操作的高低位交换来处理二进���逆序将会得到更简洁的方法。\n# 类似于归并排序的分组处理,可以通过下面4步得到16位数据的二进制逆序:\n# 第一步:每2位为一组,组内高低位交换\n#\n# 10 00 01 10 11 01 10 00\n# -->01 00 10 01 11 10 01 00\n#\n# 第二步:每4位为一组,组内高低位交换\n# 0100 1001 1110 0100\n# -->0001 0110 1011 0001\n#\n# 第三步:每8位为一组,组内高低位交换\n# 00010110 10110001\n# -->01100001 00011011\n#\n# 第四步:每16位为一组,组内高低位交换\n# 01100001 00011011\n# -->00011011 01100001\n# 对第一步,可以依次取出每2位作一组,再组内高低位交换,这样有点麻烦,\n# 一种非常有技巧的方法。\n# 先分别取10000110 11011000的奇数位和偶数位,空位以下划线表示。\n# 原 数 10000110 11011000\n# 奇数位 1_0_0_1_ 1_0_1_0_\n# 偶数位 _0_0_1_0 _1_1_0_0\n# 将下划线用0填充,可得\n# 原 数 10000110 11011000\n# 奇数位 10000010 10001000\n# 偶数位 00000100 01010000\n# 再将奇数位右移一位,偶数位左移一位,此时将这两个数据相与即可以达到奇偶位上数据交换的效果了。\n# 原 数 10000110 11011000\n# 奇数位右移 01000011 01101100\n# 偶数位左移 0000100 010100000\n# 相与得到 01001000 11100100\n# 取x的奇数位并将偶数位用0填充用代码实现就是x & 0xAAAA\n# 取x的偶数位并将奇数位用0填充用代码实现就是x & 0x5555 4位也是如此\n#\n# 3. 
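# ----------------------------------------------------------------------
# Editor's worked sketch of the bit-compressed sieve idea explained
# above. Instead of bucketing flags into 32-bit words, this version
# leans on Python's arbitrary-precision int as one big bit array (an
# assumption of the sketch, not something the file does):
def primes_below(n):
    flags = 0                      # bit i set  <=>  i marked composite
    out = []
    for i in range(2, n):
        if not (flags >> i) & 1:   # bit still clear -> i is prime
            out.append(i)
            for j in range(i * i, n, i):
                flags |= 1 << j    # mark the multiples of i
    return out

assert primes_below(40) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
# ----------------------------------------------------------------------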
二进制中1的个数\n# 统计二进制中1的个数可以直接移位再判断,当然像《编程之美》书中用循环移位计数或先打一个表再计算都可以。\n# 本文详细讲解一种高效的方法。以34520为例,可以通过下面四步来计算其二进制中1的个数二进制中1的个数。\n# 第一步:每2位为一组,组内高低位相加 x & 0xAAAA 取x的奇数位并将偶数位用0填充用代码实现就是 x & 0x5555\n# 奇数位置右移动,那么奇偶的数据都在最右端了,相加就是结果\n# 第二步:每4位为一组,组内高低位相加\n# 第三步:每8位为一组,组内高低位相加\n# 第四步:每16位为一组,组内高低位相加\ndef isOld(num):\n \"\"\"\n 判断奇数\n >>> print(isOld(20))\n False\n \"\"\"\n if num & 1 == 1:\n return True\n else:\n return False\n\ndef exchangebit(numA, numB):\n \"\"\"\n 交换两个元素\n >>> numA = 10\n >>> numB = 20\n >>> numA,numB = exchangebit(numA, numB)\n >>> print(numA)\n 20\n >>> print(numB)\n 10\n \"\"\"\n numA = numA ^ numB\n numB = numB ^ numA\n numA = numA ^ numB\n return numA,numB\n\ndef exchangeSymbol(num):\n \"\"\"\n 转换符号\n >>> print(exchangeSymbol(10))\n -10\n >>> print(exchangeSymbol(-20))\n 20\n \"\"\"\n return ~num + 1\n\ndef equalAbs(num):\n \"\"\"\n 求绝对值:\n >>> print(equalAbs(-20))\n 20\n >>> print(equalAbs(10))\n 10\n >>> print(equalAbs(20))\n 20\n \"\"\"\n i = num >> 31\n return (num ^ i) - i\n # return exchangeSymbol(num) if i == -1 else num\n\ndef getPrime(num):\n \"\"\"\n 用筛法求素数的基本思想是:把从1开始的、某一范围内的正整数从小到大顺序排列, 1不是素数,\n 首先把它筛掉。剩下的数中选择最小的数是素数,然后去掉它的倍数。依次类推,直到筛子为空时结束。\n //在一个数指定位上置1\n int j = 0;\n j |= 1 << 10;\n printf(\"%d\\n\", j);\n\n //判断指定位上是0还是1\n int j = 1 << 10;\n if ((j & (1 << 10)) != 0)\n printf(\"指定位上为1\");\n else\n printf(\"指定位上为0\");\n\n >>> print(getPrime(40))\n [2, 3, 5, 7, 11, 13, 17, 23, 29, 31, 37]\n \"\"\"\n maxbitnum = num // 32 #位运算存储需要的元素数据\n maxelemnum = num // 3 #最终存储的元素\n\n flag = []\n primes = []\n for i in range(maxbitnum+1): #初始最终存储的元素\n flag.append(0)\n\n print(flag)\n i = 2\n while i < num:\n print(flag[i // 32]) #存储位置\n print(i % 32) #实际元素\n if not ((flag[i // 32] >> (i % 32)) & 1): #0 左移动2位,与1,的结果0,说明是素数\n primes.append(i)\n # print(i,\"是素数\")\n j = i\n while j < num:\n # print(\"\\n\")\n # print(\"====存储位置:\", j // 32) #存储位置\n # print(\"====存储位置的元素:\", flag[j // 32]) #存储位置\n # print(\"----实际元素:\", j % 32) #实际元素\n flag[j // 32] = (flag[j // 32] | (1 << (j % 32))) #在flag中标记不是素数了。\n j += i\n i = i + 1\n # print(\"====i====:\", i)\n return primes\n\n\n\ndef printBinary(a):\n i = getsizeof(a)\n while i >= 0:\n if ((a >> i) & 1) == 1:\n #右移i位,与1与,==1,说明是1\n print(\"1\", end = \"\")\n else:\n print(\"0\", end = \"\")\n if i == 8:\n print(\"0\", end = \" \")\n i = i - 1\n print(\"\\n\")\n\n\ndef exchangeHighAndLow(a):\n \"\"\"\n >>> print(exchangeHighAndLow(4))\n 00000000 00000100\n \"\"\"\n printBinary(a)\n\n a = (a >> 8) | (a << 8)\n printBinary(a)\n\n\ndef binaryrestore(a):\n \"\"\"\n 逆序,可以分组逆序\n >>> print(binaryrestore(4))\n 0100000000000000\n \"\"\"\n #0xAAAA a & 10101010 10101010 由于奇数位1,所以可求出a中的奇数位,偶数为0,与就是填充\n #0x5555 01010101 01010101\n printBinary(a)\n a = ((a & 0xAAAA) >> 1) | ((a & 0x5555) << 1)\n a = ((a & 0xCCCC) >> 2) | ((a & 0x3333) << 2)\n a = ((a & 0xF0F0) >> 4) | ((a & 0x0F0F) << 4)\n a = ((a & 0xFF00) >> 8) | ((a & 0x00FF) << 8)\n printBinary(a)\n\n\ndef binaryonenumber(a):\n \"\"\"\n 逆序,可以分组逆序\n >>> print(binaryrestore(4))\n 0100000000000000\n \"\"\"\n #0xAAAA a & 10101010 10101010 由于奇数位1,所以可求出a中的奇数位,偶数为0,与就是填充\n #0x5555 01010101 01010101\n printBinary(a)\n a = ((a & 0xAAAA) >> 1) + ((a & 0x5555))\n a = ((a & 0xCCCC) >> 2) + ((a & 0x3333))\n a = ((a & 0xF0F0) >> 4) + ((a & 0x0F0F))\n a = ((a & 0xFF00) >> 8) + ((a & 0x00FF))\n printBinary(a)\n\nif __name__ == '__main__':\n # getPrime(40)\n # exchangeHighAndLow(4)\n # print(binaryrestore(4))\n print(binaryonenumber(25))\n # import doctest\n # 
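# ----------------------------------------------------------------------
# Editor's worked example of the pairwise-sum popcount walked through
# above, for a 16-bit value (mask constants as given in the text;
# 34520 = 0b1000011011011000 is the running example from the file):
def popcount16(x):
    x = (x & 0x5555) + ((x >> 1) & 0x5555)   # 8 sums of bit pairs
    x = (x & 0x3333) + ((x >> 2) & 0x3333)   # 4 sums of nibbles
    x = (x & 0x0F0F) + ((x >> 4) & 0x0F0F)   # 2 sums of bytes
    x = (x & 0x00FF) + ((x >> 8) & 0x00FF)   # final sum of the two bytes
    return x

assert popcount16(0b1000011011011000) == 7
assert all(popcount16(v) == bin(v).count('1')
           for v in (0, 1, 0x8001, 34520, 0xFFFF))
# ----------------------------------------------------------------------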
doctest.testmod()\n","sub_path":"code/python/工具箱/位运算/位运算.py","file_name":"位运算.py","file_ext":"py","file_size_in_byte":10973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"463921733","text":"#LA 20-\n#Create the grocery list in dictionary of name quantity and price.\n#keep updating the list till user stops.\n#if the name matches with previous name, only update quantity with price.\n#Print the list. Check if the items are within budget. Ask amount initially.\n#if not keep updating the quantity, and use negative numbers to reduce quantity\n#finally print qrocery amount and balance amount left\nfrom typing import Dict\nn = int(input(\"Enter number of vegetable you want to enter: \"))\nprint(\"Enter vegetable and price per each with space in between: potato 5\")\nd = {}\nfor _ in range(n):\n l = input(\"Enter the vegetable and price of it: \").split()\n d[l[0]] = int(l[1])\nprint(d)\na = int(input(\"Enter your budget: \"))\nrm = a\ngr = {}\nbag = {}\nwhile(True):\n print(\"Enter what you want to buy/replace and how much quantity\")\n l = input(\"Enter the vegetable and quantity of it: \").split()\n if l[0] in d:\n if int(l[1])>0 and rm>0:\n if l[0] in gr:\n gr[l[0]]+=int(l[1])*d[l[0]]\n bag[l[0]] += int(l[1])\n else:\n gr[l[0]] = int(l[1]) * d[l[0]]\n bag[l[0]] = int(l[1])\n else:\n if int(l[1])>0 and rm<0:\n print(\"Out of Budget, cannot Buy\")\n else:\n if l[0] not in bag:\n print(\"Item not in bag\")\n else:\n if bag[l[0]]>=abs(int(l[1])):\n bag[l[0]] += int(l[1])\n gr[l[0]] += int(l[1])*d[l[0]]\n else:\n print(\"You do not have the quantity of {0} in your bag\".format(l[0]))\n else:\n print(\"Item not in shop, cannot buy!!\")\n rm = a - sum([gr[x] for x in gr])\n print(\"Remaining balance: \",rm)\n print(gr,bag)\n b = input(\"Enter ~ to stop shopping else enter anything else to continue\")\n if b==\"~\" and rm>=0:\n break\n elif b==\"~\" and rm<0:\n print(\"Balance out of budget can't exit\")\nprint(\"Grocery list: \",gr,\"\\n Thanks for shopping!!\")\n","sub_path":"21_5_18/LA20.py","file_name":"LA20.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"163862294","text":"class mystack:\n def __init__(self):\n self.data = []\n\n def push(self, item):\n self.data.append(item)\n\n def pop(self):\n if len(self.data) != 0:\n return self.data.pop(-1)\n\n def peek(self):\n if len(self.data) != 0:\n return self.data[-1]\n\n def isEmpty(self):\n if len(self.data) == 0:\n return True\n return False\n\ns = mystack()\ns.push(3)\ns.push(5)\ns.push(7)\nprint(\"peek:\", s.peek())\nprint(\"first pop:\", s.pop())\nprint(\"second pop:\", s.pop())\nprint(s.isEmpty())\ns.pop()\nprint(s.isEmpty())\nprint(s.data)\n\n","sub_path":"Week2/2.2.py","file_name":"2.2.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"329862956","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 21 07:20:50 2019\r\n\r\n@author: WELCOME\r\n\"\"\"\r\n#negative indexing\r\ncars=[]\r\n\r\nprint(cars)\r\n\r\ncars=['tata', 'skoda', 'maruti', 'mahindra', 'kia']\r\n\r\nprint(cars[-2])\r\n\r\n#for loop\r\n\r\nStudents=['aaaa','bbbb','cccc']\r\n\r\nfor students in Students:\r\n print(students)\r\n \r\nfor studentss in Students:\r\n print(f\"{studentss.title()}, practice daily\")\r\n \r\n #working with number\r\n \r\nnum_value=list(range(1,10))\r\nprint(num_value)\r\n\r\nfor values in 
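# ----------------------------------------------------------------------
# Editor's aside on the list-backed stack above (a sketch, not part of
# the record): collections.deque gives the same LIFO behaviour with
# O(1) append/pop and a truthiness test in place of isEmpty():
from collections import deque

stack = deque()
for item in (3, 5, 7):
    stack.append(item)
assert stack[-1] == 7      # peek
assert stack.pop() == 7    # pop
assert bool(stack)         # non-empty
# ----------------------------------------------------------------------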
num_value:\r\n print(values)\r\n\r\n\r\nsquares=[]\r\nfor value in range(5,20):\r\n square=value**2\r\n squares.append(square)\r\n \r\nprint(squares)","sub_path":"Fourth and fifth day coding.py","file_name":"Fourth and fifth day coding.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"23310864","text":"#!/usr/bin/python3\n\n\n\"\"\"\nmethod that calculates the fewest number of\noperations needed to result in exactly n H\ncharacters in the file.\n\"\"\"\n\n\ndef minOperations(n):\n \"\"\"\n method that calculates the fewest\n number of operations needed to result\n in exactly n H characters in the file.\n \"\"\"\n if n <= 1:\n return 0\n count = 2\n chars = 2\n aux_copy = 1\n\n while chars < n:\n if n % chars == 0:\n aux_copy = chars\n count += 1\n chars += aux_copy\n count += 1\n return count\n","sub_path":"0x03-minimum_operations/0-minoperations.py","file_name":"0-minoperations.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"439878598","text":"\"\"\"Urls for subscriptions application.\"\"\"\n\nfrom django.urls import path\nfrom . import views\n\n\napp_name = 'subscriptions'\n\nurlpatterns = [\n path(\n 'posts/',\n views.NewsFeedListView.as_view(),\n name='news_feed'\n ),\n path(\n 'blogs/',\n views.SubscriptionListView.as_view(),\n name='index'\n ),\n path(\n 'posts//read',\n views.mark_as_read,\n name='mark_as_read'\n ),\n]\n","sub_path":"blog_ne_kid/subscriptions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"458230078","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport tensorflow as tf\nimport numpy as np\ntf.set_random_seed(777) \n\n\ndef min_max_scaler(data):\n numerator = data - np.min(data, 0)\n denominator = np.max(data, 0) - np.min(data, 0)\n return numerator / (denominator + 1e-7)\n\n\nxy = min_max_scaler(xy)\nprint(xy)\n\nx_data = xy[:, 0:-1]\ny_data = xy[:, [-1]]\n\nX = tf.placeholder(tf.float32, shape=[None, 4])\nY = tf.placeholder(tf.float32, shape=[None, 1])\n\nW = tf.Variable(tf.random_normal([4, 1]), name='weight')\nb = tf.Variable(tf.random_normal([1]), name='bias')\n\nhypothesis = tf.matmul(X, W) + b\n\ncost = tf.reduce_mean(tf.square(hypothesis - Y))\n\ntrain = tf.train.GradientDescentOptimizer(learning_rate=1e-5).minimize(cost)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for step in range(101):\n _, cost_val, hy_val = sess.run(\n [train, cost, hypothesis], feed_dict={X: x_data, Y: y_data}\n )\n print(step, \"Cost: \", cost_val, \"\\nPrediction:\\n\", hy_val)\n\n","sub_path":"Week3/DongWook/lab 7-2.py","file_name":"lab 7-2.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"177868585","text":"# encoding=utf-8\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport os\n\n# 对于第一个卷积层\nfilter_size1 = 5\nnum_filters1 = 6\n\n# 对于第二个卷积层\nfilter_size2 = 5\nnum_filters2 = 16\n\n# 全连接层\nfc1_size = 120\nfc2_size = 84\n\n# MNIST数据集为每一个维度为28个像素的图像\nimg_size = 28\nimg_shape = (img_size, img_size)\n\n# num_channels=1意味灰阶\nnum_channels = 1\n# 数字识别共10个类别\nnum_class = 10\n\n\ndef plot_images(images, cls_true, 
cls_pred=None):\n assert len(images) == len(cls_true) == 9\n\n fig, axes = plt.subplots(3, 3)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n for i, ax in enumerate(axes.flat):\n ax.imshow(images[i].reshape(img_shape), cmap='binary')\n\n if cls_pred is None:\n xlabel = \"True: {0}\".format(cls_true[i])\n else:\n xlabel = \"True: {0}, Pred: {1}\".format(cls_true[i], cls_pred[i])\n\n ax.set_xlabel(xlabel)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.show()\n\n\ndef inference(input_tensor, train, regularizer):\n # 第一个卷积层。由于我们的卷积层的输入为28*28*1的原始MNIST图片像素,所以这个卷积层使用全0填充,所以输出为28*28*6的矩阵。\n with tf.variable_scope('layer1_conv1'):\n conv1_weights = tf.get_variable(\n \"weight\", [filter_size1, filter_size1, num_channels, num_filters1],\n initializer=tf.truncated_normal_initializer(stddev=0.1)\n )\n conv1_biases = tf.get_variable(\"bias\", [num_filters1], initializer=tf.constant_initializer(0.0))\n\n # 使用边长为5,深度为6的过滤器,过滤器移动的步长为1,且不使用全0填充。\n conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding=\"SAME\")\n relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))\n\n # 第一个池化层,此处选用最大池化层,池化层过滤器的边长为2,使用全0填充且移动的步长为2。\n with tf.variable_scope(\"layer2_pool1\"):\n pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # 第二个卷积层\n with tf.variable_scope('layer3_cov2'):\n conv2_weights = tf.get_variable(\n \"weight\", [filter_size2, filter_size2, num_filters1, num_filters2],\n initializer=tf.truncated_normal_initializer(stddev=0.1)\n )\n conv2_biases = tf.get_variable('bias', [num_filters2], initializer=tf.constant_initializer(0.0))\n conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='VALID')\n relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))\n\n # 第二个池化层\n with tf.variable_scope('layer4_pool2'):\n pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n pool_shape = pool2.get_shape().as_list()\n nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]\n reshaped = tf.reshape(pool2, [pool_shape[0], nodes])\n\n with tf.variable_scope('layer5_fc1'):\n fc1_weights = tf.get_variable('weight', [nodes, fc1_size],\n initializer=tf.truncated_normal_initializer(stddev=0.1))\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(fc1_weights))\n fc1_biases = tf.get_variable('bias', [fc1_size], initializer=tf.constant_initializer(0.1))\n\n fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)\n # 防止过拟合,加入Dropout层\n if train:\n fc1 = tf.nn.dropout(fc1, 0.5)\n\n with tf.variable_scope('layer6_fc2'):\n fc2_weights = tf.get_variable(\n 'weight', [fc1_size, fc2_size], initializer=tf.truncated_normal_initializer(stddev=0.1)\n )\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(fc2_weights))\n fc2_biases = tf.get_variable('bias', [fc2_size], initializer=tf.constant_initializer(0.1))\n fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)\n if train:\n fc2 = tf.nn.dropout(fc2, 0.5)\n\n with tf.variable_scope('layer7_fc3'):\n fc3_weights = tf.get_variable(\n 'weights', [fc2_size, num_class], initializer=tf.truncated_normal_initializer(stddev=0.1))\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(fc3_weights))\n fc3_biases = tf.get_variable('bias', [num_class], initializer=tf.truncated_normal_initializer(stddev=0.1))\n logit = tf.matmul(fc2, fc3_weights) + fc3_biases\n\n return logit\n\n\nbatch_size = 100\nlearning_rate_base = 0.01\nlearning_rate_decay = 0.99\nregularization_rate = 0.0001\ntraining_step = 10000\nmoving_average_decay 
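# ----------------------------------------------------------------------
# Editor's worked arithmetic for the staircase learning-rate schedule
# configured below (plain Python, no TF needed): with staircase=True,
# lr = base * decay ** (global_step // decay_steps). Numbers assume
# this file's values and TF's 55000-image MNIST train split, so
# decay_steps = 55000 / 100 = 550:
def staircase_lr(base, decay, step, decay_steps):
    return base * decay ** (step // decay_steps)

assert staircase_lr(0.01, 0.99, 0, 550) == 0.01
assert staircase_lr(0.01, 0.99, 1100, 550) == 0.01 * 0.99 ** 2
# ----------------------------------------------------------------------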
= 0.99\n\n\n\ndef train(mnist):\n with tf.name_scope('input'):\n x = tf.placeholder(tf.float32, [batch_size, img_size, img_size, num_channels], name='x_input')\n y_ = tf.placeholder(tf.float32, [None, num_class], name='y_input')\n\n\n with tf.name_scope('input_reshape'):\n image_shaped_input = tf.reshape(x, (batch_size, img_size, img_size, num_channels))\n tf.summary.image('input', image_shaped_input, 10)\n\n with tf.name_scope('prediction'):\n regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)\n y = inference(x, False, regularizer)\n global_step = tf.Variable(0, trainable=False)\n\n y_true_cls = tf.argmax(y_, axis=1)\n y_pred = tf.nn.softmax(y)\n y_pred_cls = tf.argmax(y_pred, axis=1)\n\n with tf.name_scope('moving_average'):\n # 定义损失函数、学习率、滑动平均操作以及训练过程\n variable_averages = tf.train.ExponentialMovingAverage(moving_average_decay, global_step)\n variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n with tf.name_scope('cross_entropy_mean'):\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=y_true_cls)\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n tf.summary.scalar('cross_entropy_mean', cross_entropy_mean)\n\n with tf.name_scope('loss'):\n loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))\n tf.summary.scalar('loss', loss)\n\n with tf.name_scope('train_step'):\n learning_rate = tf.train.exponential_decay(\n learning_rate_base, global_step, mnist.train.num_examples / batch_size, learning_rate_decay,\n staircase=True)\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n\n with tf.control_dependencies([train_step, variables_averages_op]):\n train_op = tf.no_op(name='train')\n\n\n saver = tf.train.Saver()\n\n merged = tf.summary.merge_all()\n\n with tf.Session() as sess:\n\n summary_writer = tf.summary.FileWriter('logs', sess.graph)\n\n tf.global_variables_initializer().run()\n for i in range(training_step):\n xs, ys = mnist.train.next_batch(batch_size)\n\n reshaped_xs = np.reshape(xs, (batch_size, img_size, img_size, num_channels))\n\n _, loss_value, step, summary = sess.run([train_op, loss, global_step, merged],\n feed_dict={x: reshaped_xs, y_: ys})\n\n if step % 1000 == 0:\n with tf.name_scope('test_accuracy'):\n # 测试数据集进行测试\n num_test = len(mnist.test.images)\n cls_pred = np.zeros(shape=num_test, dtype=np.int)\n k = 0\n\n while k < num_test:\n j = min(k + batch_size, num_test)\n x_test = mnist.test.images[k:j, :]\n y_test = mnist.test.labels[k:j, :]\n reshaped_x_test = np.reshape(x_test, (batch_size, img_size, img_size, num_channels))\n\n cls_pred[k:j] = sess.run(y_pred_cls, feed_dict={x: reshaped_x_test, y_: y_test})\n k = j\n\n cls_true = np.argmax(mnist.test.labels, 1)\n\n correct = (cls_true == cls_pred)\n\n correct_sum = correct.sum()\n acc = float(correct_sum) / num_test\n tf.summary.scalar('accuracy', acc)\n\n summary_writer.add_summary(summary, i)\n\n print(\"%.4d 次迭代后的损失为:%f,测试数据集的准确率为%f,({%d}/{%d})\"\n % (step, loss_value, acc, correct_sum, num_test))\n\n saver.save(sess, os.path.join('mnist_projecter', \"model.ckpt\"), i)\n\n if step == training_step :\n incorrect = (correct == False)\n images = mnist.test.images[incorrect]\n cls_pred = cls_pred[incorrect]\n cls_true = np.argmax(mnist.test.labels, 1)[incorrect]\n plot_images(images=images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9])\n\n summary_writer.close()\n\n\ndef main(argv=None):\n mnist = input_data.read_data_sets('mnist/', one_hot=True)\n train(mnist)\n\n\nif __name__ 
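# ----------------------------------------------------------------------
# Editor's shape walk-through for the LeNet-5 variant above (pure
# arithmetic; stride-1 SAME keeps the spatial size, stride-1 VALID
# shrinks it by filter_size - 1, each 2x2 stride-2 pool halves it):
def conv_out(size, k, padding):
    return size if padding == 'SAME' else size - k + 1

s = 28                       # input 28x28x1
s = conv_out(s, 5, 'SAME')   # conv1 -> 28x28x6
s //= 2                      # pool1 -> 14x14x6
s = conv_out(s, 5, 'VALID')  # conv2 -> 10x10x16
s //= 2                      # pool2 -> 5x5x16
assert s * s * 16 == 400     # nodes entering the first FC layer
# ----------------------------------------------------------------------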
== '__main__':\n main()\n","sub_path":"Project/LeNet-5.py","file_name":"LeNet-5.py","file_ext":"py","file_size_in_byte":9190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"613524935","text":"import binascii\nimport platform\nimport time\nfrom Library.utils import *\nfrom Library.gpt import gpt\ntry:\n from Library.oppo import oppo\nexcept Exception as e:\n pass\n\nlogger = logging.getLogger(__name__)\nfrom queue import Queue\nfrom threading import Thread\n\n\ndef writefile(wf, q, stop):\n while True:\n data = q.get()\n if len(data) > 0:\n wf.write(data)\n q.task_done()\n if stop() and q.empty():\n break\n\n\nclass asyncwriter():\n def __init__(self, wf):\n self.writequeue = Queue()\n self.worker = Thread(target=writefile, args=(wf, self.writequeue, lambda: self.stopthreads,))\n self.worker.setDaemon(True)\n self.stopthreads = False\n self.worker.start()\n\n def write(self, data):\n self.writequeue.put_nowait(data)\n\n def stop(self):\n self.stopthreads = True\n self.writequeue.join()\n\n\nclass qualcomm_firehose:\n class cfg:\n MemoryName = \"eMMC\"\n TargetName = \"\"\n Version = \"\"\n ZLPAwareHost = 1\n SkipStorageInit = 0\n SkipWrite = 0\n MaxPayloadSizeToTargetInBytes = 1048576\n MaxPayloadSizeFromTargetInBytes = 8192\n SECTOR_SIZE_IN_BYTES = 512\n MaxXMLSizeInBytes = 4096\n bit64 = True\n\n def __init__(self, cdc, xml, cfg, verbose, oppoprjid, serial):\n self.cdc = cdc\n self.xml = xml\n self.cfg = cfg\n self.pk = None\n self.ops = None\n self.serial = serial\n self.oppoprjid = oppoprjid\n logger.setLevel(verbose)\n if verbose==logging.DEBUG:\n fh = logging.FileHandler('log.txt')\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n # ch = logging.StreamHandler()\n # ch.setLevel(logging.ERROR)\n if self.cfg.MemoryName == \"UFS\":\n self.cfg.SECTOR_SIZE_IN_BYTES = 4096\n\n def getstatus(self, resp):\n if \"value\" in resp:\n value = resp[\"value\"]\n if value == \"ACK\":\n return True\n else:\n return False\n return True\n\n def xmlsend(self, data, response=True):\n self.cdc.write(bytes(data,'utf-8'), self.cfg.MaxXMLSizeInBytes)\n data = bytearray()\n counter = 0\n timeout = 3\n resp = {\"value\": \"NAK\"}\n status = False\n if response:\n while b\" timeout:\n break\n data+=tmp\n except Exception as e:\n logger.error(e)\n return [False, resp, data]\n try:\n logger.debug(\"RX:\"+data.decode('utf-8'))\n except:\n logger.debug(\"RX:\" + hexlify(data).decode('utf-8'))\n try:\n resp = self.xml.getresponse(data)\n status = self.getstatus(resp)\n except:\n status = True\n return [status, data, data]\n else:\n status = True\n return [status, resp, data]\n\n def cmd_reset(self):\n data = \"\"\n val = self.xmlsend(data)\n try:\n v = None\n while (v != b''):\n v = self.cdc.read()\n if v != b'':\n resp = self.xml.getlog(v)[0]\n else:\n break\n print(resp)\n except:\n pass\n if val[0]:\n logger.info(\"Reset succeeded.\")\n return True\n else:\n logger.error(\"Reset failed.\")\n return False\n\n def cmd_xml(self, filename):\n with open(filename, 'rb') as rf:\n data = rf.read()\n val = self.xmlsend(data)\n if val[0]:\n logger.info(\"Command succeeded.\" + str(val[2]))\n return val[2]\n else:\n logger.error(\"Command failed:\" + str(val[2]))\n return val[2]\n\n def cmd_nop(self):\n data = \"\"\n val = self.xmlsend(data)\n if val[0]:\n logger.info(\"Nop succeeded.\")\n return self.xml.getlog(val[2])\n else:\n logger.error(\"Nop failed.\")\n return False\n\n def cmd_getsha256digest(self, physical_partition_number, start_sector, 
num_partition_sectors):\n data = f\"\\n\"\n val = self.xmlsend(data)\n if val[0]:\n res = self.xml.getlog(val[2])\n for line in res:\n logger.info(line)\n if \"Digest \" in res:\n return res.split(\"Digest \")[1]\n else:\n return res\n else:\n logger.error(\"GetSha256Digest failed.\")\n return False\n\n def cmd_setbootablestoragedrive(self, partition_number):\n data = f\"\\n\"\n val = self.xmlsend(data)\n if val[0]:\n logger.info(\"Setbootablestoragedrive succeeded.\")\n return True\n else:\n logger.error(\"Setbootablestoragedrive failed: %s\" % val[2])\n return False\n\n def cmd_send(self, content, response=True):\n data = f\"\\n<{content} />\"\n if response:\n val = self.xmlsend(data)\n if val[0] and not b\"log value=\\\"ERROR\\\"\" in val[1]:\n logger.info(f\"{content} succeeded.\")\n return val[2]\n else:\n logger.error(f\"{content} failed.\")\n logger.error(f\"{val[2]}\")\n return False\n else:\n self.xmlsend(data, False)\n return True\n\n def cmd_patch(self, physical_partition_number, start_sector, byte_offset, value, size_in_bytes, display=True):\n '''\n \n '''\n\n data = f\"\\n\" + \\\n f\"\\n\"\n\n if self.ops is not None and \"setprojmodel\" in self.supported_functions:\n pk, token = self.ops.generatetoken(True)\n data += f\"pk=\\\"{pk}\\\" token=\\\"{token}\\\" \"\n\n rsp = self.xmlsend(data)\n if rsp[0] == True:\n if display:\n logger.info(f\"Patch:\\n--------------------\\n\")\n logger.info(rsp[1])\n return True\n else:\n logger.warning(\"Patch command isn't supported.\")\n return False\n\n\n def cmd_program(self, physical_partition_number, start_sector, filename, display=True):\n size = os.stat(filename).st_size\n fsize=os.stat(filename).st_size\n fname=os.path.basename(filename)\n with open(filename, \"rb\") as rf:\n # Make sure we fill data up to the sector size\n num_partition_sectors = size // self.cfg.SECTOR_SIZE_IN_BYTES\n if (size % self.cfg.SECTOR_SIZE_IN_BYTES) != 0:\n num_partition_sectors += 1\n if display:\n logger.info(\n f\"\\nWriting {fname} to physical partition {str(physical_partition_number)}, sector {str(start_sector)}, sectors {str(num_partition_sectors)}\")\n data = f\"\\n\" + \\\n f\"\\n\"\n rsp = self.xmlsend(data)\n pos = 0\n prog = 0\n if display:\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n if rsp[0]:\n bytesToWrite = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors\n total = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors\n old = 0\n while fsize > 0:\n wlen=self.cfg.MaxPayloadSizeToTargetInBytes//self.cfg.SECTOR_SIZE_IN_BYTES*self.cfg.SECTOR_SIZE_IN_BYTES\n if fsize old):\n if display:\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n\n if display and prog != 100:\n print_progress(100, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n self.cdc.write(b'', self.cfg.MaxPayloadSizeToTargetInBytes)\n time.sleep(0.2)\n info = self.xml.getlog(self.cdc.read(self.cfg.MaxXMLSizeInBytes))\n rsp = self.xml.getresponse(self.cdc.read(self.cfg.MaxXMLSizeInBytes))\n if \"value\" in rsp:\n if rsp[\"value\"] == \"ACK\":\n return True\n else:\n logger.error(f\"Error:\")\n for line in info:\n logger.error(line)\n return False\n else:\n return True\n else:\n logger.error(f\"Error:{rsp}\")\n return False\n return False\n\n def cmd_program_buffer(self, physical_partition_number, start_sector, wfdata, display=True):\n size=len(wfdata)\n\n # Make sure we fill data up to the sector size\n num_partition_sectors = size // self.cfg.SECTOR_SIZE_IN_BYTES\n if (size % 
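# ----------------------------------------------------------------------
# Editor's sketch of the sector-alignment arithmetic used by the
# program commands above: "floor-divide then bump on a remainder" is
# ceiling division, and the zero-fill extends the final chunk to a
# sector boundary. Stdlib-only illustration with an assumed 512-byte
# sector:
SECTOR = 512

def sectors_needed(nbytes):
    return (nbytes + SECTOR - 1) // SECTOR    # ceil(nbytes / SECTOR)

def pad_to_sector(chunk):
    short = (-len(chunk)) % SECTOR            # 0 when already aligned
    return chunk + b'\x00' * short

assert (sectors_needed(1), sectors_needed(512), sectors_needed(513)) == (1, 1, 2)
assert len(pad_to_sector(b'x' * 513)) == 1024
# ----------------------------------------------------------------------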
self.cfg.SECTOR_SIZE_IN_BYTES) != 0:\n num_partition_sectors += 1\n if display:\n logger.info(\n f\"\\nWriting to physical partition {str(physical_partition_number)}, sector {str(start_sector)}, sectors {str(num_partition_sectors)}\")\n data = f\"\\n\" + \\\n f\"\\n\"\n rsp = self.xmlsend(data)\n pos = 0\n prog = 0\n if display:\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n if rsp[0]:\n bytesToWrite = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors\n total = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors\n old = 0\n fpos=0\n fsize=len(wfdata)\n while fsize > 0:\n wlen = self.cfg.MaxPayloadSizeToTargetInBytes // self.cfg.SECTOR_SIZE_IN_BYTES * self.cfg.SECTOR_SIZE_IN_BYTES\n if fsize < wlen:\n wlen = fsize\n wdata = wfdata[fpos:fpos+wlen]\n bytesToWrite -= wlen\n fsize -= wlen\n pos += wlen\n fpos += wlen\n if (wlen % self.cfg.SECTOR_SIZE_IN_BYTES) != 0:\n filllen = (wlen // self.cfg.SECTOR_SIZE_IN_BYTES * self.cfg.SECTOR_SIZE_IN_BYTES) + self.cfg.SECTOR_SIZE_IN_BYTES\n wdata += b\"\\x00\" * (filllen - wlen)\n wlen = len(wdata)\n self.cdc.write(wdata, wlen)\n prog = int(float(pos) / float(total) * float(100))\n if (prog > old):\n if display:\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n\n if display and prog != 100:\n print_progress(100, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n self.cdc.write(b'', self.cfg.MaxPayloadSizeToTargetInBytes)\n time.sleep(0.2)\n info = self.xml.getlog(self.cdc.read(self.cfg.MaxXMLSizeInBytes))\n rsp = self.xml.getresponse(self.cdc.read(self.cfg.MaxXMLSizeInBytes))\n if \"value\" in rsp:\n if rsp[\"value\"] == \"ACK\":\n return True\n else:\n logger.error(f\"Error:\")\n for line in info:\n logger.error(line)\n return False\n else:\n return True\n else:\n logger.error(f\"Error:{rsp}\")\n return False\n return False\n\n def cmd_erase(self, physical_partition_number, start_sector, num_partition_sectors, display=True):\n if display:\n logger.info(\n f\"\\nErasing from physical partition {str(physical_partition_number)}, sector {str(start_sector)}, sectors {str(num_partition_sectors)}\")\n data = f\"\\n\" + \\\n f\"\\n\"\n\n rsp = self.xmlsend(data)\n empty = b\"\\x00\" * self.cfg.MaxPayloadSizeToTargetInBytes\n pos = 0\n prog = 0\n if display:\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n if (rsp[0]) == True:\n bytesToWrite = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors\n total = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors\n old = 0\n while (bytesToWrite > 0):\n wlen = self.cfg.MaxPayloadSizeToTargetInBytes\n if bytesToWrite < wlen:\n wlen = bytesToWrite\n self.cdc.write(empty[0:wlen], self.cfg.MaxPayloadSizeToTargetInBytes)\n prog = int(float(pos) / float(total) * float(100))\n if (prog > old):\n if display:\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n bytesToWrite -= wlen\n pos += wlen\n if display and prog != 100:\n print_progress(100, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n self.cdc.write(b'', self.cfg.MaxPayloadSizeToTargetInBytes)\n time.sleep(0.2)\n info = self.xml.getlog(self.cdc.read(self.cfg.MaxXMLSizeInBytes))\n rsp = self.xml.getresponse(self.cdc.read(self.cfg.MaxXMLSizeInBytes))\n if \"value\" in rsp:\n if rsp[\"value\"] == \"ACK\":\n return True\n else:\n logger.error(f\"Error:\")\n for line in info:\n logger.error(line)\n else:\n return True\n else:\n logger.error(f\"Error:{rsp}\")\n return False\n return False\n\n def 
cmd_read(self, physical_partition_number, start_sector, num_partition_sectors, filename, display=True):\n if display:\n logger.info(\n f\"\\nReading from physical partition {str(physical_partition_number)}, sector {str(start_sector)}, sectors {str(num_partition_sectors)}\")\n with open(filename, \"wb\") as wr:\n #wr = asyncwriter(wf)\n data = f\"\\n\"\n rsp = self.xmlsend(data)\n if rsp[0]:\n if \"value\" in rsp[1]:\n if rsp[1][\"value\"] == \"NAK\":\n if display:\n logger.error(rsp[2].decode('utf-8'))\n return b\"\"\n bytesToRead = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors\n total = bytesToRead\n dataread = 0\n old = 0\n prog = 0\n if display:\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n while bytesToRead > 0:\n tmp = self.cdc.read(self.cfg.MaxPayloadSizeToTargetInBytes)\n bytesToRead -= len(tmp)\n dataread += len(tmp)\n wr.write(tmp)\n if display:\n prog = int(float(dataread) / float(total) * float(100))\n if (prog > old):\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n old = prog\n if display and prog != 100:\n print_progress(100, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n # time.sleep(0.2)\n info = self.xml.getlog(self.cdc.read(self.cfg.MaxXMLSizeInBytes))\n rsp = self.xml.getresponse(self.cdc.read(self.cfg.MaxXMLSizeInBytes))\n #wr.stop()\n if \"value\" in rsp:\n if rsp[\"value\"] == \"ACK\":\n return tmp\n else:\n logger.error(f\"Error:\")\n for line in info:\n logger.error(line)\n return b\"\"\n else:\n return tmp\n else:\n logger.error(f\"Error:{rsp[1]}\")\n return b\"\"\n\n def cmd_read_buffer(self, physical_partition_number, start_sector, num_partition_sectors, display=True):\n if display:\n logger.info(\n f\"\\nReading from physical partition {str(physical_partition_number)}, sector {str(start_sector)}, sectors {str(num_partition_sectors)}\")\n data = f\"\\n\"\n\n rsp = self.xmlsend(data)\n resData = bytearray()\n if rsp[0]:\n if \"value\" in rsp[1]:\n if rsp[1][\"value\"] == \"NAK\":\n if display:\n logger.error(rsp[2].decode('utf-8'))\n return b\"\"\n bytesToRead = self.cfg.SECTOR_SIZE_IN_BYTES * num_partition_sectors\n total = bytesToRead\n dataread = 0\n old = 0\n prog = 0\n if display:\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n while bytesToRead > 0:\n tmp = self.cdc.read(self.cfg.MaxPayloadSizeToTargetInBytes)\n bytesToRead -= len(tmp)\n dataread += len(tmp)\n resData += tmp\n prog = int(float(dataread) / float(total) * float(100))\n if (prog > old):\n if display:\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n old = prog\n if display and prog != 100:\n print_progress(100, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n # time.sleep(0.2)\n info = self.xml.getlog(self.cdc.read(self.cfg.MaxXMLSizeInBytes))\n rsp = self.xml.getresponse(self.cdc.read(self.cfg.MaxXMLSizeInBytes))\n if \"value\" in rsp:\n if rsp[\"value\"] == \"ACK\":\n return resData\n else:\n logger.error(f\"Error:\")\n for line in info:\n logger.error(line)\n return b\"\"\n else:\n return resData\n else:\n logger.error(f\"Error:{rsp[2]}\")\n return b\"\"\n return resData #Do not remove, needed for oppo\n\n def get_gpt(self, lun, gpt_num_part_entries, gpt_part_entry_size, gpt_part_entry_start_lba):\n data = self.cmd_read_buffer(lun, 0, 2, False)\n if data == b\"\":\n return None, None\n guid_gpt = gpt(\n num_part_entries=gpt_num_part_entries,\n part_entry_size=gpt_part_entry_size,\n 
part_entry_start_lba=gpt_part_entry_start_lba,\n )\n header = guid_gpt.parseheader(data, self.cfg.SECTOR_SIZE_IN_BYTES)\n if \"first_usable_lba\" in header:\n sectors = header[\"first_usable_lba\"]\n if sectors==0:\n return None, None\n data = self.cmd_read_buffer(lun, 0, sectors, False)\n if data==b\"\":\n return None, None\n guid_gpt.parse(data, self.cfg.SECTOR_SIZE_IN_BYTES)\n return data, guid_gpt\n else:\n return None, None\n\n def get_backup_gpt(self, lun, gpt_num_part_entries, gpt_part_entry_size, gpt_part_entry_start_lba):\n data = self.cmd_read_buffer(lun, 0, 2, False)\n if data == b\"\":\n return None\n guid_gpt = gpt(\n num_part_entries=gpt_num_part_entries,\n part_entry_size=gpt_part_entry_size,\n part_entry_start_lba=gpt_part_entry_start_lba,\n )\n header = guid_gpt.parseheader(data, self.cfg.SECTOR_SIZE_IN_BYTES)\n if \"backup_lba\" in header:\n sectors = header[\"first_usable_lba\"] - 1\n data = self.cmd_read_buffer(lun, header[\"last_usable_lba\"] + 1, sectors, False)\n if data==b\"\":\n return None\n return data\n else:\n return None\n\n def connect(self, lvl):\n v = b'-1'\n if lvl != 1:\n if platform.system() == 'Windows':\n self.cdc.timeout = 10\n else:\n self.cdc.timeout = 10\n info = []\n while v != b'':\n try:\n v = self.cdc.read()\n if v == b'':\n break\n data = self.xml.getlog(v)\n if len(data) > 0:\n info.append(data[0])\n if info == []:\n break\n except:\n break\n supfunc=False\n if info==[]:\n info=self.cmd_nop()\n if info==[]:\n logger.info(\"No supported functions detected, configuring qc generic commands\")\n self.supported_functions = ['configure','program','firmwarewrite','patch','setbootablestoragedrive','ufs','emmc','power','benchmark','read','getstorageinfo','getcrc16digest','getsha256digest','erase','peek','poke','nop','xml']\n else:\n self.supported_functions = []\n for line in info:\n if \"chip serial num\" in line.lower():\n logger.info(line)\n try:\n serial = line.split(\"0x\")[1][:-1]\n self.serial = int(serial,16)\n except:\n serial = line.split(\": \")[2]\n self.serial = int(serial.split(\" \")[0])\n if supfunc and \"end of supported functions\" not in line.lower():\n rs = line.replace(\"\\n\", \"\")\n if rs != \"\":\n self.supported_functions.append(rs)\n if \"supported functions\" in line.lower():\n supfunc = True\n\n '''\n self.supported_functions = []\n for line in info:\n if \"chip serial num\" in line.lower():\n logger.info(line)\n try:\n serial=line.split(\": \")[1]\n self.serial=int(serial.split(\" \")[0])\n except:\n serial=line.split(\": \")[2]\n self.serial=int(serial.split(\" \")[0])\n if supfunc and \"end of supported functions\" not in line.lower():\n rs=line.replace(\"\\n\", \"\")\n if rs!=\"\":\n self.supported_functions.append(rs)\n if \"supported functions\" in line.lower():\n supfunc = True\n '''\n try:\n self.ops = oppo(self,projid=self.oppoprjid, serials=[self.serial, self.serial])\n except Exception as e:\n self.ops = None\n data=self.cdc.read() #logbuf\n try:\n logger.info(data.decode('utf-8'))\n except:\n pass\n connectcmd = f\"\" + \\\n f\"\" + \\\n \"\"\n '''\n \"\" \\\n \"\"\n '''\n rsp = self.xmlsend(connectcmd)\n\n if rsp[0] == True:\n data=self.cdc.read()\n if not \"MemoryName\" in rsp[1]:\n #print(rsp[1])\n rsp[1][\"MemoryName\"]=\"eMMC\"\n if not \"MaxXMLSizeInBytes\" in rsp[1]:\n rsp[1][\"MaxXMLSizeInBytes\"]=\"4096\"\n logging.warning(\"Couldn't detect MaxPayloadSizeFromTargetinBytes\")\n if not \"MaxPayloadSizeToTargetInBytes\" in rsp[1]:\n rsp[1][\"MaxPayloadSizeToTargetInBytes\"] = \"1038576\"\n if not 
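# ----------------------------------------------------------------------
# Editor's sketch of what the gpt helper used above must extract from
# the sectors read at LBA 0-1. This follows the standard UEFI GPT
# header layout (header at the start of LBA 1); it is generic
# background, not this project's exact parser:
import struct

GPT_HEADER = struct.Struct('<8s4sIIIQQQQ16sQII')  # first 88 bytes of the header

def parse_gpt_header(buf, sector_size=4096):
    (sig, _rev, _hsize, _crc, _rsvd, _cur_lba, backup_lba, first_usable,
     last_usable, _guid, entries_lba, num_entries, entry_size) = \
        GPT_HEADER.unpack_from(buf, sector_size)
    if sig != b'EFI PART':
        raise ValueError('not a GPT header')
    return {'backup_lba': backup_lba, 'first_usable_lba': first_usable,
            'last_usable_lba': last_usable, 'part_entry_start_lba': entries_lba,
            'num_part_entries': num_entries, 'part_entry_size': entry_size}

demo = bytearray(4096 * 2)
GPT_HEADER.pack_into(demo, 4096, b'EFI PART', b'\x00\x00\x01\x00', 92, 0, 0,
                     1, 7813119, 6, 7813082, b'\x00' * 16, 2, 128, 128)
assert parse_gpt_header(bytes(demo))['first_usable_lba'] == 6
# ----------------------------------------------------------------------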
\"MaxPayloadSizeToTargetInBytesSupported\" in rsp[1]:\n rsp[1][\"MaxPayloadSizeToTargetInBytesSupported\"] = \"1038576\"\n self.cfg.MemoryName = rsp[1][\"MemoryName\"]\n self.cfg.MaxPayloadSizeToTargetInBytes = int(rsp[1][\"MaxPayloadSizeToTargetInBytes\"])\n self.cfg.MaxPayloadSizeToTargetInBytesSupported = int(rsp[1][\"MaxPayloadSizeToTargetInBytesSupported\"])\n self.cfg.MaxXMLSizeInBytes = int(rsp[1][\"MaxXMLSizeInBytes\"])\n if \"MaxPayloadSizeFromTargetInBytes\" in rsp[1]:\n self.cfg.MaxPayloadSizeFromTargetInBytes = int(rsp[1][\"MaxPayloadSizeFromTargetInBytes\"])\n else:\n self.cfg.MaxPayloadSizeFromTargetInBytes = self.cfg.MaxXMLSizeInBytes\n logging.warning(\"Couldn't detect MaxPayloadSizeFromTargetinBytes\")\n if \"TargetName\" in rsp[1]:\n self.cfg.TargetName = rsp[1][\"TargetName\"]\n if \"MSM\" not in self.cfg.TargetName:\n self.cfg.TargetName = \"MSM\" + self.cfg.TargetName\n else:\n self.cfg.TargetName = \"Unknown\"\n logger.warning(\"Couldn't detect TargetName\")\n if \"Version\" in rsp[1]:\n self.cfg.Version = rsp[1][\"Version\"]\n else:\n self.cfg.Version = 0\n logger.warning(\"Couldn't detect Version\")\n else:\n if \"MaxPayloadSizeToTargetInBytes\" in rsp[1]:\n try:\n self.cfg.MemoryName = rsp[1][\"MemoryName\"]\n self.cfg.MaxPayloadSizeToTargetInBytes = int(rsp[1][\"MaxPayloadSizeToTargetInBytes\"])\n self.cfg.MaxPayloadSizeToTargetInBytesSupported = int(rsp[1][\"MaxPayloadSizeToTargetInBytesSupported\"])\n self.cfg.MaxXMLSizeInBytes = int(rsp[1][\"MaxXMLSizeInBytes\"])\n self.cfg.MaxPayloadSizeFromTargetInBytes = int(rsp[1][\"MaxPayloadSizeFromTargetInBytes\"])\n self.cfg.TargetName = rsp[1][\"TargetName\"]\n except:\n pass\n if \"MSM\" not in self.cfg.TargetName:\n self.cfg.TargetName = \"MSM\" + self.cfg.TargetName\n self.cfg.Version = rsp[1][\"Version\"]\n if lvl == 0:\n return self.connect(lvl + 1)\n else:\n logger.error(f\"Error:{rsp}\")\n exit(0)\n logger.info(f\"TargetName={self.cfg.TargetName}\")\n logger.info(f\"MemoryName={self.cfg.MemoryName}\")\n logger.info(f\"Version={self.cfg.Version}\")\n if self.cfg.MemoryName.lower() == \"emmc\":\n self.cfg.SECTOR_SIZE_IN_BYTES = 512\n elif self.cfg.MemoryName.lower() == \"ufs\":\n self.cfg.SECTOR_SIZE_IN_BYTES = 4096\n return self.supported_functions\n\n # OEM Stuff here below --------------------------------------------------\n\n def cmd_writeimei(self, imei):\n if len(imei) != 16:\n logger.info(\"IMEI must be 16 digits\")\n return False\n data = \"\"\n val = self.xmlsend(data)\n if val[0] == True:\n logger.info(\"writeIMEI succeeded.\")\n return True\n else:\n logger.error(\"writeIMEI failed.\")\n return False\n\n def cmd_getstorageinfo(self):\n data = \"\"\n val = self.xmlsend(data)\n if val[0] == True:\n logger.info(f\"GetStorageInfo:\\n--------------------\\n\")\n logger.info(val[1])\n return True\n else:\n logger.warning(\"GetStorageInfo command isn't supported.\")\n return False\n\n def cmd_getstorageinfo_string(self):\n data = \"\"\n val = self.xmlsend(data)\n resp = \"\"\n if val[0] == True:\n resp += (f\"GetStorageInfo:\\n--------------------\\n\")\n resp += (val[1])\n return resp\n else:\n return \"\"\n\n def cmd_poke(self, address, data, filename=\"\", info=False):\n rf = None\n if filename != \"\":\n rf = open(filename, \"rb\")\n SizeInBytes = os.stat(filename).st_size\n else:\n SizeInBytes = len(data)\n if info:\n logger.info(f\"Poke: Address({hex(address)}),Size({hex(SizeInBytes)})\")\n '''\n \n '''\n maxsize = 8\n lengthtowrite = SizeInBytes\n if lengthtowrite < maxsize:\n maxsize = lengthtowrite\n 
pos = 0\n old = 0\n datawritten = 0\n mode = 0\n if info:\n print_progress(0, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n while (lengthtowrite > 0):\n if rf != None:\n content = hex(int(hexlify(rf.read(maxsize)).decode('utf-8'), 16))\n else:\n content = 0\n if lengthtowrite < maxsize:\n maxsize = lengthtowrite\n for i in range(0, maxsize):\n content = (content << 8) + int(\n hexlify(data[pos + maxsize - i - 1:pos + maxsize - i]).decode('utf-8'), 16)\n # content=hex(int(hexlify(data[pos:pos+maxsize]).decode('utf-8'),16))\n content = hex(content)\n if mode == 0:\n xdata = f\"\\n\"\n else:\n xdata = f\"\\n\"\n try:\n self.cdc.write(xdata, self.cfg.MaxXMLSizeInBytes)\n except:\n pass\n addrinfo = self.cdc.read(self.cfg.MaxXMLSizeInBytes)\n if (b\"SizeInBytes\" in addrinfo or b\"Invalid parameters\" in addrinfo):\n tmp = b\"\"\n while b\"NAK\" not in tmp and b\"ACK\" not in tmp:\n tmp += self.cdc.read(self.cfg.MaxXMLSizeInBytes)\n xdata = f\"\\n\"\n self.cdc.write(xdata, self.cfg.MaxXMLSizeInBytes)\n addrinfo = self.cdc.read(self.cfg.MaxXMLSizeInBytes)\n if b' old):\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n old = prog\n if info:\n logger.info(\"Done writing.\")\n return True\n\n def cmd_peek(self, address, SizeInBytes, filename=\"\", info=False):\n if info:\n logger.info(f\"Peek: Address({hex(address)}),Size({hex(SizeInBytes)})\")\n wf = None\n if filename != \"\":\n wf = open(filename, \"wb\")\n '''\n \n '''\n data = f\"\\n\"\n '''\n \n \n '''\n try:\n self.cdc.write(data, self.cfg.MaxXMLSizeInBytes)\n except:\n pass\n addrinfo = self.cdc.read(self.cfg.MaxXMLSizeInBytes)\n if (b\"SizeInBytes\" in addrinfo or b\"Invalid parameters\" in addrinfo):\n tmp = b\"\"\n while b\"NAK\" not in tmp and b\"ACK\" not in tmp:\n tmp += self.cdc.read(self.cfg.MaxXMLSizeInBytes)\n data = f\"\"\n self.cdc.write(data, self.cfg.MaxXMLSizeInBytes)\n addrinfo = self.cdc.read(self.cfg.MaxXMLSizeInBytes)\n if b' old):\n print_progress(prog, 100, prefix='Progress:', suffix='Complete', bar_length=50)\n old = prog\n\n if wf != None:\n wf.close()\n if b'\\n\\n\\n\"\n return self.cmd_rawxml(data,False)\n else:\n print(\"Setprojmodel command isn't yet implemented\")\n return False\n\n def cmd_demacia(self):\n if self.ops is not None:\n pk, token = self.ops.demacia()\n self.pk = pk\n data = \"\\n\\n\\n\"\n return self.cmd_rawxml(data,False)\n else:\n print(\"Demacia command isn't yet implemented\")\n return False\n\n def cmd_rawxml(self, data, response=True):\n if response:\n val = self.xmlsend(data)\n if val[0] == True:\n logger.info(f\"{data} succeeded.\")\n return val[2]\n else:\n logger.error(f\"{data} failed.\")\n logger.error(f\"{val[2]}\")\n return False\n else:\n self.xmlsend(data, False)\n return True\n","sub_path":"Library/firehose.py","file_name":"firehose.py","file_ext":"py","file_size_in_byte":41648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"343980938","text":"import discord\nfrom discord.ext import commands\nimport aiohttp\n\n\nclass Image(commands.Cog):\n\t\"\"\"\n\tPermet d'obtenir des images depuis Internet.\n\t\"\"\"\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\n\tdef cog_unload(self):\n\t\tpass\n\n\n\t@commands.Cog.listener()\n\tasync def on_ready(self):\n\t\tpass\n\n\n\t@commands.group(aliases=[\"img\"])\n\tasync def image(self, ctx):\n\t\t\"\"\"\n\t\tPermet d'obtenir des images depuis Internet.\n\t\t\"\"\"\n\t\tif ctx.invoked_subcommand is None:\n\t\t\tawait 
ctx.send_help(ctx.command)\n\n\n\t@image.command(name=\"get_a_life\", aliases=[\"gal\"], enabled=False, reason=\"https://api-to.get-a.life/meme est en panne pour le moment ...\")\n\tasync def image_get_a_life(self, ctx):\n\t\t\"\"\"\n\t\tAffiche un meme au hasard trouvé sur get-a_life.com.\n\t\t\"\"\"\n\t\turl = 'https://api-to.get-a.life/meme'\n\t\tasync with ctx.typing():\n\t\t\tasync with self.bot.client_session.get(url) as r:\n\t\t\t\tif r.status == 200:\n\t\t\t\t\tdata = await r.json()\n\t\t\t\t\tawait ctx.send(f\"{data['text']}\\n{data['url']}\")\n\t\t\t\telse:\n\t\t\t\t\tawait ctx.send(f\"erreur, code HTTP {r.status}\")\n\n\n\t@image.command(name=\"meme\")\n\tasync def image_meme_api_herokuapp(self, ctx):\n\t\t\"\"\"\n\t\tAffiche un meme au hasard trouvé sur meme-api.herokuapp.com/gimme.\n\t\t\"\"\"\n\t\turl = 'https://meme-api.herokuapp.com/gimme'\n\t\tasync with ctx.typing():\n\t\t\tasync with self.bot.client_session.get(url) as r:\n\t\t\t\tif r.status == 200:\n\t\t\t\t\tdata = await r.json()\n\t\t\t\t\tembed = discord.Embed()\n\t\t\t\t\tembed.add_field(name=\"postLink\", value=data['postLink'], inline=False)\n\t\t\t\t\tembed.add_field(name=\"subreddit\", value=data['subreddit'], inline=False)\n\t\t\t\t\tembed.add_field(name=\"title\", value=data['title'], inline=False)\n\t\t\t\t\tembed.set_image(url=data['url'])\n\t\t\t\t\tawait ctx.send(embed=embed)\n\t\t\t\telse:\n\t\t\t\t\tawait ctx.send(f\"erreur, code HTTP {r.status}\")\n\ndef setup(bot):\n\tbot.add_cog(Image(bot))","sub_path":"bot/ext/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"185509898","text":"#to run this, you have to do python read_reportfile.py\n#This code works very good, earlier the code was confused between HMS and SHMS 3/4 rate, becuase in all \"SHMS\", there is \"HMS\"\n# and Deb fixed it.\n#2019 oct 03\n#import various modules \nimport glob, string, pickle \nimport numpy as np \nimport pandas as pd\n#use glob to grab the whole report folder and read those report file one by one\n\n#rf = glob.glob('/lustre/expphy/volatile/hallc/spring17/hdbhatt/group/REPORT_OUTPUT/COIN/PRODUCTION/replay_coin_production_626*-1.report') \nrf = glob.glob('/lustre/expphy/volatile/hallc/spring17/hdbhatt/group/REPORT_OUTPUT/COIN/PRODUCTION_oct4/replay_coin_production_****-1.report') \n\n#define the data dictionary \ndd={ }\n\n#sort the list for consistency \nrf.sort()\n\n\n#define a report file dictionary : where we will store the several values\n#from the report files \n \nrfd={ 'rn' : [], # run number \n 'hms_rate' : [], #HMS 3/4 Trigger Rate line 133 //added \n 'shms_rate' : [], # SHMS 3/4 Trigger Rate line 124 //added\n 'current_4A' : [], # current as measured by bcm 4A\n 'current_4B' : [], # current as measured by bcm 4B \n 'current_unser' : [] # current as measured by Unser\n\n }\nfor index, run in enumerate(rf):\n with open(rf[index]) as fobj:\n for line in fobj:\n report_data = line.split(':')\n if('Run #' in report_data[0]) : rfd['rn'].append(report_data[1].strip()) # [1] is for \n if('HMS 3/4 Trigger Rate' in report_data[0] and not 'SHMS' in report_data[0]) : rfd['hms_rate'].append(report_data[1][:5].strip()) # [1][:7] is for\n if('HMS 3/4 Trigger Rate' in report_data[0]) : print(line)\n if('SHMS 3/4 Trigger Rate' in report_data[0]) : rfd['shms_rate'].append(report_data[1][:7].strip())\n if('BCM1 Current' in report_data[0]) : rfd['current_4A'].append(report_data[1][:7].strip())\n if('BCM2 
Current' in report_data[0]) : rfd['current_4B'].append(report_data[1][:7].strip())\n if('Unser Current' in report_data[0]) : rfd['current_unser'].append(report_data[1][:7].strip())\n\n#print(rfd)\n\nfor index, run in enumerate(rf):\n print(rfd['rn'][index])\n print(rfd['hms_rate'][index])\n\nfor var_str, var in rfd.items(): #var_str : keys, var : list of values\n #print(var)\n dd[var_str]=[]\n for index, var in enumerate(rfd['rn']):\n dd[var_str].append(rfd[var_str][index])\n\nfor rfd_var, rfd_list in dd.items():\n# print(rfd_list)\n rfd_array = np.asarray(rfd_list, dtype='float')\n dd[rfd_var] = rfd_array\n\n#print(dd)\n#pickle.dump(dd,open('bcm_unser_cut_current.pkl','wb'))\n\nfile = open(\"rate/hms_shms_rate_good1.txt\",\"w\")\nfor index in range(len(rfd['rn'])):\n print(rfd['rn'][index] + \" \" + rfd['hms_rate'][index] + \" \"+rfd['shms_rate'][index])\n file.write(str(rfd['rn'][index]) + \" \" + str(rfd['hms_rate'][index]) + \" \" + str(rfd['shms_rate'][index]) +\"\\n\")# \" \" + str(rfd['current_4A'][index])+ \" \" + str(rfd['current_4B'][index]) + \"\\n\")\n \n #added to read pkl file\n #unpickled_df = pd.read_pickle(\"./bcm_unser_cut_current.pkl\")\n #print (unpickled_df)\nfile.close()\n \n \n\n","sub_path":"REPORT_FILE/read_rate.py","file_name":"read_rate.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"311804454","text":"# Load the Pandas libraries with alias 'pd'\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, VotingClassifier\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\n\n\ndata = pd.read_csv(\"./data/FDAXON.csv\")\ndata = data.dropna()\n\nX = data.drop('Success', axis=1)\n# X = X.drop(X.columns[2], axis=1)\n\ny = data['Success']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)\n\nseed = 10\n\ndtc = DecisionTreeClassifier(\n criterion='gini',\n min_samples_leaf=5,\n min_samples_split=5,\n max_depth=None,\n random_state=seed\n)\n\n\nrfc = RandomForestClassifier(\n n_estimators=10,\n oob_score=True,\n random_state=seed\n)\n\n\nknc = KNeighborsClassifier(n_neighbors=50)\n\n\nmlpc = MLPClassifier(solver='lbfgs',\n alpha=1e-5,\n hidden_layer_sizes=(5, 2),\n random_state=seed)\n\n\nvc = VotingClassifier(estimators=[('dtc', dtc),\n ('rfc', rfc),\n ('knc', knc),\n ('mlpc', mlpc)\n ],\n voting='hard')\n\nvc = vc.fit(X_train, y_train)\n\n\ny_pred = vc.predict(X_test)\n\n\nprint(confusion_matrix(y_test, y_pred,labels=[1,0]))\nprint(classification_report(y_test, y_pred))\n","sub_path":"VOTING/voting.py","file_name":"voting.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"274484982","text":"import matplotlib.pyplot as plt, numpy as np, pandas as pd\nfrom ..utils import *\nfrom ..measure import *\n\ndef plot_emsd(ax,emsd,label='_Hidden', color='gray', alpha=1.,**kwargs):\n x_values=emsd.index.values/10**3#np.log10(emsd.index.values)# lag in seconds\n y_values=emsd.values#*10**3#np.log10(emsd.values) #msd in cm^2\n ax.plot(x_values,y_values, label=label, color=color, alpha=alpha,**kwargs)\n\ndef format_plot_emsd(ax,fontsize=20,use_loglog=True):\n #format plot\n if use_loglog:\n ax.set_xscale('log')\n 
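As an aside on the plotting helpers in this record: the slope plots visualize the local power-law exponent of the MSD. A minimal standalone sketch, using synthetic data rather than anything from the record, of estimating that exponent with a straight-line fit in log-log space:

import numpy as np

lag = np.logspace(-1, 1, 50)   # lag times in seconds (synthetic)
msd = 0.3 * lag ** 1.0         # Brownian-like test signal: MSD ~ lag^1
alpha, log_prefactor = np.polyfit(np.log(lag), np.log(msd), 1)
print(alpha)  # ~1.0 for Brownian motion; ~2.0 would indicate ballistic motion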
ax.set_yscale('log')\n ax.set_xlabel('lag (seconds)',fontsize=fontsize)\n ax.set_ylabel('MSD (cm$^2$)',fontsize=fontsize)\n# ax.set_title(f'FK model, Area:25cm$^2$, $D_{{V_{{mem}}}}$:0.5cm$^2$/s, N:{trials_considered}\\nmin_duration:{T_min/10**3:.1f}s\\n',fontsize=fontsize)\n ax.tick_params(axis='both', which='major', labelsize=fontsize)\n ax.tick_params(axis='both', which='minor', labelsize=0)\n# ax.set_ylim([0,2.05])\n\n# def plot_slope_of_emsd(ax,emsd,T_min,omit_time,label='_Hidden', color='gray', alpha=0.3,plot_reference_lines=True,**kwargs):\n# lag_values,slope_values=compute_slope_vs_lag(emsd,T_min,omit_time,window_width=50,stepsize=10)\ndef plot_slope_of_emsd(ax,lag_values,slope_values,label='_Hidden', color='gray', alpha=1.,plot_reference_lines=True,**kwargs):\n# lag_values,slope_values=compute_slope_vs_lag(emsd,T_min,omit_time,window_width=50,stepsize=10\n ax.plot(lag_values,slope_values, label=label, color=color, alpha=alpha,**kwargs)\n if plot_reference_lines:\n ax.plot(lag_values,2+0.*slope_values,label='Ballistic',color='black',ls='dotted')\n ax.plot(lag_values,1+0.*slope_values,label='Brownian',color='black',ls='solid')\n\ndef format_slope_of_emsd(ax,fontsize=20,use_loglog=True,plot_reference_lines=True,loc='best',ncol_legend=2,**kwargs):\n #format plot\n if use_loglog:\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel('lag (seconds)',fontsize=fontsize)\n ax.set_ylabel('exponent value',fontsize=fontsize)\n# ax.set_title(f'FK model, Area:25cm$^2$, $D_{{V_{{mem}}}}$:0.5cm$^2$/s, N:{trials_considered}\\nmin_duration:{T_min/10**3:.1f}s\\n',fontsize=fontsize)\n ax.tick_params(axis='both', which='major', labelsize=fontsize)\n ax.tick_params(axis='both', which='minor', labelsize=0)\n if plot_reference_lines:\n ax.legend(loc=loc,fontsize=fontsize,ncol=ncol_legend)\n# ax.set_ylim([0,2.05])\n\n\nif __name__=='__main__':\n import sys,os\n for file in sys.argv[1:]:\n # trgt ='_unwrap.csv'\n # assert (file[-len(trgt):]==trgt)\n df =pd.read_csv(file)\n T_min=1000#ms\n omit_time=150#ms\n DS=0.025#cm/pixel\n figsize=(17,4);fontsize=16\n saving=True\n savefig_folder=os.path.dirname(file)#os.path.join(nb_dir,'Figures/msd_loglog')\n savefig_fn=os.path.basename(file).replace('.csv','.png')#f'logMSD_vs_loglag_Tmin_{T_min/10**3:.1f}_N_{trials_considered}_mni_{min_num_individuals}.png'\n\n df=pd.read_csv(file)\n # df=return_unwrapped_trajectory(df, width, height, sr, mem, dsdpixel, **kwargs)\n DT=compute_time_between_frames(df);print(f\"DT={DT}\")\n # df=get_all_longer_than(df,DT,T_min=T_min)\n #count remaining individuals\n num_individuals=len(list(set(df.particle.values)));print(f\"num_individuals={num_individuals}\")\n emsd=compute_emsd(traj=df.copy(), DT=DT, omit_time=omit_time, printing=False,DS=DS)\n\n fig,axs=plt.subplots(ncols=3,figsize=figsize)\n plot_emsd(axs[0],emsd)\n format_plot_emsd(axs[0],use_loglog=False,fontsize=fontsize)\n\n plot_emsd(axs[1],emsd)\n format_plot_emsd(axs[1],use_loglog=True,fontsize=fontsize)\n\n plot_slope_of_emsd(axs[2],emsd,label='_Hidden', color='gray', alpha=0.3,plot_reference_lines=True)\n format_slope_of_emsd(axs[2],fontsize=fontsize,use_loglog=False)\n\n if not saving:\n plt.tight_layout()\n plt.show()\n else:\n plt.tight_layout()\n os.chdir(savefig_folder)\n plt.savefig(savefig_fn, dpi=300)\n print(f\"saved figure in \\n\\t{savefig_fn}\")\n 
plt.close()\n","sub_path":"notebooks/lib/viewer/PlotEMSD.py","file_name":"PlotEMSD.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"34839814","text":"from tkinter import *\nfrom tkinter import ttk\ngui = Tk()\ngui.title('Cadastrar Equipamento')\n\nfm_cliente = LabelFrame(gui, text='Cliente')\nfm_equipamento = LabelFrame(gui, text='Equipamento')\n\n#Declarando Widgets de Cadastro de Clientes\n\nlb_cliente = Label(fm_cliente, text='Razão Social/Nome: ', padx=0)\nlb_tipo = Label(fm_cliente, text='Tipo:')\nlb_cel = Label(fm_cliente, text='Celular: ', padx=0)\nlb_tel = Label(fm_cliente, text='Telefone: ', padx=0)\nlb_email = Label(fm_cliente, text='E-mail: ', padx=0)\nlb_ender = Label(fm_cliente, text='Endereço: ', padx=0)\nlb_num = Label(fm_cliente, text='Nº: ', padx=0)\nlb_os = Label(fm_cliente, text='Numero de OS')\n\ncb_tipo = ttk.Combobox(fm_cliente)\ncb_tipo['values'] = ('Fisica', 'Juridica')\n\net_cliente = Entry(fm_cliente)\net_cel = Entry(fm_cliente)\net_tel = Entry(fm_cliente)\net_email = Entry(fm_cliente)\net_ender = Entry(fm_cliente, width=18)\net_num = Entry(fm_cliente, width=5)\net_os = Entry(fm_cliente, width=11, fg='red', bg='black')\net_os.insert(0, '*415*')\n#Demarcando Posições dos Widgets Clientes\n\nfm_cliente.grid(row=0, column=0)\nlb_cliente.grid(row=0, column=0)\nlb_tipo.grid(row=0, column=2)\ncb_tipo.grid(row=0, column=3)\nlb_cel.grid(row=1, column=0, sticky=W)\nlb_tel.grid(row=2, column=0, sticky=W)\nlb_email.grid(row=3, column=0, sticky=W)\nlb_ender.grid(row=4, column=0, sticky=W)\nlb_num.grid(row=5, column=0, sticky=W)\net_cliente.grid(row=0, column=1, sticky=W)\net_cel.grid(row=1, column=1, sticky=W)\net_tel.grid(row=2, column=1, sticky=W)\net_email.grid(row=3, column=1, sticky=W)\net_ender.grid(row=4, column=1, sticky=W)\net_num.grid(row=5, column=1, sticky=W)\nlb_os.grid(row=4, column=3, sticky=E)\net_os.grid(row=5, column=3, sticky=E)\n\n#Declarando Widget De Cadastro de Equipamento\n\nlb_equipamento = Label(fm_equipamento, text='Equipamento:')\nlb_modelo = Label(fm_equipamento, text='Modelo:')\nlb_cor = Label(fm_equipamento, text='Cor:')\nlb_ns = Label(fm_equipamento, text='Numero de Serie:')\nlb_acessorios = Label(fm_equipamento, text='Acessorios:')\nlb_defeito = Label(fm_equipamento, text='Defeito:')\n\net_equipamento = Entry(fm_equipamento)\net_modelo = Entry(fm_equipamento)\net_cor = Entry(fm_equipamento)\net_ns = Entry(fm_equipamento)\net_acessorios = Entry(fm_equipamento)\net_defeito = Entry(fm_equipamento)\n\n#Declarando Posição de Widgets de Equipamento\n\nfm_equipamento.grid(row=1, column=0)\nlb_equipamento.grid(row=0, column=0)\nlb_modelo.grid(row=1, column=0)\nlb_cor.grid(row=2, column=0)\nlb_ns.grid(row=3, column=0)\nlb_acessorios.grid(row=4, column=0)\nlb_defeito.grid(row=5, column=0)\n\net_equipamento.grid(row=0, column=1)\net_modelo.grid(row=1, column=1)\net_cor.grid(row=2, column=1)\net_ns.grid(row=3, column=1)\net_acessorios.grid(row=4, column=1)\net_defeito.grid(row=5, column=1)\nlb_result = Label(gui, text='')\nbt_cadastrar = Button(gui, text='Cadastrar')\nlb_result.grid(row=11, columnspan=2)\nbt_cadastrar.grid(row=12, columnspan=2)\ngui.mainloop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\nfrom tkinter import *\nimport os\nimport time\na = time.localtime()\ndata = ('Hora: {}:{} Data: {}/{}/{} '.format(a[3], a[4], a[2], a[1], a[0]))\n\ndef cad_eqp():\n link = 'localhost'\n equipamento = 
et_equipamento.get()\n modelo = et_modelo.get()\n cor = et_cor.get()\n ns = et_ns.get()\n acessorios = et_acessorios.get()\n defeito = et_defeito.get()\n html = '''\n\n \n \n \n Dados Do Equipamento: {}\n \n \n \n \n \n
\n <table>\n <tr><td>Dados Do Equipamento</td></tr>\n <tr><td>{}</td></tr>\n <tr><td>Equipamento: {}</td></tr>\n <tr><td>Modelo: {}</td></tr>\n <tr><td>Cor: {}</td></tr>\n <tr><td>Numero de Serie: {}</td></tr>\n <tr><td>Acessorios: {}</td></tr>\n <tr><td>Defeito: {}</td></tr>\n <tr><td>QRCODE</td></tr>\n <tr><td><img src=\"QR_LIST/{}{}.png\"></td></tr>\n </table>
\n \n \n'''.format(equipamento, data, equipamento, modelo, cor, ns, acessorios, defeito, equipamento, modelo)\n os.system('cd /home/haise/Servidor/os/QR_LIST && qr {}.html >> {}{}.png'.format(link, equipamento,modelo))\n os.system('cd /home/haise/Servidor/os && echo \"{}\" >> {}.html'.format(html, equipamento))\n\n\ngui = Tk()\ngui.title('Cadastrar Equipamento')\nlb_equipamento = Label(gui, text='Equipamento:')\nlb_modelo = Label(gui, text='Modelo:')\nlb_cor = Label(gui, text='Cor:')\nlb_ns = Label(gui, text='Numero de Serie:')\nlb_acessorios = Label(gui, text='Acessorios:')\nlb_defeito = Label(gui, text='Defeito:')\n\net_equipamento = Entry(gui)\net_modelo = Entry(gui)\net_cor = Entry(gui)\net_ns = Entry(gui)\net_acessorios = Entry(gui)\net_defeito = Entry(gui)\nlb_result = Label(gui, text='')\n\nbt_cadastrar = Button(gui, text='Cadastrar', command=cad_eqp)\n\nlb_equipamento.grid(row=0, column=0)\nlb_modelo.grid(row=1, column=0)\nlb_cor.grid(row=2, column=0)\nlb_ns.grid(row=3, column=0)\nlb_acessorios.grid(row=4, column=0)\nlb_defeito.grid(row=5, column=0)\n\net_equipamento.grid(row=0, column=1)\net_modelo.grid(row=1, column=1)\net_cor.grid(row=2, column=1)\net_ns.grid(row=3, column=1)\net_acessorios.grid(row=4, column=1)\net_defeito.grid(row=5, column=1)\nlb_result.grid(row=6, columnspan=2)\nbt_cadastrar.grid(row=7, columnspan=2)\n\ngui.mainloop()\n\n\"\"\"\n\n\"\"\"\n if equipamento == '':\n lb_result['text'] = 'É Obrigatorio Preencher o *Equipamento*'\n lb_result['fg'] = 'red'\n else:\n if modelo == '':\n lb_result['text'] = 'É Obrigatorio Preencher o *Modelo*'\n lb_result['fg'] = 'red'\n else:\n if cor == '':\n lb_result['text'] = 'É Obrigatorio Preencher a *Cor*'\n lb_result['fg'] = 'red'\n else:\n if ns == '':\n lb_result['text'] = 'É Obrigatorio Preencher o *Numero de Serie*'\n lb_result['fg'] = 'red'\n else:\n if defeito == '':\n lb_result['text'] = 'É Obrigatorio Especificar o Defeito!'\n lb_result['fg'] = 'red'\n else:\n lb_result['text'] = 'Cadastrado com Sucesso'\n lb_result['fg'] = 'green'\n if acessorios == '':\n acessorios = 'Sem Acessorios'\n\"\"\"","sub_path":"Func/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"214897103","text":"# Descripción: Este módulo contiene los algoritmos de ordenamiento de\r\n# simples de complejidad computacional On2: \r\n# \r\n# insertionsort, selectionsort, shellsort, bubblesort.\r\n# \r\n\r\n# Y los algoritmos de complejidad computacional nlgn:\r\n#\r\n# mergesort(lista), mergesort_iterativo(lista),\r\n# mergesort_three(lista), mergesort_opt(lista),\r\n# quicksort_iterativo(lista), quicksort(lista),\r\n# randomized_quicksort(lista)\r\n#\r\n# Creado por: Juan Reyna\r\n# email: 10-10883@usb.ve\r\n# version: 4.0\r\n\r\nimport random as rd\r\nimport sys\r\nimport math as mt\r\n\r\nsys.setrecursionlimit(1000000000 * 2)\r\n\r\n\r\ndef insertion_sort(A):\r\n \"\"\"Dado un arreglo de longitud n. Insertionsort toma uno por uno\r\n los elementos y se recorren hacia su posición con respecto a los\r\n anteriormente ordenados.\r\n\r\n Esta función devuelve el mismo arreglo ordenado\"\"\"\r\n\r\n m = A.index(min(A))\r\n n = len(A)\r\n A[0], A[m] = A[m], A[0]\r\n\r\n for i in range(1, n):\r\n j = i\r\n while A[j] < A[j - 1]:\r\n A[j], A[j - 1] = A[j - 1], A[j]\r\n j -= 1\r\n return A\r\n\r\n\r\ndef selection_sort(A):\r\n \"\"\"Dado un arreglo de longitud n. 
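A quick property check, not part of the record, assuming sortlib's insertion_sort, selection_sort, shell_sort and bubble_sort are in scope as defined in this record: each simple O(n^2) sort is compared against Python's built-in sorted on random non-empty lists (insertion_sort calls min(A), so empty input is excluded).

import random

def check(sort_fn, trials=200):
    for _ in range(trials):
        a = [random.randint(0, 99) for _ in range(random.randint(1, 40))]
        assert sort_fn(list(a)) == sorted(a), sort_fn.__name__

for fn in (insertion_sort, selection_sort, shell_sort, bubble_sort):
    check(fn)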
Selectionsort encuentra el menor\r\n de todos los elementos del arreglo e intercambiarlo con el que\r\n está en la primera posición. Luego el segundo mas pequeño,\r\n y así sucesivamente hasta ordenar todo el arreglo.\r\n\r\n Esta función devuelve el mismo arreglo ordenado\"\"\"\r\n\r\n n = len(A)\r\n for i in range(0, n):\r\n lowindex = i\r\n lowkey = A[i]\r\n for j in range(i + 1, n):\r\n if A[j] < lowkey:\r\n lowkey = A[j]\r\n lowindex = j\r\n A[i], A[lowindex] = A[lowindex], A[i]\r\n return A\r\n\r\n\r\ndef shell_sort(A):\r\n\r\n n = len(A)\r\n incr = n // 2\r\n while incr > 0:\r\n for i in range(incr, n):\r\n j = i - incr\r\n while j > -1:\r\n if A[j] > A[j + incr]:\r\n A[j], A[j + incr] = A[j + incr], A[j]\r\n j = j - incr\r\n else:\r\n j = -1\r\n incr = incr // 2\r\n return A\r\n\r\n\r\ndef bubble_sort(A):\r\n \"\"\"Dado un arreglo de longitud n. Insertionsort Se recorre el\r\n arreglo intercambiando los elementos adjacentes que estén\r\n desordenados. Se recorre el arreglo tantas veces\r\n hasta que ya no haya cambios.\r\n\r\n Esta función devuelve el mismo arreglo ordenado\"\"\"\r\n\r\n n = len(A)\r\n for i in range(0, n):\r\n for j in range(n - 1, i, -1):\r\n if A[j - 1] > A[j]:\r\n A[j - 1], A[j] = A[j], A[j - 1]\r\n return A\r\n\r\n\r\ndef mergesort(A):\r\n if len(A) <= 2:\r\n TimSort(A)\r\n else:\r\n mitad = len(A) // 2\r\n vector1 = A[:mitad]\r\n vector2 = A[mitad:]\r\n mergesort(vector1)\r\n mergesort(vector2)\r\n A = merge(vector1, vector2, A)\r\n return A\r\n\r\n\r\ndef merge(U, V, T):\r\n i, j = 0, 0\r\n N = len(U)\r\n M = len(V)\r\n U.append(mt.inf) if type(U[0]) == int or type(U[0]) == float else U.append(2 * max(T))\r\n V.append(mt.inf) if type(V[0]) == int or type(V[0]) == float else V.append(2 * max(T))\r\n for k in range(0, M + N):\r\n if U[i] < V[j]:\r\n T[k] = U[i]\r\n i = i + 1\r\n else:\r\n T[k] = V[j]\r\n j = j + 1\r\n return T\r\n\r\n\r\ndef mergesort_iterativo(A):\r\n k = 1\r\n N = len(A)\r\n while k < N:\r\n a, b, c = 0, k, min(2 * k, N)\r\n z = [0] * N\r\n while b < N:\r\n p, q, r = a, b, a\r\n while p != b and q != c:\r\n if A[p] <= A[q]:\r\n z[r] = A[p]\r\n r, p = r + 1, p + 1\r\n else:\r\n z[r] = A[q]\r\n r, q = r + 1, q + 1\r\n while p != b:\r\n z[r] = A[p]\r\n r, p = r + 1, p + 1\r\n while q != c:\r\n z[r] = A[q]\r\n r, q = r + 1, q + 1\r\n r = a\r\n while r != c:\r\n A[r] = z[r]\r\n r += 1\r\n a, b, c = a + 2 * k, b + 2 * k, min(c + 2 * k, N)\r\n k *= 2\r\n\r\n return z\r\n\r\n\r\n# Algoritmo de ordenamiento Heapsort\r\ndef parent(i):\r\n # Retorna el índice del padre del elemento i\r\n return i // 2\r\n\r\n\r\ndef rightChild(derecha):\r\n # Retorna índice de hijo derecho del elemento en la posición i\r\n return 2 * derecha + 1\r\n\r\n\r\ndef leftChild(izquierda):\r\n # Retorna índice de hijo izquierdo del elemento en la posición i\r\n return 2 * izquierda\r\n\r\n\r\ndef maxHeapify(A, i, n): # borre n\r\n # Constrye el Heap que incluye el elemento en la posición i\r\n\r\n le = leftChild(i)\r\n r = rightChild(i)\r\n if le < n and A[le] > A[i]:\r\n # cambie n por i\r\n largest = le\r\n else:\r\n largest = i\r\n if r < n and A[r] > A[largest]:\r\n largest = r\r\n if largest != i:\r\n A[i], A[largest] = A[largest], A[i]\r\n maxHeapify(A, largest, n)\r\n\r\n\r\ndef buildMaxHeapify(A):\r\n # Construye en A un heap de tipo Max con los elementos de A\r\n n = len(A)\r\n for i in range(n // 2, -1, -1):\r\n maxHeapify(A, i, n)\r\n return A\r\n\r\n\r\ndef heapsort(A):\r\n # Implementa heapsort sobre el arreglo A\r\n buildMaxHeapify(A)\r\n n = len(A) - 1\r\n for i 
in range(n, -1, -1):\r\n A[0], A[i] = A[i], A[0]\r\n maxHeapify(A, 0, i)\r\n return A\r\n\r\n\r\ndef sort_three(A):\r\n for i in range(0, len(A)):\r\n for j in range(i + 1, len(A)):\r\n if A[i] > A[j]:\r\n A[i], A[j] = A[j], A[i]\r\n else:\r\n pass\r\n return A\r\n\r\n\r\ndef mergesort_three(A):\r\n if len(A) <= 3:\r\n sort_three(A)\r\n else:\r\n mitad = len(A) // 2\r\n vector1 = A[:mitad]\r\n vector2 = A[mitad:]\r\n mergesort(vector1)\r\n mergesort(vector2)\r\n A = merge(vector1, vector2, A)\r\n return A\r\n\r\n\r\ndef mergesort_opt(A):\r\n if len(A) <= 500:\r\n shell_sort(A)\r\n else:\r\n mitad = len(A) // 2\r\n vector1 = A[:mitad]\r\n vector2 = A[mitad:]\r\n mergesort(vector1)\r\n mergesort(vector2)\r\n A = merge(vector1, vector2, A)\r\n return A\r\n\r\n\r\ndef quicksort_iterativo(A):\r\n n, m, longitud = 0, 1, len(A)\r\n\r\n while m < longitud:\r\n n, m = n + 1, m * 2\r\n\r\n k, p, q = 0, 0, longitud\r\n x = [0] * (n)\r\n y = [0] * (n)\r\n\r\n while (k != 0) or (q - p >= 2):\r\n if (q - p <= 1):\r\n k = k - 1\r\n p = x[k]\r\n q = y[k]\r\n elif (q - p >= 2):\r\n mitad = (p + q) // 2\r\n z = A[mitad]\r\n r, w, b = p, p, q\r\n\r\n while w != b:\r\n\r\n if A[w] < z:\r\n A[r], A[w] = A[w], A[r]\r\n r, w = r + 1, w + 1\r\n elif A[w] == z:\r\n w = w + 1\r\n elif A[w] > z:\r\n b = b - 1\r\n A[b], A[w] = A[w], A[b]\r\n\r\n if r - p <= q - w:\r\n x[k] = w\r\n y[k] = q\r\n q = r\r\n elif q - w < r - p:\r\n\r\n x[k] = p\r\n y[k] = r\r\n p = w\r\n\r\n k = k + 1\r\n\r\n return A\r\n\r\n\r\ndef partition(A, p, r):\r\n x = A[r]\r\n i = p - 1\r\n for j in range(p, r):\r\n if A[j] <= x:\r\n i = i + 1\r\n A[i], A[j] = A[j], A[i]\r\n A[i + 1], A[r] = A[r], A[i + 1]\r\n return i + 1\r\n\r\n\r\ndef quicksort_aux(A, p, r):\r\n if p < r:\r\n q = partition(A, p, r)\r\n quicksort_aux(A, p, q - 1)\r\n quicksort_aux(A, q + 1, r)\r\n return A\r\n\r\n\r\ndef quicksort(A):\r\n return quicksort_aux(A, 0, len(A) - 1)\r\n\r\n\r\ndef RandomizedPartition(A, p, r):\r\n i = rd.randint(p, r)\r\n A[r], A[i] = A[i], A[r]\r\n return partition(A, p, r)\r\n\r\n\r\ndef RandomizedQuicksort(A, p, r):\r\n if p < r:\r\n q = RandomizedPartition(A, p, r)\r\n RandomizedQuicksort(A, p, q - 1)\r\n RandomizedQuicksort(A, q + 1, r)\r\n return A\r\n\r\n\r\ndef randomized_quicksort(A):\r\n return RandomizedQuicksort(A, 0, len(A) - 1)\r\n\r\n\r\n\r\nminrun = 32\r\n\r\n\r\ndef InsSort(arr, start, end):\r\n for i in range(start + 1, end + 1):\r\n elem = arr[i]\r\n j = i - 1\r\n while j >= start and elem < arr[j]:\r\n arr[j + 1] = arr[j]\r\n j -= 1\r\n arr[j + 1] = elem\r\n return arr\r\n\r\n\r\ndef merge1(arr, start, mid, end):\r\n if mid == end:\r\n return arr\r\n first = arr[start:mid + 1]\r\n last = arr[mid + 1:end + 1]\r\n len1 = mid - start + 1\r\n len2 = end - mid\r\n ind1 = 0\r\n ind2 = 0\r\n ind = start\r\n\r\n while ind1 < len1 and ind2 < len2:\r\n if first[ind1] < last[ind2]:\r\n arr[ind] = first[ind1]\r\n ind1 += 1\r\n else:\r\n arr[ind] = last[ind2]\r\n ind2 += 1\r\n ind += 1\r\n\r\n while ind1 < len1:\r\n arr[ind] = first[ind1]\r\n ind1 += 1\r\n ind += 1\r\n\r\n while ind2 < len2:\r\n arr[ind] = last[ind2]\r\n ind2 += 1\r\n ind += 1\r\n\r\n return arr\r\n\r\n\r\ndef TimSort(arr):\r\n n = len(arr)\r\n\r\n for start in range(0, n, minrun):\r\n end = min(start + minrun - 1, n - 1)\r\n arr = InsSort(arr, start, end)\r\n\r\n curr_size = minrun\r\n while curr_size < n:\r\n for start in range(0, n, curr_size * 2):\r\n mid = min(n - 1, start + curr_size - 1)\r\n end = min(n - 1, mid + curr_size)\r\n arr = merge1(arr, start, mid, end)\r\n 
curr_size *= 2\r\n return arr\r\n\r\n\r\nif __name__ == '__main__':\r\n# lista = [rd.randint(0, 2000) for i in range(0, 20)]\r\n lista = [1,4,3,10,2,7,8,9,14,16]\r\n print(heapsort(lista))\r\n","sub_path":"Laboratorio 4 semana 5 evaluado/sortlib.py","file_name":"sortlib.py","file_ext":"py","file_size_in_byte":9710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"498025478","text":"def insectos():\n print(\"Bienvenido al programa que registra los insectos que recolectas\")\n dias=1\n insec=int(input(\"Cuantos insectos atrapaste hoy? \"))\n while insec<=30:\n if insec >=30:\n print(\"despues de\", dias, \"dia(s) de recoleccion, llevas\", insec, \"insectos\")\n suma= insec-30\n print(\"Te has pasado con\", suma, \"insectos\")\n print(\"Felicidades has llegado a la meta \")\n break\n elif insec <30:\n print(\"despues de\",dias ,\"dia(s) de recoleccion, llevas\", insec, \"insectos\")\n dias+=1\n falta=(insec-30)*-1\n print(\"Te hacen falta recolectar\", falta)\n insec+=int(input(\"Cuantos insectos atrapaste hoy? \"))\n\n\ndef capturarDatos():\n print((\"Bienvenido al programa que encuentra el mayor\"))\n lista=[]\n dato = int(input(\"Teclea un numero [-1 para salir]: \"))\n if dato==-1:\n print(\"No hay valor mayor\")\n else:\n while dato!=-1:\n lista.append(dato)\n dato=int(input(\"Teclea un numero [-1 para salir]: \"))\n print(\"El mayor es: \",max(lista))\n\n\ndef main():\n print(\"1. Recolectar insectos \")\n print(\"2. Encontrar el mayor\")\n print(\"3. Salir\")\n numero=int(input(\"Dar un numero del 1 al 3 :\"))\n while numero!=3:\n if numero==1:\n insectos()\n if numero==2:\n capturarDatos()\n print(\"1. Recolectar insectos \")\n print(\"2. Encontrar el mayor\")\n print(\"3. Salir\")\n numero = int(input(\"Dar un numero del 1 al 3 :\"))\nmain()\n\n\n","sub_path":"le menu dos.py","file_name":"le menu dos.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"340515395","text":"#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\n\"\"\"Quartic reaction coordinate\n \n This module contains a set of functions, which allow to perform an optimization of the coefficients\n in the polinomial expansion of quartic reaction coordinate (QRC) model (a*x**4 + b*x**3 + c*x**2),\n taking data from a file containing the reaction coordinate and energy in each point of an IRC calculation\n and performs a coordinate transformation from IRC to QRC.\n \n Example\n -------\n You must have a file with the IRC and energy values of each point. 
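For orientation on the transformation this record performs: coord_transformation below solves a*x**4 + b*x**3 + c*x**2 = E for x at every IRC point using sympy's Matrix and jacobian machinery. A minimal plain-float sketch of the same Newton iteration, with made-up coefficients purely for illustration:

def solve_quartic(a, b, c, e, x0=0.1, tol=5e-10, max_iter=200):
    """Newton's method for a*x**4 + b*x**3 + c*x**2 - e = 0."""
    x = x0
    for _ in range(max_iter):
        f = a * x ** 4 + b * x ** 3 + c * x ** 2 - e
        if abs(f) < tol:
            break
        df = 4 * a * x ** 3 + 3 * b * x ** 2 + 2 * c * x
        x -= f / df  # standard Newton update
    return x

print(solve_quartic(1.0, -2.0, 1.0, 0.5))  # illustrative coefficients only, converges to ~1.478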
From terminal execute:\n \n run_quartic_coord_transf energy.dat\n\n The result will be a file called quartic.dat with two sections: The first one is a summary of the main r\n esults from the new QRC and the second one is the new coordinate in each point of IRC.\n \n \"\"\"\n\nfrom quartic_reaction_coordiante import *\nfrom sympy import *\nimport sys\n\nirc_file = sys.argv[1]\n\n\ndef max_points(irc_data):\n \"\"\"Save the total number of points in IRC \"\"\"\n file = open(irc_data, \"r\")\n max_points_irc = (int(file.readlines()[-1].split()[0]))\n return max_points_irc\n\n\ndef energy(irc_data):\n \"\"\"Save the energy in each points of IRC \"\"\"\n file = open(irc_data, \"r\")\n energies = []\n for line in file:\n if \"Reaction coord Energy\" in line:\n for _ in list(range(max_points(irc_data))):\n energy_k = (float(next(file).split()[2]))\n energies.append(energy_k)\n return energies\n\n\ndef coord_irc(irc_data):\n print('Analyzing each point of IRC ...')\n \"\"\"Save the coordinate value of each points of IRC \"\"\"\n file = open(irc_data, \"r\")\n coordinate = []\n coor_zero_to_one = []\n for line in file:\n if \"Reaction coord Energy\" in line:\n for _ in list(range(max_points(irc_data))):\n coord_k = (float(next(file).split()[1]))\n coordinate.append(coord_k)\n for coord in coordinate:\n a_coord = round((coord - coordinate[0]) /\n (coordinate[max_points(irc_data) - 1] - coordinate[0]), 4)\n coor_zero_to_one.append(a_coord)\n file.close()\n print('Done!')\n return coor_zero_to_one\n\n\ndef coord_transformation(irc_data):\n \"\"\"Perform a coordinate transformation from IRC to QRC \"\"\"\n file = open(irc_data, \"r\")\n output_file = (open(\"quartic.dat\", \"w\"))\n eact, erxn = [float(max(energy(irc_data))),\n float(file.readlines()[-1].split()[2])]\n a, b, c = quarticrxn(eact, erxn, \"yes\", output_file)\n print('Transforming coordinate from IRC to QRC ...')\n x = Symbol('x')\n sb = Matrix([0.1])\n x_transformed = []\n iteration = 0\n for i in energy(irc_data):\n f = a * x ** 4 + b * x ** 3 + c * x ** 2 - i\n m = Matrix([f])\n j_i = (m.jacobian([x])) ** (-1)\n if i == 0:\n s0 = sb\n while m.subs([(x, s0[0])]).norm() > 5e-8:\n s0 = s0 - (m.subs([(x, s0[0])]) * j_i.subs([(x, s0[0])]))\n iteration = iteration + 1\n # print(s0[0])\n x_transformed.append(round(s0[0], 4))\n sb = Matrix([s0[0] + 1 / (max_points(irc_data) + 1)])\n elif i == erxn:\n while m.subs([(x, sb[0])]).norm() > 5e-10:\n sb = sb - (m.subs([(x, sb[0])]) * j_i.subs([(x, sb[0])]))\n iteration = iteration + 1\n # print(sb[0])\n x_transformed.append(round(sb[0], 4))\n sb = Matrix([0.9999])\n else:\n while m.subs([(x, sb[0])]).norm() > 5e-10:\n sb = sb - (m.subs([(x, sb[0])]) * j_i.subs([(x, sb[0])]))\n iteration = iteration + 1\n # print(sb[0])\n x_transformed.append(round(sb[0], 4))\n if sb[0] > 0.99:\n sb = Matrix([0.999])\n else:\n sb = Matrix([sb[0] + 1 / (max_points(irc_data + 1))])\n print('Done!')\n output_file.close()\n return x_transformed\n\n\ndef save_data(irc_data):\n \"\"\"Save data of QRC analysis and the coordinate transformation \"\"\"\n output_file = (open(\"quartic.dat\", \"a\"))\n irc = coord_irc(irc_data)\n qrc = coord_transformation(irc_data)\n energy_of_each_point = energy(irc_data)\n print('Saving data ...')\n quartic_plot(energy_of_each_point, irc, qrc, \"plot\", \"save\")\n output_file.write('\\n=====================================================\\n\\n')\n output_file.write('\\n Coordinate transformation \\n')\n output_file.write(' from IRC to QRC \\n\\n')\n 
output_file.write('\\n=====================================================\\n\\n')\n output_file.write(\"\\t IRC\\t\\t QRC\\t\\t Energy\\n\")\n output_file.write(\"\\t\\t\\t \\t\\t(kcal/mol)\\n\")\n output_file.write('=====================================================\\n')\n for point in list(range(max_points(irc_file))):\n output_file.write(\"\\t\" + str(irc[point]) + \"\\t\\t\" + str(qrc[point]) + \"\\t\\t\" +\n str(energy_of_each_point[point]) + \"\\n\")\n print('Done!')\n output_file.write('\\n==========================END========================\\n')\n output_file.close()\n\n\nsave_data(irc_file)\n","sub_path":"run_quartic_coord_transf.py","file_name":"run_quartic_coord_transf.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"135365034","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# autores: Gabriel de Souza Barreto, Guilherme Bastos de Oliveira\n# objetivo: implementar singleton para realizar tarefas do servidor\n# última modificação: 04/06/2018\n\nfrom utils import *\nimport socket, struct, time\n\nclass Servidor:\n def __init__(self, id, end_ip, porta):\n self.id = id\n self.end_ip = end_ip\n self.porta = porta\n self.clones = []\n self.socket_escuta = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n self.criaListaServidores()\n self.configuraSocket()\n\n # configuração do socket para escutar multicast e do socket para enviar mensagem\n def configuraSocket(self):\n self.socket_escuta.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket_escuta.bind(('', self.porta))\n mreq = struct.pack('4sl', socket.inet_aton(self.end_ip), socket.INADDR_ANY)\n self.socket_escuta.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n self.socket_envia = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # função alvo da thread de verificação\n def statusContinuo(self, paraThread):\n segundos_passados = time.time()\n self.enviaInicial()\n while not paraThread():\n for serv in self.clones:\n serv.checa()\n if time.time() > segundos_passados + T_HEARTBEAT:\n self.enviaStatus('O')\n segundos_passados = time.time()\n\n # cria lista (tabela) de clones\n def criaListaServidores(self):\n for i in range(N):\n if i != self.id:\n self.clones.append(Clone(i, self.id))\n\n # define se é líder\n def lider(self):\n for servidor in self.clones[:self.id]:\n if servidor.status == 'O':\n safePrint(\"[R] - não sou o líder\")\n return False\n safePrint(\"[R] - sou o líder\")\n return True\n\n # métodos de processamento de mensagens\n def processaStatus(self, dados):\n tipo, origem, valor = struct.unpack('cic', dados)\n if origem != self.id:\n safePrint(\"[S] - recebi status \" + str(valor) + \" de \" + str(origem))\n offset = 0 if origem < self.id else 1\n self.clones[origem-offset].atualizaStatus(valor)\n\n def processaInicial(self, dados):\n tipo, origem = struct.unpack('ci', dados)\n if origem != self.id:\n safePrint(\"[I] - recebi I de \" + str(origem))\n offset = 0 if origem < self.id else 1\n self.clones[origem-offset].atualizaStatus('O')\n self.enviaStatus('O')\n\n def processaRequisicao(self, end_cliente, dados):\n safePrint(\"[R] - recebi requisição de \" + end_cliente[0])\n if self.lider():\n tipo, tam = struct.unpack('ci', dados[:8])\n expressao = struct.unpack('{}s'.format(tam), dados[8:])[0]\n # evita processamento de expressões com letras\n if not any(c.isalpha() for c in expressao): \n try:\n self.enviaResposta(end_cliente, eval(expressao))\n # tratamento dos erros\n 
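A standalone sketch of the status-message framing this record uses in enviaStatus and processaStatus. Two Python 3 details worth noting: struct's 'c' code requires a length-1 bytes object (the record passes str literals, which only works on Python 2), and native alignment pads 'cic' to 9 bytes on a typical 64-bit CPython build.

import struct

packed = struct.pack('cic', b'S', 3, b'O')  # tipo, origem (server id), valor
print(len(packed))                          # 9: 1 byte + 3 padding + 4-byte int + 1 byte
tipo, origem, valor = struct.unpack('cic', packed)
assert (tipo, origem, valor) == (b'S', 3, b'O')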
except ZeroDivisionError:\n self.enviaRespostaErro(end_cliente, ERRO_DIVZERO)\n except SyntaxError:\n self.enviaRespostaErro(end_cliente, ERRO_SINTAXE)\n except:\n self.enviaRespostaErro(end_cliente, ERRO_DESCONHECIDO)\n else:\n self.enviaRespostaErro(end_cliente, ERRO_LETRA)\n\n # métodos para o envio de mensagens\n def enviaInicial(self):\n safePrint(\"[I] - enviando I\")\n dados = struct.pack('ci', \"I\", self.id)\n self.socket_envia.sendto(dados, (self.end_ip, self.porta))\n\n def enviaStatus(self, status):\n safePrint(\"[S] - enviando status \" + status)\n dados = struct.pack('cic', \"S\", self.id, status)\n self.socket_envia.sendto(dados, (self.end_ip, self.porta))\n\n def enviaResposta(self, end_cliente, resposta):\n dados = struct.pack('ccf', 'A', SUCESSO, resposta)\n self.socket_envia.sendto(dados, end_cliente)\n safePrint(\"[A] - respondendo requisição de \" + end_cliente[0])\n\n def enviaRespostaErro(self, end_cliente, erro):\n safePrint(\"[A] - respondendo requisição de \" + end_cliente[0])\n dados = struct.pack('ccf', 'A', erro, 0)\n self.socket_envia.sendto(dados, end_cliente)\n","sub_path":"utils/Servidor.py","file_name":"Servidor.py","file_ext":"py","file_size_in_byte":4432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"620048146","text":"import streamlit as st\nimport pandas as pd\n\nfrom CarbonSafeFunctions import main_body_divider\n\nclass default_values: # Value, Min, Max\n\tdf = pd.DataFrame([[0.0001, 0.0, 5.0],\n\t\t\t\t\t\t[1.0, 0.0, 10.0],\n\t\t\t\t\t\t[50.0, 1.0, 80.0],\n\t\t\t\t\t\t[3.5, 1.0, 20.0],\n\t\t\t\t\t\t[10.0, 1.0, 50.0],\n\t\t\t\t\t\t[1500.0, 1.0, 10000.0],\n\t\t\t\t\t\t[100.0, 20.0, 100.0],\n\t\t\t\t\t\t[20.0, 1.0, 60.0],\n\t\t\t\t\t\t[6.0, 0.0, 20.0],\n\t\t\t\t\t\t[1.5, 0.0, 10.0],\n\t\t\t\t\t\t[45.0, 0.0, 80.0]],\n\t\t\t\t\t\tcolumns = ['Values', 'Min', 'Max'],\n\t\t\t\t\t\tindex = ['Underwrite Insurance Fee (%CAPEX)', 'Basic Industrial Insurance Annual Premium (%CAPEX)', 'Industrial Insurance Complexity & Contingency Factor (%)',\n\t\t\t\t\t\t\t\t'PISC Fund Return (%/yr)', 'PISC Trust Fund Contingency (%)', 'Long Term Liabiliy Maximum Possible Loss ($MM)',\n\t\t\t\t\t\t\t\t'Share of Possible Loss Carried by Project (%)', 'Long Term Liability Contingency Factor (%)', 'LTL Trust Fund Returns (%/yr)', 'LTL Inflation Expectation (%/yr)',\n\t\t\t\t\t\t\t\t'Proability Weight (% Chance of Event)'])\nvalues = default_values()\nraw_values = values.df.copy()\nindexes = values.df.index\nvalue_column = values.df.columns[0]\n\n\ndef set_values(reset_message):\n\tglobal value\n\tglobal raw_values\n\tglobal value_column\n\n\tinside = 0\n\n\tif st.checkbox('Insurance, PISC, & LTL Parameters:'):\n\t\tinside = 1\n\t\tfor name in values.df.index:\n\t\t\tvalues.df.loc[name, value_column] = st.slider(name, values.df.loc[name, 'Min'], values.df.loc[name, 'Max'], values.df.loc[name, value_column], values.df.loc[name, value_column] / 100)\n\t\t\tst.write('')\n\t\tif st.button(\"*Restet to default\"):\n\t\t\tvalues.df = raw_values\n\t\t\tst.success(reset_message)\n\t\tmain_body_divider()\n\treturn inside\n\n\nclass InsurancePiscLtl:\n\tdef __init__(self, reset_message):\n\t\tglobal value\n\t\tglobal indexes\n\t\tglobal value_column\n\n\t\tself.inside = set_values(reset_message)\n\n\t\tself.data = values.df[value_column]\n\t\tself.underwrite = round(values.df.loc[indexes[0], value_column] / 100, 3)\n\t\tself.basic_premium = values.df.loc[indexes[1], value_column]\n\t\tself.insurance_cont = values.df.loc[indexes[2], 
value_column]\n\t\tself.adjusted_premium = ((self.basic_premium / 100) * (1 + (self.insurance_cont / 100)))\n\t\tself.PISC_return = values.df.loc[indexes[3], value_column] / 100\n\t\tself.PISC_cont = values.df.loc[indexes[4], value_column] / 100\n\t\tself.max_loss = values.df.loc[indexes[5], value_column]\n\t\tself.share_loss = values.df.loc[indexes[6], value_column] / 100\n\t\tself.LTL_cont = values.df.loc[indexes[7], value_column] / 100\n\t\tself.LTL_return = values.df.loc[indexes[8], value_column] / 100\n\t\tself.LTL_inflation = values.df.loc[indexes[9], value_column] / 100\n\t\tself.chance_event = values.df.loc[indexes[10], value_column] / 100\n\t\tself.max_loss_res = self.max_loss * self.share_loss\n\n\n\n\n\n\n\n\n","sub_path":"InsurancePiscLtlParameters.py","file_name":"InsurancePiscLtlParameters.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"538625487","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.utils import shuffle\ndata = pd.read_csv('drebinAndroidDataset.csv', sep=',', error_bad_lines=False, index_col=False, dtype='unicode')\nmalware = {'S' : 0, 'B' : 1}\n#df = pd.DataFrame(data)\n#df['class'] = df['class'].replace( '[\\S,)]','', regex=True )\ndata = data.replace([\"S\",\"B\"],[1,0]) \ndata = shuffle(data)\nprint(data)\n#data.replace(to_replace ='B',value ='1')\n#df['class'] = df['class'].astype(int)\nX = data.iloc[:,0:215] #independent columns\ny = data.iloc[:,215] #target column i.e price range\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, Y_train, Y_test = train_test_split(X,y,test_size=0.3,random_state=0)\n\nprint('X_train- ',X_train.shape)\nprint(X_train)\nprint('Y_train- ',Y_train.shape)\nprint(Y_train)\nprint('X_test- ',X_test.shape)\nprint(X_test)\nprint('Y_test- ',Y_test.shape)\nprint(Y_test)\nfrom sklearn.ensemble import ExtraTreesClassifier\nmodel = ExtraTreesClassifier()\nmodel.fit(X_train,Y_train)\nfeatures = model.feature_importances_\nfeature_dict = {}\nfor i in range(features.size):\n\tfeature_dict[i] = features[i]\n\n#for i in sorted (feature_dict.keys()) : \n# print(i, end = \" \") \n#print(sorted(feature_dict.items(), key = lambda kv:(kv[1], kv[0]))) \n\nfinal_keys = []\nfor keys in sorted(feature_dict.items(), key = lambda kv:(kv[1], kv[0])): \n\tfinal_keys.append(keys[0])\n\tprint(keys) \n\nprint(final_keys)\nx = 200\nselected_keys = []\nfor i in range(15):\n\tselected_keys.append(final_keys[x])\n\tx = x+1\n\nselected_keys.sort()\nprint(selected_keys)\n\nf = open(\"rf.txt\", \"a\")\nfor i in range(len(selected_keys)):\n\tf.write(str(selected_keys[i]))\n\tf.write(\"\\n\")\nf.close()\n\n\n#feature_dict.sort(key=sortSecond)\n#features.sort(reverse=True)\n\n#print(feature_dict) \n","sub_path":"code/rf.py","file_name":"rf.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"44556267","text":"# name: OilTank.py\n# Version: rev. 1\n# Date: 28/04/2018\n# Author: Brian Hickey, x17126622 with help from below\n# Reference: https://code-maven.com/slides/python-programming/sqlite-insert\n# https//stackoverflow.com-Why the need to commit explicitly when doing an UPDATE?\n# Class work and presentations e.g. 
SQLite.py exercise.\n# https://docs.python.org/2/library/datetime.html\n# This python file is very much a mis-match of different examples of using different operations\n# and adapting them to the code needed for the project.\n# Learning and using datetime is an example of this\n# Email code (def emailalarm, http://stackabuse.com/how-to-send-emails-with-gmail-using-python/\n#\n\nfrom grovepi import *\n\nimport datetime\nimport time\nimport sqlite3\nimport smtplib\nfrom grove_rgb_lcd import *\n\n\ndef emailalarm(): #email level alarm is activated. Disabled after email is sent.\n gmail_user = 'user'\n gmail_password = 'passwd'\n\n sent_from = gmail_user\n to = ['receive email address']\n subject = 'The Oil Tank is below 20 Litres'\n body = 'Get oil!'\n #This code was sourced online. See references\n email_text = \"\"\"\\ \n From: %s\n To: %s\n Subject: %s\n\n %s\n \"\"\" % (sent_from, \", \".join(to), subject, body)\n\n try: #Try to send and if good print email sent, else print something went wrong\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.ehlo()\n server.login(gmail_user, gmail_password)\n server.sendmail(sent_from, to, email_text)\n server.close()\n\n print ('Email sent!')\n \n except:\n print ('Something went wrong...')\n \n\n\n\nSensorEnable = True #SensorEnable set to true. Coded this way to allow for enable/disable function if needed\nvalue = 0 #Initiate the value variable used in the readsensor function\nsensor = [] #Initialize array for the sensor readings\nlowLevelAlarm = False #Initialize the lowlevel alarm bit\nLowLevelCount = 0 #Initialize the lowlevel count for the low level alarm\nLowLevel = 20 #initialize the low level value in litres\nultrasonic_ranger = 4 #Set the input port for the ultrasonic sensor\nrelay = 2\t\t #Port for relay\nbutton = 3\t\t #Port for Button\naverage = 0 #Set the average value to 0. Used in getting average sensor reading\nemailEnable = True # Set email enable to true at startup\nRedLED = 8 #Port for RED LED\npinMode(relay,\"OUTPUT\")\t# Assign mode for buzzer as output #Set port for the relay as an output\npinMode(button,\"INPUT\")\t\t# Assign mode for Button as input #set the port for the button as input\n\n\n# readsensor function\ndef readsensor(value):\n\n for x in range(5):\n total = 0 # reset the current total\n SensorReading = ultrasonicRead(ultrasonic_ranger) # current reading\n MaxSensor = 70.0 # maximum value in range from sensor\n MinSensor = 2.0 # minimum value in range from sensor\n minOffset = MaxSensor - MinSensor # calculate the span of the range\n litreRange = 900.0 # The range of the tank in litres\n tempvalue = (((SensorReading - minOffset) - MinSensor) * -1) # convert value max to min to min to max\n multiplier = litreRange / MaxSensor # calculate multiplier (slope)\n litres = tempvalue * multiplier # convert to litres\n total = total + litres # get 5 results for average\n return total\n\n\n\n\n\n# Use a while loop to constantly read the current time\nwhile SensorEnable:\n \n button_status= digitalRead(button)\t#Read the Button status\t\n\t\t\n if button_status:\n \n setRGB(0,255,0) #set display background\n buf=[\"Current level: \\n\", str(average), \" Litres\"] #write the current value to the display. 
Only done after first reading from sensor\n setText(\"\".join(buf))\n time.sleep(10) #wait 10secs and turn off display\n setRGB(0,0,0)\n buf=[\"Press to check level\"] #This text is shown but without backlight\n setText(\"\".join(buf))\n # If a low level alarm occurs, only allow heat on between 18:00 and 22:00 \n if lowLevelAlarm and datetime.datetime.now().hour>18 and datetime.datetime.now().hour<22:\n digitalWrite(relay,1)\n else:\n digitalWrite(relay,0)\n \n if lowLevelAlarm:\n digitalWrite(RedLED,1)\n else:\n digitalWrite(RedLED,0)\n \n \n # Check if the time is on quarter hour. \n while ((datetime.datetime.now().minute == 00) or (datetime.datetime.now().minute == 15) or\n (datetime.datetime.now().minute == 30) or (datetime.datetime.now().minute == 45)):\n # get the Sensor reading\n total = readsensor(value)\n dt = datetime.datetime.now()\n date = dt.strftime(\"%y-%m-%d\")\n timenow = dt.strftime(\"%H:%M\") \n minute = dt.minute\n sensor.append(total)\n\n if len(sensor) > 4: #if 5 readings\n print (timenow) #for testing: print timenow\n print (sum(sensor) / len(sensor)) #print measured value\n average = sum(sensor) / len(sensor) #compute average of readings\n connection = sqlite3.connect(\"TankLevel.db\") #connect to database(sqlite)\n cursor = connection.cursor() #Initialize cursor\n cursor.execute('''INSERT INTO level (Date, Time, Level) VALUES (?, ?, ?)''', (date, timenow, average,)) #Put the date, timenow and average reading into the database\n if average < LowLevel : #If read value is lower than low value\n LowLevelCount += 1 #add one to lowlevelcount\n if LowLevelCount >= 3: #if lowlevel count is greater or equal to 3, enable lowlevel alarm\n lowLevelAlarm = True #Low level alarm is true\n if lowLevelAlarm and emailEnable: #if lowlevel alarm and email has not already been sent, call email function\n emailalarm() #call email function\n emailEnable = False #Email has been sent, disable email\n print (lowLevelAlarm) #for testing print status of lowlevel alarm\n print (LowLevelCount) #print lowlevel count\n print (emailEnable) #print if email enable is true\n connection.commit() #commit changes to the database\n cursor.close() #close the cursor for the database\n connection.close() #close the connection to the database\n sensor = [] #clear the sensor array values\n time.sleep(60) #wait 60 seconds. 
this is to wait for the time to update by one 1 minute\n \n\n \n \n\n \n \n\n","sub_path":"OilTankMonitorRev1.py","file_name":"OilTankMonitorRev1.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"371539973","text":"# Adapted from https://github.com/inmcm/Simon_Speck_Ciphers/blob/master/Python/SimonSpeckCiphers/simon/simon.py\n\nfrom __future__ import print_function\nfrom collections import deque\n\n__author__ = 'inmcm'\n\ntable = [bin(i).count('1') for i in range(256)]\n\n\ndef ones(n):\n w = 0\n while n:\n w += table[n & 255]\n n >>= 8\n return w\n\n\nclass SimonCipher(object):\n \"\"\"Simon Block Cipher Object\"\"\"\n\n # Z Arrays (stored bit reversed for easier usage)\n z0 = 0b01100111000011010100100010111110110011100001101010010001011111\n z1 = 0b01011010000110010011111011100010101101000011001001111101110001\n z2 = 0b11001101101001111110001000010100011001001011000000111011110101\n z3 = 0b11110000101100111001010001001000000111101001100011010111011011\n z4 = 0b11110111001001010011000011101000000100011011010110011110001011\n\n # valid cipher configurations stored:\n # block_size:{key_size:(number_rounds,z sequence)}\n __valid_setups = {32: {64: (32, z0)},\n 48: {72: (36, z0), 96: (36, z1)},\n 64: {96: (42, z2), 128: (44, z3)},\n 96: {96: (52, z2), 144: (54, z3)},\n 128: {128: (68, z2), 192: (69, z3), 256: (72, z4)}}\n\n __valid_modes = ['ECB', 'CTR', 'CBC', 'PCBC', 'CFB', 'OFB']\n\n def __init__(self, key, key_size=128, block_size=128, mode='ECB', init=0, counter=0):\n \"\"\"\n Initialize an instance of the Simon block cipher.\n :param key: Int representation of the encryption key\n :param key_size: Int representing the encryption key in bits\n :param block_size: Int representing the block size in bits\n :param mode: String representing which cipher block mode the object should initialize with\n :param init: IV for CTR, CBC, PCBC, CFB, and OFB modes\n :param counter: Initial Counter value for CTR mode\n :return: None\n \"\"\"\n print(bin(key))\n\n # Setup block/word size\n try:\n self.possible_setups = self.__valid_setups[block_size]\n self.block_size = block_size\n self.word_size = self.block_size >> 1\n except KeyError:\n print('Invalid block size!')\n print('Please use one of the following block sizes:',\n [x for x in self.__valid_setups.keys()])\n raise\n\n # Setup Number of Rounds, Z Sequence, and Key Size\n try:\n self.rounds, self.zseq = self.possible_setups[key_size]\n self.key_size = key_size\n except KeyError:\n print('Invalid key size for selected block size!!')\n print('Please use one of the following key sizes:',\n [x for x in self.possible_setups.keys()])\n raise\n\n # Create Properly Sized bit mask for truncating addition and left shift outputs\n self.mod_mask = (2 ** self.word_size) - 1\n\n # Parse the given iv and truncate it to the block length\n try:\n self.iv = init & ((2 ** self.block_size) - 1)\n self.iv_upper = self.iv >> self.word_size\n self.iv_lower = self.iv & self.mod_mask\n except (ValueError, TypeError):\n print('Invalid IV Value!')\n print('Please Provide IV as int')\n raise\n\n # Parse the given Counter and truncate it to the block length\n try:\n self.counter = counter & ((2 ** self.block_size) - 1)\n except (ValueError, TypeError):\n print('Invalid Counter Value!')\n print('Please Provide Counter as int')\n raise\n\n # Check Cipher Mode\n try:\n position = self.__valid_modes.index(mode)\n self.mode = self.__valid_modes[position]\n except ValueError:\n 
print('Invalid cipher mode!')\n print('Please use one of the following block cipher modes:',\n self.__valid_modes)\n raise\n\n # Parse the given key and truncate it to the key length\n try:\n self.key = key & ((2 ** self.key_size) - 1)\n except (ValueError, TypeError):\n print('Invalid Key Value!')\n print('Please Provide Key as int')\n raise\n\n # Pre-compile key schedule\n m = self.key_size // self.word_size\n self.key_schedule = []\n\n # Create list of subwords from encryption key\n k_init = [((self.key >> (self.word_size * ((m-1) - x)))\n & self.mod_mask) for x in range(m)]\n\n k_reg = deque(k_init) # Use queue to manage key subwords\n\n round_constant = self.mod_mask ^ 3 # Round Constant is 0xFFFF..FC\n\n # Generate all round keys\n for x in range(self.rounds):\n\n rs_3 = ((k_reg[0] << (self.word_size - 3)) +\n (k_reg[0] >> 3)) & self.mod_mask\n\n if m == 4:\n rs_3 = rs_3 ^ k_reg[2]\n\n rs_1 = ((rs_3 << (self.word_size - 1)) +\n (rs_3 >> 1)) & self.mod_mask\n\n c_z = ((self.zseq >> (x % 62)) & 1) ^ round_constant\n\n new_k = c_z ^ rs_1 ^ rs_3 ^ k_reg[m - 1]\n\n self.key_schedule.append(k_reg.pop())\n k_reg.appendleft(new_k)\n print(bin(self.key_schedule[0]))\n print(bin(self.key_schedule[1]))\n\n def encrypt_round(self, x, y, k):\n \"\"\"\n Complete One Feistel Round\n :param x: Upper bits of current plaintext\n :param y: Lower bits of current plaintext\n :param k: Round Key\n :return: Upper and Lower ciphertext segments\n \"\"\"\n\n # Generate all circular shifts\n ls_1_x = ((x >> (self.word_size - 1)) + (x << 1)) & self.mod_mask\n ls_8_x = ((x >> (self.word_size - 8)) + (x << 8)) & self.mod_mask\n ls_2_x = ((x >> (self.word_size - 2)) + (x << 2)) & self.mod_mask\n\n # XOR Chain\n xor_1 = (ls_1_x & ls_8_x) ^ y\n xor_2 = xor_1 ^ ls_2_x\n new_x = k ^ xor_2\n\n return new_x, x\n\n def decrypt_round(self, x, y, k):\n \"\"\"Complete One Inverse Feistel Round\n :param x: Upper bits of current ciphertext\n :param y: Lower bits of current ciphertext\n :param k: Round Key\n :return: Upper and Lower plaintext segments\n \"\"\"\n\n # Generate all circular shifts\n ls_1_y = ((y >> (self.word_size - 1)) + (y << 1)) & self.mod_mask\n ls_8_y = ((y >> (self.word_size - 8)) + (y << 8)) & self.mod_mask\n ls_2_y = ((y >> (self.word_size - 2)) + (y << 2)) & self.mod_mask\n\n # Inverse XOR Chain\n xor_1 = k ^ x\n xor_2 = xor_1 ^ ls_2_y\n new_x = (ls_1_y & ls_8_y) ^ xor_2\n\n return y, new_x\n\n def encrypt(self, plaintext):\n \"\"\"\n Process new plaintext into ciphertext based on current cipher object setup\n :param plaintext: Int representing value to encrypt\n :return: Int representing encrypted value\n \"\"\"\n try:\n b = (plaintext >> self.word_size) & self.mod_mask\n a = plaintext & self.mod_mask\n except TypeError:\n print('Invalid plaintext!')\n print('Please provide plaintext as int')\n raise\n\n self.leak = 0\n\n if self.mode == 'ECB':\n b, a = self.encrypt_function(b, a)\n\n elif self.mode == 'CTR':\n true_counter = self.iv + self.counter\n d = (true_counter >> self.word_size) & self.mod_mask\n c = true_counter & self.mod_mask\n d, c = self.encrypt_function(d, c)\n b ^= d\n a ^= c\n self.counter += 1\n\n elif self.mode == 'CBC':\n b ^= self.iv_upper\n a ^= self.iv_lower\n b, a = self.encrypt_function(b, a)\n\n self.iv_upper = b\n self.iv_lower = a\n self.iv = (b << self.word_size) + a\n\n elif self.mode == 'PCBC':\n f, e = b, a\n b ^= self.iv_upper\n a ^= self.iv_lower\n b, a = self.encrypt_function(b, a)\n self.iv_upper = b ^ f\n self.iv_lower = a ^ e\n self.iv = (self.iv_upper << 
self.word_size) + self.iv_lower\n\n elif self.mode == 'CFB':\n d = self.iv_upper\n c = self.iv_lower\n d, c = self.encrypt_function(d, c)\n b ^= d\n a ^= c\n\n self.iv_upper = b\n self.iv_lower = a\n self.iv = (b << self.word_size) + a\n\n elif self.mode == 'OFB':\n d = self.iv_upper\n c = self.iv_lower\n d, c = self.encrypt_function(d, c)\n self.iv_upper = d\n self.iv_lower = c\n self.iv = (d << self.word_size) + c\n\n b ^= d\n a ^= c\n\n ciphertext = (b << self.word_size) + a\n\n return ciphertext, self.leak\n\n def decrypt(self, ciphertext):\n \"\"\"\n Process new ciphertest into plaintext based on current cipher object setup\n :param ciphertext: Int representing value to encrypt\n :return: Int representing decrypted value\n \"\"\"\n try:\n b = (ciphertext >> self.word_size) & self.mod_mask\n a = ciphertext & self.mod_mask\n except TypeError:\n print('Invalid ciphertext!')\n print('Please provide ciphertext as int')\n raise\n\n if self.mode == 'ECB':\n a, b = self.decrypt_function(a, b)\n\n elif self.mode == 'CTR':\n true_counter = self.iv + self.counter\n d = (true_counter >> self.word_size) & self.mod_mask\n c = true_counter & self.mod_mask\n d, c = self.encrypt_function(d, c)\n b ^= d\n a ^= c\n self.counter += 1\n\n elif self.mode == 'CBC':\n f, e = b, a\n a, b = self.decrypt_function(a, b)\n b ^= self.iv_upper\n a ^= self.iv_lower\n\n self.iv_upper = f\n self.iv_lower = e\n self.iv = (f << self.word_size) + e\n\n elif self.mode == 'PCBC':\n f, e = b, a\n a, b = self.decrypt_function(a, b)\n b ^= self.iv_upper\n a ^= self.iv_lower\n self.iv_upper = (b ^ f)\n self.iv_lower = (a ^ e)\n self.iv = (self.iv_upper << self.word_size) + self.iv_lower\n\n elif self.mode == 'CFB':\n d = self.iv_upper\n c = self.iv_lower\n self.iv_upper = b\n self.iv_lower = a\n self.iv = (b << self.word_size) + a\n d, c = self.encrypt_function(d, c)\n b ^= d\n a ^= c\n\n elif self.mode == 'OFB':\n d = self.iv_upper\n c = self.iv_lower\n d, c = self.encrypt_function(d, c)\n self.iv_upper = d\n self.iv_lower = c\n self.iv = (d << self.word_size) + c\n\n b ^= d\n a ^= c\n\n plaintext = (b << self.word_size) + a\n\n return plaintext\n\n def encrypt_function(self, upper_word, lower_word):\n \"\"\"\n Completes appropriate number of Simon Fiestel function to encrypt provided words\n Round number is based off of number of elements in key schedule\n upper_word: int of upper bytes of plaintext input \n limited by word size of currently configured cipher\n lower_word: int of lower bytes of plaintext input \n limited by word size of currently configured cipher\n x,y: int of Upper and Lower ciphertext words \n \"\"\"\n x = upper_word\n y = lower_word\n\n # Run Encryption Steps For Appropriate Number of Rounds\n for k in self.key_schedule:\n # Generate all circular shifts\n ls_1_x = ((x >> (self.word_size - 1)) + (x << 1)) & self.mod_mask\n ls_8_x = ((x >> (self.word_size - 8)) + (x << 8)) & self.mod_mask\n ls_2_x = ((x >> (self.word_size - 2)) + (x << 2)) & self.mod_mask\n\n # XOR Chain\n xor_1 = (ls_1_x & ls_8_x) ^ y\n xor_2 = xor_1 ^ ls_2_x\n y = x\n x = k ^ xor_2\n\n self.leak += ones(x) + ones(y)\n\n return x, y\n\n def decrypt_function(self, upper_word, lower_word):\n \"\"\"\n Completes appropriate number of Simon Fiestel function to decrypt provided words\n Round number is based off of number of elements in key schedule\n upper_word: int of upper bytes of ciphertext input \n limited by word size of currently configured cipher\n lower_word: int of lower bytes of ciphertext input \n limited by word size of currently 
configured cipher\n x,y: int of Upper and Lower plaintext words \n \"\"\"\n x = upper_word\n y = lower_word\n\n # Run Encryption Steps For Appropriate Number of Rounds\n for k in reversed(self.key_schedule):\n # Generate all circular shifts\n ls_1_x = ((x >> (self.word_size - 1)) + (x << 1)) & self.mod_mask\n ls_8_x = ((x >> (self.word_size - 8)) + (x << 8)) & self.mod_mask\n ls_2_x = ((x >> (self.word_size - 2)) + (x << 2)) & self.mod_mask\n\n # XOR Chain\n xor_1 = (ls_1_x & ls_8_x) ^ y\n xor_2 = xor_1 ^ ls_2_x\n y = x\n x = k ^ xor_2\n\n return x, y\n\n def update_iv(self, new_iv):\n if new_iv:\n try:\n self.iv = new_iv & ((2 ** self.block_size) - 1)\n self.iv_upper = self.iv >> self.word_size\n self.iv_lower = self.iv & self.mod_mask\n except TypeError:\n print('Invalid Initialization Vector!')\n print('Please provide IV as int')\n raise\n return self.iv\n\ndef increment_by_one(hex_input):\n return hex(int(hex_input, 16)+1)\n\ndef flip(value):\n if value == '1':\n return '0'\n return '1'\n\ndef flip_next_one(key, index):\n new_key = key[:index] + flip(key[index]) + key[index+1:]\n return new_key\n\ndef get_ones_offset(plaintext, ones, simon_cipher):\n return abs(simon_cipher.encrypt(plaintext)[1] - ones)\n\ndef get_closest_key(plaintext_to_ones, start_key = None):\n if start_key is None:\n start_binary = bin(0)[2:].zfill(128)\n closest = bin(0)[2:].zfill(128)\n closest_ones = None\n current_key = start_binary\n else:\n start_binary = start_key\n closest = start_key\n closest_ones = None\n current_key = start_binary\n\n #target = 128*68/2\n key_input_to_ones = {}\n for index in range(128):\n print(index, current_key)\n new_inputs = {}\n current_keys = (current_key, flip_next_one(current_key, index))\n key_to_offsets = {}\n closest_average_one = None\n closest_key = None\n for key in current_keys:\n w = SimonCipher(int(key,2), key_size=128, block_size=64)\n for plaintext in plaintext_to_ones:\n if (key, plaintext) in key_input_to_ones:\n current_ones = key_input_to_ones[(key, plaintext)]\n new_inputs[(key, plaintext)] = current_ones\n else:\n _, current_ones = w.encrypt(plaintext)\n new_inputs[(key, plaintext)] = current_ones\n key_to_offsets.setdefault(key, []).append(abs(plaintext_to_ones[plaintext] - current_ones))\n average_one = sum(key_to_offsets[key])/len(key_to_offsets[key])\n if closest_average_one is None or closest_average_one > average_one:\n closest_key = key\n closest_average_one = average_one\n\n key_input_to_ones = new_inputs\n current_key = closest_key\n return current_key\n\ndef parse_data_into_plaintext_to_ones(data):\n plaintext_to_ones = {}\n for plaintext, ones in data:\n plaintext_to_ones[plaintext] = ones\n return plaintext_to_ones\n\ndef verify_key(key, plaintext_to_ones):\n max_offset = 0\n min_offset = float(\"inf\")\n w = SimonCipher(int(key,2))\n all_ones = []\n wrong = 0\n for plaintext in plaintext_to_ones:\n _, current_ones = w.encrypt(plaintext)\n \n offset = abs(current_ones - plaintext_to_ones[plaintext])\n if offset != 0:\n wrong += 1\n all_ones.append(offset)\n if offset > max_offset:\n max_offset = offset\n if offset < min_offset:\n min_offset = offset\n average_ones = sum(all_ones)/len(all_ones)\n \n return max_offset, min_offset, average_ones, wrong/len(all_ones)\n\n\nif __name__ == \"__main__\":\n \n with open(\"./data_1.txt\", 'r') as data1:\n plaintext_to_ones1 = parse_data_into_plaintext_to_ones(eval(data1.read()))\n\n with open(\"./data_2.txt\", 'r') as data2:\n plaintext_to_ones2 = parse_data_into_plaintext_to_ones(eval(data2.read()))\n\n with 
open(\"./data_3.txt\", 'r') as data3:\n plaintext_to_ones3 = parse_data_into_plaintext_to_ones(eval(data3.read()))\n\n with open(\"./data_4.txt\", 'r') as data4:\n plaintext_to_ones4 = parse_data_into_plaintext_to_ones(eval(data4.read()))\n\n with open(\"./data_5.txt\", 'r') as data5:\n plaintext_to_ones5 = parse_data_into_plaintext_to_ones(eval(data5.read()))\n\n # with open(\"./data_6.txt\", 'r') as data6:\n # plaintext_to_ones6 = parse_data_into_plaintext_to_ones(eval(data6.read()))\n\n # with open(\"./data_7.txt\", 'r') as data7:\n # plaintext_to_ones7 = parse_data_into_plaintext_to_ones(eval(data7.read()))\n\n # with open(\"./data_8.txt\", 'r') as data8:\n # plaintext_to_ones8 = parse_data_into_plaintext_to_ones(eval(data8.read()))\n\n # with open(\"./data_9.txt\", 'r') as data9:\n # plaintext_to_ones9 = parse_data_into_plaintext_to_ones(eval(data9.read()))\n\n # #plaintext_to_ones = plaintext_to_ones1\n\n plaintext_to_ones = {**plaintext_to_ones1, **plaintext_to_ones2, \n **plaintext_to_ones3, **plaintext_to_ones4, \n **plaintext_to_ones5}#, **plaintext_to_ones6,\n # **plaintext_to_ones7, **plaintext_to_ones8,\n # **plaintext_to_ones9}\n \n #key = str(get_closest_key(plaintext_to_ones))#, start_key='010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'))\n\n # key = 00100000000000000000000000000000101000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000 # (396, 0, 74.1489)\n #key = '00100000000000000000000000000000001000000010000000000000000000000010000000000000000000000000000000000000000000000010000000000000'\n #print(key)\n \n\n # key = '010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n # key = '000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000'\n \n # this key is pretty close!\n # key = '01010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n # offsets - (max, min)\n # min key offsets - (3241, 2603)\n # max key offsets - (3205, 2680)\n # average key offsets - (3246, 2666)\n # average key = '010000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000'\n #print(verify_key(key, plaintext_to_ones))\n \n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"simon.py","file_name":"simon.py","file_ext":"py","file_size_in_byte":19475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"533613161","text":"__author__ = \"Nitin Kumar, Rick Sherman\"\n__credits__ = \"Jeremy Schulman\"\n\nimport unittest\nfrom nose.plugins.attrib import attr\n\nfrom jnpr.junos.device import Device\nfrom jnpr.junos.rpcmeta import _RpcMetaExec\n\nfrom mock import patch\nfrom lxml import etree\n\n\n@attr('unit')\nclass Test_RpcMetaExec(unittest.TestCase):\n\n def setUp(self):\n self.dev = Device(host='1.1.1.1')\n self.rpc = _RpcMetaExec(self.dev)\n\n def test_rpcmeta_constructor(self):\n self.assertTrue(isinstance(self.rpc._junos, Device))\n\n @patch('jnpr.junos.device.Device.execute')\n def test_rpcmeta_load_config(self, mock_execute_fn):\n root = etree.XML('test')\n self.rpc.load_config(root)\n self.assertEqual(mock_execute_fn.call_args[0][0].tag,\n 'load-configuration')\n\n @patch('jnpr.junos.device.Device.execute')\n def test_rpcmeta_load_config_with_configuration_tag(self, 
mock_execute_fn):\n root = etree.XML(\n 'test')\n self.rpc.load_config(root)\n self.assertEqual(mock_execute_fn.call_args[0][0].tag,\n 'load-configuration')\n\n @patch('jnpr.junos.device.Device.execute')\n def test_rpcmeta_load_config_option_action(self, mock_execute_fn):\n set_commands = \"\"\"\n set system host-name test_rpc\n set system domain-name test.juniper.net\n \"\"\"\n self.rpc.load_config(set_commands, action='set')\n self.assertEqual(mock_execute_fn.call_args[0][0].get('action'),\n 'set')\n\n @patch('jnpr.junos.device.Device.execute')\n def test_rpcmeta_option_format(self, mock_execute_fn):\n set_commands = \"\"\"\n set system host-name test_rpc\n set system domain-name test.juniper.net\n \"\"\"\n self.rpc.load_config(set_commands, format='text')\n self.assertEqual(mock_execute_fn.call_args[0][0].get('format'),\n 'text')\n\n @patch('jnpr.junos.device.Device.execute')\n def test_rpcmeta_exec_rpc_vargs(self, mock_execute_fn):\n self.rpc.system_users_information(dict(format='text'))\n self.assertEqual(mock_execute_fn.call_args[0][0].get('format'),\n 'text')\n\n @patch('jnpr.junos.device.Device.execute')\n def test_rpcmeta_exec_rpc_kvargs(self, mock_execute_fn):\n self.rpc.system_users_information(set_data=('test',))\n self.assertEqual(mock_execute_fn.call_args[0][0][0].text,\n 'test')\n\n @patch('jnpr.junos.device.Device.execute')\n def test_rpcmeta_exec_rpc_normalize(self, mock_execute_fn):\n self.rpc.any_ole_rpc(normalize=True)\n self.assertEqual(mock_execute_fn.call_args[1], {'normalize': True})\n\n @patch('jnpr.junos.device.Device.execute')\n def test_rpcmeta_get_config(self, mock_execute_fn):\n root = etree.XML('test')\n self.rpc.get_config(root)\n self.assertEqual(mock_execute_fn.call_args[0][0].tag,\n 'get-configuration')\n","sub_path":"tests/unit/test_rpcmeta.py","file_name":"test_rpcmeta.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"74449233","text":"# -*- coding: utf-8 -*-\n\nt = input()\nm = []\n\nfor i in range(12):\n aux = []\n for j in range(12):\n aux.append(float(input()))\n m.append(aux)\n aux = []\n\nsoma = 0\ntotal = 0\nleitura = 0\nfor i in range(5):\n for j in range(12):\n if leitura < j < 11 - leitura:\n total += 1\n soma += m[i][j]\n leitura += 1\n\nresultado = soma / (t == 'S' and 1 or total)\nprint('%.1f' % resultado)\n","sub_path":"uri-online-judge/python3/iniciante/1187.py","file_name":"1187.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"169870276","text":"from django.conf.urls import url, include\n\nfrom rest_framework.routers import DefaultRouter\n\nfrom . 
import views, api\n\nrouter = DefaultRouter()\nrouter.register(r'meta-result', api.MetaResult, base_name=\"meta-result\")\nrouter.register(r'study-population', api.StudyPopulation, base_name=\"study-population\")\nrouter.register(r'exposure', api.Exposure, base_name=\"exposure\")\nrouter.register(r'assessed-outcome', api.AssessedOutcome, base_name=\"assessed-outcome\")\n\nurlpatterns = [\n\n    # overall views\n    url(r'^assessment/(?P<pk>\\d+)/full-export/$',\n        views.FullExport.as_view(),\n        name='export'),\n\n    url(r'^assessment/(?P<pk>\\d+)/assessed-outcomes/$',\n        views.AssessedOutcomeList.as_view(),\n        name='assessedoutcome_list'),\n\n    url(r'^assessment/(?P<pk>\\d+)/report/$',\n        views.AssessedOutcomeReport.as_view(),\n        name='ao_report'),\n\n    url(r'^assessment/(?P<pk>\\d+)/meta-result-report/$',\n        views.MetaResultReport.as_view(),\n        name='mr_report'),\n\n    url(r'^assessment/(?P<pk>\\d+)/meta-result-full-export/$',\n        views.MetaResultFullExport.as_view(),\n        name='mr_export'),\n\n    # study-criteria views\n    url(r'^assessment/(?P<pk>\\d+)/study-criteria/create/$',\n        views.StudyCriteriaCreate.as_view(),\n        name='studycriteria_create'),\n\n    # study-population views\n    url(r'^study/(?P<pk>\\d+)/study-population/create/$',\n        views.StudyPopulationCreate.as_view(),\n        name='sp_create'),\n\n    url(r'^study/(?P<pk>\\d+)/study-population/copy-as-new-selector/$',\n        views.StudyPopulationCopyAsNewSelector.as_view(),\n        name='sp_copy_selector'),\n\n    url(r'^study-population/(?P<pk>\\d+)/$',\n        views.StudyPopulationDetail.as_view(),\n        name='sp_detail'),\n\n    url(r'^study-population/(?P<pk>\\d+)/update/$',\n        views.StudyPopulationUpdate.as_view(),\n        name='sp_update'),\n\n    url(r'^study-population/(?P<pk>\\d+)/delete/$',\n        views.StudyPopulationDelete.as_view(),\n        name='sp_delete'),\n\n    # exposure views\n    url(r'^study-population/(?P<pk>\\d+)/exposure/create/$',\n        views.ExposureCreate.as_view(),\n        name='exposure_create'),\n\n    url(r'^study-population/(?P<pk>\\d+)/exposure/copy-as-new-selector/$',\n        views.ExposureCopyAsNewSelector.as_view(),\n        name='exposure_copy_selector'),\n\n    url(r'^exposure/(?P<pk>\\d+)/$',\n        views.ExposureDetail.as_view(),\n        name='exposure_detail'),\n\n    url(r'^exposure/(?P<pk>\\d+)/update/$',\n        views.ExposureUpdate.as_view(),\n        name='exposure_update'),\n\n    url(r'^exposure/(?P<pk>\\d+)/delete/$',\n        views.ExposureDelete.as_view(),\n        name='exposure_delete'),\n\n    # factor views\n    url(r'^assessment/(?P<pk>\\d+)/factors/create/$',\n        views.FactorCreate.as_view(),\n        name='factor_create'),\n\n    # assessed-outcome views\n    url(r'^exposure/(?P<pk>\\d+)/assessed-outcome/create/$',\n        views.AssessedOutcomeCreate.as_view(),\n        name='assessedoutcome_create'),\n\n    url(r'^exposure/(?P<pk>\\d+)/assessed-outcome/copy-as-new-selector/$',\n        views.AssessedOutcomeCopyAsNewSelector.as_view(),\n        name='assessedoutcome_copy_selector'),\n\n    url(r'^assessed-outcome/(?P<pk>\\d+)/$',\n        views.AssessedOutcomeDetail.as_view(),\n        name='assessedoutcome_detail'),\n\n    url(r'^assessed-outcome/(?P<pk>\\d+)/update/$',\n        views.AssessedOutcomeUpdate.as_view(),\n        name='assessedoutcome_update'),\n\n    url(r'^assessed-outcome/(?P<pk>\\d+)/versions/$',\n        views.AssessedOutcomeVersions.as_view(),\n        name='assessedoutcome_versions'),\n\n    url(r'^assessed-outcome/(?P<pk>\\d+)/delete/$',\n        views.AssessedOutcomeDelete.as_view(),\n        name='assessedoutcome_delete'),\n\n    # meta-protocol views\n    url(r'^study/(?P<pk>\\d+)/meta-protocol/create/$',\n        views.MetaProtocolCreate.as_view(),\n        name='mp_create'),\n\n    url(r'^meta-protocol/(?P<pk>\\d+)/$',\n        views.MetaProtocolDetail.as_view(),\n        name='mp_detail'),\n\n    url(r'^meta-protocol/(?P<pk>\\d+)/json/$',\n        views.MetaProtocolJSON.as_view(),\n        name='mp_json'),\n\n    url(r'^meta-protocol/(?P<pk>\\d+)/update/$',\n        views.MetaProtocolUpdate.as_view(),\n        name='mp_update'),\n\n    url(r'^meta-protocol/(?P<pk>\\d+)/delete/$',\n        views.MetaProtocolDelete.as_view(),\n        name='mp_delete'),\n\n    # meta-result views\n    url(r'^assessment/(?P<pk>\\d+)/meta-results/$',\n        views.MetaResultList.as_view(),\n        name='metaresult_list'),\n\n    url(r'^meta-protocol/(?P<pk>\\d+)/meta-result/create/$',\n        views.MetaResultCreate.as_view(),\n        name='mr_create'),\n\n    url(r'^meta-protocol/(?P<pk>\\d+)/meta-result/copy-as-new-selector/$',\n        views.MetaResultCopyAsNewSelector.as_view(),\n        name='mr_copy_selector'),\n\n    url(r'^meta-result/(?P<pk>\\d+)/$',\n        views.MetaResultDetail.as_view(),\n        name='mr_detail'),\n\n    url(r'^meta-result/(?P<pk>\\d+)/json/$',\n        views.MetaResultJSON.as_view(),\n        name='mr_json'),\n\n    url(r'^meta-result/(?P<pk>\\d+)/update/$',\n        views.MetaResultUpdate.as_view(),\n        name='mr_update'),\n\n    url(r'^meta-result/(?P<pk>\\d+)/delete/$',\n        views.MetaResultDelete.as_view(),\n        name='mr_delete'),\n\n    url(r'^api/', include(router.urls)),\n]\n","sub_path":"project/epi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"53041535","text":"import sys\nimport copy\nfrom PyQt5 import QtCore, QtWidgets, QtGui\nfrom PyQt5.QtCore import QSize\nfrom moduleConfgFrames import *\nfrom images import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.Qt import *\nfrom PyQt5.QtGui import QFont, QColor, QIcon\nfrom Elements.Elements import Element\nimport functools\nfrom PyQt5.QtCore import pyqtSlot\nfrom Generators.Generator import C_Generator\n# import module_window\n\n\nclass StandardItem(QStandardItem):\n    def __init__(self, txt='', font_size=12, set_bold=False, color=QColor(0, 0, 0)):\n        super().__init__()\n\n        fnt = QFont('Open Sans', font_size)\n        fnt.setBold(set_bold)\n        self.setEditable(False)\n        self.setForeground(color)\n        self.setFont(fnt)\n        self.setText(txt)\n\nclass moduleConfg(QMainWindow):\n    switch_create_window = QtCore.pyqtSignal()\n    switch_open_window = QtCore.pyqtSignal()\n    projectName = ''\n    windowFrame = Ui_MainWindow()\n\n    checkedModulesTree = None\n    rootNode = None\n    Application_Root = None\n    CDD_Root = None\n    Service_Root = None\n    BSW_Root = None\n    comboBox = None\n\n    portConnections = {}\n\n    def __init__(self,toolName,toolIcon):\n        super(moduleConfg, self).__init__()\n\n        self.windowFrame.setupUi(self)\n\n        self.setWindowTitle(toolName)\n        self.setWindowIcon(QtGui.QIcon(toolIcon))\n\n        #self.parametersAndReferences = QScrollArea(self)\n        self.windowFrame.parametersAndReferences.setWidgetResizable(True)\n        #self.parametersAndReferences.setGeometry(500, 70, 850, 500)\n        self.parametersAndReferencesRows = QFormLayout()\n        groupBox = QGroupBox(\"Configurations\")\n        groupBox.setLayout(self.parametersAndReferencesRows)\n        self.windowFrame.parametersAndReferences.setWidget(groupBox)\n\n        #Create the View\n        #self.checkedModulesView = QTreeView(self)\n        #show header\n        self.windowFrame.checkedModulesView.setHeaderHidden(True)\n        #self.checkedModulesView.setGeometry(20, 70, 450, 500)\n        \n        self.checkedModulesTree = QStandardItemModel()\n        #root of tree\n        self.rootNode = self.checkedModulesTree.invisibleRootItem()\n        \n\n        self.windowFrame.checkedModulesView.clicked.connect(self.getValue)\n        self.windowFrame.checkedModulesView.clicked.connect(self.showConfigurations)\n\n        #self.windowFrame.checkedModulesView.clicked.connect(self.showDescription)\n        
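# NOTE: Qt allows one signal to drive several slots; 'clicked' above feeds both getValue and showConfigurations the same QModelIndex\n        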
#self.windowFrame.checkedModulesView.clicked.connect(self.showMultiplicity)\n\n        #self.windowFrame.descriptionLabel = QtWidgets.QPlainTextEdit(self)\n        self.windowFrame.descriptionLabel.appendPlainText(\"description information Label\")\n        #self.windowFrame.descriptionLabel.setGeometry(20, 580, 450, 120)\n        self.windowFrame.descriptionLabel.setFont(QtGui.QFont(\"Sanserif\", 10))\n        self.windowFrame.descriptionLabel.setStyleSheet(\"\"\"QLabel {border: 1px solid #000;}\"\"\")\n        self.windowFrame.descriptionLabel.setReadOnly(True)\n\n        #self.windowFrame.containerMultiplicityLabel = QtWidgets.QPlainTextEdit(self)\n        self.windowFrame.containerMultiplicityLabel.appendPlainText(\"Container Multiplicity Label\")\n        #self.containerMultiplicityLabel.setGeometry(500, 580, 350, 120)\n        self.windowFrame.containerMultiplicityLabel.setFont(QtGui.QFont(\"Sanserif\", 10))\n        self.windowFrame.containerMultiplicityLabel.setStyleSheet(\"\"\"QLabel {border: 1px solid #000;}\"\"\")\n        self.windowFrame.containerMultiplicityLabel.setReadOnly(True)\n\n        #buttons\n        #self.saveButton = QtWidgets.QPushButton(self)\n        self.windowFrame.saveButton.setText(\"Save\")\n        #self.saveButton.move(1100, 620)\n        self.windowFrame.saveButton.resize(90, 60)\n        self.windowFrame.saveButton.setFont(QtGui.QFont(\"Sanserif\", 11))\n        self.windowFrame.saveButton.setToolTip('This is an example button')\n        self.windowFrame.saveButton.setIcon(QIcon(\"save.png\"))\n        self.windowFrame.saveButton.setIconSize(QSize(25, 25))\n        self.windowFrame.saveButton.clicked.connect(self.saveButtonFunction)\n\n\n        #self.generateButton = QtWidgets.QPushButton(self)\n        self.windowFrame.generateButton.setText(\"Generate\")\n        #self.generateButton.move(1200, 620)\n        self.windowFrame.generateButton.resize(90, 60)\n        self.windowFrame.generateButton.setFont(QtGui.QFont(\"Sanserif\", 10))\n        self.windowFrame.generateButton.setToolTip('This is an example button')\n        self.windowFrame.generateButton.setIcon(QIcon(\"cg.png\"))\n        self.windowFrame.generateButton.setIconSize(QSize(22, 22))\n        self.windowFrame.generateButton.clicked.connect(self.generateButtonFunction)\n\n        self.showMenuBar()\n        self.showToolBar()\n\n    def treeOfCheckedModulesInit(self):\n        self.checkedModulesTree = QStandardItemModel()\n        self.rootNode = self.checkedModulesTree.invisibleRootItem()\n        self.Application_Root = StandardItem('Application Software Component', 10)\n        self.CDD_Root = StandardItem('Complex Device Driver Software Component', 10)\n        self.Service_Root = StandardItem('Service Software Component', 10)\n        self.BSW_Root = StandardItem('BSW Functions', 10)\n\n    def treeOfCheckedModules(self, SWC, key):\n        for Module in SWC:\n            self.treeChildren = StandardItem(Module, 10)\n            if key == 0:\n                self.Application_Root.appendRow(self.treeChildren)\n            elif key == 1:\n                self.CDD_Root.appendRow(self.treeChildren)\n            elif key == 2:\n                self.Service_Root.appendRow(self.treeChildren)\n\n            self.PortSrandardItem = StandardItem('Ports', 8)\n            self.P_PortSrandardItem = StandardItem('P Ports', 8)\n            self.R_PortSrandardItem = StandardItem('R Ports', 8)\n            self.RunnableSrandardItem = StandardItem('Runnables', 8)\n            self.treeChildren.appendRow(self.PortSrandardItem)\n            self.treeChildren.appendRow(self.RunnableSrandardItem)\n            self.PortSrandardItem.appendRow(self.R_PortSrandardItem)\n            self.PortSrandardItem.appendRow(self.P_PortSrandardItem)\n\n        #set Tree root \n        self.rootNode.appendRow(self.Application_Root)\n        self.rootNode.appendRow(self.CDD_Root)\n        self.rootNode.appendRow(self.Service_Root)\n        self.rootNode.appendRow(self.BSW_Root)\n        
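# Resulting model layout: root -> {Application, CDD, Service, BSW} -> module -> (Ports -> (R Ports, P Ports), Runnables)\n        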
self.windowFrame.checkedModulesView.setModel(self.checkedModulesTree)\n self.windowFrame.checkedModulesView.expandAll()\n\n def showConfigurations(self):\n for i in reversed(range(self.parametersAndReferencesRows.count())):\n if self.parametersAndReferencesRows.itemAt(i).widget() is not None:\n self.parametersAndReferencesRows.itemAt(i).widget().deleteLater()\n else:\n for j in reversed(range(self.parametersAndReferencesRows.itemAt(i).layout().count())):\n self.parametersAndReferencesRows.itemAt(i).layout().itemAt(j).widget().deleteLater()\n self.parametersAndReferencesRows.removeItem(self.parametersAndReferencesRows.itemAt(i).layout())\n\n if self.windowFrame.checkedModulesView.selectedIndexes()[0].data(Qt.DisplayRole) == 'BSW Functions':\n for i in range(0,5):\n layout = QHBoxLayout()\n\n checkBox = QCheckBox(\"Is Included\")\n \n taskCB = QComboBox()\n taskCB.addItem(\"None\")\n posCB = QComboBox()\n posCB.addItem(\"None\")\n\n priodCB = QSpinBox()\n\n taskL = QLabel()\n taskL.setText(\"Task\")\n taskL.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n posL = QLabel()\n posL.setText(\"Position\")\n posL.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n priodL = QLabel()\n priodL.setText(\"Priodicity\")\n priodL.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n \n layout.addWidget(checkBox)\n layout.addWidget(taskL)\n layout.addWidget(taskCB)\n layout.addWidget(posL)\n layout.addWidget(posCB)\n layout.addWidget(priodL)\n layout.addWidget(priodCB)\n\n if i == 0:\n self.parametersAndReferencesRows.addRow(\"Com_MainFunctionRx\", layout)\n elif i == 1:\n self.parametersAndReferencesRows.addRow(\"Com_MainFunctionTx\", layout)\n elif i == 2:\n self.parametersAndReferencesRows.addRow(\"CanTp_MainFunction\", layout)\n elif i == 3:\n self.parametersAndReferencesRows.addRow(\"Can_MainFunction_Read\", layout)\n elif i == 4:\n self.parametersAndReferencesRows.addRow(\"Can_MainFunction_Write\", layout)\n elif self.windowFrame.checkedModulesView.selectedIndexes()[0].data(Qt.DisplayRole) == 'Runnables' :\n \n Elements = Element()\n Elements.update()\n\n for i in Elements.Application_SWC_Types:\n if self.windowFrame.checkedModulesView.selectedIndexes()[0].parent().data(Qt.DisplayRole) == i.Name:\n for j in i.InternalBehavoirs:\n for k in j.Runnables:\n layoutRunnable = QHBoxLayout()\n taskCBox = QComboBox()\n taskCBox.addItem(\"None\")\n \n posCBox = QSpinBox()\n\n taskTypeSelectedLabel = QLabel()\n taskTypeSelectedLabel.setText(\"Basic\")\n\n triggerSelectedLabel = QLabel()\n triggerSelectedLabel.setText(\"Init Event\")\n\n priodData = QLabel()\n if triggerSelectedLabel.text() == 'Timing Event':\n priodData.setText(\"10\")\n else:\n priodData.setText(\"None\")\n\n taskLabel = QLabel()\n taskLabel.setText(\"Task\")\n taskLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n taskTypeLabel = QLabel()\n taskTypeLabel.setText(\"Type\")\n taskTypeLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n triggerLabel = QLabel()\n triggerLabel.setText(\"Trigger\")\n triggerLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n posLabel = QLabel()\n posLabel.setText(\"Position\")\n posLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n priodLabel = QLabel()\n priodLabel.setText(\"Priodicity\")\n priodLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n \n layoutRunnable.addWidget(taskLabel)\n layoutRunnable.addWidget(taskCBox)\n layoutRunnable.addWidget(posLabel)\n layoutRunnable.addWidget(posCBox)\n 
layoutRunnable.addWidget(taskTypeLabel)\n layoutRunnable.addWidget(taskTypeSelectedLabel)\n layoutRunnable.addWidget(triggerLabel)\n layoutRunnable.addWidget(triggerSelectedLabel)\n layoutRunnable.addWidget(priodLabel)\n layoutRunnable.addWidget(priodData)\n\n self.parametersAndReferencesRows.addRow(k.Name, layoutRunnable)\n\n elif self.windowFrame.checkedModulesView.selectedIndexes()[0].data(Qt.DisplayRole) == 'R Ports':\n Elements = Element()\n Elements.update()\n \n for i in Elements.Application_SWC_Types:\n if self.windowFrame.checkedModulesView.selectedIndexes()[0].parent().parent().data(Qt.DisplayRole) == i.Name:\n for a in i.Ports:\n self.comboBox = QComboBox()\n if a.Port_Type == 'R-Port':\n self.comboBox.addItem(\"None\")\n for e in Elements.Application_SWC_Types:\n for p in e.Ports:\n if a.Interface_Type == 'Sender_Reciever_Interface' and p.Interface_Type == 'Sender_Reciever_Interface':\n if Elements.Sender_Reciever_Port_Interfaces[a.Interface_ID].Name == Elements.Sender_Reciever_Port_Interfaces[p.Interface_ID].Name:\n if p.Port_Type == 'P-Port':\n self.comboBox.addItem(p.Name)\n elif a.Interface_Type == 'Client_Server_Interface' and p.Interface_Type == 'Client_Server_Interface':\n if Elements.Client_Server_Port_Interfaces[a.Interface_ID].Name == Elements.Client_Server_Port_Interfaces[p.Interface_ID].Name:\n if p.Port_Type == 'P-Port':\n self.comboBox.addItem(p.Name)\n\n self.comboBox.setObjectName(a.Name)\n self.comboBox.currentIndexChanged.connect(functools.partial(self.SelectedIndex, a.Name))\n self.comboBox.setCurrentText(self.portConnections[a.Name])\n self.parametersAndReferencesRows.addRow(a.Name, self.comboBox)\n\n elif self.windowFrame.checkedModulesView.selectedIndexes()[0].data(Qt.DisplayRole) == 'P Ports':\n Elements = Element()\n Elements.update()\n\n for i in Elements.Application_SWC_Types:\n if self.windowFrame.checkedModulesView.selectedIndexes()[0].parent().parent().data(Qt.DisplayRole) == i.Name:\n for a in i.Ports:\n self.comboBox = QComboBox()\n if a.Port_Type == 'P-Port':\n self.comboBox.addItem(\"None\")\n self.parametersAndReferencesRows.addRow(a.Name, self.comboBox)\n\n def getValue(self, val):\n None\n # print(val.data())\n # print(val.row())\n\n def SelectedIndex(self,PortName):\n for i in reversed(range(self.parametersAndReferencesRows.count())):\n if self.parametersAndReferencesRows.itemAt(i).widget() is not None:\n if self.parametersAndReferencesRows.itemAt(i).widget().objectName() == PortName:\n self.portConnections[PortName] = self.parametersAndReferencesRows.itemAt(i).widget().currentText()\n\n\n def showMenuBar(self):\n #self.menuBar = QtWidgets.QMenuBar(self)\n #self.setMenuBar(self.menuBar)\n self.windowFrame.menuBar.setStyleSheet(\"\"\"QMenuBar {border: 1px solid #000;}\"\"\")\n\n self.fileMenu = self.windowFrame.menuBar.addMenu('File')\n self.newProjectItem = QAction('New Project', self)\n self.fileMenu.addAction(self.newProjectItem)\n #self.newProjectItem.triggered.connect(self.createProjectWindow)\n\n self.openExitProjectItem = QAction('Open Project', self)\n self.fileMenu.addAction(self.openExitProjectItem)\n #self.openExitProjectItem.triggered.connect(self.exitItemAction)\n\n self.saveProjectItem = QAction('Save', self)\n self.fileMenu.addAction(self.saveProjectItem)\n #self.saveProjectItem.triggered.connect(self.saveButtonFunction)\n\n self.exitItem = QAction('Exit', self)\n self.fileMenu.addAction(self.exitItem)\n #self.exitItem.triggered.connect(self.exitItemAction)\n\n self.generateMenu = self.windowFrame.menuBar.addMenu('Generate')\n 
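# NOTE: the menu actions created from here on are placeholders; their triggered.connect handlers are not wired up yet\n        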
self.generateMenuItem = QAction('C Generate', self)\n        self.generateMenu.addAction(self.generateMenuItem)\n\n        self.settingMenu = self.windowFrame.menuBar.addMenu('Setting')\n        self.settingMenuItem = QAction('Project Setting', self)\n        self.settingMenu.addAction(self.settingMenuItem)\n\n        self.helpMenu = self.windowFrame.menuBar.addMenu('Help')\n        self.helpMenuItem = QAction('About', self)\n        self.helpMenu.addAction(self.helpMenuItem)\n\n    def exitItemAction(self):\n        questionMessage = QMessageBox()\n        ret = questionMessage.question(self, '', \"Do you want to save project?\",\n                                       questionMessage.Yes | questionMessage.No | questionMessage.Cancel)\n        questionMessage.setDefaultButton(questionMessage.Cancel)\n        if ret == questionMessage.Yes:\n            self.saveButtonFunction()\n            QtWidgets.QApplication.quit()\n        elif ret == questionMessage.No:\n            QtWidgets.QApplication.quit()\n        else:\n            pass\n    \n    def showToolBar(self):\n        #self.toolBar = QtWidgets.QToolBar(self)\n        #self.addToolBar(self.toolBar)\n\n        self.addToolBarItem = QAction(\"add\", self)\n        self.addToolBarItem.setIcon(QtGui.QIcon('plus.png'))\n        self.windowFrame.toolBar.addAction(self.addToolBarItem)\n        #self.addToolBarItem.triggered.connect(self.addContainers)\n\n        self.deleteToolBarItem = QAction(\"delete\", self)\n        self.deleteToolBarItem.setIcon(QtGui.QIcon('delete.jpg'))\n        self.windowFrame.toolBar.addAction(self.deleteToolBarItem)\n        #self.deleteToolBarItem.triggered.connect(self.deleteSelectedContainer)\n\n    def createProjectWindow(self):\n        self.switch_create_window.emit()\n\n    def openProjectWindow(self):\n        self.switch_open_window.emit()\n\n    def saveButtonFunction(self):\n        #when the user presses the Save button, put the code to execute here\n        pass\n\n    def generateButtonFunction(self):\n        # when the user presses the Generate button, run the RTE code generation below\n        self.generator = C_Generator()\n        self.generator.Rte_h_Gen()\n        self.generator.Rte_runnable_Gen()\n        self.generator.Rte_port_Gen()\n        self.generator.Rte_Src_Gen()\n\n    def showPopUpCompleteMessage(self,message):\n        msg = QMessageBox()\n        msg.setWindowTitle(\"message\")\n        msg.setText(message)\n        msgRun = msg.exec_()\n\n\n    def getFolderDirection(self,folderdir):\n        self.folderNameDir = folderdir\n\n    def closeEvent(self, a0: QtGui.QCloseEvent) -> None:\n        questionMessage = QMessageBox()\n        ret = questionMessage.question(self, '', \"Do you want to save project?\",questionMessage.Yes | questionMessage.No | questionMessage.Cancel)\n        questionMessage.setDefaultButton(questionMessage.Cancel)\n        if ret == questionMessage.Yes:\n            self.saveButtonFunction()\n            a0.accept()\n        elif ret == questionMessage.No:\n            a0.accept()\n        else:\n            a0.ignore()\n    \n    def errorMessageEnterNumber(self):\n        self.msg = QMessageBox()\n        self.msg.setWindowTitle(\"Error message\")\n        self.msg.setText(\"You must enter numbers only\")\n        msgRun = self.msg.exec_()\n    \n    def showDescription(self):\n        self.windowFrame.descriptionLabel.clear()\n        self.windowFrame.descriptionLabel.appendPlainText('put string here')\n\n    def showMultiplicity(self):\n        self.windowFrame.containerMultiplicityLabel.clear()\n        self.windowFrame.containerMultiplicityLabel.appendPlainText('put string here')\n\n    def projectName(self, name):\n        self.projectName = name\n","sub_path":"module_configure.py","file_name":"module_configure.py","file_ext":"py","file_size_in_byte":19782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"74449233","text":"\n\n#class header\nclass _SURE():\n\tdef __init__(self,): \n\t\tself.name = \"SURE\"\n\t\tself.definitions 
= [u'certain; without any doubt: ', u'certain or certainly: ', u'to be very or too confident: ', u'be confident that something is true: ', u'to have confidence in and trust someone: ', u'to be certain to: ', u'to look and/or take action to be certain that something happens, is true, etc.: ', u'If you have a sure knowledge or understanding of something, you know or understand it very well: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_sure.py","file_name":"_sure.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"182755808","text":"import os\nimport logging\nimport operator\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom collections import Counter\nfrom matplotlib import pyplot as plt\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.optimizers import Adam, RMSprop\nfrom tensorflow.keras.models import Model, load_model\nfrom tensorflow.keras.layers import Dense, Input, Dropout, BatchNormalization\n\nlogging.getLogger('tensorflow').disabled = True\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\nprint(\"\\nNum GPUs Available : {}\".format(len(physical_devices)))\nprint(\"Tensorflow version : {}\\n\".format(tf.__version__))\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\nfrom variables import*\nfrom util import load_data\n\nclass TrafficClassifier(object):\n def __init__(self):\n X, Y, Xtest = load_data()\n self.X = X \n self.Y = Y\n self.Xtest = Xtest\n print(\"Input Shape : {}\".format(self.X.shape))\n print(\"Label Shape : {}\".format(self.Y.shape))\n\n def classifier(self):\n inputs = Input(shape=(n_features,))\n x = Dense(dense1, activation='relu')(inputs)\n x = Dense(dense1, activation='relu')(x)\n x = BatchNormalization()(x)\n x = Dense(dense2, activation='relu')(x)\n x = Dense(dense2, activation='relu')(x)\n x = Dense(dense2, activation='relu')(x)\n x = BatchNormalization()(x)\n x = Dense(dense3, activation='relu')(x)\n x = Dense(dense3, activation='relu')(x)\n x = Dense(dense3, activation='relu')(x)\n x = Dense(dense3, activation='relu')(x)\n x = Dropout(keep_prob)(x)\n outputs = Dense(n_classes, activation='softmax')(x)\n self.model = Model(inputs, outputs)\n\n @staticmethod\n def network_acc(custom_acc):\n def acc(y_true,y_pred):\n targ = K.argmax(y_true, axis=-1)\n pred = K.argmax(y_pred, axis=-1)\n\n correct = K.cast(K.equal(targ, pred), dtype='float32')\n Pmax = K.cast(K.max(y_pred, axis=-1), dtype='float32')\n\n Pmax = Pmax * correct\n mask = K.cast(K.greater(Pmax, custom_acc), dtype='float32')\n\n return K.mean(mask)\n return acc\n\n\n def train(self):\n self.model.compile(\n loss='categorical_crossentropy',\n optimizer=Adam(learning_rate),\n # metrics=['accuracy']\n metrics=[\n TrafficClassifier.network_acc(\n custom_acc=custom_acc\n )]\n )\n self.history = self.model.fit(\n self.X,\n self.Y,\n batch_size=batch_size,\n epochs=num_epoches,\n validation_split=validation_split\n )\n\n def load_model(self):\n loaded_model = load_model(model_weights)\n loaded_model.compile(\n loss='categorical_crossentropy',\n optimizer=Adam(learning_rate),\n # metrics=['accuracy']\n 
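# network_acc only scores a sample when the argmax class is correct AND its softmax probability exceeds custom_acc\n            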
metrics=[\n                TrafficClassifier.network_acc(\n                    custom_acc=custom_acc\n                )]\n        )\n        self.model = loaded_model\n\n    def save_model(self):\n        self.model.save(model_weights)\n\n    def evaluation(self, X):\n        Ypred = self.model.predict(X)\n        Ppred = np.max(Ypred, axis=-1)\n        unk = (Ppred <= custom_acc)\n        Punk = np.mean(unk) * 100\n        return Punk\n\n    def unknown_evaluation(self):\n        print(\"-----Unknown percentage------\")\n        print(\"Train Data : {}%\".format(self.evaluation(self.X)))\n        print(\"Test Data : {}%\".format(self.evaluation(self.Xtest)))\n\n    def predict_classes(self):\n        Ypred = self.model.predict(self.X)\n\n        N = Ypred.shape[0]\n        Ppred = np.argmax(Ypred, axis=-1)\n        Ponehot = np.zeros((N, n_classes), dtype=np.int64)\n        for i in range(N):\n            j = Ppred[i]\n            Ponehot[i,j] = 1\n        # NOTE: self.encoder is assumed to be a fitted one-hot encoder attached elsewhere; it is never assigned in this file\n        Pclasses = self.encoder.inverse_transform(Ponehot).reshape(-1,)\n        class_count = dict(Counter(Pclasses.tolist()))\n        class_count = sorted(class_count.items(),key=operator.itemgetter(1),reverse=True)\n        for label, value in class_count:\n            fraction = (value/N)*100\n            fraction = round(fraction, 3)\n            print(\"{} : {}%\".format(label,fraction))\n\n    def bin_probability(self,Confidence):\n        bins = np.linspace(0, 1, n_bins)\n        digitized = np.digitize(Confidence, bins)\n        return digitized\n\n    def plot_histogram(self, x1, x2):\n        Wx1 = np.empty(x1.shape)\n        Wx1.fill(1/len(x1))\n        Wx2 = np.empty(x2.shape)\n        Wx2.fill(1/len(x2))\n\n        plt.hist(\n            [x1, x2], \n            bins = n_bins,\n            weights=[Wx1, Wx2],\n            histtype ='bar',\n            color = colors, \n            label=names\n        )\n\n        plt.legend()\n        plt.xlabel('confidence')\n        plt.ylabel('Distribution')\n        plt.title('Confidence distribution of Train and Test data')\n        plt.savefig(confidence_img)\n        plt.show()\n\n    def predict_distribution(self):\n        Ptrain = self.model.predict(self.X)\n        Ptest = self.model.predict(self.Xtest)\n\n        ConfTrain = np.max(Ptrain, axis=-1)\n        ConfTest = np.max(Ptest, axis=-1)\n\n        ConfTrain_bin = self.bin_probability(ConfTrain) / n_bins\n        ConfTest_bin = self.bin_probability(ConfTest) / n_bins\n        self.plot_histogram(ConfTrain_bin, ConfTest_bin)\n\n    def run(self):\n        if os.path.exists(model_weights):\n            print(\"Loading the model !!!\")\n            self.load_model()\n        else:\n            print(\"Training the model !!!\")\n            self.classifier()\n            self.train()\n            self.save_model()\n        self.unknown_evaluation()\n        self.predict_distribution()\n\nif __name__ == \"__main__\":\n    model = TrafficClassifier()\n    model.run()\n","sub_path":"ANN model/ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":6211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"12187135","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nimport json  # used by serialize/deserialize below\n\n\nclass Codec:\n\n    def serialize(self, root: TreeNode) -> str:\n        \"\"\"Encodes a tree to a single string.\n        \"\"\"\n        if not root:\n            return json.dumps([])\n        \n        arr = []\n        queue = [[root, -1]]\n        while queue:\n            tmp = queue.pop(0)\n            cur, parentIdx = tmp[0], tmp[1]\n            arr.append([cur.val, parentIdx])\n            if cur.left:\n                queue.append([cur.left, len(arr)-1])\n            if cur.right:\n                queue.append([cur.right, len(arr)-1])\n        return json.dumps(arr)\n    \n    def deserialize(self, data: str) -> TreeNode:\n        \"\"\"Decodes your encoded data to tree.\n        \"\"\"\n        arr = json.loads(data)\n        if not arr:\n            return None\n        \n        i = 0\n        while i < len(arr):\n            val, parentIdx = arr[i][0], arr[i][1]\n            arr[i][0] = TreeNode(val)\n            if parentIdx != -1:\n                parent = arr[parentIdx][0]\n                if parent.val > val:\n                    parent.left = arr[i][0]\n                else:\n                    
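# BST property: values not smaller than the parent belong in the right subtree\n                    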
parent.right = arr[i][0]\n i += 1\n return arr[0][0]\n \n","sub_path":"src/449.py","file_name":"449.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"527545061","text":"#!/usr/bin/env python\nimport getopt\nimport logging\nimport os\nimport sys\nimport time\nimport random\n\nfrom kubernetes import client\n\n# extract env variables.\nnamespace = os.environ['NAMESPACE']\ncert = os.environ['CERT']\nhost = os.environ['KUBERNETES_SERVICE_HOST']\ntoken_path = os.environ['TOKEN']\n\nwith open(token_path, 'r') as token_file:\n token = token_file.read().replace('\\n', '')\n\n# setup logging\nlog = logging.getLogger(__name__)\nhandler = logging.StreamHandler(sys.stdout)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nhandler.setLevel(logging.INFO)\nlog.addHandler(handler)\nlog.setLevel(logging.INFO)\n\nconfiguration = client.Configuration()\nconfiguration.host = \"https://\" + host\nconfiguration.ssl_ca_cert = cert\nconfiguration.api_key['authorization'] = token\nconfiguration.api_key_prefix['authorization'] = 'Bearer'\nbatchV1Api = client.BatchV1Api(client.ApiClient(configuration))\n\n\ndef is_job_complete(job_name):\n complete = False\n log.info(\"Checking if \" + job_name + \" is complete\")\n response = \"\"\n try:\n response = batchV1Api.read_namespaced_job_status(job_name, namespace)\n if response.status.succeeded == 1:\n job_status_type = response.status.conditions[0].type\n if job_status_type == \"Complete\":\n complete = True\n else:\n log.info(job_name + \" is not complete\")\n else:\n log.info(job_name + \" has not succeeded yet\")\n return complete\n except Exception as e:\n log.error(\"Exception when calling read_namespaced_job_status: %s\\n\" % e)\n\n\nDEF_TIMEOUT = 10\nDESCRIPTION = \"Kubernetes container job complete check utility\"\nUSAGE = \"Usage: job_complete.py [-t ] -j \" \\\n \"[-j ...]\\n\" \\\n \"where\\n\" \\\n \" - wait for container job complete timeout in min, \" \\\n \"default is \" + str(DEF_TIMEOUT) + \"\\n\" \\\n \" - name of the job to wait for\\n\"\n\n\ndef main(argv):\n # args are a list of job names\n job_names = []\n timeout = DEF_TIMEOUT\n try:\n opts, args = getopt.getopt(argv, \"hj:t:\", [\"job-name=\",\n \"timeout=\",\n \"help\"])\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n print(\"%s\\n\\n%s\" % (DESCRIPTION, USAGE))\n sys.exit()\n elif opt in (\"-j\", \"--job-name\"):\n job_names.append(arg)\n elif opt in (\"-t\", \"--timeout\"):\n timeout = float(arg)\n except (getopt.GetoptError, ValueError) as e:\n print(\"Error parsing input parameters: %s\\n\" % e)\n print(USAGE)\n sys.exit(2)\n if job_names.__len__() == 0:\n print(\"Missing required input parameter(s)\\n\")\n print(USAGE)\n sys.exit(2)\n\n for job_name in job_names:\n timeout = time.time() + timeout * 60\n while True:\n complete = is_job_complete(job_name)\n if complete is True:\n break\n elif time.time() > timeout:\n log.warning(\"timed out waiting for '\" + job_name +\n \"' to be completed\")\n exit(1)\n else:\n # spread in time potentially parallel execution in multiple\n # containers\n time.sleep(random.randint(5, 11))\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"kubernetes/readiness/src/main/scripts/job_complete.py","file_name":"job_complete.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} 
+{"seq_id":"361503073","text":"# -*- coding: utf-8 -*-\n\nfrom urllib import request\nimport re\nfrom bs4 import BeautifulSoup as bs\nimport pymysql\n\ndb_config = {\n\t\"host\": \"39.108.138.156\",\n\t\"port\": \"3306\",\n\t\"user\": \"root\",\n\t'password':'123456',\n\t'db':'spider',\n\t'charset':'utf8mb4'\n}\n\n# db = pymysql.connect(db_config)\ndb = pymysql.connect(\"39.108.138.156\",\"root\",\"123456\",\"spider\" )\n\nurl = \"http://www2.huishoubao.com/cellphone/?pid=1032&classId=1&brandId=2\"\n\nreq = request.Request(url)\n\nreq.add_header(\"User-Agent\", \"Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1\")\n\nresponse = request.urlopen(req)\n\nhtml = response.read().decode('utf-8')\nsoup = bs(html, 'html.parser')\n\nlinks = soup.find_all('li', class_=\"product-item\")\n\n# //www.vmall.com/recycle?url=%2Fproducts_30796.html%3Fpid%3D1032\napmbl = []\nfor link in links:\n\tnew_data = {}\n\tnew_data['sname'] = link.find('p', class_=\"p-name\").get_text()\n\tnew_data['spid'] = int(re.match(r\"^.*_(\\d+)\\.html.*$\", link.find(\"a\")['href']).group(1))\n\tnew_data['sbid'] = 1\n\tnew_data['scid'] = 1\n\tapmbl.append(new_data)\n\nmsqr = []\nfor d in apmbl:\n\tssql = '(%d, \"%s\", %d, %d)' % (d['spid'], d['sname'], d['sbid'], d['scid'])\n\tmsqr.append(ssql)\n\nsqlstr = \",\".join(msqr)\n\ncursor = db.cursor()\n\nsql = \"INSERT INTO product(sid, name, brandid, categoryid) values %s\" % sqlstr\n\nprint(sql)\nresult = cursor.execute(sql)","sub_path":"hsb/hsb_spider.py","file_name":"hsb_spider.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"178955406","text":"\"\"\"Keys for indexing arrays.\"\"\"\n\n# This file is part of the 'tomate' project\n# (http://github.com/Descanonge/tomate) and subject\n# to the MIT License as defined in the file 'LICENSE',\n# at the root of this project. 
© 2020 Clément HAËCK\n\n\nfrom typing import Iterator, List, Optional, Sequence, Union, TYPE_CHECKING\n\nimport numpy as np\n\nfrom tomate.custom_types import KeyLikeInt, KeyLikeVar, KeyLikeValue\n\nif TYPE_CHECKING:\n from tomate.coordinates.coord import Coord\n from tomate.coordinates.time import Time\n from tomate.coordinates.variables import Variables\n\n\nclass Key():\n \"\"\"Element for indexing a dimension of an array.\n\n Can be None, int, List[int] or slice.\n\n See :doc:`../accessor` for more information.\n\n :param key: Key-like object.\n\n INT_TYPES: List[Type]\n\n :attr type: str: {'none', 'int', 'list', 'slice'}\n :attr parent_size: int, None: Size of the sequence it would be applied to.\n Useful for reversing keys, or turning slices into lists.\n :attr shape: int, None: Length of what the key would select.\n Integer and None keys have shape 0 (they would get\n a scalar).\n Is None if the shape is undecidable (for some slices\n for instance).\n \"\"\"\n\n INT_TYPES = (int, np.integer) #: Types that are considered integer.\n\n def __init__(self, key: KeyLikeInt):\n self.value = None\n self.type = ''\n self.shape = None\n self.parent_size = None\n self.set(key)\n\n def set(self, key: KeyLikeInt):\n \"\"\"Set key value.\n\n :param key: Key-like:\n :param TypeError: If key is not a valid type.\n \"\"\"\n reject = False\n if isinstance(key, (list, tuple, np.ndarray)):\n reject = any(not isinstance(z, self.INT_TYPES) for z in key)\n tp = 'list'\n key = [int(k) for k in key]\n if len(key) == 0:\n raise IndexError(\"Key cannot be an empty list.\")\n elif isinstance(key, self.INT_TYPES):\n tp = 'int'\n key = int(key)\n elif isinstance(key, slice):\n tp = 'slice'\n elif key is None:\n tp = 'none'\n else:\n reject = True\n if reject:\n raise TypeError(f\"Key is not int, List[int], or slice (is {type(key)})\")\n self.value = key\n self.type = tp\n self.set_shape()\n\n def __eq__(self, other: 'Key') -> bool:\n return self.value == other.value\n\n def __repr__(self):\n return str(self.value)\n\n def __iter__(self) -> Iterator[KeyLikeInt]:\n \"\"\"Iter through values.\"\"\"\n try:\n val = self.tolist()\n except TypeError:\n val = [self.value]\n return iter(val)\n\n def copy(self) -> 'Key':\n \"\"\"Return copy of self.\"\"\"\n if self.type == 'list':\n value = self.value.copy()\n else:\n value = self.value\n key = self.__class__(value)\n key.shape = self.shape\n key.parent_size = self.parent_size\n return key\n\n def set_shape(self):\n \"\"\"Set shape if possible.\n\n Shape is the size an array would have\n if the key was applied.\n\n Is None if cannot be determined from\n the key alone.\n\n :raises IndexError: If slice of shape 0.\n \"\"\"\n if self.type == 'int':\n self.shape = 0\n elif self.type == 'list':\n self.shape = len(self.value)\n elif self.type == 'none':\n self.shape = 0\n elif self.type == 'slice':\n self.shape = guess_slice_shape(self.value)\n if self.shape == 0:\n raise IndexError(f\"Invalid slice ({self.value}) of shape 0.\")\n\n def set_shape_coord(self, coord: 'Coord'):\n \"\"\"Set shape using a coordinate.\n\n :param coord: The coordinate that would be used.\n :raises IndexError: If slice of shape 0.\n \"\"\"\n self.parent_size = coord.size\n if self.type == 'slice':\n self.shape = len(coord[self.value])\n if self.shape == 0:\n raise IndexError(f\"Invalid slice ({self.value}) of shape 0.\")\n\n def no_int(self) -> Union[List[int], slice, None]:\n \"\"\"Return value, replaces int with list.\"\"\"\n if self.type == 'int':\n return [self.value]\n return self.value\n\n def 
reverse(self):\n \"\"\"Reverse key.\n\n Equivalent to a [::-1].\n \"\"\"\n if self.type == 'list':\n self.value = self.value[::-1]\n elif self.type == 'slice':\n self.value = reverse_slice_order(self.value)\n\n def simplify(self):\n \"\"\"Simplify list into a slice.\n\n Transform a list into a slice if the list is\n a serie of integers of fixed step.\n \"\"\"\n if self.type == 'list':\n key = list2slice_simple(self.value)\n if isinstance(key, slice):\n self.type = 'slice'\n self.value = key\n\n def tolist(self) -> List[int]:\n \"\"\"Return list of key.\"\"\"\n a = self.value\n if self.type == 'int':\n a = [a]\n elif self.type == 'list':\n a = a.copy()\n elif self.type == 'slice':\n if self.parent_size is not None:\n a = list(range(*self.value.indices(self.parent_size)))\n else:\n a = guess_tolist(self.value)\n\n elif self.type == 'none':\n a = []\n return a\n\n def apply(self, seq: Sequence) -> Sequence:\n \"\"\"Apply key to a sequence.\n\n :raises TypeError: Key type not applicable.\n \"\"\"\n if self.type == 'int':\n return seq[self.value]\n if self.type == 'list':\n return [seq[i] for i in self.value]\n if self.type == 'slice':\n return seq[self.value]\n raise TypeError(f\"Not applicable (key type '{self.type}').\")\n\n def __mul__(self, other: 'Key') -> 'Key':\n \"\"\"Subset key by another.\n\n If `B = A[self]`\n and `C = B[other]`\n then `C = A[self*other]`\n\n The type of the resulting key is of the strongest\n type of the two keys (int > list > slice).\n\n :returns: self*other\n \"\"\"\n a = self.tolist()\n key = other.value\n if other.type == 'int':\n key = [key]\n\n if other.type == 'slice':\n res = a[key]\n else:\n res = [a[k] for k in key]\n\n if self.type == 'int' or other.type == 'int':\n key = self.__class__(int(res[0]))\n elif self.type == 'list' or other.type == 'list':\n key = self.__class__(list(res))\n else:\n key = self.__class__(list2slice_simple(res))\n key.shape = len(res)\n return key\n\n def __add__(self, other: 'Key') -> 'Key':\n \"\"\"Expand a key by another.\n\n If `B = A[self]` and `C=A[other]`\n concatenate(B, C) = A[self + other]\n\n The type of the resulting key is a list,\n or a slice if one of the argument is a slice\n and the result can be written as one.\n\n :returns: self + other\n \"\"\"\n a = self.tolist()\n b = other.tolist()\n key = a + b\n\n if self.type == 'slice' or other.type == 'slice':\n key = list2slice_simple(key)\n\n return self.__class__(key)\n\n def sort(self):\n \"\"\"Sort indices.\"\"\"\n if self.type == 'list':\n self.value = list(set(self.value))\n self.value.sort()\n if self.type == 'slice':\n if self.value.step is not None and self.value.step < 0:\n self.reverse()\n\n def make_list_int(self):\n \"\"\"Make list of length one an integer.\"\"\"\n if self.type == 'list' and len(self.value) == 1:\n self.type = 'int'\n self.value = self.value[0]\n self.shape = 0\n\n def make_int_list(self):\n \"\"\"Make integer a list of lenght one.\"\"\"\n if self.type == 'int':\n self.type = 'list'\n self.value = [self.value]\n self.shape = 1\n\n\nclass KeyVar(Key):\n \"\"\"Key for indexing Variable dimension.\n\n Add support for strings keys to Key.\n Allows to go from variable name to index (and\n vice-versa).\n\n :param key: Key-like object.\n Can also be variable name, list of variables names, or\n a slice made from strings.\n\n :attr var: bool: If the key-value can be used only for variables\n (*ie* it is or contains a string). 
In which case\n one can use `make_var_idx`.\n\n Examples\n --------\n Examples of values:\n >>> 0, [0, 1], 'sst', ['sst'], slice('sst', 'chl', 1)\n \"\"\"\n\n def __init__(self, key: KeyLikeVar):\n self.var = False\n super().__init__(key)\n\n def set(self, key: KeyLikeVar):\n \"\"\"Set value.\n\n :param key: Can be integer or string.\n Can be list of integers or strings (not a mix of both).\n Can be a slice. Step must be None or integer. Start and\n Stop can be integers or strings (not a mix of both).\n\n :raises TypeError: Key is not of valid type.\n :raises ValueError: Slice is not valid (step is not integer,\n or start and stop are not of the same type).\n \"\"\"\n reject = False\n var = False\n if isinstance(key, str):\n tp = 'int'\n var = True\n elif isinstance(key, self.INT_TYPES):\n tp = 'int'\n key = int(key)\n elif isinstance(key, (list, tuple, np.ndarray)):\n if all([isinstance(k, str) for k in key]):\n tp = 'list'\n var = True\n elif all([isinstance(k, self.INT_TYPES) for k in key]):\n tp = 'list'\n key = [int(k) for k in key]\n else:\n reject = True\n if len(key) == 0:\n raise IndexError(\"Key cannot be an empty list.\")\n elif isinstance(key, slice):\n tp = 'slice'\n slc = [key.start, key.stop, key.step]\n for i, s in enumerate(slc):\n if isinstance(s, self.INT_TYPES):\n slc[i] = int(s)\n start, stop, step = slc\n invalid = False\n if step is not None and not isinstance(step, int):\n invalid = True\n types = {type(a) for a in [start, stop]\n if a is not None}\n if types == set([str]):\n var = True\n if types not in (set([int]), set([str]), set()):\n invalid = True\n if invalid:\n raise ValueError(\"Invalid slice.\")\n elif key is None:\n tp = 'none'\n else:\n reject = True\n\n if reject:\n raise TypeError(\"Key is not int, str, List[int], List[str] or slice\"\n f\" (is {type(key)})\")\n self.value = key\n self.type = tp\n self.var = var\n self.set_shape()\n\n def set_shape(self):\n if self.type == 'slice' and self.var:\n self.shape = None\n else:\n super().set_shape()\n\n def reverse(self):\n if not (self.var and self.type == 'slice'):\n super().reverse()\n\n def simplify(self):\n if not self.var:\n super().simplify()\n\n def tolist(self) -> List[int]:\n \"\"\"Return list of key.\n\n :raises TypeError: If string slice cannot be transformed into list.\n \"\"\"\n if self.type == 'slice' and self.var:\n raise TypeError(\"Variable slice cannot be transformed into list.\")\n return super().tolist()\n\n def __mul__(self, other: 'KeyVar') -> 'KeyVar':\n \"\"\"Subset key bd another.\n\n See Key.__mul__ for details.\n\n :raises TypeError: If `other` value is a KeyLikeStr, then\n `self` must be too.\n \"\"\"\n if not other.var:\n return super().__mul__(other)\n if not self.var:\n raise TypeError(\"If other is var, self must be too.\")\n\n a = self.tolist()\n key = other.value\n if other.type == 'int':\n key = [key]\n\n if other.type == 'slice':\n slc = slice(a.index(key.start), a.index(key.stop), key.step)\n res = a[slc]\n else:\n res = [z for z in a if z in key]\n\n if self.type == 'int' or other.type == 'int':\n key = KeyVar(res[0])\n elif self.type == 'list' or other.type == 'list':\n key = self.__class__(list(res))\n return key\n\n def make_idx_var(self, variables: 'Variables'):\n \"\"\"Transform indices into variables names.\"\"\"\n if not self.var:\n names = variables.get_var_names(self.value)\n self.set(names)\n self.set_shape_coord(variables)\n\n def make_var_idx(self, variables: 'Variables'):\n \"\"\"Transform variables names into indices.\"\"\"\n if self.var:\n idx = 
variables.get_var_indices(self.value)\n self.set(idx)\n self.set_shape_coord(variables)\n\n\nclass KeyValue():\n \"\"\"KeyLike object storing values.\n\n Can act like a Key, but missing lot of features\n presently.\n Should not be stored in a keyring.\n \"\"\"\n def __init__(self, key: KeyLikeValue):\n self.value = None\n self.type = ''\n self.shape = None\n self.set(key)\n\n def set(self, key: KeyLikeValue):\n \"\"\"Set value.\"\"\"\n if isinstance(key, (list, tuple, np.ndarray)):\n tp = 'list'\n elif isinstance(key, slice):\n tp = 'slice'\n elif key is None:\n tp = 'none'\n else:\n tp = 'int'\n\n self.value = key\n self.type = tp\n self.set_shape()\n\n def set_shape(self):\n \"\"\"Set shape.\"\"\"\n if self.type in ['int', 'none']:\n self.shape = 0\n elif self.type == 'list':\n self.shape = len(self.value)\n\n def apply(self, coord: 'Coord') -> KeyLikeInt:\n \"\"\"Find corresponding index.\"\"\"\n if self.type == 'int':\n return coord.get_index(self.value)\n if self.type == 'list':\n return coord.get_indices(self.value)\n if self.type == 'slice':\n return coord.subset(self.value.start, self.value.stop)\n raise TypeError(f\"Not applicable (key type '{self.type}').\")\n\n def apply_by_day(self, coord: 'Time') -> KeyLikeInt:\n \"\"\"Find corresponding index on same day.\"\"\"\n if self.type == 'int':\n return coord.get_index_by_day(self.value)\n if self.type == 'list':\n return coord.get_indices_by_day(self.value)\n if self.type == 'slice':\n return coord.subset_by_day(self.value.start, self.value.stop)\n raise TypeError(f\"Not applicable (key type '{self.type}')\")\n\n\ndef simplify_key(key: KeyLikeInt) -> KeyLikeInt:\n \"\"\"Simplify a key.\n\n Transform a list into a slice if the list is\n a serie of integers of fixed step.\n \"\"\"\n if isinstance(key, (list, tuple, np.ndarray)):\n key = list2slice_simple(list(key))\n return key\n\n\ndef list2slice_simple(L: List[int]) -> Union[slice, List[int]]:\n \"\"\"Transform a list into a slice when possible.\n\n Step can be any integer.\n Can be descending.\n \"\"\"\n if len(L) < 2:\n return L\n\n diff = np.diff(L)\n if len(L) == 2:\n diff2 = np.array([0])\n else:\n diff2 = np.diff(diff)\n\n if np.all(diff2 == 0):\n step = diff[0]\n start = L[0]\n stop = L[-1] + step\n\n if stop < 0:\n stop = None\n L = slice(start, stop, step)\n\n return L\n\n\ndef list2slice_complex(L: List[int]) -> Union[slice, List[int]]:\n \"\"\"Transform a list of integer into a list of slices.\n\n Find all series of continuous integer with a fixed\n step (that can be any integer) of length greater than 3.\n\n Examples\n --------\n [0, 1, 2, 3, 7, 8, 9, 10, 16, 14, 12, 10, 3, 10, 11, 12]\n will yield:\n [slice(0, 4, 1), slice(8, 11, 1), slice(16, 9, -2), 3, slice(10, 13, 1)]\n \"\"\"\n if len(L) < 3:\n return L\n\n diff = list(np.diff(L))\n diff2 = np.diff(diff)\n\n # Index of separation between two linear parts\n sep = np.where(diff2 != 0)[0]\n # Only one of the index (this is a second derivative of a step function)\n sep_start = sep[np.where(np.diff(sep) == 1)[0]] + 2\n\n idx = list(sep_start)\n if diff[0] != diff[1]:\n idx.insert(0, 1)\n if diff[-1] != diff[-2]:\n idx.append(len(L)-1)\n diff.append(diff[-1]+1)\n\n idx.insert(0, 0)\n idx.append(len(L))\n\n slices = []\n for i in range(len(idx)-1):\n i1 = idx[i]\n i2 = idx[i+1]\n start = L[i1]\n\n if i2 - i1 == 1:\n slices.append([start])\n continue\n\n step = diff[i1]\n stop = L[i2-1] + 1\n\n if step < 0:\n stop -= 2\n if stop == -1:\n stop = None\n\n slc = slice(start, stop, step)\n slices.append(slc)\n\n return 
slices\n\n\ndef guess_slice_shape(slc: slice) -> Optional[int]:\n \"\"\"Guess the shape of a slice.\n\n :returns: None if it is not possible to guess.\n (for instance for slice(None, None))\n \"\"\"\n\n start, stop, step = slc.start, slc.stop, slc.step\n pos = step is None or step > 0\n if start is not None and stop is not None:\n if start * stop >= 0:\n if start > stop if pos else start < stop:\n return 0\n return abs(stop - start)\n\n if pos:\n if start is None and stop is not None and stop >= 0:\n return stop\n if stop is None and start is not None and start < 0:\n return -start\n else:\n if stop is None and start is not None and start >= 0:\n return start\n if start is None and stop is not None and stop < 0:\n return -stop - 1\n\n return None\n\n\ndef guess_tolist(slc: slice) -> List[int]:\n \"\"\"Guess a list of indices without the size.\n\n Transforming a slice into a list of indices requires\n the size of the sequence the slice is destined for.\n >>> indices = slice(0, 5).indices(size)\n\n In some cases, it is possible to make a guess:\n slice(a, b); a and b of same sign\n slice(None, a, s>0); a > 0\n slice(a, None, s>0); a < 0\n slice(None, a, s<0); a < 0\n slice(a, None, s<0); a > 0\n\n :raises ValueError: If cannot guess.\n \"\"\"\n start, stop, step = slc.start, slc.stop, slc.step\n if step is None:\n step = 1\n\n if start is not None and stop is not None:\n if start * stop >= 0:\n return list(range(start, stop, step))\n\n if step > 0:\n if start is None and stop is not None and stop >= 0:\n return list(range(0, stop, step))\n if stop is None and start is not None and start < 0:\n return list(range(start, 0, step))\n else:\n if stop is None and start is not None and start >= 0:\n return list(range(start, 0, step))\n if start is None and stop is not None and stop < 0:\n return list(range(-1, stop, step))\n\n raise ValueError(f\"Slice ({slc}) cannot be turned into list by guessing.\")\n\n\ndef reverse_slice_order(slc: slice) -> slice:\n \"\"\"Reverse a slice order.\n\n ie the order in which indices are taken.\n The indices themselves do not change.\n We assume the slice is valid (shape > 0).\n \"\"\"\n start, stop, step = slc.start, slc.stop, slc.step\n if step is None:\n step = 1\n\n shift = [1, -1][step > 0]\n over = [-1, 0][step > 0]\n if start is not None:\n if start == over:\n start = None\n else:\n start += shift\n if stop is not None:\n if stop == over:\n stop = None\n else:\n stop += shift\n\n step *= -1\n start, stop = stop, start\n return slice(start, stop, step)\n","sub_path":"src/tomate/keys/key.py","file_name":"key.py","file_ext":"py","file_size_in_byte":19763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"342325264","text":"'''\nDescription\nGiven a binary tree, find the subtree with maximum average. 
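The sign-based rules documented by guess_tolist above can be checked in isolation. This sketch re-derives only the "is a guess possible" predicate (our own helper, assuming nothing beyond the standard library):

def guessable(slc: slice) -> bool:
    # mirrors the documented cases: both bounds of the same sign, or one
    # bound whose sign pins the range relative to 0 given the step direction
    start, stop, step = slc.start, slc.stop, slc.step or 1
    if start is not None and stop is not None:
        return start * stop >= 0
    if step > 0:
        return ((start is None and stop is not None and stop >= 0)
                or (stop is None and start is not None and start < 0))
    return ((stop is None and start is not None and start >= 0)
            or (start is None and stop is not None and stop < 0))

assert guessable(slice(0, 5))            # both bounds, same sign
assert guessable(slice(None, 5))         # stop >= 0, ascending step
assert guessable(slice(-3, None))        # negative start, ascending step
assert not guessable(slice(None, None))  # unbounded: the size is required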
Return the root of the subtree.\n\nLintCode will print the subtree which root is your return node.\nIt's guaranteed that there is only one subtree with maximum average.\n\nHave you met this question in a real interview?\nExample\nGiven a binary tree:\n\n 1\n / \\\n -5 11\n / \\ / \\\n1 2 4 -2\nreturn the node 11.\n'''\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: the root of binary tree\n @return: the root of the maximum average of subtree\n \"\"\"\n\n average, result = 0, None\n def findSubtree2(self, root):\n # write your code here\n if root is None:\n return\n self.helper(root)\n return self.result\n\n def helper(self, root):\n if root is None:\n return 0, 0\n\n left_sum, left_size = self.helper(root.left)\n right_sum, right_size = self.helper(root.right)\n\n sum, size = left_sum + right_sum + root.val, left_size + right_size + 1\n\n if self.result is None or sum / size > self.average:\n self.average = sum / size\n self.result = root\n\n return sum, size","sub_path":"Tree/DFS/subtree_with_maximum_average.py","file_name":"subtree_with_maximum_average.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"80884425","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\nimport mysql.connector\nimport time\nURL = 'http://www.xicidaili.com/nn'\nHEADER = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',\n}\n\n\ndef init_db():\n conn = mysql.connector.connect(\n user='root',\n password='',\n database='isbn_cover',\n host='127.0.0.1',\n port='3306'\n )\n return conn\n\nif __name__ == \"__main__\":\n conn = init_db()\n cur = conn.cursor()\n sql = \"insert ignore into proxy (ip_proxy) values(%s)\"\n proxy_pool = list()\n data = list()\n while True:\n resp = requests.get(URL, headers=HEADER, timeout=30)\n soup = BeautifulSoup(resp.text, 'lxml')\n trTagList = soup.select('#ip_list tr')\n for trTag in trTagList[1:]:\n host = (trTag.select('td')[1]).string.strip()\n port = (trTag.select('td')[2]).string.strip()\n proxy_pool.append('http://' + host + ':' + port)\n for ip_proxy in proxy_pool:\n proxies = {\n \"http\": ip_proxy,\n }\n try:\n rep = requests.get(\n \"http://202.112.150.126/index.php?client=szlib&isbn=0030896282/cover\",\n proxies=proxies,\n headers=HEADER,\n timeout=60\n )\n except:\n continue\n if rep.status_code != 200:\n continue\n print(ip_proxy)\n data.append((ip_proxy,))\n if len(data) > 3:\n cur.executemany(sql, data)\n conn.commit()\n data.clear()\n if len(data) > 0:\n cur.executemany(sql, data)\n conn.commit()\n print(\"暂停十分钟!!!\")\n time.sleep(600)\n","sub_path":"proxy_pool/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"43333826","text":"import psycopg2 as dbapi2\nfrom table_operations.baseClass import baseClass\nfrom tables import BookObj\n\n\nclass Book(baseClass):\n def __init__(self):\n super().__init__(\"BOOK\", BookObj)\n\n def add_book(self, book):\n query = \"INSERT INTO BOOK (BOOK_NAME, RELEASE_YEAR, BOOK_EXPLANATION) VALUES (%s, %s, %s)\"\n fill = (book.book_name, book.release_year, book.explanation)\n\n with dbapi2.connect(self.url) as connection:\n cursor = connection.cursor()\n cursor.execute(query, fill)\n 
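The findSubtree2 solution above returns a (sum, size) pair from each recursive call, so every subtree's average is computed in a single pass. A self-contained harness on the stated example tree (TreeNode and the traversal are re-stated here so the snippet runs on its own; the expected answer is the node with value 11):

class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left = self.right = None

def find_max_average_subtree(root):
    best = {'avg': None, 'node': None}
    def dfs(node):
        if node is None:
            return 0, 0
        left_sum, left_size = dfs(node.left)
        right_sum, right_size = dfs(node.right)
        total, size = left_sum + right_sum + node.val, left_size + right_size + 1
        if best['node'] is None or total / size > best['avg']:
            best['avg'], best['node'] = total / size, node
        return total, size
    dfs(root)
    return best['node']

root = TreeNode(1)
root.left, root.right = TreeNode(-5), TreeNode(11)
root.left.left, root.left.right = TreeNode(1), TreeNode(2)
root.right.left, root.right.right = TreeNode(4), TreeNode(-2)
assert find_max_average_subtree(root).val == 11   # avg 13/3, the maximum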
cursor.close()\n\n return self.get_table()[-1].book_id\n\n def update(self, book_key, book):\n query = \"UPDATE BOOK SET BOOK_NAME = %s, RELEASE_YEAR = %s, BOOK_EXPLANATION = %s WHERE BOOK_ID = %s\"\n fill = (book.book_name, book.release_year, book.explanation, book_key)\n\n with dbapi2.connect(self.url) as connection:\n cursor = connection.cursor()\n cursor.execute(query, fill)\n cursor.close()\n\n return book_key\n\n def delete(self, book_key):\n\n query1 = \"DELETE FROM BOOK_AUTHOR WHERE BOOK_ID = %s\"\n query2 = \"DELETE FROM BOOK_CATEGORY WHERE BOOK_ID = %s\"\n query3 = \"DELETE FROM BOOK WHERE BOOK_ID = %s\"\n fill = (book_key,)\n\n with dbapi2.connect(self.url) as connection:\n cursor = connection.cursor()\n cursor.execute(query1, fill)\n cursor.execute(query2, fill)\n cursor.execute(query3, fill)\n cursor.close()\n\n def get_row(self, book_key):\n _book = None\n\n query = \"SELECT * FROM BOOK WHERE BOOK_ID = %s\"\n fill = (book_key,)\n\n with dbapi2.connect(self.url) as connection:\n cursor = connection.cursor()\n cursor.execute(query, fill)\n book = cursor.fetchone()\n if book is not None:\n _book = BookObj(book[0], book[1], book[2], book[3])\n\n return _book\n\n def get_table(self, select_columns=\"*\", where_columns=None, where_values=None):\n return self.getTableGeneric(select_columns, where_columns, where_values)\n","sub_path":"table_operations/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"288741568","text":"from Acquisition import aq_inner\nfrom Products.Five import BrowserView\nfrom datetime import date, datetime, timedelta\nfrom DateTime import DateTime\nimport time\n\nclass ContextToolsView(BrowserView):\n\n def isEventPast(self, event):\n \"\"\" Checks if the event is already past \"\"\"\n if event.portal_type != 'Event':\n return False\n else:\n try:\n t = DateTime(time.time())\n if event.end is not None:\n end = DateTime(event.end)\n return end.year() < t.year() or (end.year() == t.year() and end.month() < t.month()) or(end.year() == t.year() and end.month() == t.month() and end.day() < t.day())\n else:\n start = DateTime(event.start)\n return start.year() < t.year() or (start.year() == t.year() and start.month() < t.month()) or(start.year() == t.year() and start.month() == t.month() and start.day() < t.day())\n except:\n return False\n return True\n\n def isEventPermanent(self, event):\n NUM_YEARS = 2\n YEAR = 365\n YEAR_EXTRA = 365.25\n YEAR_DIFF = YEAR * NUM_YEARS\n YEARS_DIFF_EXTRA = YEAR_EXTRA * NUM_YEARS\n\n if event.portal_type != 'Event':\n return False\n else:\n try:\n t = DateTime(time.time())\n if event.end != None and event.start != None:\n end = event.end\n start = event.start\n today = datetime.today().date()\n\n diff = end.date() - start.date()\n if (diff >= timedelta(days=YEAR_DIFF) or diff >= timedelta(days=YEARS_DIFF_EXTRA)) and start.date() <= today:\n return True\n\n return False\n except:\n return False\n return True\n\nclass OnlineExperienceView(BrowserView):\n\n def getSlideshowItems(self):\n context = self.context\n inc = 2\n nthchild = 1\n\n inline_css = \"\"\n template = \".cd-fixed-bg:nth-child(%s) { background-image: url('%s');}\"\n\n if 'slideshow' in context:\n slideshow = context['slideshow']\n for _id in slideshow:\n obj = slideshow[_id]\n if obj:\n portal_type = getattr(obj, 'portal_type', None)\n if portal_type == \"Image\":\n url = obj.absolute_url()+\"/@@images/image/large\"\n new_image = template % 
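The Book class above consistently passes values through the DB-API parameter tuple instead of interpolating them into the SQL string. The same pattern with sqlite3 standing in for psycopg2, so the sketch runs without a Postgres server (the reduced table layout is an assumption):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE book (book_id INTEGER PRIMARY KEY, book_name TEXT)')
# placeholders plus a parameter tuple: the driver escapes values, not us
conn.execute('INSERT INTO book (book_name) VALUES (?)', ('Dune',))
row = conn.execute('SELECT book_name FROM book WHERE book_id = ?', (1,)).fetchone()
assert row == ('Dune',)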
(nthchild, url)\n inline_css += new_image\n nthchild += inc\n\n final_inline_css = \"\"\n return final_inline_css\n\n","sub_path":"src/plonetheme/veenkoloniaalmuseum/browser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"444060514","text":"# 플랫폼-프로세서 간 통신 프로그램\n# 김진웅\n# input: (from car_control)\n# output: (to car_control)\n\n\nimport threading\nimport time\n\nimport serial\n\n# ==========================================\nfrom src.serial_packet import SerialPacket\n\n\nclass PlatformSerial:\n def __init__(self, platform_port):\n self.port = platform_port\n try:\n self.ser = serial.Serial(self.port, 115200)\n except Exception as e:\n print('[PlatformSerial| INIT ERROR: ', e, ']')\n self.read_packet = SerialPacket()\n self.write_packet = SerialPacket()\n\n self.stop_fg = False\n self.threading = threading.Thread(target=self.communicate)\n self.threading.start()\n\n def restart(self):\n self.stop_fg = True\n self.threading.join()\n self.threading.start()\n\n def communicate(self):\n while True:\n self.recv()\n self.send()\n if self.stop_fg is True: break\n self.stop_fg = False\n\n def send(self):\n self.write_packet.alive = self.read_packet.alive\n try:\n self.ser.write(self.write_packet.write_bytes())\n except Exception as e:\n print('[PlatformSerial| WRITE ERROR', e, ']')\n\n def recv(self):\n try:\n b = self.ser.read(18)\n except Exception as e:\n print('[PlatformSerial| READ ERROR', e, ']')\n return\n self.read_packet.read_bytes(b)\n\n def read(self):\n return self.read_packet.speed, self.read_packet.enc\n\n def write(self, gear, speed, steer, brake):\n self.write_packet.gear = gear\n self.write_packet.speed = speed\n self.write_packet.steer = steer\n self.write_packet.brake = brake\n\n def status(self):\n gear = self.read_packet.gear\n speed = self.read_packet.speed / 10\n steer = self.read_packet.steer / 71\n brake = self.read_packet.brake / 200\n # print('[READ]')\n # print(self.read_packet.get_attr(mode='a'))\n # print(str(speed) + 'kph', str(round(steer, 4)) + 'deg', str(round(brake, 4)) + 'brake')\n # print()\n return gear, speed, steer, brake\n\n def stop(self):\n self.stop_fg = True\n\n\ndef t_move():\n platform.write(SerialPacket.GEAR_FORWARD, 40, SerialPacket.STEER_STRAIGHT, SerialPacket.BRAKE_NOBRAKE)\n\n\ndef t_back():\n platform.write(SerialPacket.GEAR_BACKWARD, 60, SerialPacket.STEER_STRAIGHT, SerialPacket.BRAKE_NOBRAKE)\n\n\ndef t_stop():\n platform.write(SerialPacket.GEAR_NEUTRAL, 0, SerialPacket.STEER_STRAIGHT, 60)\n\n\ndef t_neutral():\n platform.write(SerialPacket.GEAR_NEUTRAL, 0, SerialPacket.STEER_STRAIGHT, SerialPacket.BRAKE_NOBRAKE)\n\n\ndef t_left():\n platform.write(SerialPacket.GEAR_NEUTRAL, 0, SerialPacket.STEER_MAXLEFT, SerialPacket.BRAKE_NOBRAKE)\n\n\ndef t_right():\n platform.write(SerialPacket.GEAR_NEUTRAL, 0, SerialPacket.STEER_MAXRIGHT, SerialPacket.BRAKE_NOBRAKE)\n\n\nif __name__ == '__main__':\n platform = PlatformSerial('COM6')\n while True:\n platform.status()\n t_stop()\n\n if platform.read_packet.aorm == SerialPacket.AORM_AUTO:\n t = time.time()\n while time.time() - t < 2:\n platform.status()\n t_move()\n t = time.time()\n while time.time() - t < 2:\n platform.status()\n t_stop()\n","sub_path":"src/communication.py","file_name":"communication.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} 
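PlatformSerial above drives its communicate loop with a bare stop_fg boolean, and restart() calls start() on the same Thread object, which raises RuntimeError on a second call. A common alternative (a sketch, not the author's code) is a threading.Event checked by the loop, with join() on shutdown:

import threading
import time

class Worker:
    def __init__(self):
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._loop, daemon=True)
        self._thread.start()

    def _loop(self):
        while not self._stop.is_set():
            time.sleep(0.01)  # stand-in for one recv()/send() cycle

    def stop(self):
        self._stop.set()      # visible to the worker thread immediately
        self._thread.join()

w = Worker()
w.stop()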
+{"seq_id":"618215923","text":"\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/11/21 9:15\n# @Author : sch\n# @File : 2017_11_21.py\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport re\n\nplt.style.use('ggplot')\nplt.rcParams['font.sans-serif'] = ['SimHei'] #用来正常显示中文标签\nplt.rcParams['axes.unicode_minus'] = False #用来正常显示负号\n\n# client_credit\ndata_shouxin_2016 = pd.read_clipboard()\ndata_xinyong_2016 = pd.read_clipboard()\ndata_xinyong_2016 = data_xinyong_2016.drop('15信用等级', 1)\ndata_temp = pd.merge(data_shouxin_2016, data_xinyong_2016, on='经销商名称', how='left')\n\ndata_1 = pd.read_clipboard()\ndata_2 = pd.read_clipboard()\ndata_3 = pd.read_clipboard()\ndata_1 = data_1.drop(['经销商编码', '结算单位名称', '结算单位编码'], 1)\ndata_2 = data_2.drop(['经销商编码', '结算单位名称', '结算单位编码'], 1)\ndata_3 = data_3.drop(['经销商编码', '结算单位名称', '结算单位编码'], 1)\ndata_12 = pd.merge(data_1, data_2, on='经销商名称', how='outer')\ndata_all_offline = pd.merge(data_12, data_3, on='经销商名称', how='outer')\n\ndata_all = pd.merge(data_temp, data_all_offline, on='经销商名称', how='left')\ndata_all.to_csv('data_output/20171121/all_2016.csv')\n####################################################\ndata_shouxin_2015 = pd.read_clipboard()\ndata_xinyong_2015 = pd.read_clipboard()\ndata_xinyong_2015 = data_xinyong_2015.drop('14信用等级', 1)\ndata_temp = pd.merge(data_shouxin_2015, data_xinyong_2015, on='经销商名称', how='left')\n\ndata_all_offline = pd.read_clipboard()\ndata_all_offline = data_all_offline.drop('结算单位名称', 1)\n\ndata_all = pd.merge(data_temp, data_all_offline, on='经销商名称', how='left')\ndata_all.to_csv('data_output/20171121/all_2015.csv')\n\n\n# client_balance\ndata1 = pd.read_clipboard()\n# 时间清理\ndef clean_time(data, day, month):\n data['日'] = data['日'].fillna(day)\n data['月'] = data['月'].fillna(month)\n\ndata2 = pd.read_clipboard()\n\ndata_all = data1.append(data2)\n\n\n\n\n\n\n\n\n\n# plot\ndata = pd.read_clipboard()\np_float = data['16授信比例'].str.strip(\"%\").astype(float)/100;\ndata['16授信比例'] = p_float\nfig1 = sns.boxplot(x=data['16信用等级'], y=data['16总指标'])\nfig1.set_title('信用等级-总指标')\n\ndictionary = {'A': 5, 'B': 4, 'C': 3, 'D': 2, 'E': 1}\ndata['16信用得分'] = data['16信用等级'].map(dictionary)\nsns.jointplot('16信用得分', '16总指标', data)\n\ndata_1 = pd.read_clipboard()\ndata_1['本期应收'] = data_1['本期应收'].apply(lambda x: re.sub(',', '', x))\ndata_1[data_1['本期应收'].astype(float) > 0]['本期应收'].count()\ndata_1['本期应收'].astype(float).sum()\n\ndata_1['余额'] = data_1['余额'].apply(lambda x: float(re.sub(',', '', x)))\ndata_1['余额'].astype(float).max()\n\n\nd = pd.DataFrame()\nd['余额'] = data_1['余额']\nd.reset_index(inplace=True)\n\na = d[d['余额'] < -300000]\nnp.max(a['index'].diff())\n\n\n\n\n\n\n\nall_name = pd.read_clipboard()\ndata1 = pd.read_clipboard()\ndata2 = pd.read_clipboard()\n\ndef get_None(x, data, group, target):\n if x in data[group].values:\n return data.loc[data[group] == x, target].iloc[0]\n else:\n return 'NA'\nall_name['16信用等级'] = all_name['经销商名称'].apply(lambda x: get_None(x, data1, '经销商名称', '16信用等级'))\nall_name['16线上总指标'] = all_name['经销商名称'].apply(lambda x:get_None(x, data2, '经销商名称', '16线上总指标'))\nall_name['16授信比例'] = all_name['经销商名称'].apply(lambda x:get_None(x, data2, '经销商名称', '16授信比例'))\nall_name['2016年授信表备注'] = all_name['经销商名称'].apply(lambda x:get_None(x, data2, '经销商名称', '2016年授信表备注'))\n\noffline_1 = pd.read_clipboard()\nall_name['2016年线下电工'] = all_name['经销商名称'].apply(lambda x:get_None(x, offline_1, '经销商名称', '2016年电工销售指标'))\noffline_2 = pd.read_clipboard()\nall_name['2016年线下照明'] = 
all_name['经销商名称'].apply(lambda x:get_None(x, offline_2, '经销商名称', '2016年照明总指标'))\noffline_3 = pd.read_clipboard()\nall_name['2016年线下排插'] = all_name['经销商名称'].apply(lambda x:get_None(x, offline_3, '经销商名称', '2016年排插销售指标'))\n\nall_name.to_csv('data_output/20171121/2016shouxin_new.csv')\n\n\n\n\n\nall_name = pd.read_clipboard()\nxiaoshou_2014 = pd.read_clipboard()\n# xiaoshou_2014 = xiaoshou_2014.drop('结算单位名称', 1)\nall_name['14线下指标'] = all_name['经销商名称'].apply(lambda x: get_None(x, xiaoshou_2014, '经销商名称', '2014年销售指标'))\nshouxin_2015 = pd.read_clipboard()\nall_name['14授信'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2015, '经销商名称', '14信用等级'))\nxiaoshou_2015 = pd.read_clipboard()\nall_name['15线下指标'] = all_name['经销商名称'].apply(lambda x: get_None(x, xiaoshou_2015, '经销商名称', '2015年销售指标'))\nshouxin_2015 = pd.read_clipboard()\nall_name['15总指标'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2015, '经销商名称', '15总指标'))\nall_name['15授信比例'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2015, '经销商名称', '15授信比例'))\nall_name['15信用额'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2015, '经销商名称', '15信用额'))\nall_name['15备注'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2015, '经销商名称', '15年备注'))\nall_name_confirm = all_name.copy()\n\nshouxin_2016 = pd.read_clipboard()\nall_name['15信用等级'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2016, '经销商名称', '15信用等级'))\noffline_diangong_2016 = pd.read_clipboard()\nall_name['16线下电工'] = all_name['经销商名称'].apply(lambda x: get_None(x, offline_diangong_2016, '经销商名称', '2016年电工销售指标'))\noffline_zhaoming_2016 = pd.read_clipboard()\nall_name['16线下照明'] = all_name['经销商名称'].apply(lambda x: get_None(x, offline_zhaoming_2016, '经销商名称', '2016年照明总指标'))\noffline_paicha_2016 = pd.read_clipboard()\nall_name['16线下排插'] = all_name['经销商名称'].apply(lambda x: get_None(x, offline_paicha_2016, '经销商名称', '2016年排插销售指标'))\nall_name_confirm2 = all_name.copy()\n\nshouxin_2016 = pd.read_clipboard()\nall_name['16线上总指标'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2016, '经销商名称', '16总指标'))\nall_name['16授信比例'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2016, '经销商名称', '16授信比例'))\nall_name['16信用额'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2016, '经销商名称', '16年信用额'))\nall_name['16备注'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2016, '经销商名称', '16年备注'))\nall_name_confirm3 = all_name.copy()\n\nshouxin_2017 = pd.read_clipboard()\nall_name['16信用等级'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2017, '经销商名称', '16信用等级'))\noffline_diangong_2017 = pd.read_clipboard()\nall_name['17线下电工'] = all_name['经销商名称'].apply(lambda x: get_None(x, offline_diangong_2017, '经销商名称', '2017年电工销售指标'))\noffline_zhaoming_2017 = pd.read_clipboard()\nall_name['17线下照明'] = all_name['经销商名称'].apply(lambda x: get_None(x, offline_zhaoming_2017, '经销商名称', '2017年照明总指标'))\noffline_paicha_2017 = pd.read_clipboard()\nall_name['17线下排插'] = all_name['经销商名称'].apply(lambda x: get_None(x, offline_paicha_2017, '经销商名称', '2017年排插总指标'))\nall_name_confirm4 = all_name.copy()\n\nshouxin_2017 = pd.read_clipboard()\nall_name['17线上总指标'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2017, '经销商名称', '17总指标'))\nall_name['17授信比例'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2017, '经销商名称', '17授信比例'))\nall_name['17信用额'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2017, '经销商名称', '17年信用额'))\nall_name['17备注'] = all_name['经销商名称'].apply(lambda x: get_None(x, shouxin_2017, '经销商名称', '17年备注'))\nall_name_confirm5 = 
all_name.copy()\n\nall_name.to_csv('data_output/20171121/all_2016_shouxin.csv')\n\n\n\ncompany_2014 = pd.read_clipboard()\n\n\n","sub_path":"2017_11_21.py","file_name":"2017_11_21.py","file_ext":"py","file_size_in_byte":8514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"201526128","text":"import torch\nimport torch.nn as nn\nimport gluonnlp as nlp\nimport torch.nn.functional as F\nfrom typing import Union, Tuple\n\n\nclass BiLSTM(nn.Module):\n \"\"\" class for bidirectional LSTM \"\"\"\n def __init__(self, input_size, hidden_size, is_pair:bool=False) -> None:\n \"\"\" initialization of BiLSTM class \"\"\"\n super(BiLSTM, self).__init__()\n self._hidden_size = hidden_size\n self._is_pair = is_pair\n self._bilstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, batch_first=True, bidirectional=True)\n\n def forward(self, inputs: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:\n if self._is_pair:\n q1, q2 = inputs\n outputs1, _ = self._bilstm(q1)\n outputs2, _ = self._bilstm(q2)\n return outputs1, outputs2\n\n else:\n outputs, hidden = self._bilstm(inputs) # outputs : batch, seq_len, num_directions * hidden_size)\n return outputs, hidden[0]\n\n\nclass StackingLSTM(nn.Module):\n \"\"\" stacking LSTM \"\"\"\n def __init__(self, input_size, hidden_size, num_layers:int=2) -> None:\n \"\"\" initialization of stackingLSTM class \"\"\"\n super(StackingLSTM, self).__init__()\n self._hidden_size = hidden_size\n self._lstm = nn.LSTM(input_size, hidden_size, batch_first=True, num_layers=num_layers)\n\n def forward(self, inputs:torch.Tensor, initial_hidden:Tuple[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n # output: (batch, seq_len, num_layers * hidden_size)\n # hidden: (num_layers, batch, hidden_size)\n outputs, hidden = self._lstm(inputs, initial_hidden)\n\n return outputs, hidden\n\n\nclass Encoder(nn.Module):\n \"\"\" encoder class \"\"\"\n def __init__(self, vocab_ko:nlp.Vocab, embedding_dim:int, hidden_dim:int, padding_idx=1):\n super(Encoder,self).__init__()\n self.pad_idx = padding_idx\n\n self._embedding = nn.Embedding(len(vocab_ko.token_to_idx), embedding_dim, padding_idx=self.pad_idx) # NLC\n self._lstm = StackingLSTM(embedding_dim, hidden_dim)\n # self._dev = dev\n\n def forward(self, input_ko):\n embedded = self._embedding(input_ko)\n lstm_out, last_hidden = self._lstm(embedded) # last_hidden: (num_layers * num_directions, batch, hidden_size)\n return lstm_out, last_hidden\n\n def init_hidden(self):\n return\n\n\nclass Decoder(nn.Module):\n \"\"\" Decoder class \"\"\"\n def __init__(self, vocab_tgt:nlp.Vocab, embedding_dim:int, hidden_dim:int):\n \"\"\" Initialization of Decoder \"\"\"\n super(Decoder,self).__init__()\n self._embedding = nn.Embedding(len(vocab_tgt.token_to_idx), embedding_dim, padding_idx=1)\n self._lstm = StackingLSTM(embedding_dim, hidden_dim)\n self._wc = nn.Linear(hidden_dim * 2, hidden_dim)\n self._linear = nn.Linear(hidden_dim, len(vocab_tgt.token_to_idx))\n # self._softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, inputs, hidden_state, context_vector=None):\n \"\"\"\n Args:\n inputs: (batch,)\n hidden_state: decoder hidden state in current time step\n context_vector:\n\n Returns:\n output: decoder output for the current time step\n next_decoder_hidden: decoder hidden state for next time step\n \"\"\"\n embedded = self._embedding(inputs) # (batch, 1, hidden)\n lstm_out, next_decoder_hidden = self._lstm(embedded, 
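The repeated get_None lookups in the script above scan an entire DataFrame once per cell; pandas can perform the same left-join in one vectorized step. A sketch with made-up frames (the column names mimic the ones above, but the data is illustrative):

import pandas as pd

all_name = pd.DataFrame({'经销商名称': ['a', 'b', 'c']})
data1 = pd.DataFrame({'经销商名称': ['a', 'c'], '16信用等级': ['A', 'B']})

# build the name -> value mapping once, then map and fill misses with 'NA'
lookup = data1.set_index('经销商名称')['16信用等级']
all_name['16信用等级'] = all_name['经销商名称'].map(lookup).fillna('NA')
assert list(all_name['16信用等级']) == ['A', 'NA', 'B']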
hidden_state)\n next_hidden, next_cell_state = next_decoder_hidden\n\n if context_vector is not None:\n context_hidden_concat = torch.cat((context_vector, next_hidden[-1].unsqueeze(1)), dim=2)\n attentional_hidden_state = torch.tanh(self._wc(context_hidden_concat))\n output = self._linear(attentional_hidden_state)\n output = F.softmax(output, dim=2)\n\n else:\n output = self._linear(lstm_out)\n output = F.softmax(output, dim=2)\n\n # lstm_out: (batch, 1, hidden_size)\n # hidden: batch, num_layers * num_directions, hidden_size\n #\n return output, next_decoder_hidden\n\n\nclass Attention(nn.Module):\n \"\"\"\n will use general score function\n * reference: https://machinetalk.org/2019/03/29/neural-machine-translation-with-attention-mechanism/\n \"\"\"\n def __init__(self, hidden_dim:int):\n super(Attention, self).__init__()\n self._wa = nn.Linear(hidden_dim, hidden_dim)\n self._attn = nn.Linear(hidden_dim * 2, hidden_dim)\n\n def forward(self, decoder_inputs, decoder_hidden, encoder_outputs):\n # Dot score: h_t (dot) Wa (dot) h_s\n # encoder_output shape: (batch_size, max_len, hidden)\n # decoder_hidden shape: (batch_size, 1, hidden)\n # decoder_inputs shape: (batch_size, 1,\n\n # score will have shape: (batch_size, 1, max_len)\n score = torch.bmm(decoder_hidden, self._wa(encoder_outputs).permute(0,2,1))\n # alignment vector a_t\n alignment = F.softmax(score, dim=2)\n\n # context vector : (batch_size, 1, max_len) @ (batch_size, max_len, hidden) -> (batch, 1, hidden)\n # The context vector is what we use to compute the final output of the decoder.\n # It is the weighted average of the encoder’s output.\n context = torch.bmm(alignment, encoder_outputs)\n return context, alignment\n\n\n# Luong attention layer\n# https://pytorch.org/tutorials/beginner/chatbot_tutorial.html#decoder\nclass Attn(nn.Module):\n def __init__(self, method, hidden_size):\n super(Attn, self).__init__()\n self.method = method\n if self.method not in ['dot', 'general', 'concat']:\n raise ValueError(self.method, \"is not an appropriate attention method.\")\n self.hidden_size = hidden_size\n if self.method == 'general':\n self.attn = nn.Linear(self.hidden_size, hidden_size)\n elif self.method == 'concat':\n self.attn = nn.Linear(self.hidden_size * 2, hidden_size)\n self.v = nn.Parameter(torch.FloatTensor(hidden_size))\n\n def dot_score(self, hidden, encoder_output):\n return torch.sum(hidden * encoder_output, dim=2)\n\n def general_score(self, hidden, encoder_output):\n energy = self.attn(encoder_output)\n return torch.sum(hidden * energy, dim=2)\n\n def concat_score(self, hidden, encoder_output):\n energy = self.attn(torch.cat((hidden.expand(encoder_output.size(0), -1, -1), encoder_output), 2)).tanh()\n return torch.sum(self.v * energy, dim=2)\n\n def forward(self, hidden, encoder_outputs):\n # Calculate the attention weights (energies) based on the given method\n if self.method == 'general':\n attn_energies = self.general_score(hidden, encoder_outputs)\n elif self.method == 'concat':\n attn_energies = self.concat_score(hidden, encoder_outputs)\n elif self.method == 'dot':\n attn_energies = self.dot_score(hidden, encoder_outputs)\n\n # Transpose max_length and batch_size dimensions\n attn_energies = attn_energies.t()\n\n # Return the softmax normalized probability scores (with added dimension)\n return F.softmax(attn_energies, dim=1).unsqueeze(1)\n\n# https://pytorch.org/tutorials/beginner/chatbot_tutorial.html#decoder\nclass LuongAttnDecoderRNN(nn.Module):\n def __init__(self, attn_model, embedding, hidden_size, 
output_size, n_layers=1, dropout=0.1):\n super(LuongAttnDecoderRNN, self).__init__()\n\n # Keep for reference\n self.attn_model = attn_model\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.n_layers = n_layers\n self.dropout = dropout\n\n # Define layers\n self.embedding = embedding\n self.embedding_dropout = nn.Dropout(dropout)\n self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=(0 if n_layers == 1 else dropout))\n self.concat = nn.Linear(hidden_size * 2, hidden_size)\n self.out = nn.Linear(hidden_size, output_size)\n\n self.attn = Attn(attn_model, hidden_size)\n\n def forward(self, input_step, last_hidden, encoder_outputs):\n # Note: we run this one step (word) at a time\n # Get embedding of current input word\n embedded = self.embedding(input_step)\n embedded = self.embedding_dropout(embedded)\n # Forward through unidirectional GRU\n rnn_output, hidden = self.gru(embedded, last_hidden)\n # Calculate attention weights from the current GRU output\n attn_weights = self.attn(rnn_output, encoder_outputs)\n # Multiply attention weights to encoder outputs to get new \"weighted sum\" context vector\n context = attn_weights.bmm(encoder_outputs.transpose(0, 1))\n # Concatenate weighted context vector and GRU output using Luong eq. 5\n rnn_output = rnn_output.squeeze(0)\n context = context.squeeze(1)\n concat_input = torch.cat((rnn_output, context), 1)\n concat_output = torch.tanh(self.concat(concat_input))\n # Predict next word using Luong eq. 6\n output = self.out(concat_output)\n output = F.softmax(output, dim=1)\n # Return output and final hidden state\n return output, hidden","sub_path":"wk9_NMT/model/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":9244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"344659566","text":"# tests a particular algorithm on a bunch of strings.\n# UNFINISHED: see valid_string func or whatever its called\n# Anne-Laure Ehresmann\n\nimport sys\nimport random\n\n# evil global variables\nmax_iterations = 10000 # how many iterations the algorithm is allowed to go through before it declares it as failed\nnumber_of_strings_to_test_on = 2 # how many strings the algorithm will be tested on\nwindow_size = 6 # for now t = w\n\nclass algorithm:\n def __init__(self):\n self.rules = {}\n\n def add_rule(self, int, str):\n self.rules[int] = str\n\n\n# utitlity functions:\n\n# shuffles a string\ndef shuffle(string):\n return ''.join(random.sample(string,len(string)))\n\n# partitions a string, allows shifting\ndef partition_string(str, window_size: int, shift: int):\n partitions = []\n for i in range(0, len(str), window_size):\n partitions.append(str[i+shift:i+window_size+shift])\n\n # add the beginning of the string to the last partition\n partitions[-1] += (str[:shift])\n return partitions\n\n# returns the period of a string, if any\ndef principal_period(s):\n i = (s+s).find(s, 1, -1)\n return None if i == -1 else s[:i]\n\n# finds all cyclic shifts of a string\ndef cyclic_shifts(string):\n cyclic_shifts_list = []\n if len(string) == 1:\n return [string]\n else:\n for char in string:\n string = ''.join([string[1:],string[0]])\n cyclic_shifts_list.append(string)\n return cyclic_shifts_list\n\n\n#tests an algorithm on a specific string. 
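The Attn module above computes per-step energies, transposes to (batch, max_len), softmaxes, and unsqueezes a dimension for the later bmm. A quick shape check of the dot-score path with made-up sizes (a standalone sketch, not the training code):

import torch
import torch.nn.functional as F

L, B, H = 7, 4, 16
hidden = torch.randn(1, B, H)            # one decoder step
encoder_outputs = torch.randn(L, B, H)   # sequence-first, as in the module above

energies = torch.sum(hidden * encoder_outputs, dim=2)   # broadcast -> (L, B)
weights = F.softmax(energies.t(), dim=1).unsqueeze(1)   # (B, 1, L)
context = weights.bmm(encoder_outputs.transpose(0, 1))  # (B, 1, H)
assert context.shape == (B, 1, H)
assert torch.allclose(weights.sum(dim=2), torch.ones(B, 1))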
returns true if the string eventually reached a valid conf, false if not\ndef test_algorithm_on_a_string(str, algorithm, num_of_max_iterations: int, window_size: int):\n shift = 0\n for i in range(0, max_iterations):\n shift = (shift + 1) % window_size\n partitions = partition_string(str, window_size, shift)\n for partition in partitions:\n count = partition.count('0')\n if count in algorithm.rules:\n partition = algorithm.rules[count]\n if verify_configuration_validity(\"\".join(partitions)):\n print(\"succeeded!\")\n print(partitions)\n return true\n return false\n\n\n# verifies if the string is valid, used by test_algorithm\ndef verify_configuration_validity(string, valid_period):\n period_shifts = cyclic_shifts(valid_period)\n string_period = principal_period(string)\n for valid in period_shifts:\n if string_period == valid: return True\n if string_period == valid[::-1]: return True # if same as inversed period\n\n\ndef main():\n # input: [#R_rule_1] [pattern_rule_1] [#R_rule_2] [pattern_rule_2]\n\n if len(sys.argv) != 5:\n print('Error: incorrect number of rules given.')\n sys.exit(0)\n else:\n\n alg = algorithm()\n alg.add_rule(int(sys.argv[1]), sys.argv[2])\n alg.add_rule(int(sys.argv[3]), sys.argv[4])\n\n string = \"0\"*window_size + \"1\"*window_size\n valid_period = principal\n valid = 0\n for i in range(0, number_of_strings_to_test_on):\n print('testing on string: ' + string)\n string = shuffle(string)\n if test_algorithm_on_a_string(string, alg, max_iterations, window_size): valid += 1\n\n\nmain()\n","sub_path":"old_code_implementations/python/test_some_algorithm.py","file_name":"test_some_algorithm.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"504934393","text":"from django.urls import path\nfrom . 
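Two helpers above carry the core string machinery: principal_period uses the classic (s+s).find(s, 1, -1) trick, and cyclic_shifts enumerates rotations. Re-stated compactly so they can be sanity-checked standalone:

def principal_period(s):
    i = (s + s).find(s, 1, -1)
    return None if i == -1 else s[:i]

def cyclic_shifts(s):
    # rotations by 1..len(s); the last entry is the original string,
    # matching the behaviour of the loop-based version above
    return [s[i:] + s[:i] for i in range(1, len(s) + 1)] if s else []

assert principal_period('010101') == '01'
assert principal_period('0110') is None      # aperiodic
assert '1100' in cyclic_shifts('0011')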
import views\n\napp_name='home'\nurlpatterns = [\n path('create/', views.create,name='create'),\n path('update/', views.update,name='update'),\n path('', views.login,name='login'),\n path('logout/', views.logout,name='logout'),\n path('storymap/', views.storymap,name='storymap'),\n path('checkname/',views.checkname,name='checkname'),\n path('survey/', views.survey,name='survey'),\n path('userpattern/', views.userpattern,name='userpattern'),\n # path('voice/', views.voice,name='voice'),\n # path('voiceUnknown/', views.voiceUnknown,name='voiceUnknown'),\n # path('voiceError/', views.voiceError,name='voiceError'),\n # path('voiceContinue/', views.voiceContinue,name='voiceContinue'),\n # path('voiceContinue_con/', views.voiceContinue_con,name='voiceContinue_con'),\n path('pic_saver/',views.pic_saver,name='pic_saver'),\n path('graphmodel/', views.graphmodel,name='graphmodel'),\n path('road_direction/', views.road_direction,name='road_direction'),\n path('voicestart/', views.voicestart,name='voicestart'),\n path('voiceend/', views.voiceend,name='voiceend'),\n path('voicemode/', views.voicemode,name='voicemode')\n # path('voicecon/', views.voicecon,name='voicecon')\n # path('voicestand/', views.voicestand,name='voicestand')\n]\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"615859314","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function, division\nfrom io import open\nimport unicodedata\nimport string\nimport re\nimport random\nimport os\nimport time\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nimport numpy as np\nfrom numpy.random import permutation\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\n\nfrom itertools import chain\n\nfrom gensim.models.word2vec import Word2Vec\nimport word2vecClassifier #JIC\n\n\nimport time\nimport math\nimport multiprocessing\n\n\nuse_cuda = torch.cuda.is_available()\nprint(\"Utilisation de la carte graphique :\",use_cuda)\n\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (asMinutes(s), asMinutes(rs))\n\n\nclass my_RNN(nn.Module):\n def __init__(self, input_size, hidden_size, model , n_layers=1):\n super(my_RNN, self).__init__()\n self.n_layers = n_layers\n self.hidden_size = hidden_size\n self.input_size = input_size\n\n self.model = model\n\n\n self.rnn = nn.RNN(input_size,hidden_size,n_layers)\n #self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear_out = nn.Linear(hidden_size, 1)\n\n\n def forward(self, input, hidden):\n # Entrées :\n # input (variable(mat)) : les instances\n # Sortie\n # Variable(vect) : les prédictions\n output,hidden = self.rnn(input,hidden)\n return output,hidden\n\n\n\n\n def initHidden(self):\n # première couche cachée\n result = Variable(torch.zeros(1,1,self.hidden_size))\n if use_cuda:\n return result.cuda()\n else:\n return result\n\n\n\n\n def train_once(self, input_variables, target_variable, optimizer, criterion):\n # Réalise l'entraînement pour une seule phrase, et réalise la backpropagation\n # Entrées :\n # - n_epochs (int) : nombre de fois qu'on applique 
toutes les instance de l'ensemble d'apprentissage\n # - input_variables list of Variable(vect) : instances d'apprentissage\n # - target_variable Variable(vect(+1|-1))) : labels\n # - optimizer (pytorch object) : le résultat de optim.SGD ou optim.Adam\n # - criterion (pytorch object) : le résultat de nn.L1Loss ou nn.MSELoss\n # Sorties :\n # perte (float) : la valeur de la perte globale\n\n output_list, hidden_list = [],[]\n hidden = self.initHidden()\n\n optimizer.zero_grad()\n\n for word in input_variables :\n\n output,hidden = self(word,hidden)\n\n output_list.append(output)\n hidden_list.append(hidden)\n\n output = torch.tanh(self.linear_out(output))\n\n loss = criterion(output.view(1,-1), target_variable.view(-1))\n\n loss.backward()\n optimizer.step()\n\n return loss.data[0], output_list, hidden_list\n\n\n\n def trainIters(self, n_epochs, training_pairs, te_pairs, learning_rate, print_every=1000, eval_every = 1000):\n # Réalise l'entraînement complet, à partir des ensembles d'apprentissage\n # Entrées :\n # - n_epochs (int) : nombre de fois qu'on applique toutes les instance de l'ensemble d'apprentissage\n # - training_pairs (list of (list of (vect)), (+1|-1))) : instances d'apprentissage\n # - te_pairs (list of (Variable(vect), Variable(+1|-1))) : instances de test\n # - learning_rate (float) : devine ;)\n # - print_every (int) : imprime l'erreur moyenne toutes les print_every epochs\n # - eval_every (int) : teste le NN sur la base de test et imprime la matrice de confusion\n # Sorties :\n # none\n\n start = time.time()\n plot_losses = []\n print_loss_total = 0 # Reset every print_every\n\n optimizer = optim.SGD(self.parameters(), lr=learning_rate)\n # Autre choix possible :\n #optimizer = optim.Adam(self.parameters(), lr=learning_rate)\n\n\n criterion = nn.L1Loss()\n #criterion = nn.MSELoss()\n\n for epoch in range(1, n_epochs + 1):\n loss = 0\n\n for pair in training_pairs:\n target_variable,input_variables = pair\n\n loss,_,_ = self.train_once(input_variables, target_variable, optimizer, criterion)\n\n print_loss_total += loss\n\n\n\n if epoch % print_every == 0:\n # print the loss and time\n print_loss_avg = print_loss_total / (print_every*len(training_pairs))\n print_loss_total = 0\n print('%s (%d %d%%) %.4f' % (timeSince(start, epoch / n_epochs),\n epoch, epoch / n_epochs * 100, print_loss_avg))\n\n if epoch % eval_every == 0:\n self.evaluateRandomly(te_pairs) # show confusion matrix on test data\n\n\n\n\n\n\n\n def evaluateRandomly(self, pairs):\n # evaluate on all pairs, print the confusion matrix\n n_successes = 0\n n_pos = 0 # also computes the proportion of positive reviews\n\n TP,TN,FP,FN = 0,0,0,0\n\n\n for pair in pairs: # replace with pairs[:n] for testing\n\n target_variable,input_variables = pair\n hidden = self.initHidden()\n output_list, hidden_list = [],[]\n\n\n for word in input_variables :\n output,hidden = self(word,hidden)\n output_list.append(output)\n hidden_list.append(hidden)\n\n output = torch.tanh(self.linear_out(output))\n\n #success = (output[int(pair[1])] == max(output))\n note = pair[0].data[0,0]\n predicted = output.data[0][0]\n\n #print('note',note)\n #print('predicted',predicted)\n success = (note*predicted > 0)\n #print('success',success[0])\n\n\n if success[0] :\n n_successes += 1\n if note>0:\n TP += 1\n else:\n TN += 1\n else:\n if note>0:\n FP += 1\n else:\n FN += 1\n\n n_pos = n_pos+1 if note==1 else n_pos\n\n print('')\n print('')\n print('Confusion matrix ')\n print()\n print(\" \\t\\t Actual class\")\n print(\" \\t\\t Pos \\t Neg\")\n 
print(\"Predicted Pos \\t {} \\t {}\".format(TP,FN))\n print(\" Neg \\t {} \\t {}\".format(FP,TN))\n print('')\n print('\\t \\t \\t \\t Positive reviews (%)) : ',100*n_pos/len(pairs))\n print('\\t \\t \\t \\t Success rate (%) : ',100*n_successes/len(pairs))\n\n\n\n # evaluate on all pairs, print the confusion matrix\n n_expected = n_pos+1\n #= sum([note.data[0] for (note,_) in pairs if note.data[0] == 1])\n\n predicted_scores = []\n actual = []\n\n\n for pair in pairs: # replace with pairs[:n] for testing\n\n target_variable,input_variables = pair\n hidden = self.initHidden()\n output_list, hidden_list = [],[]\n\n\n for word in input_variables :\n output,hidden = self(word,hidden)\n output_list.append(output)\n hidden_list.append(hidden)\n\n output = torch.tanh(self.linear_out(output))\n\n #success = (output[int(pair[1])] == max(output))\n note = pair[0].data[0,0]\n predicted = output.data[0,0,0]\n\n actual.append(0 if note == -1 else 1 ) # on remplace la valeur -1 par un 0,\n # ce sera plus simple pour les calculs\n predicted_scores.append(predicted)\n\n\n # find the good border\n best = int(n_expected) * [(0,-1)] # liste **triée, qui contient les n_expected meilleurs\n # éléments trouvés jusqu'ici, sous la forme de couples (indice,valeur)\n\n for i_p in range(len(predicted_scores)):\n x = predicted_scores[i_p]\n\n if x > best[-1][1] :\n # si cet élément a une meilleur score que le plus petit élément sauvegardé:\n # on cherche l'indice de la liste où il s'insère\n\n i_insert = 0 # trouver l'indice i où insérer l'élément\n while x < best[i_insert][1]:\n i_insert +=1\n best = best[:i_insert] + [(i_p,x)] + best[i_insert:-1]\n # on insère d au bon endroit pour que la liste reste triée\n\n\n predicted = np.zeros((len(actual),))\n predicted[[ind for (ind,val) in best]] = 1\n\n actual = np.array(actual)\n TP = int(sum( predicted*actual ))\n TN = int(sum( (1-predicted)*(1-actual) ))\n FP = int(sum( predicted*(1-actual) ))\n FN = int(sum( (1-predicted)*actual ))\n\n\n print('')\n print('')\n print('Confusion matrix (threshold method)')\n print()\n print(\" \\t\\t Actual class\")\n print(\" \\t\\t Pos \\t Neg\")\n print(\"Predicted Pos \\t {} \\t {}\".format(TP,FN))\n print(\" Neg \\t {} \\t {}\".format(FP,TN))\n print('')\n print('\\t \\t \\t \\t Positive reviews (%)) : ',100*int(TP+FP)/len(pairs))\n print('\\t \\t \\t \\t Success rate (%) : ',100*int(TP+TN)/len(pairs))\n\n\n\n\n# overriding getData to only load 1 folder\ndef getData(folder):\n \"\"\"\n Input:\n - folder: string of the path of a folder containing txt files.\n Output:\n - listdata: list of [Y, X] (e.g. 
Y = 'Positive', X = \"very cool\")\n \"\"\"\n listdata = []\n\n try :\n filenames = os.listdir(folder)\n for filename in filenames[:10]: # change here\n\n with open(os.path.join(folder, filename), 'r') as f:\n for line in f:\n\n line2 = line.strip().split('\\t')\n if len(line2) == 2:\n listdata.append(line2)\n except : # folder is a filenamewith open(os.path.join(folder, filename), 'r') as f:\n for line in folder[:10000]:\n line2 = line.strip().split('\\t')\n if len(line2) == 2:\n listdata.append(line2)\n\n return listdata\n\n\ndef folder2data(train_filename,test_filename,balanced_tr ,balanced_te, n_features):\n # Entrées :\n # - train_filename (str) : le nom du **dossier** (et non pas le nom du fichier) où se trouvent les instances d'apprentissage\n # - test_filename (str) : le nom du **dossier** (et non pas le nom du fichier) où se trouvent les instances de test\n # - balanced_tr (bool) : True si l'ensemble d'apprentissage est équilibré; False s'il est laissé tel quel\n # - balanced_te (bool) : True si l'ensemble de test est équilibré; False s'il est laissé tel quel\n # - n_features (int) : nombre de variables pour coder chaque instance\n # Sorties :\n # - cuple (new_tr_pairs, new_te_pairs):\n # new_tr_pairs : (list of (vect), (+1|-1)))\n # new_te_pairs : (list of (Variable(vect), Variable(+1|-1)))\n\n pairs = getData(train_filename)\n\n\n if balanced_tr :\n \"\"\"\n #Pour un équilibrage 75/25\n pairs_using_numbers = [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]\n Positive_reviews = [(1,text) for (target,text) in pairs if target == 'Positive']\n pairs_using_numbers += Positive_reviews[:int(len(pairs_using_numbers)*3)] # différence ici\n tr_pairs = pairs_using_numbers\n \"\"\"\n #Pour un équilibrage 50/50\n pairs_using_numbers = [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral' )]\n Positive_reviews = [(1,text) for (target,text) in pairs if target == 'Positive']\n pairs_using_numbers += Positive_reviews[:int(len(pairs_using_numbers))]\n tr_pairs = pairs_using_numbers\n\n else :\n pairs_using_numbers = [(1,text) for (target,text) in pairs if target == 'Positive']\n pairs_using_numbers += [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]\n tr_pairs = pairs_using_numbers\n\n pairs = getData(test_filename)\n\n if balanced_te :\n pairs_using_numbers = [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]\n Positive_reviews = [(1,text) for (target,text) in pairs if target == 'Positive']\n pairs_using_numbers += Positive_reviews[:int(len(pairs_using_numbers))]\n te_pairs = pairs_using_numbers\n\n else :\n pairs_using_numbers = [(1,text) for (target,text) in pairs if target == 'Positive']\n pairs_using_numbers += [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]\n te_pairs = pairs_using_numbers\n\n\n # print([text for (_,text) in tr_pairs[:2]])\n\n \"\"\"\n tfidf_vectorizer = TfidfVectorizer(ngram_range=(1,2))\n tfidf_vectorizer.fit([ text for (_,text) in tr_pairs+te_pairs])\n\n # fitting\n X_tr_token = tfidf_vectorizer.transform([ text for (_,text) in tr_pairs])\n X_te_token = tfidf_vectorizer.transform([ text for (_,text) in te_pairs])\n\n truncatedsvd = TruncatedSVD(n_components=n_features) # prépare à projeter les données dans un espace à n_components dimensions\n truncatedsvd.fit(X_tr_token)\n truncatedsvd.fit(X_te_token)\n\n # Réduction de dimension\n X_tr_reduced_dim = truncatedsvd.transform(X_tr_token)\n 
X_te_reduced_dim = truncatedsvd.transform(X_te_token)\n \"\"\"\n\n W2Vmodel = Word2Vec(sentences= [text.lower().split() for (_,text) in chain(tr_pairs,te_pairs) ] ,\n size= n_features,\n #window=self.window_size,\n negative=20,\n iter=50,\n seed=1000,\n workers=multiprocessing.cpu_count())\n\n\n new_tr_pairs = []\n for i in range(len(tr_pairs)):\n (note,text) = tr_pairs[i]\n note = Variable(torch.FloatTensor([[note]]))\n\n vect_list = [Variable(torch.FloatTensor(np.array( W2Vmodel[word.lower()]))).view(1,-1) for word in text.split() if word.lower() in W2Vmodel]\n\n new_tr_pairs.append((note,vect_list))\n\n new_te_pairs = []\n for i in range(len(te_pairs)):\n (note,text) = te_pairs[i]\n note = Variable(torch.FloatTensor([[note]]))\n\n vect_list = [Variable(torch.FloatTensor(np.array(W2Vmodel[word.lower()]))).view(1,-1) for word in text.split() if word.lower() in W2Vmodel]\n new_te_pairs.append((note,vect_list))\n\n\n\n return new_tr_pairs, new_te_pairs,W2Vmodel\n\n\n\n\n\"\"\"\ndef sentences(pairs):\n for pair in pairs:\n yield pair[1]\n\"\"\"\n\n\"\"\"\nclass sentences():\n def __init__(self,pairs):\n self.pairs = pairs\n\n def __iter__():\n retur\n\n return [pair[1] for pair in pairs]\n\"\"\"\n\"\"\"\ndef sentences(pairs):\n return [pair[1] for pair in pairs]\n\nRNN = my_RNN(n_features, hidden_size, trainW2V = iter(sentences(tr_pairs+te_pairs)), n_layers = 1)\n\"\"\"\n\n# ==================================================================\n# ================ Using the RNN in itself =========================\n# ==================================================================\n\n#training_set_folder = \"../../data/data_books_training_set\"\n#test_set_folder = \"../../data/data_books_testing_set\"\n\ntraining_set_folder = \"../../data/cleaned/Automotive_5.txt\"\ntest_set_folder = \"../../data/cleaned/Musical_Instruments_5.txt\"\n\nn_features=2\ntr_pairs,te_pairs,W2Vmodel = folder2data(training_set_folder,test_set_folder,balanced_tr=False, balanced_te=False, n_features=n_features)\n\nprint(\"instances d'entraînement\",len(tr_pairs))\nprint(\"instances de test\",len(te_pairs))\n\nhidden_size = 100\n\n\n\n\nRNN = my_RNN(n_features, hidden_size, model=W2Vmodel , n_layers = 1)\n\n#RNN.evaluateNpairs(te_pairs,1) # show some examples\n\n\nlr = 0.000005\nN_epochs = 50\nprint(\"learning rate\",lr)\nRNN.trainIters(N_epochs, tr_pairs, te_pairs, lr, 1,5)\n\n\nRNN.evaluateRandomly(te_pairs) # show global results\n\ntorch.save(RNN,'RNN_W2V')\n#cours ; cd 2eme_partie_S9/Transfer_learning/TransferLearningProject/learning/ ; python rnn_word2vec.py\n\n\"\"\"\nRNN = torch.load('RNN')\nRNN.evaluateRandomly(te_pairs)\n\"\"\"\nprint('')\nprint('')\n\nprint(' Done')\nprint('')\nprint('')\nprint('')\n","sub_path":"learning/rnn_word2vec.py","file_name":"rnn_word2vec.py","file_ext":"py","file_size_in_byte":16377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"228196963","text":"from composition import Composition\r\n\r\nmod_table = {\r\n 'Carbamidomethyl' : ['Cys', 57.0214],\r\n 'Deamidated' : ['Asn', 0.9840099999999978],\r\n 'HexNAc' : ['Asn', 203.07937],\r\n 'pyroGlu': ['Gln', -17.02655]}\r\n\r\nclass Modification:\r\n \"\"\"description of class\"\"\"\r\n\r\n def __init__(self, mod_name, mod_pos = -1, mod_num = 1, mass = 0.0, target=''):\r\n self.name = mod_name\r\n self.position = mod_pos\r\n self.number = mod_num\r\n global mod_table\r\n key = mod_table.get(mod_name)\r\n if key == None:\r\n self.mass = mass\r\n self.target = target\r\n mod_table[mod_name] 
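The hand-rolled "best" list in evaluateRandomly above keeps the n_expected highest predicted scores by maintaining a sorted buffer; heapq.nlargest performs the same selection in one call. The scores below are made-up illustration values:

import heapq

predicted_scores = [0.9, -0.2, 0.4, 0.7, -0.8]
n_expected = 2

top = heapq.nlargest(n_expected, enumerate(predicted_scores), key=lambda p: p[1])
assert {i for i, _ in top} == {0, 3}   # indices of the two highest scores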
= [target, mass]\r\n else:\r\n self.mass = mod_table.get(mod_name)[1]\r\n self.target = mod_table.get(mod_name)[0]\r\n","sub_path":"HH_glycopeptide - KK testing v2/modification.py","file_name":"modification.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"462738813","text":"import os\nimport numpy\nimport shutil\nimport pickle\nimport json\nimport datetime\nfrom Models.metrics import ModelMetrics, print_metrics\nfrom Utils.utils import get_abs_path, get_formatted_date\nfrom Models.reports import Report\n\n\ndef job_collector(Config, DefConfig, kwargs):\n worker = Collector(Config, DefConfig, kwargs)\n worker.run()\n\n\nclass Collector:\n def __init__(self, Config, DefConfig, kwargs):\n self.Config = Config\n if \"test_docs\" not in Config or not Config[\"results\"]:\n print (\"Documents have not been classified in this process chain.\")\n print (\"Consolidation can't be performed.\")\n return\n self.rank_threshold = 0.5\n if Config['consolidatedrank'] == \"True\":\n try:\n self.rank_threshold = float(Config[\"consolidated_rank_threshold\"])\n except ValueError:\n self.rank_threshold = 0.5\n self.testLabels = numpy.concatenate([numpy.array(x.labels).\n reshape(1,\n len(self.Config[\"predefined_categories\"])) for x in self.Config[\"test_docs\"]])\n self.qLabs = len(self.Config[\"predefined_categories\"])\n self.predictions = numpy.zeros([len(self.testLabels), self.qLabs])\n self.metrics = {}\n self.useProbabilities = False\n self.save_reports = False\n self.runtime = False\n\n def run(self):\n print (\"\\nCalculate consolidated metrics...\")\n if not self.Config[\"results\"]:\n print(\"No results to consolidate them. Consolidation can not be performed.\")\n return\n if self.Config[\"save_reports\"] == \"True\":\n if not self.Config[\"reports_path\"] or not os.path.isdir(get_abs_path(self.Config, \"reports_path\")):\n print(\"Wrong path to the folder, containing reports. Reports can not be created.\")\n else:\n self.save_reports = True\n if self.Config[\"prepare_resources_for_runtime\"] == \"True\":\n if (not self.Config[\"saved_resources_path\"] or\n not os.path.isdir(get_abs_path(self.Config, \"saved_resources_path\"))):\n print(\"Wrong path to the folder, containing resources for runtime. Resources can not be saved.\")\n else:\n self.runtime = True\n print(\"Rank threshold for consolidated results: %.2f\" % (self.rank_threshold))\n if self.save_reports or self.Config[\"show_consolidated_results\"] == \"True\":\n self.getConsolidatedResults()\n self.get_metrics()\n if self.save_reports:\n self.saveReports()\n if self.runtime:\n saved_rc_path = get_abs_path(self.Config, \"saved_resources_path\")\n if len(os.listdir(saved_rc_path)) > 0:\n print(\"Warning: folder %s is not empty. 
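The Modification constructor above either reads an existing entry out of the global mod_table or registers a new one. dict.setdefault condenses that lookup-or-insert into one call (an alternative phrasing for illustration, not the original class):

mod_table = {'Carbamidomethyl': ['Cys', 57.0214]}

def register(name, target='', mass=0.0):
    # returns the stored [target, mass], inserting the new pair if absent
    return mod_table.setdefault(name, [target, mass])

assert register('Carbamidomethyl') == ['Cys', 57.0214]   # existing entry wins
assert register('MyMod', 'Lys', 12.0) == ['Lys', 12.0]   # new entry stored
assert 'MyMod' in mod_table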
All its content will be deleted.\" % saved_rc_path)\n shutil.rmtree(saved_rc_path)\n os.makedirs(saved_rc_path, exist_ok=True)\n print(\"\\nCollect arfifacts for runtime...\")\n self.prepare_resources_for_runtime()\n\n\n def getConsolidatedResults(self):\n for key, res in self.Config[\"results\"].items():\n for i in range(len(res)):\n for j in range(self.qLabs):\n if res[i][j] == 1:\n self.predictions[i][j] += 1\n #elif res[i][j] >= self.rank_threshold:\n elif res[i][j] >= self.Config[\"ranks\"][key]:\n self.predictions[i][j] += 1\n q_models = len(self.Config[\"results\"])\n for p1 in self.predictions:\n for p in p1:\n if p >= q_models * self.rank_threshold:\n p = 1\n else:\n p = 0\n\n def get_metrics(self):\n ModelMetrics(self)\n if self.Config[\"show_consolidated_results\"] == \"True\":\n print_metrics(self)\n\n def saveReports(self):\n print (\"Save report...\")\n report = Report()\n report.requestId = self.Config[\"reqid\"]\n report.sourcesPath = self.Config[\"actual_path\"]\n report.datasetPath = self.Config[\"test_data_path\"]\n\n tokenization_options = [\"language_tokenization\", \"normalization\", \"stop_words\", \"exclude_positions\",\n \"extra_words\", \"exclude_categories\"]\n for t in tokenization_options:\n report.preprocess[t] = self.Config[t]\n for t in self.Config[\"test_docs\"]:\n report.docs[t.name] = {}\n report.docs[t.name][\"actual\"] = \",\".join(t.nlabs)\n if not self.Config[\"exclude_categories\"]:\n exclude_categories = []\n else:\n exclude_categories = self.Config[\"exclude_categories\"].split(\",\")\n cNames = [''] * (len(self.Config[\"predefined_categories\"]) - len(exclude_categories))\n for k, v in self.Config[\"predefined_categories\"].items():\n if k not in exclude_categories:\n cNames[v] = k\n report.categories = cNames\n for key, val in self.Config[\"results\"].items():\n for i in range(len(val)):\n labs = []\n for j in range(self.qLabs):\n #if val[i][j] >= self.rank_threshold:\n if val[i][j] >= self.Config[\"ranks\"][key]:\n labs.append(\"%s[%.2f]\" % (cNames[j], val[i][j]))\n report.docs[self.Config[\"test_docs\"][i].name][key] = \",\".join(labs)\n for key, val in self.Config[\"metrics\"].items():\n report.models[key] = val\n for key, val in self.Config[\"ranks\"].items():\n report.ranks[key] = val\n if len(self.Config[\"results\"]) > 1:\n for i in range(len(self.predictions)):\n labs = []\n for j in range(self.qLabs):\n if self.predictions[i][j] == 1:\n labs.append(cNames[j])\n report.docs[self.Config[\"test_docs\"][i].name][\"consolidated\"] = \",\".join(labs)\n report.models[\"consolidated\"] = self.rank_threshold\n rPath = get_abs_path(self.Config, \"reports_path\") + \"/\" + self.Config[\"reqid\"] + \".json\"\n with open(rPath, 'w', encoding=\"utf-8\") as file:\n json.dump(report.toJSON(), file, indent=4)\n file.close()\n\n def prepare_resources_for_runtime(self):\n tokenization_options = [\"language_tokenization\", \"normalization\", \"stop_words\", \"exclude_positions\",\n \"extra_words\", \"max_seq_len\", \"max_chars_seq_len\", \"single_doc_lang_tokenization_lib_path\"]\n self.Config[\"resources\"][\"tokenization\"] = {}\n ds = datetime.datetime.now()\n self.outDir = get_abs_path(self.Config, \"saved_resources_path\") + \"/\"\n for t in tokenization_options:\n if t != \"single_doc_lang_tokenization_lib_path\":\n self.Config[\"resources\"][\"tokenization\"][t] = self.Config[t]\n elif self.Config[\"language_tokenization\"] == \"True\":\n self.Config[\"resources\"][\"tokenization\"][\"single_doc_lang_tokenization_lib_path\"] = \\\n 
self.copyFile(get_abs_path(self.Config, \"single_doc_lang_tokenization_lib_path\"))\n isW2VNeeded = False\n for key, val in self.Config[\"resources\"][\"models\"].items():\n val[\"created_model_path\"] = self.copyFile(val[\"created_model_path\"])\n if \"w2v\" in val and val[\"w2v\"] == \"True\":\n isW2VNeeded = True\n if not isW2VNeeded and \"w2v\" in self.Config[\"resources\"]:\n self.Config[\"resources\"].pop(\"w2v\", None)\n if \"w2v\" in self.Config[\"resources\"]:\n w2vDict = {}\n isFirstLine = True\n fEmbeddings = open(self.Config[\"resources\"][\"w2v\"][\"created_model_path\"], encoding=\"utf-8\")\n for line in fEmbeddings:\n if isFirstLine == True:\n isFirstLine = False\n continue\n split = line.strip().split(\" \")\n word = split[0]\n vector = numpy.array([float(num) for num in split[1:]])\n w2vDict[word] = vector\n fEmbeddings.close()\n with open(self.Config[\"resources\"][\"w2v\"][\"created_model_path\"] + '.pkl', 'wb') as file:\n pickle.dump(w2vDict, file, pickle.HIGHEST_PROTOCOL)\n file.close()\n self.Config[\"resources\"][\"w2v\"][\"created_model_path\"] = \\\n self.copyFile(self.Config[\"resources\"][\"w2v\"][\"created_model_path\"] + '.pkl')\n if \"indexer\" in self.Config[\"resources\"]:\n self.Config[\"resources\"][\"indexer\"] = self.copyFile(self.Config[\"resources\"][\"indexer\"])\n if \"vectorizer\" in self.Config[\"resources\"]:\n self.Config[\"resources\"][\"vectorizer\"] = self.copyFile(self.Config[\"resources\"][\"vectorizer\"])\n if \"ptBertModel\" in self.Config[\"resources\"]:\n self.Config[\"resources\"][\"ptBertModel\"] = self.copyFile(self.Config[\"resources\"][\"ptBertModel\"])\n self.Config[\"resources\"][\"vocabPath\"] = self.copyFile(self.Config[\"resources\"][\"vocabPath\"])\n cNames = [''] * len(self.Config[\"predefined_categories\"])\n for k, v in self.Config[\"predefined_categories\"].items():\n cNames[v] = k\n with open(self.outDir + 'labels.txt', 'w', encoding=\"utf-8\") as file:\n file.write(\",\".join(cNames))\n file.close()\n self.Config[\"resources\"][\"labels\"] = \"labels.txt\"\n self.Config[\"resources\"][\"consolidatedRank\"] = self.rank_threshold\n with open(self.outDir + 'config.json', 'w', encoding=\"utf-8\") as file:\n json.dump(self.Config[\"resources\"], file, indent=4)\n file.close()\n de = datetime.datetime.now()\n print(\"\\nArtifacts are copied into the folder %s in %s\"%(\n get_abs_path(self.Config, \"saved_resources_path\"), get_formatted_date(ds, de)))\n\n def copyFile(self, inPath):\n dir, name = os.path.split(inPath)\n outPath = self.outDir + name\n shutil.copy(inPath, outPath)\n return name\n\n","sub_path":"TrainAndTest/Models/consolidation.py","file_name":"consolidation.py","file_ext":"py","file_size_in_byte":10078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"465839810","text":"#!/usr/bin/env python3\nimport os, sys\nimport collections\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))\n\nfrom datetime import datetime\nfrom dateutil.tz import tzlocal\nimport pytz\nimport re\nimport numpy as np\nimport json\nimport pandas as pd\n\nfrom pipeline import (lab, experiment, imaging, virus)\nimport pynwb\nfrom pynwb import NWBFile, NWBHDF5IO\n\n# ============================== SET CONSTANTS ==========================================\ndefault_nwb_output_dir = os.path.join('/data', 'NWB 2.0')\nzero_zero_time = datetime.strptime('00:00:00', '%H:%M:%S').time() # no precise time available\ninstitution = 'Janelia Research Campus'\n\n\ndef 
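prepare_resources_for_runtime above converts a word2vec text file (a header line, then "word v1 v2 ...") into a pickled dict. The same parse in isolation, with an in-memory file so it runs anywhere (the two-word vocabulary is a made-up fixture):

import io
import pickle
import numpy as np

text = '2 3\nhello 0.1 0.2 0.3\nworld 0.4 0.5 0.6\n'
fh = io.StringIO(text)
next(fh)                      # skip the 'vocab_size dim' header line
w2v = {}
for line in fh:
    word, *nums = line.strip().split(' ')
    w2v[word] = np.array([float(x) for x in nums])

blob = pickle.dumps(w2v, pickle.HIGHEST_PROTOCOL)
assert pickle.loads(blob)['hello'].shape == (3,)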
export_to_nwb(session_key, nwb_output_dir=default_nwb_output_dir, save=False, overwrite=False):\n\n this_session = (experiment.Session & session_key).fetch1()\n print(f'Exporting to NWB 2.0 for session: {this_session}...')\n # ===============================================================================\n # ============================== META INFORMATION ===============================\n # ===============================================================================\n\n # -- NWB file - a NWB2.0 file for each session\n file_name = '_'.join(\n [this_session['subject_nickname'],\n this_session['session_date'].strftime('%Y-%m-%d'),\n str(this_session['session'])])\n nwbfile = NWBFile(\n identifier=file_name,\n related_publications='http://dx.doi.org/10.1016/j.neuron.2017.05.005',\n experiment_description='Two-photon experiment recorded in {}'.format(this_session['brain_location_name']),\n session_description='Imaging session',\n session_start_time=datetime.combine(this_session['session_date'], zero_zero_time),\n file_create_date=datetime.now(tzlocal()),\n experimenter=this_session['username'],\n institution=institution,\n keywords=['motor planning', 'anterior lateral cortex', 'medial motor cortex',\n 'ALM', 'MM', 'Two-photon imaging'])\n\n # -- subject\n subj = (lab.Subject & session_key).fetch1()\n nwbfile.subject = pynwb.file.Subject(\n subject_id=this_session['subject_nickname'],\n genotype=' x '.join((lab.Subject.GeneModification\n & subj).fetch('gene_modification')),\n sex=subj['sex'],\n species=subj['species'],\n date_of_birth=datetime.combine(subj['date_of_birth'], zero_zero_time) if subj['date_of_birth'] else None)\n # -- virus\n nwbfile.virus = json.dumps([{k: str(v) for k, v in virus_injection.items() if k not in subj}\n for virus_injection in virus.VirusInjection * virus.Virus & session_key])\n\n # ===============================================================================\n # ======================== IMAGING & SEGMENTATION ===============================\n # ===============================================================================\n\n scan = (imaging.Scan & session_key).fetch1()\n\n # ---- Structural Images ----------\n images = pynwb.base.Images('images')\n\n if isinstance(scan['image_gcamp'], collections.Sequence):\n gcamp = pynwb.image.GrayscaleImage('GCaMP at 940nm', scan['image_gcamp'])\n images.add_image(gcamp)\n if isinstance(scan['image_ctb'], collections.Sequence):\n ctb = pynwb.image.RGBImage('CTB-647 IT', scan['image_ctb'])\n images.add_image(ctb)\n if isinstance(scan['image_beads'], collections.Sequence):\n beads = pynwb.image.GrayscaleImage('Beads PT', scan['image_beads'])\n images.add_image(beads)\n\n nwbfile.add_acquisition(images)\n\n imaging_plane = nwbfile.create_imaging_plane(\n name='Imaging plane',\n optical_channel=pynwb.ophys.OpticalChannel(\n name='green', description='green channel', emission_lambda=500.),\n description='Imaging session for PT and IT neurons during audio delay task',\n device=nwbfile.create_device(name='two-photon microscope with Thorlabs resonant galvo scanner'),\n excitation_lambda=940.,\n imaging_rate=300.,\n indicator='GCaMP6s',\n location='ALM',\n conversion=1e-6,\n unit='micrometers')\n\n # ---- Frame Time information -----\n\n frame_time = pynwb.image.TimeSeries(\n name='Frame Time',\n data=list(range(0, len(scan['frame_time']))),\n unit='a.u',\n timestamps=scan['frame_time']\n )\n nwbfile.add_acquisition(frame_time)\n\n # ----- Segmentation information -----\n # link the imaging segmentation to the nwb file\n ophys = 
nwbfile.create_processing_module('Ophys', 'Processing result of imaging')\n img_seg = pynwb.ophys.ImageSegmentation()\n ophys.add_data_interface(img_seg)\n\n pln_seg = pynwb.ophys.PlaneSegmentation(\n name='Plane Segmentation',\n description='plane segmentation',\n imaging_plane=imaging_plane)\n\n img_seg.add_plane_segmentation([pln_seg])\n\n\n # insert ROI mask\n rois = (imaging.Scan.Roi & session_key).fetch(as_dict=True)\n\n for k, v in dict(\n roi_id='roi id',\n cell_type='PT, IT, or unknown',\n roi_trace='Trace on this session of this roi',\n neuropil_trace='Trace on this session of the neuropil',\n included='whether to include this roi into later analyses'\n ).items():\n pln_seg.add_column(name=k, description=v)\n\n\n for roi in rois:\n mask = np.zeros([512, 512])\n mask[np.unravel_index(roi['roi_pixel_list']-1, mask.shape, 'F')] = 1\n pln_seg.add_roi(\n roi_id=roi['roi_idx'],\n image_mask=mask,\n cell_type=roi['cell_type'],\n roi_trace=roi['roi_trace'],\n neuropil_trace=roi['roi_trace'],\n included=roi['inc'])\n\n # ===============================================================================\n # =============================== BEHAVIOR TRIALS ===============================\n # ===============================================================================\n\n # =============== TrialSet ====================\n # NWB 'trial' (of type dynamic table) by default comes with two mandatory attributes: 'start_time' and 'stop_time'\n # Other trial-related information needs to be added in to the trial-table as additional columns (with column name\n # and column description)\n\n dj_trial = experiment.SessionTrial * experiment.BehaviorTrial\n skip_adding_columns = experiment.Session.primary_key + ['trial_uid']\n\n if experiment.SessionTrial & session_key:\n # Get trial descriptors from TrialSet.Trial and TrialStimInfo\n trial_columns = [{'name': tag,\n 'description': re.sub('\s+:|\s+', ' ', re.search(\n f'(?<={tag})(.*)', str(dj_trial.heading)).group()).strip()}\n for tag in dj_trial.heading.names\n if tag not in skip_adding_columns + ['start_time', 'stop_time']]\n\n # Add new table columns to nwb trial-table for trial-label\n for c in trial_columns:\n nwbfile.add_trial_column(**c)\n\n # Add entry to the trial-table\n for trial in (dj_trial & session_key).fetch(as_dict=True):\n trial['start_time'] = float(trial['start_time'])\n trial['stop_time'] = float(trial['stop_time']) if trial['stop_time'] else 5.0\n [trial.pop(k) for k in skip_adding_columns]\n nwbfile.add_trial(**trial)\n\n # ===============================================================================\n # =============================== BEHAVIOR TRIAL EVENTS ==========================\n # ===============================================================================\n\n behav_event = pynwb.behavior.BehavioralEvents(name='BehavioralEvents')\n nwbfile.add_acquisition(behav_event)\n\n for trial_event_type in (experiment.TrialEventType & experiment.TrialEvent & session_key).fetch('trial_event_type'):\n event_times, trial_starts = (experiment.TrialEvent * experiment.SessionTrial\n & session_key & {'trial_event_type': trial_event_type}).fetch(\n 'trial_event_time', 'start_time')\n if len(event_times) > 0:\n event_times = np.hstack(event_times.astype(float) + trial_starts.astype(float))\n behav_event.create_timeseries(name=trial_event_type, unit='a.u.', conversion=1.0,\n data=np.full_like(event_times, 1),\n timestamps=event_times)\n\n # =============== Write NWB 2.0 file ===============\n if save:\n save_file_name = 
''.join([nwbfile.identifier, '.nwb'])\n if not os.path.exists(nwb_output_dir):\n os.makedirs(nwb_output_dir)\n if not overwrite and os.path.exists(os.path.join(nwb_output_dir, save_file_name)):\n return nwbfile\n with NWBHDF5IO(os.path.join(nwb_output_dir, save_file_name), mode = 'w') as io:\n io.write(nwbfile)\n print(f'Write NWB 2.0 file: {save_file_name}')\n\n return nwbfile\n\n\n# ============================== EXPORT ALL ==========================================\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n nwb_outdir = sys.argv[1]\n else:\n nwb_outdir = default_nwb_output_dir\n\n for skey in experiment.Session.fetch('KEY'):\n export_to_nwb(skey, nwb_output_dir=nwb_outdir, save=True)\n","sub_path":"pipeline/export/datajoint_to_nwb.py","file_name":"datajoint_to_nwb.py","file_ext":"py","file_size_in_byte":9415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"186991793","text":"import requests\nfrom urllib.parse import urlencode\nimport re\n\nclass News():\n def __init__(self):\n pass\n\n def weibo_hot(self):\n url = 'https://s.weibo.com/top/summary/'\n headers = {\n 'Host':'s.weibo.com',\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',\n }\n\n result = requests.get(url, headers=headers)\n result_dict = dict()\n hot_word_list = re.findall('.*?.*?>(.*?)', result.text, re.S)\n\n for word in hot_word_list:\n\n data = {\n 'q':word,\n 'Refer':'top'\n }\n\n result_url = 'https://s.weibo.com/weibo' + '?' + urlencode(data)\n result_dict[word] = result_url\n return result_dict\n\n","sub_path":"Raspberry/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"446567222","text":"#!/usr/bin/env python3\nimport uuid\nimport os\nimport copy\nimport json\nimport argparse\n\nfrom cyy_naive_lib.log import get_logger\n\nfrom .dataset import (\n dataset_with_indices,\n sample_subset,\n sub_dataset,\n replace_dataset_labels,\n MachineLearningPhase,\n get_dataset,\n)\nfrom .configuration import (\n get_trainer_from_configuration,\n get_inferencer_from_configuration,\n)\nfrom .hyper_gradient_trainer import HyperGradientTrainer\nfrom .reproducible_env import global_reproducible_env\n\n\ndef get_arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset_name\", type=str)\n parser.add_argument(\"--model_name\", type=str)\n parser.add_argument(\"--epochs\", type=int, default=None)\n parser.add_argument(\"--batch_size\", type=int, default=None)\n parser.add_argument(\"--learning_rate\", type=float, default=None)\n parser.add_argument(\"--weight_decay\", type=float, default=None)\n parser.add_argument(\"--stop_accuracy\", type=float, default=None)\n parser.add_argument(\"--cache_size\", type=int, default=None)\n parser.add_argument(\"--model_path\", type=str, default=None)\n parser.add_argument(\"--reproducible_env_load_path\", type=str, default=None)\n parser.add_argument(\n \"--training_dataset_percentage\",\n type=float,\n default=None)\n parser.add_argument(\n \"--make_reproducible\",\n action=\"store_true\",\n default=False)\n parser.add_argument(\n \"--approx_hyper_gradient_and_momentum_dir\", type=str, default=None\n )\n parser.add_argument(\n \"--hessian_hyper_gradient_and_momentum_dir\", type=str, default=None\n )\n parser.add_argument(\n \"--hyper_gradient_sample_percentage\",\n type=float,\n default=None)\n parser.add_argument(\"--use_hessian\", 
action=\"store_true\", default=False)\n parser.add_argument(\n \"--use_hessian_and_approximation\", action=\"store_true\", default=False\n )\n parser.add_argument(\"--repeated_num\", type=int, default=None)\n parser.add_argument(\"--save_dir\", type=str, default=None)\n return parser\n\n\ndef get_parsed_args(parser=None):\n if parser is None:\n parser = get_arg_parser()\n args = parser.parse_args()\n if args.save_dir is None:\n args.save_dir = __create_unique_save_dir(\n os.path.join(\"models\", args.task_name))\n return args\n\n\ndef __create_unique_save_dir(save_dir: str):\n return os.path.join(save_dir, str(uuid.uuid4()))\n\n\ndef create_trainer_from_args(args):\n if args.reproducible_env_load_path is not None:\n global global_reproducible_env\n assert not global_reproducible_env.initialized\n global_reproducible_env.load(args.reproducible_env_load_path)\n args.make_reproducible = True\n\n if args.make_reproducible:\n global_reproducible_env.enable()\n global_reproducible_env.save(args.save_dir)\n\n trainer = get_task_configuration(args.task_name, True)\n if args.model_path is not None:\n trainer.load_model(args.model_path)\n\n hyper_parameter = copy.deepcopy(trainer.get_hyper_parameter())\n if args.epochs is not None:\n hyper_parameter.epochs = args.epochs\n if args.batch_size is not None:\n hyper_parameter.batch_size = args.batch_size\n if args.learning_rate is not None:\n hyper_parameter.learning_rate = args.learning_rate\n if args.weight_decay is not None:\n hyper_parameter.weight_decay = args.weight_decay\n trainer.set_hyper_parameter(hyper_parameter)\n\n if args.training_dataset_percentage is not None:\n os.makedirs(args.save_dir, exist_ok=True)\n subset_dict = sample_subset(\n trainer.training_dataset, args.training_dataset_percentage\n )\n sample_indices = sum(subset_dict.values(), [])\n trainer.training_dataset = sub_dataset(\n trainer.training_dataset, sample_indices)\n with open(\n os.path.join(args.save_dir, \"training_dataset_indices.json\"),\n mode=\"wt\",\n ) as f:\n json.dump(sample_indices, f)\n\n trainer.training_dataset = dataset_with_indices(trainer.training_dataset)\n\n if args.stop_accuracy is not None:\n trainer.stop_criterion = (\n lambda trainer, epoch, __: trainer.validation_accuracy[epoch]\n >= args.stop_accuracy\n )\n return trainer\n\n\ndef create_hyper_gradient_trainer_from_args(args):\n trainer = create_trainer_from_args(args)\n\n use_approximation = True\n if args.use_hessian:\n use_approximation = False\n if args.use_hessian_and_approximation:\n args.use_hessian = True\n use_approximation = True\n\n hyper_gradient_trainer = HyperGradientTrainer(\n trainer,\n args.cache_size,\n args.save_dir,\n hessian_hyper_gradient_and_momentum_dir=args.hessian_hyper_gradient_and_momentum_dir,\n approx_hyper_gradient_and_momentum_dir=args.approx_hyper_gradient_and_momentum_dir,\n use_hessian=args.use_hessian,\n use_approximation=use_approximation,\n )\n\n if args.hyper_gradient_sample_percentage is not None:\n subset_dict = sample_subset(\n trainer.training_dataset,\n args.hyper_gradient_sample_percentage,\n )\n sample_indices = sum(subset_dict.values(), [])\n os.makedirs(args.save_dir, exist_ok=True)\n with open(\n os.path.join(args.save_dir, \"hyper_gradient_indices.json\"),\n mode=\"wt\",\n ) as f:\n json.dump(sample_indices, f)\n get_logger().info(\"track %s samples\", len(sample_indices))\n hyper_gradient_trainer.set_computed_indices(sample_indices)\n return hyper_gradient_trainer\n\n\ndef create_validator_from_args(args):\n validator = 
get_task_configuration(args.task_name, False)\n if args.model_path is not None:\n validator.load_model(args.model_path)\n return validator\n\n\ndef get_randomized_label_map(args):\n randomized_label_map: dict = dict()\n with open(args.randomized_label_map_path, \"r\") as f:\n for k, v in json.load(f).items():\n randomized_label_map[int(k)] = int(v)\n return randomized_label_map\n\n\ndef get_training_dataset(args):\n dataset_name = get_task_dataset_name(args.task_name)\n training_dataset = get_dataset(dataset_name, MachineLearningPhase.Training)\n if (\n hasattr(args, \"training_dataset_indices_path\")\n and args.training_dataset_indices_path is not None\n ):\n get_logger().info(\"use training_dataset_indices_path\")\n with open(args.training_dataset_indices_path, \"r\") as f:\n subset_indices = json.load(f)\n training_dataset = sub_dataset(training_dataset, subset_indices)\n if args.randomized_label_map_path is not None:\n get_logger().info(\"use randomized_label_map_path\")\n training_dataset = replace_dataset_labels(\n training_dataset, get_randomized_label_map(args)\n )\n return training_dataset\n","sub_path":"influence_function/arg_parse.py","file_name":"arg_parse.py","file_ext":"py","file_size_in_byte":6927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"155301945","text":"import importlib\nimport datetime\nimport time\n\nfrom sqlalchemy.exc import OperationalError\nfrom sqlalchemy.sql.ddl import DDL\n\nfrom wopmars.framework.database.Base import Base\nfrom sqlalchemy import Column, Integer, String, ForeignKey, DateTime\nfrom sqlalchemy.orm import relationship, reconstructor\n\nfrom wopmars.framework.database.SQLManager import SQLManager\nfrom wopmars.framework.database.tables.IOPut import IOPut\nfrom wopmars.framework.database.tables.ModificationTable import ModificationTable\nfrom wopmars.framework.database.tables.ToolWrapper import ToolWrapper\nfrom wopmars.utils.Logger import Logger\nfrom wopmars.framework.database.tables.Type import Type\nfrom sqlalchemy.sql.functions import func\n\nclass IODbPut(IOPut, Base):\n \"\"\"\n This class extends IOPut and is specific to the input or output tables. It is the model which store the references\n to the actual tables needed by the user. 
The table ``wom_table`` associated with this model contains the\n following fields:\n\n - id: INTEGER - primary key - autoincrement - arbitrary ID\n - tablename: VARCHAR(255) - foreign key to the associated table: :class:`wopmars.framework.database.tables.ModificationTable.ModificationTable` - the name of the referenced table\n - model: VARCHAR(255) - the path to the model (in python notation)\n - rule_id: INTEGER - foreign key to the associated rule ID: :class:`wopmars.framework.database.tables.ToolWrapper.ToolWrapper`\n - type_id: INTEGER - foreign key to the associated type ID: :class:`wopmars.framework.database.tables.Type.Type`\n - used_at: DATE - date at which the table has been used\n \"\"\"\n __tablename__ = \"wom_table\"\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n tablename = Column(String(255), ForeignKey(\"wom_modification_table.table_name\"))\n model = Column(String(255))\n rule_id = Column(Integer, ForeignKey(\"wom_rule.id\"))\n type_id = Column(Integer, ForeignKey(\"wom_type.id\"))\n used_at = Column(DateTime, nullable=True)\n\n # One table is in one rule\n rule = relationship(\"ToolWrapper\", back_populates=\"tables\", enable_typechecks=False)\n # One file has One type\n type = relationship(\"Type\", back_populates=\"tables\")\n\n modification = relationship(\"ModificationTable\", back_populates=\"tables\")\n\n # all the model names met since the beginning of this instance of WopMaRS\n tablemodelnames = set()\n # all the table names met since the beginning of this instance of WopMaRS\n tablenames = set()\n\n def __init__(self, model, tablename):\n \"\"\"\n self.__table is initialized to None and will contain the model of this IODbPut object.\n\n :param model: The path to the model\n :type model: str\n :param tablename: The name of the table associated with the model\n :type tablename: str\n \"\"\"\n # The file containing the table should be in PYTHONPATH\n Base.__init__(self, model=model, tablename=tablename)\n Logger.instance().debug(str(model) + \" model loaded. Tablename: \" + str(tablename))\n self.__table = None\n\n @reconstructor\n def init_on_load(self):\n \"\"\"\n This is used by SQLAlchemy to regenerate the right object when loading it from the database. Here, we need to\n get back the actual Model from the model name and store it in self.__table.\n \"\"\"\n for table in IODbPut.tablemodelnames:\n mod = importlib.import_module(table)\n try:\n if table == self.model:\n # todo tabling\n self.__table = eval(\"mod.\" + self.model.split(\".\")[-1])\n except AttributeError as e:\n raise e\n Logger.instance().debug(self.tablename + \" table class reloaded. 
Model: \" + self.model)\n\n def set_table(self, model):\n self.__table = model\n\n def get_table(self):\n return self.__table\n\n @staticmethod\n def create_triggers():\n \"\"\"\n Create an INSERT, UPDATE, DELETE trigger on the tables created by the user in order to store the modifications date.\n \"\"\"\n stmt = [\"INSERT\", \"UPDATE\", \"DELETE\"]\n for tablename in Base.metadata.tables:\n if tablename[:4] != \"wom_\":\n for s in stmt:\n data={\"statement\": str(s), \"tablename\": str(tablename)} \n if SQLManager.instance().__dict__['d_database_config']['db_connection'] == 'sqlite':\n sql_trigger = \"\"\"\nCREATE TRIGGER IF NOT EXISTS modification_%(tablename)s AFTER %(statement)s ON %(tablename)s\nBEGIN\nUPDATE wom_modification_table SET date = CURRENT_TIMESTAMP WHERE table_name = '%(tablename)s';\nEND;\n \"\"\"%data\n elif SQLManager.instance().__dict__['d_database_config']['db_connection'] == 'mysql':\n sql_trigger = \"\"\"\nCREATE TRIGGER IF NOT EXISTS modification_%(tablename)s_%(statement)s AFTER %(statement)s ON %(tablename)s for each row UPDATE wom_modification_table SET date = CURRENT_TIMESTAMP WHERE table_name = '%(tablename)s';\n \"\"\"%data\n obj_ddl = DDL(sql_trigger)\n SQLManager.instance().create_trigger(Base.metadata.tables[tablename], obj_ddl)\n elif SQLManager.instance().__dict__['d_database_config']['db_connection'] == 'postgresql':\n sql_trigger = \"\"\"\nCREATE OR REPLACE FUNCTION modification_%(statement)s_%(tablename)s() RETURNS TRIGGER AS $modification_%(statement)s_%(tablename)s$\nBEGIN\nUPDATE wom_modification_table SET date = CURRENT_TIMESTAMP WHERE table_name = '%(tablename)s';\nRETURN NULL; -- result is ignored since this is an AFTER trigger\nEND;\n$modification_%(statement)s_%(tablename)s$ LANGUAGE plpgsql;\nDROP TRIGGER IF EXISTS modification_%(statement)s_%(tablename)s ON \"%(tablename)s\";\nCREATE TRIGGER modification_%(statement)s_%(tablename)s AFTER INSERT ON \"%(tablename)s\" FOR EACH ROW EXECUTE PROCEDURE modification_%(statement)s_%(tablename)s();\n \"\"\"%data\n obj_ddl = DDL(sql_trigger)\n SQLManager.instance().create_trigger(Base.metadata.tables[tablename], obj_ddl)\n\n\n @staticmethod\n def set_tables_properties(tables):\n \"\"\"\n Import the models of the current execution and then associate models with IODbPut objects.\n\n :param tables: the IODbPut which need their table properties to be set.\n :type tables: ResultSet(IODbPut)\n \"\"\"\n # import models for avoid references errors between models when dealing with them\n IODbPut.import_models(set([t.model for t in tables]))\n\n for table in tables:\n # keep track of the models used in static variable of IODbPut\n IODbPut.tablemodelnames.add(table.model)\n # Associate model with the IODbPut object\n mod = importlib.import_module(table.model)\n table_model = eval(\"mod.\" + table.model.split(\".\")[-1])\n table.set_table(table_model)\n # keep track of table names used in static variable of IODbPut\n IODbPut.tablenames.add(table_model.__tablename__)\n SQLManager.instance().get_session().add(table)\n\n @staticmethod\n def get_execution_tables():\n \"\"\"\n Return all the IODbPut objects found in model IODbPut.\n\n :return: ResultSet IODbPut objects\n \"\"\"\n session = SQLManager.instance().get_session()\n execution_id = session.query(func.max(ToolWrapper.execution_id))\n return session.query(IODbPut).filter(IODbPut.rule_id == ToolWrapper.id).filter(ToolWrapper.execution_id == execution_id).all()\n\n @staticmethod\n def import_models(model_names):\n \"\"\"\n Import all the given models\n\n :param 
model_names: The path to the models\n :type model_names: Iterable(String)\n \"\"\"\n for t in model_names:\n Logger.instance().debug(\"IODbPut.import_models: importing \" + str(t))\n importlib.import_module(t)\n\n def is_ready(self):\n \"\"\"\n A IODbPut object is ready if its table exists and contains entries.\n\n :return: bool if the table is ready\n \"\"\"\n session = SQLManager.instance().get_session()\n try:\n results = session.query(self.__table).first()\n if results is None:\n Logger.instance().debug(\"The table \" + self.tablename + \" is empty.\")\n return False\n except OperationalError as e:\n Logger.instance().debug(\"The table \" + self.__table.__tablename__ + \" doesn't exist.\")\n return False\n except Exception as e:\n session.rollback()\n raise e\n # todo twthread\n return True\n\n def __eq__(self, other):\n \"\"\"\n Two IODbPut object are equals if their table attributes belongs to the same class and if the associated table\n has the same content\n\n :param other: IODbPut\n :return: boolean: True if the table attributes are the same, False if not\n \"\"\"\n session = SQLManager.instance().get_session()\n if self.model != other.model or self.tablename != other.tablename:\n return False\n try:\n self_results = set(session.query(self.__table).all())\n other_results = set(session.query(other.get_table()).all())\n if self_results != other_results:\n return False\n except Exception as e:\n session.rollback()\n raise e\n return True\n\n def __hash__(self):\n return id(self)\n\n def __repr__(self):\n return \"\"\n\n def __str__(self):\n return \"\"\n","sub_path":"wopmars/framework/database/tables/IODbPut.py","file_name":"IODbPut.py","file_ext":"py","file_size_in_byte":9800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"143425697","text":"\"\"\"Transition and sequence analysis of neighborhood change.\"\"\"\n\nfrom warnings import warn\n\nimport geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom giddy.markov import Markov, Spatial_Markov\nfrom giddy.sequence import Sequence\nfrom libpysal.weights import Voronoi, lag_categorical\nfrom libpysal.weights.contiguity import Queen, Rook\nfrom libpysal.weights.distance import KNN, DistanceBand, Kernel\nfrom sklearn.cluster import AgglomerativeClustering\n\nWs = {\n \"queen\": Queen,\n \"rook\": Rook,\n \"voronoi\": Voronoi,\n \"knn\": KNN,\n \"kernel\": Kernel,\n \"distanceband\": DistanceBand,\n}\n\n\ndef transition(\n gdf,\n cluster_col,\n temporal_index=\"year\",\n unit_index=\"geoid\",\n w_type=\"rook\",\n w_options=None,\n permutations=0,\n):\n \"\"\"\n (Spatial) Markov approach to transitional dynamics of neighborhoods.\n\n Parameters\n ----------\n gdf : geopandas.GeoDataFrame or pandas.DataFrame\n Long-form geopandas.GeoDataFrame or pandas.DataFrame containing neighborhood\n attributes with a column defining neighborhood clusters.\n cluster_col : string or int\n Column name for the neighborhood segmentation, such as\n \"ward\", \"kmeans\", etc.\n temporal_index : string, optional\n Column defining time and or sequencing of the long-form data.\n Default is \"year\".\n unit_index : string, optional\n Column identifying the unique id of spatial units.\n Default is \"geoid\".\n w_type : string, optional\n Type of spatial weights type (\"rook\", \"queen\", \"knn\" or\n \"kernel\") to be used for spatial structure. 
Default is\n None, if non-spatial Markov transition rates are desired.\n w_options : dict\n additional options passed to a libpysal weights constructor\n (e.g. `k` for a KNN weights matrix)\n permutations : int, optional\n number of permutations for use in randomization based\n inference (the default is 0).\n\n Returns\n --------\n mar : giddy.markov.Markov instance or giddy.markov.Spatial_Markov\n if w_type=None, a classic Markov instance is returned.\n if w_type is given, a Spatial_Markov instance is returned.\n\n Examples\n --------\n >>> from geosnap import Community\n >>> columbus = Community.from_ltdb(msa_fips=\"18140\")\n >>> columbus1 = columbus.cluster(columns=['median_household_income',\n ... 'p_poverty_rate', 'p_edu_college_greater', 'p_unemployment_rate'],\n ... method='ward', n_clusters=6)\n >>> gdf = columbus1.gdf\n >>> a = transition(gdf, \"ward\", w_type=\"rook\")\n >>> a.p\n array([[0.79189189, 0.00540541, 0.0027027 , 0.13243243, 0.06216216,\n 0.00540541],\n [0.0203252 , 0.75609756, 0.10569106, 0.11382114, 0. ,\n 0.00406504],\n [0.00917431, 0.20183486, 0.75229358, 0.01834862, 0. ,\n 0.01834862],\n [0.1959799 , 0.18341709, 0.00251256, 0.61809045, 0. ,\n 0. ],\n [0.32307692, 0. , 0. , 0. , 0.66153846,\n 0.01538462],\n [0.09375 , 0.0625 , 0. , 0. , 0. ,\n 0.84375 ]])\n >>> a.P[0]\n array([[0.82119205, 0. , 0. , 0.10927152, 0.06622517,\n 0.00331126],\n [0.14285714, 0.57142857, 0.14285714, 0.14285714, 0. ,\n 0. ],\n [0.5 , 0. , 0.5 , 0. , 0. ,\n 0. ],\n [0.21428571, 0.14285714, 0. , 0.64285714, 0. ,\n 0. ],\n [0.18918919, 0. , 0. , 0. , 0.78378378,\n 0.02702703],\n [0.28571429, 0. , 0. , 0. , 0. ,\n 0.71428571]])\n \"\"\"\n if not w_options:\n w_options = {}\n assert (\n unit_index in gdf.columns\n ), f\"The unit_index ({unit_index}) column is not in the geodataframe\"\n gdf_temp = gdf.copy().reset_index()\n df = gdf_temp[[unit_index, temporal_index, cluster_col]]\n df_wide = df.pivot(\n index=unit_index, columns=temporal_index, values=cluster_col\n ).dropna()\n y = df_wide.values\n if w_type is None:\n mar = Markov(y) # class markov modeling\n else:\n geoms = gdf_temp.groupby(unit_index).first()[gdf_temp.geometry.name]\n gdf_wide = df_wide.merge(geoms, left_index=True, right_index=True)\n w = Ws[w_type].from_dataframe(gpd.GeoDataFrame(gdf_wide), **w_options)\n w.transform = \"r\"\n mar = Spatial_Markov(\n y, w, permutations=permutations, discrete=True, variable_name=cluster_col\n )\n return mar\n\n\ndef sequence(\n gdf,\n cluster_col,\n seq_clusters=5,\n subs_mat=None,\n dist_type=None,\n indel=None,\n temporal_index=\"year\",\n unit_index=\"geoid\",\n):\n \"\"\"\n Pairwise sequence analysis and sequence clustering.\n\n Dynamic programming if optimal matching.\n\n Parameters\n ----------\n gdf : geopandas.GeoDataFrame or pandas.DataFrame\n Long-form geopandas.GeoDataFrame or pandas.DataFrame containing neighborhood\n attributes with a column defining neighborhood clusters.\n cluster_col : string or int\n Column name for the neighborhood segmentation, such as\n \"ward\", \"kmeans\", etc.\n seq_clusters : int, optional\n Number of neighborhood sequence clusters. Agglomerative\n Clustering with Ward linkage is now used for clustering\n the sequences. 
Default is 5.\n dist_type : string\n \"hamming\": hamming distance (substitution only\n and its cost is constant 1) from sklearn.metrics;\n \"markov\": utilize empirical transition\n probabilities to define substitution costs;\n \"interval\": differences between states are used\n to define substitution costs, and indel=k-1;\n \"arbitrary\": arbitrary distance if there is not a\n strong theory guidance: substitution=0.5, indel=1.\n \"tran\": transition-oriented optimal matching. Sequence of\n transitions. Based on :cite:`Biemann:2011`.\n subs_mat : array\n (k,k), substitution cost matrix. Should be hollow (\n 0 cost between the same type), symmetric and non-negative.\n indel : float, optional\n insertion/deletion cost.\n temporal_index : string, optional\n Column defining time and or sequencing of the long-form data.\n Default is \"year\".\n unit_index : string, optional\n Column identifying the unique id of spatial units.\n Default is \"geoid\".\n\n Returns\n --------\n gdf_temp : geopandas.GeoDataFrame or pandas.DataFrame\n geopandas.GeoDataFrame or pandas.DataFrame with a new column for sequence\n labels.\n df_wide : pandas.DataFrame\n Wide-form DataFrame with k (k is the number of periods)\n columns of neighborhood types and 1 column of sequence\n labels.\n seq_dis_mat : array\n (n,n), distance/dissimilarity matrix for each pair of\n sequences\n\n Examples\n --------\n >>> from geosnap.data import Community\n >>> columbus = Community.from_ltdb(msa_fips=\"18140\")\n >>> columbus1 = columbus.cluster(columns=['median_household_income',\n ... 'p_poverty_rate', 'p_edu_college_greater', 'p_unemployment_rate'],\n ... method='ward', n_clusters=6)\n >>> gdf = columbus1.gdf\n >>> gdf_new, df_wide, seq_hamming = Sequence(gdf, dist_type=\"hamming\")\n >>> seq_hamming.seq_dis_mat[:5, :5]\n array([[0., 3., 4., 5., 5.],\n [3., 0., 3., 3., 3.],\n [4., 3., 0., 2., 2.],\n [5., 3., 2., 0., 0.],\n [5., 3., 2., 0., 0.]])\n\n \"\"\"\n assert (\n unit_index in gdf.columns\n ), f\"The unit_index ({unit_index}) column is not in the geodataframe\"\n gdf_temp = gdf.copy().reset_index()\n df = gdf_temp[[unit_index, temporal_index, cluster_col]]\n df_wide = (\n df.pivot(index=unit_index, columns=temporal_index, values=cluster_col)\n .dropna()\n .astype(\"int\")\n )\n y = df_wide.values\n seq_dis_mat = Sequence(\n y, subs_mat=subs_mat, dist_type=dist_type, indel=indel, cluster_type=cluster_col\n ).seq_dis_mat\n model = AgglomerativeClustering(n_clusters=seq_clusters).fit(seq_dis_mat)\n name_seq = dist_type + \"_%d\" % (seq_clusters)\n df_wide[name_seq] = model.labels_\n gdf_temp = gdf_temp.merge(df_wide[[name_seq]], left_on=unit_index, right_index=True)\n gdf_temp = gdf_temp.reset_index(drop=True)\n\n return gdf_temp, df_wide, seq_dis_mat\n\n\ndef predict_markov_labels(\n gdf,\n unit_index=\"geoid\",\n temporal_index=\"year\",\n cluster_col=None,\n w_type=\"rook\",\n w_options=None,\n base_year=None,\n new_colname=None,\n time_steps=1,\n increment=None,\n seed=None,\n verbose=True,\n):\n \"\"\"Predict neighborhood labels based on spatial Markov transition model\n\n Parameters\n ----------\n gdf : geopandas.GeoDataFrame\n a long-form geodataframe with a column of labels to be simulated with a spatial Markov model\n unit_index : str,\n column on dataframe that identifies unique geographic units, by default \"geoid\"\n temporal_index : str\n column on dataframe that identifies unique time periods, by default \"year\"\n cluster_col : str\n column on the dataframe that stores cluster or other labels to be simulated\n 
w_type : str, optional\n type of spatial weights matrix to include in the transition model, by default \"rook\"\n w_options : dict, optional\n additional keyword arguments passed to the libpysal weights constructor\n base_year : int or str, optional\n the year from which to begin simulation (i.e. the set of labels to define the first\n period of the Markov sequence)\n new_colname : str, optional\n new column name to store predicted labels under. Defaults to \"predicted\"\n time_steps : int, optional\n the number of time-steps to simulate, by default 1\n increment : str or int, optional\n styled increment each time-step refers to. For example, for a model fitted to decadal\n Census data, each time-step refers to a period of ten years, so an increment of 10 ensures\n that the temporal index aligns appropriately with the time steps being simulated\n verbose: bool\n if true, print warnings from the label sampling process\n\n Returns\n -------\n geopandas.GeoDataFrame\n long-form geodataframe with predicted cluster labels stored in the `new_colname` column\n \"\"\"\n crs = gdf.crs\n np.random.seed(seed)\n if not new_colname:\n new_colname = \"predicted\"\n if not w_options:\n w_options = {}\n\n assert (\n cluster_col and cluster_col in gdf.columns\n ), f\"The input dataframe has no column named {cluster_col}\"\n\n assert (\n base_year\n ), \"Missing `base_year`. You must provide an initial time point with labels to begin simulation\"\n assert (\n base_year in gdf[temporal_index].unique().tolist()\n ), \"A set of observations with `temporal_index`==`base_year` must be included in the gdf\"\n\n gdf = gdf.copy()\n gdf = gdf.dropna(subset=[cluster_col]).reset_index(drop=True)\n t = transition(\n gdf,\n cluster_col,\n w_type=w_type,\n unit_index=unit_index,\n temporal_index=temporal_index,\n w_options=w_options,\n )\n\n if time_steps == 1:\n\n gdf = gdf[gdf[temporal_index] == base_year].reset_index(drop=True)\n w = Ws[w_type].from_dataframe(gdf, **w_options)\n predicted = _draw_labels(w, gdf, cluster_col, t, unit_index, verbose)\n if new_colname:\n predicted = predicted.rename(columns={cluster_col: new_colname})\n return predicted\n\n else:\n assert (\n increment\n ), \"You must set the `increment` argument to simulate multiple time steps\"\n predictions = []\n gdf = gdf[gdf[temporal_index] == base_year]\n gdf = gdf[[unit_index, cluster_col, temporal_index, gdf.geometry.name]]\n current_time = base_year + increment\n gdf = gdf.dropna(subset=[cluster_col]).reset_index(drop=True)\n w = Ws[w_type].from_dataframe(gdf, **w_options)\n predictions.append(gdf)\n\n for step in range(1, time_steps + 1):\n # use the last known set of labels to get the spatial context for each geog unit\n gdf = predictions[step - 1].copy()\n\n predicted = _draw_labels(w, gdf, cluster_col, t, unit_index, verbose)\n predicted[temporal_index] = current_time\n predictions.append(predicted)\n current_time += increment\n gdf = gpd.GeoDataFrame(pd.concat(predictions), crs=crs)\n if new_colname:\n gdf = gdf.rename(columns={cluster_col: new_colname})\n return gdf\n\n\ndef _draw_labels(w, gdf, cluster_col, markov, unit_index, verbose):\n \"\"\"Draw a random class label from the spatially-conditioned transition rates.\n\n Parameters\n ----------\n w : libpysal.weights.W\n spatial weights object\n gdf : geopandas.GeoDataFrame\n geodataframe of observations with class/cluster labels as a column\n cluster_col : string\n the column on the dataframe that holds class labels\n markov : giddy.Spatial_Markov\n an instance of a Spatial_Markov 
class\n unit_index : string\n the column on the dataframe that identifies unique spatial units\n\n Returns\n -------\n geopandas.GeoDataFrame\n long-form geodataframe with predicted cluster labels stored in the `new_colname` column\n \"\"\"\n gdf = gdf.copy()\n gdf = gdf.dropna(subset=[cluster_col])\n lags = lag_categorical(w, gdf[cluster_col].values)\n clusters = gdf.reset_index()[cluster_col].astype(str).values\n classes = pd.Series(markov.classes).astype(str).values\n cluster_idx = dict(zip(classes, list(range(len(classes)))))\n\n labels = {}\n for i, lag in enumerate(lags):\n # select the transition matrix using the label of unit's spatial lag\n spatial_context = np.nan_to_num(markov.P, posinf=0.0, neginf=0.0)[\n cluster_idx[lag]\n ]\n # select the class's row from the transition matrix using the unit's label\n probs = spatial_context[cluster_idx[clusters[i]]]\n probs /= (\n probs.sum()\n ) # correct for tolerance, see https://stackoverflow.com/questions/25985120/numpy-1-9-0-valueerror-probabilities-do-not-sum-to-1\n probs = np.nan_to_num(probs.flatten())\n if sum(probs) == 0:\n # in case obs have a modal neighbor never before seen in the model\n # (so all transition probs are 0)\n # fall back to the aspatial transition matrix\n if verbose:\n warn(\n f\"Falling back to aspatial transition rule for unit {gdf[unit_index][i]}\"\n )\n probs = markov.p[cluster_idx[clusters[i]]].flatten()\n\n labels[i] = np.random.choice(classes, p=probs)\n\n labels = pd.Series(labels, name=cluster_col, index=gdf.index)\n out = gdf[[unit_index, gdf.geometry.name]]\n predicted = gpd.GeoDataFrame(pd.concat([labels, out], axis=1))\n return predicted\n","sub_path":"geosnap/analyze/dynamics.py","file_name":"dynamics.py","file_ext":"py","file_size_in_byte":15547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"395956028","text":"data = {\r\n'''\r\nThe data variable above should be set to the value of the unpacked version of ObjectInformation.xnb.\r\nA quick guide is as follows:\r\n - Find the file ObjectInformation.xnb\r\n > Try C:\\Program Files (x86)\\Steam\\steamapps\\common\\Stardew Valley\\Content\\Data\r\n - Unpack ObjectInformation.xnb into a .json file\r\n > https://stardewvalleywiki.com/Modding:Editing_XNB_files#Unpack_game_files gives a good tutorial\r\n for how to do this\r\n - Copy the dictionary that appears in \"content\": { ... 
} and paste it into this file, so that\r\n data will be equal to that dictionary\r\n - Rename this file to object_data.py by removing the '-empty' in the filename\r\n'''\r\n}\r\n","sub_path":"ArtifactScript/object_data-empty.py","file_name":"object_data-empty.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"512909235","text":"from collections import Counter, defaultdict, OrderedDict, deque\nfrom bisect import bisect_left, bisect_right\nfrom functools import reduce, lru_cache\nfrom typing import List\nimport itertools\nimport math\nimport heapq\nimport string\ntrue = True\nfalse = False\nMIN, MAX = -0x3f3f3f3f, 0x3f3f3f3f\n#\n# @lc app=leetcode id=923 lang=python3\n#\n# [923] 3Sum With Multiplicity\n#\n# https://leetcode.com/problems/3sum-with-multiplicity/description/\n#\n# algorithms\n# Medium (35.39%)\n# Total Accepted: 17.6K\n# Total Submissions: 49.6K\n# Testcase Example: '[1,1,2,2,3,3,4,4,5,5]\\n8'\n#\n# Given an integer array A, and an integer target, return the number of tuples\n# i, j, k  such that i < j < k and A[i] + A[j] + A[k] == target.\n#\n# As the answer can be very large, return it modulo 10^9 + 7.\n#\n#\n#\n# Example 1:\n#\n#\n# Input: A = [1,1,2,2,3,3,4,4,5,5], target = 8\n# Output: 20\n# Explanation:\n# Enumerating by the values (A[i], A[j], A[k]):\n# (1, 2, 5) occurs 8 times;\n# (1, 3, 4) occurs 8 times;\n# (2, 2, 4) occurs 2 times;\n# (2, 3, 3) occurs 2 times.\n#\n#\n#\n# Example 2:\n#\n#\n# Input: A = [1,1,2,2,2,2], target = 5\n# Output: 12\n# Explanation:\n# A[i] = 1, A[j] = A[k] = 2 occurs 12 times:\n# We choose one 1 from [1,1] in 2 ways,\n# and two 2s from [2,2,2,2] in 6 ways.\n#\n#\n#\n#\n#\n# Note:\n#\n#\n# 3 <= A.length <= 3000\n# 0 <= A[i] <= 100\n# 0 <= target <= 300\n#\n#\n\n\nclass Solution:\n def threeSumMulti(self, nums, t):\n cnt = [0] * 301\n for n in nums:\n cnt[n] += 1\n nums = sorted(list(set(nums)))\n ans = 0\n # print(nums, cnt)\n for i, ni in enumerate(nums):\n for j, nj in enumerate(nums[i:]):\n if ni + nj > t: break\n diff = t - ni - nj\n if diff < nj or cnt[diff] == 0:\n continue\n if j == 0:\n if diff == ni:\n ans += cnt[ni] * (cnt[ni] - 1) * (cnt[ni] - 2) // 6\n else:\n ans += cnt[ni] * (cnt[ni] - 1) * cnt[diff] // 2\n else:\n if diff == ni:\n ans += cnt[ni] * (cnt[ni] - 1) * cnt[nj] // 2\n elif diff == nj:\n ans += cnt[ni] * cnt[nj] * (cnt[nj] - 1) // 2\n else:\n ans += cnt[ni] * cnt[nj] * cnt[diff]\n # print(ni, nj, diff, ans)\n ans %= 10**9 + 7\n return ans\n\n\nsol = Solution()\n# nums, t = [1,1,2,2,2,2], 5\nnums, t = [1, 1, 2, 2, 3, 3, 4, 4, 5, 5], 8\nprint(sol.threeSumMulti(nums, t))\n","sub_path":"python_solutions/923.3sum-with-multiplicity.py","file_name":"923.3sum-with-multiplicity.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"240248369","text":"# -*- coding: utf-8 -*-\r\nfrom django.contrib import admin\r\n#from django.forms.models import *\r\nfrom django.conf import settings\r\nfrom models import Ticket, Good, Income, Article, UserData, Supplier\r\nfrom django import forms\r\nfrom models import *\r\nfrom django.forms import TextInput\r\nfrom django.forms.widgets import CheckboxInput\r\nfrom django.utils.translation import ugettext_lazy as _\r\nfrom datetime import datetime\r\n#from django.contrib.admin import SimpleListFilter\r\nimport logging\r\n#from ajax_filtered_fields.forms import ForeignKeyByRelatedField\r\n#, 
ManyToManyByRelatedField, AjaxForeignKeyField, ForeignKeyByLetter\r\nfrom django.utils.encoding import force_unicode\r\nfrom django.http import HttpResponseRedirect\r\nfrom chained_selects.widgets import ChainedSelectWidget\r\n\r\n\r\nclass TicketAdminForm(forms.ModelForm):\r\n #good = ForeignKeyByRelatedField(Good, \"article\")\r\n\r\n article = forms.ModelChoiceField(queryset=Article.objects.all())\r\n good = forms.ModelChoiceField(queryset=Good.objects.all(), widget=ChainedSelectWidget(\r\n parent_name='article', # имя первого селекта\r\n app_name='poluchka', # имя приложения, где лежит модель с методом\r\n model_name='article', # имя модели с методом\r\n method_name='chained_relation', # имя самого метода\r\n ))\r\n comment = forms.CharField(\r\n widget=TextInput(attrs={'size': '200'}), required=False)\r\n confirm_dublicate = forms.BooleanField(widget=CheckboxInput(\r\n ), required=False, label=_(u\"Сохранять дубликат\"))\r\n\r\n class Media:\r\n js = (\r\n settings.ADMIN_MEDIA_PREFIX + \"js/SelectBox.js\",\r\n settings.ADMIN_MEDIA_PREFIX + \"js/SelectFilter2.js\",\r\n # settings.ADMIN_MEDIA_PREFIX +'js/jquery.js',\r\n # settings.ADMIN_MEDIA_PREFIX +'js/jquery.min.js',\r\n # '/static/js/jquery-1.9.1.min.js',\r\n # '/static/js/ajax_filtered_fields.js',\r\n )\r\n css = {\r\n # 'all': ('/static/css/ajax_filtered_fields.css',)\r\n }\r\n\r\n class Meta:\r\n fields = ('user', 'dt', 'supplier', 'article', 'good',\r\n 'total', 'comment', 'confirm_dublicate')\r\n model = Ticket\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(TicketAdminForm, self).__init__(*args, **kwargs)\r\n self.fields['dt'].initial = datetime.now()\r\n# if 0 == len(self.data):\r\n # чистим выборку при простом отображении формы\r\n# self.fields['good'].queryset = Good.objects.none()\r\n #получаем Категорию для товара\r\n if self.instance.pk:\r\n self.fields['article'].initial = self.instance.good.article\r\n self.fields['good'].queryset = Good.objects.filter(article=self.instance.good.article)\r\n\r\n def clean(self):\r\n cnf = self.cleaned_data.get('confirm_dublicate')\r\n if not cnf:\r\n good = self.cleaned_data.get('good')\r\n dt = self.cleaned_data.get('dt')\r\n total = self.cleaned_data.get('total')\r\n\r\n logging.debug(self.cleaned_data.get('pk'))\r\n try:\r\n tk = Ticket.objects.all().filter(good=good).filter(\r\n dt=dt).filter(total=total).exclude(pk=self.instance.id)\r\n except:\r\n # Nothing to do here, moving along.\r\n pass\r\n if tk.exists():\r\n raise forms.ValidationError(\r\n [u\"Найдены дубликаты, подтвердите добавление\", tk])\r\n return self.cleaned_data\r\n\r\n\r\nclass TicketGoodListFilter(admin.SimpleListFilter):\r\n title = _(u'Товар/услуга')\r\n parent_key = 'good__article__id__exact'\r\n\r\n # Parameter for the filter that will be used in the URL query.\r\n parameter_name = 'good_id'\r\n\r\n def lookups(self, request, model_admin):\r\n \"\"\"\r\n Returns a list of tuples. The first element in each\r\n tuple is the coded value for the option that will\r\n appear in the URL query. 
The second element is the\r\n human-readable name for the option that will appear\r\n in the right sidebar.\r\n \"\"\"\r\n if self.parent_key in request.GET:\r\n return [(c.id, c.label) for c in Good.objects.filter(article=request.GET[self.parent_key])]\r\n else:\r\n return ()\r\n\r\n def queryset(self, request, queryset):\r\n\r\n \"\"\"\r\n Returns the filtered queryset based on the value\r\n provided in the query string and retrievable via\r\n `self.value()`.\r\n \"\"\"\r\n # Compare the requested value (either '80s' or 'other')\r\n # to decide how to filter the queryset.key\r\n if self.value():\r\n curr_parent = Good.objects.get(pk=self.value()).article.pk\r\n logging.debug(\r\n str(curr_parent) + ' ' + request.GET[self.parent_key])\r\n if (self.parent_key in request.GET) and (curr_parent == int(request.GET[self.parent_key])):\r\n return queryset.filter(good=self.value())\r\n else:\r\n self.used_parameters[self.parameter_name] = None\r\n return queryset\r\n\r\n\r\nclass TicketAdmin(admin.ModelAdmin):\r\n form = TicketAdminForm\r\n\r\n def response_add(self, request, obj, post_url_continue='../%s/'):\r\n opts = obj._meta\r\n# pk_value = obj._get_pk_val()\r\n\r\n msg = _('The %(name)s \"%(obj)s\" was added successfully.') % {'name':\r\n force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}\r\n\r\n if '_addanother' in request.POST:\r\n self.message_user(request, msg + ' ' + (_(\"You may add another %s below.\") % force_unicode(opts.verbose_name)))\r\n add_req = '?'\r\n for key in ('comment', 'user', 'supplier', 'dt', 'good', 'article'):\r\n logging.debug(getattr(obj, key))\r\n add_req += '%s=%s&' % (key, request.POST[key])\r\n return HttpResponseRedirect(request.path + add_req)\r\n\r\n return super(TicketAdmin, self).response_add(request, obj, post_url_continue)\r\n\r\n #fields = ('dt', 'user','total', 'article','good', 'comment' )\r\n #comment = forms.CharField(widget=forms.TextInput(attrs={'size':'140'}))\r\n #comment = forms.CharField(max_length=10)\r\n #comment = forms.Textarea();\r\n #comment = forms.URLField(initial='http://')\r\n #good=ForeignKeyByLetter(Article, field_name=\"label\")\r\n list_select_related = True\r\n list_display = (\r\n 'dt', 'total', 'good', 'article', 'comment', 'supplier_name', 'user')\r\n list_filter = ('dt', 'good__article', TicketGoodListFilter, 'user')\r\n list_per_page = 25\r\n date_hierarchy = 'dt'\r\n ordering = ('-dt',)\r\n save_as = True\r\n\r\nclass GoodInline(admin.TabularInline):\r\n model = Good\r\n #max_num = 3\r\n label = forms.CharField(max_length=200)\r\n extra = 0\r\n\r\nclass ArticleAdmin(admin.ModelAdmin):\r\n fieldsets = [\r\n (None, {'fields': ['label']}),\r\n ]\r\n inlines = [GoodInline]\r\n formfield_overrides = {\r\n models.CharField: {'widget': TextInput(attrs={'size':'200'})},\r\n #models.TextField: {'widget': Textarea(attrs={'rows':4, 'cols':40})},\r\n }\r\n #list_display = ('label', 'article')\r\n #list_filter = ('article',)\r\n #ordering = ('article',)\r\n\r\nclass GoodAdmin(admin.ModelAdmin):\r\n #inlines = [TicketInfoInline]\r\n list_display = ('label', 'article')\r\n list_filter = ('article',)\r\n ordering = ('article',)\r\n\r\nadmin.site.register(Income)\r\nadmin.site.register(Article, ArticleAdmin)\r\nadmin.site.register(Good, GoodAdmin)\r\nadmin.site.register(Ticket, 
TicketAdmin)\r\nadmin.site.register(UserData)\r\nadmin.site.register(Supplier)\r\n","sub_path":"poluchka/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":7940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"317144894","text":"from pyramid.config import Configurator\nfrom pyramid.response import Response\n\n\ndef hello_world(request):\n return Response(\n 'Hello world from Pyramid! \\n',\n content_type='text/plain',\n )\n\n\nconfig = Configurator()\nconfig.add_route('hello', '/hello')\nconfig.add_view(hello_world, route_name='hello')\napp = config.make_wsgi_app()","sub_path":"pyramidapp.py","file_name":"pyramidapp.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"333893808","text":"__author__ = 'ooyanetomohito'\n\n\nclass Node:\n def __init__(self, x):\n self.data = x\n self.left = None\n self.right = None\n def search(node, x):\n while node:\n if node.data == x: return True\n if x < node.data:\n node = node.left\n else:\n node = node.right\n\n return False\n","sub_path":"TestSample/practice/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"469344962","text":"cond_sexo=0\r\ncond_resp =0\r\ncont_id =0\r\ncont_masc = 0\r\ncont_fem =0\r\nresp = 'S'\r\nwhile resp !='N':\r\n cond_sexo = 0\r\n idade = int(input('Digite a sua idade: '))\r\n if idade > 18:\r\n cont_id += 1\r\n while cond_sexo ==0:\r\n sexo = input('Digite o seu sexo: ').upper().strip()\r\n if sexo == 'M' or sexo == 'F':\r\n cond_sexo = 1\r\n if sexo == 'M':\r\n cont_masc += 1\r\n if sexo == 'F' and idade <20:\r\n cont_fem += 1\r\n while cond_resp == 0:\r\n resp = input('Deseja continuar [S/N]: ').upper().strip()\r\n if resp == 'S' or resp == 'N':\r\n cond_resp =1\r\n print(sexo,resp,cont_id,cont_masc,cont_fem)","sub_path":"ex069.py","file_name":"ex069.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"484981451","text":"ex4 = {\n 1 : 35,\n 2 : 36,\n 3 : 40,\n 4 : 44\n}\nwhile True:\n print('''Answer the following algebra question\nIf x = 8, then what is the value of 4(x + 3) ?''')\n for i,j in ex4.items():\n print(i,j, sep = \". \")\n choice = int(input(\"Your choice: \"))\n if(choice == 4):\n print(\"Bingo!\")\n break\n elif(choice == 1 or choice == 2 or choice == 3):\n print(\":(\")\n else:\n print(\"Only allowed 1->4. 
Again!\")\n","sub_path":"ass4/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"206905353","text":"import serial\nimport time\nimport threading\nimport tuxconf as tc\nimport sys\n\nclass rfid(threading.Thread):\n\n def __init__(self):\n\n threading.Thread.__init__(self)\n self.running = True\n self.device = serial.Serial(tc.serial_path,tc.serial_baud,timeout=5)\n self.tag_arrived = False\n\n\n def run(self):\n \n while self.running:\n \n line = self.device.readline().decode('ascii')\n tag_time = str(int(time.time()))\n\n if (len(line) > 2):\n print(\"[TAG] \"+line)\n self.tag_arrived = True\n \n with open(tc.serial_log, 'a') as file:\n file.write(tag_time+\",\"+line+\"\\n\")\n\n self.device.close()\n\n\ndef main():\n print(\"Start RFID scanner\")\n rfid_loop = rfid()\n rfid_loop.setDaemon(True)\n rfid_loop.start()\n print(\"Scanner started\")\n\n try:\n while True:\n continue\n except KeyboardInterrupt:\n rfid.running=False\n sys.exit()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tuxcap_v2/rfid.py","file_name":"rfid.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"274448977","text":"#!/usr/bin/python3\n\"\"\" This is necesary ? \"\"\"\n\n\nclass Square:\n \"\"\" This is a Square class \"\"\"\n\n def __init__(self, size=0, position=(0, 0)):\n \"\"\"Example of docstring on the __init__ method.\n Args:\n size (int): Size of the square obj.\n position (tuple): tuple of position\n \"\"\"\n self.size = size\n self.position = position\n\n @property\n def size(self):\n \"\"\" property\n\n Returns:\n self size\n \"\"\"\n return self.__size\n\n @property\n def position(self):\n \"\"\" position\n\n Returns:\n self postion\n \"\"\"\n return self.__position\n\n @size.setter\n def size(self, value):\n \"\"\"Example of docstring on the __init__ method.\n Args:\n size (int): Size of the square obj.\n \"\"\"\n if type(value) is not int:\n raise TypeError(\"size must be an integer\")\n elif value < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = value\n\n @position.setter\n def position(self, value):\n if (type(value) is not tuple or len(value) is not 2 or\n type(value[0]) is not int or type(value[1]) is not int or\n value[0] < 0 or value[1] < 0):\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value\n\n def area(self):\n \"\"\" area of square\n\n Returns:\n area of square\n \"\"\"\n return self.__size * self.__size\n\n def my_print(self):\n \"\"\" print square \"\"\"\n if self.__size is 0:\n print(\"\")\n else:\n for i in range(self.__position[1]):\n print(\"\")\n for i in range(self.__size):\n if self.__position[0]:\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.__size)\n\n def __str__(self):\n \"\"\" print square \"\"\"\n listm = []\n if self.__size is 0:\n listm.append(\"\")\n else:\n for i in range(self.__position[1]):\n listm.append(\"\\n\")\n for i in range(0, self.__size):\n if self.__position[0]:\n listm.append(\" \" * self.__position[0])\n listm.append(\"#\" * self.__size)\n if i < self.__size - 1:\n listm.append(\"\\n\")\n return (\"\".join(listm))\n","sub_path":"0x06-python-classes/101-square.py","file_name":"101-square.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} 
+{"seq_id":"358105467","text":"#!/usr/bin/env python\n#\n# Copyright (C) 2018 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"Symbolizes stack traces from logcat.\nSee https://developer.android.com/ndk/guides/ndk-stack for more information.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport re\nimport subprocess\nimport sys\n\n\ndef find_llvm_symbolizer():\n \"\"\"Finds the NDK llvm-symbolizer(1) binary.\n\n Returns: An absolute path to llvm-symbolizer(1).\"\"\"\n\n # ndk-stack is installed to $NDK/prebuilt//bin, so from\n # `~/Downloads/android-ndk-r18/prebuilt/linux-x86_64/bin/ndk-stack`...\n # ...get `/usr/enh/Downloads/android-ndk-r18/`:\n ndk_bin = os.path.dirname(os.path.realpath(__file__))\n ndk_root = os.path.abspath(os.path.join(ndk_bin, '../../..'))\n # ...get `linux-x86_64`:\n arch = os.path.basename(os.path.abspath(os.path.join(ndk_bin, '../')))\n # And from there construct the llvm-symbolizer path.\n llvm_bin = os.path.join(ndk_root, 'toolchains', 'llvm', 'prebuilt', arch,\n 'bin')\n return os.path.join(llvm_bin, 'llvm-symbolizer')\n\n\ndef main():\n \"\"\"\"Program entry point.\"\"\"\n parser = argparse.ArgumentParser(\n description='Symbolizes Android crashes.',\n epilog='See .')\n parser.add_argument(\n '-sym',\n '--sym',\n dest='symbol_dir',\n required=True, # TODO: default to '.'?\n help='directory containing unstripped .so files')\n parser.add_argument(\n '-i',\n '-dump',\n '--dump',\n dest='input',\n default='-',\n type=argparse.FileType('r'),\n help='input filename')\n args = parser.parse_args()\n\n if not os.path.exists(args.symbol_dir):\n sys.exit('{} does not exist!\\n'.format(args.symbol_dir))\n\n cmd = [\n find_llvm_symbolizer(), '--demangle', '--functions=linkage',\n '--inlining=true', '--use-symbol-table=true'\n ]\n proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n\n banner = '*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***'\n in_crash = False\n saw_frame = False\n for line in args.input:\n line = line.rstrip()\n\n if not in_crash:\n if banner in line:\n in_crash = True\n saw_frame = False\n print('********** Crash dump: **********')\n continue\n\n for tag in ['Build fingerprint:', 'Abort message:']:\n if tag in line:\n print(line[line.find(tag):])\n continue\n\n # See Backtrace::FormatFrameData in libbacktrace.\n # We're deliberately very loose because NDK users are likely to be\n # looking at crashes on ancient OS releases.\n # TODO: support asan stacks too?\n m = re.match('.* +(#[0-9]+) +pc ([0-9a-f]+) +(([^ ]+).*)', line)\n if m:\n saw_frame = True\n frame = m.group(1)\n pc = m.group(2)\n tail = m.group(3)\n lib = m.group(4)\n\n lib_path = os.path.join(args.symbol_dir, os.path.basename(lib))\n\n out_line = '%s 0x%s %s' % (frame, pc, tail)\n indent = (out_line.find('(') + 1) * ' '\n print(out_line)\n if os.path.exists(lib_path):\n print('\"%s\" 0x%s' % (lib_path, pc), file=proc.stdin)\n while True:\n symbolizer_output = 
proc.stdout.readline().rstrip()\n if not symbolizer_output:\n break\n # TODO: rewrite file names based on a source path?\n print('%s%s' % (indent, symbolizer_output))\n elif saw_frame:\n in_crash = False\n print('Crash dump is completed\\n')\n\n # for asan\n asan_begin = re.compile(r\".*==\\d+==ERROR:\\sAddressSanitizer:.*\")\n asan_end = re.compile(r\".*==\\d+==ABORTING.*\")\n asan_trace = re.compile(r\".*\\s+#\\d+\\s(0x[0-9a-fA-F]+)\\s+\\((.*)\\+(0x[0-9a-fA-F]+)\\).*\")\n asan_trace_header = re.compile(r\".*WRITE of size.*\"\n r\"|.*READ of size.*\"\n r\"|.*Thread .* created by .* here:.*\"\n r\"|.*freed by thread .* here:.*\"\n r\"|.*previously allocated by thread .* here:.*\")\n\n proc.stdout.flush()\n args.input.seek(0)\n\n in_asan = False\n for line in args.input:\n line = line.rstrip()\n\n if not in_asan:\n m = asan_begin.match(line)\n if m:\n in_asan = True\n print(\"##############################ASAN BEGIN##############################\")\n continue\n\n m = asan_trace_header.match(line)\n if m:\n print(line)\n continue\n\n m = asan_trace.match(line)\n if m:\n pc = m.group(1)\n lib = m.group(2)\n addr = m.group(3)\n\n print(line)\n lib_path = os.path.join(args.symbol_dir, os.path.basename(lib))\n if os.path.exists(lib_path):\n print('\"%s\" %s' % (lib_path, addr), file=proc.stdin)\n print('--------------------------------------------------------------------------------------------------------------------------------------------------------------------')\n while True:\n symbolizer_output = proc.stdout.readline().rstrip()\n if not symbolizer_output:\n break\n print('--> %s <--' % symbolizer_output)\n print('')\n elif asan_end.match(line):\n print(\"##############################ASAN END##############################\")\n in_asan = False\n continue\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ndk_stack_asan/ndk-stack-asan.py","file_name":"ndk-stack-asan.py","file_ext":"py","file_size_in_byte":6377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"270834330","text":"import random\nimport re\nimport numpy as np\nimport feedparser\n\ndef textParse(bigString):\n list0fTokens=re.split(r'\\W',bigString)\n return [tok.lower() for tok in list0fTokens if len(tok)>2]\n\n#遍历词汇表中的每个词并统计它在文本中出现的次数,后根据出现次数从高到低对词典进行排序,返回最高的30个单词\ndef calcMostFreq(vocabList,fullText):\n import operator\n freqDict={}\n for token in vocabList:\n freqDict[token]=fullText.count(token)\n sortedFreq=sorted(freqDict.items(),key=operator.itemgetter(1),reverse=True)\n return sortedFreq[:30]\n\ndef localWords(feed1,feed0):\n import feedparser\n docList=[];classList=[];fullText=[]\n minLen=min(len(feed1['entries']),len(feed0['entries']))\n for i in range(minLen):\n wordList=textParse(feed1['entries'][i]['summary'])\n docList.append(wordList)\n fullText.extend(wordList)\n classList.append(1)\n wordList=textParse(feed0['entries'][i]['summary'])\n docList.append(wordList)\n fullText.extend(wordList)\n classList.append(0)\n vocabList=createVocabList(docList)\n top30Words=calcMostFreq(vocabList,fullText)\n\n for pairW in top30Words:\n if pairW[0] in vocabList:\n vocabList.remove(pairW[0])\n trainingSet=list(range(2*minLen));testSet=[]\n\n for i in range(20):\n randIndex=int(random.uniform(0,len(trainingSet)))\n testSet.append(trainingSet[randIndex])\n del(trainingSet[randIndex])\n\n trainMat=[];trainClassses=[]\n\n for docIndex in trainingSet:\n trainMat.append(bagOfWords2VecMN(vocabList,docList[docIndex]))\n trainClassses.append(classList[docIndex])\n 
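# Fit the Naive Bayes model on the random training split, then estimate the error rate on the held-out test documents.\n    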
p0V,p1V,pSpam=trainNB0(np.array(trainMat),np.array(trainClasses))\n    errorCount=0\n    for docIndex in testSet:\n        wordVector=bagOfWords2VecMN(vocabList,docList[docIndex])\n        if classifyNB(np.array(wordVector),p0V,p1V,pSpam)!=classList[docIndex]:\n            errorCount+=1\n    print(\"the error rate is:\",float(errorCount)/len(testSet))\n    return vocabList,p0V,p1V\n\n\ndef trainNB0(trainMatrix,trainCategory):\n    numTrainDocs=len(trainMatrix)\n    numWords=len(trainMatrix[0])\n    pAbusive=sum(trainCategory)/float(numTrainDocs)\n    p0Num=np.ones(numWords)#p0Num=np.zeros(numWords)\n    p1Num=np.ones(numWords)#p1Num=np.zeros(numWords)\n    p0Demo=2.0;p1Demo=2.0\n    #p0Demo=0.0;p1Demo=0.0\n    for i in range(numTrainDocs):\n        if trainCategory[i]==1:\n            p1Num+=trainMatrix[i]\n            p1Demo+=sum(trainMatrix[i])\n        else:\n            p0Num+=trainMatrix[i]\n            p0Demo+=sum(trainMatrix[i])\n    p1Vect=np.log(p1Num/p1Demo) # log probabilities, so classifyNB can sum them\n    p0Vect=np.log(p0Num/p0Demo)\n    return p0Vect,p1Vect,pAbusive\n\n####################################### Naive Bayes classification function\ndef classifyNB(vec2Classify,p0Vec,p1Vec,pClass1):\n    p1=sum(vec2Classify*p1Vec)+np.log(pClass1)\n    p0=sum(vec2Classify*p0Vec)+np.log(1.0-pClass1)\n    if p1>p0:\n        return 1\n    else:\n        return 0\n\n\ndef setOfWords2Vec(vocabList,inputSet):\n    returnVec=[0]*len(vocabList) # create a vector of all zeros\n    for word in inputSet:\n        if word in vocabList:\n            returnVec[vocabList.index(word)]=1\n        else:\n            print(\"the word: %s is not in my vocabulary!\"%word)\n    return returnVec\n\n\ndef bagOfWords2VecMN(vocabList, inputSet):\n    returnVec=[0]*len(vocabList)\n    for word in inputSet:\n        if word in vocabList:\n            returnVec[vocabList.index(word)]+=1\n    return returnVec\n\ndef createVocabList(dataSet):\n    vocabSet=set([]) # create an empty set\n    for document in dataSet:\n        vocabSet=vocabSet | set(document) # take the union of the two sets\n    return list(vocabSet)\n\n# Display the words most associated with each region\ndef getTopWords(ny,sf):\n    import operator\n    vocabList,p0V,p1V=localWords(ny,sf)\n    topNY=[];topSF=[]\n    for i in range(len(p0V)):\n        if(p0V[i]>-6.0):topSF.append((vocabList[i],p0V[i]))\n        if(p1V[i]>-6.0):topNY.append((vocabList[i],p1V[i]))\n    sortedSF=sorted(topSF,key=lambda pair:pair[1],reverse=True)\n    print(\"SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**\")\n    for item in sortedSF:\n        print(item[0])\n    sortedNY=sorted(topNY,key=lambda pair:pair[1],reverse=True)\n    print(\"NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**\")\n    for item in sortedNY:\n        print(item[0])\n\nif __name__ == '__main__':\n    # Replacement feeds, because the ones given in the book are no longer available\n    ny = feedparser.parse('http://www.nasa.gov/rss/dyn/image_of_the_day.rss')\n    sf = feedparser.parse('http://sports.yahoo.com/nba/teams/hou/rss.xml')\n    vocabList,pSF,pNY=localWords(ny,sf)\n    getTopWords(ny,sf)","sub_path":"Advertisement.py","file_name":"Advertisement.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"39846746","text":"import os\n\nimport math\nimport matplotlib.pyplot as plt\nimport torch\nfrom gym import wrappers \nimport numpy as np\nimport wandb\n\n\nfrom network import TD3Agent\nfrom utils import *\n\n\ndef train():\n    # simulation of the agent solving the spacecraft attitude control problem\n    env = make(\"SatelliteContinuous\")\n    #logger\n    wandb.init(project='Satellite-continuous',\n            config={\n            \"batch_size\": 128,\n            \"critic_lr\": 1e-3,\n            \"actor_lr\": 1e-4,\n            \"max_episodes\": 10000,\n            \"max_steps\": 300,\n            \"gamma\": 0.99,\n            \"tau\" : 1e-3,\n            \"buffer_maxlen\": 100000,\n            \"policy_noise\": 0.2,\n            \"policy_freq\": 2,\n            \"noise_clip\": 0.5,\n            \"prioritized_on\": False,\n            \"State\": 'angle:4, ang_rate:4, ang_vel:3',}\n            )\n    config = 
wandb.config\n\n    max_episodes = config.max_episodes\n    max_steps = config.max_steps\n    batch_size = config.batch_size\n\n    policy_noise = config.policy_noise\n    policy_freq = config.policy_freq\n    noise_clip = config.noise_clip\n\n    gamma = config.gamma\n    buffer_maxlen = config.buffer_maxlen\n    tau = config.tau\n    critic_lr = config.critic_lr\n    actor_lr = config.actor_lr\n\n    agent = TD3Agent(env, gamma, tau, buffer_maxlen, critic_lr, actor_lr, True, max_episodes * max_steps,\n                     policy_freq, policy_noise, noise_clip)\n    # wandb.watch([agent.critic,agent.actor], log=\"all\")\n    # curr_dir = os.path.abspath(os.getcwd())\n    # agent = torch.load(curr_dir + \"/models/spacecraft_control_ddpg.pkl\")\n    # agent.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    episode_rewards = mini_batch_train(env, agent, max_episodes, max_steps, batch_size)\n\n    plt.figure()\n    plt.plot(episode_rewards)\n    plt.xlabel(\"Episodes\")\n    plt.ylabel(\"Reward\")\n\t# plt.show()\n    # plt.savefig(curr_dir + \"/results/plot_reward_hist.png\")\n\n    curr_dir = os.path.abspath(os.getcwd())\n    if not os.path.isdir(\"models\"):\n        os.mkdir(\"models\")\n    torch.save(agent, curr_dir + \"/models/spacecraft_control_ddpg.pkl\")\n\ndef evaluate():\n    # simulation of the agent solving the spacecraft attitude control problem\n    env = make(\"SatelliteContinuous\")\n    # uncomment for recording a video of simulation\n    # env = wrappers.Monitor(env, './video', force=True)\n\n    curr_dir = os.path.abspath(os.getcwd())\n\n    agent = torch.load(curr_dir + \"/models/spacecraft_control_ddpg.pkl\",map_location='cpu')\n    agent.device = torch.device('cpu')\n    agent.train = False\n\n    state = env.reset()\n    print('The goal angle :'+ str(env.goalEuler) + \" the target multi:\" + str(env.multi))\n    r = 0\n    qe = np.empty((0,4))\n    q = np.empty((0,4))\n    w = np.empty((0,3))\n    actions = np.empty((0,3))\n    r_hist = np.empty((0,3)) \n\n    dt = 0.1\n    simutime = 30\n    max_steps = int(simutime/dt) # dt is 0.1\n\n    for i in range(max_steps):\n        action = agent.get_action(state)\n        # action = np.squeeze(action)\n        next_error_state, reward, done, next_state, _ = env.step(action)\n        # env.render()\n        q=np.append(q,next_state[0:4].reshape(1,-1),axis=0)\n        qe=np.append(qe,next_error_state[0:4].reshape(1,-1),axis=0)\n        w=np.append(w,next_error_state[8:11].reshape(1,-1),axis=0)\n        r += reward\n        actions = np.append(actions, action.reshape(1,-1),axis=0)\n        # r_hist = np.append(r_hist, np.array([-env.r1,-env.r2,-env.r3]).reshape(1,-1),axis=0)\n\n        state = next_error_state\n\n    env.close()\n    #------------------------------- Plot the results ----------------------------------\n    #region\n    #show the total reward\n    print(\"Total Reward is : \" + str(r))\n    # Reshape the collected data\n    q = q.reshape([-1,4])\n    qe = qe.reshape([-1,4])\n    w = w.reshape([-1,3])\n    \n    # plot the angle and action curve\n    #-------------------plot settings------------------------------\n    plt.rcParams['font.family'] = 'Times New Roman' # set the font family\n    plt.rcParams['mathtext.fontset'] = 'stix' # set the math font\n    plt.rcParams[\"font.size\"] = 10 # changes the overall font size\n    plt.rcParams['xtick.labelsize'] = 10 # changes only the x-axis tick labels\n    plt.rcParams['ytick.labelsize'] = 10 # changes only the y-axis tick labels\n    plt.rcParams['xtick.direction'] = 'in' # x axis in\n    plt.rcParams['ytick.direction'] = 'in' # y axis in \n    plt.rcParams['axes.linewidth'] = 1.0 # axis line width\n    plt.rcParams['axes.grid'] = True # make grid\n    plt.rcParams[\"legend.loc\"] = \"best\" # legend position; \"best\" picks a good spot automatically\n    plt.rcParams[\"legend.frameon\"] = True # whether to frame the legend; True draws a frame, False does not\n    plt.rcParams[\"legend.framealpha\"] = 1.0 # frame transparency; takes a value from 0.0 to 1.0\n    plt.rcParams[\"legend.facecolor\"] = \"white\" # background color\n    # plt.rcParams[\"legend.edgecolor\"] = \"black\" # frame edge color\n    plt.rcParams[\"legend.fancybox\"] = True # True rounds the corners of the legend frame\n    #--------------------------------------------------------------    \n    curr_dir = os.path.abspath(os.getcwd())\n    if not os.path.isdir(\"results\"):\n        os.mkdir(\"results\")\n    \n    plt.figure(figsize=(12,5),dpi=100)\n    plt.tight_layout()\n    plt.subplots_adjust(wspace=0.3, hspace=0.3)\n    # plt.figure(figsize=(5.0,3.5),dpi=100)\n    plt.subplot(231)\n    plt.plot(np.arange(max_steps)*dt, q[:,0],label =r\"$q_{0}$\")\n    plt.plot(np.arange(max_steps)*dt, q[:,1],label =r\"$q_{1}$\")\n    plt.plot(np.arange(max_steps)*dt, q[:,2],label =r\"$q_{2}$\")\n    plt.plot(np.arange(max_steps)*dt, q[:,3],label =r\"$q_{3}$\")\n    plt.title('Quaternion')\n    plt.ylabel('quaternion value')\n    plt.xlabel(r'time [s]')\n    plt.legend()\n    plt.tight_layout()\n    plt.grid(color='k', linestyle='dotted', linewidth=0.6)\n    # plt.savefig(curr_dir + \"/results/plot_quaternion.png\")\n\n    # plt.figure(figsize=(5.0,3.5),dpi=100)\n    plt.subplot(232)\n    plt.plot(np.arange(max_steps)*dt, qe[:,0],label =r\"$q_{0}$\")\n    plt.plot(np.arange(max_steps)*dt, qe[:,1],label =r\"$q_{1}$\")\n    plt.plot(np.arange(max_steps)*dt, qe[:,2],label =r\"$q_{2}$\")\n    plt.plot(np.arange(max_steps)*dt, qe[:,3],label =r\"$q_{3}$\")\n    plt.title('Quaternion Error')\n    plt.ylabel('quaternion value')\n    plt.xlabel(r'time [s]')\n    plt.legend()\n    plt.tight_layout()\n    plt.grid(color='k', linestyle='dotted', linewidth=0.6)\n    # plt.savefig(curr_dir + \"/results/plot_error_quaternion.png\")\n\n    angle = np.array([np.rad2deg(env.dcm2euler(env.quaternion2dcm(q[i,:]))).tolist() for i in range(max_steps)])\n    angle = angle.reshape([-1,3])\n    print(angle[-1,:])\n    # plt.figure(figsize=(5.0,3.5),dpi=100)\n    plt.subplot(233)\n    plt.plot(np.arange(max_steps)*dt, angle[:,0],label = r\"$\\phi$\")\n    plt.plot(np.arange(max_steps)*dt, angle[:,1],label = r\"$\\theta$\")\n    plt.plot(np.arange(max_steps)*dt, angle[:,2],label = r\"$\\psi$\")\n    # plt.title('Action')\n    plt.ylabel('angle [deg]')\n    plt.xlabel(r'time [s]')\n    plt.legend(loc=\"lower center\", bbox_to_anchor=(0.5,1.05), ncol=3)\n    plt.tight_layout()\n    # plt.ylim(-20, 20)\n    plt.grid(True, color='k', linestyle='dotted', linewidth=0.8)\n    # plt.savefig(curr_dir + \"/results/plot_angle.png\")\n\n    # plt.figure(figsize=(5.0,3.5),dpi=100)\n    plt.subplot(234)\n    plt.plot(np.arange(max_steps)*dt, w[:,0],label =r\"$\\omega_{x}$\")\n    plt.plot(np.arange(max_steps)*dt, w[:,1],label =r\"$\\omega_{y}$\")\n    plt.plot(np.arange(max_steps)*dt, w[:,2],label =r\"$\\omega_{z}$\")\n    plt.title('Angular velocity')\n    plt.ylabel('angular velocity [rad/s]')\n    plt.xlabel(r'time [s]')\n    plt.legend()\n    plt.tight_layout()\n    plt.grid(color='k', linestyle='dotted', linewidth=0.6)\n    # plt.savefig(curr_dir + \"/results/plot_ang_vel.png\")\n\n    # plt.figure(figsize=(5.0,3.5),dpi=100)\n    plt.subplot(235)\n    plt.plot(np.arange(max_steps)*dt, actions[:,0],label = r\"$\\tau_{x}$\")\n    plt.plot(np.arange(max_steps)*dt, actions[:,1],label = r\"$\\tau_{y}$\")\n    plt.plot(np.arange(max_steps)*dt, actions[:,2],label = r\"$\\tau_{z}$\")\n    plt.title('Action')\n    plt.ylabel('Input torque [Nm]')\n    plt.xlabel(r'time [s]')\n    plt.legend()\n    plt.tight_layout()\n    plt.grid(color='k', linestyle='dotted', linewidth=0.6)\n    # plt.savefig(curr_dir + \"/results/plot_torque.png\")\n    plt.savefig(curr_dir + \"/results/total_results.png\")\n\n    # plt.figure(figsize=(8,4),dpi=100)\n    # plt.plot(np.arange(max_steps)*dt, r_hist[:,0],label = r\"$q$ 
pnlty\")\n # plt.plot(np.arange(max_steps)*dt, r_hist[:,1],label = r\"$\\omega$ pnlty\")\n # plt.plot(np.arange(max_steps)*dt, r_hist[:,2],label = r\"$\\tau$ pnlty\")\n # plt.plot(np.arange(max_steps)*dt, r_hist[:,0]+r_hist[:,1]+r_hist[:,2],label = r\"$toal$\",linestyle='dotted')\n # # plt.title('Action')\n # plt.ylabel('reward')\n # plt.xlabel(r'time [s]')\n # plt.tight_layout()\n # plt.legend()\n # # plt.ylim(-20, 20)\n # plt.grid(True, color='k', linestyle='dotted', linewidth=0.8)\n # plt.savefig(curr_dir + \"/results/reward_compo.png\")\n\n plt.show()\n #endregion\n # -------------------------結果プロット終わり--------------------------------\ndef env_test():\n\n # simulation of the agent solving the cartpole swing-up problem\n env = make(\"SatelliteContinuous\")\n curr_dir = os.path.abspath(os.getcwd())\n env.reset()\n print('The goal angle :'+ str(env.goalEuler) + \" the target multi:\" + str(env.multi))\n r = 0\n qe = np.empty((0,4))\n q = np.empty((0,4))\n w = np.empty((0,3))\n actions = np.empty((0,3))\n\n kp = 0.7\n kd = 1.9\n Kp = np.array([[0,kp,0,0],\n [0,0,kp,0],\n [0,0,0,kp]])\n Kd = np.array([[kd,0,0],\n [0,kd,0],\n [0,0,kd]])\n action = np.array([0,0,0]).reshape(1,3)\n actions = np.append(actions, action,axis=0)\n\n dt = 0.1\n simutime = 50\n simulation_iterations = int(simutime/dt) -1 # dt is 0.01\n\n for i in range(1, simulation_iterations):\n action = np.squeeze(action)\n next_error_state, reward, done, next_state, _ = env.step(action)\n # env.render()\n # q=np.append(q,next_state[0].reshape(1,-1),axis=0)\n # qe=np.append(qe,next_error_state[0].reshape(1,-1),axis=0)\n # w=np.append(w,next_error_state[2].reshape(1,-1),axis=0)\n q=np.append(q,next_state[:4].reshape(1,-1),axis=0)\n qe=np.append(qe,next_error_state[:4].reshape(1,-1),axis=0)\n w=np.append(w,next_error_state[-3:].reshape(1,-1),axis=0)\n r += reward\n # state = next_state\n #----------------control law (PID controller)-----------------------\n action = -Kp@next_error_state[:4].reshape(-1,1)-Kd@next_error_state[-3:].reshape(-1,1)\n actions = np.append(actions, action.reshape(1,-1),axis=0)\n #--------------------------------------------------------------------\n\n # env.close()\n #show the total reward\n print(\"Total Reward is : \" + str(r))\n # データの形の整理\n q = q.reshape([-1,4])\n qe = qe.reshape([-1,4])\n w = w.reshape([-1,3])\n # angle = [e for i in]\n\n # plot the angle and action curve\n curr_dir = os.path.abspath(os.getcwd())\n if not os.path.isdir(\"results\"):\n os.mkdir(\"results\")\n plt.figure(figsize=(12,5),dpi=100)\n # plt.figure(figsize=(5.0,3.5),dpi=100\n plt.subplot(231)\n plt.plot(np.arange(simulation_iterations-1)*dt, q[:,0],label =r\"$q_{0}$\")\n plt.plot(np.arange(simulation_iterations-1)*dt, q[:,1],label =r\"$q_{1}$\")\n plt.plot(np.arange(simulation_iterations-1)*dt, q[:,2],label =r\"$q_{2}$\")\n plt.plot(np.arange(simulation_iterations-1)*dt, q[:,3],label =r\"$q_{3}$\")\n plt.title('Quaternion')\n plt.ylabel('quaternion value')\n plt.xlabel(r'time [s]')\n plt.legend()\n plt.grid(color='k', linestyle='dotted', linewidth=0.6)\n # plt.savefig(curr_dir + \"/results/plot_angle.png\")\n\n # plt.figure(figsize=(5.0,3.5),dpi=100)\n plt.subplot(232)\n plt.plot(np.arange(simulation_iterations-1)*dt, qe[:,0],label =r\"$q_{0}$\")\n plt.plot(np.arange(simulation_iterations-1)*dt, qe[:,1],label =r\"$q_{1}$\")\n plt.plot(np.arange(simulation_iterations-1)*dt, qe[:,2],label =r\"$q_{2}$\")\n plt.plot(np.arange(simulation_iterations-1)*dt, qe[:,3],label =r\"$q_{3}$\")\n plt.title('Quaternion Error')\n 
plt.ylabel('quaternion value')\n    plt.xlabel(r'time [s]')\n    plt.legend()\n    plt.grid(color='k', linestyle='dotted', linewidth=0.6)\n    # plt.savefig(curr_dir + \"/results/plot_angle.png\")\n\n    angle = np.array([np.rad2deg(env.dcm2euler(env.quaternion2dcm(q[i,:]))).tolist() for i in range(simulation_iterations-1)])\n    angle = angle.reshape([-1,3])\n    # plt.figure(figsize=(5.0,3.5),dpi=100)\n    plt.subplot(233)\n    plt.plot(np.arange(simulation_iterations-1)*dt, angle[:,0],label = r\"$\\phi$\")\n    plt.plot(np.arange(simulation_iterations-1)*dt, angle[:,1],label = r\"$\\theta$\")\n    plt.plot(np.arange(simulation_iterations-1)*dt, angle[:,2],label = r\"$\\psi$\")\n    # plt.title('Action')\n    plt.ylabel('angle [deg]')\n    plt.xlabel(r'time [s]')\n    plt.legend(loc=\"lower center\", bbox_to_anchor=(0.5,1.05), ncol=3)\n    plt.tight_layout()\n    # plt.ylim(-20, 20)\n    plt.grid(True, color='k', linestyle='dotted', linewidth=0.8)\n\n    # plt.figure(figsize=(5.0,3.5),dpi=100)\n    plt.subplot(234)\n    plt.plot(np.arange(simulation_iterations-1)*dt, w[:,0],label =r\"$\\omega_{x}$\")\n    plt.plot(np.arange(simulation_iterations-1)*dt, w[:,1],label =r\"$\\omega_{y}$\")\n    plt.plot(np.arange(simulation_iterations-1)*dt, w[:,2],label =r\"$\\omega_{z}$\")\n    plt.title('Angular velocity')\n    plt.ylabel('angular velocity [rad/s]')\n    plt.xlabel(r'time [s]')\n    plt.legend()\n    plt.grid(color='k', linestyle='dotted', linewidth=0.6)\n    # plt.savefig(curr_dir + \"/results/plot_angle.png\")\n\n    # plt.figure(figsize=(5.0,3.5),dpi=100)\n    plt.subplot(235)\n    plt.plot(np.arange(simulation_iterations)*dt, actions[:,0],label = r\"$\\tau_{x}$\")\n    plt.plot(np.arange(simulation_iterations)*dt, actions[:,1],label = r\"$\\tau_{y}$\")\n    plt.plot(np.arange(simulation_iterations)*dt, actions[:,2],label = r\"$\\tau_{z}$\")\n    plt.title('Action')\n    plt.ylabel('Input torque [Nm]')\n    plt.xlabel(r'time [s]')\n    plt.legend()\n    plt.grid(color='k', linestyle='dotted', linewidth=0.6)\n\n    # plt.savefig(curr_dir + \"/results/plot_action.png\")\n    plt.show()\n\n\nif __name__ == '__main__':\n    plt.close()\n    val = input('Enter the number 1:train 2:evaluate 3:env_test > ')\n    if val == '1':\n        train()\n    elif val == '2':\n        evaluate()\n    elif val == '3':\n        env_test()\n    else:\n        print(\"You entered the wrong number, run again and choose from 1 or 2 or 3.\")\n","sub_path":"td3/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"550540060","text":"\nimport pprint\nfrom pprint import pprint\nimport os\nimport sys\nimport time\nimport win32con\nimport win32gui\nfrom win32api import GetModuleHandle\nimport json\n\nclass WindowsBalloonTip:\n    def __init__(self, title, msg):\n        message_map = {win32con.WM_DESTROY: self.OnDestroy, }\n        # Register the Window class.\n        wc = win32gui.WNDCLASS()\n        self.destroyed = False\n        hinst = wc.hInstance = GetModuleHandle(None)\n        wc.lpszClassName = \"PythonTaskbar\"\n        wc.lpfnWndProc = message_map # could also specify a wndproc.\n        class_atom = win32gui.RegisterClass(wc)\n        # Create the Window.\n        style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU\n        self.hwnd = win32gui.CreateWindow(class_atom, \"Taskbar\", style,\n                                          0, 0, win32con.CW_USEDEFAULT,\n                                          win32con.CW_USEDEFAULT, 0, 0,\n                                          hinst, None)\n        win32gui.UpdateWindow(self.hwnd)\n        icon_path_name = os.path.abspath(os.path.join(sys.path[0],\n                                                      \"balloontip.ico\"))\n        icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE\n        try:\n            hicon = win32gui.LoadImage(hinst, icon_path_name,\n                                       win32con.IMAGE_ICON, 0, 0, icon_flags)\n        except:\n            hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)\n        flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP\n        nid = (self.hwnd, 0, flags, win32con.WM_USER+20, hicon, \"tooltip\")\n        win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)\n        win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY,\n                                  (self.hwnd, 0, win32gui.NIF_INFO,\n                                   win32con.WM_USER+20, hicon,\n                                   \"Balloon tooltip\", msg, 200, title))\n        # self.show_balloon(title, msg)\n        time.sleep(20)\n        win32gui.DestroyWindow(self.hwnd)\n        win32gui.UnregisterClass(class_atom, hinst)\n        self.destroyed = True\n\n    def OnDestroy(self, hwnd, msg, wparam, lparam):\n        nid = (self.hwnd, 0)\n        win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid)\n        win32gui.PostQuitMessage(0) # Terminate the app.\n\n    def isDestroyed(self):\n        return self.destroyed\n\n\ndef balloon_tip(title, msg):\n    w = WindowsBalloonTip(title, msg)\n    return w\n\n#############################################\n\nimport bs4, requests\n\ndef findSharePrice(url):\n    res = requests.get(url)\n    res.raise_for_status()\n    soup = bs4.BeautifulSoup(res.text, 'html.parser')\n    elems = soup.select('body > div.container.container--game.wrapper.clearfix > div.content-region.region--full.game-count.full-width > div > div > h3')\n    #print('counter: '+ elems[0].text.strip())\n    count = elems[0].text.strip()\n    count = count.replace(\",\", \"\")\n    #print('My counter: '+ count)\n    return int(count)\n\nprint('Enter the threshold value for which you want to get notified: ')\nthreshold = input()\n\nwhile(1):\n    actual = findSharePrice('https://www.marketwatch.com/vse')\n    if(actual == int(threshold) ):\n        print('Value is equal to threshold')\n        balloon_tip('Threshold Value Reached...', ' Put your message here. Value is equal to threshold')\n        break;\n\n","sub_path":"observer_notifier/observer_notifier.py","file_name":"observer_notifier.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"168910766","text":"######## Exercises 1 ########\n\n#### Input and Output ####\n\n# Write a program called \"hello.py\" which asks the user for a name, and then personally greets them.\n\ni = input(\"Enter your name: \")\n\nprint(\"Hello \", i, \"!\", sep='')\n\n# Write a program called repeater.py which reads a string and then prints it out twice.\n\ni = input(\"Enter a string: \")\nprint(i*2, sep=' ')\n\n# Write a program called multiplyer.py which reads a number, then prints out double that number.\n\ni = int(input(\"Enter a number: \"))\nprint(i*2)\n\n#### Box Printing ####\n\n# Write a program which prints a box. You should use the plus, minus and pipe characters.\n\nprint(\"+---+\\n| |\\n+---+\\n\")\n\n# Write a program which reads a string and prints a box around it.\n\ni = input(\"Enter a string: \")\nl = len(i) + 2\n\nprint(\"+\", \"-\"*l, \"+\", sep='')\nprint(\"| \", i, \" |\", sep='')\nprint(\"+\", \"-\"*l, \"+\", sep='')\n\n#### Calculator ####\n\n# Write a program which reads two numbers and prints their sum.\n\n#a = int(input(\"Enter a number: \"))\n#b = int(input(\"Enter a number: \"))\n#print(a + b)\n\na, b = [int(a)\n        for a in input(\"Enter two numbers separated by a space: \").split()]\nprint(a + b)\n\n# Subtract\nprint(a-b)\n\n# Multiply\nprint(a*b)\n\n# Divide\nprint(a/b)\n\n# General purpose calculator\n\na, o, b = [x for x in input(\n    \"Enter a mathematical expression separated by spaces: \").split()]\na = int(a)\nb = int(b)\n\nif o == \"/\" or o == \"\\\\\":\n    print(a/b)\nelif o == \"+\":\n    print(a+b)\nelif o == \"-\":\n    print(a-b)\nelif o == \"*\":\n    print(a*b)\nelse:\n    print(\"Invalid input!\")\n\n\ndef calculator():\n    # Calculator for expressions of any length!\n    opr = []\n    nmbr = []\n\n    for val in input(\"Enter a mathematical expression separated by spaces: \").split():\n        check = list(val)\n        # print(check)\n        test = True\n        for char in check:\n            # print(ord(char),ord(\"0\"),ord(\"9\"))\n            if ord(char) not in range(ord(\"0\"), ord(\"9\") + 1):\n                test = False\n        # print(test)\n        if test == False:\n            opr = opr + [val]\n        else:\n            nmbr = nmbr + [int(val)]\n\n    # print(opr,nmbr)\n\n    for i in range(len(opr)):\n        if i == 0:\n            if opr[i] == \"/\" or opr[i] == \"\\\\\":\n                res = nmbr[i]/nmbr[i+1]\n            elif opr[i] == \"+\":\n                res = nmbr[i]+nmbr[i+1]\n            elif opr[i] == \"-\":\n                res = nmbr[i]-nmbr[i+1]\n            elif opr[i] == \"*\":\n                res = nmbr[i]*nmbr[i+1]\n            else:\n                return(\"Invalid input!\")\n        else:\n            if opr[i] == \"/\" or opr[i] == \"\\\\\":\n                res = res / nmbr[i+1]\n            elif opr[i] == \"+\":\n                res = res + nmbr[i+1]\n            elif opr[i] == \"-\":\n                res = res - nmbr[i+1]\n            elif opr[i] == \"*\":\n                res = res * nmbr[i+1]\n            else:\n                return(\"Invalid input!\")\n\n    print(\"Result is\", res)\n\n\ncalculator()\n","sub_path":"Python/problemSets/caculator.py","file_name":"caculator.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"296265319","text":"from scapy.all import *\na = \" \"\n\nsrc = \"10.0.2.11\"\ndst = \"10.0.2.12\"\nsport = 11111 #random.randint(1024,65535)\ndport = 8333\nseq = 3587496741#random.randint(1587496741,9587496741)\nack = 0\npld_VERSION = '\\xf9\\xbe\\xb4\\xd9version\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00]\\xf6\\xe0\\xe2'\n\n#SYN\nip = IP(src = src, dst = dst)\nSYN = TCP(sport=sport, dport=dport, flags='S', seq = seq)\n##send(ip/SYN)\nSYNACK = sr1(ip/SYN)\n\n\n#SYN-ACK\nACK = TCP(sport=sport, dport=dport, flags='A', seq=SYNACK.ack, ack=SYNACK.seq+1)\nsend(ip/ACK)\n#FINACK=sr1(ip/ACK)\n'''\n#Bitcoin version\n#tcp = TCP(sport=sport, dport=dport, flags='PA', seq=FINACK.ack, ack=FINACK.seq+1)\ntcp = TCP(sport=sport, dport=dport, flags='PA', seq=ACK.seq+1, ack=ACK.ack)\nVERACK = sr1(ip/tcp/pld_VERSION)\n'''\n\n'''\ndef pkt_callback(pkt):\n    src = \"10.0.2.12\"\n    dst = \"10.0.2.7\"\n    sport = \" \"\n    dport = \" \"\n    seq = 0\n    ack = 0\n    pld_VERSION = '\\xf9\\xbe\\xb4\\xd9version\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00]\\xf6\\xe0\\xe2'\n    ip = IP(src = src, dst = dst)\n\n    #if pkt[IP].src == \"10.0.2.15\" and pkt[IP].dst == \"10.0.2.7\":\n    #pkt.show()\n    try:\n        if pkt[IP].src == 
\"10.0.2.12\" and pkt[IP].dst == \"10.0.2.7\" and pkt[TCP].payload:\n pkt.show()\n print(\"!!!!! outbound session, flags:\", pkt[TCP].flags)\n elif pkt[IP].src == \"10.0.2.12\" and pkt[IP].dst == \"10.0.2.7\" and pkt[TCP].flags == 16L:\n pkt.show()\n print(\"!!!!! outbound session, flags:\", pkt[TCP].flags) \n sport = pkt[TCP].sport\n dport = pkt[TCP].dport\n #synchronize the seq and ack\n if seq < pkt[TCP].seq or seq == pkt[TCP].seq:\n seq = pkt[TCP].seq\n #print(\"$$$seq:\", seq)\n if ack < pkt[TCP].ack or ack == pkt[TCP].ack:\n ack = pkt[TCP].ack\n #print(\"!!!!ack:\", ack)\n #construct the pkt_spoof\n tcp = TCP(sport=sport, dport=dport, flags='PA', seq=seq, ack =ack)\n pkt_spoof = ip/tcp/pld_VERSION\n print(pkt_spoof)\n send(pkt_spoof)\n print(\"!!!! $p00f scceeds!\")\n elif pkt[IP].dst == \"10.0.2.12\" and pkt[IP].src == \"10.0.2.7\" and pkt[TCP].payload:\n pkt.show()\n print(\"!!!!! inbound session, flags:\", pkt[TCP].flags)\n sport = pkt[TCP].dport\n dport = pkt[TCP].sport\n seq = pkt[TCP].ack\n ack = pkt[TCP].seq + 35\n tcp = TCP(sport=sport, dport=dport, flags='A', seq=seq, ack =ack)\n pkt_spoof = ip/tcp\n print(pkt_spoof)\n send(pkt_spoof)\n print(\"!!!! reply ack\")\n except:\n pass\n \n\nsniff(iface=\"enp0s3\",prn=pkt_callback, filter=\"tcp\", store=1)\n'''\n","sub_path":"tools/Networking/bitcoin_bogus_node.py","file_name":"bitcoin_bogus_node.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"143829413","text":"import database\nfrom functions import path_f, cli_f\nfrom queries import city_q, team_q\nfrom classes import team\nfrom rules import sad_rules\nimport time\nimport random\nimport math\nimport traceback, sys\n\n# http://woarl.com/board/viewtopic.php?f=114&t=3326\n# max_batches = 100# Shouldn't need more than this\nconfig = {\n\t\"batches\":\t100,\n\t\"hops\":\t\t1,\n\t\"max hops\":\t20,\n}\n\ndef sad_batch(w, verbose=True):\n\tcity_dict = w.live_cities()\n\t\n\t# raise Exception(\"Alter wealth production rate\")\n\t# raise Exception(\"Make it so cities take into account what they can afford\")\n\t\n\t# Reset buys\n\ttotal_demand = 0\n\ttotal_surplus = 0\n\twealth = 0\n\tneed_len = 0\n\tcities_needing_grain = 0\n\tfor city_id, the_city in city_dict.items():\n\t\tthe_city.buys = []\n\t\tthe_city.sells = []\n\t\t\n\t\t\n\t\ttotal_demand += sum([(-a if a < 0 else 0) for r, a in the_city.goods.items()])\n\t\t# total_demand += (-the_city.goods['Grain'] if the_city.goods['Grain'] < 0 else 0)\n\t\ttotal_surplus += sum([(a if a > 0 else 0) for r, a in the_city.goods.items()])\n\t\t# total_surplus += (the_city.goods['Grain'] if the_city.goods['Grain'] > 0 else 0)\n\t\twealth += the_city.wealth\n\t\t\n\t\tif the_city.goods['Grain'] < 0:\n\t\t\tcities_needing_grain += 1\n\t\t\n\t\tneed_len += len(the_city.current_demands())\n\t\n\t# 451\n\tprint(\"Kruzen: %s\" % str(city_dict[451].goods))\n\tprint(\"Kruzen: %s\" % str(city_dict[451].wealth))\n\tprint(\"Demand: %s, Avg demand: %s\" % (total_demand, total_demand/len(city_dict.keys())))\n\tprint(\"Surplus: %s\" % total_surplus)\n\tprint(\"Wealth: %s, avg wealth: %s\" % (wealth, wealth/len(city_dict.keys())))\n\tprint(\"cities_needing_grain: %s\" % cities_needing_grain)\n\tprint(\"Avg needs: %s\" % (need_len/len(city_dict.keys())))\n\t\n\t# Find new buys\n\tfind_buys(w, city_dict, verbose)\n\t\n\t# Appove them\n\tapprove_buys(w, city_dict, verbose)\n\t\n\tsale_total = 0\n\tfor city_id, the_city in city_dict.items():\n\t\tfor s in 
the_city.sells:\n\t\t\t# print(s)\n\t\t\tsale_total += s[2]\n\t\n\tprint(\"Quantity sold: %s\" % sale_total)\n\t\n\t# Execute\n\texecute_buys(w, city_dict, verbose)\n\t\n\tprint(\"Batch complete\\n\")\n\ndef find_buys(w, city_dict, verbose=True):\n\t# city_dict = w.live_cities()\n\t\n\t# End point stopper\n\t# w.suppliers = {r:set() for r in sad_rules.res_list}\n\t# for city_id, the_city in city_dict.items():\n\t# \tthe_city.best_price = {}\n\t# \t\n\t# \tfor r in sad_rules.res_list:\n\t# \t\tif the_city.goods[r] > 0:\n\t# \t\t\tw.suppliers[r].add(city_id)\n\t\n\tfor city_id, the_city in city_dict.items():\n\t\tthe_city.needs_cache = the_city.current_demands()\n\t\n\t\n\tif verbose:\n\t\tit = cli_f.progressbar(range(0, config['hops']+1), \"Hopping: \", 60, True)\n\telse:\n\t\tit = range(0, config['hops']+1)\n\t\n\tfor h in it:\n\t# for h in range(0, max_hops+1):\n\t\t# for city_id, the_city in cli_f.progressbar(city_dict.items(), \"Hopping %d: \" % h, 60, True):\n\t\tfor city_id, the_city in city_dict.items():\n\t\t\t_discover(w, city_dict, the_city, h)\n\t\n\tif verbose:\n\t\tit = cli_f.progressbar(city_dict.items(), \"Finding buys: \", 60, True)\n\telse:\n\t\tit = city_dict.items()\n\t\n\tfor city_id, the_city in it:\n\t\tif the_city.wealth <= 5: continue\n\t\t\n\t\tneeds = the_city.current_demands()\n\t\t\n\t\toffers = {}\n\t\tfor r in needs:\n\t\t\toffers[r] = []\n\t\t\n\t\t# For each supply we want\n\t\tfor c, d in the_city.connections.items():\n\t\t\tif city_id in city_dict[c].connections:\n\t\t\t\tfor r in needs:\n\t\t\t\t\tif city_dict[c].best_price[config['hops']][r][0] > 1000: continue\n\t\t\t\t\t\n\t\t\t\t\t# Price is made from price saved at other city times the get_price multiplier\n\t\t\t\t\toffer_price = get_price(w, the_city, city_dict[c]) * city_dict[c].best_price[config['hops']][r][0]\n\t\t\t\t\tavailiable = the_city.wealth/len(needs)\n\t\t\t\t\tamount = min(availiable/offer_price, -the_city.goods[r])\n\t\t\t\t\toffers[r].append((offer_price, list(city_dict[c].best_price[config['hops']][r][1]) + [the_city.id], amount))\n\t\t\n\t\t# Now to turn them into Buys\n\t\tfor r in needs:\n\t\t\tavailiable = the_city.wealth/len(needs)\n\t\t\tamount_needed = -the_city.goods[r]\n\t\t\t\n\t\t\twhile amount_needed > 0 and len(offers[r]) > 0:\n\t\t\t\tbest_offer = None\n\t\t\t\tbest_price = 99999\n\t\t\t\t\n\t\t\t\tfor i, o in enumerate(offers[r]):\n\t\t\t\t\tif o[0] < best_price:\n\t\t\t\t\t\tbest_offer = i\n\t\t\t\t\t\tbest_price = o[0]\n\t\t\t\t\n\t\t\t\taccepted_offer = offers[r][best_offer]\n\t\t\t\t\n\t\t\t\tdel(offers[r][best_offer])\n\t\t\t\t\n\t\t\t\t# print(\"\")\n\t\t\t\t# print(accepted_offer)\n\t\t\t\tcity_dict[accepted_offer[1][0]].sells.append((r, accepted_offer[1], accepted_offer[2]))\n\n# Asks the city what the best price it can get for a given supply is in a certain range\n# Best price is a tuple (price, city_pathway)\n# Cost: 9999 = can't get it\n# Pathway uses a stack system, build from supplier, read from buyer\ndef _discover(w, city_dict, the_city, hops_allowed=0):\n\t# If 0 hops then we're looking at suppliers\n\tif hops_allowed == 0:\n\t\tthe_city.best_price[0] = {}\n\t\tfor r in sad_rules.res_list:\n\t\t\tif the_city.goods[r] > 0:\n\t\t\t\tthe_city.best_price[0][r] = (sad_rules.base_rate[r], [the_city.id])\n\t\t\telse:\n\t\t\t\tthe_city.best_price[0][r] = (9999, [])\n\t\t\n\t# If we can hop then we check our neighbours for their best hop -1\n\telse:\n\t\t# Start by setting best price as what we can do with 1 fewer hop\n\t\tif hops_allowed in 
the_city.best_price:\n\t\t\ttot = sum([a[0] for r, a in the_city.best_price[hops_allowed].items()])\n\t\t\tif tot < 5:\n\t\t\t\tprint(tot)\n\t\tthe_city.best_price[hops_allowed] = {}\n\t\t\n\t\tfor r in sad_rules.res_list:\n\t\t\tthe_city.best_price[hops_allowed][r] = the_city.best_price[hops_allowed-1][r]\n\t\t\n\t\t# Now to try it with one more hop!\n\t\tfor c, d in the_city.connections.items():\n\t\t\tif the_city.id in city_dict[c].connections:\n\t\t\t\tfor r in sad_rules.res_list:\n\t\t\t\t\tif city_dict[c].best_price[hops_allowed-1][r][0] > 1000: continue\n\t\t\t\t\t\n\t\t\t\t\tbest_price = the_city.best_price[hops_allowed-1][r][0]\n\t\t\t\t\t\n\t\t\t\t\t# Price is made from price saved at other city times the get_price multiplier\n\t\t\t\t\tnew_price = get_price(w, the_city, city_dict[c]) * city_dict[c].best_price[hops_allowed-1][r][0]\n\t\t\t\t\t\n\t\t\t\t\tif new_price < best_price:\n\t\t\t\t\t\tthe_city.best_price[hops_allowed][r] = (new_price, list(city_dict[c].best_price[hops_allowed-1][r][1]) + [the_city.id])\n\nprice_cache = {}\ndef get_price(w, buyer, seller):\n\t\"\"\"\n\tPrice is always made by the code returning a price\n\tthus any price returned already has the multipliers worked out\n\t\"\"\"\n\t\n\t# Call count: 4360709 (that's with max_hops = 9)\n\t# No cache - 18.52, time in func: 4.7, approx time per city: 2.0s\n\t# With cache - 16.87, time in func: 3.4, approx time per city: 1.7s\n\t\n\tif (buyer.id, seller.id) not in price_cache:\n\t\tprice_cache[(buyer.id, seller.id)] = (w.distance_cache[(seller.id, buyer.id)]) * (1 + w.tax_cache[(seller.team, buyer.team)]/100)\n\treturn price_cache[(buyer.id, seller.id)]\n\ndef approve_buys(w, city_dict, verbose=True):\n\tteam_dict = w.teams()\n\t\n\tif verbose:\n\t\tit = cli_f.progressbar(city_dict.items(), \"Approving buys: \", 60, True)\n\telse:\n\t\tit = city_dict.items()\n\t\n\tsales_count = 0\n\tsales_lowered = 0\n\t\n\tfor city_id, the_city in it:\n\t\tif len(the_city.sells) == 0: continue\n\t\t\n\t\t# print(\"\\n\\n\")\n\t\t# print(the_city.sells)\n\t\t# exit()\n\t\t\n\t\taccepted_sells = []\n\t\tmarked_sells = []\n\t\t\n\t\t# We need to work out which sells we're gonna do\n\t\t# preferential treatment is given to those that we have a low tax rate to\n\t\t# it's possible we can honour more than one sell\n\t\t# the sells should all be for just a single supply\n\t\t\n\t\t# If the sells are not for a single supply it will be an inefficent system as only 1 favoured is tracked\n\t\t\n\t\t# Temporary counters\n\t\tgoods = sum([(a if a > 0 else 0) for r, a in the_city.goods.items()])\n\t\tgoods_dict = dict(the_city.goods)\n\t\t\n\t\t# Keep going while we can\n\t\ti = 0\n\t\twhile goods > 0 and len(accepted_sells) < len(the_city.sells):\n\t\t\ti += 1\n\t\t\tif i > 100:\n\t\t\t\tbreak\n\t\t\t\n\t\t\tfavoured = None\n\t\t\tfavoured_tax = 9999\n\t\t\t\n\t\t\tfor i, s in enumerate(the_city.sells):\n\t\t\t\tif i in marked_sells: continue\n\t\t\t\t\n\t\t\t\t# s[1] is the path\n\t\t\t\t# 0 is us\n\t\t\t\t# 1 is the first buyer\n\t\t\t\tbuyer = s[1][1]\n\t\t\t\t\n\t\t\t\tif w.tax_cache[(the_city.team, city_dict[buyer].team)] < favoured_tax:\n\t\t\t\t\tfavoured_i = i\n\t\t\t\t\tfavoured = s\n\t\t\t\t\tfavoured_tax = w.tax_cache[(the_city.team, city_dict[buyer].team)]\n\t\t\t\n\t\t\t# print(\"Fav: %s, %s\" % (favoured_tax, str(favoured)))\n\t\t\t\n\t\t\t# Actual amount needs to be limited\n\t\t\t# marked_sells.append(favoured)\n\t\t\tmarked_sells.append(favoured_i)\n\t\t\tfinal_buyer = favoured[1][-1]\n\t\t\tr = 
favoured[0]\n\t\t\t\n\t\t\tsales_count += 1\n\t\t\tif goods_dict[r] < favoured[2]:\n\t\t\t\tsales_lowered += 1\n\t\t\t\n\t\t\tamount = min(goods_dict[r], favoured[2])\n\t\t\t\n\t\t\t# print(amount, \" \")\n\t\t\t\n\t\t\t# Move counters, add amount to the sell\n\t\t\tfavoured = (favoured[0], favoured[1], amount)\n\t\t\taccepted_sells.append(favoured)\n\t\t\tgoods -= amount\n\t\t\tgoods_dict[r] -= amount\n\t\t\t\n\t\t\t# goods = -100\n\t\t\n\t\tthe_city.sells = accepted_sells\n\t\t# for s in the_city.sells:\n\t\t# \tprint(s)\n\t\t# exit()\n\t\n\tprint(\"Sale count: %s\" % sales_count)\n\tprint(\"Sales lowered: %s\" % sales_lowered)\n\ndef execute_buys(w, city_dict, verbose=True):\n\t# city_dict = w.live_cities()\n\t\n\tif verbose:\n\t\tit = cli_f.progressbar(city_dict.items(), \"Executing buys: \", 60, True)\n\telse:\n\t\tit = city_dict.items()\n\t\n\tfor city_id, the_city in it:\n\t\tfor s in the_city.sells:\n\t\t\t_execute(w, city_dict, s)\n\n\ndef _execute(w, city_dict, sell):\n\t\"\"\"\n\tif self.source != None:\n\t\tself.source.execute(root=False)\n\telse:\n\t\tself.seller.goods[self.resource] -= self.amount\n\t\n\tloss = self.price - (self.price / (_distance(self.buyer, self.seller)))\n\t\n\t# Loss is paid by buyer but never reaches the seller\n\tself.seller.wealth += self.price - loss\n\tself.buyer.wealth -= self.price\n\t\n\tif root:\n\t\tself.buyer.goods[self.resource] += self.amount\n\t\"\"\"\n\t\n\t# Aliases\n\tr = sell[0]\n\troute = sell[1]\n\tamount = sell[2]\n\t\n\t# Split up the route into city pairs\n\tpath = []\n\tfor i in range(1, len(route)):\n\t\tpath.append((route[i-1], route[i]))\n\t\n\t# For each path part, redistribute wealth\n\t# price_counter = sad_rules.base_rate[r]\n\tprevious_sale = sad_rules.base_rate[r] * amount\n\tfor seller, buyer in path:\n\t\t# price_cache[(buyer.id, seller.id)] = (w.distance_cache[(seller.id, buyer.id)]) * (1 + w.tax_cache[(seller.team, buyer.team)]/100)\n\t\t\n\t\t# Sell with distance and tax taken into account\n\t\tsale_price = previous_sale * price_cache[(buyer, seller)]\n\t\t\n\t\t# Seller gets price without the distance, that's lost\n\t\tprofit = sale_price/w.distance_cache[(seller, buyer)]\n\t\t\n\t\t# Apply wealth difference\n\t\tcity_dict[seller].wealth += profit\n\t\tcity_dict[buyer].wealth -= sale_price\n\t\t\n\t\tprevious_sale = sale_price\n\t\n\t# Apply difference to first buyer and seller's goods\n\tbuyer = route[-1]\n\tseller = route[0]\n\t\n\tcity_dict[buyer].goods[r] += amount\n\tcity_dict[seller].goods[r] -= amount\n\ndef supply_and_demand(the_world):\n\tcity_dict = the_world.live_cities()\n\tteam_dict = the_world.active_teams()\n\t\n\tthe_world.mass_get_team_techs()\n\tthe_world.mass_get_team_deities()\n\tthe_world.mass_get_team_evolutions()\n\t\n\t# Randomise city production type\n\trandom.seed()\n\tr = -1\n\tfor k, v in city_dict.items():\n\t\tr += 1\n\t\tif r >= len(sad_rules.res_list): r = 0\n\t\tv.supply_good = r\n\t\t# v.supply_good = random.randrange(0, len(sad_rules.res_list))\n\t\n\t# Build a cache of taxes and borders\n\tthe_world.tax_cache = {}\n\t# the_world.border_cache = {}\n\tfor t1 in team_dict.keys():\n\t\tfor t2 in team_dict.keys():\n\t\t\tif t1 == t2:\n\t\t\t\tthe_world.tax_cache[(t1, t2)] = 0\n\t\t\telse:\n\t\t\t\tthe_world.tax_cache[(t1, t2)] = the_world.get_taxes(t1, t2)\n\t\t\t\t# the_world.border_cache[(t1, t2)] = the_world.get_border(t1, t2)\n\t\n\t# Build a distance cache, K1 going to K2\n\tthe_world.distance_cache = {}\n\tfor k1, c1 in city_dict.items():\n\t\tfor k2, c2 in 
city_dict.items():\n\t\t\tif k1 == k2:\n\t\t\t\tthe_world.distance_cache[(k1, k2)] = 0\n\t\t\telse:\n\t\t\t\tif k2 in c1.connections:\n\t\t\t\t\tthe_world.distance_cache[(k1, k2)] = (1 + (sad_rules.distance_percentage * c1.connections[k2]/100))\n\t\n\t# Total, Min, Max\n\tproduction = {r:[0, 9999, -9999] for r in sad_rules.res_list}\n\tdemand = {r:[0, 9999, -9999] for r in sad_rules.res_list}\n\twealth = [0, 9999, -9999]\n\t\n\tcity_count = len(city_dict.keys())\n\tcity_size = sum([c.size for i, c in city_dict.items()])/city_count\n\t\n\t# First work out demand\n\tfor city_id, the_city in city_dict.items():\n\t\tsad_rules.produce_wealth(the_world, the_city)\n\t\twealth[0] += the_city.wealth\n\t\twealth[1] = min(wealth[1], the_city.wealth)\n\t\twealth[2] = max(wealth[2], the_city.wealth)\n\t\tthe_city.satisfied = False\n\t\t\n\t\tfor r in sad_rules.res_list:\n\t\t\td = sad_rules.demand[r](the_world, the_city, the_world._teams[the_city.team])\n\t\t\tdemand[r][0] += d\n\t\t\tdemand[r][1] = min(d, demand[r][1])\n\t\t\tdemand[r][2] = max(d, demand[r][2])\n\t\t\tthe_city.goods[r] = -d\n\t\t\n\t\tif the_city.goods[\"Linen\"] > the_city.goods[\"Wool\"]:\n\t\t\tthe_city.wool_is_nice = True\n\t\t\n\t\tcity_supply = sad_rules.res_list[the_city.supply_good]\n\t\t\n\t\tp = sad_rules.supply[city_supply](the_world, the_city, the_world._teams[the_city.team])\n\t\tproduction[city_supply][0] += p\n\t\tproduction[city_supply][1] = min(p, production[city_supply][1])\n\t\tproduction[city_supply][2] = max(p, production[city_supply][2])\n\t\tthe_city.goods[city_supply] += p\n\t\t# print(sad_rules.supply[\"Grain\"](the_city, the_world._teams[the_city.team].tech_levels), sad_rules.demand[\"Grain\"](the_city, the_world._teams[the_city.team].tech_levels))\n\t\n\tpre_report = generate_sad_pre_report(the_world)\n\t\n\t# Now begin to satisfy it\n\tsatisfied = False\n\tbatches = 0\n\twhile not satisfied:\n\t\tsad_batch(the_world)\n\t\t\n\t\tsatisfied = True\n\t\tfor city_id, the_city in city_dict.items():\n\t\t\tif not the_city.satisfied and the_city.wealth > 0:\n\t\t\t\tsatisfied = False\n\t\t\n\t\t# Force a breakout\n\t\tbatches += 1\n\t\tconfig['hops'] = min(config['hops'] + 1, config['max hops'])\n\t\tif batches > config['batches']:\n\t\t\tsatisfied = True\n\t\n\tpost_report = generate_sad_post_report(the_world)\n\t\n\t# print(pre_report['res_surplus']['Grain'])\n\t\n\treturn {\n\t\t\"pre\": pre_report,\n\t\t\"post\": post_report,\n\t\t\"production\": production,\n\t\t\"demand\": demand,\n\t\t\"wealth\": wealth,\n\t}\n\ndef print_reports(the_world, report_dict, reports=['res_summary', 'production', 'demand', 'wealth']):\n\tcity_dict = the_world.cities()\n\t\n\tcity_count = len(city_dict.keys())\n\tcity_size = sum([c.size for i, c in city_dict.items()])/city_count\n\t\n\tif 'res_summary' in reports:\n\t\tprint(print_res_dict(report_dict['pre']['res_summary'],\t\tthe_world, \"''Summary''\"), \"\\n\")\n\t\n\tif 'res_surplus' in reports:\n\t\tprint(print_res_dict(report_dict['pre']['res_surplus'],\t\tthe_world, \"''Surplus''\"), \"\\n\")\n\t\n\tif 'res_demand' in reports:\n\t\tprint(print_res_dict(report_dict['pre']['res_demand'],\t\tthe_world, \"''Demand''\"), \"\\n\")\n\t\n\tif 'res_producers' in reports:\n\t\tprint(print_res_dict(report_dict['pre']['res_producers'],\tthe_world, \"''Producers'' (out of %d)\" % city_count), \"\\n\")\n\t\n\tksize = city_size/1000\n\t\n\tif 'production' in reports:\n\t\tproduction = report_dict['production']\n\t\tprint(database.shell_text(\"''Production'' Min Max Avg Ratio\"))\n\t\tfor r 
in sad_rules.res_list:\n\t\t\tavg = production[r][0]/city_count\n\t\t\n\t\t\tprint(\"{res:17} {min:6} {max:6} {avg:6} {ratio:6}\".format(\n\t\t\t\tres = r,\n\t\t\t\ttotal = production[r][0],\n\t\t\t\tmin = round(production[r][1], 2),\n\t\t\t\tmax = round(production[r][2], 2),\n\t\t\t\tavg = round(avg, 2),\n\t\t\t\tratio = round(avg/ksize, 2),\n\t\t\t))\n\t\tprint(\"\")\n\t\n\tif 'demand' in reports:\n\t\tdemand = report_dict['demand']\n\t\t\n\t\tprint(database.shell_text(\"\\n''Demand'' Min Max Avg Ratio\"))\n\t\tfor r in sad_rules.res_list:\n\t\t\tavg = demand[r][0]/city_count\n\t\t\n\t\t\tprint(\"{res:17} {min:6} {max:6} {avg:6} {ratio:6}\".format(\n\t\t\t\tres = r,\n\t\t\t\ttotal = demand[r][0],\n\t\t\t\tmin = round(demand[r][1], 2),\n\t\t\t\tmax = round(demand[r][2], 2),\n\t\t\t\tavg = round(avg, 2),\n\t\t\t\tratio = round(avg/ksize, 2),\n\t\t\t))\n\t\n\tif 'wealth' in reports:\n\t\twealth = report_dict['wealth']\n\t\tavg = wealth[0]/city_count\n\t\t\n\t\tprint(database.shell_text(\"\\n''Wealth''\\n Min Max Avg Ratio\"))\n\t\tprint(\"{min:6} {max:6} {avg:6} {ratio:6}\".format(\n\t\t\ttotal = wealth[0],\n\t\t\tmin = round(wealth[1], 2),\n\t\t\tmax = round(wealth[2], 2),\n\t\t\tavg = round(avg, 2),\n\t\t\tratio = round(avg/ksize, 2),\n\t\t))\n\t\n\t# Attempt at ratios\n\tif 'ratios' in reports:\n\t\tprint(database.shell_text(\"\\n''Ratios''\"))\n\t\t# print(report_dict['pre'].keys())\n\t\tproduction = report_dict['production']\n\t\tdemand = report_dict['demand']\n\t\tfor r in sad_rules.res_list:\n\t\t\tprint(\"{res:17} {ratio:6}\".format(\n\t\t\t\tres = r,\n\t\t\t\tratio = round(production[r][0]/demand[r][0], 2),\n\t\t\t))\n\ndef print_res_dict(res_dict, the_world, title=\"\", one_line=False):\n\tcity_dict = the_world.live_cities()\n\toutput = []\n\t\n\tif title != \"\":\n\t\toutput.append(database.shell_text(title))\n\t\n\tfor r in sad_rules.res_list:\n\t\toutput.append(\"{0}: {1} avg: {2}\".format(r, res_dict[r], round(res_dict[r]/len(the_world.live_cities()), 2)))\n\t\n\treturn \"\\n\".join(output)\n\ndef generate_sad_pre_report(the_world):\n\t\"\"\"Runs through the cities of the world and finds out how many of them have XYZ\"\"\"\n\tcity_dict = the_world.live_cities()\n\t\n\tres_summary\t\t= {r:0 for r in sad_rules.res_list}\n\tres_surplus\t\t= {r:0 for r in sad_rules.res_list}\n\tres_demand\t\t= {r:0 for r in sad_rules.res_list}\n\tres_producers\t= {r:0 for r in sad_rules.res_list}\n\t\n\tfor city_id, the_city in city_dict.items():\n\t\tres_producers[sad_rules.res_list[the_city.supply_good]] += 1\n\t\tfor r in sad_rules.res_list:\n\t\t\tamount = the_city.goods[r]\n\t\t\t\n\t\t\tif amount > 0:\n\t\t\t\tres_surplus[r] += amount\n\t\t\telse:\n\t\t\t\tres_demand[r] -= amount\n\t\t\tres_summary[r] += amount\n\t\n\treturn {\n\t\t\"res_producers\":\tres_producers,\n\t\t\"res_summary\":\t\tres_summary,\n\t\t\"res_surplus\":\t\tres_surplus,\n\t\t\"res_demand\":\t\tres_demand,\n\t}\n\ndef generate_sad_post_report(the_world):\n\t\"\"\"Runs through the cities of the world and finds out how many of them have XYZ\"\"\"\n\tcity_dict = the_world.live_cities()\n\t\n\treturn {}\n\n","sub_path":"functions/sad_f copy 9-11.py","file_name":"sad_f copy 9-11.py","file_ext":"py","file_size_in_byte":17478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"591483406","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nfrom datetime import datetime\nimport discord\nimport os\nimport urllib.parse\n\nimport emojis\nimport excel\nimport 
pss_assert\nimport pss_core as core\nimport pss_fleet as fleet\nimport pss_login as login\nimport pss_lookups as lookups\nimport pss_tournament as tourney\nimport pss_user as user\nimport settings\nimport utility as util\n\n\n# ---------- Constants ----------\n\nFLEET_KEY_NAME = 'AllianceId'\nFLEET_DESCRIPTION_PROPERTY_NAME = 'AllianceName'\n\nFLEET_SHEET_COLUMN_NAMES = [\n 'Timestamp',\n 'Fleet',\n 'Player name',\n 'Rank',\n 'Last Login Date',\n 'Trophies',\n 'Stars',\n 'Join Date',\n 'Logged in ago',\n 'Joined ago'\n]\nFLEET_SHEET_COLUMN_TYPES = [\n settings.EXCEL_COLUMN_FORMAT_DATETIME,\n None,\n None,\n None,\n settings.EXCEL_COLUMN_FORMAT_DATETIME,\n settings.EXCEL_COLUMN_FORMAT_NUMBER,\n settings.EXCEL_COLUMN_FORMAT_NUMBER,\n settings.EXCEL_COLUMN_FORMAT_DATETIME,\n None,\n None\n]\n\n\n\n\n\n\n\n\n# ---------- Helper functions ----------\n\nasync def _get_fleet_details_by_info(fleet_info: dict, fleet_users_infos: dict) -> list:\n fleet_info = await _get_fleet_info_by_id(fleet_info[FLEET_KEY_NAME])\n\n division_design_id = fleet_info['DivisionDesignId']\n fleet_name = fleet_info[FLEET_DESCRIPTION_PROPERTY_NAME]\n fleet_description = fleet_info['AllianceDescription'].strip()\n member_count = int(fleet_info['NumberOfMembers'])\n min_trophy_required = fleet_info['MinTrophyRequired']\n ranking = util.get_ranking(fleet_info['Ranking'])\n requires_approval = fleet_info['RequiresApproval'].lower() == 'true'\n stars = int(fleet_info['Score'])\n trophies = sum([int(user_info['Trophy']) for user_info in fleet_users_infos.values()])\n\n if requires_approval:\n fleet_type = 'Private'\n else:\n fleet_type = 'Public'\n division = lookups.DIVISION_DESIGN_ID_TO_CHAR[division_design_id]\n\n lines = [f'**```{fleet_name}```**```']\n if fleet_description:\n lines.append(f'{fleet_description}``````')\n lines.append(f'Ranking - {ranking}')\n lines.append(f'Min trophies - {min_trophy_required}')\n lines.append(f'Members - {member_count}')\n lines.append(f'Trophies - {util.get_reduced_number_compact(trophies)}')\n if division != '-':\n lines.append(f'Division - {division}')\n lines.append(f'Stars - {util.get_reduced_number_compact(stars)}')\n lines.append(f'Type - {fleet_type}')\n\n lines[-1] += '```'\n\n return lines\n\n\nasync def _get_fleet_info_by_name(fleet_name: str, exact: bool = True):\n fleet_infos = await _get_fleet_infos_by_name(fleet_name)\n if exact:\n for fleet_info in fleet_infos.values():\n if fleet_info[FLEET_DESCRIPTION_PROPERTY_NAME] == fleet_name:\n return fleet_info\n if fleet_infos:\n return fleet_infos[0]\n else:\n return None\n\n\nasync def _get_fleet_info_from_tournament_data(fleet_info: dict, fleet_users_infos: dict, fleet_data: dict) -> list:\n fleet_id = fleet_info[FLEET_KEY_NAME]\n if fleet_id in fleet_data.keys():\n fleet_info['Score'] = fleet_data[fleet_id]['Score']\n return await _get_fleet_details_by_info(fleet_info, fleet_users_infos)\n\n\ndef _get_fleet_sheet_lines(fleet_users_infos: dict, retrieval_date: datetime, fleet_name: str = None) -> list:\n result = [FLEET_SHEET_COLUMN_NAMES]\n\n for user_info in fleet_users_infos.values():\n logged_in_ago = retrieval_date - util.parse_pss_datetime(user_info['LastLoginDate'])\n joined_ago = retrieval_date - util.parse_pss_datetime(user_info['AllianceJoinDate'])\n if fleet_name is None and FLEET_DESCRIPTION_PROPERTY_NAME in user_info.keys():\n fleet_name = user_info[FLEET_DESCRIPTION_PROPERTY_NAME]\n line = [\n util.format_excel_datetime(retrieval_date),\n fleet_name,\n user_info[user.USER_DESCRIPTION_PROPERTY_NAME],\n 
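# The entries below follow FLEET_SHEET_COLUMN_NAMES: rank, Excel-formatted login/join timestamps, trophies, stars, and human-readable time deltas.\n            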
user_info['AllianceMembership'],\n util.convert_pss_timestamp_to_excel(user_info['LastLoginDate']),\n int(user_info['Trophy']),\n int(user_info['AllianceScore']),\n util.convert_pss_timestamp_to_excel(user_info['AllianceJoinDate']),\n util.get_formatted_timedelta(logged_in_ago, include_relative_indicator=False),\n util.get_formatted_timedelta(joined_ago, include_relative_indicator=False)\n ]\n result.append(line)\n return result\n\n\nasync def get_full_fleet_info_as_text(fleet_info: dict, fleet_data: dict = None, user_data: dict = None, data_date: datetime = None) -> (list, list):\n \"\"\"Returns a list of lines for the post, as well as the path to the spreadsheet created\"\"\"\n fleet_name = fleet_info[FLEET_DESCRIPTION_PROPERTY_NAME]\n fleet_id = fleet_info[FLEET_KEY_NAME]\n retrieval_date = util.get_utcnow()\n fleet_users_infos = await _get_fleet_users_by_id(fleet_id)\n if fleet_users_infos:\n fleet_info = list(fleet_users_infos.values())[0]['Alliance']\n else:\n fleet_info = await _get_fleet_info_by_name(fleet_name)\n\n post_content = await _get_fleet_details_by_info(fleet_info, fleet_users_infos)\n fleet_sheet_contents = _get_fleet_sheet_lines(fleet_users_infos, retrieval_date)\n fleet_sheet_path_current = excel.create_xl_from_data(fleet_sheet_contents, fleet_name, retrieval_date, FLEET_SHEET_COLUMN_TYPES)\n file_paths = [fleet_sheet_path_current]\n\n if fleet_data and fleet_id in fleet_data.keys() and user_data and data_date:\n fleet_info = fleet_data[fleet_id]\n fleet_name = fleet_info[fleet.FLEET_DESCRIPTION_PROPERTY_NAME]\n fleet_users_infos = dict({user_id: user_info for user_id, user_info in user_data.items() if user_info['AllianceId'] == fleet_id})\n fleet_sheet_contents = _get_fleet_sheet_lines(fleet_users_infos, data_date, fleet_name)\n file_name = f'{fleet_name}_tournament-{data_date.year}-{util.get_month_short_name(data_date).lower()}.xlsx'\n fleet_sheet_path_past = excel.create_xl_from_data(fleet_sheet_contents, fleet_name, data_date, FLEET_SHEET_COLUMN_TYPES, file_name=file_name)\n file_paths.append(fleet_sheet_path_past)\n\n return post_content, file_paths\n\n\nasync def _get_search_fleets_base_path(fleet_name: str) -> str:\n access_token = await login.DEVICES.get_access_token()\n result = f'AllianceService/SearchAlliances?accessToken={access_token}&skip=0&take=100&name={util.url_escape(fleet_name)}'\n return result\n\n\nasync def _get_get_alliance_base_path(fleet_id: str) -> str:\n access_token = await login.DEVICES.get_access_token()\n result = f'AllianceService/GetAlliance?accessToken={access_token}&allianceId={fleet_id}'\n return result\n\n\nasync def _get_search_fleet_users_base_path(fleet_id: str) -> str:\n access_token = await login.DEVICES.get_access_token()\n result = f'AllianceService/ListUsers?accessToken={access_token}&skip=0&take=100&allianceId={fleet_id}'\n return result\n\n\n\n\n\n\n\n\n\n\n# ---------- Alliance info ----------\n\nasync def get_fleet_details_by_name(fleet_name: str, as_embed: bool = settings.USE_EMBEDS) -> list:\n pss_assert.valid_parameter_value(fleet_name, 'fleet_name', min_length=0)\n\n fleet_infos = list((await _get_fleet_infos_by_name(fleet_name)).values())\n return fleet_infos\n\n\ndef get_fleet_search_details(fleet_info: dict) -> str:\n fleet_name = fleet_info[FLEET_DESCRIPTION_PROPERTY_NAME]\n fleet_trophies = fleet_info['Trophy']\n fleet_stars = fleet_info['Score']\n fleet_division = int(fleet_info['DivisionDesignId'])\n trophies = f' {emojis.trophy} {fleet_trophies}'\n if fleet_division > 0:\n stars = f' {emojis.star} 
{fleet_stars}'\n    else:\n        stars = ''\n    result = f'{fleet_name}{trophies}{stars}'\n    return result\n\n\nasync def _get_fleet_info_by_id(fleet_id: str) -> dict:\n    path = await _get_get_alliance_base_path(fleet_id)\n    fleet_data_raw = await core.get_data_from_path(path)\n    fleet_info = core.xmltree_to_dict3(fleet_data_raw)\n    return fleet_info\n\n\nasync def _get_fleet_infos_by_name(fleet_name: str) -> dict:\n    path = await _get_search_fleets_base_path(fleet_name)\n    fleet_data_raw = await core.get_data_from_path(path)\n    fleet_infos = core.xmltree_to_dict3(fleet_data_raw)\n    return fleet_infos\n\n\nasync def _get_fleet_users_by_id(alliance_id: str) -> dict:\n    path = await _get_search_fleet_users_base_path(alliance_id)\n    fleet_users_data_raw = await core.get_data_from_path(path)\n    fleet_users_infos = core.xmltree_to_dict3(fleet_users_data_raw)\n    return fleet_users_infos\n\n\nasync def get_fleet_users_by_info(fleet_info: dict) -> dict:\n    fleet_id = fleet_info[FLEET_KEY_NAME]\n    return await _get_fleet_users_by_id(fleet_id)\n\n\n\n\n\n\n\n\n\n\n# ---------- Stars ----------\n\ndef get_fleet_users_stars_from_info(fleet_info: dict, fleet_users_infos: dict, retrieved_date: datetime = None) -> list:\n    fleet_name = fleet_info[FLEET_DESCRIPTION_PROPERTY_NAME]\n    division = lookups.DIVISION_DESIGN_ID_TO_CHAR[fleet_info['DivisionDesignId']]\n\n    fleet_users_infos = util.sort_entities_by(list(fleet_users_infos.values()), [('AllianceScore', int, True), (user.USER_KEY_NAME, int, False)])\n    fleet_users_infos_count = len(fleet_users_infos)\n\n    lines = [f'**{fleet_name} member stars (division {division})**']\n    for i, user_info in enumerate(fleet_users_infos, 1):\n        stars = user_info['AllianceScore']\n        user_name = util.escape_markdown(user_info[user.USER_DESCRIPTION_PROPERTY_NAME])\n        if i < fleet_users_infos_count:\n            difference = int(user_info['AllianceScore']) - int(fleet_users_infos[i]['AllianceScore'])\n        else:\n            difference = 0\n        lines.append(f'**{i}.** {stars} (+{difference}) {emojis.star} {user_name}')\n\n    if retrieved_date is not None:\n        lines.append(util.get_historic_data_note(retrieved_date))\n\n    return lines\n\n\ndef get_fleet_users_stars_from_tournament_data(fleet_info: dict, fleet_data: dict, user_data: dict, retrieved_date: datetime) -> list:\n    fleet_id = fleet_info[FLEET_KEY_NAME]\n    fleet_users_infos = {}\n    if fleet_id in fleet_data.keys():\n        fleet_info['DivisionDesignId'] = fleet_data[fleet_id]['DivisionDesignId']\n        fleet_users_infos = dict({user_info[user.USER_KEY_NAME]: user_info for user_info in user_data.values() if user_info[FLEET_KEY_NAME] == fleet_id})\n    return get_fleet_users_stars_from_info(fleet_info, fleet_users_infos, retrieved_date=retrieved_date)\n\n\n\n\n\n\n\n\n\n\n\n\n# ---------- Testing ----------\n\n#if __name__ == '__main__':\n#    test_fleets = ['Fallen An']\n#    for fleet_name in test_fleets:\n#        os.system('clear')\n#        is_tourney_running = tourney.is_tourney_running()\n#        fleet_infos = await get_fleet_details_by_name(fleet_name)\n#        lines = [get_fleet_search_details(fleet_info) for fleet_info in fleet_infos]\n#        for line in lines:\n#            print(line)\n","sub_path":"src/pss_fleet.py","file_name":"pss_fleet.py","file_ext":"py","file_size_in_byte":10837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"386942418","text":"print('Enter a palindrome word')\ncheck_word = input()\n\n\nwhile True:\n    letters_list = list(check_word)\n    #print(letters_list)\n    reverse_letters = letters_list[::-1]\n    palindrom = ''.join(reverse_letters)\n    
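# 'palindrom' now holds the reversed word; the input is a palindrome when it equals the original.\n    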
#print(palindrom)\n if check_word == palindrom:\n print(check_word, 'este un palindrom')\n break\n else:\n print(check_word, 'nu este un palindrom')\n print('Adauga alt cuvant')\n check_word = input()\n\n\n\n","sub_path":"polindrom.py","file_name":"polindrom.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"286785825","text":"# Import standard libraries.\nimport math\nimport tempfile\nimport warnings\nimport numbers\n\n# Import external libraries.\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom matplotlib.patches import Patch\nimport seaborn as sns\nimport skbio as sb\nfrom skbio.stats.ordination import OrdinationResults\nfrom scipy import stats\nfrom scipy.spatial.distance import euclidean\nfrom skbio.stats.composition import clr\n\n# Import QIIME 2 libraries.\nimport qiime2\nfrom qiime2 import Artifact\nfrom qiime2 import Metadata\nfrom qiime2 import Visualization\nimport dokdo\n\ndef taxa_cols(df):\n \"\"\"Returns metadata columns from DataFrame object.\"\"\"\n cols = []\n for col in df.columns:\n if 'Unassigned' in col:\n cols.append(col)\n elif '__' in col:\n cols.append(col)\n else:\n continue\n return cols\n\ndef _get_mf_cols(df):\n \"\"\"Returns metadata columns from DataFrame object.\"\"\"\n cols = []\n for column in df.columns:\n if 'Unassigned' in column:\n continue\n elif '__' in column:\n continue\n else:\n cols.append(column)\n return cols\n\ndef _filter_samples(df, mf, exclude_samples, include_samples):\n \"\"\"Returns DataFrame objects after sample filtering.\"\"\"\n if exclude_samples and include_samples:\n m = (\"Cannot use 'exclude_samples' and \"\n \"'include_samples' arguments together\")\n raise ValueError(m)\n elif exclude_samples:\n for x in exclude_samples:\n for y in exclude_samples[x]:\n i = mf[x] != y\n df = df.loc[i]\n mf = mf.loc[i]\n elif include_samples:\n for x in include_samples:\n i = mf[x].isin(include_samples[x])\n df = df.loc[i]\n mf = mf.loc[i]\n else:\n pass\n return (df, mf)\n\ndef _sort_by_mean(df):\n \"\"\"Returns DataFrame object after sorting taxa by mean relative abundance.\"\"\"\n a = df.div(df.sum(axis=1), axis=0)\n a = a.loc[:, a.mean().sort_values(ascending=False).index]\n return df[a.columns]\n\ndef _pretty_taxa(s):\n \"\"\"Returns pretty taxa name.\"\"\"\n if isinstance(s, matplotlib.text.Text):\n s = s.get_text()\n ranks = list(reversed(s.split(';')))\n\n for i, rank in enumerate(ranks):\n if rank in ['Others', 'Unassigned']:\n return rank\n\n if rank == '__':\n continue\n\n if rank.split('__')[1] is '':\n continue\n\n if 'uncultured' in rank:\n continue\n\n # The species name can be sometimes tricky to parse because it could\n # be full (e.g. Helicobacter pylori) or partial (e.g. pylori). In the\n # latter case, I will borrow the genus name (e.g. 
Helicobacter) to\n # form the full species name.\n if 's__' in rank:\n rank = rank.split('__')[1]\n\n if len(rank.split('_')) == 1:\n genus = ranks[i+1].split('__')[1].split('_')[0]\n species = rank.split('_')[0]\n rank = f'{genus} {species}'\n else:\n rank = rank.replace('_', ' ')\n\n if '__' in rank:\n rank = rank.split('__')[1]\n\n return rank\n\ndef _artist(\n ax, title=None, title_fontsize=None, xlabel=None, xlabel_fontsize=None,\n ylabel=None, ylabel_fontsize=None, zlabel=None, zlabel_fontsize=None,\n xticks=None, yticks=None, xticklabels=None, xticklabels_fontsize=None,\n yticklabels=None, yticklabels_fontsize=None, xrot=None, xha=None,\n xmin=None, xmax=None, ymin=None, ymax=None, xlog=False, ylog=False,\n hide_xtexts=False, hide_ytexts=False, hide_ztexts=False,\n hide_xlabel=False, hide_ylabel=False, hide_zlabel=False,\n hide_xticks=False, hide_yticks=False, hide_zticks=False,\n hide_xticklabels=False, hide_yticklabels=False, hide_zticklabels=False,\n show_legend=False, legend_loc='best', legend_ncol=1,\n legend_labels=None, remove_duplicates=False, legend_only=False,\n legend_fontsize=None, legend_markerscale=None, legend_lw=None,\n legend_title=None, plot_method=None\n):\n \"\"\"\n This method controls various properties of a figure.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object to draw the plot onto.\n title : str, optional\n Sets the figure title.\n title_fontsize : float or str, optional\n Sets the title font size.\n xlabel : str, optional\n Set the x-axis label.\n xlabel_fontsize : float or str, optional\n Sets the x-axis label font size.\n ylabel : str, optional\n Set the y-axis label.\n ylabel_fontsize : float or str, optional\n Sets the y-axis label font size.\n zlabel : str, optional\n Set the z-axis label.\n zlabel_fontsize : float or str, optional\n Sets the z-axis label font size.\n xticks : list, optional\n Positions of x-axis ticks.\n yticks : list, optional\n Positions of y-axis ticks.\n xticklabels : list, optional\n Tick labels for the x-axis.\n xticklabels_fontsize : float or str, optional\n Font size for the x-axis tick labels.\n yticklabels : list, optional\n Tick labels for the y-axis.\n yticklabels_fontsize : float or str, optional\n Font size for the y-axis tick labels.\n xrot : float, optional\n Rotation degree of tick labels for the x-axis.\n xha : str, optional\n Horizontal alignment of tick labels for the x-axis.\n xmin : float, optional\n Minimum value for the x-axis.\n xmax : float, optional\n Maximum value for the x-axis.\n ymin : float, optional\n Minimum value for the y-axis.\n ymax : float, optional\n Maximum value for the x-axis.\n xlog : bool, default: False\n Draw the x-axis in log scale.\n ylog : bool, default: False\n Draw the y-axis in log scale.\n hide_xtexts : bool, default: False\n Hides all the x-axis texts.\n hide_ytexts : bool, default: False\n Hides all the y-axis texts.\n hide_ztexts : bool, default: False\n Hides all the z-axis texts.\n hide_xlabel : bool, default: False\n Hides the x-axis label.\n hide_ylabel : bool, default: False\n Hides the y-axis label.\n hide_zlabel : bool, default: False\n Hides the z-axis label.\n hide_xticks : bool, default: False\n Hides ticks and tick labels for the x-axis.\n hide_yticks : bool, default: False\n Hides ticks and tick labels for the y-axis.\n hide_zticks : bool, default: False\n Hides ticks and tick labels for the z-axis.\n hide_xticklabels : bool, default: False\n Hides tick labels for the x-axis.\n hide_yticklabels : bool, default: False\n Hides tick labels for the 
y-axis.\n hide_zticklabels : bool, default: False\n Hides tick labels for the z-axis.\n show_legend : bool, default: False\n Show the figure legend.\n legend_loc : str, default: 'best'\n Legend location specified as in matplotlib.pyplot.legend.\n legend_ncol : int, default: 1\n Number of columns that the legend has.\n legend_only : bool, default: False\n Clear the figure and display the legend only.\n legend_fontsize : float or str, optional\n Sets the legend font size.\n legend_markerscale : float, optional\n Relative size of legend markers compared with the original.\n legend_lw : float, optional\n Width of the lines in the legend.\n legend_title: str, optional\n Legend title.\n plot_method : str, optional\n Name of the plotting method. This argument is internally used for\n the `alpha_rarefaction_plot` method. Not to be used by users.\n\n Returns\n -------\n matplotlib.axes.Axes\n Axes object with the plot drawn onto it.\n\n Notes\n -----\n Font size can be specified by provding a number or a string as defined in:\n {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}.\n \"\"\"\n if isinstance(title, str):\n ax.set_title(title, fontsize=title_fontsize)\n\n if isinstance(xlabel, str):\n ax.set_xlabel(xlabel, fontsize=xlabel_fontsize)\n\n if isinstance(ylabel, str):\n ax.set_ylabel(ylabel, fontsize=ylabel_fontsize)\n\n if isinstance(zlabel, str):\n ax.set_zlabel(zlabel, fontsize=zlabel_fontsize)\n\n if isinstance(xticks, list):\n ax.set_xticks(xticks)\n\n if isinstance(yticks, list):\n ax.set_yticks(yticks)\n\n if isinstance(xticklabels, list):\n a = len(ax.get_xticklabels())\n b = len(xticklabels)\n if a != b:\n raise ValueError(f\"Expected {a} items, but found {b}\")\n ax.set_xticklabels(xticklabels)\n\n if xticklabels_fontsize is not None:\n ax.tick_params(axis='x', which='major', labelsize=xticklabels_fontsize)\n\n if isinstance(yticklabels, list):\n a = len(ax.get_yticklabels())\n b = len(yticklabels)\n if a != b:\n raise ValueError(f\"Expected {a} items, but found {b}\")\n ax.set_yticklabels(yticklabels)\n\n if yticklabels_fontsize is not None:\n ax.tick_params(axis='y', which='major', labelsize=yticklabels_fontsize)\n\n if isinstance(xrot, numbers.Number):\n ax.set_xticklabels(ax.get_xticklabels(), rotation=xrot)\n\n if isinstance(xha, str):\n ax.set_xticklabels(ax.get_xticklabels(), ha=xha)\n\n ax.set_xlim(left=xmin, right=xmax)\n ax.set_ylim(bottom=ymin, top=ymax)\n\n if xlog:\n ax.set_xscale('log')\n\n if ylog:\n ax.set_yscale('log')\n\n if hide_xtexts:\n ax.set_xlabel('')\n ax.set_xticklabels([])\n\n if hide_ytexts:\n ax.set_ylabel('')\n ax.set_yticklabels([])\n\n if hide_ztexts:\n ax.set_zlabel('')\n ax.set_zticklabels([])\n\n if hide_xlabel:\n ax.set_xlabel('')\n\n if hide_ylabel:\n ax.set_ylabel('')\n\n if hide_zlabel:\n ax.set_zlabel('')\n\n if hide_xticks:\n ax.set_xticks([])\n\n if hide_yticks:\n ax.set_yticks([])\n\n if hide_zticks:\n ax.set_zticks([])\n\n if hide_xticklabels:\n ax.set_xticklabels([])\n\n if hide_yticklabels:\n ax.set_yticklabels([])\n\n # Control the figure legend.\n h, l = ax.get_legend_handles_labels()\n\n if legend_labels:\n a = len(legend_labels)\n b = len(l)\n if a != b:\n m = f\"Expected {b} legend labels, received {a}\"\n raise ValueError(m)\n l = legend_labels\n\n if remove_duplicates:\n if h:\n n = int(len(h) / 2)\n h, l = h[:n], l[:n]\n\n def _display_legend():\n leg = ax.legend(h, l, loc=legend_loc, ncol=legend_ncol,\n fontsize=legend_fontsize, markerscale=legend_markerscale,\n title=legend_title, 
title_fontsize=legend_fontsize)\n\n        if plot_method == 'alpha_rarefaction_plot':\n            i = 1\n        else:\n            i = 0\n\n        if legend_lw is not None:\n            for lh in leg.legendHandles[i:]:\n                lh.set_linewidth(legend_lw)\n\n    if legend_only:\n        # The order matters.\n        ax.clear()\n        _display_legend()\n        ax.axis('off')\n    elif show_legend:\n        if h:\n            _display_legend()\n        else:\n            warnings.warn(\"No handles with labels found to put in legend.\")\n    else:\n        if ax.get_legend():\n            ax.get_legend().remove()\n        else:\n            pass\n\n    return ax\n\ndef _get_others_col(df, count, taxa_names, show_others):\n    \"\"\"Returns DataFrame object after selecting taxa.\"\"\"\n    if count != 0 and taxa_names is not None:\n        m = \"Cannot use 'count' and 'taxa_names' arguments together\"\n        raise ValueError(m)\n    elif count != 0:\n        if count < df.shape[1]:\n            others = df.iloc[:, count-1:].sum(axis=1)\n            df = df.iloc[:, :count-1]\n            if show_others:\n                df = df.assign(Others=others)\n        else:\n            pass\n    elif taxa_names is not None:\n        others = df.drop(columns=taxa_names).sum(axis=1)\n        df = df[taxa_names]\n        if show_others:\n            df = df.assign(Others=others)\n        else:\n            pass\n\n    return df\n\ndef _parse_input(input, temp_dir):\n    \"\"\"Parse the input QIIME 2 object and export the files.\"\"\"\n    if isinstance(input, qiime2.Artifact):\n        fn = f'{temp_dir}/dokdo-temporary.qza'\n        input.save(fn)\n        input = fn\n        Artifact.load(input).export_data(temp_dir)\n    elif isinstance(input, qiime2.Visualization):\n        fn = f'{temp_dir}/dokdo-temporary.qzv'\n        input.save(fn)\n        input = fn\n        Visualization.load(input).export_data(temp_dir)\n    elif isinstance(input, str) and input.endswith('.qza'):\n        Artifact.load(input).export_data(temp_dir)\n    elif isinstance(input, str) and input.endswith('.qzv'):\n        Visualization.load(input).export_data(temp_dir)\n    else:\n        pass\n\ndef export(input, temp_dir):\n    \"\"\"\n    Export QIIME 2 data as files to a temporary directory.\n\n    This method will automatically detect the type of the input file or\n    object from QIIME 2 and then export the underlying data as files to the\n    specified temporary directory.\n\n    Parameters\n    ----------\n    input : str, qiime2.Artifact, or qiime2.Visualization\n        Path to the input file. Or QIIME 2 object.\n    temp_dir : str\n        Path to the temporary directory.\n    \"\"\"\n    if isinstance(input, qiime2.Artifact):\n        fn = f'{temp_dir}/temp.qza'\n        input.save(fn)\n        input = fn\n        Artifact.load(input).export_data(temp_dir)\n    elif isinstance(input, qiime2.Visualization):\n        fn = f'{temp_dir}/temp.qzv'\n        input.save(fn)\n        input = fn\n        Visualization.load(input).export_data(temp_dir)\n    elif isinstance(input, str) and input.endswith('.qza'):\n        Artifact.load(input).export_data(temp_dir)\n    elif isinstance(input, str) and input.endswith('.qzv'):\n        Visualization.load(input).export_data(temp_dir)\n    else:\n        raise TypeError(f'Incorrect input type detected: {type(input)}.')\n","sub_path":"dokdo/api/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":14018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"548392493","text":"import platform\n\nfrom conan.tools.env import Environment\nfrom conans.client.graph.graph import CONTEXT_HOST\n\n\nclass VirtualEnv:\n    \"\"\" captures the conanfile environment that is defined from its\n    dependencies, and also from profiles\n    \"\"\"\n\n    def __init__(self, conanfile):\n        self._conanfile = conanfile\n        self._conanfile.virtualenv = False\n\n    def build_environment(self):\n        \"\"\" collects the buildtime information from dependencies. 
This is the typical use case\n of build_requires defining information for consumers\n \"\"\"\n build_env = Environment()\n # First visit the direct build_requires\n for build_require in self._conanfile.dependencies.build_requires:\n # Lower priority, the runenv of all transitive \"requires\" of the build requires\n for require in build_require.dependencies.requires:\n build_env.compose(self._collect_transitive_runenv(require))\n # Second, the implicit self information in build_require.cpp_info\n build_env.compose(self._runenv_from_cpp_info(build_require.cpp_info))\n # Finally, higher priority, explicit buildenv_info\n if build_require.buildenv_info:\n build_env.compose(build_require.buildenv_info)\n\n # Requires in host context can also bring some direct buildenv_info\n def _collect_transitive_buildenv(d):\n r = Environment()\n for child in d.dependencies.requires:\n r.compose(_collect_transitive_buildenv(child))\n # Then the explicit self\n if d.buildenv_info:\n r.compose(d.buildenv_info)\n return r\n for require in self._conanfile.dependencies.requires:\n build_env.compose(_collect_transitive_buildenv(require))\n\n # The profile environment has precedence, applied last\n profile_env = self._conanfile.buildenv\n build_env.compose(profile_env)\n return build_env\n\n @staticmethod\n def _runenv_from_cpp_info(cpp_info):\n \"\"\" return an Environment deducing the runtime information from a cpp_info\n \"\"\"\n dyn_runenv = Environment()\n if cpp_info is None: # This happens when the dependency is a private one = BINARY_SKIP\n return dyn_runenv\n if cpp_info.bin_paths: # cpp_info.exes is not defined yet\n dyn_runenv.prepend_path(\"PATH\", cpp_info.bin_paths)\n # If it is a build_require this will be the build-os, otherwise it will be the host-os\n if cpp_info.lib_paths:\n dyn_runenv.prepend_path(\"LD_LIBRARY_PATH\", cpp_info.lib_paths)\n dyn_runenv.prepend_path(\"DYLD_LIBRARY_PATH\", cpp_info.lib_paths)\n if cpp_info.framework_paths:\n dyn_runenv.prepend_path(\"DYLD_FRAMEWORK_PATH\", cpp_info.framework_paths)\n return dyn_runenv\n\n def _collect_transitive_runenv(self, d):\n r = Environment()\n for child in d.dependencies.requires:\n r.compose(self._collect_transitive_runenv(child))\n # Apply \"d\" runenv, first the implicit\n r.compose(self._runenv_from_cpp_info(d.cpp_info))\n # Then the explicit\n if d.runenv_info:\n r.compose(d.runenv_info)\n return r\n\n def run_environment(self):\n \"\"\" collects the runtime information from dependencies. 
For normal libraries should be\n very occasional\n \"\"\"\n runenv = Environment()\n # At the moment we are adding \"test-requires\" (build_requires in host context)\n # to the \"runenv\", but this will be investigated\n for build_require in self._conanfile.dependencies.build_requires:\n if build_require.context == CONTEXT_HOST:\n runenv.compose(self._collect_transitive_runenv(build_require))\n for require in self._conanfile.dependencies.requires:\n runenv.compose(self._collect_transitive_runenv(require))\n\n # FIXME: Missing profile info\n result = runenv\n return result\n\n def generate(self):\n build_env = self.build_environment()\n run_env = self.run_environment()\n # FIXME: Use settings, not platform Not always defined :(\n # os_ = self._conanfile.settings_build.get_safe(\"os\")\n if build_env: # Only if there is something defined\n if platform.system() == \"Windows\":\n build_env.save_bat(\"conanbuildenv.bat\")\n else:\n build_env.save_sh(\"conanbuildenv.sh\")\n if run_env:\n if platform.system() == \"Windows\":\n run_env.save_bat(\"conanrunenv.bat\")\n else:\n run_env.save_sh(\"conanrunenv.sh\")\n","sub_path":"conan/tools/env/virtualenv.py","file_name":"virtualenv.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"74816149","text":"#!/usr/bin/env python3.2\n\n# Tentative features for /r/singapore\n# monday: eat what\n# wednesday: buy what\n# friday: do what\n\n# to be ran with cron, since it's only needed on specific days of the week\n# to use: ./weeklysubmissions.py username password \n\nimport praw\nimport sys\nimport datetime\n\ndisclaimer = \"\"\" \n---\n^(this is the resident bot moderator of /r/singapore. if you have a feedback or\nan error report, please message /u/infinitus_ about it.)\n\"\"\"\n\nsubmissions = { \n'Monday': ('Eat what?', \"\"\"\nNo idea what to eat this week? Bored of your daily lunch routine? New cafe/restaurant/hawker discovery? Post them here.\n\nWhen posting, please:\n\n* Be nice to one another\n* Agree to disagree \n\nPhotographs are encouraged. Let the salivating begin. \n\"\"\" + disclaimer),\n\n'Wednesday': ('Buy what?', \"\"\"\nWhat else is there to do in Singapore other than eating? Shopping, of course.\n\nThis thread is for sharing the deals that you've found, and purchases you've made recently. Anything goes: Clothes, watches, cars, holiday packages, pets, video games, a new house. Online shopping included.\n\nWhen posting, please:\n\n* Be nice to one another\n* Agree to disagree \n\n\n\"\"\" + disclaimer),\n\n'Friday' : ('Go where?', \"\"\" \nThe weekends are here! Where are you going this weekend? What events are happening around this island that people should know about? Post them here. \n\nWhen posting, please:\n\n* Be nice to one another\n* Agree to disagree \n\n\n\"\"\" + disclaimer)\n}\n\n# Gets the day in the word form (Monday, Tuesday..)\ndef get_current_day_of_the_week():\n dt = datetime.datetime.now()\n return dt.strftime(\"%A\")\n\n# Gets the date in the format: [Day Month]\ndef get_date_tag():\n dt = datetime.datetime.now()\n day = dt.day\n month = dt.strftime(\"%B\")\n return \"[\" +str(numeral_to_ordinal(day)) + \" \" + month + \"]\"\n\n# Converts number (1, 2, 3..) 
to their ordinal form (1st, 2nd, 3rd..)\ndef numeral_to_ordinal(num): \n suffix = \"\"\n if (num == 11 or num == 12 or num == 13):\n suffix = \"th\"\n elif (num % 10 == 1): \n suffix = \"st\"\n elif (num % 10 == 2):\n suffix = \"nd\"\n elif (num % 10 == 3):\n suffix = \"rd\"\n else:\n suffix = \"th\"\n\n return str(num) + suffix \n\n# Posts the thread with predefined texts, depending on the day this method was\n# called on\ndef post_thread(r):\n today = get_current_day_of_the_week()\n\n TITLE = get_date_tag() + \" \" + submissions[today][0]\n TEXT = submissions[today][1]\n SUBREDDIT = \"\" # the name only: \"subredditname\", not \"/r/subredditname\"\n \n r.submit(SUBREDDIT, TITLE, text=TEXT)\n\ndef main():\n # get username and password from the command line arguments\n user = sys.argv[1]\n pwd = sys.argv[2]\n\n r = praw.Reddit(user_agent='jrb')\n r.login(user, pwd)\n\n post_thread(r)\n\nif __name__ == '__main__':\n main()\n","sub_path":"weeklysubmissions.py","file_name":"weeklysubmissions.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"415953466","text":"from src.custom_logger import Logger\nimport re\nimport json\n\nwith open('config/training_config.json') as f:\n config = json.loads(f.read())\n\nNUMBER_OF_COLUMNS = config['numberOfColumns']\nCOLUMN_NAMES = [name for name in config['columns']]\nCOLUMN_DATA_TYPES = config['columns']\nDATA_TYPE_DICT = {'object': object, 'float': float, \"int\": int}\n\nlogger = Logger()\nlog_type = 'file_validation'\n\n\nclass Validator:\n\n @staticmethod\n def validate_file_name(filename):\n \"\"\"\n Perform Filename and filetype Validation\n :param filename: Exact Name of the file with extension\n :return: True if Filename and type are valid, else raise Exception\n \"\"\"\n pattern = r'^wafer_[0-9]{8}_[0-9]{6}\\.csv$'\n try:\n if not re.match(pattern, str(filename).lower()):\n raise Exception('TRAINING_VALIDATION : Invalid Filename/Filetype' + ':' + str(filename))\n else:\n return True\n except Exception as e:\n logger.log_file_validation(str(e))\n return False\n\n @staticmethod\n def validate_number_of_columns(df, filename):\n \"\"\"\n Validate the number of columns in the dataset\n :param df: Pandas Dataframe\n :return: True if valid, else Raise Exception\n \"\"\"\n try:\n if not len(df.columns) == NUMBER_OF_COLUMNS:\n raise Exception('TRAINING_VALIDATION : Invalid Number of Columns in file : ' + str(filename))\n else:\n return True\n except Exception as e:\n logger.log_file_validation(str(e))\n return False\n\n @staticmethod\n def validate_name_of_columns(df, filename):\n columns = df.columns\n try:\n if columns.tolist() == COLUMN_NAMES:\n return True\n else:\n raise Exception('TRAINING_VALIDATION : Invalid Columns Names in file : ' + str(filename))\n except Exception as e:\n logger.log_file_validation(str(e))\n return False\n\n @staticmethod\n def validate_column_data_type(df, filename):\n columns = df.columns\n try:\n col_data_types = df.dtypes\n for index in col_data_types.index:\n if col_data_types[index] != DATA_TYPE_DICT[COLUMN_DATA_TYPES[index]]:\n raise Exception('TRAINING_VALIDATION : Invalid columns data type in file : {}'.format(filename))\n return True\n except Exception as e:\n logger.log_file_validation(str(e))\n return False\n\n @staticmethod\n def validate_null_columns(df, filename):\n try:\n if len(df.isnull().sum()[df.isnull().sum() == len(df)]) == 0:\n return True\n else:\n raise Exception('TRAINING_VALIDATION : All Column Values are 
null in file : {}'.format(filename))\n        except Exception as e:\n            logger.log_file_validation(str(e))\n            return False\n\n\n\n","sub_path":"src/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"5147200","text":"'''\nCreated on Mar 20, 2012\n\n@author: SFUser\n'''\n# -*- coding: utf-8 -*-\nimport urllib.request\nfrom html.parser import HTMLParser\n\nurl = \"http://www.google.cn\"\nf = urllib.request.urlopen(url)\nsource = f.read().decode('cp936')  # HTMLParser.feed() expects str in Python 3, so only decode\nparser = HTMLParser()\nparser.feed(source)\n","sub_path":"src/web/htmlparser1.py","file_name":"htmlparser1.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"143645992","text":"from unittest.mock import patch\n\nimport flask\nimport pytest\n\nfrom app import create_app\nfrom app.constants.error_message import MISSING_TOKEN, INVALID_TOKEN\nfrom app.utils.app_exception import AuthenticationException\nfrom app.utils.token import token_required, encode_token\n\n\ndef test_request_with_missing_access_token():\n    with pytest.raises(AuthenticationException) as exc_info:\n        mock_header_and_test_token(None)\n    assert str(exc_info.value) == MISSING_TOKEN\n\n\ndef test_request_with_invalid_token():\n    with pytest.raises(AuthenticationException) as exc_info:\n        mock_header_and_test_token(f'Bearer iikshf92.oifhsfds.98dfdsfh')\n    assert str(exc_info.value) == INVALID_TOKEN\n\n    with pytest.raises(AuthenticationException) as exc_info:\n        mock_header_and_test_token(f'iikshf92.oifhsfds.98dfdsfh')\n    assert str(exc_info.value) == INVALID_TOKEN\n\n    with pytest.raises(AuthenticationException) as exc_info:\n        mock_header_and_test_token(f'Bearer iikshf92.oifhsfds 98dfdsfh')\n    assert str(exc_info.value) == INVALID_TOKEN\n\n\ndef test_token_with_invalid_user():\n    access_token = encode_token({\"id\": 3})\n    with pytest.raises(AuthenticationException) as exc_info:\n        mock_header_and_test_token(f'Bearer {access_token}')\n    assert str(exc_info.value) == INVALID_TOKEN\n\n\ndef mock_header_and_test_token(access_token):\n    with patch.object(flask, 'request') as request_mock:\n        request_mock.headers.get.return_value = access_token\n        with create_app().app_context():\n            response = token_required(lambda value: (value, 200))()\n\n    request_mock.headers.get.assert_called_once_with('Authorization')\n    return response[1]\n","sub_path":"flask-photo-app-master/tests/utils/test_token.py","file_name":"test_token.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"326488485","text":"import sys\n'''\nOverview: takes a list [] and prints each of its items; you can control whether to indent, the indent depth, and where the output goes\nParameter description\n\tthe_list: the list to print\n\tindent: whether to indent when a list is nested inside another list\n\tlevel: initial indent depth\n\tfh: where to write the output; can be a file or the screen\n'''\ndef print_lol(the_list,indent=False,level=0,fh=sys.stdout):\n\tfor i in the_list:\n\t\tif isinstance(i,list):\n\t\t\tprint_lol(i,indent,level+1,fh)\n\t\telse:\n\t\t\tif indent:\n\t\t\t\tfor tab_stop in range(level):\n\t\t\t\t\tprint(\"\\t\",end='',file=fh)\n\t\t\tprint(i,file=fh)\n# Test\nmovies=['A',1975,'B',1991,['c',['d1','d2','d3','d4','d5']]]\nnester_file=open('neste_file.txt','w')\nprint_lol(movies,fh=nester_file)\n\n\n","sub_path":"pickle操作/nester.py","file_name":"nester.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"649772343","text":"#!/usr/bin/python3\n\"\"\"\n Starts a Flask web application to listen on 0.0.0.0:5000\n\"\"\"\n\nfrom flask import Flask, render_template\nfrom models import storage\n\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.route('/hbnb_filters')\ndef all_states():\n \"\"\"\n List of all states\n \"\"\"\n states = storage.all(\"State\")\n amenities = storage.all(\"Amenity\")\n context = {'states': states, 'amenities': amenities}\n return render_template('10-hbnb_filters.html', **context)\n\n\n@app.teardown_appcontext\ndef teardown_app(exception):\n \"\"\"\n Closing the storage\n \"\"\"\n storage.close()\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"web_flask/10-hbnb_filters.py","file_name":"10-hbnb_filters.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"435679640","text":"#%%\nimport random\nimport os\nfrom shutil import copy2\n#split_folders.ratio('custom_data/*', output=\"output\", seed=1337, ratio=(.8, .2))\n\n#%%\ninput_folder = './asm_data'\nout_folder = list(0 for i in range(3))\nout_folder[0] = './asm_data_alt'\t# Train\nout_folder[1] = './asm_data_val_alt'\t# Validation\nout_folder[2] = './asm_data_test_alt'\t# Test\nratio = [1.0, 0.0, 0.0] # Train : Val : Test\nassert sum(ratio) == 1.0, \"Sum of ratio not 1\"\n#%%\ndef safe_make_dir(path):\n\tif not os.path.exists(out_dir[i]):\n\t\tos.makedirs(out_dir[i])\n\nrandom.seed(1337) # For reproducability of results\nfor root, dirs, files in os.walk(input_folder):\n\tout_dir = list(0 for i in range(3))\n\tfor f in files:\n\t\tfile_name = os.path.join(root, f)\n\t\t#print(file_name)\n\t\ttmp = root.split(os.sep)\n\t\tfor i in range(3):\n\t\t\tout_dir[i] = os.path.join(out_folder[i], tmp[-2] + '_' + tmp[-1])\n\t\t\tprint(out_dir[i])\n\t\t\tsafe_make_dir(out_dir[i])\n\t\trand = random.uniform(0, 1)\n\t\tif rand >= ratio[1] + ratio[2]:\n\t\t\tcopy2(file_name, out_dir[0])\n\t\telif rand < ratio[1] + ratio[2] and rand > ratio[2]:\n\t\t\tcopy2(file_name, out_dir[1])\n\t\telse:\n\t\t\tcopy2(file_name, out_dir[2])\n\n#%%\n","sub_path":"procedure_2/pre_processing_2.py","file_name":"pre_processing_2.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"92477660","text":"# Modules\nimport argparse\nfrom math import ceil\nfrom math import floor\nfrom math import log\n\n\ndef is_none(param):\n if param is None:\n return True\n return False\n\n\ndef is_negative(param):\n if float(param) < 0:\n return True\n return False\n\n\ndef calculate_annuity(principal, periods, interest):\n if is_none(principal) or is_none(periods) or is_none(interest)\\\n or is_negative(principal) or is_negative(periods) or is_negative(interest):\n print(\"Incorrect parameters\")\n return\n\n p = float(principal)\n n = int(periods)\n i = float(interest) / (12 * 100)\n\n annuity_payment = p * ((i * pow(1 + i, n)) / (pow(1 + i, n) - 1))\n annuity_payment = ceil(annuity_payment)\n overpayment = (n * annuity_payment) - p\n\n print(f\"Your annuity payment = {annuity_payment}!\")\n print(f\"Overpayment = {round(overpayment)}\")\n\n\ndef calculate_annuity_periods(principal, payment, interest):\n if is_none(principal) or is_none(payment) or is_none(interest) \\\n or is_negative(principal) or is_negative(payment) or is_negative(interest):\n print(\"Incorrect parameters\")\n return\n\n p = float(principal)\n i 
= float(interest) / (12 * 100)\n    pay = float(payment)\n\n    n = log(pay / (pay - i * p), i + 1)\n    n = ceil(n)\n    overpayment = (n * pay) - p\n\n    counts = divmod(n, 12)\n    years = round(counts[0])\n    months = ceil(counts[1])\n\n    if months == 12:\n        years += 1\n        months = 0\n\n    if years == 0:\n        if months == 1:\n            print(f\"It will take {months} month to repay this loan!\")\n        else:\n            print(f\"It will take {months} months to repay this loan!\")\n    elif months == 0:\n        if years == 1:\n            print(f\"It will take {years} year to repay this loan!\")\n        else:\n            print(f\"It will take {years} years to repay this loan!\")\n    else:\n        print(f\"It will take {years} \", end=\"\")\n        print(\"year\" if years == 1 else \"years\", end=\"\")\n        print(f\" and {months} \", end=\"\")\n        print(\"month\" if months == 1 else \"months\", end=\"\")\n        print(\" to repay this loan!\")\n\n    print(f\"Overpayment = {round(overpayment)}\")\n\n\ndef calculate_annuity_principal(periods, payment, interest):\n    if is_none(periods) or is_none(payment) or is_none(interest) \\\n            or is_negative(periods) or is_negative(payment) or is_negative(interest):\n        print(\"Incorrect parameters\")\n        return\n\n    n = int(periods)\n    i = float(interest) / (12 * 100)\n    pay = float(payment)\n    principal = pay / ((i * pow(1 + i, n)) / (pow(1 + i, n) - 1))\n    principal = floor(principal)\n    overpayment = (n * pay) - principal\n\n    print(f\"Your loan principal = {principal}!\")\n    print(f\"Overpayment = {round(overpayment)}\")\n\n\ndef calculate_diff(principal, periods, interest):\n    if is_none(principal) or is_none(periods) or is_none(interest)\\\n            or is_negative(principal) or is_negative(periods) or is_negative(interest):\n        print(\"Incorrect parameters\")\n        return\n\n    p = float(principal)\n    n = int(periods)\n    i = float(interest) / (12 * 100)\n\n    overpayment = p\n    month = 1\n    for _ in range(n):\n        d = (p / n) + i * (p - ((p * (month - 1)) / n))\n        d = ceil(d)\n        print(f\"Month {month}: payment is {d}\")\n        month += 1\n        overpayment -= d\n    overpayment = -1 * overpayment\n    print(f\"\\nOverpayment = {overpayment}\")\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--type\", help=\"type of loan\")\nparser.add_argument(\"--principal\", help=\"the initial loan amount\")\nparser.add_argument(\"--periods\", help=\"amount of months to make a payment\")\nparser.add_argument(\"--payment\", help=\"payment amount per period\")\nparser.add_argument(\"--interest\", help=\"the interest rate on the loan\")\n\n# Store input\nargs = parser.parse_args()\n\n# Check: Type\nif args.type == \"annuity\":\n    if args.payment is None:\n        calculate_annuity(principal=args.principal, periods=args.periods, interest=args.interest)\n    elif args.principal is None:\n        calculate_annuity_principal(periods=args.periods, payment=args.payment, interest=args.interest)\n    elif args.periods is None:\n        calculate_annuity_periods(principal=args.principal, payment=args.payment, interest=args.interest)\nelif args.type == \"diff\":\n    if args.payment is not None:\n        print(\"Incorrect parameters\")\n    else:\n        calculate_diff(principal=args.principal, periods=args.periods, interest=args.interest)\nelse:\n    print(\"Incorrect parameters\")\n","sub_path":"Loan Calculator/task/creditcalc/creditcalc.py","file_name":"creditcalc.py","file_ext":"py","file_size_in_byte":4512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"552003535","text":"import torch\nimport torch.nn.functional as F\nfrom torchtext import data\nfrom torchtext import datasets\nimport time\nimport 
random\n\ntorch.backends.cudnn.deterministic = True\n\n# General Settings\n\nRANDOM_SEED = 123\ntorch.manual_seed(RANDOM_SEED)\n\nVOCABULARY_SIZE = 20000\nLEARNING_RATE = 1e-4\nBATCH_SIZE = 128\nNUM_EPOCHS = 15\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nEMBEDDING_DIM = 128\nHIDDEN_DIM = 256\nOUTPUT_DIM = 1\n\n# Dataset\nTEXT = data.Field(tokenize = 'spacy')\nLABEL = data.LabelField(dtype = torch.float)\n# LABEL = data.LabelField()\ntrain_data, test_data = datasets.IMDB.splits(TEXT, LABEL)\ntrain_data, valid_data = train_data.split(random_state=random.seed(RANDOM_SEED),\n split_ratio=0.8)\n\nprint(f'Num Train: {len(train_data)}')\nprint(f'Num Valid: {len(valid_data)}')\nprint(f'Num Test: {len(test_data)}')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"dl_model_pytorch/Recurrent Neural Networks (RNNs)/Many-to-one: Sentiment Analysis /0_Simple RNN.py","file_name":"0_Simple RNN.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"128392045","text":"# -*- coding: utf-8 -*-\n\n# This is a simple NN (multi-layer perceptron) for testing TF on MNIST dataset.\n# Code copied from the book 'TensorFlow: 实战 Google 深度学习框架', with minor modifications.\n#\n# Author: Hao Wu, hwuu@outlook.com\n\nimport argparse\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n#\n# MNIST数据集相关的常数。\n#\n\nN_NODES_INPUT = 784 # 输入层的节点数。对于MNIST数据集,这个就等于图片的像素。\nN_NODES_OUTPUT = 10 # 输出层的节点数。这个等于类别的数目。因为在MNIST数据集中\n # 需要区分的是0~9这10个数字,所以这里输出层的节点数为10。\n\n#\n# 配置神经网络的参数。\n#\n\nN_NODES_LAYER1 = 500 # 隐藏层节点数。这里使用只有一个隐藏层的网络结构作为样例。\n # 这个隐藏层有500个节点。\nBATCH_SIZE = 100 # 一个训练batch中的训练数据个数。数字越小时,训练过程越接近\n # 随机梯度下降;数��越大时,训练越接近梯度下降。\nLEARNING_RATE = 0.5 # 学习率。\nTRAINING_STEPS = 30000 # 训练轮数。\n\n# 一个辅助函数,给定神经网络的输入和所有参数,计算神经网络的前向传播结果。在这里\n# 定义了一个使用ReLU激活函数的三层全连接神经网络。通过加入隐藏层实现了多层网络结构,\n# 通过ReLU激活函数实现了去线性化。\ndef inference(input_tensor, weights1, biases1, weights2, biases2):\n # 计算隐藏层的前向传播结果,这里使用了ReLU激活函数。\n layer1 = \\\n tf.nn.relu(\n tf.matmul(input_tensor, weights1) + \\\n biases1\n )\n # 计算输出层的前向传播结果。因为在计算损失函数时会一并计算softmax函数, \n # 所以这里不需要加入激活函数。而且不加入softmax不会影响预测结果。因为预测时\n # 使用的是不同类别对应节点输出值的相对大小,有没有softmax层对最后分类结果的\n # 计算没有影响。于是在计算整个神经网络的前向传播时可以不加入最后的softmax层。\n return \\\n tf.matmul(layer1, weights2) + \\\n biases2\n\n# 训练模型的过程。\ndef train(mnist):\n x = tf.placeholder(tf.float32, [None, N_NODES_INPUT], name='x-input')\n y_ = tf.placeholder(tf.float32, [None, N_NODES_OUTPUT], name='y-input')\n\n # 生成隐藏层的参数。\n weights1 = tf.Variable(\n tf.truncated_normal([N_NODES_INPUT, N_NODES_LAYER1], stddev=0.1))\n biases1 = tf.Variable(tf.constant(0.1, shape=[N_NODES_LAYER1]))\n # 生成输出层的参数。\n weights2 = tf.Variable(\n tf.truncated_normal([N_NODES_LAYER1, N_NODES_OUTPUT], stddev=0.1))\n biases2 = tf.Variable(tf.constant(0.1, shape=[N_NODES_OUTPUT]))\n\n # 计算在当前参数下神经网络前向传播的结果。\n y = inference(x, weights1, biases1, weights2, biases2)\n\n # 定义存储训练轮数的变量。这个变量不需要计算滑动平均值,所以这里指定这个变量为\n # 不可训练的变量(trainable=Fasle)。在使用TensorFlow训练神经网络时, \n # 一般会将代表训练轮数的变量指定为不可训练的参数。 \n global_step = tf.Variable(0, trainable=False)\n\n # 计算交叉熵作为刻画预测值和真实值之间差距的损失函数。这里使用了TensorFlow中提\n # 供的sparse_softmax_cross_entropy_with_logits函数来计算交叉熵。当分类\n # 问题只有一个正确答案时,可以使用这个函数来加速交叉熵的计算。MNIST问题的图片中\n # 只包含了0~9中的一个数字,所以可以使用这个函数来计算交叉熵损失。这个函数的第一个\n # 参数是神经网络不包括softmax层的前向传播结果,第二个是训练数据的正确答案。因为\n # 标准答案是一个长度为10的一维数组,而该函数需要提供的是一个正确答案的数字,所以需\n # 要使用tf.argmax函数来得到正确答案对应的类别编号。\n cross_entropy 
= tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=tf.argmax(y_, 1), logits=y)\n # 计算在当前batch中所有样例的交叉熵平均值。\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n # 总损失等于交叉熵损失。\n loss = cross_entropy_mean\n # 使用tf.train.GradientDescentOptimizer优化算法来优化损失函数。\n train_step = \\\n tf.train.GradientDescentOptimizer(LEARNING_RATE) \\\n .minimize(loss, global_step=global_step)\n\n # 检验神经网络前向传播结果是否正确。tf.argmax(y, 1)\n # 计算每一个样例的预测答案。其中y是一个batch_size * 10的二维数组,每一行\n # 表示一个样例的前向传播结果。tf.argmax的第二个参数“1”表示选取最大值的操作仅在第一\n # 个维度中进行,也就是说,只在每一行选取最大值对应的下标。于是得到的结果是一个长度为\n # batch的一维数组,这个一维数组中的值就表示了每一个样例对应的数字识别结果。tf.equal\n # 判断两个张量的每一维是否相等,如果相等返回True,否则返回False。\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n # 这个运算首先将一个布尔型的数值转换为实数型,然后计算平均值。这个平均值就是模型在这\n # 一组数据上的正确率。\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # 初始化会话并开始训练过程。\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n # 准备验证数据。一般在神经网络的训练过程中会通过验证数据来大致判断停止的\n # 条件和评判训练的效果。\n validate_feed = {\n x: mnist.validation.images, \n y_: mnist.validation.labels\n }\n\n # 准备测试数据。在真实的应用中,这部分数据在训练时是��可见的,这个数据只是作为模\n # 型优劣的最后评价标准。\n test_feed = {\n x: mnist.test.images,\n y_: mnist.test.labels\n } \n\n # 迭代地训练神经网络。\n for i in range(TRAINING_STEPS):\n # 产生这一轮使用的一个batch的训练数据,并运行训练过程。\n xs, ys = mnist.train.next_batch(BATCH_SIZE)\n _, loss_val = sess.run([train_step, loss], feed_dict={x: xs, y_: ys})\n # 每1000轮输出一次在验证数据集上的测试结果。\n if i % 1000 == 0:\n # 计算滑动平均模型在验证数据上的结果。因为MNIST数据集比较小,所以一次\n # 可以处理所有的验证数据。为了计算方便,本样例程序没有将验证数据划分为更\n # 小的batch。当神经网络模型比较复杂或者验证数据比较大时,太大的batch\n # 会导致计算时间过长甚至发生内存溢出的错误。\n validate_acc = sess.run(accuracy, feed_dict=validate_feed)\n print(\"After %d training step(s), validation accuracy \"\n \"using average model is %g (loss = %g)\" % (i, validate_acc, loss_val))\n\n\n # 在训练结束之后,在测试数据上检测神经网络模型的最终正确率。\n test_acc = sess.run(accuracy, feed_dict=test_feed)\n print(\"After %d training step(s), test accuracy using average \"\n \"model is %g\" % (TRAINING_STEPS, test_acc))\n\n# Parameters from command line.\nargs_ = None\n\n# 主程序入口。\ndef main(_):\n # 声明处理MNIST数据集的类,这个类在初始化时会自动下载数据。\n mnist = input_data.read_data_sets(args_.data_dir, one_hot=True)\n train(mnist)\n\n# TensorFlow提供的一个主程序入口,tf.app.run会调用上面定义的main函数。\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Launch TensorFlow for training.')\n parser.add_argument(\"--data-dir\", default=\"/tmp/data\",\n help='Data folder. 
MNIST data files will be downloaded if they do not exist.')\n args_ = parser.parse_args()\n tf.app.run()\n","sub_path":"mnist/mnist-mlp.py","file_name":"mnist-mlp.py","file_ext":"py","file_size_in_byte":8372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"134153194","text":"import requests\nimport http.client, urllib\nimport os\nimport glob\nimport time\nimport argparse\nfrom time import sleep\n#!/usr/bin/env python3\nimport subprocess\n\nos.system('modprobe w1-gpio')\nos.system('modprobe w1-therm')\n \nbase_dir = '/sys/bus/w1/devices/'\ndevice_folder = glob.glob(base_dir + '28*')[0]\ndevice_file = device_folder + '/w1_slave'\n \ndef read_temp_raw():\n f = open(device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines\n \ndef read_temp():\n lines = read_temp_raw()\n while lines[0].strip()[-3:] != 'YES':\n time.sleep(0.2)\n lines = read_temp_raw()\n equals_pos = lines[1].find('t=')\n if equals_pos != -1:\n temp_string = lines[1][equals_pos+2:]\n temp_c = float(temp_string) / 1000.0\n return temp_c\n\n\n\ncar_on = True\ntemp = read_temp \ni = 0\nwhile car_on == True:\n i += 1\n #change the number after >= to change temperature that alert the phone at.\n if read_temp() >= 0:\n python3_command = \"amg88xx_still.py\"\n process = subprocess.Popen(python3_command.split(), stdout=subprocess.PIPE)\n output, error = process.communicate() # receive output from the python2 script\n r = requests.post(\"https://api.pushover.net/1/messages.json\", data={\n \"token\":\"aajaiutjvx1fwdqy15g2nqcp6av8dt\",\n \"user\":\"usqmx1mv4pams23f4nq8igh5a7okzf\",\n \"message\":\"Your vehicle has reached a DANGEROUS temperature. Please return to your vehicle.\"}, \n files={\"attachment\":open(\"/home/pi/Desktop/amg88xx_still.jpg\",\"rb\")})\n exit()\n","sub_path":"Edward_alert.py","file_name":"Edward_alert.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"214485758","text":"import sqlite3\nimport xmlrpclib,SimpleXMLRPCServer\n\nconn = sqlite3.connect('/data/issduser.db')\nc = conn.cursor()\n# Create table\n#c.execute('''create table count\n#(INT cnt)''')\ncount = 0\n\nclass Synchandler():\n\tdef add(self):\n\t\tc.execute('insert into count values (?)', count)\n\t\tconn.commit()\n\t\tcount += 1\n\t\nsynchandler = Synchandler()\nserver = SimpleXMLRPCServer.SimpleXMLRPCServer((\"192.168.0.201\",5678))\nserver.register_instance(synchandler)\nserver.serve_forever()\n\n","sub_path":"python/jsonrpc/test_rpc.py","file_name":"test_rpc.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"214167851","text":"import math\nimport re\nimport random\n#石头剪刀布游戏\ndef shitou():\n guess_list = [\"石头\", \"剪刀\", \"布\"]\n win_combination = [[\"布\", \"石头\"], [\"石头\", \"剪刀\"], [\"剪刀\", \"布\"]]\n\n while True:\n computer = random.choice(guess_list)\n people = input('请输入:石头,剪刀,布\\n').strip()\n if people not in guess_list:\n continue\n elif computer == people:\n print (\"平手,再玩一次!\")\n elif [computer, people] in win_combination:\n print (\"电脑获胜,再玩,人获胜才能退出!\")\n else:\n print (\"人获胜!\")\n break\nshitou()\n\n# 3.打印出所有的\"水仙花数\",所谓\"水仙花数\"是指一个三位数,\n# 其各位数字立方和等于该数本身。\n# 例如:153是一个\"水仙花数\",因为153=1的三次方+5的三次方+3的三次方。\ndef shuixianhua():\n for i in range(100,999):\n a=str(i)\n if math.pow(int(a[0]),3)+math.pow(int(a[1]),3)+math.pow(int(a[2]),3)==int(i):\n print(int(a))\n\ndef shuixianhua2():\n for i in 
range(100, 999):\n a=i%10\n b=int((i%100-a)/10)\n c=int((i%1000-b)/100)\n if math.pow(a,3)+math.pow(b,3)+math.pow(c,3)==i:\n print(i)\n\n# 4.输入一行字符,分别统计出其中英文字母、空格、数字和其它字符的个数。\ndef four():\n asd=input();\n chinazhenze = re.compile(\"[\\u4ea0-\\u9fa5a-zA-Z].*?\")\n kongge = re.compile(\" .*?\")\n shuzi = re.compile(\"[0-9].*?\")\n qitazifu = re.compile(\"[`~!@#$^&*()=|{}':;',\\\\[\\\\].<>/?~!@#¥……&*()——|{}【】‘;:”“'。,、?].*?\")\n china = chinazhenze.findall(asd)\n konggea = kongge.findall(asd)\n shuzia = shuzi.findall(asd)\n qitazifua = qitazifu.findall(asd)\n print(len(china))\n print(len(konggea))\n print(len(shuzia))\n print(len(qitazifua))\n#s输出匹配的字符\n # print(china)\n # print(konggea)\n # print(shuzia)\n # print(qitazifua)\n","sub_path":"Based on practice/three.py","file_name":"three.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"293856831","text":"#!/usr/bin/python\nimport logging\nimport sys\n \nclass StreamToLogger(object):\n \"\"\"\n Fake file-like stream object that redirects writes to a logger instance.\n \"\"\"\n def __init__(self, logger, log_level=logging.INFO):\n self.logger = logger\n self.log_level = log_level\n self.linebuf = ''\n \n def write(self, buf):\n for line in buf.rstrip().splitlines():\n self.logger.log(self.log_level, line.rstrip())\n\nstdout_logger = logging.getLogger('STDOUT')\nsl = StreamToLogger(stdout_logger, logging.INFO)\nsys.stdout = sl\n \nstderr_logger = logging.getLogger('STDERR')\nsl = StreamToLogger(stderr_logger, logging.ERROR)\nsys.stderr = sl","sub_path":"setupLogging.py","file_name":"setupLogging.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"544644473","text":"import sys\nimport quandl\nimport json\n#import calendar\n#import pandas as pd\nimport lib.settings as STNGS\nfrom dateutil import rrule\nfrom datetime import datetime\nfrom calendar import monthrange\nimport re\n\n#set apikey for authentication against quandl\nquandl.ApiConfig.api_key = STNGS.api_Key\n\nstartDate = STNGS.startDate\nendDate = STNGS.endDate\ntickers = STNGS.tickers\n\n\"\"\"\nfunction to perform quandl query for data retrieval\n :param ticker: the ticker symbol for which data is being fetched\n :param startDate: the start date for a query\n :param endDate: the end date for a query\n :return datatable containing the ticker symbol, the date, and the open and close prices for the given date range\n\"\"\"\ndef dataQuery(ticker, startDate, endDate):\n return quandl.get_table('WIKI/PRICES', qopts={\"columns\":[\"ticker\",\"date\",\"open\",\"close\"]},ticker=ticker, date = {'gte': startDate, 'lte':endDate})\n\n\n\"\"\"\nfunction to print the results\n :param dataResults: expects a properly formatted json style object - {key:value} \n\"\"\"\ndef prettyPrint(dataResults):\n print(json.dumps(dataResults, indent=3))\n\n\n\"\"\"\nfunction to check date\n :param aDate: date to check - expect yyyy-mm-dd format\n :return boolean: True if valid date, False otherwise\n\"\"\"\ndef isDate(aDate):\n try:\n #ensure date is in yyyy-mm-dd format\n if re.match('\\d{4}-\\d{2}-\\d{2}', aDate):\n return True\n\n except ValueError:\n return False\n\n return False\n\n\n\"\"\"\nfunction to determine which security had the most days where the closing price was lower than the opening price\n :param startDate: date to start data processing - expect yyyy-mm-dd format\n :param endDate: date to end data 
processing - expect yyyy-mm-dd format\n :param tickers: an array of tickers to fetch data and perform calculations on - expect ['tick1','tick2',...'tickx']\n\n :return tickerCalculations: a python dictionary of tickers with calculated lists\n of month, average_open, average_close values in format\n {'ticker':[{'month':'yyyy-dd','average_open':dd.dddd,'average_close':dd.dddd}]}\n\"\"\"\ndef biggestLoser(startDate, endDate, tickers):\n #loop through tickers, perform calculations, return \n print(\"Fetching data and calculating the biggest loser\")\n lostMost = {\"symbol\": None,\"totalDays\":0}\n for ticker in tickers:\n query = dataQuery(ticker, startDate, endDate)\n difference = (query.close.sub(query.open)).tolist()\n negativeDays = sum(1 for number in difference if number < 0)\n if (negativeDays > lostMost[\"totalDays\"]):\n lostMost[\"symbol\"] = ticker\n lostMost[\"totalDays\"] = negativeDays\n\n return lostMost;\n\n\n\n\"\"\"\nfunction calculating average open and average close by month for given list of stock tickers\n :param startDate: date to start data processing - expect yyyy-mm-dd format\n :param endDate: date to end data processing - expect yyyy-mm-dd format\n :param tickers: an array of tickers to fetch data and perform calculations on - expect ['tick1','tick2',...'tickx']\n\n :return tickerCalculations: a python dictionary of tickers with calculated lists\n of month, average_open, average_close values in format\n {'ticker':[{'month':'yyyy-dd','average_open':dd.dddd,'average_close':dd.dddd}]}\n\"\"\"\ndef tickerProcessing(startDate, endDate, tickers):\n tickerCalculations = {}\n\n print('Fetching data and calculating open and close averages...')\n for ticker in tickers:\n aList = []\n #using the dateutil rrule module loop from a start date to end date by month\n for dt in rrule.rrule(rrule.MONTHLY, dtstart=startDate, until=endDate):\n year = dt.strftime(\"%Y\")\n month = dt.strftime(\"%m\")\n daysInMonth = str(monthrange(int(year),int(dt.strftime(\"%m\")))[1])\n firstDateMonth = year + \"-\" + month + \"-\" \"1\"\n lastDateMonth = year + \"-\" + month + \"-\" + daysInMonth \n\n query = dataQuery(ticker, firstDateMonth, lastDateMonth)\n dataResults = {}\n dataResults[\"month\"] = year + \"-\" + month\n dataResults[\"average_open\"] = sum(query.open)/len(query.open)\n dataResults[\"average_close\"] = sum(query.close)/len(query.close)\n\n aList.append(dataResults)\n\n tickerCalculations[ticker] = aList\n\n return tickerCalculations;\n\n\"\"\"\ndisplay a help menu for users\n\"\"\"\ndef helpMenu():\n print(\n '''\n Coding Challenge:\n Usage:\n 1) python main.py #executes the program and runs the default tickerProcessing function\n 2) python main.py -biggest-loser #executes the program and runs the biggestLoser function\n 3) python main.py --help #displays this menu; additional flags to bring up the menu are -h or -H\n '''\n )\n\n\n\"\"\"\nmain execution\n\"\"\"\n#check to see if arguments are being passed\n#if not set args to execute the main function\n#otherwise set the args variable to the arguments passed by the user\n#(argv[0] is the file path, and thus is ignored)\nargs = sys.argv[1:] if len(sys.argv) > 1 else ['executeMain']\n\n#before doing anything else we check if help is being sought\n#if so display the help menu and end the program\nif ('--help' in args) or ('-h' in args) or ('-H' in args):\n helpMenu()\n sys.exit()\n\n#we made it here, so we are not asking for help and have executed the program successfully\n#loop through the args to determine if the biggest-loser 
argument was passed, or if we are running the default program\nfor x in args:\n print(\"Argument: \", x)\n\n #dataValidation (ensure dates properly formatted and tickers is a list)\n if (isDate(startDate) is False) or (isDate(endDate) is False):\n print(\"Invalid date provided: yyyy-mm-dd\")\n elif type(tickers) is not list:\n print(\"The provided tickers input is not a list: ['str1','str2',...,'strx']\")\n else:\n startDate = datetime.strptime(startDate, '%Y-%m-%d')\n endDate = datetime.strptime(endDate, '%Y-%m-%d')\n tickers = tickers\n\n print('Processing...')\n\n if (x == 'executeMain'):\n prettyPrint(tickerProcessing(startDate, endDate, tickers));\n elif (x == '-biggest-loser'):\n prettyPrint(biggestLoser(startDate, endDate, tickers));\n else:\n #invalid arguments likely passed\n #exit and return to the prompt for another go\n print(\n '''\n Passed arguments invalid...\n For usage please type:\n python main.py --help\n '''\n )\n sys.exit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"16162923","text":"\"\"\" TD PART III :\r\n#################################################################\r\n ANALYSIS AND HYPOTHESIS WITH A GENOME SCALE METABOLIC MODEL\r\nFrom the given functions, the information you got about the cobra language,\r\nand the result of previous TD part, try to answer the following questions.\r\nfiles: practical_metabolic_model.pdf\r\n\r\nFor a better understanding, you can also open the file function.py to see how some functions are built.\r\nDuring this tutorial you will use object oriented language.\r\n\"\"\"\r\n######A\r\n# 1- Load module:\r\nfrom functions import *\r\n\r\n#2- Dowload and import e_coli_core model:\r\n\r\n#3 - Print information about the model:\r\n\r\n#4- Launch the flux balance analysis without changing any parameter:\r\n\r\n#5- Print the objective value. Was it what you expected? \r\n\r\n#6- Print fluxes. How many reactions are active in this condition?\r\n\r\n#7 From the fluxes, define which metabolites are consume and produce by the model?\r\n#Verify your result with the summary function:\r\n\r\n\r\n#8- print the summary of one of these metabolites to see how it's produced and consumed:\r\n\r\n#9- Verify if the model can grow without oxygen:\r\n## Create a new model to keep initial state\r\n\r\n## Set upper bound and lower bound:\r\n\r\n## Launch fba\r\n\r\n## Conclude\r\n\r\n#10- Verify if the model can use other carbon sources : lactate or formate.\r\n## Create a copy of the model\r\n\r\n## Close the model\r\n\r\n## Change bound to allow entry of the target metabolite and verify the change is done:\r\n\r\n#11- It is possible to produce acetate and to grow? change the objective function (growth min 0.20):\r\n\r\n## Change the coefficient for ac from 1 to 0.5 for example. What do you observe?\r\n\r\n#######B\r\n#Let's look all of this in an entire model;\r\n\"\"\" In biggest model is more difficult to interpret something. Lets see if we can answer to some\r\nquestion using different functions.\r\n\"\"\"\r\n\r\n\"\"\" I - You know that you will receive a Saccharomyces cerevisiae strain that you have to grow.\r\nYou have 3 different media. 
Which one will you use to be sure that the strain will grow?\"\"\"\r\nMedium1 = ['ac_e','ade_e','ala__L_e','arg__L_e','asn__L_e','asp__L_e','ca2_e',\r\n 'cit_e','cys_L_e','man_e','gln__L_e','glu__L_e','h2o_e','his__L_e',\r\n 'ile__L_e','mg2_e','nad_e','nh4_e','nac_e','pi_e','pro__L_e','ser__L_e',\r\n 'so4_e','try__L_e','tyr__L_e','trp__L_e','h2_e','o2_e' ]\r\nMedium2 = ['ac_e','ade_e','ala__L_e','arg__L_e','asn__L_e','asp__L_e','ca2_e',\r\n 'cit_e','cu2_e','cys_L_e','fe2_e','fe3_e','fru_e','glc__D_e',\r\n 'gln__L_e','glu__L_e','h2o_e','his__L_e','ile__L_e','mg2_e','nad_e',\r\n 'nh4_e','nac_e','no3_e','pi_e','pro__L_e','ser__L_e','so4_e','try__L_e',\r\n 'tyr__L_e','ura_e','val__L_e','trp__L_e','h2_e','o2_e' ]\r\nMedium3 = ['4abz_e','ac_e','ace_e','ade_e','ala__L_e','arg__L_e','ascb__L_e','asn__L_e',\r\n 'asp__L_e','btn_e','cbl1_e','cit_e','cl_e','cobalt2_e',\r\n 'cu2_e','cys_L_e','fe2_e','fe3_e','fol_e','glc__D_e','glu__L_e','gly_e','gthrd_e',\r\n 'gua_e','h2o_e','hco3_e','his__L_e','i_e','ile__L_e','inost_e','k_e','leu_L_e','met__L_e','co2_e','no2_e',\r\n 'mg2_e','mn2_e','mndn_e','mobd_e','na1_e','nad_e','nh4_e','nac_e','ni2_e',\r\n 'no3_e','pi_e','pnto__R_e','pro__L_e','pydam_e','pydxn_e','rezrn_e','ribflv_e',\r\n 'ser__L_e','so4_e','thci_e','thm_e','try__L_e','tyr__L_e','ura_e','val__L_e',\r\n 'xan_e','zn2_e','h_e','trp__L_e', 'mobd_e','slnt_e', 'tungs_e','lipoate_e',\r\n 'pydx_e','o2_e' ]\r\n\r\n# Download one model from bigg database \r\n\r\n# Verify that the model can grow without changing any parameter:\r\n\r\n# Verify how the model can grow in each medium and conclude:\r\n\r\n\r\n\r\n\"\"\" II- You work for an agribusiness company specialized in yoghurt production.\r\nIn, purpose to improve the taste of some product, you want to produce the molecule RR 2 3 Butanediol which gave the butter/ creamy taste.\r\nYour company whant a biological way to produce this molecule instead of chemical.\r\nyou have 4 species in your bacteria collection:\r\n - Saccharomyces cerevisiae\r\n - Lactococcus lactis\r\n - Klebsiella pneumoniae\r\nCan you tell which species is the most likely to produce this molecule? 
\"\"\"\r\n\r\n# Find molecule id on Bigg database / its exchange reaction associated\r\n\r\n# Load the 3 models\r\n\r\n\r\n# Run following function for 'lactococcus lactis':\r\n#%%% = updateLactococcus(%%%)\r\n\r\n# Verify that models can grow:\r\n\r\n#for each model find biomass function position or name\r\n\r\n#=> Choice 1 :\r\n#- Update the biomass bound (>0)\r\n#- Change the objective function\r\n#- Run FBA\r\n\r\n#=> Choice 2 :\r\n#- Change the objective function with both reactions\r\n#- Run FBA\r\n\r\n\r\n# What can you conclude?\r\n\r\n\r\n# Did you check if models could spontaneously produce the molecule?\r\n\r\n\r\n\r\n","sub_path":"Python_R_Scripts/III-Anaysis.py","file_name":"III-Anaysis.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"538871295","text":"#import numpy as np\r\nimport random\r\n\r\nimport numpy as np\r\n\r\n# Finite-State, Finite-Action Markov Decission Process\r\nclass FiniteMDP:\r\n def __init__(self, transitions, rewards, discount_factor):\r\n self.model = transitions\r\n self.rewards = rewards\r\n self.discount_factor = discount_factor\r\n\r\n def next_state(self, curr_state, action):\r\n next_states_transitions = self.model[curr_state][action]\r\n\r\n states = []\r\n probs = []\r\n for next_state, prob in next_states_transitions.items():\r\n states.append(next_state)\r\n probs.append(prob)\r\n sample = np.random.random_sample(1)[0]\r\n s = 0\r\n for i in range(len(probs)):\r\n if sample < s + probs[i]:\r\n return states[i]\r\n s = s + probs[i]\r\n\r\n def qvalue_iteration(self, num_iter=100, iteration_callback=None):\r\n q_value = {}\r\n v_value = {} # This max_a{q_value(s, a)}. This is used for caching in order to reduce running complexity.\r\n # Initialize q_values and v_values to 0\r\n\r\n for state, action_dict in self.model.items():\r\n state_actions_values = {}\r\n for action in action_dict.keys():\r\n state_actions_values[action] = 0\r\n v_value[state] = 0\r\n q_value[state] = state_actions_values\r\n \r\n # Initialize random policy\r\n #random.seed(a=42)\r\n if iteration_callback is not None:\r\n prev_policy = {}\r\n for state, action_dict in q_value.items():\r\n prev_policy[state] = random.choice(list(action_dict.keys()))\r\n\r\n for t in range(num_iter):\r\n prev_q_value = q_value\r\n for state, action_dict in self.model.items():\r\n for action, new_state_dict in action_dict.items():\r\n expected_v_next = 0\r\n for new_state, prob in new_state_dict.items(): #prob is P(state, action, new_state)\r\n if new_state is not None: # Not terminal state\r\n expected_v_next = expected_v_next + (prob * v_value[new_state])\r\n # Update Q-Value by Bellman Equation\r\n q_value[state][action] = self.rewards[state] + self.discount_factor*expected_v_next\r\n\r\n # Update v-values for next iteration outside of nested loops above.\r\n for state, action_dict in q_value.items():\r\n v_value[state] = action_dict[max(action_dict, key=action_dict.get)]\r\n\r\n if iteration_callback is not None:\r\n curr_policy = prev_policy\r\n for state, action_dict in q_value.items():\r\n best_action = max(action_dict, key=action_dict.get)\r\n if action_dict[best_action] > prev_q_value[state][prev_policy[state]]:\r\n curr_policy[state] = best_action\r\n iteration_callback(t, curr_policy, q_value, v_value)\r\n prev_policy = curr_policy\r\n\r\n # Compute final optimal policy\r\n policy = {}\r\n for state, action_dict in q_value.items():\r\n policy[state] = max(action_dict, 
key=action_dict.get)\r\n return (policy, q_value, v_value)\r\n\r\n\r\n","sub_path":"mlp/reinforcement/environment/fmdp.py","file_name":"fmdp.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"415672835","text":"#! /usr/bin/env python3\n\nimport numpy,pandas as pd,math,string\n\n\nclass container:\n class data:\n master=None\n def maxStorage(self):\n number='0'*10\n bytesPerLine=10\n phoneNumberLen=len(number)\n phoneNumberDigits=len(string.digits)\n maxPhoneNumbers=math.pow(phoneNumberLen,phoneNumberDigits)\n maxStorage=maxPhoneNumbers*bytesPerLine\n return maxStorage\n\n class processing:\n master=None\n def engineering(self,num):\n if 1000 > num > 0:\n #b\n return str(num)+\"b\".upper()\n elif 1000**2 > num > 1000**1: \n #k\n return str(num/(1000**1))+\"kb\".upper()\n elif 1000**3 > num > 1000**2:\n #m\n return str(num/(1000**2))+\"mb\".upper()\n elif 1000**4 > num > 1000**3:\n #g\n return str(num/(1000**3))+\"gb\".upper()\n elif 1000**5 > num > 1000**4:\n #t\n return str(num/(1000**4))+\"tb\".upper()\n elif 1000**6 > num > 1000**5:\n #p\n return str(num/(1000**5))+\"pb\".upper()\n class tasks:\n #this is where the work should be done\n #so as to not pollute the assembler\n master=None\n def run(self):\n maxStorage=self.master.data.maxStorage()\n result=self.master.processing.engineering(maxStorage)\n print(result)\n\n class void:\n master=None\n\n def assembler(self):\n #this is just to put all objects in the work area wa\n wa=self.void()\n wa.master=wa\n\n wa.data=self.data()\n wa.data.master=wa\n\n wa.processing=self.processing()\n wa.processing.master=wa\n\n wa.tasks=self.tasks()\n wa.tasks.master=wa\n wa.tasks.run()\n\ncont=container()\ncont.assembler()\n","sub_path":"classing-python3/phoneNumbers-calc.py","file_name":"phoneNumbers-calc.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"91002593","text":"#!/usr/bin/env python\nimport re\nfrom itertools import combinations_with_replacement as all_combinations\nfrom collections import Counter\n\npuzzleinput = 'day15.txt'\n\nproperties = ['capacity', 'durability', 'flavor', 'texture', 'calories']\nteaspoons = 100\ncalories_required = 500\n\nregex = r'(.*):.{10}(-?\\d+),.{12}(-?\\d+),.{8}(-?\\d+),.{9}(-?\\d+),.{10}(-?\\d+)'\n\ndef score(recipe):\n property_tally = dict(zip(properties,[0] * len(properties)))\n for item in recipe:\n for i in ingredients_list:\n if i['name'] == item[0]:\n for p in properties:\n property_tally[p] += item[1] * i[p]\n calorie_target_met = property_tally['calories'] == calories_required\n score = 1\n for p in property_tally:\n if p != 'calories':\n if property_tally[p] > 0:\n score *= property_tally[p]\n else:\n return (0, False)\n return score, calorie_target_met\n\ningredients_list = list()\nrecipes_list = list()\nbest_ignoring_calories = 0\nbest_calorie_target = 0\n\nwith open(puzzleinput) as f:\n for ingredients_text in f:\n i = list(re.match(regex, ingredients_text).groups())\n i = [i[0], int(i[1]), int(i[2]), int(i[3]), int(i[4]), int(i[5])]\n ingredient = dict(zip(['name'] + properties, i))\n ingredients_list.append(ingredient)\n\ningredient_names = [i['name'] for i in ingredients_list]\nall_ingredient_combinations = all_combinations(ingredient_names, teaspoons)\nfor combination in all_ingredient_combinations:\n recipes_list.append(Counter(combination).items())\n\nfor recipe in recipes_list:\n current_score, 
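A small usage sketch for the `FiniteMDP` class above, assuming the class is in scope. The two-state chain and its numbers are invented; rewards are per-state, matching the class's `self.rewards[state]` lookup.

```python
# Hypothetical MDP: 'go' moves s0 -> s1 with probability 0.9; s1 absorbs.
transitions = {
    's0': {'go': {'s1': 0.9, 's0': 0.1}, 'stay': {'s0': 1.0}},
    's1': {'stay': {'s1': 1.0}},
}
rewards = {'s0': 0.0, 's1': 1.0}

mdp = FiniteMDP(transitions, rewards, discount_factor=0.9)
policy, q_value, v_value = mdp.qvalue_iteration(num_iter=100)
print(policy)   # expected: {'s0': 'go', 's1': 'stay'}
print(v_value)  # v('s1') approaches 1 / (1 - 0.9) = 10
```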
calorie_target_met = score(recipe)\n best_ignoring_calories = max(current_score, best_ignoring_calories)\n if calorie_target_met:\n best_calorie_target = max(current_score, best_calorie_target)\nprint('part 1: %d' % best_ignoring_calories)\nprint('part 2: %d' % best_calorie_target)\n","sub_path":"day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"79917893","text":"from django import http\nfrom django.utils.encoding import smart_unicode as u\n\nfrom olympia.amo.helpers import page_title\n\nfrom . import get_service\nfrom .forms import ShareForm\n\n\ndef share(request, obj, name, description):\n try:\n service = get_service(request.GET['service'])\n except KeyError:\n raise http.Http404()\n\n if not service:\n raise http.Http404()\n\n form = ShareForm({\n 'title': page_title({'request': request}, name),\n 'url': u(obj.get_url_path()),\n 'description': description,\n })\n form.full_clean()\n return http.HttpResponseRedirect(service.url.format(**form.cleaned_data))\n","sub_path":"src/olympia/sharing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"347539166","text":"#\n# Copyright 2012, Piston Cloud Computing, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport mox\nimport unittest\n\nfrom burrow.common import cfg\nfrom burrow.common import context\nfrom burrow.common import exc\nfrom burrow.api.sqs import controller as sqs_controller\n\nfrom tests.fakes import queue\n\nclass TestSQSController(unittest.TestCase):\n def setUp(self):\n cfg.CFG.queue_backend = 'tests.fakes.queue'\n self.context = context.RequestContext(auth_tok='tik_tok',\n user='fake_user',\n tenant='tenant_uuid')\n self.context2 = context.RequestContext(auth_tok='tik_tok',\n user='fake_user2',\n tenant='not_my_tenant')\n\n self.out_of_context = context.RequestContext(auth_tok='tik_tok',\n user='fake_user2',\n tenant='out_of_context')\n\n self.uuids = (u for u in xrange(12345, 12355))\n\n self.controller = sqs_controller.Controller()\n self.controller.queue_api.setup_fixtures() # Bring nothing to table.\n self.mox = mox.Mox()\n\n def tearDown(self):\n self.mox.UnsetStubs()\n\n def test_list_queues(self):\n self.assertEqual([], self.controller.list_queues(self.context))\n self.assertRaises(exc.QueueError, \n self.controller.receive_message,\n self.context, 'fake_queue')\n\n self.controller.queue_api.setup_fixtures(\n queues=[{'uuid': '12345',\n 'owner': 'fake_user',\n 'tenant': self.context.tenant,\n 'name': 'fake_queue',},\n {'uuid': '123456',\n 'owner': 'not_me',\n 'tenant': 'not_my_tenant',\n 'name': 'fake_queue',}])\n\n self.assertEqual(1, len(self.controller.list_queues(self.context)))\n self.assertEqual(1, len(self.controller.list_queues(\n self.context, queue_name_prefix='fake_')))\n self.assertEqual(1, 
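The cookie-recipe record above enumerates every split of 100 teaspoons among the ingredients with `combinations_with_replacement` plus `Counter`. A tiny self-contained check of that counting idea (ingredient names invented, 4 teaspoons to keep the output short):

```python
from itertools import combinations_with_replacement
from collections import Counter
from math import comb

ingredients = ['butterscotch', 'cinnamon']  # invented names
teaspoons = 4

splits = [Counter(c) for c in combinations_with_replacement(ingredients, teaspoons)]
for split in splits:
    print(dict(split))  # e.g. {'butterscotch': 3, 'cinnamon': 1}

# Multisets of size n drawn from k items: C(n + k - 1, k - 1) = C(5, 1) = 5.
assert len(splits) == comb(teaspoons + len(ingredients) - 1, len(ingredients) - 1)
```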
len(self.controller.list_queues(self.context2)))\n self.assertEqual(0, len(self.controller.list_queues(\n self.out_of_context)))\n self.assertEqual(0, len(self.controller.list_queues(\n self.context, queue_name_prefix='empty')))\n self.assertEqual([{'queue_url': 'http://localhost/tenant_uuid/fake_queue'}],\n self.controller.list_queues(self.context))\n\n def test_create_queue(self):\n self.mox.StubOutWithMock(queue.uuid, 'uuid4')\n queue.uuid.uuid4().AndReturn(self.uuids.next())\n queue.uuid.uuid4().AndReturn(self.uuids.next())\n\n self.mox.ReplayAll()\n self.assertEqual(0, len(self.controller.list_queues(self.context)))\n self.assertEqual('12345',\n self.controller.create_queue(self.context, 'tealeaf'))\n\n self.assertEqual(1, len(self.controller.list_queues(self.context)))\n self.assertRaises(exc.QueueError, self.controller.create_queue,\n self.context, 'tealeaf')\n \n self.assertEqual('12346',\n self.controller.create_queue(self.context, 'fffffffuuuuuuuuuuuu'))\n\n self.assertEqual(2, len(self.controller.list_queues(self.context)))\n\n def test_get_queue_url(self):\n self.mox.StubOutWithMock(queue.uuid, 'uuid4')\n queue.uuid.uuid4().AndReturn(self.uuids.next())\n\n self.mox.ReplayAll()\n self.assertEqual(0, len(self.controller.list_queues(self.context)))\n self.assertEqual('12345',\n self.controller.create_queue(self.context, 'tealeaf'))\n\n self.assertEqual({'queue_url': 'http://localhost/tenant_uuid/tealeaf'}, \n self.controller.get_queue_url(self.context, 'tealeaf'))\n\n\n def test_delete_queue(self):\n self.mox.StubOutWithMock(queue.uuid, 'uuid4')\n queue.uuid.uuid4().AndReturn(self.uuids.next())\n queue.uuid.uuid4().AndReturn(self.uuids.next())\n\n self.mox.ReplayAll()\n self.assertEqual(0, len(self.controller.list_queues(self.context)))\n self.assertEqual('12345',\n self.controller.create_queue(self.context, 'tealeaf'))\n\n self.assertEqual(1, len(self.controller.list_queues(self.context)))\n self.assertRaises(exc.QueueError, self.controller.create_queue,\n self.context, 'tealeaf')\n\n self.assertEqual('12346',\n self.controller.create_queue(self.context, 'fffffffuuuuuuuuuuuu'))\n\n self.assertEqual(2, len(self.controller.list_queues(self.context)))\n self.assertIsNone(self.controller.delete_queue(self.context, 'tealeaf'))\n\n self.assertEqual(1, len(self.controller.list_queues(self.context)))\n self.assertIsNone(self.controller.delete_queue(self.context,\n 'fffffffuuuuuuuuuuuu'))\n self.assertEqual(0, len(self.controller.list_queues(self.context)))\n","sub_path":"tests/api/sqs/test_controller.py","file_name":"test_controller.py","file_ext":"py","file_size_in_byte":5801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"297911274","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Convert HTML on the official site into CSV file.\n\n1. Download HTML page. (http://www.j-league.or.jp/schedule/)\n2. Convert HTML file encoding into UTF-8.\n3. Run this script to give HTML file path as argument, and save CSV output.\n4. Post CSV file via App Engine remote API.\n5. 
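The test case above pins `uuid.uuid4` through mox's `StubOutWithMock`/`AndReturn`. For reference, a rough equivalent with the standard library's `unittest.mock`, as a self-contained sketch:

```python
import uuid
from unittest import mock

# side_effect yields one value per call, like consecutive AndReturn()s in mox.
with mock.patch.object(uuid, 'uuid4', side_effect=['12345', '12346']):
    assert uuid.uuid4() == '12345'
    assert uuid.uuid4() == '12346'
```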
Check some iCal feeds.\n\n        $ curl \"http://jleague-calendar.appspot.com/club/fctokyo.ics\"\n\nUsage:\n    webpage2csv.py [-v | -q] [-d DELIM] [-o OUTPUT] <html> <clubs> <stadiums>\n    webpage2csv.py -h | --help\n    webpage2csv.py --version\n\nOptions:\n    -h --help                   Show this screen.\n    --version                   Show version.\n    -v --verbose                Increase logging verbosity [default: False].\n    -q --quiet                  Suppress logging [default: False].\n    -d DELIM --delimiter=DELIM  Input field delimiter [default: ,].\n    -o OUTPUT --output=OUTPUT   Output file path.\n\"\"\"\n\nimport csv\nimport datetime\nimport logging\nimport logging.config\nimport os\nimport re\nimport sys\n\nimport docopt\nimport schema\nfrom clitool.textio import RowMapper\nfrom bs4 import BeautifulSoup\n\nDEFAULT_ENCODING = 'utf-8'\n\nAPPNAME = os.path.splitext(os.path.basename(__file__))[0]\nVERSION = '0.1'\nLOG_FORMAT = '%(asctime)s|%(name)s:%(levelname)s|L%(lineno)d|%(message)s'\nLOG_DATEFMT = '%Y-%m-%d %H:%M:%S'\n\nCOMMON_ARGUMENTS = {  # for \"schema.Schema\" constructor\n    '--help': bool,\n    '--version': bool,\n    '--verbose': bool,\n    '--quiet': bool,\n    '--delimiter': schema.And(str, lambda d: len(d) == 1)\n}\n\nLOGGING_CONFIG = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'formatters': {\n        'standard': {\n            'format': LOG_FORMAT,\n            'datefmt': LOG_DATEFMT\n        },\n    },\n    'handlers': {\n        'default': {\n            'level': 'DEBUG',\n            'class': 'logging.StreamHandler',\n            'formatter': 'standard',\n        },\n    },\n    'loggers': {\n        APPNAME: {\n            'handlers': ['default'],\n            'level': 'INFO',\n            'propagate': False\n        }\n    }\n}\n\nFIXTURE = re.compile(r\"^J[1|2] 第([\\d]+)節$\")\nDAY = re.compile(r\"^([\\d]+)月([\\d]+)日\\(.*\\)$\")\n\nCLUB_FIELDS = [\n    {\"id\": \"key\", \"type\": \"string\"},\n    {\"id\": \"abbr\", \"type\": \"string\"},\n    {\"id\": \"display_name\", \"type\": \"string\"},\n    {\"id\": \"category\", \"type\": \"string\"},\n    {\"id\": \"channel\", \"type\": \"string\"},\n    {\"id\": \"url\", \"type\": \"string\"},\n    {\"id\": \"stadium\", \"type\": \"string\"}\n]\n\nSTADIUM_FIELDS = [\n    {\"id\": \"key\", \"type\": \"string\"},\n    {\"id\": \"abbr\", \"type\": \"string\"},\n    {\"id\": \"name\", \"type\": \"string\"},\n    {\"id\": \"display_name\", \"type\": \"string\"},\n    {\"id\": \"location\", \"type\": \"string\"},\n    {\"id\": \"wikipedia_url\", \"type\": \"string\"}\n]\n\nFIELDS = [\n    {\"id\": \"key\", \"type\": \"string\"},\n    {\"id\": \"year\", \"type\": \"integer\"},\n    {\"id\": \"fixture\", \"type\": \"integer\"},\n    {\"id\": \"home\", \"type\": \"string\"},\n    {\"id\": \"away\", \"type\": \"string\"},\n    {\"id\": \"stadium\", \"type\": \"string\"},\n    {\"id\": \"kickoff\", \"type\": \"datetime\", \"format\": \"%Y-%m-%dT%H:%M:%S\"},\n    {\"id\": \"status\", \"type\": \"string\"},\n    {\"id\": \"description\", \"type\": \"string\"}\n]\n\n\nclass DictMapper(object):\n    \"\"\"Convert a dictionary object to a list of string values.\n    \"\"\"\n\n    def __init__(self, fields):\n        self.fields = fields\n\n    def __call__(self, dt):\n        out = []\n        for f in self.fields:\n            k, t = f['id'], f['type']\n            v = dt.get(k, f.get('default', ''))\n            if t == 'string':\n                val = v\n            elif not v:\n                val = ''\n            elif t == 'datetime':\n                val = v.strftime(f['format'])\n            elif t in ('integer', 'float'):\n                val = str(v)\n            elif t == 'boolean':\n                m = f.get('mapping', {})\n                if v in m:\n                    val = m[v]\n                else:\n                    val = str(v)\n            else:\n                raise ValueError('Unknown type \"{}\" for \"{}\"'.format(t, k))\n            out.append(val)\n        return out\n\n\nclass ScheduleWriter(object):\n\n    fields = FIELDS\n\n    def __init__(self, stream, delimiter=','):\n        self.writer = csv.writer(stream, 
delimiter=delimiter)\n        self.mapper = DictMapper(self.fields)\n\n    def head(self):\n        self.writer.writerow(tuple(map(lambda f: f['id'], self.fields)))\n\n    def write(self, dt):\n        self.writer.writerow(self.convert(dt))\n\n    def convert(self, dt):\n        dt['year'] = dt['kickoff'].year\n        dt['key'] = '{}-{}-{}-{}-{}'.format(\n            dt['year'], dt['category'], dt['fixture'], dt['home'], dt['away'])\n        if 'status' not in dt:\n            dt['status'] = 'yet'\n        return self.mapper(dt)\n\n\nclass ScheduleBoxParser(object):\n\n    def __init__(self, writer, clubs, stadiums):\n        self.writer = writer\n        self.stadiums = stadiums\n        self.clubs = clubs\n        self.logger = logging.getLogger(APPNAME)\n\n    def eat(self, soup):\n        schedules = soup.find_all('div', attrs={'class': 'sche-box'})\n        for schedule in schedules:\n            self.parse(schedule)\n\n    def parse(self, schebox):\n        self.logger.debug('Start parsing')\n        mday = schebox.find('h3', 'match-day').string\n        m1 = DAY.match(mday)\n        if m1 is None:\n            self.logger.error('Invalid match day string: %s', mday)\n            return\n        categories = schebox.find_all('h3', 'match-cate')\n        tables = schebox.find_all('div', 'sche-table')\n        if len(categories) != len(tables):\n            self.logger.error('Sizes of \"match-cate\" and \"sche-table\" are not ' +\n                              'matched at %s. %d != %d', mday, len(categories),\n                              len(tables))\n            return\n\n        year = 2014  # Magic number\n        month, day = map(int, m1.groups())\n\n        for ecategory, ematches in zip(categories, tables):\n            scategory = ecategory.string\n            # TODO: Parse \"ヤマザキナビスコカップ\"\n            m2 = FIXTURE.match(scategory)\n            if m2 is None:\n                self.logger.info('Category is not J League: %s', scategory)\n                continue\n            if u'J1' in scategory:\n                category = 'J1'\n            elif u'J2' in scategory:\n                category = 'J2'\n            else:\n                self.logger.fatal('regex is wrong for: %s', scategory)\n                continue\n            fixture = int(m2.group(1))\n            for match in ematches.find_all('tr'):\n                dt = {'category': category, 'fixture': fixture}\n                kickoff = match.find(class_='kickoff').string\n                if kickoff != u'未定':\n                    hour, minute = map(int, kickoff.split(':'))\n                    dt['kickoff'] = datetime.datetime(year, month, day,\n                                                      hour, minute)\n                else:\n                    dt['kickoff'] = datetime.date(year, month, day)\n                dt['home'] = self.parse_club(match, 'home')\n                dt['away'] = self.parse_club(match, 'away')\n                dt['stadium'] = self.parse_stadium(match)\n                score = self.parse_score(match)\n                if score is not None:\n                    dt['status'] = 'done'\n                    dt['description'] = score\n                self.writer.write(dt)\n        self.logger.debug('Finish parsing')\n\n    def parse_club(self, match, kind):\n        elem = match.find(class_=kind).find('a')\n        if elem is None:\n            self.logger.error('Element is not found for club: %s', kind)\n            return\n        if kind == 'home':\n            index = 0\n        elif kind == 'away':\n            index = 1\n        else:\n            self.logger.fatal('Unknown club type in match: %s', kind)\n            return\n        club = self.clubs.get(elem.contents[index])\n        if club is None:\n            self.logger.error('Club is not found in master data: %s', kind)\n            return\n        return club['key']\n\n    def parse_stadium(self, match):\n        elem = match.find(class_='stadium').find('a')\n        if elem is None:\n            # No link: fall back to the bare cell, which holds \"(abbr)\".\n            elem = match.find('td', class_='stadium')\n        if elem is None or elem.string is None:\n            self.logger.error('Element is not found for stadium')\n            return None\n        abbr = elem.string.strip().lstrip(u'(').rstrip(u')')\n        stadium = self.stadiums.get(abbr)\n        if stadium is None:\n            self.logger.error('Unknown stadium name: %s', abbr)\n            return None\n        return stadium['display_name']\n\n    def parse_score(self, match):\n        elem = match.find(class_='score').find('a')\n        if elem is None:\n            return\n        if elem.get('href').startswith('/result/'):\n            return elem.string\n        # or \"href\" startswith '/live/' for live score.\n\n\ndef main():\n    
args = docopt.docopt(__doc__, version='{} {}'.format(APPNAME, VERSION))\n    try:\n        s = schema.Schema(dict({\n            '--output': schema.Or(None, schema.Use(lambda f: open(f, 'w'))),\n            '<html>': os.path.isfile,\n            '<clubs>': os.path.isfile,\n            '<stadiums>': os.path.isfile\n        }, **COMMON_ARGUMENTS))\n        args = s.validate(args)\n    except schema.SchemaError as e:\n        exit(e)\n\n    # Setup logging verbosity.\n    logging.config.dictConfig(LOGGING_CONFIG)\n    logger = logging.getLogger(APPNAME)\n    if args['--verbose']:\n        logger.setLevel(logging.DEBUG)\n    elif args['--quiet']:\n        logger.setLevel(logging.CRITICAL)\n\n    # Load file contents\n    with open(args['<html>']) as fp:\n        soup = BeautifulSoup(fp)\n    clubs = {}\n    with open(args['<clubs>']) as fp:\n        mapper = RowMapper(CLUB_FIELDS, strict=False)\n        reader = csv.reader(fp, delimiter=',')\n        for r in map(mapper, reader):\n            if 'abbr' in r:\n                clubs[r['abbr']] = r\n    stadiums = {}\n    with open(args['<stadiums>']) as fp:\n        mapper = RowMapper(STADIUM_FIELDS)\n        reader = csv.reader(fp, delimiter=',')\n        for r in map(mapper, reader):\n            stadiums[r['abbr']] = r\n\n    # Prepare output stream, and put header row.\n    output_stream = args['--output'] or sys.stdout\n    writer = ScheduleWriter(output_stream)\n    writer.head()\n    # Start parsing, writing out each match.\n    parser = ScheduleBoxParser(writer, clubs, stadiums)\n    parser.eat(soup)\n\n    if not output_stream.isatty():\n        output_stream.close()\n\n\nif __name__ == '__main__':\n    main()\n\n# vim: set et ts=4 sw=4 cindent fileencoding=utf-8 :\n","sub_path":"scraping/webpage2csv.py","file_name":"webpage2csv.py","file_ext":"py","file_size_in_byte":10489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"39267662","text":"import os\n\n\ntemplatedir = './templates'\n\n\nclass Template(str):\n    def __new__(self, string):\n        if not os.path.exists(os.path.join(templatedir, string)):\n            raise Exception('Template not found')\n        return super().__new__(self, string)\n\n\nteam_register = Template('team_register.html')\n","sub_path":"templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"575100810","text":"#\n# -*- coding: utf-8 -*-\n# Copyright 2019 Red Hat\n# GNU General Public License v3.0+\n# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\"\"\"\nThe junos_l3_interfaces class\nIt is in this file where the current configuration (as dict)\nis compared to the provided configuration (as dict) and the command set\nnecessary to bring the current configuration to its desired end-state is\ncreated\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nfrom ansible.module_utils.network.common.cfg.base import ConfigBase\nfrom ansible.module_utils.network.common.utils import to_list\nfrom ansible.module_utils.network.junos.facts.facts import Facts\nfrom ansible.module_utils.network.junos.junos import (\n    locked_config, load_config, commit_configuration, discard_changes,\n    tostring)\nfrom ansible.module_utils.network.common.netconf import (build_root_xml_node,\n                                                         build_child_xml_node)\n\n\nclass L3_interfaces(ConfigBase):\n    \"\"\"\n    The junos_l3_interfaces class\n    \"\"\"\n\n    gather_subset = [\n        '!all',\n        '!min',\n    ]\n\n    gather_network_resources = [\n        'l3_interfaces',\n    ]\n\n    def __init__(self, module):\n        super(L3_interfaces, self).__init__(module)\n\n    def get_l3_interfaces_facts(self):\n        \"\"\" Get the 'facts' (the current configuration)\n\n        :rtype: A dictionary\n        :returns: The current 
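A stripped-down version of the docopt-plus-schema pattern used in `main()` above, with a made-up one-file CLI:

```python
"""Usage: tool.py [-d DELIM] <input>

Options:
    -d DELIM --delimiter=DELIM  Field delimiter [default: ,].
"""
import os
import docopt
import schema

args = docopt.docopt(__doc__)
validator = schema.Schema({
    '<input>': os.path.isfile,                              # must name an existing file
    '--delimiter': schema.And(str, lambda d: len(d) == 1),  # single character
})
try:
    args = validator.validate(args)
except schema.SchemaError as e:
    exit(e)
print(args)
```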
configuration as a dictionary\n \"\"\"\n facts, _warnings = Facts(self._module).get_facts(\n self.gather_subset, self.gather_network_resources)\n l3_interfaces_facts = facts['ansible_network_resources'].get(\n 'l3_interfaces')\n if not l3_interfaces_facts:\n return []\n return l3_interfaces_facts\n\n def execute_module(self):\n \"\"\" Execute the module\n\n :rtype: A dictionary\n :returns: The result from module execution\n \"\"\"\n result = {'changed': False}\n warnings = list()\n\n existing_interfaces_facts = self.get_l3_interfaces_facts()\n\n config_xmls = self.set_config(existing_interfaces_facts)\n with locked_config(self._module):\n for config_xml in to_list(config_xmls):\n diff = load_config(self._module, config_xml, warnings)\n\n commit = not self._module.check_mode\n if diff:\n if commit:\n commit_configuration(self._module)\n else:\n discard_changes(self._module)\n result['changed'] = True\n\n if self._module._diff:\n result['diff'] = {'prepared': diff}\n\n result['commands'] = config_xmls\n\n changed_interfaces_facts = self.get_l3_interfaces_facts()\n\n result['before'] = existing_interfaces_facts\n if result['changed']:\n result['after'] = changed_interfaces_facts\n\n result['warnings'] = warnings\n return result\n\n def set_config(self, existing_l3_interfaces_facts):\n \"\"\" Collect the configuration from the args passed to the module,\n collect the current configuration (as a dict from facts)\n\n :rtype: A list\n :returns: the commands necessary to migrate the current configuration\n to the desired configuration\n \"\"\"\n want = self._module.params['config']\n have = existing_l3_interfaces_facts\n resp = self.set_state(want, have)\n return to_list(resp)\n\n def set_state(self, want, have):\n \"\"\" Select the appropriate function based on the state provided\n\n :param want: the desired configuration as a dictionary\n :param have: the current configuration as a dictionary\n :rtype: A list\n :returns: the list xml configuration necessary to migrate the current\n configuration\n to the desired configuration\n \"\"\"\n root = build_root_xml_node('interfaces')\n state = self._module.params['state']\n if state == 'overridden':\n config_xmls = self._state_overridden(want, have)\n elif state == 'deleted':\n config_xmls = self._state_deleted(want, have)\n elif state == 'merged':\n config_xmls = self._state_merged(want, have)\n elif state == 'replaced':\n config_xmls = self._state_replaced(want, have)\n\n for xml in config_xmls:\n root.append(xml)\n\n return tostring(root)\n\n def _get_common_xml_node(self, name):\n root_node = build_root_xml_node('interface')\n build_child_xml_node(root_node, 'name', name)\n intf_unit_node = build_child_xml_node(root_node, 'unit')\n return root_node, intf_unit_node\n\n def _state_replaced(self, want, have):\n \"\"\" The xml generator when state is replaced\n\n :rtype: A list\n :returns: the xml necessary to migrate the current configuration\n to the desired configuration\n \"\"\"\n intf_xml = []\n intf_xml.extend(self._state_deleted(want, have))\n intf_xml.extend(self._state_merged(want, have))\n return intf_xml\n\n def _state_overridden(self, want, have):\n \"\"\" The xml generator when state is overridden\n\n :rtype: A list\n :returns: the xml necessary to migrate the current configuration\n to the desired configuration\n \"\"\"\n intf_xml = []\n intf_xml.extend(self._state_deleted(have, have))\n intf_xml.extend(self._state_merged(want, have))\n return intf_xml\n\n def _state_merged(self, want, have):\n \"\"\" The xml generator when state is 
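The junos module above assembles its NETCONF payload through `build_root_xml_node`/`build_child_xml_node`; a plain `xml.etree.ElementTree` sketch of the same element shape (interface name and address invented):

```python
import xml.etree.ElementTree as ET

# Mirrors the nesting the config classes emit:
# <interfaces><interface><name>..</name><unit><name>0</name>
#   <family><inet><address><name>a.b.c.d/nn</name></address></inet></family>
root = ET.Element('interfaces')
intf = ET.SubElement(root, 'interface')
ET.SubElement(intf, 'name').text = 'ge-0/0/1'   # invented interface
unit = ET.SubElement(intf, 'unit')
ET.SubElement(unit, 'name').text = '0'
inet = ET.SubElement(ET.SubElement(unit, 'family'), 'inet')
addr = ET.SubElement(inet, 'address')
ET.SubElement(addr, 'name').text = '192.0.2.1/24'

print(ET.tostring(root, encoding='unicode'))
```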
merged\n\n        :rtype: A list\n        :returns: the xml necessary to merge the provided into\n                  the current configuration\n        \"\"\"\n        intf_xml = []\n        for config in want:\n            root_node, unit_node = self._get_common_xml_node(config['name'])\n            build_child_xml_node(unit_node, 'name',\n                                 str(config['unit']))\n            if config.get('ipv4'):\n                self.build_ipaddr_et(config, unit_node)\n            if config.get('ipv6'):\n                self.build_ipaddr_et(config, unit_node, protocol='ipv6')\n            intf_xml.append(root_node)\n        return intf_xml\n\n    def build_ipaddr_et(self, config, unit_node, protocol='ipv4',\n                        delete=False):\n        family = build_child_xml_node(unit_node, 'family')\n        inet = 'inet'\n        if protocol == 'ipv6':\n            inet = 'inet6'\n        ip_protocol = build_child_xml_node(family, inet)\n        for ip_addr in config[protocol]:\n            if ip_addr['address'] == 'dhcp' and protocol == 'ipv4':\n                build_child_xml_node(ip_protocol, 'dhcp')\n            else:\n                ip_addresses = build_child_xml_node(\n                    ip_protocol, 'address')\n                build_child_xml_node(\n                    ip_addresses, 'name', ip_addr['address'])\n\n    def _state_deleted(self, want, have):\n        \"\"\" The xml configuration generator when state is deleted\n\n        :rtype: A list\n        :returns: the xml configuration necessary to remove the current\n        configuration of the provided objects\n        \"\"\"\n        intf_xml = []\n        existing_l3_intfs = [l3_intf['name'] for l3_intf in have]\n\n        if not want:\n            want = have\n\n        for config in want:\n            if config['name'] not in existing_l3_intfs:\n                continue\n            else:\n                root_node, unit_node = self._get_common_xml_node(\n                    config['name'])\n                build_child_xml_node(unit_node, 'name',\n                                     str(config['unit']))\n                family = build_child_xml_node(unit_node, 'family')\n                ipv4 = build_child_xml_node(family, 'inet')\n                intf = next(\n                    (intf for intf in have if intf['name'] == config['name']),\n                    None)\n                if 'ipv4' in intf:\n                    if 'dhcp' in [x['address'] for x in intf.get('ipv4') if intf.get('ipv4') is not None]:\n                        build_child_xml_node(ipv4, 'dhcp', None, {'delete': 'delete'})\n                    else:\n                        build_child_xml_node(\n                            ipv4, 'address', None, {'delete': 'delete'})\n                ipv6 = build_child_xml_node(family, 'inet6')\n                build_child_xml_node(ipv6, 'address', None, {'delete': 'delete'})\n                intf_xml.append(root_node)\n        return intf_xml\n","sub_path":"env/lib/python3.9/site-packages/ansible/module_utils/network/junos/config/l3_interfaces/l3_interfaces.py","file_name":"l3_interfaces.py","file_ext":"py","file_size_in_byte":8590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"241771246","text":"n=int(input(\"Enter the number:\"))\nif n>1:\n    for i in range(2,n):\n        if n%i==0:\n            print ('no')\n            break\n    \n    else:\n        print(\"yes\")\nelif n==1 or n==0:\n    print ('neither prime nor composite')\n","sub_path":"prime66.py","file_name":"prime66.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"632886533","text":"import re\r\nfrom util.Log import Logger\r\nfrom util.ImageOperation import ImageOperation\r\nfrom services.ImageRecognition import ImageRecognition\r\n\r\n\r\nclass AssertImages:\r\n    def __init__(self):\r\n        self.log = Logger().logger\r\n\r\n    def assert_images(self, devices, exc_img, act_img, comment):\r\n        \"\"\"\r\n        Assertions:\r\n        1. expected image vs. actual image\r\n        2. OCR keyword recognition is correct\r\n        \"\"\"\r\n\r\n        # Verify the OCR result\r\n        img_str = ImageRecognition().imgOCR(act_img)['words_result']\r\n        self.log.info(f'OCR text recognition result: {img_str}')\r\n\r\n        count = 0\r\n        for i in comment:\r\n            res = re.search(i, str(img_str))\r\n            if res:\r\n                count += 1\r\n        if count == len(comment):\r\n            ocr_result = True\r\n        
else:\r\n            ocr_result = False\r\n\r\n        img_similar = ImageRecognition().imgSimilar(exc_img, act_img)\r\n        self.log.info(f'Image similarity result: {img_similar}')\r\n\r\n        image_path = ImageOperation().save_compare_img(exc_img, act_img, devices)\r\n\r\n        mes = {\r\n            'ocr_result': ocr_result,\r\n            'ocr_description': img_str,\r\n            'compare_img_path': image_path,\r\n            'similar_data': img_similar\r\n        }\r\n        return mes\r\n\r\n\r\nif __name__ == '__main__':\r\n    a = AssertImages()\r\n    path1 = r\"D:\\work\\code\\project_test\\ebuy_app\\statics\\exce_img\\e_buy_home_online.jpg\"\r\n    path2 = r\"D:\\work\\code\\project_test\\ebuy_app\\statics\\exce_img\\e_buy_home_offline.jpg\"\r\n    comments = [\"微信\", \"扫一扫\", \"摇一摇\", \"搜一搜\"]\r\n    # print(a.assert_images(img1, img2, comment))\r\n    print(a.assert_images(\"c2a1596b\", path1, path2, comments))\r\n","sub_path":"app_test_win/services/AssertImages.py","file_name":"AssertImages.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"112723964","text":"\n# Copyright (c) 2021, kang kimchhay and contributors\n# For license information, please see license.txt\n\nimport frappe\nfrom frappe.model.document import Document\n\nclass HotelBooking(Document):\n\tdef before_save(self):\n\t\tif self.from_date > self.to_date:\n\t\t\tfrappe.throw(\"From date must be earlier than to date\")\n\tdef before_submit(self):\n\t\tbooking = frappe.db.get_list(\n\t\t\t\t\"Hotel Booking\",\n\t\t\t\t\tfilters = {\n\t\t\t\t\t\t\"docstatus\":1,\n\t\t\t\t\t\t\"status\" : \"Booking\",\n\t\t\t\t\t\t\"hotel_room\": self.hotel_room\n\t\t\t\t\t})\n\t\tif len(booking) >= 1:\n\t\t\tfrappe.throw(\"Room is already booked\")\n\t\n","sub_path":"hotel_management/hotel_management/doctype/hotel_booking/hotel_booking.py","file_name":"hotel_booking.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"546451732","text":"from pomp.core.base import BaseHttpRequest, BaseHttpResponse, BaseDownloadWorker, BaseCrawlException\nfrom pomp.contrib.concurrenttools import ConcurrentDownloader\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nimport time\n\n\nclass PhantomRequest(BaseHttpRequest):\n    def __init__(self, url):\n        self.url = url\n        self.driver_url = None\n\n    def __str__(self):\n        return '<{s.__class__.__name__} url: {s.url}> ' \\\n               'wdriver: {s.driver_url}'.format(s=self)\n\n\nclass PhantomResponse(BaseHttpResponse):\n    def __init__(self, req, body, images=None):\n        self.req = req\n        self.content = body\n        if images:\n            self.images = images\n\n    @property\n    def request(self):\n        return self.req\n\n\nclass PhantomWorker(BaseDownloadWorker):\n    def __init__(self):\n        self.pid = 'windows'\n        # on linux\n        # self.pid = os.getpid()\n\n    def get_one(self, request):\n        # attach webdriver to already started phantomjs node\n        user_agent = (\n            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) '\n            'Chrome/39.0.2171.95 Safari/537.36'\n        )\n        dcap = DesiredCapabilities.PHANTOMJS.copy()\n        dcap['phantomjs.page.settings.userAgent'] = user_agent\n        driver = webdriver.Remote(\n            command_executor=request.driver_url,\n            desired_capabilities=dcap,\n            
)\n        driver.get(request.url)\n        # wait until JS renders the page\n        try:\n            WebDriverWait(driver, 10).until(\n                expected_conditions.presence_of_element_located(\n                    (By.XPATH, './/*[@id=\"footer\"]')\n                )\n            )\n        except TimeoutException:\n            print(request.url + ' did not load')\n            return BaseCrawlException(request=request, exception=TimeoutException)\n        image_elements = []\n\n        try:\n            # This element appears on foreign sites and on some Russian ones:\n            # Berlin, Dubai, Minsk, Miami, Samara, Saratov, Cheboksary\n            element = driver.find_element_by_xpath('.//*[@id=\"lang_select\"]')\n            if element:\n                opt_ru, opt_en = None, None\n                for option in element.find_elements_by_tag_name('option'):\n                    if option.text == 'Ru':\n                        opt_ru = option\n                    elif option.text == 'En':\n                        opt_en = option\n                if opt_ru:\n                    opt_ru.click()\n                    time.sleep(3)\n                elif opt_en:\n                    opt_en.click()\n                    time.sleep(3)\n                # if element:\n                #     select = Select(element)\n                #     select.deselect_all()\n                #     select.select_by_visible_text('En')\n                #     select.deselect_all()\n                #     select.select_by_visible_text('Ru')\n\n        except NoSuchElementException:\n            pass\n\n        try:\n            if driver.find_element_by_xpath('.//*[@id=\"quest-image\"]'):\n                current_image = 'n/a'\n                while current_image not in image_elements:\n                    \"\"\" Images on the site are switched by clicking the arrow image;\n                    JS changes the element's style, which changes the image URL.\n                    Collect the images in a list until they start repeating,\n                    stripping the leading `background-image: url(` and the trailing `);`\n                    \"\"\"\n                    current_image = driver.find_element_by_xpath('.//*[@id=\"quest-image\"]').get_attribute('style')[22:][:-2]\n                    image_elements.append(current_image)\n                    driver.find_element_by_xpath('.//*[@id=\"next_img\"]').click()\n                    driver.implicitly_wait(4)\n                    # time.sleep(1)\n                    current_image = driver.find_element_by_xpath('.//*[@id=\"quest-image\"]').get_attribute('style')[22:][:-2]\n        except NoSuchElementException:\n            pass\n\n        # finish - get current document body\n        body = driver.execute_script('return document.documentElement.outerHTML;')\n        return PhantomResponse(request, body, image_elements)\n\n\nclass PhantomDownloader(ConcurrentDownloader):\n\n    def __init__(self, phantom_drivers, *args, **kwargs):\n        self.drivers = phantom_drivers\n        super().__init__(*args, **kwargs)\n\n    def prepare(self, *args, **kwargs):\n        super().prepare(*args, **kwargs)\n\n    def get(self, requests):\n        # associate each request with phantomjs node\n        def _associate_driver_url(request):\n            request.driver_url = self.drivers[0].command_executor._url\n            self.drivers.rotate(1)\n            return request\n\n        return super().get(\n            map(_associate_driver_url, requests)\n        )\n\n    def stop(self):\n        super().stop()\n        # for driver in self.drivers:\n        # driver.close()\n        # driver.quit()\n        # for linux\n        # pid = driver.service.process.pid\n        # HACK - phantomjs does not exit after close and quit\n        # try:\n        #     os.kill(pid, signal.SIGTERM)\n        # except ProcessLookupError:\n        #     pass\n","sub_path":"phobiaru/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":5886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"420205795","text":"from selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass OpportunityPage:\n    
Icon_ShowMoreActions_XPath=\"//button[@class='slds-button slds-button_icon-border-filled']\"\n    MenuItem_CreateQuoteProposal_XPath=\"//span[text()='Create Quote/Proposal']\"\n\n    # Constructor\n    def __init__(self, driver):\n        self.driver = driver\n\n    # Click the Create Quote/Proposal menu item from the Opportunity detail page\n    def ClickShowMoreActions(self):\n        print(\"---------- Method: ClickShowMoreActions\")\n        element=WebDriverWait(self.driver,60).until(EC.element_to_be_clickable((By.XPATH,self.Icon_ShowMoreActions_XPath)))\n        element.click()\n\n    def ClickCreateProposalMenuItem(self):\n        print(\"---------- Method: ClickCreateProposalMenuItem\")\n        element1=WebDriverWait(self.driver, 60).until(EC.element_to_be_clickable((By.XPATH, self.MenuItem_CreateQuoteProposal_XPath)))\n        element1.click()\n\n    def ProposalRecordTypeClickButton(self,Button):\n        print(\"---------- Method: ProposalRecordTypeClickButton\")\n        btn=\"//input[@value='\"+Button+\"']\"\n        RecTypPageBtn=WebDriverWait(self.driver, 60).until(EC.element_to_be_clickable((By.XPATH, btn)))\n        RecTypPageBtn.click()","sub_path":"pageObjects/OpportunitiesPage.py","file_name":"OpportunitiesPage.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"318019199","text":"# Copyright 2022 Huawei Technologies Co., Ltd.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Copyright (c) OpenMMLab. 
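The page object above repeats the same wait-then-click sequence in every method; a small helper one might factor out (the 60-second timeout mirrors the values used above):

```python
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

def wait_and_click(driver, xpath, timeout=60):
    """Block until the element located by xpath is clickable, then click it."""
    element = WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((By.XPATH, xpath)))
    element.click()
    return element

# Usage inside the page object, e.g.:
#   wait_and_click(self.driver, self.Icon_ShowMoreActions_XPath)
```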
All rights reserved.\nimport numpy as np\nimport torch\n\nfrom mmdet.utils.util_random import ensure_rng\n\n\ndef random_boxes(num=1, scale=1, rng=None):\n    \"\"\"Simple version of ``kwimage.Boxes.random``\n\n    Returns:\n        Tensor: shape (n, 4) in x1, y1, x2, y2 format.\n\n    References:\n        https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390\n\n    Example:\n        >>> num = 3\n        >>> scale = 512\n        >>> rng = 0\n        >>> boxes = random_boxes(num, scale, rng)\n        >>> print(boxes)\n        tensor([[280.9925, 278.9802, 308.6148, 366.1769],\n                [216.9113, 330.6978, 224.0446, 456.5878],\n                [405.3632, 196.3221, 493.3953, 270.7942]])\n    \"\"\"\n    rng = ensure_rng(rng)\n\n    tlbr = rng.rand(num, 4).astype(np.float32)\n\n    tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])\n    tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])\n    br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])\n    br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])\n\n    tlbr[:, 0] = tl_x * scale\n    tlbr[:, 1] = tl_y * scale\n    tlbr[:, 2] = br_x * scale\n    tlbr[:, 3] = br_y * scale\n\n    boxes = torch.from_numpy(tlbr)\n    return boxes\n","sub_path":"PyTorch/built-in/cv/detection/SSD_for_PyTorch/mmdet/core/bbox/demodata.py","file_name":"demodata.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"291087728","text":"# coding:utf-8\n\nimport json\nfrom tornado.web import RequestHandler, StaticFileHandler\n\nfrom config import r\nfrom connect import session\nfrom constants import certify_token\nfrom session import Session\nfrom user.user_modules import LoginCheck, User\n\n\nclass BaseHandler(RequestHandler):\n    \"\"\"Custom base handler.\"\"\"\n    @property\n    def db(self):\n        \"\"\"Expose the db as a property of the RequestHandler object.\"\"\"\n        return self.application.db\n\n    @property\n    def redis(self):\n        \"\"\"Expose redis as a property of the RequestHandler object.\"\"\"\n        return self.application.redis\n\n    def prepare(self):\n        \"\"\"Pre-parse JSON request bodies.\"\"\"\n        if self.request.headers.get(\"Content-Type\", \"\").startswith(\"application/json\"):\n            self.json_args = json.loads(self.request.body)\n        else:\n            self.json_args = {}\n\n    def set_default_headers(self):\n        \"\"\"Set the default JSON response headers.\"\"\"\n        self.set_header(\"Access-Control-Allow-Headers\", \"Authorization, Content-Type, Depth, User-Agent, X-File-Size, X-Requested-With, X-Requested-By, If-Modified-Since, X-File-Name, X-File-Type, Cache-Control, Origin\")\n        self.set_header(\"Content-Type\", \"application/json; charset=UTF-8\")\n        self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n        # self.set_header('Access-Control-Allow-Headers', 'Authorization')\n        self.set_header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE')\n        # self.set_header(\"Access-Control-Allow-Credentials\", \"true\")\n\n    def get_current_user(self):\n        \"\"\"Check whether the user is logged in.\"\"\"\n        if self.request.method == 'GET':\n            print(\"request method is get!!!\")\n            user_id = self.get_query_argument(\"user_id\", None)\n            token = self.get_query_argument(\"token\", None)\n            if user_id and token:\n                uid = r.get(token)\n                if uid:\n                    uid = uid.decode()\n                    if uid == user_id:\n                        user = session.query(User).filter(User.id == uid)\n                        if user.count() == 1:\n                            # return {\"user_id\": user_id, \"token\": token, \"code\": 0}\n                            return 0\n                        else:\n                            return 2\n                else:\n                    login = session.query(LoginCheck).filter(LoginCheck.u_id == user_id).filter(LoginCheck.login_key != None)\n                    if login.count() == 1:\n                        return 1\n                    else:\n                        return 2\n        if self.request.method == 'POST':\n            print(\"request method is post!!!\")\n            user_id = self.get_body_argument(\"user_id\", None)\n            token = self.get_body_argument(\"token\", None)\n            
if user_id and token:\n                uid = r.get(token)\n                if uid:\n                    print('exists uid')\n                    uid = uid.decode()\n                    if uid == user_id:\n                        user = session.query(User).filter(User.id == uid)\n                        if user.count() == 1:\n                            # return {\"user_id\": user_id, \"token\": token, \"code\": 0}\n                            return 0\n                        else:\n                            return 2\n                else:\n                    login = session.query(LoginCheck).filter(LoginCheck.u_id == user_id).filter(LoginCheck.login_key != None)\n                    if login.count() == 1:\n                        return 1\n                    else:\n                        return 2\n\n\n    # if self.request.method == \"POST\":\n    #     user_id = self.get_query_argument(\"user_id\", None)\n    #     token = self.get_query_argument(\"token\", None)\n    #     if user_id and token:\n\n\n\nclass StaticFileBaseHandler(StaticFileHandler):\n    \"\"\"Custom static file handler: sets the _xsrf cookie when the user fetches an HTML page.\"\"\"\n    def __init__(self, *args, **kwargs):\n        super(StaticFileBaseHandler, self).__init__(*args, **kwargs)\n        self.xsrf_token\n","sub_path":"t1_tornado/BaseHandler.py","file_name":"BaseHandler.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"240067907","text":"import tensorflow as tf\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ntf.set_random_seed(777)  # for reproducibility\r\n\r\n# Check out https://www.tensorflow.org/get_started/mnist/beginners for\r\n# more information about the mnist dataset\r\n#mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\r\ntrain, test = tf.keras.datasets.mnist.load_data()\r\n\r\nnb_classes = 10\r\n\r\ntrain_x, train_y = train\r\ntrain_y = train_y.reshape([-1, 1])\r\ntest_x, test_y = test\r\ntest_y = test_y.reshape([-1, 1])\r\n\r\ntrain_x = np.reshape(train_x, [-1, 784])\r\ntest_x = np.reshape(test_x, [-1, 784])\r\n\r\n\r\n# MNIST data image of shape 28 * 28 = 784\r\nX = tf.placeholder(tf.float32, [None, 784])\r\n# 0 - 9 digits recognition = 10 classes\r\nY = tf.placeholder(tf.int32, [None, 1])\r\nY_one_hot = tf.one_hot(Y, nb_classes)\r\nprint(\"one_hot\", Y_one_hot)\r\nY_one_hot = tf.reshape(Y_one_hot, [-1, nb_classes])\r\nprint(\"reshape\", Y_one_hot)\r\n\r\nkeep_prob = tf.placeholder(tf.float32)\r\n\r\n# reshape X for CNN input\r\nX_image = tf.reshape(X, [-1, 28, 28, 1])\r\n\r\nW1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))\r\n\r\nL1 = tf.nn.conv2d(X_image, W1, [1, 1, 1, 1], padding='SAME')\r\nL1 = tf.nn.relu(L1)\r\nL1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\r\nL1 = tf.nn.dropout(L1, keep_prob=keep_prob)\r\n\r\nW2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))\r\n\r\nL2 = tf.nn.conv2d(L1, W2, [1, 1, 1, 1], padding='SAME')\r\nL2 = tf.nn.relu(L2)\r\nL2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\r\nL2 = tf.nn.dropout(L2, keep_prob=keep_prob)\r\n\r\nL2_flat = tf.reshape(L2, [-1, 7*7*64])\r\nW3 = tf.get_variable('W3', [7*7*64, 512], initializer=tf.contrib.layers.xavier_initializer())\r\nB3 = tf.Variable(tf.random_normal([512]))\r\nL3 = tf.nn.relu(tf.matmul(L2_flat, W3) + B3)\r\nL3 = tf.nn.dropout(L3, keep_prob=keep_prob)\r\n\r\nW4 = tf.get_variable('W4', [512, nb_classes], initializer=tf.contrib.layers.xavier_initializer())\r\nB4 = tf.Variable(tf.random_normal([nb_classes]))\r\n\r\nlogits = tf.matmul(L3, W4) + B4\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y_one_hot))\r\noptimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\r\n\r\n\r\n\r\n# logits = tf.matmul(L3, W4)\r\n# hypothesis = tf.nn.relu(logits + b4)\r\n# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, 
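The GET and POST branches of `get_current_user` above differ only in where `user_id` and `token` come from; a sketch of the shared check they could both delegate to. Return codes 0/1/2 keep the original convention, and `r`, `session`, `User`, and `LoginCheck` are the module's own imports.

```python
def check_token(user_id, token):
    """0: valid session, 1: known user but stale/missing token, 2: not logged in."""
    if not (user_id and token):
        return 2
    uid = r.get(token)
    if uid is not None:
        if uid.decode() == user_id and \
                session.query(User).filter(User.id == user_id).count() == 1:
            return 0
        return 2
    login = (session.query(LoginCheck)
             .filter(LoginCheck.u_id == user_id)
             .filter(LoginCheck.login_key != None))
    return 1 if login.count() == 1 else 2
```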
labels=([Y_one_hot])))\n# optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\n#\n# Test model\nis_correct = tf.equal(tf.argmax(logits, 1), tf.cast(tf.reshape(Y, [-1]), tf.int64))\n# Calculate accuracy\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n\n# parameters\ntraining_epochs = 15\nbatch_size = 100\n\nwith tf.Session() as sess:\n # Initialize TensorFlow variables\n sess.run(tf.global_variables_initializer())\n # one_hot_train_y = tf.one_hot(train_y, nb_classes).eval()\n # one_hot_test_y = tf.one_hot(test_y, nb_classes).eval()\n # Training cycle\n for epoch in range(training_epochs):\n avg_cost = 0\n total_batch = int(int(train_x.shape[0]) / batch_size)\n\n for i in range(total_batch):\n batch_xs, batch_ys = train_x[i*batch_size:(i+1)*batch_size], \\\n train_y[i*batch_size:(i+1)*batch_size],\n c, _ = sess.run([cost, optimizer], feed_dict={X: batch_xs, Y: batch_ys, keep_prob: 0.7})\n avg_cost += c / total_batch\n\n\n print('Epoch:', '%04d' % (epoch + 1),\n 'cost =', '{:.9f}'.format(avg_cost))\n\n\n print(\"Learning finished\")\n\n # Test the model using test sets\n\n # print(\"Accuracy: \", accuracy.eval(session=sess, feed_dict={\n # X: test_x, Y: test_y}))\n\n a = sess.run([accuracy], feed_dict={X: test_x, Y: test_y, keep_prob: 1})\n print(a)\n\n # Get one and predict\n r = random.randint(0, len(test_x))\n print(\"Label: \", test_y[r])\n print(\"Prediction: \", sess.run(tf.argmax(logits, 1), feed_dict={X: np.reshape(test_x[r], [1, -1]), keep_prob: 1}))\n\n plt.imshow(\n test_x[r].reshape(28, 28),\n cmap='Greys',\n interpolation='nearest')\n plt.show()\n\n","sub_path":"mnist_with_cnn_01.py","file_name":"mnist_with_cnn_01.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"231755471","text":"import numpy as np\nimport cv2\n\nimg0 = cv2.imread('used_images_videos/lena.jpg', 1)\nimg = np.zeros([512, 512, 3], np.uint8)\n\nimg2 = cv2.rectangle(img, (0, 0), (255, 255), (147, 96, 47), -1)\n\n\"\"\" Similarly we use circle, line, arrowedLine, polygon, ellipse etc to draw the shapes tacit from the names \"\"\"\n\nfont = cv2.FONT_HERSHEY_DUPLEX\nimg2 = cv2.putText(img2, 'Abhinav', (100, 100), font, 2, (100, 100, 20), 4, cv2.LINE_8)\n\n\ncv2.imshow('pagal', img2)\n\n# cv2.imwrite('gshape.jpg', img2)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","sub_path":"Practice/opencv/geometric_shapes.py","file_name":"geometric_shapes.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"443213564","text":"from django.core.exceptions import ObjectDoesNotExist\n\nfrom profiles.exceptions import ApiUnauthorized\nfrom back.settings import MIN_ANDROID_VERSION, MIN_IOS_VERSION\nfrom profiles.models import DeviceInfo\n\n\ndef check_version(meta):\n \"\"\"\n checks what version of the app the request is coming from\n throws a 401 if the version is lower than required\n \"\"\"\n error = ApiUnauthorized(code='WRONG_VERSION', message='Your app runs on an older version. 
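Evaluating the CNN above over the whole test set in one `sess.run` works for MNIST but can exhaust GPU memory for larger inputs; a batched variant reusing the record's own names, assuming it runs inside the same `tf.Session()` block after "Learning finished":

```python
# Average the accuracy over test mini-batches instead of one giant feed.
test_batches = int(test_x.shape[0] / batch_size)
acc_sum = 0.0
for i in range(test_batches):
    acc_sum += sess.run(accuracy, feed_dict={
        X: test_x[i * batch_size:(i + 1) * batch_size],
        Y: test_y[i * batch_size:(i + 1) * batch_size],
        keep_prob: 1})
print('Accuracy:', acc_sum / test_batches)
```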
Please update your app')\n version_header = meta.get('HTTP_X_APP_VER')\n if not version_header:\n raise error\n try:\n os, version = version_header.split('_')\n except Exception:\n raise error\n if os.lower() == 'android':\n compare_versions(MIN_ANDROID_VERSION, version.lower(), error)\n elif os.lower() == 'ios':\n compare_versions(MIN_IOS_VERSION, version.lower(), error)\n else:\n raise error\n\n\ndef compare_versions(minimum, incoming, error):\n \"\"\"\n :param minimum: version of the app that the server thinks is minimum\n :param incoming: version in the incoming request\n :param error exception to be thrown in case the version is wrong\n \"\"\"\n try:\n incoming_first, incoming_second, incoming_third = incoming.split('.')\n minimum_first, minimum_second, minimum_third = minimum.split('.')\n if minimum_first > incoming_first:\n raise error\n if minimum_second > incoming_second:\n raise error\n if minimum_third > incoming_third:\n raise error\n except Exception:\n raise error\n\n\ndef coordinates(bundle):\n meta = bundle.request.META\n profile = bundle.request.user\n lat = meta.get('HTTP_X_DEV_LAT')\n lon = meta.get('HTTP_X_DEV_LON')\n info = meta.get('HTTP_X_DEV_INFO')\n tz = meta.get('HTTP_X_DEV_TZ')\n\n # noinspection PyBroadException\n try:\n os, model = info.split('_')\n except Exception:\n os, model = None, None\n\n try:\n device = DeviceInfo.objects.get(owner=profile)\n if lat is not None:\n device.latitude = lat\n if lon is not None:\n device.longitude = lon\n if os is not None:\n device.os = os\n if model is not None:\n device.model = model\n if tz is not None:\n device.tz = tz\n except ObjectDoesNotExist:\n device = DeviceInfo.objects.create(owner=profile, latitude=lat, longitude=lon, os=os, model=model, tz=tz)\n device.save()\n","sub_path":"profiles/headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"200945464","text":"import sys\n\nif len(sys.argv) != 3:\n print(\"[-] Usage ./ecb_enc filetoencrypt outfilename\")\n exit(1)\nkey = \"A0E\"\nblocks = []\nwith open(sys.argv[1], \"rb\") as f:\n while True:\n block = f.read(16)\n out_block = b\"\"\n if len(block) == 0:\n break\n if len(block) != 16:\n block += b\"0\" * (16 - len(block))\n for i in range(len(block)):\n out_block += bytes([block[i] ^ ord(key[i % len(key)])])\n blocks.append(out_block)\nwith open(sys.argv[2], \"wb\") as f:\n for i in blocks :\n f.write(i)\n","sub_path":"security/CRYPTOGRAPHY/ecb_enc.py","file_name":"ecb_enc.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"192605100","text":"from pyspark.ml.linalg import Vectors\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.clustering import KMeans\n\ncluster_df = spark.read.csv('clustering_dataset.csv', header=True, inferSchema=True)\ncluster_df.show()\n\nvectorAssembler = VectorAssembler(inputCols=['col1', 'col2', 'col3'], outputCol='features')\nvcluster_df = vectorAssembler.transform(cluster_df)\n\nvcluster_df.show()\n\nkmeans = KMeans().setK(3)\nkmeans = kmeans.setSeed(1)\nkmodel = kmeans.fit(vcluster_df)\n\ncenters = kmodel.clusterCenters()\n\n\n# hierarchical clustering\nvcluster_df.show()\n\nfrom pyspark.ml.clustering import BisectingKMeans\nbkmeans = BisectingKMeans().setK(3)\nbkmeans = bkmeans.setSeed(1)\nbkmodel = bkmeans.fit(vcluster_df)\n\nbkcenters = 
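Despite the ECB name, the cipher script above is a repeating-key XOR over zero-padded 16-byte blocks (the key restarts at each block boundary), so decryption is the same transform run again. A quick round-trip check on a single block:

```python
def xor_block(block: bytes, key: bytes = b"A0E") -> bytes:
    # XOR is its own inverse: applying the same keystream twice is identity.
    return bytes(b ^ key[i % len(key)] for i, b in enumerate(block))

plain = b"attack at dawn!!"  # exactly one 16-byte block
assert xor_block(xor_block(plain)) == plain
```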
bkmodel.clusterCenters()\n","sub_path":"clustering/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"600485312","text":"import unittest\nfrom api.api_poem import ApiPoem\nfrom tool.read_json import ReadJson\nfrom parameterized import parameterized\n\ndef get_data():\n data = ReadJson(\"poem.json\").read_json()\n arrs = []\n arrs.append((data.get(\"url\"),\n data.get(\"page\"),\n data.get(\"count\")))\n return arrs\n\ndef get_more_data():\n datas = ReadJson(\"poem_more.json\").read_json()\n arrs = []\n for data in datas.values():\n arrs.append((data.get(\"url\"),\n data.get(\"page\"),\n data.get(\"count\")))\n return arrs\n\n\nclass TestPoem(unittest.TestCase):\n\n @parameterized.expand(get_data())\n def test_Poem(self,url,page,count):\n\n url=url\n para={\"page\":page,\"count\":count}\n\n\n data = ApiPoem().api_poem(url,para)\n data1 = data.json()[\"message\"]\n return data1\n\n\n\n","sub_path":"script/test_poem.py","file_name":"test_poem.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"145300486","text":"\n\"\"\"Draft bizlogic\n\"\"\"\n\nimport os\nfrom django.conf import settings\nfrom app.usrlib import image_utils, common\nimport shutil\nfrom app.models import Post, Tag, Year\nimport datetime\nimport re\nfrom app.bizlogic import image_bizlogic\n\n\ndef register_all_archive_posts() -> None:\n \"\"\"Extract data from __drafts, archives folder and register them into DB.\"\"\"\n\n # Get all existing post folder paths. Sorted from past to now.\n DRAFT_TOP_DIR = os.path.join(settings.BASE_DIR, '__drafts')\n draft_dirs = filter(os.path.isdir,\n map(lambda _: os.path.join(DRAFT_TOP_DIR, _),\n sorted(os.listdir(DRAFT_TOP_DIR))))\n\n # UPSERT MODE -----------------------------------------\n for dirpath in draft_dirs:\n archive_post = __create_archive_post_obj(dirpath)\n Post.objects.update_or_create(\n code=archive_post.code,\n defaults={\n 'publish_at': archive_post.publish_at,\n 'title_ja': archive_post.title_ja,\n 'title_en': archive_post.title_en,\n 'tag': archive_post.tag,\n 'year': archive_post.year,\n 'thumbnail': archive_post.thumbnail,\n 'body_ja': archive_post.body_ja,\n 'body_en': archive_post.body_en,\n 'html': archive_post.html,\n },\n )\n\n # BULK_INSERT MODE -----------------------------------------\n # Create Post object one by one.\n # Cuz images will be copied within method, they will exist before registering Post.\n # Obstructs image duplication as well.\n # post_objs = (__create_archive_post_obj(dirpath) for dirpath in draft_dirs)\n # Post.objects.bulk_create(post_objs)\n\n\ndef __create_archive_post_obj(dirpath) -> Post:\n \"\"\"Extract archive data and create Post object with it.\"\"\"\n\n # Create new image names. {0.jpg : uniquename}\n MARKDOWNX_DIR = os.path.join(settings.MEDIA_ROOT, 'markdownx')\n image_correspondence_table = {\n basename: image_utils.get_unique_image_name(basename, MARKDOWNX_DIR)\n for basename in image_utils.get_image_basenames(dirpath)\n }\n\n # Copy images to media folder with new unique names. 
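The clustering snippet above assumes a pre-bound `spark`, which only exists in the `pyspark` shell; run as a plain script it needs the session built explicitly, roughly:

```python
from pyspark.sql import SparkSession

# Outside the pyspark REPL, `spark` is not predefined.
spark = (SparkSession.builder
         .appName('clustering-demo')  # arbitrary app name
         .getOrCreate())

cluster_df = spark.read.csv('clustering_dataset.csv', header=True, inferSchema=True)
```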
0.jpg -> /media/markdownx/uniquename\n for origin, new_basename in image_correspondence_table.items():\n shutil.copyfile(os.path.join(dirpath, origin),\n os.path.join(MARKDOWNX_DIR, new_basename))\n\n # Get data of archive file.\n archive_data = __extract_archive_data(dirpath)\n\n # Create thumbnail for this post.\n thumbnail_basename = (image_correspondence_table[archive_data['mainimage']]\n if archive_data['mainimage'] in image_correspondence_table\n else None)\n if thumbnail_basename:\n image_bizlogic.generate_thumbnail(thumbnail_basename)\n\n # Create Post object.\n return Post(\n # Here set time with Japan timezone, then it will be registered with UTC in DB, minus 9 hours.\n publish_at=datetime.datetime.strptime(archive_data['publishdate'] + '+0900', '%Y-%m-%d%z'),\n code=archive_data['code'],\n title_ja=archive_data['title_ja'],\n title_en=archive_data['title_en'],\n tag=Tag.objects.filter(name_ja=archive_data['tag']).first(),\n year=Year.objects.filter(code=archive_data['publishdate'][:4]).first(),\n thumbnail=thumbnail_basename,\n body_ja=__manipulate_body_content(archive_data['ja_md'], image_correspondence_table),\n body_en=__manipulate_body_content(archive_data['en_md'], image_correspondence_table),\n html=__manipulate_body_content(archive_data['ja_html'], image_correspondence_table),\n )\n\n\ndef __extract_archive_data(dirpath) -> dict:\n \"\"\"Extract data required only from archive files.\"\"\"\n\n # Each path.\n JA_MD = os.path.join(dirpath, 'ja.md')\n EN_MD = os.path.join(dirpath, 'en.md')\n JA_HTML = os.path.join(dirpath, 'ja.html')\n\n # Get markdown meta data.\n meta = common.get_markdown_metadata(JA_MD)\n\n # Extract data.\n # There are little difference between having html(older posts) and not having html(newer posts).\n has_html = os.path.isfile(JA_HTML)\n return {\n 'publishdate': meta['publishdate'][0],\n 'code': meta['code'][0],\n 'title_ja': meta['title_ja'][0],\n 'title_en': meta['title_en'][0],\n 'tag': meta['tag'][0],\n 'mainimage': meta['mainimage'][0],\n 'ja_md': '' if has_html else common.read_file(JA_MD),\n 'en_md': '' if has_html else common.read_file(EN_MD),\n 'ja_html': common.read_file(JA_HTML) if has_html else '',\n }\n\n\ndef __manipulate_body_content(string, image_correspondence_table):\n \"\"\"Manipulate body.\"\"\"\n\n # Replace image paths.\n for key, value in image_correspondence_table.items():\n string = (string\n .replace(f'![]({key})', f'![](/media/markdownx/{value})')\n .replace(f'src=\"{key}\"', f'src=\"/media/markdownx/{value}\"')\n )\n\n # Remove meta description in markdown file. 
Looks very bizlogic.\n for r in [\n r'publishdate: .*?\\n',\n r'publish: .*?\\n',\n r'code: .*?\\n',\n r'title_ja: .*?\\n',\n r'title_en: .*?\\n',\n r'tag: .*?\\n',\n r'author: .*?\\n',\n r'hash: .*?\\n',\n r'originaleid: .*?\\n',\n r'mainimage: .*?\\n',\n r'convert2html: .*?\\n',\n ]:\n string = re.sub(r, '', string)\n return string\n","sub_path":"app/bizlogic/archive_bizlogic.py","file_name":"archive_bizlogic.py","file_ext":"py","file_size_in_byte":5452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"433404258","text":"#This is a program to find the sum of primes under two million\n\nimport math\n\nsumPrimes = 10\n\nfor number in range(7,2000000,2):\n\n for test in range(3,int(round(math.sqrt(number)))+2,2):\n\n if int(str(number)[-1])==5:\n break\n \n elif test==math.sqrt(number):\n break\n\n elif test==int(round(math.sqrt(number))):\n sumPrimes = sumPrimes + number\n elif test==int(round(math.sqrt(number))+1):\n sumPrimes = sumPrimes + number\n\n elif number%test==0:\n break\n\nprint(str(sumPrimes))\n","sub_path":"Old Solutions/10_primes.py","file_name":"10_primes.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"147284215","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('perfiles', '0003_auto_20141117_1525'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='perfilestudiantes',\n name='area',\n field=models.ForeignKey(blank=True, to='perfiles.Areas', null=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='perfilestudiantes',\n name='idioma',\n field=models.ManyToManyField(to='perfiles.Idiomas', null=True, blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='perfilestudiantes',\n name='lenguajes',\n field=models.ManyToManyField(to='perfiles.Lenguajes', null=True, blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"perfiles/migrations/0004_auto_20141117_1542.py","file_name":"0004_auto_20141117_1542.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"248978606","text":"# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration\n\n\nfrom AthenaCommon.SystemOfUnits import *\nfrom AthenaCommon.Logging import logging\n\nfrom JetRec.JetRecConf import FastJetInterfaceTool\n\n_fastjetLog = logging.getLogger(\"FastJetInterfaceConfiguration\")\n\n# set up some enumerator values\ndef enums(name='Enum',**enums):\n return type( name, (), enums)\n\n# recognized keys\nfastjet_conf_tags = enums('FastJetConfTags',\n Strategy=[ 'default', 'Best', \n 'N2MinHeapTiled','N2Tiled', 'N2PoorTiled', 'N2Plain',\n 'N3Dumb', \n 'NlnN', 'NlnN3pi', 'NlnN4pi', 'NlnNCam4pi', 'NlnNCam2pi2R', 'NlNCam',\n 'plugin_strategy' ],\n RecombScheme = [ 'default', 'E', 'pt', 'pt2', 'Et', 'Et2', 'BIpt', 'BIpt2' ],\n Algorithm = [ 'default', 'kt', 'Kt', 'anti-kt', 'AntiKt', 'cambridge', 'CamKt',\n 'genkt', 'passive cambridge', 'passive genkt',\n 'CMSCone', 'SISCone'],\n JetAreaMethod = [ 'default', 'VoronoiArea', 'ActiveArea', \n 'ActiveAreaExplicitGhost', 'PassiveArea', '1GhostPassiveArea' ],\n SISSplitMergeScale = [ 'default', 'pttilde', 'PtTilde', 'Pt', 'Et', 'mt' ], \n )\n\n# Ghosted area parameters\nfastjet_gas = enums('FastJetGhostAreaSettings',\n 
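The metadata-stripping loop in `__manipulate_body_content` above runs eleven separate `re.sub` passes; one alternation can do the same work in a single pass. The key list is copied from the loop; the `^` anchor with MULTILINE is slightly stricter than the originals, which matched anywhere in a line.

```python
import re

META_KEYS = ('publishdate', 'publish', 'code', 'title_ja', 'title_en', 'tag',
             'author', 'hash', 'originaleid', 'mainimage', 'convert2html')
META_RE = re.compile(r'^(?:%s): .*?\n' % '|'.join(META_KEYS), re.MULTILINE)

def strip_meta(string):
    # Single pass instead of eleven sequential re.sub calls.
    return META_RE.sub('', string)
```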
def_ghost_maxrap = 6.0, #fastjet::gas::def_ghost_maxrap\n def_repeat = 1, #fastjet::gas::def_repeat\n def_ghost_area = 0.01, #fastjet::gas::def_ghost_area\n def_grid_scatter = 1.0, #fastjet::gas::def_grid_scatter\n def_kt_scatter = 0.1, #fastjet::gas::def_kt_scatter\n def_mean_ghost_kt = 1e-100,#fastjet::gas::def_mean_ghost_kt\n )\n\n# ignored keys\nconfig_ignored_keys = enums('SetupIgnoredKeys',\n ControlKeys = [\"_alreadyChecked_\",\"_locked_\",\"_ignoreUnknown_\" ])\n\n# Default FastJet configuration dictionary: \n#\n# Most keys are the same as the corresponding FastJet tags or enumerator names.\n# In addition, for backward compatibility, the following tags are recognized:\n#\n# \ndefFastJetInterfaceConfigDict = {\n # -- overall setup and process control\n 'Algorithm' : \"anti-kt\",\n 'JetAreaMethod' : \"VoronoiArea\",\n 'CalculateJetArea' : False,\n # -- kt-style parameters\n 'Strategy' : \"Best\",\n 'RecombScheme' : \"E\",\n # -- CMS cone parameters\n 'CMS_SeedThreshold' : 15.*GeV,\n # -- SIS cone parameters\n 'SIS_OverlapThreshold' : 0.75,\n 'SIS_NumPassMax' : 0,\n 'SIS_ProtojetPtMin' : 0.0,\n 'SIS_DoCaching' : False,\n 'SIS_SplitMergeScale' : 'PtTilde',\n 'SIS_SplitMergeStopScale' : 0.0,\n # -- jet algorithm parameters\n 'Radius' : 0.4, # ATLAS default\n 'Inclusive' : True, # ATLAS default\n 'InclusivePtMin' : 0.*GeV,\n 'ExclusiveDcut' : 0.5, \n 'ExclusiveNjets' : 3,\n # -- jet area calculation directives and parameters\n 'VoronoiEffectiveRfact' : 1.0, # Voronoi\n 'GhostMaxRapidity' : fastjet_gas.def_ghost_maxrap, \n 'GhostMinRapidity' : -fastjet_gas.def_ghost_maxrap, \n 'GhostRepeats' : fastjet_gas.def_repeat,\n 'GhostAreaSize' : fastjet_gas.def_ghost_area,\n 'GhostGridScatter' : fastjet_gas.def_grid_scatter,\n 'GhostKtScatter' : fastjet_gas.def_kt_scatter,\n 'GhostMeanKt' : fastjet_gas.def_mean_ghost_kt\n }\n\n# Check whole dictionary or key/value assigments and return dictionary with\n# invalid options stripped (if allowed) or exception thrown for invalid options\ndef checkAndUpdate(**options):\n # already checked\n if options.get(\"_alreadyChecked_\",False) or options.get(\"_locked_\",False):\n return options\n\n # check what to do with unknowns\n ignoreUnknown = options.pop(\"_ignoreUnknown_\",False)\n\n # check every entry\n for k in options.keys():\n if k not in defFastJetInterfaceConfigDict :\n if ignoreUnknown :\n _fastjetLog.warning(\"Option %s unknown - ignoring it!\"%(k))\n options.pop(k)\n else :\n _fastjetLog.error(\"Option %s unknown - abort configuration!\"%(k))\n raise Exception\n\n checkedOptions = dict(defFastJetInterfaceConfigDict)\n for k,v in defFastJetInterfaceConfigDict.iteritems():\n t = type(v)\n if t in ( list, set, dict ) :\n checkedOptions[k] = t(v)\n\n checkedOptions['_alreadyChecked_'] = True\n checkedOptions.update(options)\n\n # check settings for Strategy\n key = \"Strategy\"\n # print checkedOptions\n tag = checkedOptions[key]\n _fastjetLog.info(\"Test option %s\",key)\n if checkedOptions[key] not in fastjet_conf_tags.Strategy :\n _fastjetLog.error(\"Strategy \\042%s\\042 not recognized - fatal! Allowed values are: \",checkedOptions['Strategy'])\n for s in fastjet_conf_tags.Strategy :\n _fastjetLog.error(\"\\042%s\\042\",s)\n raise Exception\n \n # check settings for RecombScheme\n if checkedOptions['RecombScheme'] not in fastjet_conf_tags.RecombScheme :\n _fastjetLog.error(\"RecombScheme \\042%s\\042 not recognized - fatal! 
Allowed values are: \",checkedOptions['RecombScheme'])\n for s in fastjet_conf_tags.RecombScheme :\n _fastjetLog.error(\"\\042%s\\042\",s)\n raise Exception\n \n # check settings for Algorithm\n if checkedOptions['Algorithm'] not in fastjet_conf_tags.Algorithm :\n _fastjetLog.error(\"Algorithm \\042%s\\042 not recognized - fatal! Allowed values are: \",checkedOptions['Algorithm'])\n for s in fastjet_conf_tags.Algorithm :\n _fastjetLog.error(\"\\042%%s\\042\",s)\n raise Exception\n\n # check settings for JetAreaMethod\n if checkedOptions['JetAreaMethod'] not in fastjet_conf_tags.JetAreaMethod :\n _fastjetLog.error(\"JetAreaMethod \\042%s\\042 not recognized - fatal! Allowed values are: \",checkedOptions['JetAreaMethod'])\n for s in fastjet_conf_tags.JetAreaMethod :\n _fastjetLog.error(\"\\042%s\\042\",s)\n raise Exception\n\n # check settings for SIS split merge scale\n if checkedOptions['SIS_SplitMergeScale'] not in fastjet_conf_tags.SISSplitMergeScale :\n _fastjetLog.error(\"SIS_SplitMergeScale \\042%2\\042 not recognized - fatal! Allowed values are: \",checkedOptions['SIS_SplitMergeScale'])\n for s in fastjet_conf_tags.SISSplitMergeScale :\n _fastjetLog.error(\"\\042%s\\042\",s)\n raise Exception\n\n return checkedOptions\n \n\ndef getFastJetInterfaceConfig(name,**options):\n # get tool configuration \n fjTool = FastJetInterfaceTool(name)\n from AthenaCommon.AppMgr import ToolSvc\n ToolSvc += fjTool\n # check job options\n options = checkAndUpdate(**options) \n # set tool properties\n for k,v in options.iteritems():\n if k not in config_ignored_keys.ControlKeys :\n setattr(fjTool,k,v)\n # return tool configuration object\n return fjTool\n\n#def dumpFastJetInterfaceConfig(**options=**defFastJetInterfaceConfigDict):\n# # write out all attributes\n# for k,v in options.iteritems():\n# _fastjetLog.message(\"Config::%s value %s\",%(k),%(v))\n \n","sub_path":"athena/Reconstruction/Jet/JetRec/python/FastJetInterfaceConfig.py","file_name":"FastJetInterfaceConfig.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"608668458","text":"import os\nimport csv\n\n\ncsv_path = os.path.join(\"Resources\", \"budget_data.csv\")\n\n\nwith open(csv_path, newline=\"\") as file:\n\n budget_data_reader = csv.reader(file, delimiter=\",\")\n\n csv_header = next(budget_data_reader)\n\n row_count = 0\n net_total = 0\n changes = []\n row_val = int(next(budget_data_reader)[1])\n row_val_2 = row_val\n months = []\n\n for row in budget_data_reader:\n row_count += 1\n net_total += int(row[1])\n\n\n change = int(row[1]) - row_val\n changes.append(change)\n row_val = int(row[1])\n\n months.append(row[0])\n\n greatest_increase = max(changes)\n increase_index = changes.index(greatest_increase)\n\n greatest_decrease = min(changes)\n decrease_index = changes.index(greatest_decrease)\n\n greatest_increase_month = months[increase_index]\n greatest_decrease_month = months[decrease_index]\n\n # To account for second next()\n row_count = row_count + 1\n net_total = net_total + row_val_2\n\n\n average_change = round(sum(changes) / len(changes),2)\n\n\n\n\nprint(\"Financial Analysis\")\nprint(\"----------------------------\")\nprint(f\"Total Months: {row_count}\")\nprint(f\"Total: ${net_total}\")\nprint(f\"Average Change: ${average_change}\")\nprint(f\"Greatest Increase in Profits: {greatest_increase_month} (${greatest_increase})\")\nprint(f\"Greatest Decrease in Profits: {greatest_decrease_month} (${greatest_decrease})\")\n\n\nresults = 
open(\"Financial_Analysis.txt\", 'w')\nresults.write(f\"\"\"Financial Analysis\n----------------------------\nTotal Months: {row_count}\nTotal: ${net_total}\nAverage Change: ${average_change}\nGreatest Increase in Profits: {greatest_increase_month} (${greatest_increase})\nGreatest Decrease in Profits: {greatest_decrease_month} (${greatest_decrease})\n\"\"\")\n","sub_path":"Homework-3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"306368086","text":"import os\nimport sys\nimport traceback\nfrom time import sleep\nfrom selenium import webdriver\nfrom PIL import Image\n\n\nclass BrowserHelper(object):\n display = None\n driver = None\n\n def __del__(self):\n self.stop()\n\n def start(self):\n \"\"\"Start the browser.\"\"\"\n if self.driver is None: \n if \"darwin\" in sys.platform:\n CHROME_DRIVER_PATH = \"drivers/mac/chromedriver\"\n elif \"win\" in sys.platform:\n CHROME_DRIVER_PATH = \"drivers/windows/chromedriver.exe\"\n else:\n CHROME_DRIVER_PATH = \"drivers/linux/chromedriver\"\n options = webdriver.ChromeOptions()\n options.add_experimental_option(\"prefs\", {\"profile.managed_default_content_settings.images\":2})\n options.add_argument(\"--headless\")\n options.add_argument('disable-infobars')\n options.add_argument(\"--disable-extensions\")\n self.driver = webdriver.Chrome(CHROME_DRIVER_PATH, chrome_options=options)\n \n def stop(self):\n \"\"\"Stop the browser.\"\"\"\n if self.driver is not None:\n self.driver.quit()\n self.driver = None\n\n def restart(self):\n \"\"\"Restart the browser.\"\"\"\n self.stop()\n self.start()\n\n def save_screenshot(self, path=\"screenshot.jpg\"):\n \"\"\"Create screenshot.\"\"\"\n self.driver.save_screenshot(path)\n\n def get_cookies(self):\n \"\"\"Return cookies.\"\"\"\n cookies = {}\n for cookie in self.driver.get_cookies():\n cookies[cookie[\"name\"]] = cookie[\"value\"]\n return cookies\n\n def element_exist(self, xpath):\n \"\"\"Check if element exist.\"\"\"\n try:\n self.driver.find_element_by_xpath(xpath)\n return True\n except:\n return False\n \n def wait_element(self, xpath, sec=10):\n \"\"\"Wait and element.\"\"\"\n i = 0\n while i < sec:\n sleep(1)\n if self.element_exist(xpath):\n return True\n \n def click(self, xpath):\n \"\"\"Click.\"\"\"\n if self.element_exist(xpath):\n self.driver.find_element_by_xpath(xpath).click()\n\n def send_keys(self, xpath, keys):\n \"\"\"Send keys.\"\"\"\n if self.element_exist(xpath):\n self.driver.find_element_by_xpath(xpath).send_keys(keys)\n\n def save_screenshot_of_element(self, xpath, path):\n \"\"\"Save screenshot of element.\"\"\"\n element = self.driver.find_element_by_xpath(xpath)\n location = element.location\n size = element.size\n self.driver.save_screenshot(path)\n im = Image.open(path)\n left = location['x']\n top = location['y']\n right = location['x'] + size['width']\n bottom = location['y'] + size['height']\n im = im.crop((left, top, right, bottom))\n im.save(path)\n \n def get_page(self, link, cookies={},check_captha=False):\n \"\"\"Get page.\"\"\"\n self.driver.get(link)\n sleep(1)\n content = self.driver.page_source\n return content\n \n def get_page_source(self):\n return self.driver.page_source\n\n def login(self, url, login, password, loginXpath, passwordXpath, submitXpath = \"\", loginButtonXpath = \"\", frameXpath = \"\"):\n \"\"\"Login into the site.\"\"\"\n if url != \"\":\n self.driver.get(url)\n if loginButtonXpath != \"\":\n 
self.driver.find_element_by_xpath(loginButtonXpath).click()\n if frameXpath != \"\":\n self.driver.switch_to_frame(self.driver.find_element_by_xpath(frameXpath))\n self.driver.find_element_by_xpath(loginXpath).send_keys(login)\n self.driver.find_element_by_xpath(passwordXpath).send_keys(password)\n self.driver.find_element_by_xpath(submitXpath).click()\n sleep(1)\n","sub_path":"worker/helpers/browser_helper.py","file_name":"browser_helper.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"9905702","text":"import datetime\r\nfrom django.test import TestCase\r\nfrom .models import Anken\r\n\r\n\r\nclass AnkenModelTests(TestCase):\r\n\r\n def test_anken_has_date(self):\r\n \"\"\"\r\n 作成した日記データに日付が付与されているか確認 \r\n \"\"\"\r\n Anken.objects.create(pub_date='2021-09-14',\r\n ankenmei='A案件',\r\n iraibusho='A部署',\r\n iraisha='Aさん',\r\n nouki='2021-09-30',\r\n mitumorikousu='0',\r\n naiyou='AAA',\r\n genjouchi='0', \r\n kitaikouka='0', \r\n tantousha='W', \r\n koumoku='案件', \r\n joutai='対応中', \r\n jissekikousu='0',)\r\n actual_anken = Anken.objects.get(pub_date='2021-09-14')\r\n print(actual_anken)\r\n self.assertIsInstance(actual_anken.pub_date, datetime.date)\r\n\r\n ","sub_path":"akapp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"79742923","text":"# START LAB EXERCISE 05\nprint('Lab Exercise 05 \\n')\n\n# [IMPORTANT NOTE]\n# The autograder in Gradescope will directly test functions and files instead of variables\n# So even though the variables printed seems right, it's possible your code didn't pass all the test cases.\n\n# In this lab, we use txt files. Be very careful that a line in txt files\n# should contain a new line \"\\n\" character at the end of this line.\n\n# PROBLEM 1 (5 Points)\n# Define a function named \"concatenate_name_type\".\n# The function accepts two arguments - one is \"file_name\", the other is \"file_type\". 
Both two arguments are strings.\n# For given arguments, the function should return \".\"\n# Pass two defined variables \"file_name\", \"file_type\" to the function, assign the result to \"full_file_name\"\n# Print \"full_file_name\".\n\n# BEGIN PROBLEM 1 SOLUTION\nf_name = \"file1\"\nf_type = \"txt\"\n\ndef concatenate_name_type(file_name, file_type):\n return f\"{file_name}.{file_type}\"\n\nfull_file_name = concatenate_name_type(f_name, f_type)\nprint(full_file_name)\n\n# END PROBLEM 1 SOLUTION\n\n\n# PROBLEM 2 (10 Points)\n# Define a function named \"write_into_file\"\n# The function accepts two arguments - one is \"filename\", the other is \"file_content\"\n# \"filename\" is a string and \"file_content\" is a list of strings\n# Open the file with \"full_file_name\", read all lines, store the last two lines into the variable \"last_two_lines\"\n# Make sure that there is a new line character \"\\n\" at the end of each line in \"last_two_lines\"\n# Write \"last_two_lines\" into a new file called \"file2.txt\" using the function \"write_into_file\"\n# Print \"last_two_lines\"\n\n# BEGIN PROBLEM 2 SOLUTION\n\n#def write_into_file(filename, file_content):\n #path = filename\n #file_handle = open(path)\n #lines = file_handle.readlines()\n #file_handle.close()\n #for line in lines[-2:]:\n #file_content.append(line)\n\n#last_two_lines = []\n#path = 'file2.txt'\n#file_handle = open(path, 'w')\n#write_into_file(full_file_name, last_two_lines)\n#for line in last_two_lines:\n #file_handle.write(f\"{line}\")\n#file_handle.close()\n#print(last_two_lines)\n\ndef write_into_file(filename, file_content):\n file_open = open(filename, 'w')\n #for line in file_content:\n for line in file_content:\n file_open.write(f\"{line}\")\n file_open.close()\n\nlast_two_lines = []\nfile_handle = open(full_file_name)\nfile_content = file_handle.readlines()\nprint(file_content)\nlast_two_lines = file_content[1:]\nfilename = 'file2.txt'\nwrite_into_file(filename,last_two_lines)\nprint(last_two_lines)\n\n# END PROBLEM 2 SOLUTION\n\n\n# PROBLEM 3 (10 Points)\n# Finally, put all you've learned together.\n# Open each file with file_name in \"file_name_list\" and \"file_type\",\n# read all lines and store those unique lines into the variable \"unique_lines\".\n# Make sure that there is a new line character \"\\n\" at the end of each line in \"unique_lines\".\n# Write \"unique_lines\" into a new file called \"summary.txt\" using the function \"write_into_file\".\n# Print \"unique_lines\".\n\n# BEGIN PROBLEM 3 SOLUTION\nfile_name_list = [\"file1\", \"file2\", \"file3\"]\nfile_type = \"txt\"\nunique_lines = []\nall_files = []\nall_lines=[]\nall_lines_string = []\nunique_lines_list = []\nfor file in file_name_list:\n all_files.append(concatenate_name_type(file, file_type))\nprint(all_files)\n\nfor file in all_files:\n #print(file)\n file_opening = open(file, 'r')\n all_lines.append(file_opening.readlines())\n for line in all_lines:\n for x in line:\n all_lines_string.append(x.split(','))\nprint(all_lines_string)\n\nfor line in all_lines_string:\n if line not in unique_lines_list:\n unique_lines_list.append(line)\nfor line in unique_lines_list:\n for x in line:\n unique_lines.append(x)\n\n\n#for line in unique_lines:\n #print(line)\n #file_opening.close()\nwrite_into_file('summary.txt',unique_lines)\nprint(unique_lines)\n# END PROBLEM 3 SOLUTION\n#how can we check if a value already exists in unique_lines and only append it if it doesn't already exist\n#you'll need for loops to iterate through file_name_list and the lines within 
each file\n# END LAB EXERCISE\n","sub_path":"lab_05.py","file_name":"lab_05.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"325572400","text":"from data_unit.windowQueue import mData\n\nmList = [\n [1, 8], [2, 2], [3, 4], [5, 11], [6, 7]\n]\nmList.sort(key=lambda each : each[1], reverse=True)\nprint(mList)\n\ni = 0\nlength = len(mList)\nwhile (i < length):\n mList[i][1] += 1\n i += 1\nprint(mList, end=\"\\n\\n\")\n\n\nmmList = [0,1,2,0,0,3,0,0,4,5,7]\nprint(mmList)\nfor i in range (0, 5):\n oneEl = mmList.pop(0)\n print(oneEl)\n print(mmList)\n\n\n# flow : list = []\n#\n# with open(\"streamSample.txt\", \"r\") as streamSample:\n# allData = streamSample.read().replace(' ', '').replace('\\n', '')\n# print(allData)\n# for N in allData:\n# newData = mData([N])\n# flow.append(newData)\n#\n# print(\"\\nStart Printing\")\n# i = 0\n# for x in flow:\n# print(i, end=\" \")\n# print(str(x))\n# i += 1\n# print(\"\\nAgain\")\n# i = 0\n# flowLength = len(flow)\n# for i in range (0, flowLength):\n# print(i, end=\" \")\n# print(flow[i])\n# i += 1\n# print(\"\\nEnumerate\")\n# for x in list(enumerate(flow)):\n# print(x[0], end=\" \")\n# print(str(x[1]))\n# exit(1)\n\n\n","sub_path":"pytest001/src/mytest04/testanything.py","file_name":"testanything.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"472099454","text":"lista_stip = []\nwhile True:\n stipendio=int(input(\"A quanto ammonta lo stipendio del dipendente? (se viene inviata la quota -1 verrà calcolata la media) \"))\n if stipendio == -1:\n break\n else:\n lista_stip.append(stipendio)\n\nmedia = sum(lista_stip)/len(lista_stip)\nprint(\"Media stipendi: \" + str(media))\n","sub_path":"es 26 pag 73.py","file_name":"es 26 pag 73.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"143567040","text":"\n\n\n# 419. Battleships in a Board\n\n# Given an 2D board, count how many battleships are in it. \n# The battleships are represented with 'X's, empty slots are represented with '.'s. 
You may assume the following rules:\n\n# You receive a valid board, made of only battleships or empty slots.\n# Battleships can only be placed horizontally or vertically.\n# In other words, they can only be made of the shape 1xN (1 row, N columns) or Nx1 (N rows, 1 column), where N can be of any size.\n\n#At least one horizontal or vertical cell separates between two battleships - there are no adjacent battleships.\n# Example:\n# X..X\n# ...X\n# ...X\n# In the above board there are 2 battleships.\n\n# Invalid Example:\n# ...X\n# XXXX\n# ...X\n# This is an invalid board that you will not receive - as battleships will always have a cell separating between them.\n\n# Follow up:\n# Could you do it in one-pass, using only O(1) extra memory and without modifying the value of the board?\n\nclass countBattleships:\n def doit1(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: int\n \"\"\"\n # Will not working in \"invalid case\"\n if not board :\n return 0\n\n m, n = len(board), len(board[0])\n count = 0\n for i in range(m):\n for j in range(n):\n\n if board[i][j] == 'X' and (i == 0 or board[i-1][j] == '.') and (j == 0 or board[i][j-1] == '.'):\n count += 1\n\n return count\n\n\n def doit(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: int\n \"\"\"\n if len(board) == 0:\n return 0;\n \n shipCount = 0;\n \n for y, row in enumerate(board):\n for x, cell in enumerate(row):\n if cell == \"X\" and checkShip(board, x, y):\n shipCount += 1;\n \n return shipCount;\n \n def checkShip(board, x, y):\n\n if checkLeft(board, x, y) or checkTop(board, x, y):\n return False;\n \n if checkRight(board, x, y):\n while checkThis(board, x, y):\n if checkTop(board, x, y) or checkBottom(board, x, y):\n return False;\n x += 1; \n\n elif checkBottom(board, x, y):\n while checkThis(board, x, y):\n if checkLeft(board, x, y) or checkRight(board, x, y):\n return False;\n y += 1;\n \n return True;\n \n def checkLeft(self, board, x, y):\n return x > 0 and board[y][x - 1] == \"X\";\n \n def checkRight(self, board, x, y):\n return x < len(board[0]) - 1 and board[y][x + 1] == \"X\";\n \n def checkTop(self, board, x, y):\n return y > 0 and board[y - 1][x] == \"X\";\n \n def checkBottom(self, board, x, y):\n return y < len(board) - 1 and board[y + 1][x] == \"X\";\n \n def checkThis(self, board, x, y):\n return x >= 0 and x < len(board[0]) and y >= 0 and y < len(board) and board[y][x] == \"X\";\n\n\n\nif __name__==\"__main__\":\n\n a = [\n ['X','.','.', 'X'],\n ['.','.','.', 'X'],\n ['.','.','.', 'X']\n ]\n\n res = countBattleships().doit(a)\n \n\n a = [\n ['.','.','.', 'X'],\n ['X','X','X', 'X'],\n ['.','.','.', 'X']\n ]\n\n res = countBattleships().doit(a)\n pass\n\n ","sub_path":"PythonLeetcode/LeetCodeE/429_BattleshipsinBoard.py","file_name":"429_BattleshipsinBoard.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"270138971","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.spatial import ConvexHull\r\nimport seaborn as sns\r\nfrom pywaffle import Waffle\r\nimport squarify\r\nimport random\r\nimport math\r\nimport matplotlib; matplotlib.use('TkAgg')\r\nimport warnings; warnings.filterwarnings('ignore')\r\nimport warnings; warnings.filterwarnings(action='once')\r\n\r\n\r\nlarge = 22; med = 16; small = 12\r\nparams = {'axes.titlesize': large,\r\n 'legend.fontsize': med,\r\n 'figure.figsize': (16, 10),\r\n 'axes.labelsize': 
med,\r\n 'axes.titlesize': med,\r\n 'xtick.labelsize': med,\r\n 'ytick.labelsize': med,\r\n 'figure.titlesize': large}\r\n\r\n###=============================================================================================\r\n\r\n\r\ndef Waffle_Chart(self):\r\n\r\n df_raw = pd.read_csv(\"dataVset/mpg_ggplot2.csv\")\r\n\r\n # Prepare Data\r\n df = df_raw.groupby('class').size().reset_index(name='counts')\r\n n_categories = df.shape[0]\r\n colors = [plt.cm.inferno_r(i / float(n_categories)) for i in range(n_categories)]\r\n\r\n # Draw Plot and Decorate\r\n fig = plt.figure(\r\n \"Waffle Chart\",\r\n FigureClass=Waffle,\r\n plots={\r\n '111': {\r\n 'values': df['counts'],\r\n 'labels': [\"{0} ({1})\".format(n[0], n[1]) for n in df[['class', 'counts']].itertuples()],\r\n 'legend': {'loc': 'upper left', 'bbox_to_anchor': (1.05, 1), 'fontsize': 12},\r\n 'title': {'label': '# Vehicles by Class', 'loc': 'center', 'fontsize': 18}\r\n },\r\n },\r\n rows=7,\r\n colors=colors,\r\n figsize=(16, 9)\r\n )\r\n plt.show()\r\n\r\n###=============================================================================================\r\n\r\ndef Pie_Chart(self):\r\n df_raw = pd.read_csv(\"dataVset/mpg_ggplot2.csv\")\r\n\r\n # Prepare Data\r\n df = df_raw.groupby('class').size().reset_index(name='counts')\r\n\r\n # Draw Plot\r\n fig, ax = plt.subplots(figsize=(12, 7), subplot_kw=dict(aspect=\"equal\"), dpi=80)\r\n\r\n data = df['counts']\r\n categories = df['class']\r\n explode = [0, 0, 0, 0, 0, 0.1, 0]\r\n\r\n def func(pct, allvals):\r\n absolute = int(pct / 100. * np.sum(allvals))\r\n return \"{:.1f}% ({:d} )\".format(pct, absolute)\r\n\r\n wedges, texts, autotexts = ax.pie(data,\r\n autopct=lambda pct: func(pct, data),\r\n textprops=dict(color=\"w\"),\r\n colors=plt.cm.Dark2.colors,\r\n startangle=140,\r\n explode=explode)\r\n\r\n # Decoration\r\n ax.legend(wedges, categories, title=\"Vehicle Class\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\r\n plt.setp(autotexts, size=10, weight=700)\r\n ax.set_title(\"Class of Vehicles: Pie Chart\")\r\n plt.show()\r\n\r\n###=============================================================================================\r\n\r\ndef Treemap(self):\r\n df_raw = pd.read_csv(\"dataVset/mpg_ggplot2.csv\")\r\n\r\n # Prepare Data\r\n df = df_raw.groupby('class').size().reset_index(name='counts')\r\n labels = df.apply(lambda x: str(x[0]) + \"\\n (\" + str(x[1]) + \")\", axis=1)\r\n sizes = df['counts'].values.tolist()\r\n colors = [plt.cm.Spectral(i / float(len(labels))) for i in range(len(labels))]\r\n\r\n # Draw Plot\r\n plt.figure(\"Treemap\",figsize=(12, 8), dpi=80)\r\n squarify.plot(sizes=sizes, label=labels, color=colors, alpha=.8)\r\n\r\n # Decorate\r\n plt.title('Treemap of Vechile Class')\r\n plt.axis('off')\r\n plt.show()\r\n\r\n###=============================================================================================\r\n\r\ndef Bar_Chart(self):\r\n df_raw = pd.read_csv(\"dataVset/mpg_ggplot2.csv\")\r\n\r\n # Prepare Data\r\n df = df_raw.groupby('manufacturer').size().reset_index(name='counts')\r\n n = df['manufacturer'].unique().__len__() + 1\r\n all_colors = list(plt.cm.colors.cnames.keys())\r\n random.seed(100)\r\n c = random.choices(all_colors, k=n)\r\n\r\n # Plot Bars\r\n plt.figure(\"Bar Chart\",figsize=(16, 10), dpi=80)\r\n plt.bar(df['manufacturer'], df['counts'], color=c, width=.5)\r\n for i, val in enumerate(df['counts'].values):\r\n plt.text(i, val, float(val), horizontalalignment='center', verticalalignment='bottom',\r\n fontdict={'fontweight': 500, 
'size': 12})\r\n\r\n # Decoration\r\n plt.gca().set_xticklabels(df['manufacturer'], rotation=60, horizontalalignment='right')\r\n plt.title(\"Number of Vehicles by Manaufacturers\", fontsize=22)\r\n plt.ylabel('# Vehicles')\r\n plt.ylim(0, 45)\r\n plt.show()\r\n\r\n###=============================================================================================","sub_path":"TrafficSolutionAnalysis/TrafficRoadCal/DVFunction/Composition.py","file_name":"Composition.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"423264845","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\nPublish a service scheduler to SNS.\n\nThis script runs on a fixed schedule to send an SNS notification to start\none of our adapters. It receives a blob of JSON from a CloudWatch timed\nevent, and publishes that to the service scheduler topic.\n\"\"\"\n\nfrom wellcome_lambda_utils.sns_utils import publish_sns_message\n\n\ndef main(event, _):\n print(f'event = {event!r}')\n message = {\n 'cluster': event['cluster'],\n 'service': event['service'],\n 'desired_count': event['desired_count'],\n }\n publish_sns_message(topic_arn=event['topic_arn'], message=message)\n","sub_path":"shared_infra/service_scheduler/src/service_scheduler.py","file_name":"service_scheduler.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"576917371","text":"import time\nimport sys\n\ndef instrucoes():\n print(\"Este é um jogo da velha.\")\n print(\"As jogadas serão passadas por coordenadas indicadas por números de 1-9\")\n print()\n print(\"|1 2 3|\")\n print(\"|4 5 6|\")\n print(\"|7 8 9|\")\n print(\"------------------------------------------------------------------------\")\n \ndef tabuleiro():\n global tabu\n tabu= [\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"]\n \n \ndef mostra():\n print(tabu[0],tabu[1],tabu[2])\n print(tabu[3],tabu[4],tabu[5])\n print(tabu[6],tabu[7],tabu[8])\n \ndef jogadas():\n for i in range(0,9):\n jog=True\n if i%2 == 0:\n j1 = int(input(\"Jogador1: \"))\n while jog:\n if tabu[j1-1]==\".\":\n tabu[j1-1]=str(\"X\")\n mostra()\n if tabu[0]==\"X\" and tabu[1]==\"X\" and tabu[2]==\"X\":\n print(\"JOGADOR 1 VENCEU!!!\")\n time.sleep(2)\n dnovo()\n elif tabu[3]==\"X\" and tabu[4]==\"X\" and tabu[5]==\"X\":\n print(\"JOGADOR 1 VENCEU!!!\")\n time.sleep(2)\n dnovo()\n elif tabu[6]==\"X\" and tabu[7]==\"X\" and tabu[8]==\"X\":\n print(\"JOGADOR 1 VENCEU!!!\")\n time.sleep(2)\n dnovo()\n elif tabu[0]==\"X\" and tabu[3]==\"X\" and tabu[6]==\"X\":\n print(\"JOGADOR 1 VENCEU!!!\")\n time.sleep(2)\n dnovo()\n elif tabu[1]==\"X\" and tabu[4]==\"X\" and tabu[7]==\"X\":\n print(\"JOGADOR 1 VENCEU!!!\")\n time.sleep(2)\n dnovo()\n elif tabu[2]==\"X\" and tabu[5]==\"X\" and tabu[8]==\"X\":\n print(\"JOGADOR 1 VENCEU!!!\")\n time.sleep(2)\n dnovo()\n elif tabu[0]==\"X\" and tabu[4]==\"X\" and tabu[8]==\"X\":\n print(\"JOGADOR 1 VENCEU!!!\")\n time.sleep(2)\n dnovo()\n elif tabu[2]==\"X\" and tabu[4]==\"X\" and tabu[6]==\"X\":\n print(\"JOGADOR 1 VENCEU!!!\")\n time.sleep(2)\n dnovo()\n else:\n pass \n jog=False\n else:\n print(\"Escolha uma casa vazia!\")\n j1 = int(input(\"Jogador1: \"))\n else:\n j2 = int(input(\"Jogador2: \"))\n while jog:\n if tabu[j2-1]==\".\":\n tabu[j2-1]=str(\"O\")\n mostra()\n if tabu[0]==\"O\" and tabu[1]==\"O\" and tabu[2]==\"O\":\n print(\"JOGADOR 2 VENCEU!!!\")\n dnovo()\n elif tabu[3]==\"O\" 
and tabu[4]==\"O\" and tabu[5]==\"O\":\n print(\"JOGADOR 2 VENCEU!!!\")\n dnovo()\n elif tabu[6]==\"O\" and tabu[7]==\"O\" and tabu[8]==\"O\":\n print(\"JOGADOR 2 VENCEU!!!\")\n dnovo()\n elif tabu[0]==\"O\" and tabu[3]==\"O\" and tabu[6]==\"O\":\n print(\"JOGADOR 2 VENCEU!!!\")\n dnovo()\n elif tabu[1]==\"O\" and tabu[4]==\"O\" and tabu[7]==\"O\":\n print(\"JOGADOR 2 VENCEU!!!\")\n dnovo()\n elif tabu[2]==\"O\" and tabu[5]==\"O\" and tabu[8]==\"O\":\n print(\"JOGADOR 2 VENCEU!!!\")\n dnovo()\n elif tabu[0]==\"O\" and tabu[4]==\"O\" and tabu[8]==\"O\":\n print(\"JOGADOR 2 VENCEU!!!\")\n dnovo()\n elif tabu[2]==\"O\" and tabu[4]==\"O\" and tabu[6]==\"O\":\n print(\"JOGADOR 2 VENCEU!!!\")\n dnovo()\n else:\n pass\n jog=False\n else:\n print(\"Escolha uma casa vazia!\")\n j2 = int(input(\"Jogador2: \"))\n\ndef dnovo():\n x=input(\"Deseja jogar de novo? s/n: \")\n if x==\"s\":\n instrucoes()\n tabuleiro()\n mostra()\n jogadas()\n elif x==\"n\":\n time.sleep(2)\n sys.exit()\n else:\n print(\"Opção inválida!\")\n print(\"Digite s para sim ou n para não\")\n dnovo()\n\n \ninstrucoes()\ntabuleiro()\nmostra()\njogadas()\n","sub_path":"jv.py","file_name":"jv.py","file_ext":"py","file_size_in_byte":4692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"504681442","text":"import numpy as np\n\n\ndef getVesselMeanWidth(dict_vesselWidth):\n ##first, get the mean vessel width for each segments\n dict_meanVesselWidth = {}\n for vesselkey in dict_vesselWidth.keys():\n segVesselWidths = dict_vesselWidth[vesselkey]\n meanwidth0 = np.mean(segVesselWidths)\n std0 = np.std(segVesselWidths)\n filteredWidths = segVesselWidths[np.abs(segVesselWidths - meanwidth0) <= np.minimum(2*std0, 2.5)]\n if len(filteredWidths) > 30: #if the width is less than 30 pixels, then remove it.\n meanWidth = np.mean(filteredWidths)\n # else:\n # meanWidth = meanwidth0\n\n dict_meanVesselWidth[vesselkey] = meanWidth #[meanWidth, meanwidth0, std0]\n\n\n return dict_meanVesselWidth","sub_path":"pulseWavePropagation/VesselWidth/GetVesselMeanWidth.py","file_name":"GetVesselMeanWidth.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"408411568","text":"import pandas as pd\nfrom utils.constants import LEVEL_1_keyword, OCCUPATION_keyword, ALSFRSR_keyword\n\n'''\nfile and data loading functions\n'''\ndef readMetricsExcelData(data_folder, file_name, sheet_name_or_index):\n file_to_open = data_folder / file_name\n #read sheet\n df = pd.read_excel(file_to_open, sheet_name=sheet_name_or_index, header=None)\n #convert all feature values to string\n df = df.astype(str)\n return df\n\n# for script a_baseline_classification.py\n\n#remove .0 from values\ndef removeFloatPartOfString(s): \n #forceful convertion to str\n if type(s) != \"str\":\n s = str(s) \n #no point, nothing to do\n if s.find(\".\") == -1:\n return s\n else:\n #remove all after the dot (inclusive)\n return s[:s.find(\".\")]\n\n#returns ndarrays and a list of features\ndef load_original_data_with_missings(file_name, features_to_keep=['ALL']):\n df = pd.read_csv(file_name, engine='python')\n # keep only the features in features_to_keep\n if features_to_keep[0] != 'ALL':\n df = df[features_to_keep]\n #convert missings to -1\n df = df.fillna(-1) \n #convert all feature values to string\n df = df.astype(str)\n #sort values by group class\n df = df.sort_values(by=['group'])\n #remove all decimal parts \n for i in range(0, df.columns.size):\n 
df[df.columns[i]] = df[df.columns[i]].apply(removeFloatPartOfString) \n #convert the data frame read from the csv file to a matrix\n table_X = df.values\n #table_y: last column of the file, the target class column\n table_y = table_X[:, -1]\n #cast all table_y elements to int\n table_y = table_y.astype(int)\n #remove first (subject id) and last (target class) columns of the data matrix \n #to only keep the data\n table_X = table_X[:, list(range(1,len(df.columns) - 1))]\n #cast all table_X elements to int\n table_X = table_X.astype(int)\n #get list of feature names (also removing first and last columns)\n feature_names = list(df)[1:-1]\n #return the feature data and the target data\n return table_X, table_y, feature_names\n\n# for script b_subject_biclusters_classification.py\n\n#returns ndarrays and a list of features\ndef load_matrix_data_no_missings(file_name):\n df = pd.read_csv(file_name, engine='python')\n #convert the data frame read from the csv file to a matrix\n table_X = df.values\n #table_y: last column of the file, the target class column\n table_y = table_X[:, -1]\n #cast all table_y elements to int\n table_y = table_y.astype(int)\n #remove first (subject id) and last (target class) columns of the data matrix \n #to only keep the data\n table_X = table_X[:, list(range(1,len(df.columns) - 1))]\n #cast all table_X elements to int\n table_X = table_X.astype(int)\n #get list of feature names (also removing first and last columns)\n feature_names = list(df)[1:-1]\n #return the feature data and the target data\n return table_X, table_y, feature_names\n\n# for script c_merged_data_classification.py\n\n#returns dataframe\ndef load_to_merge_original_data_with_missings(file_name, features_to_keep=['ALL']):\n df = pd.read_csv(file_name, engine='python')\n # keep only the features in features_to_keep\n if features_to_keep[0] != 'ALL':\n df = df[features_to_keep]\n #convert missings to -1\n df = df.fillna(-1) \n #convert all to string\n df = df.astype(str)\n #remove all decimal parts \n for i in range(0, df.columns.size):\n df[df.columns[i]] = df[df.columns[i]].apply(removeFloatPartOfString) \n return df\n\n#returns dataframe\ndef load_to_merge_matrix_data_no_missings(file_name):\n df = pd.read_csv(file_name, engine='python')\n return df\n\n# for baseline scripts (to translate from category values to labels)\ndef correctZeroPrefixCategories(value):\n if value not in [\"NA\", \"nan\", \"NaN\"] and 0 <= int(value) < 10:\n return \"0\" + value\n else:\n return value\n\ndef getTranslationMapCategoriesLabels(data_folder, xslx_translation_categories_labels):\n # create map\n label_map = {}\n # read file\n file_to_open = data_folder / xslx_translation_categories_labels\n # read first and only sheet (with header in the first line)\n label_data = pd.read_excel(file_to_open, sheet_name=0, header=0)\n # convert everything to strings (NaN are \"nan\" now and integers gain a decimal case :S) \n label_data = label_data.astype(str)\n #for each second column, remove all decimal parts \n for i in range(0, label_data.columns.size, 2):\n label_data[label_data.columns[i+1]] = label_data[label_data.columns[i+1]].apply(removeFloatPartOfString) \n # if column is one of ALSFRSR, remove floats from first column too\n if (ALSFRSR_keyword in label_data.columns[i]):\n label_data[label_data.columns[i]] = label_data[label_data.columns[i]].apply(removeFloatPartOfString) \n # #failsafe for Occupation (level 1) columns (that have categories like 01)\n # for i in range(0, label_data.columns.size, 2):\n # if 
(OCCUPATION_keyword in label_data.columns[i]) and (LEVEL_1_keyword in label_data.columns[i]):\n # label_data[label_data.columns[i+1]] = label_data[label_data.columns[i+1]].apply(correctZeroPrefixCategories) \n # iterate through all columns, two by two (first column: labels, second column: category values)\n for i in range(0, label_data.columns.size, 2):\n # get column name\n columnName = label_data.columns[i]\n # remove nan's from labels and category values' columns\n temp_labels = [x for x in label_data[label_data.columns[i]].tolist() if x != 'nan']\n temp_cat_values = [x for x in label_data[label_data.columns[i + 1]].tolist() if x != 'nan']\n # create new map\n feature_map = {}\n # add category values as keys, labels as values\n for j in range(0, len(temp_labels)):\n feature_map[temp_cat_values[j]] = temp_labels[j]\n # add feature map to label map\n label_map[columnName] = feature_map\n\n #return map\n return label_map\n\n\n\n","sub_path":"Python_code/scripts_task_2/utils/load_data_utils.py","file_name":"load_data_utils.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"206109588","text":"#Imports\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport datetime\r\nimport os\r\nimport sys\r\n\r\nyear = datetime.datetime.today().year\r\n\r\ndef Pirates():\r\n #Outfile\r\n outfile = open('Pirates Pitching Stats.txt', 'a') \r\n #Loop 1\r\n again = 1\r\n while again == 1: \r\n #Pirates 2019 Pitching Staff\r\n fnames = ['Chris', 'Steven', 'Nick', 'Kyle', 'Michael', 'Keone', 'Nick', 'Francisco', 'Jordan', 'Joe', 'Jameson', 'Richard', 'Felipe', 'Trevor' ]\r\n lnames = ['Archer', 'Brault', 'Burdi', 'Crick', 'Feliz', 'Kela', 'Kingham', 'Liriano', 'Lyles', 'Musgrove', 'Taillon', 'Rodriguez', 'Vazquez', 'Williams'] \r\n #Counters and Variables\r\n i = 0\r\n games_won = 0\r\n games_lost = 0\r\n era = 0\r\n #ave_era = 0\r\n year = datetime.datetime.today().year\r\n #Title\r\n print(f'\\n\\nPittsburgh Pirates Pitching Staff, {year}\\n')\r\n #Loop 2\r\n while i < len(lnames):\r\n #Rodriguez is on a different html page\r\n if lnames[i] == 'Rodriguez':\r\n page = '05.shtml'\r\n else: \r\n page = '01.shtml'\r\n #Print Player Info\r\n player = lnames[i].lower()[:5]+fnames[i].lower()[:2]+page\r\n firstletter = lnames[i][:1].lower()\r\n #URL\r\n if lnames[i] == 'Vazquez':\r\n url = 'https://www.baseball-reference.com/players/r/riverfe01.shtml' \r\n else:\r\n url = 'https://www.baseball-reference.com/players/%s/%s' % (firstletter, player)\r\n response = requests.get(url)\r\n #Soup\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n #Soup Finds\r\n wins = soup.find('h4', class_='poptip', attrs={'data-tip': 'Wins'})\r\n losses = soup.find('h4', class_='poptip', attrs={'data-tip': 'Losses'})\r\n era = soup.find('h4', class_='poptip', attrs={'data-tip': '9 * ER / IP
For recent years, leaders need 1 IP<br>per team game played.<br>Bold indicates lowest ERA using current stats<br>Gold means awarded ERA title at end of year.'})\r\n            games_pitched = soup.find('h4', class_='poptip', attrs={'data-tip': 'Games Played<br>This includes all times that the player appeared on the lineup card. Pitchers in non-DH games that appeared on the lineup card but didn\\'t bat will still have a game in this column.'})\r\n            games_started = soup.find('h4', class_='poptip', attrs={'data-tip': 'Games Started'})\r\n            games_saved = soup.find('h4', class_='poptip', attrs={'data-tip': 'Saves'})\r\n            innings_pitched = soup.find('h4', class_='poptip', attrs={'data-tip': 'Innings Pitched'}) \r\n            whip = soup.find('h4', class_='poptip', attrs={'data-tip': '(BB + H)/IP<br>For recent years, leaders need 1 IP<br>
per team game played'}) \r\n #Soup Next Siblings\r\n wins = wins.next_sibling\r\n losses = losses.next_sibling\r\n era = era.next_sibling\r\n games_pitched = games_pitched.next_sibling\r\n games_started = games_started.next_sibling\r\n games_saved = games_saved.next_sibling\r\n innings_pitched = innings_pitched.next_sibling\r\n whip = whip.next_sibling\r\n #Print Player's name\r\n #print(lnames[i]+', '+fnames[i]+'\\n'+soup.find(itemprop='name').find_next('p').find_next('p').contents[2].strip()+'-Handed '+soup.find(itemprop='name').find_next('p').contents[2].strip()+' ('+player+')')\r\n print(lnames[i]+', '+fnames[i])\r\n outfile.write(lnames[i]+', '+fnames[i]+':\\n')\r\n #Print \r\n if wins.text == 0:\r\n print('\\tWins (YTD): 0')\r\n outfile.write('\\tWins (YTD): 0\\n')\r\n else:\r\n print('\\tWins (YTD): '+wins.text)\r\n outfile.write('\\tWins (YTD): '+wins.text+'\\n')\r\n if losses.text == 0:\r\n print('\\tLosses (YTD): 0') \r\n outfile.write('\\tLosses (YTD): 0\\n')\r\n else:\r\n print('\\tLosses (YTD): '+losses.text)\r\n outfile.write('\\tLosses (YTD): '+losses.text+'\\n') \r\n if era.text == 0:\r\n print('\\tERA (YTD): 0')\r\n outfile.write('\\tERA (YTD): 0\\n')\r\n else:\r\n print('\\tERA (YTD): '+era.text) \r\n outfile.write('\\tERA (YTD): '+era.text+'\\n')\r\n print('\\tGames Pitched (YTD): '+games_pitched.text)\r\n outfile.write('\\tGames Pitched (YTD): '+games_pitched.text+'\\n')\r\n if games_started.text == 0:\r\n print('\\tGames Started (YTD): 0')\r\n outfile.write('\\tGames Started (YTD): 0\\n')\r\n else:\r\n print('\\tGames Started (YTD): '+games_started.text)\r\n outfile.write('\\tGames Started (YTD): '+games_started.text+'\\n')\r\n if games_saved.text == 0:\r\n print('\\tGames Saved (YTD): 0')\r\n outfile.write('\\tGames Saved (YTD): 0')\r\n else:\r\n print('\\tGames Saved (YTD): '+games_saved.text)\r\n outfile.write('\\tGames Saved (YTD): '+games_saved.text+'\\n') \r\n print('\\tInnings Pitched (YTD): '+innings_pitched.text)\r\n outfile.write('\\tInnings Pitched (YTD): '+innings_pitched.text+'\\n')\r\n print('\\tWHIP (YTD): '+whip.text)\r\n outfile.write('\\tWHIP (YTD): '+whip.text+'\\n')\r\n print('-' * 45)\r\n outfile.write('-' * 52)\r\n outfile.write('\\n')\r\n #Increment\r\n games_won = games_won + int(wins.text)\r\n games_lost = games_lost + int(losses.text)\r\n i += 1\r\n #Team Stats\r\n print(f'\\nPirates Pitching Staff Record ({year} YTD):\\n')\r\n print('Games Won (YTD): ',games_won,'\\n')\r\n print('Games Lost (YTD): ',games_lost,'\\n')\r\n print(f'Games Won/Lost Record (YTD): {games_won/(games_won+games_lost):.3f}')\r\n outfile.write(f'\\nPirates Pitching Staff Record ({year} YTD):\\n')\r\n outfile.write('Games Won (YTD): '+str(games_won)+'\\n')\r\n outfile.write('Games Lost (YTD): '+str(games_lost)+'\\n')\r\n outfile.write(f'Games Won/Lost Record (YTD): {games_won/(games_won+games_lost):.3f}')\r\n #Close file\r\n outfile.close()\r\n #Again\r\n again = int(input('\\nWould you like to run again (1) or select another pitcher (2) or (3) to quit.'))\r\n if again == 2:\r\n AnotherPitcher()\r\n \r\ndef AnotherPitcher():\r\n #Loop 1\r\n again = 1\r\n while again == 1:\r\n another_pitcher_lname = input('Enter another pitcher\\'s Last Name: ')\r\n another_pitcher_fname = input('Enter another pitcher\\'s First Name: ')\r\n #Counters and Variables\r\n i = 0\r\n games_won = 0\r\n games_lost = 0\r\n era = 0\r\n year = datetime.datetime.today().year\r\n #Title\r\n print(f'\\n\\n{another_pitcher_fname} {another_pitcher_lname}, {year} Stats\\n')\r\n #Print Player Info\r\n page 
= '01.shtml'\r\n        player = another_pitcher_lname.lower()[:5]+another_pitcher_fname.lower()[:2]+page\r\n        firstletter = another_pitcher_lname[:1].lower()\r\n        #URL\r\n        url = 'https://www.baseball-reference.com/players/%s/%s' % (firstletter, player)\r\n        response = requests.get(url)\r\n        #Soup\r\n        soup = BeautifulSoup(response.text, 'html.parser')\r\n        #Soup Finds\r\n        wins = soup.find('h4', class_='poptip', attrs={'data-tip': 'Wins'})\r\n        losses = soup.find('h4', class_='poptip', attrs={'data-tip': 'Losses'})\r\n        era = soup.find('h4', class_='poptip', attrs={'data-tip': '9 * ER / IP<br>For recent years, leaders need 1 IP<br>per team game played.<br>Bold indicates lowest ERA using current stats<br>Gold means awarded ERA title at end of year.'})\r\n        games_pitched = soup.find('h4', class_='poptip', attrs={'data-tip': 'Games Played<br>This includes all times that the player appeared on the lineup card. Pitchers in non-DH games that appeared on the lineup card but didn\\'t bat will still have a game in this column.'})\r\n        games_started = soup.find('h4', class_='poptip', attrs={'data-tip': 'Games Started'})\r\n        games_saved = soup.find('h4', class_='poptip', attrs={'data-tip': 'Saves'})\r\n        innings_pitched = soup.find('h4', class_='poptip', attrs={'data-tip': 'Innings Pitched'}) \r\n        whip = soup.find('h4', class_='poptip', attrs={'data-tip': '(BB + H)/IP<br>For recent years, leaders need 1 IP<br>
per team game played'}) \r\n #Soup Next Siblings\r\n wins = wins.next_sibling\r\n losses = losses.next_sibling\r\n era = era.next_sibling\r\n games_pitched = games_pitched.next_sibling\r\n games_started = games_started.next_sibling\r\n games_saved = games_saved.next_sibling\r\n innings_pitched = innings_pitched.next_sibling\r\n whip = whip.next_sibling\r\n #Print Player's name\r\n #print(another_pitcher_lname+', '+another_pitcher_fname+'\\n'+soup.find(itemprop='name').find_next('p').find_next('p').contents[2].strip()+'-Handed '+soup.find(itemprop='name').find_next('p').contents[2].strip()+' ('+player+')')\r\n print(another_pitcher_lname+', '+another_pitcher_fname)\r\n #Print \r\n print('Wins (YTD): 0') if wins.text == 0 else print('Wins (YTD): '+wins.text)\r\n print('Losses (YTD): 0') if losses.text == 0 else print('Losses (YTD): '+losses.text)\r\n print('ERA (YTD): 0') if era.text == 0 else print('ERA (YTD): '+era.text) \r\n print('Games Pitched (YTD): '+games_pitched.text)\r\n print('Games Started (YTD): 0') if games_started.text == 0 else print('Games Started (YTD): '+games_started.text)\r\n print('Games Saved (YTD): 0') if games_saved.text == 0 else print('Games Saved (YTD): '+games_saved.text)\r\n print('Innings Pitched (YTD): '+innings_pitched.text)\r\n print('WHIP (YTD): '+whip.text)\r\n print('-' * 45)\r\n #Increment\r\n games_won = games_won + int(wins.text)\r\n games_lost = games_lost + int(losses.text)\r\n i += 1\r\n #Again\r\n again = int(input('\\nWould you like to run again? Type (1) or (2) to Quit.'))\r\n if again not in [1, 2, 3]:\r\n print('You must enter a digit between 1 and 2. Please enter again: ')\r\n if again == 1:\r\n Main()\r\n elif again == 2:\r\n quit\r\n \r\ndef OpenFile():\r\n os.system ('notepad.exe Pirates Pitching Stats.txt')\r\n \r\ndef Main():\r\n \r\n stats = int(input('Enter (1) for the Pirates Pitching Stats (Writes data to: Pittsburgh Pirates Stats.txt), (2) to look-up another Pitcher\\'s stats, (3) Open Pittsburgh Pirates Stats.txt: '))\r\n if stats == 1:\r\n Pirates()\r\n elif stats == 2:\r\n AnotherPitcher()\r\n elif stats == 3: \r\n OpenFile()\r\n elif stats not in [1, 2, 3]:\r\n print('You must enter a digit between 1 and 3. 
Please enter again: ')\r\n stats = int(input('Enter (1) for the Pirates Pitching Stats (Writes data to: Pittsburgh Pirates Stats.txt), (2) to look-up another Pitcher\\'s stats, (3) Open Pittsburgh Pirates Stats.txt: '))\r\n if stats == 1:\r\n Pirates()\r\n elif stats == 2:\r\n AnotherPitcher()\r\n elif stats == 3: \r\n OpenFile()\r\n \r\n#Main Function \r\nMain()","sub_path":"Pirates Pitching Stats - MLB Pitcher Scrape.py","file_name":"Pirates Pitching Stats - MLB Pitcher Scrape.py","file_ext":"py","file_size_in_byte":11466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"393168301","text":"import random\n\nclass Hash:\n\n\n def randomizer(self):\n return self.salt\n\n def __init__(self, keys):\n self.lower_characters = ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', 'a',\n 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'z', 'x', 'c', 'v', 'b', 'n', 'm']\n self.upper_characters = ['Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P', 'A',\n 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'Z', 'X', 'C', 'V', 'B', 'N', 'M']\n self.numbers_characters = ['1', '2', '3',\n '4', '5', '6', '7', '8', '9', '0']\n self.special_chars = ['!', '@', '#', '$', '%', '^', '&',\n '*', '(', ')', '-', '_', '+', '=', ',', ' ', ';', ':']\n self.reference_characters = self.lower_characters + \\\n self.upper_characters + self.numbers_characters + self.special_chars\n self.combined = ['a', 'x', 'T', 'F', 'B', '8', '*', ';', 'p', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'w', 'z', 'c', 'v', 'b', 'n', 'm', 'Q', 'W', 'E', 'e', 'R', 'Y', 'U', 'I', 'O', 'P', 'A', 'S', 'r', 'D',\n 'G', 'H', 'J', 'K', 'L', 'Z', 'X', 'C', 't', 'V', 'N', 'M', '1', '2', '3', '4', '5', '6', 'y', '7', '9', '0', '!', '@', '#', '$', '%', '^', 'u', '&', '(', ')', '-', '_', '+', '=', ',', 'i', ' ', ':', 'o', 'q']\n\n # Generates a float from the key which will be used to randomize all characters, numbers and special symbols\n self.salt = ''\n for key in keys:\n self.salt = self.salt + str(self.combined.index(key))\n self.salt = int(self.salt)/10**len(self.salt)\n\n # Uses a float generated from the key to randomize the characters\n random.shuffle(self.reference_characters, self.randomizer)\n\n def encrypt(self, message):\n encrypted_message = ''\n for message_character in message:\n target = self.reference_characters.index(message_character)\n encrypted_message = encrypted_message + self.combined[target]\n return encrypted_message\n\n def decrypt(self, message):\n decrypted_message = ''\n for encrypted_message_character in message:\n target = self.combined.index(encrypted_message_character)\n decrypted_message = decrypted_message + \\\n self.reference_characters[target]\n return decrypted_message\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"326661547","text":"import numpy as np\nimport matplotlib.pyplot as plt\n#\nflow_label = np.load('flow_label.npy')\nflow_predict_distill = np.load('flow_distill.npy')\nflow_predict_student = np.load('flow_student.npy')\nflow_predict_student_no_weather = np.load('flow_student_no_weather.npy')\n\n\n# MSE 时刻平均(最后一维是时间)\nmse_distill = np.mean(np.mean(np.mean(np.square(flow_label - flow_predict_distill), axis=0), axis=0), axis=0)\nmse_student = np.mean(np.mean(np.mean(np.square(flow_label - flow_predict_student), axis=0), axis=0), axis=0)\nmse_student_no_weather = np.mean(np.mean(np.mean(np.square(flow_label - 
flow_predict_student_no_weather), axis=0), axis=0), axis=0)\n\n# RMSE\nrmse_distill = np.sqrt(mse_distill)\nrmse_student = np.sqrt(mse_student)\nrmse_student_no_weather = np.sqrt(mse_student_no_weather)\n\n# MAE\nmae_distill = np.mean(np.mean(np.mean(np.abs(flow_label - flow_predict_distill), axis=0), axis=0), axis=0)\nmae_student = np.mean(np.mean(np.mean(np.abs(flow_label - flow_predict_student), axis=0), axis=0), axis=0)\nmae_student_no_weather = np.mean(np.mean(np.mean(np.abs(flow_label - flow_predict_student_no_weather), axis=0), axis=0), axis=0)\n\n# MAPE\ndef cal_mape(flow_label, flow_predict):\n diff = np.abs(flow_label - flow_predict)\n diff_ratio = diff / flow_label\n # diff_ratio[diff_ratio == np.math.nan] = 0.0\n diff_ratio[diff_ratio == np.math.inf] = 0.0\n return np.mean(np.mean(np.mean(diff_ratio, axis=0), axis=0), axis=0)\n\nmape_distill = cal_mape(flow_label, flow_predict_distill)\nmape_student = cal_mape(flow_label, flow_predict_student)\nmape_student_no_weather = cal_mape(flow_label, flow_predict_student_no_weather)\n\nablation_option = 'teacher'#'weather'\nindex = 'MAPE' #'RMSE'\n\nf = {\n 'family': 'Times New Roman',\n 'weight': 'normal',\n 'size': 22\n}\nf_legend = {\n 'family': 'Times New Roman',\n 'weight': 'normal',\n 'size': 17\n}\nbar_width = 0.2 # 条形宽度\nT = [\"10\", \"20\", \"30\", \"40\", \"50\", \"60\"]\nif ablation_option == 'teacher':\n # teacher\n index_distill = np.arange(len(rmse_distill)) # 的横坐标\n index_student = index_distill + bar_width # 横坐标\n if index == 'RMSE':\n plt.bar(index_distill, height=rmse_distill, width=bar_width, color='b', label='own')\n plt.bar(index_student, height=rmse_student, width=bar_width, color='c', label='student')\n plt.ylabel('RMSE', fontdict=f) # 纵坐标轴标题\n plt.ylim(2.45, 2.58)\n elif index == 'MAE':\n plt.bar(index_distill, height=mae_distill, width=bar_width, color='b', label='own')\n plt.bar(index_student, height=mae_student, width=bar_width, color='c', label='student')\n plt.ylabel('MAE', fontdict=f) # 纵坐标轴标题\n plt.ylim(1.72, 1.83)\n elif index == 'MAPE':\n plt.bar(index_distill, height=mape_distill, width=bar_width, color='b', label='own')\n plt.bar(index_student, height=mape_student, width=bar_width, color='c', label='student')\n plt.ylabel('MAPE', fontdict=f) # 纵坐标轴标题\n plt.ylim(0.34,0.39)\n plt.legend(['ST-KDN', 'ST-KDN-NT'], prop=f_legend,loc=2) # 显示图例\n # plt.title('Effect of Teacher Guidance', fontdict=f1) # 图形标题\n\nelif ablation_option == 'weather':\n index_student = np.arange(len(rmse_student)) # 的横坐标\n index_student_no_weather = index_student + bar_width # 横坐标\n if index == 'RMSE':\n plt.bar(index_student, height=rmse_student, width=bar_width, color='b', label='student')\n plt.bar(index_student_no_weather, height=rmse_student_no_weather, width=bar_width, color='c', label='NoWeather')\n plt.ylabel('RMSE', fontdict=f) # 纵坐标轴标题\n plt.ylim(2.45, 2.6)\n elif index == 'MAE':\n plt.bar(index_student, height=mae_student, width=bar_width, color='b', label='student')\n plt.bar(index_student_no_weather, height=mae_student_no_weather, width=bar_width, color='c', label='NoWeather')\n plt.ylabel('MAE', fontdict=f) # 纵坐标轴标题\n plt.ylim(1.76,1.82)\n elif index == 'MAPE':\n plt.bar(index_student, height=mape_student, width=bar_width, color='b', label='student')\n plt.bar(index_student_no_weather, height=mape_student_no_weather, width=bar_width, color='c', label='NoWeather')\n plt.ylabel('MAPE', fontdict=f) # 纵坐标轴标题\n plt.ylim(0.35,0.40)\n plt.legend(['ST-KDN-NT','ST-KDN-NTW'],prop=f_legend,loc=2) # 显示图例\n # plt.title('Effect of 
Weather Feature Modeling',fontdict=f1) # 图形标题\n\nplt.subplots_adjust(left=0.2,bottom=0.2)\nplt.xticks(index_student + bar_width / 2, T, fontproperties='Times New Roman',\n size=22) #\nplt.yticks(fontproperties='Times New Roman', size=22)\nplt.xlabel('Time/min', fontdict=f)\n\n\nplt.show()","sub_path":"result_analyse/plot_ablation.py","file_name":"plot_ablation.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"457676437","text":"import re\n\ndef max_min_normalize(all_datapoints, datapoint):\n\treturn ( datapoint - min(all_datapoints) ) / ( max(all_datapoints) - min(all_datapoints) )\n\nclass DataSet:\n\n\tdef __init__(self, matrix, name=\"DataSet\"):\n\t\tself.data = matrix\n\t\tself.name = name\n\n\n\tdef getAttribute(self, index):\n\t\td = []\n\t\tfor i in range(0, len(self.data)):\n\t\t\td += [self.data[i][index]]\n\t\treturn d\n\n\tdef getNumInstances(self):\n\t\treturn len(self.data)\n\n\tdef getNumAttributes(self):\n\t\treturn len(self.data[0])\n\n\tdef normalize(self, normFunction=max_min_normalize):\n\t\tfor i in range(0, len(self.data[0])):\n\t\t\tattribute = self.getAttribute(i)\n\t\t\tif type(attribute[0]) != float:\n\t\t\t\t\tcontinue\n\t\t\tfor j in range(0, len(attribute)):\n\t\t\t\tself.data[j][i] = normFunction(attribute, attribute[j])\n\n\tdef simb2num(self, columnIndex):\n\t\tif type(self.data[0][columnIndex]) != str:\n\t\t\treturn\n\t\tclss = set()\n\t\tfor li in range(0, len(self.data)):\n\t\t\tclss.add(self.data[li][columnIndex])\n\t\tdic = {}\n\t\ti = 0\n\t\tfor cls in clss:\n\t\t\tdic[cls] = i\n\t\t\ti += 1\n\t\tfor li in range(0, len(self.data)):\n\t\t\tself.data[li][columnIndex] = dic[self.data[li][columnIndex]]\n\n\t@staticmethod\n\tdef readFromArff(arffFileName):\n\t\tdata_matrix = []\n\t\tf = open(arffFileName)\n\t\tds = f.read()\n\t\tf.close()\n\t\tname = \"\"\n\t\tlines = \"\"\n\t\ttry:\n\t\t\tname = re.search(\"\\@relation\\s+([\\w\\d]+)\", ds, re.I).group(1)\n\t\texcept Exception:\n\t\t\tname = \"DataSet\"\n\t\ttry:\n\t\t\tlines = ds.split(\"@data\")[1]\n\t\t\tlines = lines.split(\"\\n\")\n\t\texcept Exception:\n\t\t\tprint(\"File does not conform with ARFF file format\")\n\t\t\treturn\n\t\tdata = []\n\t\tfor line in lines:\n\t\t\tif line == \"\":\n\t\t\t\tcontinue\n\t\t\tdataline = []\n\t\t\tattrs = line.split(\",\")\n\t\t\tfor attr in attrs:\n\t\t\t\tat = ''\n\t\t\t\ttry:\n\t\t\t\t\tat = float(attr)\n\t\t\t\texcept ValueError:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tat = str(attr)\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tcontinue\n\t\t\t\tdataline += [at]\n\t\t\tdata += [dataline]\n\t\treturn DataSet(data, name)\n","sub_path":"pyRecog/dataaccess/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"403959198","text":"import re\nimport logging\nfrom collections import OrderedDict, namedtuple\nfrom functools import total_ordering\nfrom delphin._exceptions import XmrsStructureError\nfrom .config import (\n IVARG_ROLE, CONSTARG_ROLE,\n HANDLESORT, CVARSORT, ANCHOR_SORT, QUANTIFIER_SORT,\n QEQ\n)\n\n# VARIABLES, LNKS, and HOOKS\n\n@total_ordering\nclass MrsVariable(object):\n \"\"\"An MrsVariable has an id (vid), sort, and sometimes properties.\n\n MrsVariables combine an integer variable ID (or *vid*)) with a\n string sortal type (*sort*). 
In MRS and RMRS, variables may be the\n    bearers of the properties of an |EP| (thus the name \"variable\n    properties\"). MrsVariables are used for several purposes:\n\n    * **intrinsic variables** (aka IVs)\n    * **handles** (labels or holes)\n    * **anchors** (a nodeid with a sort)\n    * **variable argument values** (IVs, labels, holes, or\n      underspecified variables for unexpressed arguments)\n\n    Example:\n\n    Within an Xmrs structure, a *vid* must be unique. MrsVariables\n    can then be compared using either the sort and the vid or the vid\n    by itself. For example, ``v1`` and ``v2`` below are not equal\n    despite having the same vid (so they should not appear in the same\n    Xmrs structure), but they are both equal to their shared vid of\n    ``1``. Also note that an MrsVariable can be compared to its string\n    representation.\n\n    >>> v1 = MrsVariable(vid=1, sort='x')\n    >>> v2 = MrsVariable(vid=1, sort='e')\n    >>> v1 == v2\n    False\n    >>> v1 == 1\n    True\n    >>> v2 == 1\n    True\n    >>> v1 == 'x1'\n    True\n    >>> v1 == 'x2'\n    False\n    >>> v1 == 'e1'\n    False\n\n    Args:\n        vid: a number for the variable ID\n        sort: a string for the sortal type\n        properties: a dictionary of variable properties\n    Returns:\n        an instantiated MrsVariable object\n    Raises:\n        ValueError: when *vid* is not castable to an int\n    \"\"\"\n    def __init__(self, vid, sort, properties=None):\n        # vid is the number of the name (e.g. 1, 10003)\n        self.vid = int(vid)\n        # sort is the letter(s) of the name (e.g. h, x)\n        self.sort = sort\n        if sort == HANDLESORT and properties:\n            pass # handles cannot have properties. Log this?\n        self.properties = properties or OrderedDict()\n\n    @classmethod\n    def from_string(cls, varstring):\n        \"\"\"\n        Construct an |MrsVariable| by its string representation.\n\n        Args:\n            varstring: a string containing the sort and vid of an\n                MrsVariable, such as \"x1\" or \"event3\"\n        Returns:\n            an instantiated MrsVariable object if the string represents\n            an MrsVariable, or None otherwise\n        \"\"\"\n        try:\n            sort, vid = MrsVariable.sort_vid_split(varstring)\n            return cls(vid, sort)\n        except (ValueError, TypeError):\n            return None\n\n    @classmethod\n    def anchor(cls, vid):\n        return cls(vid, ANCHOR_SORT)\n\n    def __eq__(self, other):\n        # try both as MrsVariables\n        try:\n            return self.vid == other.vid and self.sort == other.sort\n        except AttributeError:\n            pass # other is not an MrsVariable\n        # attempt as string\n        try:\n            sort, vid = MrsVariable.sort_vid_split(other)\n            return self.sort == sort and self.vid == int(vid)\n        except (ValueError, TypeError):\n            pass # doesn't match a variable\n        # try again as vid only\n        try:\n            vid = int(other)\n            return self.vid == vid\n        except (ValueError, TypeError):\n            pass # nope.. return False\n        return False\n\n    def __lt__(self, other):\n        vid1 = self.vid\n        # only compare vids for lt\n        try:\n            return vid1 < int(other)\n        except (ValueError, TypeError):\n            pass # not an int or MrsVariable\n        # try as a string\n        try:\n            sort, vid2 = MrsVariable.sort_vid_split(other)\n            return vid1 < int(vid2)\n        except (ValueError, TypeError):\n            pass # not a string... 
no good output\n        raise ValueError('Cannot compare MrsVariable to {} of type {}'\n                         .format(str(other), type(other)))\n\n    def __int__(self):\n        return self.vid\n\n    def __hash__(self):\n        return hash(str(self))\n\n    def __repr__(self):\n        return '<MrsVariable object ({}{}) at {}>'.format(\n            self.sort, self.vid, id(self)\n        )\n\n    def __str__(self):\n        return '{}{}'.format(str(self.sort), str(self.vid))\n\n    @property\n    def sortinfo(self):\n        \"\"\"\n        Return the properties including a mapping of \"cvarsort\" to\n        the sort of the MrsVariable. Sortinfo is used in DMRS objects,\n        which don't have variables, in order to capture the sortal type\n        of a |Node|.\n        \"\"\"\n        # FIXME: currently gets CVARSORT even if the var is not a IV\n        sortinfo = OrderedDict([(CVARSORT, self.sort)])\n        sortinfo.update(self.properties)\n        return sortinfo\n\n    @staticmethod\n    def sort_vid_split(vs):\n        try:\n            sort, vid = re.match(r'^(\\w*\\D)(\\d+)$', vs).groups()\n            return sort, vid\n        except AttributeError:\n            raise ValueError('Invalid variable string: {}'.format(str(vs)))\n\n\n# I'm not sure this belongs here, but anchors are MrsVariables...\nclass AnchorMixin(object):\n    @property\n    def anchor(self):\n        \"\"\"\n        The anchor of the |EP|, |Node|, or |Argument| is just the\n        nodeid wrapped in an MrsVariable. In |Xmrs| functions, integer\n        nodeids are used instead of anchors.\n        \"\"\"\n        if self.nodeid is not None:\n            return MrsVariable(vid=self.nodeid, sort=ANCHOR_SORT)\n        return None\n\n    @anchor.setter\n    def anchor(self, anchor):\n        self.nodeid = anchor.vid\n\n\nclass VarGenerator(object):\n    \"\"\"Simple class to produce MrsVariables, incrementing the vid for\n    each one.\"\"\"\n\n    def __init__(self, starting_vid=1):\n        self.vid = starting_vid\n\n    def new(self, sort, properties=None):\n        v = MrsVariable(self.vid, sort, properties=properties)\n        self.vid += 1\n        return v\n\n\nclass Lnk(object):\n    \"\"\"\n    Lnk objects link predicates to the surface form in one of several\n    ways, the most common of which being the character span of the\n    original string.\n\n    Args:\n        data: the Lnk specifiers, whose quality depends on *type*\n        type: the way the Lnk relates the semantics to the surface form\n\n    Note:\n\n        Valid *types* and their associated *data* shown in the table\n        below.\n\n        ========= =================================================\n        type      data\n        ========= =================================================\n        charspan  a tuple of start and end character positions from\n                  the surface string\n        chartspan a tuple of start and end parse chart vertices\n        tokens    a list of token identifiers\n        edge      an edge identifier\n        ========= =================================================\n\n    Example:\n\n        Lnk objects should be created using the classmethods:\n\n        >>> Lnk.charspan(0,5)\n        '<0:5>'\n        >>> Lnk.chartspan(0,5)\n        '<0#5>'\n        >>> Lnk.tokens([0,1,2])\n        '<0 1 2>'\n        >>> Lnk.edge(1)\n        '<@1>'\n\n    \"\"\"\n\n    # These types determine how a lnk on an EP or MRS are to be\n    # interpreted, and thus determine the data type/structure of the\n    # lnk data.\n    CHARSPAN = 0 # Character span; a pair of offsets\n    CHARTSPAN = 1 # Chart vertex span: a pair of indices\n    TOKENS = 2 # Token numbers: a list of indices\n    EDGE = 3 # An edge identifier: a number\n\n    def __init__(self, data, type):\n        if type not in (Lnk.CHARSPAN, Lnk.CHARTSPAN, Lnk.TOKENS, Lnk.EDGE):\n            raise ValueError('Invalid lnk type: {}'.format(type))\n        self.type = type\n        self.data = data\n\n    @classmethod\n    def charspan(cls, start, end):\n        \"\"\"\n        Create a Lnk object for a character span.\n\n        Args:\n            start: the initial character position (cfrom)\n            end: the 
final character position (cto)\n        \"\"\"\n        return cls((int(start), int(end)), Lnk.CHARSPAN)\n\n    @classmethod\n    def chartspan(cls, start, end):\n        \"\"\"\n        Create a Lnk object for a chart span.\n\n        Args:\n            start: the initial chart vertex\n            end: the final chart vertex\n        \"\"\"\n        return cls((int(start), int(end)), Lnk.CHARTSPAN)\n\n    @classmethod\n    def tokens(cls, tokens):\n        \"\"\"\n        Create a Lnk object for a token range.\n\n        Args:\n            tokens: a list of token identifiers\n        \"\"\"\n        return cls(tuple(map(int, tokens)), Lnk.TOKENS)\n\n    @classmethod\n    def edge(cls, edge):\n        \"\"\"\n        Create a Lnk object for an edge (used internally in generation).\n\n        Args:\n            edge: an edge identifier\n        \"\"\"\n        return cls(int(edge), Lnk.EDGE)\n\n    def __str__(self):\n        if self.type == Lnk.CHARSPAN:\n            return '<{}:{}>'.format(self.data[0], self.data[1])\n        elif self.type == Lnk.CHARTSPAN:\n            return '<{}#{}>'.format(self.data[0], self.data[1])\n        elif self.type == Lnk.EDGE:\n            return '<@{}>'.format(self.data)\n        elif self.type == Lnk.TOKENS:\n            return '<{}>'.format(' '.join(map(str, self.data)))\n\n    def __repr__(self):\n        return '<Lnk object {} at {}>'.format(str(self), id(self))\n\n    def __eq__(self, other):\n        return self.type == other.type and self.data == other.data\n\n\nclass LnkMixin(object):\n    \"\"\"\n    A mixin class for predications (|EPs| or |Nodes|) or full |Xmrs|\n    objects, which are the types that can be linked to surface strings.\n    This class provides the :py:attr:`~delphin.mrs.lnk.LnkMixin.cfrom`\n    and :py:attr:`~delphin.mrs.lnk.LnkMixin.cto` properties so they are\n    always available (defaulting to a value of -1 if there is no lnk or\n    if the lnk is not a Lnk.CHARSPAN type).\n    \"\"\"\n    @property\n    def cfrom(self):\n        \"\"\"\n        The initial character position in the surface string. Defaults\n        to -1 if there is no valid cfrom value.\n        \"\"\"\n        cfrom = -1\n        try:\n            if self.lnk.type == Lnk.CHARSPAN:\n                cfrom = self.lnk.data[0]\n        except AttributeError:\n            pass # use default cfrom of -1\n        return cfrom\n\n    @property\n    def cto(self):\n        \"\"\"\n        The final character position in the surface string. 
Defaults\n        to -1 if there is no valid cto value.\n        \"\"\"\n        cto = -1\n        try:\n            if self.lnk.type == Lnk.CHARSPAN:\n                cto = self.lnk.data[1]\n        except AttributeError:\n            pass # use default cto of -1\n        return cto\n\n\nclass Hook(object):\n    \"\"\"\n    A container class for TOP, INDEX, and XARG.\n\n    This class simply encapsulates three variables associated with an\n    |Xmrs| object, and none of the arguments are required.\n\n    Args:\n        top: the global top handle\n        index: the semantic index\n        xarg: the external argument (not likely used for a full |Xmrs|)\n        ltop: an alternate spelling of top (top is preferred)\n    \"\"\"\n    def __init__(self, top=None, index=None, xarg=None, ltop=None):\n        self.top = top or ltop\n        self.index = index\n        self.xarg = xarg\n\n    def __repr__(self):\n        return '<Hook object (top={} index={} xarg={}) at {}>'.format(\n            self.top, self.index, self.xarg, id(self)\n        )\n\n    def __eq__(self, other):\n        if not isinstance(other, Hook):\n            return False\n        return (\n            self.top == other.top and\n            self.index == other.index and\n            self.xarg == other.xarg\n        )\n\n    # for compatibility\n    @property\n    def ltop(self):\n        return self.top\n\n    @ltop.setter\n    def ltop(self, value):\n        self.top = value\n\n\n# ARGUMENTS, LINKS, and CONSTRAINTS\n\nclass Argument(AnchorMixin):\n    \"\"\"\n    An argument of an \\*MRS predicate.\n\n    Args:\n        nodeid: the nodeid of the node with the argument\n        argname: the name of the argument (sometimes called \"rargname\")\n        value: the MrsVariable or constant value of the argument\n    \"\"\"\n\n    INTRINSIC_ARG = 0 # ARG0, conventionally\n    VARIABLE_ARG = 1 # The value is the ARG0 of some other EP\n    HANDLE_ARG = 2 # The value is a handle (supertype of next two)\n    LABEL_ARG = 3 # The value is the label of some other EP(s)\n    HCONS_ARG = 4 # The value is the hi variable of an HCONS\n    CONSTANT_ARG = 5 # The value is a constant (e.g. a string)\n\n    def __init__(self, nodeid, argname, value):\n        self.nodeid = nodeid\n        self.argname = argname\n        self.value = value\n        self._type = None\n\n    def __repr__(self):\n        return '<Argument object ({}:{}:{}) at {}>'.format(\n            self.nodeid, self.argname, self.value, id(self)\n        )\n\n    def __eq__(self, other):\n        # ignore missing nodeid?\n        # argname is case insensitive\n        snid = self.nodeid\n        onid = other.nodeid\n        return (\n            (None in (snid, onid) or snid == onid) and\n            self.argname.lower() == other.argname.lower() and\n            self.value == other.value\n        )\n\n    @classmethod\n    def mrs_argument(cls, argname, value):\n        return cls(None, argname, value)\n\n    @classmethod\n    def rmrs_argument(cls, anchor, argname, value):\n        return cls(anchor.vid, argname, value)\n\n    def infer_argument_type(self, xmrs=None):\n        if self.argname == IVARG_ROLE:\n            return Argument.INTRINSIC_ARG\n        elif isinstance(self.value, MrsVariable):\n            if self.value.sort == HANDLESORT:\n                # if there's no xmrs given, then use HANDLE_ARG as it\n                # is the supertype of LABEL_ARG and HCONS_ARG\n                if xmrs is not None:\n                    if xmrs.get_hcons(self.value) is not None:\n                        return Argument.HCONS_ARG\n                    else:\n                        return Argument.LABEL_ARG\n                else:\n                    return Argument.HANDLE_ARG\n            else:\n                return Argument.VARIABLE_ARG\n        else:\n            return Argument.CONSTANT_ARG\n\n    @property\n    def type(self):\n        if self._type is None:\n            self._type = self.infer_argument_type()\n        return self._type\n\n    @type.setter\n    def type(self, value):\n        self._type = value\n\n\nclass Link(object):\n    \"\"\"DMRS-style Links are a way of representing arguments without\n    variables. A Link encodes a start and end node, the argument\n    name, and label information (e.g. 
label equality, qeq, etc).\"\"\"\n    def __init__(self, start, end, argname=None, post=None):\n        self.start = int(start)\n        self.end = int(end)\n        self.argname = argname\n        self.post = post\n\n    def __repr__(self):\n        return '<Link object (#{} :{}/{} > #{}) at {}>'.format(\n            self.start, self.argname or '', self.post, self.end, id(self)\n        )\n\n\nclass HandleConstraint(object):\n    \"\"\"A relation between two handles.\"\"\"\n\n    def __init__(self, hi, relation, lo):\n        self.hi = hi\n        self.relation = relation\n        self.lo = lo\n\n    @classmethod\n    def qeq(cls, hi, lo):\n        return cls(hi, QEQ, lo)\n\n    def __eq__(self, other):\n        return (self.hi == other.hi and\n                self.relation == other.relation and\n                self.lo == other.lo)\n\n    def __hash__(self):\n        return hash(repr(self))\n\n    def __repr__(self):\n        return '<HandleConstraint object ({} {} {}) at {}>'.format(\n            str(self.hi), self.relation, str(self.lo), id(self)\n        )\n\nIndividualConstraint = namedtuple('IndividualConstraint',\n                                  ['target', 'relation', 'clause'])\n\n# PREDICATES AND PREDICATIONS\n\n\nclass Pred(object):\n    \"\"\"\n    A semantic predicate.\n\n    Args:\n        predtype: the type of predicate; valid values are grammarpred,\n            stringpred, or realpred, although in practice one won't use\n            this constructor directly, but instead use one of the\n            classmethods\n        lemma: the lemma of the predicate\n        pos: the part-of-speech; a single, lowercase character\n        sense: the (often omitted) sense of the predicate\n    Returns:\n        an instantiated Pred object\n\n    Preds come in three flavors:\n\n    * **grammar preds** (gpreds): preds defined in a semantic hierarchy\n      in the grammar, and are not necessarily tied to a lexical entry;\n      grammar preds may not begin with a leading underscore\n    * **real preds** (realpreds): preds that are defined as the\n      composition of a lemma, a part-of-speech (pos), and sometimes a\n      sense---parts-of-speech are always single characters, and senses\n      may be numbers or string descriptions\n    * **string preds** (spreds): a string (often double-quoted) that\n      represents a real pred; string preds must begin with a leading\n      underscore\n\n    While MRS representations may distinguish real preds and string\n    preds, in pyDelphin they are equivalent. All well-formed predicates,\n    when represented as strings, end with ``_rel``, but in practice this\n    may not be true (some may end in ``_relation``, or have no such\n    suffix).\n\n    Example:\n\n        Preds are compared using their string representations.\n        Surrounding quotes (double or single) are ignored, and\n        capitalization doesn't matter. In addition, preds may be\n        compared directly to their string representations:\n\n        >>> p1 = Pred.stringpred('_dog_n_1_rel')\n        >>> p2 = Pred.realpred(lemma='dog', pos='n', sense='1')\n        >>> p3 = Pred.grammarpred('dog_n_1_rel')\n        >>> p1 == p2\n        True\n        >>> p1 == '_dog_n_1_rel'\n        True\n        >>> p1 == p3\n        False\n    \"\"\"\n    pred_re = re.compile(\n        r'_?(?P<lemma>.*?)_' # match until last 1 or 2 parts\n        r'((?P<pos>[a-z])_)?' # pos is always only 1 char\n        r'((?P<sense>([^_\\\\]|(?:\\\\.))+)_)?' # no unescaped _s\n        r'(?P<end>rel(ation)?)$', # NB only _rel is valid\n        re.IGNORECASE\n    )\n    # Pred types (used mainly in input/output, not internally in pyDelphin)\n    GRAMMARPRED = 0 # only a string allowed (quoted or not)\n    REALPRED = 1 # may explicitly define lemma, pos, sense\n    STRINGPRED = 2 # quoted string form of realpred\n\n    def __init__(self, predtype, lemma=None, pos=None, sense=None):\n        \"\"\"Extract the lemma, pos, and sense (if applicable) from a pred\n        string, if given, or construct a pred string from those\n        components, if they are given. 
Treat malformed pred strings\n        as simple preds without extracting the components.\"\"\"\n        # GRAMMARPREDs and STRINGPREDs are given by strings (with or without\n        # quotes). STRINGPREDs have an internal structure (defined here:\n        # http://moin.delph-in.net/RmrsPos), but basically:\n        #   _lemma_pos(_sense)?_rel\n        # Note that sense is optional. The initial underscore is meaningful.\n        self.type = predtype\n        self.lemma = lemma\n        self.pos = pos\n        self.sense = str(sense) if sense is not None else sense\n        self.string = None # set by class methods\n\n    def __eq__(self, other):\n        if isinstance(other, Pred):\n            other = other.string\n        return self.string.strip('\"\\'') == other.strip('\"\\'')\n\n    def __str__(self):\n        return self.string\n\n    def __repr__(self):\n        return '<Pred object {} at {}>'.format(self.string, id(self))\n\n    def __hash__(self):\n        return hash(self.string)\n\n    @classmethod\n    def stringpred(cls, predstr):\n        lemma, pos, sense, end = Pred.split_pred_string(predstr.strip('\"\\''))\n        pred = cls(Pred.STRINGPRED, lemma=lemma, pos=pos, sense=sense)\n        pred.string = predstr\n        return pred\n\n    @classmethod\n    def grammarpred(cls, predstr):\n        lemma, pos, sense, end = Pred.split_pred_string(predstr.strip('\"\\''))\n        pred = cls(Pred.GRAMMARPRED, lemma=lemma, pos=pos, sense=sense)\n        pred.string = predstr\n        return pred\n\n    @staticmethod\n    def string_or_grammar_pred(predstr):\n        if predstr.strip('\"').lstrip(\"'\").startswith('_'):\n            return Pred.stringpred(predstr)\n        else:\n            return Pred.grammarpred(predstr)\n\n    @classmethod\n    def realpred(cls, lemma, pos, sense=None):\n        pred = cls(Pred.REALPRED, lemma=lemma, pos=pos, sense=sense)\n        string_tokens = list(filter(bool, [lemma, pos, str(sense or '')]))\n        pred.string = '_'.join([''] + string_tokens + ['rel'])\n        return pred\n\n    @staticmethod\n    def split_pred_string(predstr):\n        \"\"\"\n        Extract the components from a pred string and log errors for any\n        malformedness.\n\n        Args:\n            predstr: a predicate string\n\n        Examples:\n\n            >>> Pred.split_pred_string('_dog_n_1_rel')\n            ('dog', 'n', '1', 'rel')\n            >>> Pred.split_pred_string('quant_rel')\n            ('quant', None, None, 'rel')\n        \"\"\"\n        if not predstr.lower().endswith('_rel'):\n            logging.debug('Predicate does not end in \"_rel\": {}'\n                          .format(predstr))\n        match = Pred.pred_re.search(predstr)\n        if match is None:\n            logging.debug('Unexpected predicate string: {}'.format(predstr))\n            return (predstr, None, None, None)\n        # _lemma_pos(_sense)?_end\n        return (match.group('lemma'), match.group('pos'),\n                match.group('sense'), match.group('end'))\n\n    @staticmethod\n    def is_valid_pred_string(predstr, suffix_required=True):\n        \"\"\"\n        Return True if the given predicate string represents a valid\n        Pred, False otherwise. If suffix_required is False,\n        abbreviated Pred strings will be accepted (e.g. 
_dog_n_1\n        instead of _dog_n_1_rel)\n        \"\"\"\n        predstr = predstr.strip('\"').lstrip(\"'\")\n        if (not suffix_required and\n                predstr.rsplit('_', 1)[-1] not in ('rel', 'relation')):\n            predstr += '_rel'\n        return Pred.pred_re.match(predstr) is not None\n\n    @staticmethod\n    def normalize_pred_string(predstr):\n        \"\"\"\n        Make pred strings more consistent by removing quotes and using\n        the _rel suffix.\n        \"\"\"\n        predstr = predstr.strip('\"').lstrip(\"'\")\n        match = (Pred.pred_re.match(predstr) or\n                 Pred.pred_re.match(predstr + '_rel'))\n        if match:\n            d = match.groupdict()\n            tokens = [d['lemma']]\n            if d['pos']:\n                tokens.append(d['pos'])\n            if d['sense']:\n                tokens.append(d['sense'])\n            tokens.append('rel')\n            return '_'.join(tokens)\n        return None\n\n    def short_form(self):\n        \"\"\"\n        Return the pred string without quotes or a _rel suffix.\n\n        Example:\n\n            >>> p = Pred.stringpred('\"_cat_n_1_rel\"')\n            >>> p.short_form()\n            '_cat_n_1'\n        \"\"\"\n        return self.string.strip('\"').lstrip(\"'\").rsplit('_', 1)[0]\n\n    def is_quantifier(self):\n        return self.pos == QUANTIFIER_SORT\n\n\n@total_ordering\nclass Node(LnkMixin):\n    \"\"\"\n    A very simple predication for DMRSs. Nodes don't have |Arguments|\n    or labels like |EPs|, but they do have a\n    :py:attr:`~delphin.mrs.node.Node.carg` property for constant\n    arguments, and their sortal type is given by the `cvarsort` value\n    on their property mapping.\n\n    Args:\n        nodeid: node identifier\n        pred: node's |Pred|\n        sortinfo: node properties (with cvarsort)\n        lnk: links pred to surface form or parse edges\n        surface: surface string\n        base: base form\n        carg: constant argument string\n    \"\"\"\n\n    def __init__(self, nodeid, pred, sortinfo=None,\n                 lnk=None, surface=None, base=None, carg=None):\n        self.nodeid = int(nodeid) if nodeid is not None else None\n        self.pred = pred\n        # sortinfo is the properties plus cvarsort\n        self.sortinfo = OrderedDict(sortinfo or [])\n        self.lnk = lnk\n        self.surface = surface\n        self.base = base\n        self.carg = carg\n        # accessor method\n        self.get_property = self.sortinfo.get\n\n    def __repr__(self):\n        return '<Node object ({} [{}] {}) at {}>'.format(\n            self.nodeid, self.pred.string, str(self.lnk), id(self)\n        )\n\n    def __eq__(self, other):\n        # not doing self.__dict__ == other.__dict__ right now, because\n        # functions like self.get_property show up there\n        snid = self.nodeid\n        onid = other.nodeid\n        return ((None in (snid, onid) or snid == onid) and\n                self.pred == other.pred and\n                # make one side a regular dict for unordered comparison\n                dict(self.sortinfo.items()) == other.sortinfo and\n                self.lnk == other.lnk and\n                self.surface == other.surface and\n                self.base == other.base and\n                self.carg == other.carg)\n\n    def __lt__(self, other):\n        x1 = (self.cfrom, self.cto, -self.is_quantifier(), self.pred.lemma)\n        try:\n            x2 = (other.cfrom, other.cto, -other.is_quantifier(),\n                  other.pred.lemma)\n            return x1 < x2\n        except AttributeError:\n            return False # comparing Node to non-Node means Node is greater?\n\n    @property\n    def cvarsort(self):\n        \"\"\"\n        The sortal type of the predicate.\n        \"\"\"\n        return self.sortinfo.get(CVARSORT)\n\n    @cvarsort.setter\n    def cvarsort(self, value):\n        self.sortinfo[CVARSORT] = value\n\n    @property\n    def properties(self):\n        \"\"\"\n        The properties of the Node (without `cvarsort`, so it's the set\n        of properties a corresponding |EP| would have).\n        \"\"\"\n        return OrderedDict((k, v) for (k, v) in self.sortinfo.items()\n                           if k != CVARSORT)\n\n    def is_quantifier(self):\n        \"\"\"\n        Return True if the Node is a quantifier, or False otherwise.\n        \"\"\"\n        return 
self.pred.is_quantifier()\n\n\n@total_ordering\nclass ElementaryPredication(LnkMixin, AnchorMixin):\n    \"\"\"\n    An elementary predication (EP) combines a predicate with various\n    structural semantic properties.\n\n    EPs must have a |Pred| and a |MrsVariable| *label*. Well-formed EPs\n    will have an intrinsic argument (e.g. ARG0) on their *args* list,\n    which specifies the intrinsic variable (IV), though it is not\n    required by pyDelphin. However, some methods use an index of IVs to\n    calculate semantic structure, so the absence of an intrinsic\n    argument could cause unexpected behavior.\n\n    Args:\n        pred: The |Pred| of the EP\n        label: label handle\n        anchor: an |MrsVariable| anchor or int nodeid\n        args: a list of the EP's |Arguments|\n        lnk: |Lnk| object associated with the pred\n        surface: surface string\n        base: base form\n    \"\"\"\n\n    def __init__(self, pred, label, anchor=None, args=None,\n                 lnk=None, surface=None, base=None):\n        self.label = label\n        # first args, then can get IV\n        self.argdict = OrderedDict((a.argname, a) for a in (args or []))\n        # Only fill in other attributes if pred is given, otherwise ignore.\n        # This behavior is to help enable the from_node classmethod.\n        self._node = None\n        if pred is not None:\n            iv = self.iv\n            self._node = Node(\n                anchor.vid if anchor else None,\n                pred,\n                sortinfo=iv.sortinfo if iv else None,\n                lnk=lnk,\n                surface=surface,\n                base=base,\n                carg=self.carg\n            )\n\n    @classmethod\n    def from_node(cls, label, node, args=None):\n        ep = cls(None, label, args=args)\n        ep._node = node\n        return ep\n\n    def __repr__(self):\n        return '<ElementaryPredication object ({} ({})) at {}>'.format(\n            self.pred.string, str(self.iv or '?'), id(self)\n        )\n\n    def __eq__(self, other):\n        return (self.label == other.label and\n                self.argdict == other.argdict and\n                self._node == other._node)\n\n    def __lt__(self, other):\n        try:\n            return self._node < other._node\n        except AttributeError:\n            return False # comparing EP to non-EP means EP is greater?\n\n    # these properties provide an interface to the node attributes\n\n    @property\n    def nodeid(self):\n        return self._node.nodeid\n\n    @nodeid.setter\n    def nodeid(self, value):\n        self._node.nodeid = value\n        # also update the args' nodeids\n        for arg in self.argdict.values():\n            arg.nodeid = value\n\n    @property\n    def pred(self):\n        return self._node.pred\n\n    @pred.setter\n    def pred(self, value):\n        self._node.pred = value\n\n    @property\n    def sortinfo(self):\n        return self.iv.sortinfo\n\n    @property\n    def lnk(self):\n        return self._node.lnk\n\n    @lnk.setter\n    def lnk(self, value):\n        self._node.lnk = value\n\n    @property\n    def surface(self):\n        return self._node.surface\n\n    @surface.setter\n    def surface(self, value):\n        self._node.surface = value\n\n    @property\n    def base(self):\n        return self._node.base\n\n    @base.setter\n    def base(self, value):\n        self._node.base = value\n\n    # carg property intentionally left out. 
It should be accessed from\n    # the arg list (see the property below)\n\n    # these properties are specific to the EP's qualities\n\n    @property\n    def intrinsic_variable(self):\n        return self.arg_value(IVARG_ROLE)\n\n    #: A synonym for :py:meth:`intrinsic_variable`\n    iv = intrinsic_variable\n\n    @property\n    def properties(self):\n        try:\n            return self.iv.properties\n        except AttributeError: # in case iv is None\n            return OrderedDict()\n\n    @property\n    def carg(self):\n        return self.arg_value(CONSTARG_ROLE)\n\n    @property\n    def args(self):\n        return list(self.argdict.values())\n\n    def get_arg(self, rargname):\n        return self.argdict.get(rargname)\n\n    def arg_value(self, rargname):\n        try:\n            arg = self.argdict[rargname]\n            return arg.value\n        except KeyError:\n            return None\n\n    def add_argument(self, arg):\n        if arg.nodeid is None:\n            arg.nodeid = self.nodeid\n        elif arg.nodeid != self.nodeid:\n            raise XmrsStructureError(\n                \"Argument's nodeid must match the EP's (or be None).\"\n            )\n        if arg.argname in self.argdict:\n            raise XmrsStructureError(\n                \"Argument with role {} already exists in the EP.\"\n                .format(arg.argname)\n            )\n        self.argdict[arg.argname] = arg\n\n    def is_quantifier(self):\n        return self.pred.is_quantifier()\n","sub_path":"delphin/mrs/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":30954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}{"seq_id":"30595869","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('tombs', '0009_tombimage'),\n        ('geo', '0005_auto_20151123_1434'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='gpstracksession',\n            name='graveyard',\n            field=models.ForeignKey(to='tombs.Graveyard', default=1, verbose_name='graveyard', related_name='gps_tracks'),\n            preserve_default=False,\n        ),\n    ]\n","sub_path":"backend/geo/migrations/0006_gpstracksession_graveyard.py","file_name":"0006_gpstracksession_graveyard.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}{"seq_id":"385924053","text":"\nfrom django.conf.urls import url,include\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom django.views.decorators.cache import cache_page\nfrom bookstore import views\n\nurlpatterns = format_suffix_patterns([\n    url(r'^$', views.api_root),\n    url(r'^books/$',\n        views.BookList.as_view(),\n        name='book-list'),\n    url(r'^books/(?P<pk>[0-9]+)/$',\n        views.BookDetail.as_view(),\n        name='book-detail'),\n\n    url(r'^genres/$',\n        views.GenreList.as_view(),\n        name='genre-list'),\n    url(r'^genres/(?P<pk>[0-9]+)/$',\n        views.GenreDetail.as_view(),\n        name='genre-detail'),\n\n])\n\n\n# Login and logout views for the browsable API\nurlpatterns += [\n    url(r'^api-auth/', include('rest_framework.urls',\n                               namespace='rest_framework')),\n\n]\n\n\n","sub_path":"bookstore/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}{"seq_id":"184747539","text":"import uuid\n\nfrom twisted.trial import unittest\nfrom twisted.internet.task import Clock\nfrom smpp.pdu_builder import DeliverSM, BindTransceiverResp\nfrom smpp.pdu import unpack_pdu\n\nfrom vumi.tests.utils import FakeRedis\nfrom vumi.transports.smpp.clientserver.client import (\n    EsmeTransceiver,\n    EsmeCallbacks,\n    KeyValueBase,\n    KeyValueStore,\n    ESME)\nfrom 
vumi.transports.smpp.clientserver.config import ClientConfig\n\n\nclass KeyValueStoreTestCase(unittest.TestCase):\n\n def setUp(self):\n self.kvs = KeyValueStore()\n KeyValueBase.register(self.kvs.__class__)\n self.prefix = \"smpp_test_%s\" % uuid.uuid4()\n\n def tearDown(self):\n pass\n\n def run_all_tests_on_instance(self, instance):\n self.kvs = instance\n KeyValueBase.register(instance.__class__)\n self.test_implements_abstract()\n self.test_set_get_delete()\n self.test_incr()\n\n def test_instance_test(self):\n newKeyValueStoreTestCase = KeyValueStoreTestCase()\n newKeyValueStoreTestCase.prefix = \"smpp_test_%s\" % uuid.uuid4()\n instance = KeyValueStore()\n newKeyValueStoreTestCase.run_all_tests_on_instance(instance)\n\n def test_implements_abstract(self):\n self.assertTrue(issubclass(KeyValueStore, KeyValueBase))\n self.assertTrue(isinstance(self.kvs, KeyValueBase))\n\n def test_set_get_delete(self):\n key1 = \"%s#cookie\" % self.prefix\n\n try:\n self.assertEqual(self.kvs.get(key1), None)\n self.kvs.set(key1, \"monster\")\n self.assertEqual(self.kvs.get(key1), \"monster\")\n self.kvs.set(key1, \"crumbles\")\n self.assertNotEqual(self.kvs.get(key1), \"monster\")\n self.assertEqual(self.kvs.get(key1), \"crumbles\")\n self.assertEqual(self.kvs.delete(key1), True)\n self.assertEqual(self.kvs.get(key1), None)\n\n except:\n self.kvs.delete(key1)\n raise\n\n def test_incr(self):\n key1 = \"%s#counter\" % self.prefix\n\n try:\n self.assertEqual(self.kvs.get(key1), None)\n self.assertEqual(self.kvs.incr(key1), 1)\n self.kvs.set(key1, 1)\n self.assertEqual(self.kvs.incr(key1), 2)\n self.kvs.set(key1, \"1\")\n self.assertEqual(self.kvs.incr(key1), 2)\n self.kvs.delete(key1)\n self.assertEqual(self.kvs.incr(key1), 1)\n self.assertEqual(self.kvs.incr(key1), 2)\n self.assertEqual(self.kvs.incr(key1), 3)\n self.assertEqual(self.kvs.delete(key1), True)\n\n except:\n self.kvs.delete(key1)\n raise\n\n\nclass FakeTransport(object):\n def __init__(self):\n self.connected = True\n\n def loseConnection(self):\n self.connected = False\n\n\nclass FakeEsmeTransceiver(EsmeTransceiver):\n\n def __init__(self, *args, **kwargs):\n EsmeTransceiver.__init__(self, *args, **kwargs)\n self.transport = FakeTransport()\n self.clock = Clock()\n self.callLater = self.clock.callLater\n\n def send_pdu(self, *args):\n pass\n\n\nclass EsmeSequenceNumberTestCase(unittest.TestCase):\n\n def test_sequence_rollover(self):\n config = ClientConfig(host=\"127.0.0.1\", port=\"0\",\n system_id=\"1234\", password=\"password\")\n esme = FakeEsmeTransceiver(config, FakeRedis(), None)\n self.assertEqual(1, esme.get_seq())\n esme.get_next_seq()\n self.assertEqual(2, esme.get_seq())\n esme.set_seq(4004004004)\n self.assertEqual(4004004004, esme.get_seq())\n esme.get_next_seq()\n self.assertEqual(1, esme.get_seq())\n\n\nclass EsmeTransceiverTestCase(unittest.TestCase):\n def get_esme(self, **callbacks):\n config = ClientConfig(host=\"127.0.0.1\", port=\"0\",\n system_id=\"1234\", password=\"password\")\n esme_callbacks = EsmeCallbacks(**callbacks)\n esme = FakeEsmeTransceiver(config, FakeRedis(), esme_callbacks)\n return esme\n\n def get_sm(self, msg, data_coding=3):\n sm = DeliverSM(1, short_message=msg, data_coding=data_coding)\n return unpack_pdu(sm.get_bin())\n\n def test_deliver_sm_simple(self):\n \"\"\"A simple message should be delivered.\"\"\"\n def cb(**kw):\n self.assertEqual(u'hello', kw['short_message'])\n\n esme = self.get_esme(deliver_sm=cb)\n esme.handle_deliver_sm(self.get_sm('hello'))\n\n def test_deliver_sm_ucs2(self):\n 
\"\"\"A UCS-2 message should be delivered.\"\"\"\n def cb(**kw):\n self.assertEqual(u'hello', kw['short_message'])\n\n esme = self.get_esme(deliver_sm=cb)\n esme.handle_deliver_sm(self.get_sm('\\x00h\\x00e\\x00l\\x00l\\x00o', 8))\n\n def test_bad_sm_ucs2(self):\n \"\"\"An invalid UCS-2 message should be discarded.\"\"\"\n def cb(**kw):\n self.assertEqual(bad_msg, kw['short_message'])\n self.flushLoggedErrors()\n\n esme = self.get_esme(deliver_sm=cb)\n bad_msg = '\\n\\x00h\\x00e\\x00l\\x00l\\x00o'\n esme.handle_deliver_sm(self.get_sm(bad_msg, 8))\n\n def test_bind_timeout(self):\n esme = self.get_esme()\n esme.connectionMade()\n\n self.assertEqual(True, esme.transport.connected)\n self.assertNotEqual(None, esme._lose_conn)\n\n esme.clock.advance(esme.smpp_bind_timeout)\n\n self.assertEqual(False, esme.transport.connected)\n self.assertEqual(None, esme._lose_conn)\n\n def test_bind_no_timeout(self):\n esme = self.get_esme()\n esme.connectionMade()\n\n self.assertEqual(True, esme.transport.connected)\n self.assertNotEqual(None, esme._lose_conn)\n\n esme.handle_bind_transceiver_resp(unpack_pdu(\n BindTransceiverResp(1).get_bin()))\n\n self.assertEqual(True, esme.transport.connected)\n self.assertEqual(None, esme._lose_conn)\n esme.lc_enquire.stop()\n\n\nclass ESMETestCase(unittest.TestCase):\n\n def setUp(self):\n self.client_config = ClientConfig(\n host='localhost',\n port=2775,\n system_id='test_system',\n password='password',\n )\n self.kvs = None\n self.esme_callbacks = None\n self.esme = ESME(self.client_config, self.kvs,\n self.esme_callbacks)\n\n def test_bind_as_transceiver(self):\n self.esme.bindTransciever()\n","sub_path":"vumi/transports/smpp/clientserver/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"354889888","text":"import os\n\nfrom reframe.core.pipeline import RunOnlyRegressionTest\nfrom reframe.settings import settings\nfrom reframe.utility.functions import standard_threshold\nfrom reframe.utility.parsers import StatefulParser\n\n\nclass VASPBaseCheck(RunOnlyRegressionTest):\n def __init__(self, name, **kwargs):\n super().__init__(name, os.path.dirname(__file__), **kwargs)\n\n # Uncomment and set the valid prog. 
environments for your site\n        # self.valid_prog_environs = [ 'PrgEnv-intel' ]\n\n        # Uncomment and adjust to load the VASP module\n        # self.modules = [ 'VASP' ]\n\n        self.sanity_patterns = {\n            '-': {\n                '1 F=\\\\s+(?P<result>\\\\S+)': [\n                    ('result', float,\n                     lambda value, **kwargs:\n                     standard_threshold(\n                         value, (-.85026214E+03, -1e-5, 1e-5)))\n                ],\n            }\n        }\n\n        self.keep_files = ['OUTCAR']\n        self.parser = StatefulParser(standard_threshold)\n        self.perf_patterns = {\n            'OUTCAR': {\n                '(?P<perf_section>General timing and accounting)': [\n                    ('perf_section', str, self.parser.on)\n                ],\n                'Total CPU time used \\\\(sec\\\\):\\\\s+(?P<perf>\\\\S+)': [\n                    ('perf', float, self.parser.match)\n                ]\n            }\n        }\n\n        self.reference = {\n            # Uncomment and adjust the references for your systems/partitions\n            # 'cpusys' : {\n            #     'perf' : (213, None, 0.10)\n            # },\n            # 'gpusys' : {\n            #     'perf' : (71.0, None, 0.10)\n            # },\n            '*': {\n                'perf_section': None,\n            }\n        }\n\n        # Uncomment and set the maintainers and/or tags\n        # self.maintainers = [ 'me' ]\n        # self.tags = { 'example' }\n\n    def setup(self, partition, environ, **job_opts):\n        super().setup(partition, environ, **job_opts)\n\n        # Needed by VASP to avoid segfaults\n        self.job.pre_run = ['ulimit -s unlimited']\n\n\nclass VASPGPUCheck(VASPBaseCheck):\n    def __init__(self, **kwargs):\n        super().__init__('vasp_gpu_check', **kwargs)\n\n        # Uncomment and adjust for your gpu systems\n        # self.valid_systems = ['gpusys']\n\n        self.descr = 'VASP GPU check'\n\n        # Reset sources dir relative to the SCS apps prefix\n        self.sourcesdir = os.path.join(self.sourcesdir, 'gpu')\n\n        self.executable = 'vasp_gpu'\n        self.variables = {'CRAY_CUDA_MPS': '1'}\n\n        # Uncomment and adjust for your site\n        # self.num_tasks = 16\n        # self.num_tasks_per_node = 1\n        self.num_gpus_per_node = 1\n\n\nclass VASPCPUCheck(VASPBaseCheck):\n    def __init__(self, **kwargs):\n        super().__init__('vasp_cpu_check', **kwargs)\n\n        self.descr = 'VASP CPU check'\n\n        # Uncomment and adjust for your cpu systems\n        # self.valid_systems = [ 'cpusys' ]\n\n        # Reset sources dir relative to the SCS apps prefix\n        self.sourcesdir = os.path.join(self.sourcesdir, 'cpu')\n        self.executable = 'vasp_std'\n\n        # Uncomment and adjust for your site\n        # self.use_multithreading = True\n        # self.num_tasks = 32\n        # self.num_tasks_per_node = 2\n\n\ndef _get_checks(**kwargs):\n    return [VASPGPUCheck(**kwargs), VASPCPUCheck(**kwargs)]\n","sub_path":"examples/apps/vasp/vasp_check.py","file_name":"vasp_check.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}{"seq_id":"40017228","text":"#! 
/usr/bin/python\nfrom __future__ import print_function\nfrom random import *\nimport sys\nimport redis\nimport time\nsys.path.append('..')\n#from orderbook import OrderBook\n\nmyredis = redis.Redis(host='localhost', port=6379, db=0)\n\ndef generate_new_buy(trade_id):\n return {'type' : 'limit', \n 'side' : 'bid', \n 'quantity' : randint(1,1000),\n 'price' : randint(900,1050),\n 'trade_id' : trade_id}\n \ndef generate_cross_buy(trade_id):\n return {'type' : 'limit', \n 'side' : 'bid', \n 'quantity' : randint(1,1000), \n 'price' : randint(1055,1200),\n 'trade_id' : trade_id}\n\ndef generate_new_sell(trade_id):\n return {'type' : 'limit', \n 'side' : 'ask', \n 'quantity' : randint(1,1000), \n 'price' : randint(1055,1200),\n 'trade_id' : trade_id}\n \ndef generate_cross_sell(trade_id):\n return {'type' : 'limit', \n 'side' : 'ask', \n 'quantity' : randint(1,1000),\n 'price' : randint(900,1050),\n 'trade_id' : trade_id}\n\ndef gen_orders(nb_orders_prefilled, verbose = False):\n \n o_id = 0\n orders = []\n for trade_id in range(nb_orders_prefilled):\n orders.append( generate_new_buy(trade_id) )\n orders.append( generate_new_buy(trade_id) )\n orders.append( generate_cross_buy(trade_id) )\n orders.append( generate_new_sell(trade_id) )\n orders.append( generate_new_sell(trade_id) )\n\n orders.append( generate_new_sell(trade_id) )\n orders.append( generate_new_sell(trade_id) )\n orders.append( generate_cross_sell(trade_id) )\n orders.append( generate_new_sell(trade_id) )\n orders.append( generate_new_sell(trade_id) )\n \n for order in orders:\n newitem = []\n\n newitem.append(str(o_id))\n newitem.append(order[\"type\"][0].upper())\n newitem.append(order[\"side\"][0].upper())\n newitem.append(str(order[\"quantity\"]))\n newitem.append(str(order[\"price\"]))\n newitem.append(str(order[\"trade_id\"]))\n newitem.append(str(current_milli_time()))\n\n insertstring = \"-\".join(newitem)\n myredis.rpush(\"order\", insertstring)\n\n o_id = o_id + 1\n print (o_id)\n \ndef current_milli_time():\n return int(round(time.time() * 1000))\n\ndef main(argv):\n \n # nb buys and sells to prefill orderbook\n gen_orders(100)\n\n #pprint(data)\n pass\n\nif __name__ == \"__main__\":\n main(sys.argv)\n\n\n","sub_path":"HelperScripts/genOrders.py","file_name":"genOrders.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"241557891","text":"#!/usr/bin/env python3\n\nfrom flask import Flask, render_template, request, redirect\nimport hash_it\nimport hash_crack\nimport time\n\n#default (starting) values of the variables used in the app\nhashes=[]\ncracked=\"Not cracked (yet)\"\ncrack_time=\"n/a\"\n\napp = Flask(__name__)\napp.config['APPLICATION_ROOT'] = \"/fun-with-hashes\"\nprefix=\"/fun-with-hashes\"\n\n@app.route(prefix+'/')\ndef hasher():\n\n\treturn render_template(\"hash.html\", hashes = hashes, cracked = cracked, crack_time = crack_time, prefix=prefix)\n\n#when the user clicks on the \"Hish Hash\" button, the key from the input box is passed to the hasher module\n@app.route(prefix+'/hish-hash', methods = ['POST'])\ndef hish_hash():\n\n\tif request.method == \"POST\":\n\n\t\tsource_string=request.form['source_string']\n\n\t\tdel hashes[:]\n\t\t#hashes.clear()\n\n\t\thashes.append({\"type\":\"MD5\", \"digest\": hash_it.hash_md5(source_string)})\n\t\thashes.append({\"type\":\"SHA1\", \"digest\": hash_it.hash_sha1(source_string)})\n\t\thashes.append({\"type\":\"SHA224\", \"digest\": 
hash_it.hash_sha224(source_string)})\n\t\thashes.append({\"type\":\"SHA256\", \"digest\": hash_it.hash_sha256(source_string)})\n\t\thashes.append({\"type\":\"SHA384\", \"digest\": hash_it.hash_sha384(source_string)})\n\t\thashes.append({\"type\":\"SHA512\", \"digest\": hash_it.hash_sha512(source_string)})\n\n\treturn redirect(prefix+'/')\n\n#When the user clicks on the \"Crick Crack\" button, the hash from the input box and the chosen hash function are passed to the cracker module\n@app.route(prefix+'/crick-crack', methods = ['POST'])\ndef crick_crack():\n\n\tstart = time.time()\n\n\t# variables are used by the main function (hasher) as well\n\tglobal cracked\n\tglobal crack_time\n\t\n\tif request.method == \"POST\":\n\n\t\ttarget_hash=request.form['target_hash'].strip()\n\t\tcracked=hash_crack.crack_it(target_hash, request.form[\"hash_type\"])\n\n\tend = time.time()\n\t\n\tcrack_time = str(end - start)\n\n\treturn redirect(prefix+'/')\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', debug = True)\n","sub_path":"hash_web.py","file_name":"hash_web.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}{"seq_id":"458288219","text":"import pygame\nimport sys\nfrom settings import Settings\nfrom background import Background\nfrom player import Player \nfrom weapon import Weapon \nfrom score import Score \nfrom pygame.sprite import Group\nfrom pygame.sprite import Sprite\nfrom random import randint\nfrom button import Button\nisGirl = False\n\ndef onKeyDown(screen, player, key, running):\n\tif running == False and key == pygame.K_TAB:\n\t\treturn True\n\tif player == None:\n\t\treturn \n\tif key == pygame.K_DOWN:\n\t\tplayer.offset(player.profile.playerSpeed)\n\telif key == pygame.K_UP:\n\t\tplayer.offset(-player.profile.playerSpeed)\n\telif key == pygame.K_SPACE:\n\t\tif len(player.weapons) < 3:\n\t\t\tplayer.attack()\n\telif key == pygame.K_h:\n\t\tglobal isGirl \n\t\tif isGirl:\n\t\t\tisGirl = False\n\t\telse:\n\t\t\tisGirl = True\n\ndef onKeyUp(player, running):\n\tif running:\n\t\tplayer.offset(0)\n\ndef handleEvents(screen, player, event, playButton, running):\n\tif event.type == pygame.QUIT:\n\t\tpygame.quit()\n\t\tsys.exit()\n\telif event.type == pygame.KEYDOWN:\n\t\treturn onKeyDown(screen, player, event.key, running)\n\telif event.type == pygame.KEYUP:\n\t\tonKeyUp(player, running)\n\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\treturn playButton.check(pygame.mouse.get_pos())\n\nrunning = False\nprofile = Settings()\npygame.init()\nscreen = pygame.display.set_mode((profile.screen_width, profile.screen_height))\npygame.display.set_caption('Konoha Crush')\n\nwall = Background(screen, 'images/konohaCrush.jpg')\nforest = Background(screen, 'images/background.jpg')\nplayButton = Button(screen, 'Play', (0, 0, 0), (screen.get_rect().centerx, screen.get_rect().centery))\nmessage = Button(screen, 'Click TAB to play', (255, 255, 255), (screen.get_rect().centerx, screen.get_rect().bottom))\nopeningImage = True\n\nreqInit = True\nvillain, hero = None, None\n\nwhile True:\n\tfor event in pygame.event.get():\n\t\tif handleEvents(screen, hero, event, playButton, running) == True:\n\t\t\trunning = True\n\t\t\topeningImage = False\n\n\tif running == False:\n\t\tif openingImage: \n\t\t\twall.show()\n\t\telse: \n\t\t\tforest.show()\n\t\t\thero.show()\n\t\t\tvillain.show()\n\t\tplayButton.show()\n\t\tmessage.show()\n\telse:\n\t\tif reqInit:\n\t\t\thero = Player(screen, 'images/naruto.png', 
'images/rasengan.png', True, 'images/girl.png', 'images/love.png')\n\t\t\tvillain = Player(screen, 'images/snake.png', 'images/sword.png', False)\n\t\t\treqInit = False\n\t\tif isGirl:\n\t\t\thero.changeGirl()\n\t\telse:\n\t\t\thero.changeBoy()\n\n\t\tforest.show()\n\t\thero.update(villain)\n\t\tvillain.update(hero)\n\t\thero.show()\t\t\t\n\t\tvillain.show()\n\t\tif hero.get_points() <= 0 or villain.get_points() <= 0: \n\t\t\trunning = False\n\t\t\treqInit = True\n\t\t\tisGirl = False\n\t\t\tif hero.get_points() > 0:\n\t\t\t\tstatus = 'won'\n\t\t\telse:\n\t\t\t\tstatus = 'lost'\n\t\t\tplayButton = Button(screen, 'Naruto ' + status, (255, 255, 255), (screen.get_rect().centerx, screen.get_rect().centery))\n\tpygame.display.flip()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}{"seq_id":"245721717","text":"'''\nTask \nRead a line of input from stdin and save it to a variable, . Then print the contents of to stdout.\n\nInput Format\n\nA single line containing sentence .\n\nConstraints\n\nOutput Format\n\nPrint the contents of to stdout.\n\nSample Input\n\nHow many chickens does it take to cross the road?\nSample Output\n\nHow many chickens does it take to cross the road?\n\n'''\n\ndef read():\n    s = input()\n    return s\n\nif __name__ == '__main__':\n    print(read())\n\n","sub_path":"Python/Introduction_Reading Raw_Input .py","file_name":"Introduction_Reading Raw_Input .py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}{"seq_id":"500391840","text":"from datetime import datetime, timedelta\r\n\r\nfrom flask import g, request, render_template\r\n\r\nfrom . 
import main\r\nfrom ..forms import SearchForm\r\nfrom ..models import Project\r\n\r\n\r\n@main.before_app_request\r\ndef before_request():\r\n g.previous_url = request.referrer\r\n g.search_form = SearchForm()\r\n\r\n\r\n@main.route(\"/\")\r\ndef index():\r\n page = request.args.get(\"page\", 1, type=int)\r\n yesterday = datetime.today() - timedelta(days=1)\r\n projects = (\r\n Project.query.filter(Project.created >= yesterday)\r\n .order_by(Project.created.desc())\r\n .paginate(page=page, per_page=2)\r\n )\r\n\r\n return render_template(\"index.html\", title=\"Home\", projects=projects)\r\n","sub_path":"shareproject/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"517494515","text":"from django.db import models\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.conf import settings\nfrom django.db.models import F\n\nfrom jsonfield import JSONField\n\nimport random\nimport json\n\nfrom experiments.dateutils import now\nfrom experiments import conf\n\n\nCONTROL_STATE = 0\nENABLED_STATE = 1\nTRACK_STATE = 3\n\nSTATES = (\n (CONTROL_STATE, 'Default/Control'),\n (ENABLED_STATE, 'Enabled'),\n (TRACK_STATE, 'Track'),\n)\n\nclass Counter(models.Model):\n key = models.CharField(primary_key=True, max_length=128)\n hash = models.CharField( max_length=96)\n field = models.CharField( max_length=20)\n count = models.IntegerField(default=0,blank=False,null=False)\n\nclass Count(): \n @classmethod\n def next(cls, hash=\"default\", field=\"default\", count = 1):\n key = \"%s:%s\" % (hash, field)\n try:\n Counter.objects.filter(pk = key).update(count = (F('count') + count))\n return Counter.objects.get(pk = key).count\n except Counter.DoesNotExist:\n return Counter.objects.create(key = key, hash=hash, field=field, count = count).count\n\n @classmethod\n def get(cls, hash=\"default\", field=\"default\"):\n key = \"%s:%s\" % (hash, field)\n try:\n return Counter.objects.get(pk = key).count\n except Counter.DoesNotExist:\n return Counter.objects.create(key = key, hash=hash, field=field, count = 0).count\n\n @classmethod\n def getall(cls, hash=\"default\"):\n try:\n return Counter.objects.filter(hash = hash)\n except Counter.DoesNotExist:\n return None\n\n\n @classmethod\n def delete(cls, hash=\"default\"):\n try:\n objects = Counter.objects.filter(hash = hash)\n \n for o in objects:\n o.delete()\n \n objects = Counter.objects.filter(hash = hash)\n return None\n except Counter.DoesNotExist:\n return None\n\n @classmethod\n def deletefield(cls, hash=\"default\", field=\"default\"):\n key = \"%s:%s\" % (hash, field)\n try:\n return Counter.objects.get(pk = key).delete()\n except Counter.DoesNotExist:\n return None\n \n @classmethod\n def len(cls, hash=\"default\"):\n try:\n return Counter.objects.filter(hash = hash).count()\n except Counter.DoesNotExist:\n return 0\n \n @classmethod\n def next_hex(cls, key = 'default:default'):\n return hex(Counter.next(key)).replace('0x', '').replace('L', '')\n \n def __unicode__(self):\n return u'%s = %s' % (self.pk, self.count)\n \n\nclass Experiment(models.Model):\n name = models.CharField(primary_key=True, max_length=128)\n description = models.TextField(default=\"\", blank=True, null=True)\n alternatives = JSONField(default={}, blank=True)\n relevant_chi2_goals = models.TextField(default=\"\", null=True, blank=True)\n relevant_mwu_goals = models.TextField(default=\"\", null=True, blank=True)\n\n state = 
models.IntegerField(default=CONTROL_STATE, choices=STATES)\n\n start_date = models.DateTimeField(default=now, blank=True, null=True, db_index=True)\n end_date = models.DateTimeField(blank=True, null=True)\n\n def is_displaying_alternatives(self):\n if self.state == CONTROL_STATE:\n return False\n elif self.state == ENABLED_STATE:\n return True\n elif self.state == TRACK_STATE:\n return True\n else:\n raise Exception(\"Invalid experiment state %s!\" % self.state)\n\n def is_accepting_new_users(self):\n if self.state == CONTROL_STATE:\n return False\n elif self.state == ENABLED_STATE:\n return True\n elif self.state == TRACK_STATE:\n return False\n else:\n raise Exception(\"Invalid experiment state %s!\" % self.state)\n\n def ensure_alternative_exists(self, alternative, weight=None):\n if alternative not in self.alternatives:\n self.alternatives[alternative] = {}\n self.alternatives[alternative]['enabled'] = True\n self.save()\n if weight is not None and 'weight' not in self.alternatives[alternative]:\n self.alternatives[alternative]['weight'] = float(weight)\n self.save()\n\n @property\n def default_alternative(self):\n for alternative, alternative_conf in self.alternatives.items():\n if alternative_conf.get('default'):\n return alternative\n return conf.CONTROL_GROUP\n\n def set_default_alternative(self, alternative):\n for alternative_name, alternative_conf in self.alternatives.items():\n if alternative_name == alternative:\n alternative_conf['default'] = True\n elif 'default' in alternative_conf:\n del alternative_conf['default']\n\n def random_alternative(self):\n if all('weight' in alt for alt in self.alternatives.values()):\n return weighted_choice([(name, details['weight']) for name, details in self.alternatives.items()])\n else:\n return random.choice(list(self.alternatives.keys()))\n\n def __unicode__(self):\n return self.name\n\n def to_dict(self):\n data = {\n 'name': self.name,\n 'start_date': self.start_date,\n 'end_date': self.end_date,\n 'state': self.state,\n 'description': self.description,\n 'relevant_chi2_goals': self.relevant_chi2_goals,\n 'relevant_mwu_goals': self.relevant_mwu_goals,\n 'default_alternative': self.default_alternative,\n 'alternatives': ','.join(self.alternatives.keys()),\n }\n return data\n\n def to_dict_serialized(self):\n return json.dumps(self.to_dict(), cls=DjangoJSONEncoder)\n\n\nclass Enrollment(models.Model):\n \"\"\" A participant in a split testing experiment \"\"\"\n user = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'))\n experiment = models.ForeignKey(Experiment)\n enrollment_date = models.DateTimeField(auto_now_add=True)\n last_seen = models.DateTimeField(null=True)\n alternative = models.CharField(max_length=50)\n\n class Meta:\n unique_together = ('user', 'experiment')\n\n def __unicode__(self):\n return u'%s - %s' % (self.user, self.experiment)\n\n\ndef weighted_choice(choices):\n total = sum(w for c, w in choices)\n r = random.uniform(0, total)\n upto = 0\n for c, w in choices:\n upto += w\n if upto >= r:\n return c\n\n\n","sub_path":"experiments/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"630169812","text":"from django.conf.urls import url\n\nfrom blog import views\nurlpatterns = [\n url(r'^$', views.index, name=\"blog_index\"),\n url(r'^detail/(?P\\d+)/$', views.detail, name=\"blog_detail\"),\n url(r'^aboutme$', views.aboutme, name=\"aboutme\"),\n url(r'^tag/(?P\\d+)/(?P\\d+)/$', 
views.tagindex, name=\"blog_tagindex\"),\n url(r'^cata/(?P\\d+)/(?P\\d+)/$', views.cataindex, name=\"blog_cataindex\"),\n ]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"243404240","text":"import pandas as pd\nfrom sklearn.neighbors import KNeighborsClassifier\n\nimport classes.KnnNetwork as KnnNetwork\n\n\n# Import iris dataset\ndata = pd.read_csv(\"assets/data/iris.csv\")\n\nkn = KnnNetwork.KnnNetwork()\n\n#print(\"Single entry\", data.iloc[0])\n#print(\"Several entries\", data.iloc[:5])\n#print(\"Several keys\", data.iloc[:5, :-1])\n#print(\"Several names\", data.iloc[:5, -1:])\n#print(data.head())\n\n\ndef processData(data):\n\tkeys = [ x[-1] for x in data.values.tolist() ]\n\tentries = [ x[:-1] for x in data.values.tolist() ]\n\treturn list(zip(keys, entries))\n\ntrainingSet = processData(data.iloc[:105])\nvalidationSet = processData(data.iloc[105:135])\ntestSet = processData(data.iloc[135:])\n\nkn.train(trainingSet)\n\ncorrectTraining = 0\ndef runOwn(data, k, printLabel):\n\tcorrect = 0\n\tfor key, values in data:\n\t\tif kn.guess(values, k)[0] == key:\n\t\t\tcorrect += 1\n\n\tprint(printLabel, correct, \"/\", len(data))\n\ndef runSciKit(valData, k, printLabel):\n\tcorrect = 0\n\tclazz = KNeighborsClassifier(n_neighbors=k)\n\tclazz.fit(data.iloc[:105, 0:4], data.iloc[:105, -1])\n\tfor key, values in valData:\n\t\tif clazz.predict([values])[0] == key:\n\t\t\tcorrect += 1\n\n\tprint(printLabel, correct, \"/\", len(valData))\n\n\ntest = [7.2, 3.6, 5.1, 2.5]\n\n#clazz = KNeighborsClassifier(n_neighbors=5)\n#clazz.fit(data.iloc[:105, 0:4], data.iloc[:105, -1])\n#for key, values in validationSet:\n#\tprint(\"Own\", kn.guess(values, 5), \" with actual \", key)\n#\tprint(\"SciKit\", clazz.predict([values]), clazz.kneighbors([values])[1], \" with actual\", key)\nfor x in range(1, 8, 2):\n\trunOwn(validationSet, x, \"Own\" + str(x))\n\trunSciKit(validationSet, x, \"SciKit\" + str(x))\n\n#print(kn.guess(validationSet[0][1], 3))\n\n","sub_path":"first-steps/k_nearest_neighbour/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"549093433","text":"# Copyright 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# -----------------------------------------------------------------------------\n\nimport unittest\n\nfrom sawtooth_sdk.consensus.zmq_service import ZmqService\nfrom sawtooth_sdk.messaging.future import Future\nfrom sawtooth_sdk.messaging.future import FutureResult\nfrom sawtooth_sdk.protobuf import consensus_pb2\nfrom sawtooth_sdk.protobuf.validator_pb2 import Message\n\n\nclass TestService(unittest.TestCase):\n def setUp(self):\n self.mock_stream = unittest.mock.Mock()\n self.service = ZmqService(\n stream=self.mock_stream,\n timeout=10)\n\n def _make_future(self, message_type, content):\n fut = 
Future('test')\n fut.set_result(FutureResult(\n message_type=message_type,\n content=content))\n return fut\n\n def test_send_to(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_SEND_TO_RESPONSE,\n content=consensus_pb2.ConsensusSendToResponse(\n status=consensus_pb2.ConsensusSendToResponse.OK\n ).SerializeToString())\n\n self.service.send_to(\n receiver_id=b'receiver_id',\n message_type='message_type',\n payload=b'payload')\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_SEND_TO_REQUEST,\n content=consensus_pb2.ConsensusSendToRequest(\n message_type='message_type',\n content=b'payload',\n receiver_id=b'receiver_id').SerializeToString())\n\n def test_broadcast(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_BROADCAST_RESPONSE,\n content=consensus_pb2.ConsensusBroadcastResponse(\n status=consensus_pb2.ConsensusBroadcastResponse.OK\n ).SerializeToString())\n\n self.service.broadcast(\n message_type='message_type',\n payload=b'payload')\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_BROADCAST_REQUEST,\n content=consensus_pb2.ConsensusBroadcastRequest(\n message_type='message_type',\n content=b'payload').SerializeToString())\n\n def test_initialize_block(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_INITIALIZE_BLOCK_RESPONSE,\n content=consensus_pb2.ConsensusInitializeBlockResponse(\n status=consensus_pb2.ConsensusInitializeBlockResponse.OK\n ).SerializeToString())\n\n self.service.initialize_block(previous_id=b'test')\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_INITIALIZE_BLOCK_REQUEST,\n content=consensus_pb2.ConsensusInitializeBlockRequest(\n previous_id=b'test').SerializeToString())\n\n def test_summarize_block(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_SUMMARIZE_BLOCK_RESPONSE,\n content=consensus_pb2.ConsensusSummarizeBlockResponse(\n status=consensus_pb2.ConsensusSummarizeBlockResponse.OK,\n summary=b'summary').SerializeToString())\n\n result = self.service.summarize_block()\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_SUMMARIZE_BLOCK_REQUEST,\n content=consensus_pb2.ConsensusSummarizeBlockRequest()\n .SerializeToString())\n\n self.assertEqual(result, b'summary')\n\n def test_finalize_block(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_FINALIZE_BLOCK_RESPONSE,\n content=consensus_pb2.ConsensusFinalizeBlockResponse(\n status=consensus_pb2.ConsensusFinalizeBlockResponse.OK,\n block_id=b'block_id').SerializeToString())\n\n result = self.service.finalize_block(data=b'test')\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_FINALIZE_BLOCK_REQUEST,\n content=consensus_pb2.ConsensusFinalizeBlockRequest(\n data=b'test').SerializeToString())\n\n self.assertEqual(result, b'block_id')\n\n def test_cancel_block(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_CANCEL_BLOCK_RESPONSE,\n content=consensus_pb2.ConsensusCancelBlockResponse(\n status=consensus_pb2.ConsensusCancelBlockResponse.OK\n ).SerializeToString())\n\n self.service.cancel_block()\n\n request = consensus_pb2.ConsensusCancelBlockRequest()\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_CANCEL_BLOCK_REQUEST,\n 
content=request.SerializeToString())\n\n def test_check_blocks(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_CHECK_BLOCKS_RESPONSE,\n content=consensus_pb2.ConsensusCheckBlocksResponse(\n status=consensus_pb2.ConsensusCheckBlocksResponse.OK\n ).SerializeToString())\n\n self.service.check_blocks(priority=[b'test1', b'test2'])\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_CHECK_BLOCKS_REQUEST,\n content=consensus_pb2.ConsensusCheckBlocksRequest(\n block_ids=[b'test1', b'test2']).SerializeToString())\n\n def test_commit_block(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_COMMIT_BLOCK_RESPONSE,\n content=consensus_pb2.ConsensusCommitBlockResponse(\n status=consensus_pb2.ConsensusCommitBlockResponse.OK\n ).SerializeToString())\n\n self.service.commit_block(block_id=b'test')\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_COMMIT_BLOCK_REQUEST,\n content=consensus_pb2.ConsensusCommitBlockRequest(\n block_id=b'test').SerializeToString())\n\n def test_ignore_block(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_IGNORE_BLOCK_RESPONSE,\n content=consensus_pb2.ConsensusIgnoreBlockResponse(\n status=consensus_pb2.ConsensusIgnoreBlockResponse.OK\n ).SerializeToString())\n\n self.service.ignore_block(block_id=b'test')\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_IGNORE_BLOCK_REQUEST,\n content=consensus_pb2.ConsensusIgnoreBlockRequest(\n block_id=b'test').SerializeToString())\n\n def test_fail_block(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_FAIL_BLOCK_RESPONSE,\n content=consensus_pb2.ConsensusFailBlockResponse(\n status=consensus_pb2.ConsensusFailBlockResponse.OK\n ).SerializeToString())\n\n self.service.fail_block(block_id=b'test')\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_FAIL_BLOCK_REQUEST,\n content=consensus_pb2.ConsensusFailBlockRequest(\n block_id=b'test').SerializeToString())\n\n def test_get_blocks(self):\n block_1 = consensus_pb2.ConsensusBlock(\n block_id=b'block1',\n previous_id=b'block0',\n signer_id=b'signer1',\n block_num=1,\n payload=b'test1')\n\n block_2 = consensus_pb2.ConsensusBlock(\n block_id=b'block2',\n previous_id=b'block1',\n signer_id=b'signer2',\n block_num=2,\n payload=b'test2')\n\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_BLOCKS_GET_RESPONSE,\n content=consensus_pb2.ConsensusBlocksGetResponse(\n status=consensus_pb2.ConsensusBlocksGetResponse.OK,\n blocks=[block_1, block_2]).SerializeToString())\n\n blocks = self.service.get_blocks(block_ids=[b'id1', b'id2'])\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_BLOCKS_GET_REQUEST,\n content=consensus_pb2.ConsensusBlocksGetRequest(\n block_ids=[b'id1', b'id2']).SerializeToString())\n\n self.assertEqual({\n block_id: (\n block.previous_id,\n block.signer_id,\n block.block_num,\n block.payload)\n for block_id, block in blocks.items()\n }, {\n b'block1': (b'block0', b'signer1', 1, b'test1'),\n b'block2': (b'block1', b'signer2', 2, b'test2'),\n })\n\n def test_get_chain_head(self):\n block = consensus_pb2.ConsensusBlock(\n block_id=b'block',\n previous_id=b'block0',\n signer_id=b'signer',\n block_num=1,\n payload=b'test')\n\n self.mock_stream.send.return_value = self._make_future(\n 
message_type=Message.CONSENSUS_CHAIN_HEAD_GET_RESPONSE,\n content=consensus_pb2.ConsensusChainHeadGetResponse(\n status=consensus_pb2.ConsensusChainHeadGetResponse.OK,\n block=block).SerializeToString())\n\n chain_head = self.service.get_chain_head()\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_CHAIN_HEAD_GET_REQUEST,\n content=consensus_pb2.ConsensusChainHeadGetRequest()\n .SerializeToString())\n\n self.assertEqual(chain_head.block_id, b'block')\n self.assertEqual(chain_head.previous_id, b'block0')\n self.assertEqual(chain_head.signer_id, b'signer')\n self.assertEqual(chain_head.block_num, 1)\n self.assertEqual(chain_head.payload, b'test')\n\n def test_get_settings(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_SETTINGS_GET_RESPONSE,\n content=consensus_pb2.ConsensusSettingsGetResponse(\n status=consensus_pb2.ConsensusSettingsGetResponse.OK,\n entries=[\n consensus_pb2.ConsensusSettingsEntry(\n key='key1',\n value='value1'),\n consensus_pb2.ConsensusSettingsEntry(\n key='key2',\n value='value2')]).SerializeToString())\n\n entries = self.service.get_settings(\n block_id=b'test',\n settings=['test1', 'test2'])\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_SETTINGS_GET_REQUEST,\n content=consensus_pb2.ConsensusSettingsGetRequest(\n block_id=b'test',\n keys=['test1', 'test2']).SerializeToString())\n\n self.assertEqual(\n entries, {\n 'key1': 'value1',\n 'key2': 'value2',\n })\n\n def test_get_state(self):\n self.mock_stream.send.return_value = self._make_future(\n message_type=Message.CONSENSUS_STATE_GET_RESPONSE,\n content=consensus_pb2.ConsensusStateGetResponse(\n status=consensus_pb2.ConsensusStateGetResponse.OK,\n entries=[\n consensus_pb2.ConsensusStateEntry(\n address='address1',\n data=b'data1'),\n consensus_pb2.ConsensusStateEntry(\n address='address2',\n data=b'data2')]).SerializeToString())\n\n entries = self.service.get_state(\n block_id=b'test',\n addresses=['test1', 'test2'])\n\n self.mock_stream.send.assert_called_with(\n message_type=Message.CONSENSUS_STATE_GET_REQUEST,\n content=consensus_pb2.ConsensusStateGetRequest(\n block_id=b'test',\n addresses=['test1', 'test2']).SerializeToString())\n\n self.assertEqual(\n entries, {\n 'address1': b'data1',\n 'address2': b'data2',\n })\n","sub_path":"tests/test_zmq_service.py","file_name":"test_zmq_service.py","file_ext":"py","file_size_in_byte":12818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"481445878","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n__author__ = 'Mike'\n\n'''Two ways to give scrapy its initial URLs:\n one is the start_urls constant, which requires defining a parse() method;\n the other is defining a start_requests() method directly.\n'''\n\nclass simpleUrl(scrapy.Spider):\n name = \"simpleUrl\"\n\n '''Shorthand that skips start_requests(): just define start_urls'''\n start_urls = ['http://lab.scrapyd.cn/page/1/',\n 'http://lab.scrapyd.cn/page/2/', ]\n\n #When using the start_urls shorthand, a parse() method must be defined\n\n def parse(self, response):\n page = response.url.split(\"/\")[-2]\n filename = 'mingyan-%s.html' % page\n with open(filename, 'wb') as f:\n f.write(response.body) # response.body holds the page that was just downloaded\n self.log('Saved file: %s' % filename)","sub_path":"articlespider/articlespider/spiders/simpleUrl.py","file_name":"simpleUrl.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"352770409","text":"#!/usr/bin/env python\n# Author: Angus Allen.\n# Date: 13 July 2016.\n# Summary: Executes queries on 
the tournament database for the tournament project.\n# No extra credits attempted\n\nimport psycopg2\nimport bleach\n\ndef connect():\n \"\"\"Connect to the PostgreSQL database. Returns a database connection.\"\"\"\n return psycopg2.connect(\"dbname=tournament\")\n\ndef deleteMatches():\n \"\"\"Remove all the match records from the database.\"\"\"\n db = connect()\n c = db.cursor()\n c.execute('DELETE FROM matches')\n db.commit()\n db.close()\n\ndef deletePlayers():\n \"\"\"Remove all the player records from the database.\"\"\"\n db = connect()\n c = db.cursor()\n c.execute('DELETE FROM players')\n db.commit()\n db.close()\n\ndef countPlayers():\n \"\"\"Returns the number of players currently registered.\"\"\"\n db = connect()\n c = db.cursor()\n c.execute('SELECT COALESCE(count(*),0) FROM players')\n rows = c.fetchall()\n db.close()\n for row in rows:\n result = row[0]\n return result\n\ndef registerPlayer(name):\n \"\"\"Adds a player to the tournament database.\n\n The database assigns a unique serial id number for the player. (This\n should be handled by your SQL database schema, not in your Python code.)\n\n Args:\n name: the player's full name (need not be unique).\n \"\"\"\n db = connect()\n c = db.cursor()\n c.execute('INSERT INTO players (player_name) VALUES (%s)', (name,))\n db.commit()\n db.close()\n\ndef playerStandings():\n \"\"\"Returns a list of the players and their win records, sorted by wins.\n\n The first entry in the list should be the player in first place, or a player\n tied for first place if there is currently a tie.\n\n Returns:\n A list of tuples, each of which contains (id, name, wins, matches):\n id: the player's unique id (assigned by the database)\n name: the player's full name (as registered)\n wins: the number of matches the player has won\n matches: the number of matches the player has played\n \"\"\"\n standings = []\n db = connect()\n c = db.cursor()\n \"\"\"Query to be executed below joins two new tables with the following names:\n win_rank\n loss_count\n\n win_rank is itself the output table from a join of a new table\n called win_count on the table called players\n \"\"\"\n c.execute('SELECT '\n ' win_rank.id, '\n ' win_rank.name, '\n ' win_rank.wins, '\n ' COALESCE(loss_count.losses, 0) AS losses '\n 'FROM '\n ' (SELECT players.player_id AS id, '\n ' players.player_name AS name, '\n ' COALESCE(win_count.wins, 0) AS wins '\n ' FROM '\n ' (SELECT winner_id, '\n ' COUNT(winner_id) AS wins '\n ' FROM matches '\n ' GROUP BY winner_id) AS win_count '\n ' FULL OUTER JOIN '\n ' players '\n ' ON players.player_id = win_count.winner_id) AS win_rank '\n 'FULL OUTER JOIN '\n ' (SELECT loser_id, '\n ' COUNT(loser_id) AS losses '\n ' FROM matches '\n ' GROUP BY loser_id) AS loss_count '\n 'ON win_rank.id = loss_count.loser_id '\n 'ORDER BY win_rank.wins DESC')\n\n rows = c.fetchall()\n db.close()\n for row in rows:\n total_matches = int(row[2]) + int(row[3])\n player_tuple = (row[0],row[1],int(row[2]),total_matches)\n standings.append(player_tuple)\n return standings\n\ndef reportMatch(winner, loser):\n \"\"\"Records the outcome of a single match between two players.\n\n Args:\n winner: the id number of the player who won\n loser: the id number of the player who lost\n \"\"\"\n db = connect()\n c = db.cursor()\n c.execute('INSERT INTO matches (winner_id, loser_id) '\n ' VALUES (%s, %s)', (winner, loser,))\n db.commit()\n db.close()\n\ndef swissPairings():\n \"\"\"Returns a list of pairs of players for the next round of a match.\n\n Assuming that there are an even 
number of players registered, each player\n appears exactly once in the pairings. Each player is paired with another\n player with an equal or nearly-equal win record, that is, a player adjacent\n to him or her in the standings.\n\n Returns:\n A list of tuples, each of which contains (id1, name1, id2, name2)\n id1: the first player's unique id\n name1: the first player's name\n id2: the second player's unique id\n name2: the second player's name\n \"\"\"\n standings = playerStandings()\n num = int(countPlayers())\n pairings = []\n players_per_match = 2\n next_in_standings = 1\n if num > 0:\n for i in range (num):\n if i % players_per_match == 0:\n id1 = standings[i][0]\n name1 = standings[i][1]\n id2 = standings[i+next_in_standings][0]\n name2 = standings[i+next_in_standings][1]\n pair = (id1, name1, id2, name2)\n pairings.append(pair)\n return pairings\n","sub_path":"tournament.py","file_name":"tournament.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"533649771","text":"import glob\nimport logging\nimport pickle\nimport pyltp\nimport time\n\nimport pymysql\nfrom bert_serving.client import BertClient\nimport numpy as np\nimport json\nfrom annoy import AnnoyIndex\n\n\n# from service.client import BertClient\nfrom gensim import similarities\n\nfrom scipy.linalg import norm\n\nclass sentenceParser():\n def __init__(self):\n\n self.bc = BertClient(ip='192.168.1.103')\n self.splitter = pyltp.SentenceSplitter()\n # fin = open('./resources/ninteenth_sents.txt', 'r', encoding='UTF-8')\n #\n # self.lines = fin.readlines()\n # fin.close()\n\n self.annoyIndex=AnnoyIndex(768)\n # self.annoyIndex.load('./mod/annoy_19th.model')\n self.annoyIndex.load('./mod/annoy_19th_vedio.mod')\n load_file=open('./resources/19th_vedio.bin','rb')\n self.sents=pickle.load(load_file)\n self.lines=[sent['articleContent'] for sent in self.sents]\n\n def find_19th_org(self,content,threadhold=0.9):\n sents = list(self.splitter.split(content))\n sents_encode=self.bc.encode(sents)\n result=[]\n for i,sencode in enumerate(sents_encode):\n sentindex,dis=self.annoyIndex.get_nns_by_vector(sencode,1,include_distances=True)\n print(sents[i]+str(np.cos(dis[0])))\n if(np.cos(dis[0])>threadhold):\n result.append({'org':self.lines[sentindex[0]].strip(),'subcontent':sents[i],'score':np.cos(dis[0]),'video':self.sents[sentindex[0]] if self.sents[sentindex[0]] else {} })\n return result\n\n\n def connect_wxremit_db(self):\n return pymysql.connect(host='127.0.0.1',\n port=3306,\n user='root',\n password='',\n database='xinhua',\n charset='utf8')\n\n\n def query_country_name(self, cc2):\n sql_str = (\"SELECT distinct(FILE_UUID),txt\"\n + \" FROM e20190313\"\n + \" WHERE txt like '%s' group by FILE_UUID,txt limit 100\" % cc2)\n logging.info(sql_str)\n\n con = self.connect_wxremit_db()\n cur = con.cursor()\n cur.execute(sql_str)\n rows = cur.fetchall()\n cur.close()\n con.close()\n\n # assert len(rows) == 1, 'Fatal error: country_code does not exists!'\n return rows\n def prepare_vedio(self):\n f = glob.glob('./resources/19/*.txt')\n sents=[]\n for file in f:\n print(file)\n with open(file,'r') as fin:\n lines=fin.readlines()\n for line in lines:\n line=line.replace('\\'','\"')\n js = json.loads(line)\n # jsonp = jsonpath.jsonpath(js, \"$..news.*\")\n sents.append(js)\n # Should be stored as key-value pairs\n self.sents=sents#[{str(i):sent} for i,sent in enumerate(sents)]\n annoyIndex = AnnoyIndex(768)\n for i,sent in enumerate(sents):\n encode = 
self.bc.encode([sent['articleContent']])[0]\n # sent_id=int(sent['sentenceId'][12:])\n annoyIndex.add_item(i,encode)\n annoyIndex.build(10)\n annoyIndex.save('./mod/annoy_19th_vedio.mod')\n fou=open('./resources/19th_vedio.bin','wb')\n pickle.dump(self.sents,fou)\n fou.close()\n # fou = np.array(self.sents, dtype=np.float32)\n # fou.tofile(\"./resources/19th_vedio.bin\")\n\n\n def prepare(self):\n fin=open('./resources/ninteenth','r',encoding='UTF-8')\n sentfile=open('./resources/ninteenth_sents.txt','w',encoding='UTF-8')\n\n '''\n AnnoyIndex(f, metric='angular') returns a new index that’s read-write and stores vector of f dimensions. Metric can be \"angular\", \"euclidean\", \"manhattan\", \"hamming\", or \"dot\".\n '''\n annoyIndex = AnnoyIndex(768)\n i=0\n sents_encode=[]\n lines=fin.readlines()\n for line in lines:\n line=line.strip()\n if line!='':\n sents=list(self.splitter.split(line))\n encodes=self.bc.encode(sents)\n\n for sent in zip(sents,encodes):\n sents_encode.append(sent[1])\n sentfile.write(sent[0]+'\\n')\n annoyIndex.add_item(i, sent[1])\n i+=1\n # # sents_encode.append({'content':sent[0],'encode':sent[1]})\n annoyIndex.build(100)\n annoyIndex.save('./mod/annoy_19th.model')\n\n fin.close()\n sentfile.close()\n\n fou=np.array(sents_encode,dtype=np.float32)\n fou.tofile(\"./resources/19.bin\")\n # fou=open('./resources/ninteenth_encode.txt','w',encoding='UTF-8')\n # for index,sent in enumerate(sents_encode):\n # fou.write(str(index)+'\\t'+sent['content']+'\\t'+str(sent['encode'])+'\\n')\n # fou.close()\n # import numpy as np\n # dis1 = np.dot(a[1], b[1]) / (norm(a[1]) * norm(b[1]))\n # dis2 = np.dot(b[0], c[0]) / (norm(b[0]) * norm(c[0]))\n # print(dis1, dis2)\n\n\n def proccess_main(self):\n # self.encodes=np.fromfile('./resources/19.bin',dtype=np.float32)\n # print(encodes)\n content='习近平总书记在党的十九大报告中指出,中国秉持共商共建共享的全球治理观,积极参与全球治理体系改革和建设,不断贡献中国智慧和力量。'\n content='世界正处于大发展大变革大调整时期,和平与发展仍然是时代主题'\n content='我们愿同意方共建“一带一路”,发挥两国“一带一路”合作的历史、文化、区位等优势,把“一带一路”互联互通建设同意大利“北方港口建设计划”、“投资意大利计划”等对接,在海上、陆地、航空、航天、文化等多个维度打造新时期的“一带一路”'\n # content='同志们:\n content='党的十九大报告指出:“我国社会主要矛盾已经转化为人民日益增长的美好生活需要和不平衡不充分的发展之间的矛盾”。从群众的需求上看,美好生活日益多样化,不仅包括物质方面的需求,还包括非物质需求。2017年12月份,厦门开始全力打造“五安工程”,从群众最关心、最直接的“家安、路安、食安、业安、心安”等平安需求着力,为打造最具安全感城市加油助力。'\n # self.getSimilray(content)\n # print('*'*15+content)\n # self.getSimilray2(content)\n print(self.find_19th_org(content))\n\n\n def getSimilray2(self,content):\n current = self.bc.encode([content])\n # annoyIndex=AnnoyIndex(768)\n # for i,encode in enumerate(self.encodes.reshape(-1, 768)):\n # annoyIndex.add_item(i,encode)\n # annoyIndex.build(100)\n result,distance=self.annoyIndex.get_nns_by_vector(current[0],5,include_distances=True)\n orgs=[]\n print(content)\n for i,r in enumerate(result):\n print(str(r)+' '+str(np.cos(distance[i])))\n # print(self.lines[r])\n # orgs.append(self.lines[r])\n orgs.append(self.sents[r])\n print(self.sents[r])\n\n distance=np.array(np.cos(distance)).tolist()\n return orgs,distance\n\n def getSimilray1(self,content):\n # None\n # current = self.bc.encode([content])\n # index=similarities.MatrixSimilarity(self.encodes.reshape(-1, 768),num_features=768,chunksize=768)\n # sim=index[current[0]]\n # sim=sorted(enumerate(sim),key=lambda item:item[1],reverse=True)\n # for i in sim[:-5]:\n # print(i)\n # print(self.lines[i[0]])\n current = self.bc.encode([content])\n score=np.sum(current[0]*self.encodes.reshape(-1, 768),axis=1)/np.linalg.norm(self.encodes.reshape(-1, 768),axis=1)\n topk=np.argsort(score)[::-1][:5]\n print(topk)\n\n\n\n def 
getSimilray(self,content):\n current=self.bc.encode([content])\n dtype = [('index',np.int), ('score',np.float32)]\n\n all=[]\n\n for index,encode in enumerate(self.encodes.reshape(-1,768)):\n distance=np.dot(current[0],encode)/(norm(current[0])*norm(encode))\n # if(distance>top[1]):\n all.append([index,distance])\n # all = np.array(all, dtype=dtype)\n # top= np.sort(all,order='score')\n top=sorted(all,key=lambda elem:elem[1],reverse=True)\n\n for i in top[:5]:\n print(i)\n print(self.lines[i[0]])\n\n def test(self):\n # feat=np.random.random((100000,4096))\n # annoyIndex = AnnoyIndex(4096)\n # annoyIndex.on_disk_build('a')\n # for i,v in enumerate(feat):\n # annoyIndex.add_item(i,v)\n # for i,v in enumerate(feat):\n # annoyIndex.add_item(i,v)\n t=time.time()\n #\n # annoyIndex.build(100)\n # print(time.time()-t)\n annoyIndex = AnnoyIndex(4096)\n annoyIndex.load('a')\n print(annoyIndex.get_nns_by_item(0,5))\n print(time.time() - t)\n\n def test1(self):\n rows=self.query_country_name('%')\n annoyIndex = AnnoyIndex(768)\n # for i,row in enumerate(rows):\n # encode=self.bc.encode([row[1]])\n # annoyIndex.add_item(i,encode[0])\n # annoyIndex.build(10)\n # annoyIndex.save('articles')\n annoyIndex.load('articles')\n result,index=annoyIndex.get_nns_by_item(10,5,include_distances=True)\n print(rows[10])\n print(np.cos(index))\n for i in result:\n\n print(rows[i])\n\nsentP=sentenceParser()\n# sentP.prepare()\n# sentP.prepare_vedio()\n\nsentP.proccess_main()\n# sentP.test1()\n","sub_path":"ninteenth.py","file_name":"ninteenth.py","file_ext":"py","file_size_in_byte":9661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"162980754","text":"#Shared\nimport time\nfrom botocore.exceptions import WaiterError as WaiterError\n\ndef dateconverter(o):\n import datetime\n if isinstance(o, datetime.datetime):\n return o.__str__()\n\nimport json\nimport sys\n\ndef json2str(o):\n return json.dumps(o, default=dateconverter, indent=2, sort_keys=True)\n\ndef jsonprint(o):\n my_print(json2str(o)+'\\n')\n\ndef jsonprint_err(o):\n my_print_err(json2str(o)+'\\n')\n\ndef my_print_err(str):\n red = '\\033[01;31m'\n sys.stderr.write(red+str+'\\n')\n\ndef my_print(str):\n green = '\\033[01;32m'\n sys.stdout.write(green+str+'\\n')\n\n\ndef create_changeset_and_wait(cf, stackName, file, params, type):\n with open(file, 'r') as f:\n cs_response = cf.create_change_set(\n StackName=stackName,\n TemplateBody=f.read(),\n Parameters=params,\n Capabilities=['CAPABILITY_NAMED_IAM'],\n ChangeSetType=type,\n ChangeSetName=stackName + \"-cs-\" +str(int(time.time()))\n )\n # print(str(cs_response))\n change_set_name = cs_response['Id']\n waiter = cf.get_waiter('change_set_create_complete')\n my_print(\"Waiting for change set '%s' to be created\" % change_set_name)\n try:\n waiter.wait(ChangeSetName=change_set_name)\n my_print(\"Change Set '%s' created\" % change_set_name)\n # Execute change set\n try:\n ex_response = cf.execute_change_set(ChangeSetName=change_set_name)\n my_print(\"Change Set '%s' executed.\" % change_set_name )\n try:\n if type == 'UPDATE':\n stackWaiter = cf.get_waiter('stack_update_complete')\n else:\n stackWaiter = cf.get_waiter('stack_create_complete')\n my_print(\"Waiting for stack '%s' creation to complete\" % stackName)\n stackWaiter.wait(StackName=stackName)\n my_print(\"Stack '%s' created\" % stackName)\n # DONE\n except:\n my_print_err(\"Failed to %s stack: '%s'\" % (type, stackName))\n jsonprint_err(cf.describe_stack_events(StackName=stackName))\n sys.exit()\n\n except:\n my_print_err(\"Failed to execute '%s' Change Set\" % change_set_name)\n jsonprint_err(cf.describe_change_set(ChangeSetName=change_set_name))\n\n\n except WaiterError as e:\n error_msg = e.last_response['StatusReason']\n if error_msg in ['No updates are to be performed.', 'The submitted information didn\\'t contain changes. Submit different information to create a change set.'] :\n my_print(\"%s. Going to delete change set %s\" % (error_msg, change_set_name))\n # don't need to keep ChangeSet that contains no changes\n response = cf.delete_change_set( ChangeSetName=change_set_name, StackName=stackName)\n my_print(\"Change set %s for stack %s has been deleted\" % (change_set_name, stackName))\n else:\n my_print_err(\"Unexpected WaiterError error: %s. %s\" % (stackName, error_msg))\n sys.exit()\n\n\n# def create_stack_and_wait(cf, name, file, params):\n# with open(file, 'r') as f:\n# result = cf.create_stack(\n# StackName=name,\n# TemplateBody=f.read(),\n# Parameters=params,\n# Capabilities=['CAPABILITY_NAMED_IAM']\n# )\n# my_print(\"Requesting stack %s creation.\" % name)\n# jsonprint(result)\n# waiter = cf.get_waiter('stack_create_complete')\n# my_print(\"Waiting for stack '%s' to be created\" % name)\n# try:\n# waiter.wait(StackName=name)\n# except:\n# my_print_err(\"Failed to create stack: '%s'\" % name)\n# jsonprint_err(cf.describe_stack_events(StackName=name))\n# # TODO: throw exception instead of just EXIT\n# sys.exit()\n# else:\n# my_print(\"Stack '%s' created\" % name)\n# return cf.describe_stacks(StackName=name)\n\n\n# def update_stack_and_wait(cf, name, file, params):\n# with open(file, 'r') as f:\n# result = cf.update_stack(\n# StackName=name,\n# TemplateBody=f.read(),\n# Parameters=params,\n# Capabilities=['CAPABILITY_NAMED_IAM']\n# )\n# my_print(\"Requesting stack %s update.\" % name)\n# jsonprint(result)\n# waiter = cf.get_waiter('stack_update_complete')\n# my_print(\"Waiting for stack '%s' to be updated\" % name)\n# try:\n# waiter.wait(StackName=name)\n# except:\n# my_print_err(\"Failed to update stack: '%s'\" % name)\n# jsonprint_err(cf.describe_stack_events(StackName=name))\n# # TODO: throw exception instead of just EXIT\n# sys.exit()\n# else:\n# my_print(\"Stack '%s' updated\" % name)\n# return cf.describe_stacks(StackName=name)\n
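\n# Illustrative usage sketch (all names below are placeholders, not part of the\n# original script): a boto3 CloudFormation client plus a template file on disk.\n# import boto3\n# cf = boto3.client('cloudformation')\n# params = [{'ParameterKey': 'Env', 'ParameterValue': 'dev'}]\n# create_changeset_and_wait(cf, 'my-stack', 'template.yaml', params, 'CREATE')\n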
","sub_path":"my.py","file_name":"my.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"613780136","text":"import sys\nimport re\nimport os\n\n\ndef main():\n\n fn1 = sys.argv[1]\n fn2 = sys.argv[2]\n f1 = open(fn1, 'r')\n f2 = open(fn2, 'w')\n\n
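 # Binarize libsvm-style feature values: every positive count becomes 1,\n # e.g. '3 1:37 5:2' -> '3 1:1 5:1' (illustrative example; zero values are untouched).\n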
 line = f1.readline()\n while line:\n f2.write(re.sub(r':[123456789]\d*', r':1', line))\n line = f1.readline()\n \n f1.close()\n f2.close()\n\n \nif __name__ == '__main__':\n main()\n","sub_path":"hw2/q2/binarization.py","file_name":"binarization.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"147916138","text":"#!/usr/bin/env python3\n\nimport os\n\nfps = ['30', '60', '120']\nbitrate = ['20M', '40M', '60M', '80M']\ncodec = ['XVID', 'WebM', 'H264']\n\nfor c in codec:\n for b in bitrate:\n for f in fps:\n cmd = \"./ffmpeg.py -f \" + f + \" -b \" + b + \" -c \" + c + \" -o output_\" + c.lower() + \"_\" + f + \"fps_\" + b + \".mkv\"\n os.system(cmd)\n","sub_path":"scripts/create-video.py","file_name":"create-video.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"175053194","text":"# __author__ : slade\n# __time__ : 17/12/21\n\nimport pandas as pd\nimport numpy as np\nfrom xgboost.sklearn import XGBClassifier\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.linear_model.logistic import LogisticRegression\nfrom sklearn.grid_search import GridSearchCV\n\n# load data\nX_train = pd.read_csv('ensemble_X_train.csv').iloc[:, 1:]\nY_train = pd.read_csv('ensemble_Y_train.csv', header=None).iloc[:, 1:]\nX_test = pd.read_csv('ensemble_X_test.csv').iloc[:, 1:]\nY_test = pd.read_csv('ensemble_Y_test.csv', header=None).iloc[:, 1:]\nY_train = np.array(Y_train).ravel()\nY_test = np.array(Y_test).ravel()\n\n\n# compute two rates: res1 is the recall on positive cases, res2 is the overall accuracy\ndef metrics_spec(actual_data, predict_data, cutoff=0.5):\n actual_data = np.array(actual_data)\n predict_data = np.array(predict_data)\n bind_data = np.c_[actual_data, predict_data]\n res1 = 1.0 * (bind_data[bind_data[:, 0] == 1][:, 1] >= cutoff).sum() / bind_data[bind_data[:, 0] == 1].shape[0]\n res2 = 1.0 * (\n (bind_data[bind_data[:, 0] == 1][:, 1] >= cutoff).sum() + (\n bind_data[bind_data[:, 0] == 0][:, 1] < cutoff).sum()) / \\\n bind_data.shape[0]\n return res1, res2\n\n
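\n# Illustrative sanity check on toy arrays (not from the original data):\n# metrics_spec([1, 0, 1, 0], [0.9, 0.1, 0.8, 0.2]) -> (1.0, 1.0)\n\n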
# if you have read the article 'Kaggle-TianChi分类问题相关纯算法理论剖析', you may know the suggested tuning order; let's follow it\n\n# you can set scale_pos_weight to (len(Y_train) - Y_train.sum()) / Y_train.sum() to balance the positive and negative cases\n# get n_estimators and learning_rate first\n# if necessary, increasing the cv parameter raises the confidence in the current model's result\nparam_test = {\n 'learning_rate': [0.1, 0.3, 0.9],\n 'n_estimators': [50, 100, 300, 500]\n}\ngsearch = GridSearchCV(\n estimator=XGBClassifier(\n learning_rate=0.1,\n objective='binary:logistic',\n scale_pos_weight=1.002252816020025,\n seed=27),\n param_grid=param_test,\n scoring='roc_auc',\n n_jobs=4,\n iid=False,\n cv=2)\ngsearch.fit(X_train, Y_train)\nprint(gsearch.best_params_)\n# {'learning_rate': 0.1, 'n_estimators': 100}\n# we should also consider the training speed of each step; sometimes we sacrifice a little accuracy to improve speed. Don't worry: you can retune these two params at the end if needed\n\n\n# get subsample next\nparam_test1 = {\n 'subsample': [0.6, 0.7, 0.8, 0.9]\n}\ngsearch1 = GridSearchCV(\n estimator=XGBClassifier(\n learning_rate=0.1,\n n_estimators=100,\n objective='binary:logistic',\n scale_pos_weight=1.002252816020025,\n seed=27),\n param_grid=param_test1,\n scoring='roc_auc',\n n_jobs=4,\n iid=False,\n cv=2)\ngsearch1.fit(X_train, Y_train)\nprint(gsearch1.best_params_)\n# {'subsample': 0.7}\n\n# if you want the model to be more accurate, compute the accuracy on your test set after each training step\n# if the test-set accuracy declines compared with the previous step, follow the adjustment actions from the article guide 'Kaggle-TianChi分类问题相关纯算法理论剖析'\n\n# I have trained max_leaf_nodes and min_weight_fraction_leaf privately but it did not help, so we skip them and get the min_samples_split and max_depth results directly\nparam_test2 = {\n 'max_depth': [3, 5, 7],\n 'min_child_weight': [0.8, 1, 1.2]\n\n}\ngsearch2 = GridSearchCV(\n estimator=XGBClassifier(\n learning_rate=0.1,\n n_estimators=100,\n subsample=0.7,\n objective='binary:logistic',\n scale_pos_weight=1.002252816020025,\n seed=27),\n param_grid=param_test2,\n scoring='roc_auc',\n n_jobs=4,\n iid=False,\n cv=2)\ngsearch2.fit(X_train, Y_train)\nprint(gsearch2.best_params_)\n# {'max_depth': 3, 'min_child_weight': 0.8}\n\n# train colsample_bytree next\nparam_test3 = {\n 'colsample_bytree': [0.6, 0.7, 0.8, 0.9]\n}\ngsearch3 = GridSearchCV(\n estimator=XGBClassifier(\n learning_rate=0.1,\n n_estimators=100,\n max_depth=3,\n subsample=0.7,\n min_child_weight=0.8,\n objective='binary:logistic',\n scale_pos_weight=1.002252816020025,\n seed=27),\n param_grid=param_test3,\n scoring='roc_auc',\n n_jobs=4,\n iid=False,\n cv=2)\ngsearch3.fit(X_train, Y_train)\nprint(gsearch3.best_params_)\n# {'colsample_bytree': 0.7}\n\n# reg_lambda and reg_alpha at last\nparam_test4 = {\n 'reg_lambda': [0.1, 0.3, 0.9, 3],\n 'reg_alpha': [0.1, 0.3, 0.9, 3]\n}\ngsearch4 = GridSearchCV(\n estimator=XGBClassifier(\n learning_rate=0.1,\n n_estimators=100,\n max_depth=3,\n subsample=0.7,\n min_child_weight=0.8,\n colsample_bytree=0.7,\n objective='binary:logistic',\n scale_pos_weight=1.002252816020025,\n seed=27),\n param_grid=param_test4,\n scoring='roc_auc',\n n_jobs=4,\n iid=False,\n cv=2)\ngsearch4.fit(X_train, Y_train)\nprint(gsearch4.best_params_)\n# {'reg_alpha': 0.3, 'reg_lambda': 0.1}\n\n\n# for brevity, we skip training max_features and the joint retuning of learning_rate and n_estimators, but for a strong model these steps should be added to your process.\n# for the same reason, I skip the 'saddle point escape' (鞍点逃逸) and 'extreme exploration' (极限探索) code; follow the methods mentioned in the article 'Kaggle&TianChi分类问题相关纯算法理论剖析' and try them yourself\n\n# define the final params\nclf = XGBClassifier(\n learning_rate=0.1,\n n_estimators=100,\n max_depth=3,\n subsample=0.7,\n min_child_weight=0.8,\n colsample_bytree=0.7,\n objective='binary:logistic',\n scale_pos_weight=1.002252816020025,\n reg_alpha=0.3,\n reg_lambda=0.1,\n seed=27\n)\n\n# train the model\nmodel_sklearn = clf.fit(X_train, Y_train)\ny_bst = model_sklearn.predict_proba(X_test)[:, 1]\nmetrics_spec(Y_train, model_sklearn.predict_proba(X_train)[:, 1])\nmetrics_spec(Y_test, y_bst)\n\n# make new features\n# we can use the sparse leaf-node encoding as the input for stacking\ntrain_new_feature = clf.apply(X_train)\ntest_new_feature = clf.apply(X_test)\nenc = OneHotEncoder()\nenc.fit(train_new_feature)\ntrain_new_feature2 = np.array(enc.transform(train_new_feature).toarray())\ntest_new_feature2 = np.array(enc.transform(test_new_feature).toarray())\n
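# clf.apply() returns one leaf index per tree for every sample; one-hot encoding\n# those indices yields a sparse binary design matrix (n_samples x total_leaves)\n# that the stacked logistic regression consumes below.\n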
res_data = pd.DataFrame(np.c_[Y_train, train_new_feature2])\nres_data.columns = ['f' + str(x) for x in range(res_data.shape[1])]\nres_test = pd.DataFrame(np.c_[Y_test, test_new_feature2])\nres_test.columns = ['f' + str(x) for x in range(res_test.shape[1])]\n\n# stack a model on top: logistic regression, FM, or a neural network all work surprisingly well here\n# attention points for the stacking model can be found in the article mentioned at the top of this file\nlr = LogisticRegression(C=1, penalty='l2', max_iter=100, solver='sag', multi_class='ovr')\nmodel_lr = lr.fit(res_data.iloc[:, 1:], res_data['f0'])\ny_train_lr = model_lr.predict_proba(res_data.iloc[:, 1:])[:, 1]\ny_test_lr = model_lr.predict_proba(res_test.iloc[:, 1:])[:, 1]\nres = metrics_spec(Y_test, y_test_lr)\ncorrect_rank = X_train.columns\n\n\n\n# save the models; you will load them if you want to deploy a trained model\nfrom sklearn.externals import joblib\n\njoblib.dump(model_sklearn, 'model_sklearn.pkl')\njoblib.dump(correct_rank, 'correct_rank.pkl')\njoblib.dump(enc, 'enc.pkl')\njoblib.dump(model_lr, 'model_lr.pkl')\n\n\n\n# Model evaluation: KS statistic\n# ks_xgb_lr = np.c_[Y_test,y_test_lr]\n# ks_xgb_lr = sorted(ks_xgb_lr , key = lambda x : x[1],reverse = True)\n# ks_xgb_lr = pd.DataFrame(ks_xgb_lr)\n# for i in range(9):\n# \tend = (i+1)*break_cut\n# \tres1 = 1.0*ks_xgb_lr.iloc[:end,:][ks_xgb_lr.iloc[:end,0]==0].shape[0]/ks_xgb_lr[ks_xgb_lr.iloc[:,0]==0].shape[0]\n# \tres2 = 1.0*ks_xgb_lr.iloc[:end,:][ks_xgb_lr.iloc[:end,0]==1].shape[0]/ks_xgb_lr[ks_xgb_lr.iloc[:,0]==1].shape[0]\n# \tres = res2-res1\n# \tprint(res1,res2,res)\n
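# Note: the KS statistic sketched above is the maximum gap between the cumulative\n# score distributions of positive and negative samples; larger means better separation.\n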
","sub_path":"Ensemble/Stacking_xgboost_logistic_regression.py","file_name":"Stacking_xgboost_logistic_regression.py","file_ext":"py","file_size_in_byte":7834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"111577910","text":"import requests\r\nimport urllib\r\nimport json\r\nimport sys\r\nimport traceback\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\n\r\nimport traceback\r\nimport time\r\n\r\n\r\nclass Wst:\r\n \r\n empCount = 0\r\n\r\n def __init__(self):\r\n pass\r\n \r\n def call_ws(self, url): \r\n try: \r\n\r\n response = requests.get(url)\r\n\r\n todos = json.loads(response.text)\r\n \r\n return(json.dumps(todos, sort_keys=True, indent=4, separators=(',', ': ')))\r\n \r\n #for todo in todos[\"teams\"]:\r\n # self.responseEdit.append(todo[\"name\"] ) \r\n # self.responseEdit.append(todo[\"_links\"][\"self\"][\"href\"]) \r\n # self.responseEdit.append(todo[\"_links\"][\"fixtures\"][\"href\"]) \r\n # self.responseEdit.append(todo[\"_links\"][\"players\"][\"href\"]) \r\n # self.responseEdit.append(todo[\"crestUrl\"])\r\n # self.responseEdit.append(\"\") \r\n except Exception:\r\n return(\"Unexpected error: \" + str(sys.exc_info()))\r\n \r\n def popuniKladionicu(self): \r\n print('starting browser')\r\n try:\r\n chrome_options = Options() \r\n #chrome_options.add_argument('--headless')\r\n chrome_options.add_argument('--ignore-certificate-errors')\r\n chrome_options.add_argument(\"--test-type\")\r\n chrome_options.binary_location = 'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe' \r\n \r\n chrome_options.add_argument(\"--disable-popup-blocking\"); \r\n \r\n driver = webdriver.Chrome(chrome_options = chrome_options)\r\n \r\n driver.get('https://apex.oracle.com/pls/apex/f?p=51345:LOGIN_DESKTOP:17023163753850:::::&tz=2:00')\r\n \r\n emailid=driver.find_element_by_id(\"P101_USERNAME\")\r\n emailid.send_keys(\"branko\")\r\n\r\n\r\n passw=driver.find_element_by_id(\"P101_PASSWORD\")\r\n passw.send_keys(\"kkk\")\r\n \r\n signin=driver.find_element_by_id(\"B27311205195543441998\")\r\n signin.click() \r\n \r\n try:\r\n alert = driver.switch_to_alert()\r\n alert.accept()\r\n except:\r\n print('No alert')\r\n \r\n unos=driver.find_element_by_id(\"B37164898911002989583\")\r\n unos.click() \r\n \r\n time.sleep(10) \r\n \r\n \r\n trs = driver.find_element_by_class_name('t-Report-report').find_elements_by_tag_name('tr')\r\n \r\n #h = driver.find_element_by_class_name('t-Body-contentInner').text\r\n \r\n #print(h)\r\n \r\n print( len(trs) )\r\n \r\n for tr in trs:\r\n print('node:', tr)\r\n \r\n tds = tr.find_elements_by_tag_name('td')\r\n \r\n \r\n for td in tds:\r\n header = td.get_attribute('headers')\r\n txt = td.get_attribute('textContent')\r\n \r\n txt = txt.encode('utf-8')\r\n \r\n print(header, ': ', txt)\r\n \r\n if header == 'HOME_REZ':\r\n inp = td.find_element_by_tag_name('input')\r\n inp_value = inp.get_attribute('value')\r\n if inp_value == '':\r\n inp.send_keys('2')\r\n \r\n #inp.send_keys('\b')\r\n #inp.send_keys('\b')\r\n \r\n \r\n if header == 'GUEST_REZ':\r\n inp = td.find_element_by_tag_name('input')\r\n inp_value = inp.get_attribute('value')\r\n if inp_value == '':\r\n inp.send_keys('1') \r\n \r\n #inp.send_keys('\b')\r\n #inp.send_keys('\b') \r\n \r\n \r\n save=driver.find_element_by_id(\"B37172830720567434957\")\r\n save.click() \r\n \r\n \r\n except:\r\n print(\"Unexpected error:\", sys.exc_info()) \r\n traceback.print_exc() ","sub_path":"my_project/root/nested/wst.py","file_name":"wst.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"58936388","text":"#! /usr/bin/python3\n# kmwho\n\nfrom __future__ import print_function\n\ndef solvecase():\n K,C,S = map(int, input().strip().split() )\n Smin = (K+C-1)//C\n if S < Smin:\n return \"IMPOSSIBLE\"\n kpow = [ K**i for i in range(C+1) ]\n nums = []\n for j in range( min(S,Smin) ):\n nums.append( tuple( sorted( (C*j + i) % K for i in range(C) ) ) )\n vals = [ str( 1 + sum( num[i]*kpow[C-i-1] for i in range(C) )) for num in nums ]\n #print( nums )\n return \" \".join( vals )\n\ndef solve():\n T = int(input())\n for t in range(T):\n res = solvecase()\n print( \"Case #\" + str(t+1) + \": \" + str(res) )\n\ndef main():\n\tsolve()\n\n\nmain()\n","sub_path":"codes/CodeJamCrawler/16_0_4/kmwho/smallD.py","file_name":"smallD.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} {"seq_id":"230718686","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Mengxuan Chen\n@emails: CHENMENGXUAN163@pingan.com.cn\n@description:\n # Total return swap P&L calculation\n@revise log:\n 2020.11.03 created the program\n 2020.11.04 average holding cost computed with the FIFO method\n stocks and index futures computed separately\n wrapped the logic into functions\n 2020.11.05 stocks and index futures computed together\n compute the fixed-leg interest\n fixed date/index bugs throughout\n fixed FIFO\n testing:\n 2020.11.06 tested and revised FIFO\n adjusted futures price = futures price * contract multiplier\n fixed the floating P&L when a position exists but did not trade that day\n fixed the case where all held stocks traded that day\n handle the exact full-liquidation case separately\n\"\"\"\n# In[]\nimport pandas as pd\nimport numpy as np\nimport os\nimport re\nimport datetime\nimport time\nimport warnings\n\nwarnings.filterwarnings('ignore')\nfrom WindPy import *\nw.start()\n\n\ndef extractData(Date, Codes, type='stock'):\n '''\n Fetch data via the Wind API: close prices for stocks, settlement prices for index futures.\n :param Date:\n :param Codes:\n :return:\n '''\n codes = ','.join(Codes)\n if isinstance(Date, str) == True:\n pass\n else:\n Date = Date.strftime(\"%Y-%m-%d\")\n if type == 'stock':\n tradeDate = \"tradeDate=\" + str(Date).replace('-', '') + \";priceAdj=U;cycle=D\"\n data = w.wss(codes, \"close\", tradeDate)\n elif type == 'futures':\n tradeDate = \"tradeDate=\" + str(Date).replace('-', '') + \";cycle=D\"\n data = w.wss(codes, \"settle\", tradeDate)\n else:\n raise ValueError('we do not have this type of product!')\n name = w.wss(codes, \"sec_name\", tradeDate)\n df1 = pd.DataFrame(name.Data[0], columns=name.Fields, index=name.Codes)\n df2 = pd.DataFrame(data.Data[0], columns=data.Fields, index=data.Codes)\n df = pd.concat([df1, df2], axis=1)\n df.reset_index(inplace=True)\n df.columns = ['证券代码', '证券简称', '收盘价']\n return df\n\ndef extractContractmultiplier(Codes):\n '''\n # Fetch the contract multiplier for each futures contract\n :param Codes:\n :return:\n '''\n codes = ','.join(Codes)\n data = w.wss(codes, \"contractmultiplier\")\n df = pd.DataFrame(data.Data[0], columns=data.Fields, index=data.Codes)\n return df\n\ndef nameNorm(code, type):\n '''\n Normalize the security codes in each day's input data.\n :param code:\n :param type:\n :return:\n '''\n if type == 'stock':\n code = str(int(code))\n if len(code) < 6:\n code = (6 - len(code)) * '0' + code\n else:\n pass\n if code[0] == '6':\n # SSE codes (including STAR Market 688xxx) get the .SH suffix\n code = code + '.SH'\n else:\n code = code + '.SZ'\n elif type == 'futures':\n code = str(code)\n code = code + '.CFE'\n else:\n raise ValueError('we do not have this type of product!')\n return code\n\n\n
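# Illustrative examples (assumed inputs): nameNorm('600519', 'stock') -> '600519.SH';\n# nameNorm(2594, 'stock') -> '002594.SZ'; nameNorm('IF2012', 'futures') -> 'IF2012.CFE'\n\n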
def FIFO(df):\n '''\n # Compute the cost price using the FIFO method\n :param df:\n :return:\n '''\n if len(df.loc[(df['是否卖出'] == False) & (df['剩余数量'] > 0)]) == len(df.loc[(df['是否卖出'] == False)]):\n df.loc[(df['是否卖出'] == False) & (df['剩余数量'] > 0),'剩余数量'] = \\\n df.loc[(df['是否卖出'] == False) & (df['剩余数量'] > 0),'交易数量']\n dff = df.loc[(df['剩余数量'] > 0)].append(df.iloc[-1])\n # df = df.loc[~df['是否卖出'] == True]\n sum = 0\n xx = 0\n for kk, ii in enumerate(list(dff['交易数量'])):\n if sum > abs(dff['交易数量'].iloc[-1]):\n xx = kk\n break\n sum += ii\n if float(xx) > 0:\n df_x = dff.iloc[0:int(xx)]\n a = (df_x['交易价格'].iloc[:-1] * df_x['剩余数量'].iloc[:-1] ).sum()\n b = df_x['交易价格'].iloc[-1] * (abs(df['交易数量'].iloc[-1]) - df_x['剩余数量'].iloc[:-1].sum())\n c = abs(df['交易数量'].iloc[-1])\n df['成本价格'].iloc[-1] = (a+b) / c\n # First compute the remaining quantity of the most recent lot, then zero out the remaining quantity of all earlier lots that were consumed\n d = (abs(df['交易数量'].iloc[-1]) - df_x['剩余数量'].iloc[:-1].sum())\n df['剩余数量'].iloc[0:int(xx)][-1:] = df_x['剩余数量'].iloc[-1] - d\n df['剩余数量'].iloc[0:int(xx)][:-1] = 0\n else:\n # The exact full-liquidation case must be handled separately\n df_x = dff.iloc[0:1]\n a = (df_x['交易价格'].iloc[:-1] * df_x['剩余数量'].iloc[:-1] ).sum()\n b = df_x['交易价格'].iloc[-1] * (abs(df['交易数量'].iloc[-1]) - df_x['剩余数量'].iloc[:-1].sum())\n c = abs(df['交易数量'].iloc[-1])\n df['成本价格'].iloc[-1] = (a+b) / c\n # First compute the remaining quantity of the most recent lot, then zero out the remaining quantity of all earlier lots that were consumed\n d = (abs(df['交易数量'].iloc[-1]) - df_x['剩余数量'].iloc[:-1].sum())\n df['剩余数量'].iloc[0:1][-1:] = df_x['剩余数量'].iloc[-1] - d\n df['剩余数量'].iloc[0:1][:-1] = 0\n return df\n\n\n
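# FIFO cost illustration (hypothetical lots): buy 100 @ 10, buy 100 @ 12, then\n# sell 150 -> cost price = (100*10 + 50*12) / 150 = 10.67; the first lot's\n# remaining quantity drops to 0 while the second lot keeps 50.\n\n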
def calToday(Date, Data, Type):\n '''\n # Aggregate the day's trades into a per-security summary\n :param Date:\n :param Data:\n :param Type:\n :return:\n '''\n # Drop empty rows from Data and normalize the security codes\n Data.dropna(inplace=True, subset=['证券代码'])\n Data['证券代码'] = Data['证券代码'].apply(lambda x: nameNorm(x, type=Type))\n\n Data['成交数量方向'] = ''\n if Type == 'stock':\n Data['产品ID'] = Data['证券代码']\n\n Data.loc[Data['委托方向'] == '卖出', '成交数量方向'] = -1 * Data.loc[Data['委托方向'] == '卖出', '成交数量']\n Data.loc[Data['委托方向'] == '买入', '成交数量方向'] = Data.loc[Data['委托方向'] == '买入', '成交数量']\n\n elif Type == 'futures':\n Data.loc[Data['委托方向'] == '卖出开仓', '产品ID'] = Data.loc[Data['委托方向'] == '卖出开仓', '证券代码'] + '.PUT'\n Data.loc[Data['委托方向'] == '卖出平仓', '产品ID'] = Data.loc[Data['委托方向'] == '卖出平仓', '证券代码'] + '.PUT'\n Data.loc[Data['委托方向'] == '买入开仓', '产品ID'] = Data.loc[Data['委托方向'] == '买入开仓', '证券代码'] + '.CALL'\n Data.loc[Data['委托方向'] == '买入平仓', '产品ID'] = Data.loc[Data['委托方向'] == '买入平仓', '证券代码'] + '.CALL'\n\n Data.loc[Data['委托方向'] == '卖出开仓', '成交数量方向'] = Data.loc[Data['委托方向'] == '卖出开仓', '成交数量']\n Data.loc[Data['委托方向'] == '买入开仓', '成交数量方向'] = Data.loc[Data['委托方向'] == '买入开仓', '成交数量']\n Data.loc[Data['委托方向'] == '卖出平仓', '成交数量方向'] = -1 * Data.loc[Data['委托方向'] == '卖出平仓', '成交数量']\n Data.loc[Data['委托方向'] == '买入平仓', '成交数量方向'] = -1 * Data.loc[Data['委托方向'] == '买入平仓', '成交数量']\n\n num = pd.DataFrame(Data.groupby(['产品ID'])['成交数量方向'].sum())\n price = pd.DataFrame(Data.groupby(['产品ID'])['成交价格'].sum())\n fee = pd.DataFrame(Data.groupby(['产品ID'])['佣金'].sum()\n + Data.groupby(['产品ID'])['过户费'].sum()\n + Data.groupby(['产品ID'])['交割费'].sum()\n + Data.groupby(['产品ID'])['经手费'].sum()\n + Data.groupby(['产品ID'])['结算费'].sum()\n + Data.groupby(['产品ID'])['交易费'].sum()\n + Data.groupby(['产品ID'])['证管费'].sum()\n + Data.groupby(['产品ID'])['其他费用'].sum()\n + Data.groupby(['产品ID'])['全额过户费'].sum())\n tax = pd.DataFrame(Data.groupby(['产品ID'])['印花税'].sum())\n\n tclose = extractData(Date, Data['证券代码'].drop_duplicates().to_list())\n tfutures_multiplier = extractContractmultiplier(Data['证券代码'].drop_duplicates().to_list())\n\n if Type == 'futures':\n tclose_ = tclose.copy()\n tclose__ = tclose.copy()\n tclose_['证券代码'] = tclose_['证券代码'] + '.PUT'\n tclose__['证券代码'] = tclose__['证券代码'] + '.CALL'\n tclose = tclose_.append(tclose__)\n\n tfutures_multiplier_ = tfutures_multiplier.copy()\n tfutures_multipliere__ = tfutures_multiplier.copy()\n tfutures_multiplier_.index = tfutures_multiplier_.index + '.PUT'\n tfutures_multipliere__.index = tfutures_multipliere__.index + '.CALL'\n tfutures_multiplier = tfutures_multiplier_.append(tfutures_multipliere__)\n\n elif Type == 'stock':\n tfutures_multiplier = pd.DataFrame([1] * len(tclose),index= Data['产品ID'].drop_duplicates())\n\n close = pd.DataFrame(np.array(tclose['收盘价']), index=tclose['证券代码'])\n today = pd.concat([num, price, fee, tax, close, price, tfutures_multiplier], axis=1)\n today = today.reset_index()\n today.columns = ['证券代码', '交易数量', '交易价格', '交易费用', '印花税', '收盘价', '成本价格', '合约乘数']\n return today\n\n\ndef divCal(Div, Today):\n '''\n Account for cash dividends and stock dividends.\n :param Div:\n :param Today:\n :return:\n '''\n # Preprocess the dividend dataset\n Div['证券代码'] = Div['证券代码'].apply(lambda x: nameNorm(x, type='stock'))\n\n # Cash dividends\n divCash = Div[Div['发生业务'] == '红利到帐']\n divCash.index = divCash['证券代码']\n divCash = divCash['发生金额']\n divCash = divCash.reset_index()\n divCash.columns = ['证券代码', '现金红利']\n TOday = pd.merge(divCash, Today, how='outer')\n\n # Stock dividends\n divStock = Div[Div['发生业务'] == '红股上市']\n divStock.index = divStock['证券代码']\n divStock = divStock['发生数量']\n divStock = divStock.reset_index()\n divStock.columns = ['证券代码', '股票股利']\n TODAY = pd.merge(divStock, TOday, how='outer')\n return TODAY\n\n\ndef todayPerform(Date,Position,Today,Type):\n '''\n Merge today's trades into the running position ledger and compute the day's P&L.\n :param Position:\n :param Today:\n :return:\n '''\n Today['日期'] = Date\n Today.fillna(0,inplace=True)\n asset_not_trade = []\n\n if Type == 'futures':\n for m in Position['证券代码'].apply(lambda x: x[:10]).drop_duplicates().to_list():\n if m not in Today.loc[Today['交易数量'] != 0]['证券代码'].apply(lambda x: x[:10]).drop_duplicates().to_list():\n asset_not_trade.append(m)\n ydate = Position['日期'].drop_duplicates().to_list()[-1]\n asset_not_trade_ = asset_not_trade.copy()\n\n if len(asset_not_trade) != 0:\n tclose_not_trade = extractData(Date,asset_not_trade_)\n multiplier = extractContractmultiplier(tclose_not_trade['证券代码'].drop_duplicates().to_list())\n\n tclose_not_trade_ = tclose_not_trade.copy()\n tclose_not_trade__ = tclose_not_trade.copy()\n tclose_not_trade_['证券代码'] = tclose_not_trade_['证券代码'] + '.PUT'\n tclose_not_trade__['证券代码'] = tclose_not_trade__['证券代码'] + '.CALL'\n tclose_not_trade = tclose_not_trade_.append(tclose_not_trade__)\n tclose_not_trade = pd.DataFrame(np.array(tclose_not_trade['收盘价']), index=tclose_not_trade['证券代码'])\n\n multiplier_ = multiplier.copy()\n multiplier__ = multiplier.copy()\n multiplier_.index = multiplier_.index + 
'.PUT'\n multiplier__.index = multiplier__.index + '.CALL'\n multiplier = multiplier_.append(multiplier__)\n else:\n pass\n\n elif Type == 'stock':\n for m in Position['证券代码'].drop_duplicates().to_list():\n if m not in Today.loc[Today['交易数量'] != 0]['证券代码'].to_list():\n asset_not_trade.append(m)\n ydate = Position['日期'].drop_duplicates().to_list()[-1]\n\n if len(asset_not_trade) != 0:\n asset_not_trade_ = pd.DataFrame(asset_not_trade).iloc[:, 0].apply(lambda x: x[:10]).to_list()\n close_ = extractData(Date, asset_not_trade)\n tclose_not_trade = pd.DataFrame(np.array(close_['收盘价']), index=close_['证券代码'])\n multiplier = pd.DataFrame([1] * len(tclose_not_trade),index=close_['证券代码'])\n else:\n pass\n\n if len(asset_not_trade) != 0:\n nanlist = pd.DataFrame([0] * len(tclose_not_trade),index=tclose_not_trade.index)\n asset_not_trade_today = pd.concat([nanlist,nanlist,nanlist,nanlist,tclose_not_trade,nanlist,multiplier],axis=1)\n asset_not_trade_today.reset_index(inplace=True)\n asset_not_trade_today.columns = ['证券代码','交易数量', '交易价格','交易费用','印花税',\n '收盘价','成本价格','合约乘数']\n asset_not_trade_today['日期'] = Date\n\n Today_all = pd.concat([asset_not_trade_today,Today],axis = 0)\n else:\n Today_all = Today.copy()\n\n Position = pd.concat([Position,Today_all],axis = 0)\n\n Position.index = list(range(len(Position.index)))\n Position['交易数量'].fillna(0, inplace=True)\n Position['股票股利'].fillna(0, inplace=True)\n Position.loc[:, '持仓数量'] = np.array(pd.DataFrame(Position.groupby(['证券代码'])['交易数量'].cumsum())['交易数量']) + \\\n np.array(pd.DataFrame(Position.groupby(['证券代码'])['股票股利'].fillna(0).cumsum())['股票股利'])\n Position['现金红利'].fillna(0, inplace=True)\n # Position.loc[:,'累计现金红利'] = np.array(pd.DataFrame(Position.groupby(['证券代码'])['现金红利'].cumsum())['现金红利'])\n\n Position['是否卖出'] = Position['交易数量'] < 0\n Position.loc[Position['交易数量'] == 0, '是否卖出'] = np.nan\n Position_sell = Position.loc[(Position['日期'] == Date) & (Position['是否卖出'] ==True)]['证券代码']\n Position_buy = Position.loc[(Position['日期'] == Date) & (Position['是否卖出'] ==False)]['证券代码']\n for stockj in Position_buy.to_list():\n Position.loc[Position['证券代码'] == stockj, '剩余数量'] = \\\n Position.loc[Position['证券代码'] == stockj, '交易数量']\n for stocki in Position_sell.to_list():\n Position.loc[Position['证券代码']== stocki,:] = \\\n FIFO(Position.loc[(Position['证券代码']== stocki)]) # & Position['交易数量'] != 0])\n\n Position['前收盘价'] = Position.groupby(['证券代码'])['收盘价'].shift(1)\n Position.loc[Position['日期'] ==Date,'浮盈'] = Position.loc[Position['日期'] ==Date,:].apply(\n lambda x: x['合约乘数'] * x['持仓数量'] *(x['收盘价']-x['前收盘价']),axis = 1)\n\n if Type == 'stock':\n Position.loc[Position['日期'] ==Date,'实盈'] = Position.loc[Position['日期'] ==Date,:].apply(\n lambda x:x['现金红利'] + abs(x['交易数量']) * (x['交易价格']-x['成本价格']),axis = 1)\n\n\n elif Type == 'futures':\n Position['期权类型'] = Position['证券代码'].apply(lambda x: x.split('.',2)[2])\n\n Position.loc[(Position['日期'] == Date) & (Position['期权类型'] == 'CALL'), '实盈'] = \\\n Position.loc[Position['日期'] == Date, :].apply(\n lambda x: x['交易数量'] * (x['收盘价'] - x['交易价格']), axis=1)\n\n Position.loc[(Position['日期'] == Date) & (Position['期权类型'] == 'PUT'), '实盈'] = \\\n Position.loc[Position['日期'] == Date, :].apply(\n lambda x: - x['交易数量'] * (x['收盘价'] - x['交易价格']), axis=1)\n else:\n raise ValueError('we do not have this type of product!')\n Position.loc[(Position['日期'] == Date)].index = [Date] * len(Position.loc[(Position['日期'] == Date)])\n return Position\n\n\ndef sum(Date, Position, Position_futures):\n '''\n\n :param Date:\n :param Position:\n :param 
Position_futures:\n :return:\n '''\n Position.index = Position['日期']\n Position = Position.iloc[:,1:]\n Position_futures.index = Position_futures['日期']\n Position_futures = Position_futures.iloc[:,1:]\n sum_today = Position.loc[Position.index == Date, :].apply(lambda x: x.sum())\n sum_today_futures = Position_futures.loc[Position_futures.index == Date, :].apply(lambda x: x.sum())\n\n # NOTE: this helper shadows the built-in sum() within this script\n sum = Position.apply(lambda x: x.sum())\n sum_futures = Position_futures.apply(lambda x: x.sum())\n\n balance = sum_today['实盈'] + sum_today['现金红利'] - sum_today['交易费用'] - sum_today['印花税'] \\\n + sum_today_futures['实盈'] + sum_today_futures['现金红利'] - sum_today_futures['交易费用'] - sum_today_futures['印花税']\n\n all_sum_ = pd.DataFrame({'日期':Date,\n '账户余额变动': balance,\n '当日现金红利': sum_today['现金红利'] + sum_today_futures['现金红利'],\n '当日浮盈': sum_today['浮盈'] + sum_today_futures['浮盈'],\n '当日实盈': sum_today['实盈'] + sum_today_futures['实盈'],\n '当日交易佣金': sum_today['交易费用'] + sum_today_futures['交易费用'],\n '当日印花税': sum_today['印花税'] + sum_today_futures['印花税'],\n '累计现金红利': sum['现金红利'] + sum_futures['现金红利'],\n '浮盈': sum_today['浮盈'] + sum_today_futures['浮盈'],\n '累计实盈': sum['实盈'] + sum_futures['实盈'],\n '累计交易佣金': sum['交易费用'] + sum_futures['交易费用'],\n '累计印花税': sum['印花税'] + sum_futures['印花税']},\n index = [Date])\n\n return all_sum_\n\n\ndef fixLeg(Begin, End, Principal, Rate):\n '''\n Accrue fixed-leg interest at annually compounded Rate over [Begin, End].\n :param Begin:\n :param End:\n :param Principal:\n :param Rate:\n :return:\n '''\n if isinstance(Begin, str) == True:\n begin = datetime.datetime.strptime(Begin, \"%Y-%m-%d\")\n else:\n begin = Begin\n\n if isinstance(End, str) == True:\n end = datetime.datetime.strptime(End, \"%Y-%m-%d\")\n else:\n end = End\n interval_days = end - begin\n Interest = Principal * ((1 + Rate) ** (interval_days.days / 365) - 1)\n return Interest\n\n\n
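# Illustrative accrual (hypothetical numbers): fixLeg('2020-11-01', '2020-11-06',\n# 1000000, 0.03) compounds 5 days: 1e6 * (1.03 ** (5/365) - 1) ~= 405.0\n\n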
tdate = tdate_.iloc[0]\n today_futures = calToday(Date=tdate, Data=data_futures, Type='futures')\n else:\n today_futures = all_position_futures.loc[all_position_futures['日期'] == ydate, '证券代码':'收盘价']\n tdate = input('请输入日期(格式为xxxx-xx-xx):')\n today_futures['日期'] = [tdate] * len(today_futures)\n tclose_futures = extractData(tdate, today_futures['证券代码'].drop_duplicates().to_list())\n today_futures['收盘价'] = tclose_futures['收盘价']\n print('ATTENTION: we did not trade futures today!')\n\n # In[]\n #\n all_position = todayPerform(Date=tdate, Position=all_position_, Today=today, Type='stock')\n all_position_futures = todayPerform(Date=tdate, Position=all_position_futures, Today=today_futures, Type='futures')\n\n all_position.to_excel('./result/all_position.xlsx',index = False)\n all_position_futures.to_excel('./result/all_positionfutures.xlsx',index = False)\n\n # # In[]\n all_sum_ = sum(Date=tdate, Position=all_position, Position_futures=all_position_futures)\n all_sum = all_sum.append(all_sum_)\n all_sum.to_excel('./result/all_sum.xlsx',index = False)\n print(all_sum_)\n\n # In[]\n rate = (313687.579294248 / 383717693.51 + 1) ** (365 / 4) - 1\n fix = fixLeg(Begin='2020-11-01', End=tdate, Principal=383717693.51, Rate=rate)\n print('fix', '%.2f' % fix)\n print('TRS', '%.2f' % (all_sum['账户余额变动'].sum() - fix))\n\n","sub_path":"TRS_test202011061746.py","file_name":"TRS_test202011061746.py","file_ext":"py","file_size_in_byte":22618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"92092275","text":"#!/usr/bin/env python3\n# Copyright (c) 2018 The Zcash developers\n# Copyright (c) 2020-2021 The PIVX Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or https://www.opensource.org/licenses/mit-license.php .\n\nfrom decimal import Decimal\nfrom time import sleep\n\nfrom test_framework.test_framework import PivxTestFramework\nfrom test_framework.util import (\n assert_equal,\n assert_raises_rpc_error,\n connect_nodes,\n disconnect_nodes,\n satoshi_round,\n get_coinstake_address,\n wait_until,\n)\n\n\n# Test wallet behaviour with Sapling addresses\nclass WalletSaplingTest(PivxTestFramework):\n\n def set_test_params(self):\n self.num_nodes = 4\n saplingUpgrade = ['-nuparams=v5_shield:201']\n self.extra_args = [saplingUpgrade, saplingUpgrade, saplingUpgrade, saplingUpgrade]\n self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')\n\n def check_tx_in_mempool(self, txids):\n self.sync_mempools()\n mempool = self.nodes[0].getrawmempool(True)\n for txid in txids:\n assert txid in mempool\n\n def wait_for_spork(self, fEnabled, spork_id):\n sleep(2)\n for i in range(self.num_nodes):\n wait_until(lambda: self.is_spork_active(i, spork_id) == fEnabled, timeout=5)\n\n def run_test(self):\n self.nodes[0].generate(2)\n self.sync_all()\n assert_equal(self.nodes[1].getblockcount(), 202)\n taddr1 = self.nodes[1].getnewaddress()\n saplingAddr0 = self.nodes[0].getnewshieldaddress()\n saplingAddr1 = self.nodes[1].getnewshieldaddress()\n\n # Verify addresses\n assert(saplingAddr0 in self.nodes[0].listshieldaddresses())\n assert(saplingAddr1 in self.nodes[1].listshieldaddresses())\n\n # Verify balance\n assert_equal(self.nodes[0].getshieldbalance(saplingAddr0), Decimal('0'))\n assert_equal(self.nodes[1].getshieldbalance(saplingAddr1), Decimal('0'))\n assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), Decimal('0'))\n\n recipients = [{\"address\": saplingAddr0, \"amount\": 
Decimal('10')}]\n\n # Try fee too low\n fee_too_low = 0.001\n self.log.info(\"Trying to send a transaction with fee too low...\")\n assert_raises_rpc_error(-4, \"Fee set (%.3f) too low. Must be at least\" % fee_too_low,\n self.nodes[0].rawshieldsendmany,\n \"from_transparent\", recipients, 1, fee_too_low)\n\n # Try fee too high.\n fee_too_high = 20\n self.log.info(\"Good. It was not possible. Now try a tx with fee too high...\")\n assert_raises_rpc_error(-4, \"The transaction fee is too high: %.2f >\" % fee_too_high,\n self.nodes[0].rawshieldsendmany,\n \"from_transparent\", recipients, 1, fee_too_high)\n\n # Trying to send a rawtx with low fee directly\n self.log.info(\"Good. It was not possible. Now try with a raw tx...\")\n self.restart_node(0, extra_args=self.extra_args[0]+['-minrelaytxfee=0.0000001'])\n rawtx_hex = self.nodes[0].rawshieldsendmany(\"from_transparent\", recipients, 1)\n self.restart_node(0, extra_args=self.extra_args[0])\n connect_nodes(self.nodes[0], 1)\n assert_raises_rpc_error(-26, \"insufficient fee\",\n self.nodes[0].sendrawtransaction, rawtx_hex)\n self.log.info(\"Good. Not accepted in the mempool.\")\n\n # Fixed fee\n fee = 0.05\n\n # Node 0 shields some funds\n # taddr -> Sapling\n self.log.info(\"TX 1: shield funds from specified transparent address.\")\n mytxid1 = self.nodes[0].shieldsendmany(get_coinstake_address(self.nodes[0]), recipients, 1, fee)\n\n # shield more funds automatically selecting the transparent inputs\n self.log.info(\"TX 2: shield funds from any transparent address.\")\n mytxid2 = self.nodes[0].shieldsendmany(\"from_transparent\", recipients, 1, fee)\n self.check_tx_in_mempool([mytxid1, mytxid2])\n\n self.nodes[2].generate(1)\n self.sync_all()\n\n # shield more funds creating and then sending a raw transaction\n self.log.info(\"TX 3: shield funds creating and sending raw transaction.\")\n tx_hex = self.nodes[0].rawshieldsendmany(\"from_transparent\", recipients, 1, fee)\n\n # Check SPORK_20 for sapling maintenance mode\n SPORK_20 = \"SPORK_20_SAPLING_MAINTENANCE\"\n self.activate_spork(0, SPORK_20)\n self.wait_for_spork(True, SPORK_20)\n assert_raises_rpc_error(-26, \"bad-tx-sapling-maintenance\",\n self.nodes[0].sendrawtransaction, tx_hex)\n self.log.info(\"Good. Not accepted when SPORK_20 is active.\")\n\n # Try with RPC...\n assert_raises_rpc_error(-8, \"SHIELD in maintenance (SPORK 20)\",\n self.nodes[0].shieldsendmany, \"from_transparent\", recipients, 1, fee)\n\n # Disable SPORK_20 and retry\n sleep(5)\n self.deactivate_spork(0, SPORK_20)\n self.wait_for_spork(False, SPORK_20)\n mytxid3 = self.nodes[0].sendrawtransaction(tx_hex)\n self.log.info(\"Good. 
Accepted when SPORK_20 is not active.\")\n self.check_tx_in_mempool([mytxid3])\n\n self.nodes[2].generate(1)\n self.sync_all()\n\n # Verify balance\n assert_equal(self.nodes[0].getshieldbalance(saplingAddr0), Decimal('30'))\n assert_equal(self.nodes[1].getshieldbalance(saplingAddr1), Decimal('0'))\n assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), Decimal('0'))\n self.log.info(\"Balances check out\")\n\n # Now disconnect the block, activate SPORK_20, and try to reconnect it\n disconnect_nodes(self.nodes[0], 1)\n tip_hash = self.nodes[0].getbestblockhash()\n self.nodes[0].invalidateblock(tip_hash)\n assert tip_hash != self.nodes[0].getbestblockhash()\n assert_equal(self.nodes[0].getshieldbalance(saplingAddr0), Decimal('20'))\n self.log.info(\"Now trying to connect block with shield tx, when SPORK_20 is active\")\n self.activate_spork(0, SPORK_20)\n self.nodes[0].reconsiderblock(tip_hash)\n assert tip_hash != self.nodes[0].getbestblockhash() # Block NOT connected\n assert_equal(self.nodes[0].getshieldbalance(saplingAddr0), Decimal('20'))\n self.log.info(\"Good. Not possible.\")\n\n # Deactivate SPORK_20 and reconnect\n sleep(1)\n self.deactivate_spork(0, SPORK_20)\n self.nodes[0].reconsiderblock(tip_hash)\n self.nodes[0].syncwithvalidationinterfacequeue()\n assert_equal(tip_hash, self.nodes[0].getbestblockhash()) # Block connected\n assert_equal(self.nodes[0].getshieldbalance(saplingAddr0), Decimal('30'))\n self.log.info(\"Reconnected after deactivation of SPORK_20. Balance restored.\")\n connect_nodes(self.nodes[0], 1)\n\n # Node 0 sends some shield funds to node 1\n # Sapling -> Sapling\n # -> Sapling (change)\n self.log.info(\"TX 4: shield transaction from specified sapling address.\")\n recipients4 = [{\"address\": saplingAddr1, \"amount\": Decimal('10')}]\n mytxid4 = self.nodes[0].shieldsendmany(saplingAddr0, recipients4, 1, fee)\n self.check_tx_in_mempool([mytxid4])\n\n self.nodes[2].generate(1)\n self.sync_all()\n\n # Send more shield funds (this time with automatic selection of the source)\n self.log.info(\"TX 5: shield transaction from any sapling address.\")\n recipients5 = [{\"address\": saplingAddr1, \"amount\": Decimal('5')}]\n mytxid5 = self.nodes[0].shieldsendmany(\"from_shield\", recipients5, 1, fee)\n self.check_tx_in_mempool([mytxid5])\n\n self.nodes[2].generate(1)\n self.sync_all()\n\n # Send more shield funds (with create + send raw transaction)\n self.log.info(\"TX 6: shield raw transaction.\")\n tx_hex = self.nodes[0].rawshieldsendmany(\"from_shield\", recipients5, 1, fee)\n mytxid6 = self.nodes[0].sendrawtransaction(tx_hex)\n self.check_tx_in_mempool([mytxid6])\n\n self.nodes[2].generate(1)\n self.sync_all()\n\n # Shield more funds to a different address to verify multi-source notes spending\n saplingAddr2 = self.nodes[0].getnewshieldaddress()\n self.log.info(\"TX 7: shield funds to later verify multi source notes spending.\")\n recipients = [{\"address\": saplingAddr2, \"amount\": Decimal('10')}]\n mytxid7 = self.nodes[0].shieldsendmany(get_coinstake_address(self.nodes[0]), recipients, 1, fee)\n self.check_tx_in_mempool([mytxid7])\n\n self.nodes[2].generate(5)\n self.sync_all()\n\n # Verify multi-source notes spending\n tAddr0 = self.nodes[0].getnewaddress()\n self.log.info(\"TX 8: verifying multi source notes spending.\")\n recipients = [{\"address\": tAddr0, \"amount\": Decimal('11')}]\n mytxid8 = self.nodes[0].shieldsendmany(\"from_shield\", recipients, 1, fee)\n self.check_tx_in_mempool([mytxid8])\n\n self.nodes[2].generate(1)\n self.sync_all()\n\n # 
Verify balance\n assert_equal(self.nodes[0].getshieldbalance(saplingAddr0), Decimal('4.9')) # 30 received - (20 sent + 0.15 fee) - 4.95 sent\n assert_equal(self.nodes[1].getshieldbalance(saplingAddr1), Decimal('20')) # 20 received\n assert_equal(self.nodes[0].getshieldbalance(saplingAddr2), Decimal('3.9')) # 10 received - 10 sent + 3.9 change\n assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), Decimal('0'))\n assert_equal(self.nodes[0].getshieldbalance(), Decimal('8.8'))\n self.log.info(\"Balances check out\")\n\n # Node 1 sends some shield funds to node 0, as well as unshielding\n # Sapling -> Sapling\n # -> taddr\n # -> Sapling (change)\n self.log.info(\"TX 10: deshield funds from specified sapling address.\")\n recipients7 = [{\"address\": saplingAddr0, \"amount\": Decimal('8')}]\n recipients7.append({\"address\": taddr1, \"amount\": Decimal('10')})\n mytxid7 = self.nodes[1].shieldsendmany(saplingAddr1, recipients7, 1, fee)\n self.check_tx_in_mempool([mytxid7])\n\n self.nodes[2].generate(1)\n self.sync_all()\n\n # Verify balance\n assert_equal(self.nodes[0].getshieldbalance(saplingAddr0), Decimal('12.9')) # 4.9 prev balance + 8 received\n assert_equal(self.nodes[1].getshieldbalance(saplingAddr1), Decimal('1.95')) # 20 prev balance - (18 sent + 0.05 fee)\n assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), Decimal('10'))\n self.log.info(\"Balances check out\")\n\n # Verify existence of Sapling related JSON fields\n resp = self.nodes[0].getrawtransaction(mytxid7, 1)\n assert_equal(Decimal(resp['valueBalance']), Decimal('10.05')) # 20 shield input - 8 shield spend - 1.95 change\n assert_equal(len(resp['vShieldSpend']), 3)\n assert_equal(len(resp['vShieldOutput']), 2)\n assert('bindingSig' in resp)\n shieldedSpend = resp['vShieldSpend'][0]\n assert('cv' in shieldedSpend)\n assert('anchor' in shieldedSpend)\n assert('nullifier' in shieldedSpend)\n assert('rk' in shieldedSpend)\n assert('proof' in shieldedSpend)\n assert('spendAuthSig' in shieldedSpend)\n shieldedOutput = resp['vShieldOutput'][0]\n assert('cv' in shieldedOutput)\n assert('cmu' in shieldedOutput)\n assert('ephemeralKey' in shieldedOutput)\n assert('encCiphertext' in shieldedOutput)\n assert('outCiphertext' in shieldedOutput)\n assert('proof' in shieldedOutput)\n self.log.info(\"Raw transaction decoding checks out\")\n\n # Verify importing a spending key will update the nullifiers and witnesses correctly\n self.log.info(\"Checking exporting/importing a spending key...\")\n sk0 = self.nodes[0].exportsaplingkey(saplingAddr0)\n saplingAddrInfo0 = self.nodes[2].importsaplingkey(sk0, \"yes\")\n assert_equal(saplingAddrInfo0[\"address\"], saplingAddr0)\n assert_equal(self.nodes[2].getshieldbalance(saplingAddrInfo0[\"address\"]), Decimal('12.9'))\n sk1 = self.nodes[1].exportsaplingkey(saplingAddr1)\n saplingAddrInfo1 = self.nodes[2].importsaplingkey(sk1, \"yes\")\n assert_equal(saplingAddrInfo1[\"address\"], saplingAddr1)\n assert_equal(self.nodes[2].getshieldbalance(saplingAddrInfo1[\"address\"]), Decimal('1.95'))\n\n # Verify importing a viewing key will update the nullifiers and witnesses correctly\n self.log.info(\"Checking exporting/importing a viewing key...\")\n extfvk0 = self.nodes[0].exportsaplingviewingkey(saplingAddr0)\n saplingAddrInfo0 = self.nodes[3].importsaplingviewingkey(extfvk0, \"yes\")\n assert_equal(saplingAddrInfo0[\"address\"], saplingAddr0)\n assert_equal(Decimal(self.nodes[3].getshieldbalance(saplingAddrInfo0[\"address\"], 1, True)), Decimal('12.9'))\n extfvk1 = 
self.nodes[1].exportsaplingviewingkey(saplingAddr1)\n saplingAddrInfo1 = self.nodes[3].importsaplingviewingkey(extfvk1, \"yes\")\n assert_equal(saplingAddrInfo1[\"address\"], saplingAddr1)\n assert_equal(self.nodes[3].getshieldbalance(saplingAddrInfo1[\"address\"], 1, True), Decimal('1.95'))\n # no balance in the wallet\n assert_equal(self.nodes[3].getshieldbalance(), Decimal('0'))\n # watch only balance\n assert_equal(self.nodes[3].getshieldbalance(\"*\", 1, True), Decimal('14.85'))\n\n # Now shield some funds using sendmany\n self.log.info(\"TX11: Shielding coins to multiple destinations with sendmany RPC...\")\n prev_balance = self.nodes[0].getbalance()\n recipients8 = {saplingAddr0: Decimal('8'), saplingAddr1: Decimal('1'), saplingAddr2: Decimal('0.5')}\n mytxid11 = self.nodes[0].sendmany(\"\", recipients8)\n self.check_tx_in_mempool([mytxid11])\n self.log.info(\"Done. Checking details and balances...\")\n\n # Decrypted transaction details should be correct\n pt = self.nodes[0].viewshieldtransaction(mytxid11)\n fee = pt[\"fee\"]\n assert_equal(pt['txid'], mytxid11)\n assert_equal(len(pt['spends']), 0)\n assert_equal(len(pt['outputs']), 3)\n found = [False] * 3\n for out in pt['outputs']:\n assert_equal(pt['outputs'].index(out), out['output'])\n if out['address'] == saplingAddr0:\n assert_equal(out['outgoing'], False)\n assert_equal(out['value'], Decimal('8'))\n found[0] = True\n elif out['address'] == saplingAddr1:\n assert_equal(out['outgoing'], True)\n assert_equal(out['value'], Decimal('1'))\n found[1] = True\n else:\n assert_equal(out['address'], saplingAddr2)\n assert_equal(out['outgoing'], False)\n assert_equal(out['value'], Decimal('0.5'))\n found[2] = True\n assert_equal(found, [True] * 3)\n\n # Verify balance\n self.nodes[2].generate(1)\n self.sync_all()\n assert_equal(self.nodes[0].getshieldbalance(saplingAddr0), Decimal('20.9')) # 12.9 prev balance + 8 received\n assert_equal(self.nodes[1].getshieldbalance(saplingAddr1), Decimal('2.95')) # 1.95 prev balance + 1 received\n assert_equal(self.nodes[0].getshieldbalance(saplingAddr2), Decimal('4.4')) # 3.9 prev balance + 0.5 received\n # Balance of node 0 is: prev_balance - 1 PIV (+fee) sent externally + 250 PIV matured coinbase\n assert_equal(self.nodes[0].getbalance(), satoshi_round(prev_balance + Decimal('249') - Decimal(fee)))\n\n # Now shield some funds using sendtoaddress\n self.log.info(\"TX12: Shielding coins with sendtoaddress RPC...\")\n prev_balance = self.nodes[0].getbalance()\n mytxid12 = self.nodes[0].sendtoaddress(saplingAddr0, Decimal('10'))\n self.check_tx_in_mempool([mytxid12])\n self.log.info(\"Done. 
Checking details and balances...\")\n\n # Decrypted transaction details should be correct\n pt = self.nodes[0].viewshieldtransaction(mytxid12)\n fee = pt[\"fee\"]\n assert_equal(pt['txid'], mytxid12)\n assert_equal(len(pt['spends']), 0)\n assert_equal(len(pt['outputs']), 1)\n out = pt['outputs'][0]\n assert_equal(out['address'], saplingAddr0)\n assert_equal(out['outgoing'], False)\n assert_equal(out['value'], Decimal('10'))\n\n # Verify balance\n self.nodes[2].generate(1)\n self.sync_all()\n assert_equal(self.nodes[0].getshieldbalance(saplingAddr0), Decimal('30.9')) # 20.9 prev balance + 10 received\n\n self.log.info(\"All good.\")\n\n\nif __name__ == '__main__':\n WalletSaplingTest().main()\n","sub_path":"test/functional/sapling_wallet.py","file_name":"sapling_wallet.py","file_ext":"py","file_size_in_byte":16788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"240527026","text":"import json\nimport wtf\nimport wtf.typ\n\nclass Object:\n\n __slots__ = (\"__dict__\", \"__path__\", \"_type\")\n\n def __init__(self):\n super().__init__()\n self._type = wtf.typ.get_type(self)\n\n def __iter__(self):\n return iter(self.__dict__)\n\n def __len__(self):\n return len(self.__dict__)\n\n def __str__(self):\n return json.dumps(self, default=default, indent=4, sort_keys=True)\n\n def json(self):\n return json.dumps(self, default=default, sort_keys=True)\n\ndef default(o):\n if isinstance(o, Object):\n return vars(o)\n if isinstance(o, dict):\n return o.items()\n if isinstance(o, list):\n return iter(o)\n if type(o) in [str, True, False, int, float]:\n return o\n return repr(o)\n\ndef edit(o, setter):\n if not setter:\n setter = {}\n count = 0\n for key, value in items(setter):\n count += 1\n if \",\" in value:\n value = value.split(\",\")\n if value in [\"True\", \"true\"]:\n set(o, key, True)\n elif value in [\"False\", \"false\"]:\n set(o, key, False)\n else:\n set(o, key, value)\n return count\n\ndef eq(o1, o2):\n if isinstance(o2, (Dict, dict)):\n return o1.__dict__ == o2.__dict__\n return False\n\ndef format(o, keys=None, full=False):\n if keys is None:\n keys = vars(o).keys()\n res = []\n txt = \"\"\n for key in keys:\n if \"ignore\" in dir(o) and key in o.ignore:\n continue\n val = get(o, key, None)\n if not val:\n continue\n val = str(val)\n if key == \"text\":\n val = val.replace(\"\\\\n\", \"\\n\")\n if full:\n res.append(\"%s=%s \" % (key, val))\n else:\n res.append(val)\n for val in res:\n txt += \"%s%s\" % (val.strip(), \" \")\n return txt.strip()\n\ndef get(o, key, default=None):\n try:\n return o[key]\n except (TypeError, KeyError):\n try:\n return o.__dict__[key]\n except (AttributeError, KeyError):\n return getattr(o, key, default)\n\ndef items(o):\n try:\n return o.__dict__.items()\n except AttributeError:\n return o.items()\n \ndef keys(o):\n return o.__dict__.keys()\n\ndef ne(o1, o2):\n return o1.__dict__ != o2.__dict__\n\ndef search(o, match={}):\n res = False\n for key, value in items(match):\n val = get(o, key, None)\n if val:\n if not value:\n res = True\n continue\n if value in str(val):\n res = True\n continue\n else:\n res = False\n break\n else:\n res = False\n break\n return res\n\ndef set(o, key, val):\n setattr(o, key, val)\n\ndef setter(o, d):\n if not d:\n d = {}\n count = 0\n for key, value in d.items():\n if \",\" in value:\n value = value.split(\",\")\n otype = type(value)\n if value in [\"True\", \"true\"]:\n set(o, key, True)\n elif value in [\"False\", \"false\"]:\n set(o, key, False)\n elif otype == list:\n 
set(o, key, value)\n elif otype == str:\n set(o, key, value)\n else:\n setattr(o, key, value)\n count += 1\n return count\n\ndef sliced(o, keys=None):\n t = type(o)\n val = t()\n if not keys:\n keys = o.keys()\n for key in keys:\n try:\n val[key] = o[key]\n except KeyError:\n pass\n return val\n\ndef typed(o):\n return update(type(o)(), o)\n\ndef update(o1, o2, keys=None, skip=False):\n if not o2:\n return o1\n for key in o2:\n val = get(o2, key)\n if keys and key not in keys:\n continue\n if skip and not val:\n continue\n set(o1, key, val)\n\ndef update2(o1, o2):\n try:\n o1.__dict__.update(o2)\n except:\n o1.update(o2)\n\ndef values(o):\n return o.__dict__.values()\n\ndef xdir(o, skip=\"\"):\n for k in dir(o):\n if skip and skip in k:\n continue\n yield k\n","sub_path":"wtf/obj.py","file_name":"obj.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"368037579","text":"import prediction as pred\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom itertools import islice\nimport matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.colorbar as mplcb\nfrom matplotlib.colors import LogNorm\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport sys\nimport cProfile, pstats, io\nimport copy\nimport csv\nimport mmap\nimport os\n\n__author__ = 'Pierce Rixon'\nselect = 1 #up to 5\n\nnull = None\n\n\ndef main():\n #ws='whitespace'\n #ts='timescale'\n #f='frequency'\n #bw='bandwidth'\n #fn='frame_no'\n \n\n print('Running Analysis Suite')\n\n matplotlib.style.use('ggplot')\n\n root = tk.Tk()\n root.withdraw()\n \n #Use metadata to strip the 'Total whitespace' message out of the end of each csv file selected\n #metadata()\n\n print('Select configuration file')\n cfgfile = filedialog.askopenfilename(filetypes = [(\"config files\",\"*.cfg\")])\n\n with open(cfgfile) as cfg:\n config = cfg.read().splitlines()\n\n #occupancy(config)\n #occupancy_v2(config)\n occupancy_csv(config) #NOT WORKING FOR LARGE DATASETS 100MB+\n #dutycycle(config)\n #PSD(config)\n\n #print('Select window file')\n filename = filedialog.askopenfilename(filetypes = [(\"csv files\",\"*.csv\")]) #('Window_dump.csv') \n \n #pred.pred(filename)\n\n #hexbin(filename)\n #analyse(filename,select) #For analysis package\n #dev_sim(filename) #For device simulator\n #legacy_sns(filename)\n #legacy(filename)\n\n\n\ndef metadata():\n \n print('Select files to remove \\'total whitespace\\' message and dump to metadata file')\n print('because im lazy, if the file does not have \\'total whitespace\\' at the end, it will take 5ever for the function to return')\n filenames = filedialog.askopenfilenames(filetypes = [(\"csv files\",\"*.csv\")])\n filelist = list(filenames)\n\n mpath = os.path.dirname(filelist[0])+'/metadata.txt'\n\n print(mpath)\n\n print('number of files: {}'.format(len(filelist)))\n #print(filelist)\n\n meta_file = open(mpath, 'w+')\n meta_file.write('Number of files: {}\\n'.format(len(filelist)))\n\n for f in filelist:\n meta_file.write(f)\n # meta_file.write('\\n')\n with open(f, 'r+') as file:\n file.seek(0, os.SEEK_END)\n pos = file.tell() - 1\n end = file.tell() - 1\n\n while pos > 0 and file.read(1) != \"T\":\n pos -= 1\n file.seek(pos, os.SEEK_SET)\n\n pos -= 1\n file.seek(pos, os.SEEK_SET)\n\n if pos > 0:\n string = file.read(end-pos)\n print(string)\n meta_file.write(string+'\\n\\n')\n file.seek(pos, os.SEEK_SET)\n file.truncate()\n 
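# Aside: the byte-by-byte reverse seek above can be collapsed into a single
# tail scan. A minimal sketch under the same assumption that the sentinel text
# begins with "Total"; trim_trailing_message is a hypothetical helper, not part
# of this script:
def trim_trailing_message(path, marker=b"Total", window=4096):
    """Truncate path at the last occurrence of marker within its final bytes."""
    import os
    with open(path, "rb+") as fh:
        fh.seek(0, os.SEEK_END)
        size = fh.tell()
        start = max(0, size - window)
        fh.seek(start)
        idx = fh.read().rfind(marker)    # last occurrence inside the tail window
        if idx != -1:
            fh.truncate(start + idx)     # drop the sentinel and everything after
            return True
    return False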
else:\n print('File does not have Total Whitespace message')\n file.close()\n\ndef PSD(config):\n print('Now running PSD plot, ensure a PSD dataset is selected')\n filename = filedialog.askopenfilename(filetypes = [(\"csv files\",\"*.csv\")])\n dataset = pd.read_csv(filename, header=0)\n\n c_freq = np.float(config[2])\n #config[3] has the filter bandwidth, which is not active for a WBX daughterboard, as its locked to 40MHz\n c_bandwidth = np.float(config[4])\n\n print(dataset)\n\n d_len = dataset['avg'].size\n\n print(d_len)\n\n freq = np.linspace(0,d_len-1,d_len)\n \n fig = plt.figure(figsize=(22,10))\n fig.subplots_adjust(left=0.08, bottom=0.12, right=.97, top=.95)\n ax = fig.add_subplot(1,1,1) \n\n for spine in ['left','right','top','bottom']:\n ax.spines[spine].set_color('k')\n \n ax.tick_params(which = 'major', width=1, length=3, color='k')\n ax.tick_params(which = 'minor', width=.5, length=1, color='k')\n \n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n \n start = (c_freq - (c_bandwidth*.5))/1e6\n stop = (c_freq + (c_bandwidth*.5))/1e6\n ax.set_xticklabels(np.linspace(start,stop,11))\n ax.set_xticks(np.linspace(0,d_len-1,11))\n ax.set_xlim(0,d_len-1)\n ax.set_ylim(-130,-50)\n\n #ax.set_title('Power Spectrum Density', fontsize=36)\n ax.set_xlabel('Frequency (MHz)', fontsize=(36+6))\n ax.set_ylabel('Power (dBm)', fontsize=(36+6))\n ax.tick_params(axis='both', labelsize=(26+6))\n\n ax.plot(freq,dataset['max'], color='r', label=\"Max\")\n ax.plot(freq,dataset['avg'], color='k', label=\"Avg\")\n ax.plot(freq,dataset['min'], color='b', label=\"Min\")\n\n ax.axvline(x=d_len*.1, color='k', linestyle='--')\n ax.axvline(x=d_len*.9, color='k', linestyle='--')\n\n handles, labels = ax.get_legend_handles_labels()\n\n ax.legend(handles,labels,fontsize=24)\n plt.rc('axes', labelsize=(36+6)) \n plt.show() \n\n # here we are plotting stacked slices on top of eachother, plotting frequency vs time (in frames)\ndef occupancy_csv(config):\n print('Now running occupancy plot, ensure a bandwidth dataset is selected')\n filename = filedialog.askopenfilename(filetypes = [(\"csv files\",\"*.csv\")])\n \n with open(filename, 'r') as r:\n next(r)\n dataset = [[int(x) for x in rec] for rec in csv.reader(r, delimiter=',')]\n #dataset = pd.read_csv(filename, header=0)\n\n c_freq = np.float(config[2])\n #config[3] has the filter bandwidth, which is not active for a WBX daughterboard, as its locked to 40MHz\n c_bandwidth = np.float(config[4])\n\n resolution = 131072 #this will have to be modified depending on how the FFT is computed\n\n framemax = np.amax(dataset, axis=0)[4] #frame_no\n print(framemax)\n #framemax = np.max(dataset['frame_no'])\n\n hslices = 512 #number of horizontal slices\n vslices = 1024 #number of vertical slices\n \n hscans = int(framemax/hslices) + 1\n vscans = int((resolution*0.8)/vslices) #this should be an integer, if it isnt, tough :)\n\n freq_arry = np.zeros(resolution*0.8)\n indent = int(resolution * 0.1)\n\n mesh = np.zeros((hslices+1,vslices))\n\n hidx = 0\n count = 0\n\n #row:idx,timescale,frequency,bandwidth,whitespace,frame_no\n for row in dataset:\n #as the rows are sorted by frame number, we can just iterate through them\n if row[4] > (hidx+1)*hscans:\n\n #zero array\n if hidx%100 == 0 : print(hidx)\n for i in range(vslices):\n mesh[hidx,i] = (np.sum(freq_arry[i*vscans:((i+1)*vscans - 1)])/hscans - (vscans-1))*-1\n hidx = hidx + 1\n freq_arry.fill(0)\n \n count = count + 1\n freq_arry[row[1] - indent : row[1]+row[2]-1 - indent] += 1\n\n if 
count%100000 == 0:\n print(count)\n\n #drop lastrow\n #for i in range(vslices):\n # mesh[hidx,i] = (np.sum(freq_arry[i*vscans:((i+1)*vscans - 1)])/hscans - vscans)*-1\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1) \n \n for spine in ['left','right','top','bottom']:\n ax.spines[spine].set_color('k')\n \n ax.tick_params(which = 'major', width=1, length=3, color='k')\n ax.tick_params(which = 'minor', width=.5, length=1, color='k')\n \n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n #plt.subplot(111)\n meshm = np.ma.masked_where(mesh < 1, mesh)\n m = ax.pcolormesh(meshm, vmin=1, vmax=np.amax(mesh), cmap='gnuplot_r')\n \n ax.axis([0,vslices,0,hidx])\n ax.set_xticks(np.linspace(1,vslices,5))#, 26316, 52632])\n\n start = (c_freq - (c_bandwidth*.4))/1e6\n stop = (c_freq + (c_bandwidth*.4))/1e6\n ax.set_xticklabels(np.linspace(start,stop,5)/60)\n #ax.set_ticks(True)\n \n \n plt.colorbar(m,ax=ax)\n #plt.grid(True, which='major', axis='both', linestyle='-', color='none')\n \n ax.set_title('Spectrum Occupancy')\n ax.set_xlabel('Frequency (MHz)')\n ax.set_ylabel('Time (min)')\n\n ax.set_yticks(np.linspace(0,hidx,num=12))\n ax.set_yticklabels(np.around(np.linspace(0,hidx,num=12)*.005*hscans,decimals=2))\n\n plt.tight_layout()\n plt.show()\n\ndef occupancy(config):\n print('Now running occupancy plot, ensure a bandwidth dataset is selected')\n filename = filedialog.askopenfilename(filetypes = [(\"csv files\",\"*.csv\")])\n dataset = pd.read_csv(filename, header=0)\n\n c_freq = np.float(config[2])\n #config[3] has the filter bandwidth, which is not active for a WBX daughterboard, as its locked to 40MHz\n c_bandwidth = np.float(config[4])\n\n resolution = 131072 #this will have to be modified depending on how the FFT is computed\n\n framemax = np.max(dataset['frame_no'])\n\n hslices = 512 #number of horizontal slices\n vslices = 1024 #number of vertical slices\n \n hscans = int(framemax/hslices) + 1\n vscans = int((resolution*0.8)/vslices) #this should be an integer, if it isnt, tough :)\n\n freq_arry = np.zeros(resolution*0.8)\n indent = int(resolution * 0.1)\n\n mesh = np.zeros((hslices+1,vslices))\n\n hidx = 0\n count = 0\n\n #row:idx,timescale,frequency,bandwidth,whitespace,frame_no\n for row in dataset.itertuples():\n #as the rows are sorted by frame number, we can just iterate through them\n if row[5] > (hidx+1)*hscans:\n\n #zero array\n if hidx%100 == 0: print(hidx)\n for i in range(vslices):\n mesh[hidx,i] = (np.sum(freq_arry[i*vscans:((i+1)*vscans - 1)])/hscans - (vscans-1))*-1\n hidx = hidx + 1\n freq_arry.fill(0)\n \n count = count + 1\n freq_arry[row[2] - indent : row[2]+row[3]-1 - indent] += 1\n\n if count%100000 == 0:\n print(count)\n\n #drop lastrow\n #for i in range(vslices):\n # mesh[hidx,i] = (np.sum(freq_arry[i*vscans:((i+1)*vscans - 1)])/hscans - vscans)*-1\n fig = plt.figure(figsize=(22,10))\n fig.subplots_adjust(left=0.07, bottom=0.12, right=1.08, top=.97)\n ax = fig.add_subplot(1,1,1) \n \n for spine in ['left','right','top','bottom']:\n ax.spines[spine].set_color('k')\n \n ax.tick_params(which = 'major', width=1, length=3, color='k')\n ax.tick_params(which = 'minor', width=.5, length=1, color='k')\n \n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n #plt.subplot(111)\n meshm = np.ma.masked_where(mesh < 1, mesh)\n m = ax.pcolormesh(meshm, vmin=1, vmax=np.amax(mesh), cmap='inferno_r')\n \n ax.axis([0,vslices,0,hidx])\n ax.set_xticks(np.linspace(1,vslices,5))#, 26316, 52632])\n\n start = (c_freq - 
(c_bandwidth*.4))/1e6\n stop = (c_freq + (c_bandwidth*.4))/1e6\n ax.set_xticklabels(np.linspace(start,stop,5))\n #ax.set_ticks(True)\n \n cb = plt.colorbar(m,ax=ax)\n #labelstr = 'Observation Density (p/'+str(hscans*vscans)+')'\n labelstr = 'Observation Density'\n cb.set_label(labelstr, fontsize=(26+6))\n cb.ax.tick_params(labelsize=(20+6))\n #plt.grid(True, which='major', axis='both', linestyle='-', color='none')\n \n #ax.set_title('Spectrum Occupancy',fontsize=)\n ax.set_xlabel('Frequency (MHz)', fontsize=(36+6))\n ax.set_ylabel('Time (min)', fontsize=(36+6))\n ax.tick_params(axis='both', labelsize=(26+6))\n\n ax.set_yticks(np.linspace(0,hidx,num=11))\n #ax.set_yticklabels(np.around(np.linspace(0,hidx,num=12)*.005*hscans/60,decimals=1))\n ax.set_yticklabels(np.around(np.linspace(0,5,num=11),decimals=1))\n\n plt.rc('axes', labelsize=(32+6)) \n plt.show()\n\ndef occupancy_v2(config):\n\n#Occupancy is a measure of duration\n print('Now running occupancy plot. This will work for any partitioned dataset')\n filename = filedialog.askopenfilename(filetypes = [(\"csv files\",\"*.csv\")])\n dataset = pd.read_csv(filename, header=0)\n\n c_freq = np.float(config[2])\n #config[3] has the filter bandwidth, which is not active for a WBX daughterboard, as its locked to 40MHz\n c_bandwidth = np.float(config[4])\n\n resolution = 131072 #this will have to be modified depending on how the FFT is computed\n\n framemax = np.max(dataset['frame_no'])\n\n hslices = 1001 #number of horizontal slices\n vslices = 2001 #number of vertical slices\n \n hscans = int(framemax/hslices) + 1\n vscans = np.ceil((resolution*0.8)/vslices) #this should be an integer, if it isnt, tough :)\n\n indent = int(resolution * 0.1)\n\n #mesh[row,cols]\n mesh = np.full((hslices-1,vslices-1),np.int(hscans*vscans), dtype=np.int)\n\n count = 0\n\n #Condition the dataframe accordingly\n dataset['start'] = dataset['frame_no'].subtract(dataset['timescale']-1)\n dataset['frequency'] = dataset['frequency'].subtract(indent)\n \n print(dataset)\n print(\"hscans: {}\".format(hscans))\n print(\"vscans: {}\".format(vscans))\n # 0 1 2 3 4 5 6\n #row:idx,timescale,frequency,bandwidth,whitespace,frame_no,start\n for row in dataset.itertuples():\n\n\n #try slicing lel\n\n #mesh[np.int((row[6] - 1)/hscans) : np.int((row[2] - 1)/vscans), \\\n # np.int((row[6] + row[1] - 1)/hscans) : np.int((row[2] + row[3] - 1)/vscans)] += 1\n\n x = np.floor(row[2]/vscans)\n y = np.floor(row[6]/hscans)\n\n d = np.int(np.ceil(row[1]/hscans))\n b = np.int(np.ceil(row[3]/vscans))\n\n for i in range(d):\n for j in range(b):\n\n cell = vscans * hscans\n\n #weight = \n w_x = 0\n w_y = 0\n\n w_xl = row[2] - (x + j) * vscans \n w_xh = row[2] + row[3]-1 - (x + j + 1) * vscans\n\n if w_xl > 0 and w_xh > 0:\n w_x = vscans - w_xl + 1\n elif w_xl > 0 and w_xh <= 0:\n w_x = row[3]\n elif w_xh > 0 and w_xl <= 0:\n w_x = vscans\n else: \n w_x = row[2] + row[3]-1 - (x + j) * vscans\n\n w_yl = row[6] - (y + i) * hscans\n w_yh = row[6] + row[1]-1 - (y + i + 1) * hscans\n\n if w_yl > 0 and w_yh > 0:\n w_y = hscans - w_yl + 1\n elif w_yl > 0 and w_yh <= 0:\n w_y = row[1]\n elif w_yh > 0 and w_yl <= 0:\n w_y = hscans\n else:\n w_y = row[6] + row[1]-1 - (y + i) * hscans\n \n #print(row)\n #print(\"x:{},y:{} j{},i{} wx{} wy{}\".format(x,y,j,i,w_x,w_y))\n\n mesh[y + i, x + j] -= w_y*w_x\n \n count = count + 1\n \n if count%100 == 0:\n print(count)\n\n\n\n #Determine the starting time of a partition\n\n #print(\"Start added to dataset \\n {}\".format(dataset))\n\n ##perform thresholding\n 
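# The w_x / w_y case analysis in the loop above is interval-overlap arithmetic:
# how many bins (columns) and frames (rows) of one partition land inside a
# single mesh cell. A minimal equivalent for half-open integer intervals
# (sketch only; overlap is a hypothetical helper):
def overlap(start, length, cell_start, cell_len):
    """Length of the intersection of [start, start+length) with one grid cell."""
    lo = max(start, cell_start)
    hi = min(start + length, cell_start + cell_len)
    return max(0, hi - lo)

# e.g. a 10-bin partition starting at bin 7 against the cell covering [0, 10):
assert overlap(7, 10, 0, 10) == 3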
#dataset['start'] = np.ceil(dataset['start'].div(hscans))\n ##dataset['start'] = np.ceil(dataset['start']) \n \n #dataset['frequency'] = np.ceil(dataset['frequency'].subtract(indent).div(vscans))\n ##dataset['frequency'] = np.ceil(dataset['frequency'])\n \n #dataset['bandwidth'] = np.ceil(dataset['bandwidth'].div(vscans))\n ##dataset['bandwidth'] = np.ceil(dataset['bandwidth'])\n \n #dataset['timescale'] = np.ceil(dataset['timescale'].div(hscans))\n ##dataset['timescale'] = np.ceil(dataset['timescale'])\n\n\n #dataset = dataset.sort_values(['frequency','start'], ascending=True)\n\n #print(\"Thresholded dataset \\n {}\".format(dataset))\n\n #dataset_g = dataset.groupby(['start','frequency','timescale','bandwidth']).size().reset_index().rename(columns={0:'count'})\n\n #print(dataset_g)\n\n ##dataset = dataset.sort_values(['frequency','start'], ascending=True).reset_index()\n \n ##print(\"Sorted by frequency \\n {}\".format(dataset))\n ##Grouping must be the last operation, otherwise numpy functions will not work correctly\n\n\n\n ##row:idx,start,frequency,timescale,bandwidth,count\n #for row in dataset_g.itertuples():\n \n # for i in range(np.int(row[3])):\n # for j in range(np.int(row[4])):\n # mesh[np.int(row[1]) + i - 1, np.int(row[2]) + j - 1] += np.int(row[5])\n # count += 1\n\n # if count%10000 == 0:\n # print(count)\n\n #drop lastrow\n #for i in range(vslices):\n # mesh[hidx,i] = (np.sum(freq_arry[i*vscans:((i+1)*vscans - 1)])/hscans - vscans)*-1\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1) \n \n for spine in ['left','right','top','bottom']:\n ax.spines[spine].set_color('k')\n \n ax.tick_params(which = 'major', width=1, length=3, color='k')\n ax.tick_params(which = 'minor', width=.5, length=1, color='k')\n \n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n #plt.subplot(111)\n meshm = np.ma.masked_where(mesh < 1, mesh)\n m = ax.pcolormesh(meshm, vmin=1, vmax=np.amax(mesh), cmap='gnuplot_r')\n \n ax.axis([0,vslices,0,hslices])\n ax.set_xticks(np.linspace(1,vslices,5))#, 26316, 52632])\n\n start = (c_freq - (c_bandwidth*.4))/1e6\n stop = (c_freq + (c_bandwidth*.4))/1e6\n ax.set_xticklabels(np.linspace(start,stop,5))\n #ax.set_ticks(True)\n \n \n plt.colorbar(m,ax=ax)\n #plt.grid(True, which='major', axis='both', linestyle='-', color='none')\n \n ax.set_title('Spectrum Occupancy')\n ax.set_xlabel('Frequency (MHz)')\n ax.set_ylabel('Time (s)')\n\n #ax.set_yticks(np.linspace(0,hidx,num=12))\n #ax.set_yticklabels(np.around(np.linspace(0,hidx,num=12)*.005*hscans,decimals=2))\n\n plt.tight_layout()\n plt.show()\n\n\n\n#new improved spicy hexbin with distributions :D\ndef hexbin(filename):\n print('Now running hexbin plot, ensure a partitioned dataset is selected')\n dataset = pd.read_csv(filename, header=0)\n\n #matplotlib.rcParams.update({'font.size': 21}\n #plt.figure(figsize=(6,8), dpi=600)\n\n #xmax = np.power(10,np.ceil(np.log10(np.max(dataset['bandwidth']))))*.5\n bwmax = np.max(dataset['bandwidth'])\n print(bwmax*190)\n if np.power(10,np.ceil(np.log10(bwmax))) > 4*bwmax:\n xmax = np.power(10,np.ceil(np.log10(bwmax))) / 4\n elif np.power(10,np.ceil(np.log10(bwmax))) > 2*bwmax:\n xmax = np.power(10,np.ceil(np.log10(bwmax))) / 2\n else: \n xmax = np.power(10,np.ceil(np.log10(bwmax)))\n\n if xmax < 25000: xmax = 25000 #This is here as 4.75MHz is the minimax (smallest max) x axis value. 
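# For reference, the branching above that chooses xmax picks the smallest of
# 10^k/4, 10^k/2 and 10^k (k = ceil(log10(bwmax))) that still exceeds bwmax.
# Compact sketch of the same rule (round_axis_max is a hypothetical name):
import numpy as np

def round_axis_max(v):
    cap = 10 ** np.ceil(np.log10(v))
    if cap > 4 * v:
        return cap / 4
    if cap > 2 * v:
        return cap / 2
    return cap

assert round_axis_max(3000) == 5000.0    # cap = 1e4; 1e4 > 2*3000, so 1e4/2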
\n \n print(xmax*190)\n\n# xmin = 1 #Just for the BW=1 TS=1 dataset\n# ymin = 1\n xmin = 50\n #ymax = np.maximum(1e6,np.power(10,np.ceil(np.log10(np.max(dataset['timescale'])))))\n ymax = np.power(10,np.ceil(np.log10(np.max(dataset['timescale'])))) #for 5min segment\n ymin = np.min(dataset['timescale'])\n\n ##cheekybonusbits\n #array = np.histogram(dataset['timescale'], bins=np.logspace(np.log10(ymin), np.log10(ymax), num=200))\n #print(array)\n #x = array[1]\n #y = array[0]\n #print(x)\n #print(y)\n #cr_y = np.cumsum(y[::-1])[::-1] \n #print(cr_y)\n #print(\"len x: {}, len y: {}\".format(len(x[:-1]),len(cr_y)))\n #plt.figure()\n #ax = plt.subplot(111)\n #ax.scatter(x[:-1],cr_y)\n ##ax.set_yscale('log')\n #ax.set_xscale('log')\n #plt.show()\n\n #create figure hook, assign figure size and adjust padding around borders\n fig = plt.figure(figsize=(22,14))\n fig.subplots_adjust(left=0.1, bottom=0.09, right=.98, top=.98)\n\n #arrange the various axes nicely using gridspec\n gs=gridspec.GridSpec(5,6)\n gs.update(wspace = 0.03, hspace= 0.03)\n\n ax1 = plt.subplot(gs[1:,:-1])\n ax2 = plt.subplot(gs[1:,-1])\n ax3 = plt.subplot(gs[0,:-1])\n ax4 = gs[0,-1]\n\n gs2=gridspec.GridSpecFromSubplotSpec(12,16,ax4)\n cbax = plt.subplot(gs2[4,1:-1])\n\n #create the hexbin plot and a hook for the colourmap\n cbmap = ax1.hexbin(dataset['bandwidth'], dataset['timescale'], mincnt=1, xscale='log', yscale='log', cmap='inferno', norm=matplotlib.colors.LogNorm(), reduce_C_function=np.sum)\n ax1.axis([xmin, xmax, ymin, ymax])\n \n #populate the appropriate spines\n for spine in ['left','bottom']:\n ax1.spines[spine].set_color('k')\n \n ax2.spines['bottom'].set_color('k')\n ax3.spines['left'].set_color('k')\n \n #set tick sizes, colours and lengths\n ax1.tick_params(which = 'major', width=1, length=4, color='k')\n ax1.tick_params(which = 'minor', width=1, length=2, color='k')\n\n #manual xtick placement\n ax1.set_xticks([66, 132, 263, 526, 1316, 2632, 5263, 13158])#, 26316])#, 52632])\n ax1.set_xticklabels([r'12.5kHz', r'25kHz', r'50kHz', r'100kHz', r'250kHz', r'500kHz', r'1MHz', r'2.5MHz', r'5MHz', r'10MHz'])\n ax1.xaxis.set_ticks_position('bottom')\n ax1.set_xlabel('Bandwidth', fontsize=(24+18))\n \n #manual ytick placement\n ax1.set_yticks([10,19,48,95,190,477,1907,5722,11444,57220])#,171661,686646])\n ax1.set_yticklabels([r'50ms',r'100ms',r'250ms',r'500ms',r'1s',r'2.5s',r'10s',r'30s',r'1m',r'5m',r'15m',r'1h'])\n ax1.yaxis.set_ticks_position('left')\n ax1.set_ylabel('Duration', fontsize=(24+18))\n ax1.tick_params(axis='both', labelsize=(16+18))\n \n #ax1.set_title(\"With a log color scale\")\n #cbax = mplcb.make_axes_gridspec(ax4)\n\n cb = plt.colorbar(cbmap,cax=cbax, orientation='horizontal')\n cb.outline.set_visible(True)\n cb.outline.set_edgecolor('black') \n cb.set_label('Observation Density', fontsize=(14+12))\n cbax.xaxis.set_label_position('top')\n cb.ax.tick_params(labelsize=(14+12))\n #cb.set_ticks([np.log10(1),np.log10(10),np.log10(50),np.log10(100),np.log10(500)])\n #cb.set_ticklabels([1,10,50,100,500])\n\n #Timescale histogram \n ax2hist = ax2.hist(dataset['timescale'], bins=np.logspace(np.log10(ymin), np.log10(ymax), num=100), orientation='horizontal', log=True, color='k')\n ax2.set_yscale('log')\n ax2.set_xlim(1,np.power(10,np.ceil(np.log10(np.amax(ax2hist[0])))))\n\n ax2.yaxis.set_visible(False)\n ax2.xaxis.set_ticks_position('bottom')\n ax2.tick_params(which = 'major', width=1, length=4, color='k')\n ax2.tick_params(which = 'minor', width=1, length=2, color='k')\n \n ax2.tick_params(axis='both', 
labelsize=(14+18))\n\n #Better bandwidth histogram\n ax3.hist(dataset['bandwidth'], bins=np.logspace(np.log10(xmin), np.log10(xmax), num=200), log=True, color='k')\n ax3.set_xscale('log')\n ax3.set_xlim(xmin, xmax)\n #ax3.axis([xmin, xmax, 1, ymax])\n \n ax3.xaxis.set_visible(False)\n ax3.yaxis.set_ticks_position('left')\n ax3.tick_params(which = 'major', width=1, length=4, color='k')\n ax3.tick_params(which = 'minor', width=1, length=2, color='k')\n\n ax3.tick_params(axis='both', labelsize=(14+18))\n\n plt.rc('axes', labelsize=(20+18)) \n #dpi=plt.gcf().dpi is CRITICAL for saving an image that looks identical to the one displayed in plt.show()\n #plt.savefig('Basic.png', dpi=plt.gcf().dpi)\n plt.show()\n\n # here we are plotting totals of duration per frequency as a percentage of total duration\ndef dutycycle(filename):\n print('Now running dutycycle plot, ensure a duration dataset is selected')\n filename = filedialog.askopenfilename(filetypes = [(\"csv files\",\"*.csv\")])\n dataset = pd.read_csv(filename, header=0, dtype=np.int)\n \n resolution = 131072 #this will have to be modified depending on how the FFT is computed\n\n framemax = np.max(dataset['frame_no'])\n\n vslices = 1024\n vscans = int((resolution*0.8)/vslices)+1 #this should be an integer, if it isnt, tough :)\n \n newrez = int(resolution*0.8) + 1\n \n freq_arry = np.zeros(newrez)\n indent = int(resolution * 0.1)\n\n count = 0\n #row:idx,timescale,frequency,bandwidth,whitespace,frame_no\n for row in dataset.itertuples():\n #freq_arry[int((row[2] - indent)/vscans)] += row[1]\n freq_arry[row[2] - indent] += row[1]\n count = count + 1\n \n if count%100000 == 0:\n print(count)\n \n #now manipulate freq_arry, yes its a roundabout way, but ohwell\n #normalise\n freq_arry /= framemax #normalise\n freq_arry *= 100 #out of 100\n freq_arry -= 100 #whitespace is positive, so incumbent activity here will now be negative\n freq_arry *= -1 #make incumbent activity positive\n\n freq_max = np.zeros(vslices)\n freq_avg = np.zeros(vslices)\n for i in range(newrez):\n if freq_arry[i] > freq_max[int(i/vscans)]:\n freq_max[int(i/vscans)] = freq_arry[i]\n\n freq_avg[int(i/vscans)] += freq_arry[i]/vscans\n\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1) \n\n freq = np.linspace(0,vslices-1,vslices)\n# ax.scatter(freq,freq_max, edgecolor = '', c='k')\n# ax.scatter(freq,freq_avg, edgecolor = '', c='r')\n\n ax.plot(freq,freq_max, c='k')\n ax.plot(freq,freq_avg, c='r')\n\n ax.set_title('Duty Cycle')\n ax.set_xlabel('Frequency')\n ax.set_ylabel('Percentage')\n\n ax.axis([0,vslices,0,100])\n\n plt.show()\n\n\n\n\ndef analyse(filename,test):\n \n print('Now running test: {}'.format(test))\n dataset = pd.read_csv(filename, header=0)#, converters={0: np.int32, 1: np.int32, 2: np.int32, 3: np.int32, 4: np.int32}, dtype=np.int32)\n\n print(dataset.dtypes)\n #converters={'timescale': np.int32, 'frequency': np.int32, 'bandwidth': np.int32,'whitespace': np.int32, 'frame_no': np.int32}\n #cols:[0] timescale, [1] frequency, [2] bandwidth, [3] whitespace, [4] frame_no\n #in a loop (iterator), [0] = index, [1] = timescale ...\n print(dataset.columns)\n cols = dataset.columns\n\n \n nbins = 131072\n\n \n if(test == 1):\n #1. 
BW vs TS density plot, with TS and BW histograms/KDEs on the top and right hand side of the density plot\n\n bwmax = np.max(dataset[cols[2]])\n tsmax = np.max(dataset[cols[0]])\n\n print('BWMax: {}, TSMax: {}'.format(bwmax,tsmax))\n\n bwtsframe = pd.concat([dataset[cols[2]],dataset[cols[0]]], axis=1, keys=[cols[2],cols[0]])\n print(bwtsframe)\n\n #bwtsframe_red = bwtsframe.drop_duplicates()\n bwtsframe_red = bwtsframe.groupby([cols[2], cols[0]]).size().reset_index().rename(columns={0:'count'})\n print(bwtsframe_red)\n\n bwtsframe_red['ws_count'] = bwtsframe_red[cols[2]] * bwtsframe_red[cols[0]] * bwtsframe_red['count']\n\n print(bwtsframe_red)\n\n subprogram = 3\n ### PLOTTING THINGS HERE ###\n if (subprogram == 1):\n\n fig3 = plt.figure()\n axh = fig3.add_subplot(2,1,1)\n axh2 = fig3.add_subplot(2,1,2)\n\n bwtsframe[cols[2]].hist(ax=axh, bins = 200, bottom = .1, log = True)\n axh.set_yscale('log')\n axh.set_xlim(0,30000)\n axh.set_ylim(.1,1e8)\n #axh.set_xscale('log')\n\n bwtsframe[cols[0]].hist(ax=axh2, bins = 200, bottom = .1, log = True)\n axh2.set_yscale('log')\n axh2.set_xlim(0,60000)\n axh2.set_ylim(.1,1e8)\n #axh2.set_xscale('log')\n\n axh.set_title('Bandwidth Histogram')\n axh.set_xlabel('Bandwidth in Bins (190Hz/bin)')\n axh.set_ylabel('Density')\n\n axh2.set_title('Duration Histogram')\n axh2.set_xlabel('Timescale (5.3ms/unit)')\n axh2.set_ylabel('Density')\n\n plt.show()\n\n ##ax1 = fig.add_subplot(1,2,1) #convention (row,col,idx)\n ##ax2 = fig.add_subplot(1,2,2)\n fig = plt.figure()\n ax = fig.add_subplot(2,1,1)\n ax2 = fig.add_subplot(2,1,2)\n # plt.pcolormesh(mesh, norm=LogNorm(vmin=1, vmax=np.amax(mesh)), cmap='inferno')\n \n #plt.scatter(dataset[cols[2]],dataset[cols[0]])\n\n sc1 = ax.scatter(bwtsframe_red[cols[2]],bwtsframe_red[cols[0]],edgecolor='',c=bwtsframe_red['count'],cmap='inferno',norm=LogNorm(vmin=1,vmax=bwtsframe_red['count'].max()))\n sc2 = ax2.scatter(bwtsframe_red[cols[2]],bwtsframe_red[cols[0]],edgecolor='',c=bwtsframe_red['ws_count'],cmap='inferno',norm=LogNorm(vmin=1,vmax=bwtsframe_red['ws_count'].max()))\n\n #plt.hist2d(dataset[cols[2]],dataset[cols[0]],bins=1000)\n plt.colorbar(sc1,ax=ax)\n plt.colorbar(sc2,ax=ax2)\n\n #ticks = np.arange(0, bwmax, 6)\n #labels = range(ticks.size)\n #plt.xticks(ticks, labels)\n\n ax.set_yscale('log')\n ax.set_xscale('log')\n ax.set_ylim(1,1e5)\n ax.set_xlim(10,1e5)\n ax2.set_yscale('log')\n ax2.set_xscale('log')\n ax2.set_ylim(1,1e5)\n ax2.set_xlim(10,1e5)\n #ax.set_xlim(0,pxls)\n #ax.set_ylim(0,pxls)\n\n ax.set_title('Window Count')\n ax.set_xlabel('Bandwidth in Bins (190Hz/bin)')\n ax.set_ylabel('Timescale (5.3ms/unit)')\n\n ax2.set_title('Whitespace Density')\n ax2.set_xlabel('Bandwidth in Bins (190Hz/bin)')\n ax2.set_ylabel('Timescale (5.3ms/unit)')\n \n plt.show()\n\n if (subprogram == 2):\n #this second set of tests provides a plot of number of observations on the y axis versus the timescale or bandwidth. 
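# A toy run of the aggregation these subprograms rely on (synthetic rows, not
# the real capture), showing how per-shape counts and whitespace weights are
# derived before the groupby sums below:
import pandas as pd

toy = pd.DataFrame({"bandwidth": [66, 66, 132], "timescale": [10, 10, 19]})
red = (toy.groupby(["bandwidth", "timescale"]).size()
          .reset_index().rename(columns={0: "count"}))
red["ws_count"] = red["bandwidth"] * red["timescale"] * red["count"]
# -> bandwidth=66,  timescale=10, count=2, ws_count=1320
# -> bandwidth=132, timescale=19, count=1, ws_count=2508
bwsum_toy = red.groupby(["bandwidth"])["ws_count"].sum().reset_index()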
\n\n # wsvfframe = wsvfframe.groupby(['frequency','bandwidth'])['timescale'].sum().reset_index()\n\n bwsum = bwtsframe_red.groupby(['bandwidth'])['ws_count'].sum().reset_index()\n print('bwsum printout')\n print(bwsum)\n\n tssum = bwtsframe_red.groupby(['timescale'])['ws_count'].sum().reset_index()\n print('tssum printout')\n print(tssum)\n\n \n fig = plt.figure()\n ax = fig.add_subplot(2,2,2)\n ax2 = fig.add_subplot(2,2,4)\n ax.scatter(bwsum['bandwidth'],bwsum['ws_count'],edgecolor='')\n ax2.scatter(tssum['timescale'],tssum['ws_count'],edgecolor='')\n\n ax.set_yscale('log')\n ax.set_xscale('log')\n ax.set_xlim(1,1e5)\n ax.set_ylim(1,1e9)\n \n ax2.set_yscale('log')\n ax2.set_xscale('log')\n ax2.set_xlim(1,1e5)\n ax2.set_ylim(1,1e10)\n\n ax.set_title('Whitespace Distribution vs Bandwidth')\n ax.set_xlabel('Bandwidth in Bins (190Hz/bin)')\n ax.set_ylabel('Unique Whitespace')\n\n ax2.set_title('Whitespace Distribution vs Timescale')\n ax2.set_xlabel('Timescale (5.3ms/unit)')\n ax2.set_ylabel('Unique Whitespace')\n\n ## window count plots - number of unique window observations\n\n bwhist = dataset[cols[2]]\n tshist = dataset[cols[0]]\n\n bwhist = bwhist.groupby(bwhist).size().reset_index().rename(columns={0:'count'})\n\n tshist = tshist.groupby(tshist).size().reset_index().rename(columns={0:'count'})\n\n print(bwhist)\n\n axa = fig.add_subplot(2,2,1)\n ax2a = fig.add_subplot(2,2,3)\n axa.scatter(bwhist[cols[2]],bwhist['count'],edgecolor='')\n ax2a.scatter(tshist[cols[0]],tshist['count'],edgecolor='')\n\n axa.set_yscale('log')\n axa.set_xscale('log')\n axa.set_xlim(1,1e5)\n axa.set_ylim(1,1e6)\n \n ax2a.set_yscale('log')\n ax2a.set_xscale('log')\n ax2a.set_xlim(1,1e5)\n ax2a.set_ylim(1,1e6)\n\n axa.set_title('Window Bandwidth Distribution')\n axa.set_xlabel('Bandwidth in Bins (190Hz/bin)')\n axa.set_ylabel('Number of Observations')\n\n ax2a.set_title('Window Duration Distribution')\n ax2a.set_xlabel('Timescale (5.3ms/unit)')\n ax2a.set_ylabel('Number of Observations')\n\n plt.show()\n\n if (subprogram == 3):\n\n bwsum = bwtsframe_red.groupby(['bandwidth'])['ws_count'].sum().reset_index()\n print('bwsum printout')\n print(bwsum)\n\n tssum = bwtsframe_red.groupby(['timescale'])['ws_count'].sum().reset_index()\n print('tssum printout')\n print(tssum)\n\n numops = bwtsframe.groupby(['timescale']).size().reset_index().rename(columns={0:'count'})\n\n print(numops)\n \n\n tsmax = np.max(bwtsframe_red['timescale'])\n xmax = np.power(10,np.ceil(np.log10(tsmax)))\n\n ts_cumsum = np.cumsum(tssum['ws_count'][::-1])[::-1] \n\n ymax = np.power(10,np.ceil(np.log10(np.max(ts_cumsum))))\n\n print('Partitioned Whitespace: {}'.format(ts_cumsum[0]))\n\n delta = np.diff(ts_cumsum)\n delta = delta*-1\n print(delta)\n\n fig = plt.figure()\n ax = fig.add_subplot(3,1,1)\n ax2 = fig.add_subplot(3,1,2)\n #ax.scatter(bwsum['bandwidth'],bwsum['ws_count'],edgecolor='') \n ax.scatter(numops['timescale'],numops['count'],edgecolor='')\n ax2.scatter(tssum['timescale'],ts_cumsum,edgecolor='')\n ax2.scatter(tssum['timescale'],tssum['ws_count'],edgecolor='',color='r')\n #ax2.scatter(tssum['timescale'],ts_cumsum,edgecolor='')\n\n ax.set_yscale('log')\n ax.set_xscale('log')\n ax.set_xlim(1,xmax)\n ax.set_ylim(.1,1e4)\n \n ax2.set_yscale('log')\n ax2.set_xscale('log')\n ax2.set_xlim(1,xmax)\n ax2.set_ylim(1,ymax)\n\n #ax.set_title('Whitespace Distribution vs Bandwidth')\n #ax.set_xlabel('Bandwidth in Bins (190Hz/bin)')\n #ax.set_ylabel('Unique Whitespace')\n \n ax.set_title('Whitespace Observation Count vs Timescale')\n 
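# CCDF note: np.cumsum(x[::-1])[::-1], used for ts_cumsum above, converts the
# per-timescale totals into a complementary cumulative sum -- entry i holds the
# whitespace contributed by every window whose duration is >= timescale_i.
# Tiny check: np.cumsum(np.array([5, 3, 2])[::-1])[::-1] -> array([10, 5, 2])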
ax.set_xlabel('Timescale (5.3ms/unit)')\n ax.set_ylabel('Observation Count')\n\n ax2.set_title('Cumulative Whitespace vs Timescale')\n ax2.set_xlabel('Timescale (5.3ms/unit)')\n ax2.set_ylabel('Unique Whitespace')\n\n z = np.polyfit(tssum['timescale'],tssum['ws_count'], 1)\n p = np.poly1d(z)\n\n ax2.plot(tssum['timescale'],p(tssum['timescale']), 'k--')\n\n z2 = np.polyfit(np.log(tssum['timescale']),tssum['ws_count'], 2)\n p2 = np.poly1d(z2)\n ax2.plot(tssum['timescale'],p2(np.log(tssum['timescale'])), 'g--')\n\n ax3 = fig.add_subplot(3,1,3)\n\n perh_cumsum = (ts_cumsum/np.max(ts_cumsum)) *100\n ax3.scatter(tssum['timescale'],perh_cumsum, edgecolor='')\n ax3.set_xscale('log')\n ax3.set_xlim(1,xmax)\n\n z3 = np.polyfit(np.log(tssum['timescale']),perh_cumsum, 3)\n p3 = np.poly1d(z3)\n\n ax3.plot(tssum['timescale'],p3(np.log(tssum['timescale'])), 'k--')\n ## window count plots - number of unique window observations\n plt.show()\n\n #elif(test == 8):\n #BW vs WS and TS vs WS analysis - Test 1 is the complementary analysis of this test\n #we want to plot both unique whitespace incurred as well as total whitespace as the x axis decreases\n\n \n\n elif(test == 2):\n #2. WS vs Bins (real frequency - may need to preserve band starting frequency ....) \n #Series 1 - WS per bin due to partitioning algorithm\n #Series 2 - WS per bin based on the unpartitioned spectrum\n \n subprogram = 3\n\n wsvfframe = pd.concat([dataset[cols[1]],dataset[cols[2]],dataset[cols[0]]], axis=1, keys=[cols[1],cols[2],cols[0]])\n\n #wfc:[0] frequency, [1] bandwidth, [2] timescale\n #in a loop (iterator), [0] = index, [1] = frequency ...\n wfc = wsvfframe.columns\n print(wfc)\n \n freq = np.linspace(0,nbins-1,nbins)\n\n print(freq)\n relws = np.zeros(nbins)\n compws = np.zeros(nbins)\n diffarry = np.zeros(nbins)\n \n wsvfframe = wsvfframe.groupby(['frequency','bandwidth'])['timescale'].sum().reset_index()\n\n for row in wsvfframe.itertuples():\n relws[row[1]:row[1]+row[2]] += row[3]\n\n \n if True:\n fig = plt.figure()\n ax = fig.add_subplot(2,1,1)\n axa = fig.add_subplot(2,1,2)\n \n ax.scatter(freq,relws,edgecolor='')\n\n relwsS = np.sort(relws)\n\n axa.scatter(freq,relwsS[::-1],edgecolor='')\n\n axa.set_title('Whitespace Frequency Distribution CCDF')\n axa.set_xlabel('Descending Magnitude Ordered Bin Count')\n axa.set_ylabel('Whitespace Units over Observation Period')\n\n ax.set_title('Whitespace Frequency Distribution')\n ax.set_xlabel('Bandwidth in Bins (190Hz/bin)')\n ax.set_ylabel('Whitespace Units')\n\n plt.show()\n\n ### comaprison dataset NOTE: BW must = 1 for this dataset to be valid ###\n\n print('Select comparison windowset. ENSURE BW = 1')\n bw_1_file = filedialog.askopenfilename()\n\n bw_1_dataset = pd.read_csv(bw_1_file, header=0, dtype=np.int32)\n bcols = bw_1_dataset.columns\n\n bw_1_dataset = bw_1_dataset.groupby(by=['frequency'])['timescale'].sum().reset_index()\n\n for row in bw_1_dataset.itertuples():\n compws[row[1]] = row[2]\n \n #### test zone ####\n\n ### Profile the thing :)\n #pr = cProfile.Profile()\n #pr.enable()\n ### ... 
do something ...\n #pr.disable()\n #s = io.StringIO()\n #sortby = 'cumulative'\n #ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n #ps.print_stats()\n #print(s.getvalue()) \n\n ##### mmm tests ####\n\n diffarry = np.subtract(compws,relws)\n\n dfcomp = pd.DataFrame(data={'rel': relws, 'comp': compws})\n \n print(dfcomp)\n dfcomp = dfcomp.sort_values(['comp','rel'],ascending=False)\n\n print(dfcomp)\n\n d2comp = np.array([compws,relws])\n d2comp[d2comp[:,0].argsort()]\n\n if False:\n figz = plt.figure()\n \n axz = figz.add_subplot(2,1,1)\n axz.scatter(freq,dfcomp['comp'],edgecolor='',label='Absolute Spectrum')\n axz.scatter(freq,dfcomp['rel'],edgecolor='',c='r',label='Windowed Spectrum')\n axz.legend()\n\n dfcomp = dfcomp.sort_values(['rel','comp'],ascending=False)\n\n axza = figz.add_subplot(2,1,2)\n axza.scatter(freq,dfcomp['comp'],edgecolor='',label='Absolute Spectrum')\n axza.scatter(freq,dfcomp['rel'],edgecolor='',c='r',label='Windowed Spectrum')\n axza.legend()\n\n plt.show()\n\n if False:\n\n fig = plt.figure()\n ax = fig.add_subplot(3,2,1)\n ax2 = fig.add_subplot(3,2,3)\n ax3 = fig.add_subplot(3,2,5)\n\n axa = fig.add_subplot(3,2,2)\n ax2a = fig.add_subplot(3,2,4)\n ax3a = fig.add_subplot(3,2,6)\n #2.1 Companion plot to WS vs Bins, - % covered vs Bins, showing the amount of real spectrum covered by the partitioned algo as a percentage\n\n ax.scatter(freq,relws,edgecolor='')\n ax2.scatter(freq,diffarry,edgecolor='')\n # ax3.scatter(freq,compws,edgecolor='',label='Absolute Spectrum')\n ax3.scatter(bw_1_dataset['frequency'],bw_1_dataset['timescale'],edgecolor='',label='Absolute Spectrum')\n ax3.scatter(freq,relws,edgecolor='',c='r',label='Windowed Spectrum')\n\n #CDFs?\n\n relwsS = np.sort(relws)\n diffarryS = np.sort(diffarry)\n compwsS = np.sort(compws) \n\n # compwspad_r = np.resize(compwspad,(1,131072))\n # compwsS = np.sort(compwspad_r)\n # relwsS = np.sort(relws)\n\n axa.scatter(freq,relwsS[::-1],edgecolor='')\n axa.set_title('Whitespace Frequency Distribution CCDF')\n axa.set_xlabel('Descending Magnitude Ordered Bin Count')\n axa.set_ylabel('Whitespace Units over Observation Period')\n ax2a.scatter(freq,diffarryS[::-1],edgecolor='')\n ax2a.set_title('Whitespace Frequency Distribution Difference CCDF')\n ax2a.set_xlabel('Descending Magnitude Ordered Bin Count')\n ax2a.set_ylabel('Whitespace Units Difference over Observation Period')\n \n ax3a.set_title('Whitespace Frequency Distribution CCDF')\n ax3a.set_xlabel('Descending Magnitude Ordered Bin Count')\n ax3a.set_ylabel('Whitespace Units over Observation Period')\n ax3a.scatter(freq,compwsS[::-1],edgecolor='',label='Absolute Spectrum')\n \n # ax3a.scatter(freq,compwsS[::-1],edgecolor='',label='Absolute Spectrum')\n ax3a.scatter(freq,relwsS[::-1],edgecolor='',c='r',label='Windowed Spectrum')\n ax3a.legend()\n\n # ax.set_yscale('log')\n # ax.set_xscale('log')\n # ax2.set_yscale('log')\n # ax2.set_xscale('log')\n\n ax.set_title('Whitespace Frequency Distribution')\n ax.set_xlabel('Bandwidth in Bins (190Hz/bin)')\n ax.set_ylabel('Whitespace Units')\n\n ax2.set_title('Missed Window Coverage (Absolute minus Windowed)')\n ax2.set_xlabel('Bandwidth in Bins (190Hz/bin)')\n ax2.set_ylabel('Number of Whitespace Units Difference')\n\n ax3.set_title('Window Coverage Comparison')\n ax3.set_xlabel('Bandwidth in Bins (190Hz/bin)')\n ax3.set_ylabel('Whitespace Units')\n ax3.legend()\n plt.show()\n\n\n\n\n elif(test == 3):\n\n #3. 
Instantaneous bandwidth (total) vs Time (real - in frames) \n #Possible second series showing the number of instantaneous windows/fragmentation - fragmentation is probably more interesting\n \n #cols:[0] timescale, [1] frequency, [2] bandwidth, [3] whitespace, [4] frame_no\n bwvt = pd.concat([dataset[cols[2]],dataset[cols[0]],dataset[cols[4]]], axis=1, keys=[cols[2],cols[0],cols[4]])\n\n #bcols:[0] bandwidth, [1] timescale, [2] frame_no\n bcols = bwvt.columns\n\n maxframe = np.max(bwvt[bcols[2]])\n bwspread = np.zeros(maxframe)\n\n time = np.linspace(0,maxframe-1,maxframe)\n print(time)\n\n for row in bwvt.itertuples():\n bwspread[row[3]-1-row[2]:row[3]] += row[1]\n\n #bwspread[maxframe-1] = 0 #clear the final value as it gives an improper indication of continuous bandwidths\n\n fig = plt.figure()\n ax = fig.add_subplot(2,1,1)\n ax.scatter(time,bwspread,edgecolor='')\n\n #ax.set_yscale('log')\n #ax.set_xscale('log')\n\n ax.set_title('Bandwidth over Time')\n ax.set_xlabel('Continuous Time (5.3ms/unit)')\n ax.set_ylabel('Aggregate Instantaneous Bandwidth (190Hz/bin)')\n \n plt.show()\n\n #4. Average window duration vs Time - Showing the average window persistance (in frames) with respect to time (in frames) - this may be confusing?\n\n elif(test == 4):\n\n #cols:[0] timescale, [1] frequency, [2] bandwidth, [3] whitespace, [4] frame_no\n dvt = pd.concat([dataset[cols[0]],dataset[cols[4]]], axis=1, keys=[cols[0],cols[4]])\n\n #dcols:[0] timescale, [1] frame_no\n dcols = dvt.columns\n\n maxframe = np.max(dvt[dcols[1]])\n tavgspread = np.zeros(maxframe)\n ttotalspread = np.zeros(maxframe)\n\n time = np.linspace(0,maxframe-1,maxframe)\n\n wincount = np.zeros(maxframe)\n \n print(dvt)\n\n currframe = 0\n count = 1\n\n for row in dvt.itertuples():\n if row[2] > currframe:\n \n tavgspread[currframe-1] /= count\n\n wincount[currframe] = count\n count=1\n currframe = row[2]\n else: count += 1\n\n tavgspread[row[2]-1] += row[1]\n ttotalspread[row[2]-1] += row[1]\n\n tavgspread[maxframe-1] /= count #clear the final value as it gives an improper indication of continuous bandwidths\n ttotalspread[maxframe-1] = 0\n wincount[maxframe-1] = 0\n\n fig = plt.figure()\n ax = fig.add_subplot(3,2,1)\n ax2 = fig.add_subplot(3,2,3)\n ax3 = fig.add_subplot(3,2,5)\n\n axa = fig.add_subplot(3,2,2)\n ax2a = fig.add_subplot(3,2,4)\n ax3a = fig.add_subplot(3,2,6)\n\n ax.scatter(time,tavgspread,edgecolor='')\n ax2.scatter(time,ttotalspread,edgecolor='')\n ax3.scatter(time,wincount,edgecolor='')\n\n #ax.set_yscale('log')\n #ax.set_xscale('log')\n\n ax.set_title('Window Duration over Time')\n ax.set_xlabel('Continuous Time (5.3ms/unit)')\n ax.set_ylabel('Average Window Duration (5.3ms/unit)')\n\n ax2.set_title('Aggregate Window Duration over Time')\n ax2.set_xlabel('Continuous Time (5.3ms/unit)')\n ax2.set_ylabel('Total Aggregate Window Duration (5.3ms/unit)')\n\n ax3.set_title('Total Windows over Time')\n ax3.set_xlabel('Continuous Time (5.3ms/unit)')\n ax3.set_ylabel('Number of Windows')\n \n tavgsS = np.sort(tavgspread)\n ttotsS = np.sort(ttotalspread)\n wincS = np.sort(wincount)\n \n axa.scatter(time,tavgsS[::-1],edgecolor='')\n axa.set_title('Window Duration over Time CCDF')\n axa.set_xlabel('Descending Magnitude Observation Index')\n axa.set_ylabel('Average Window Duration (5.3ms/unit)')\n\n ax2a.scatter(time,ttotsS[::-1],edgecolor='')\n ax2a.set_title('Aggregate Window Duration over Time CCDF')\n ax2a.set_xlabel('Descending Magnitude Observation Index')\n ax2a.set_ylabel('Total Aggregate Window Duration 
(5.3ms/unit)')\n \n ax3a.set_title('Total Windows over Time CCDF')\n ax3a.set_xlabel('Descending Magnitude Observation Index')\n ax3a.set_ylabel('Window Count')\n ax3a.scatter(time,wincS[::-1],edgecolor='')\n\n plt.show()\n\n\n\n\n if (test == 5):\n\n #5. Quality vs Bins - quality computed using weighting algorithm and assigning those weights to the relative bin(s) spanned by the appropriate window. - Sum the total weights, and/or average weights and display those?\n\n \n #cols:[0] timescale, [1] frequency, [2] bandwidth, [3] whitespace, [4] frame_no\n qvf = pd.concat([dataset[cols[0]],dataset[cols[2]],dataset[cols[1]]], axis=1, keys=[cols[0],cols[2],cols[1]])\n\n #qcols: [0] timescale, [1] bandwidth, [2] frequency\n qcols = qvf.columns\n\n freq = np.linspace(0,nbins-1,nbins)\n qualarry = np.zeros(nbins)\n\n tmin = 10\n bmin = 66 #These should already be adhered to\n\n for row in qvf.itertuples():\n qual = (row[1]-tmin)/tmin * (row[2]/bmin)\n if qual > 1:\n qualarry[row[3]:row[3]+row[2]-1] += qual\n\n fig = plt.figure()\n ax = fig.add_subplot(2,1,1)\n ax.scatter(freq,qualarry,edgecolor='')\n\n qualarryord = np.sort(qualarry)\n# qualarryord = qualarryord[::-1]\n ax2 = fig.add_subplot(2,1,2)\n ax2.scatter(freq,qualarryord[::-1],edgecolor='')\n\n #ax.set_yscale('log')\n #ax.set_xscale('log')\n\n ax.set_title('Quality per Bin over Observation Period')\n ax.set_xlabel('Frquency in Bins (190Hz/bin)')\n ax.set_ylabel('Total Bin Quality')\n\n ax2.set_title('Quality per Bin over Observation Period Ordered')\n ax2.set_xlabel('Count (Eventually as a percentage)')\n ax2.set_ylabel('Total Bin Quality')\n\n plt.show()\n\ndef legacy_sns (filename):\n\n wins = pd.read_csv(filename, header=0) #, chunksize = chnk)\n print(wins.columns)\n\n #cols:timescale,frequency,bandwidth,whitespace,frame_no\n cols = wins.columns\n \n #print(wins.head(10))\n #ts = wins.sort_values([cols[0],cols[2]])\n \n #uniq = pd.unique(wins[cols[0]].values.ravel())#, columns = [cols[0],cols[3]])\n #ts_sort = np.c_[uniq,np.zeros(uniq.size)] \n #ts_sum = np.empty_like(ts_sort)\n\n ##Row[0] is the index given by the dataframe\n ##Here the unique TS is used as the index, where the unique whitespace value is summed for each TS \n ##TS is already sorted ascending, by nature of the detection\n ## THIS WILL ONLY WORK IF EVERY TS IS OBSERVED !!!BAD!!!\n #for row in wins.itertuples():\n # ts_sort[row[1]-1,1] += row[4]\n\n \n sns.jointplot(wins[cols[2]],wins[cols[0]],kind=\"hex\",color=\"#4CB391\", stat_func=None)\n sns.jointplot(x='bandwidth',y='timescale',data = wins,kind=\"kde\",color=\"#4CB391\", stat_func=None)\n\n plt.savefig(\"heatmap.png\")\n plt.show()\n\ndef legacy(filename):\n\n wins = pd.read_csv(filename, header=0) #, chunksize = chnk)\n print(wins.columns)\n\n #cols:timescale,frequency,bandwidth,whitespace,frame_no\n cols = wins.columns\n \n #print(wins.head(10))\n #ts = wins.sort_values([cols[0],cols[2]])\n \n #uniq = pd.unique(wins[cols[0]].values.ravel())#, columns = [cols[0],cols[3]])\n #ts_sort = np.c_[uniq,np.zeros(uniq.size)] \n #ts_sum = np.empty_like(ts_sort)\n\n ##Row[0] is the index given by the dataframe\n ##Here the unique TS is used as the index, where the unique whitespace value is summed for each TS \n ##TS is already sorted ascending, by nature of the detection\n ## THIS WILL ONLY WORK IF EVERY TS IS OBSERVED !!!BAD!!!\n #for row in wins.itertuples():\n # ts_sort[row[1]-1,1] += row[4]\n\n \n #sns.jointplot(wins[cols[2]],wins[cols[0]],kind=\"hex\",color=\"#4CB391\", stat_func=None)\n 
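# The whitespace-density mesh built below follows a scatter-accumulate idiom;
# minimal toy version (hypothetical sizes, not the real 131072-bin capture):
import numpy as np

toy_mesh = np.zeros((4, 4))                  # rows: bandwidth-1, cols: timescale-1
for ts, bw, ws in [(2, 3, 6), (1, 1, 1)]:    # (timescale, bandwidth, whitespace)
    toy_mesh[bw - 1, ts - 1] += ws           # accumulate whitespace per cell
assert toy_mesh.sum() == 7.0                 # total whitespace observed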
#sns.jointplot(x='bandwidth',y='timescale',data = wins,kind=\"kde\",color=\"#4CB391\", stat_func=None)\n #plt.show()\n\n x = max(wins[cols[2]])\n y = max(wins[cols[0]])\n fn = max(wins[cols[4]])\n\n nbins = 131072\n\n print('Max Bandwidth: {} Max Timescale: {}'.format(x,y))\n\n mesh = np.zeros((x,y))\n num_frames = np.zeros(fn)\n\n freqdensity = np.zeros(nbins)\n\n #print(mesh)\n\n uniq = pd.unique(wins[cols[2]].values.ravel())\n uniq.sort()\n\n ws_sort = np.c_[uniq,np.zeros(uniq.size)] \n ws_sum = np.empty_like(ws_sort)\n\n count = 0\n totalws = 0\n #Row[0] is the index given by the dataframe\n for row in wins.itertuples():\n mesh[row[3]-1,row[1]-1] += row[4] \n #temp = np.where(ws_sort[:,0]==row[3])\n #ws_sort[temp,1] += row[4]\n totalws += row[4]\n ## mesh[row[1]-1,row[3]-1] += row[4]\n \n #populate frequency density array\n #for i in range(row[2],row[2]+row[3]-1):\n # freqdensity[i]+=row[1] \n\n count += 1\n if (count%100000 == 0): print('Up to: {}'.format(count))\n\n print('Total Whitespace: ')\n print(totalws)\n\n fig = plt.figure()\n ##ax1 = fig.add_subplot(1,2,1) #convention (row,col,idx)\n ##ax2 = fig.add_subplot(1,2,2)\n\n ##pd.DataFrame.hist(wins,column=cols[0],bins=(x+1),log=True,ax=ax1)\n ##ax1.set_xlim([0,50])\n ##ax1.set_title('Timescale Density')\n ##ax1.set_xlabel('Timescale')\n ##ax1.set_ylabel('# Observations')\n\n ##ax2.plot(num_frames)\n ##ax2.set_ylim([0,100])\n ##ax2.set_title('Whitespace Channels')\n ##ax2.set_xlabel('Frame Number')\n ##ax2.set_ylabel('# Whitespace Channels')\n ##plt.show()\n\n #print('Mesh populated')\n #print(mesh)\n \n ax = fig.add_subplot(1,1,1)\n plt.pcolormesh(mesh, norm=LogNorm(vmin=1, vmax=np.amax(mesh)))\n plt.colorbar()\n #ax.set_yscale('log')\n #ax.set_xscale('log')\n ax.set_xlim(1,x)\n ax.set_ylim(1,y)\n ax.set_title('Whitespace Density')\n ax.set_xlabel('Bandwidth in Bins (190Hz/bin)')\n ax.set_ylabel('Timescale (5.3ms/unit)')\n\n #ax1 = fig.add_subplot(2,1,1)\n #ax1.plot(freqdensity)\n ##ax1.set_xlim([0,50])\n #ax1.set_title('Whitespace Density per Bin')\n #ax1.set_xlabel('Frequency in Bins (190Hz/bin)')\n #ax1.set_ylabel('Whitespace')\n\n plt.show()\n\n #ts_sum = ts_sort\n # np.copyto(ts_sum,ts_sort)\n np.copyto(ws_sum,ws_sort)\n \n #df = pd.DataFrame(data=ts_sort,columns=[cols[0],cols[3]])\n #print(df)\n\n #for i in range(uniq.size-1, 0, -1):\n # ts_sum[i-1,1] += ts_sum[i,1]\n #print(ts_sum[i,1])\n for i in range(uniq.size-1, 0, -1):\n ws_sum[i-1,1] += ws_sum[i,1]\n\n print('\\nws_sort:')\n print(ws_sort)\n print('\\nws_sum:')\n print(ws_sum)\n\n sns.jointplot(ws_sort[:,0],ws_sort[:,1],kind=\"hex\",color=\"#4CB391\")\n plt.show()\n\n #for i in range(len(wins)):\n # set1 = np.array(wins[wins.columns[0::2]])[i]\n\n #wins.plot(x=cols[0],y=cols[3],kind='scatter')\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n #ax.plot(ts_sort)\n #ax.set_yscale('log')\n #plt.semilogy(ts_sort[0], ts_sort[1], ts_sum[0], ts_sum[1])\n ax.semilogy(ws_sort[:,0], ws_sort[:,1], label='Unique Whitespace')\n ax.semilogy(ws_sum[:,0], ws_sum[:,1], label='Cumulative Whitespace')\n\n ax.set_yscale('symlog') #super important to plot '0' values\n ax.legend()\n plt.show()\n\n\n\n\n#BEGIN THE SIMULATOR - Should put this in a separate file, but visualstudio is mean when it comes to selecting the file to execute etc\n\nclass Device:\n def __init__(self, ts, bw, id):\n self.ts = ts\n self.bw = bw\n self.id = id\n \n self.freq = 0\n\n self.run_time = 50 #this number should be modified based on access model\n\n self.sched_time = 0\n\n self.total_sched_time = 0 #total 
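# The descending loop that fills ws_sum above is a reverse cumulative sum:
# each entry becomes "total whitespace at or above this value". A vectorized
# equivalent for checking it:
import numpy as np

ws_sort = np.array([[1, 5.0],
                    [2, 3.0],
                    [4, 2.0]])   # column 0: value, column 1: whitespace total

ws_sum = ws_sort.copy()
for i in range(len(ws_sum) - 1, 0, -1):   # loop form used in legacy()
    ws_sum[i - 1, 1] += ws_sum[i, 1]

ws_vec = ws_sort.copy()
ws_vec[:, 1] = np.cumsum(ws_sort[::-1, 1])[::-1]  # reverse cumsum, one line

assert np.allclose(ws_sum, ws_vec)
print(ws_vec)   # [[1, 10], [2, 5], [4, 2]]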
running time\n self.last_scheduled = 0 \n self.waiting_time = 0 #total waiting time IF communication interrupted\n #maybe need a scheduling flag?\n\n #FIXME\n def schedule(self, freq, bandwidth, startframe):\n if bandwidth < self.bw:\n print('DEVICE {} SCHEDULING ERROR, BW TOO SMALL'.format(self.id))\n else: \n self.freq = freq\n self.waiting_time = self.waiting_time + (startframe - (self.last_scheduled - 1))\n self.sched_time = startframe\n\n def deschedule(self, endframe):\n if endframe - self.sched_time < self.run_time:\n print(\"device {} only ran for {}. Required runtime: {}\".format(self.id,endframe-self.sched_time,self.run_time))\n\n self.total_sched_time = self.total_sched_time + (endframe - self.sched_time) \n self.last_scheduled = endframe\n\n def totalTime(self):\n return self.total_sched_time\n\n def totalWait(self):\n return self.waiting_time\n\n def getFreq(self):\n return self.freq\n\n def getBW(self):\n return self.bw\n\n def getID(self):\n return self.id\n\n def getRemianingRun(self, frame):\n return self.run_time - (frame - self.sched_time)\n\n #will not implement getTS yet, as we have no true concept of TS as everything is running without a-priori knowledge\n\n#class DeviceCreator\n\nclass Slice: #has been supersceeded by simply using a dataframe\n def __init__(self,frequency,bandwidth):\n self.freq = frequency\n self.bw = bandwidth\n \n def getFreq(self):\n return self.freq\n\n def getBW(self):\n return self.bw\n\nclass SpectrumEntity:\n def __init__(self,filename):\n self.framecount = 0\n self.dataset = pd.read_csv(filename, header=0)#, converters={0: np.int32, 1: np.int32, 2: np.int32, 3: np.int32, 4: np.int32}, dtype=np.int32)\n\n self.framemax = np.max(self.dataset['frame_no'])\n self.dataset = self.dataset.sort_values(['frequency'], ascending=True)\n self.dataset['start'] = self.dataset['frame_no'].subtract(self.dataset['timescale']-1)\n\n self.state = self.dataset.groupby(self.dataset['start'])\n #self.stateGroups = dict(list(self.state))\n\n self.spect_df = pd.DataFrame()\n #self.sliceList = []\n\n self.sliceFrame = pd.DataFrame()\n\n #when nextFrame is called check that result is not null\n def getNextFrame(self):\n\n if self.framecount == self.framemax:\n print(\"what we gon do now?\")\n return null\n\n #for i in range(13):\n #clear current buffer\n #del self.sliceList[:]\n self.framecount = self.framecount + 1\n\n #create frame list of slices\n \n # for key, group in self.state:\n # if key < 13:\n # print(key)\n # #print(group)\n # self.spect_df = self.spect_df.append(group)\n # self.spect_df = self.spect_df[self.spect_df.frame_no > key] #this is pretty inefficient\n # print(self.spect_df) #this should be a self contained dataframe of the current windows, just take a slice from this dataset and gg\n\n if self.framecount in self.state.groups:\n self.spect_df = self.spect_df.append(self.state.get_group(self.framecount))\n self.spect_df = self.spect_df[self.spect_df.frame_no > self.framecount]\n #self.dataset = self.dataset.sort_values(['frequency'], ascending=True)\n self.spect_df = self.spect_df.sort_values(['frequency'], ascending=True)\n else: \n self.spect_df = self.spect_df[self.spect_df.frame_no > self.framecount] #this is pretty inefficient\n\n #slice spect_df\n #print(self.spect_df)\n self.sliceFrame = self.spect_df[['frequency','bandwidth']].reset_index(drop=True)\n \n if False: \n print(self.framecount)\n print(self.spect_df)\n print(self.sliceFrame)\n\n #check for adjacency - maybe later, currently ignore adjacency\n\n\n #slices should be sorted as 
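# Usage sketch for the Device bookkeeping above (frame numbers invented;
# relies on the Device class defined in this file):
dev = Device(ts=11, bw=66, id=0)

dev.schedule(freq=100, bandwidth=70, startframe=5)  # slice wide enough, accepted
print(dev.getFreq())            # -> 100
print(dev.getRemianingRun(30))  # -> 25 (run_time 50 minus 25 frames elapsed)

dev.deschedule(endframe=60)     # ran 55 >= run_time 50, so no warning printed
print(dev.totalTime())          # -> 55 frames on air
print(dev.totalWait())          # -> 6 frames waited before first scheduling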
the generation of them is procedural\n #return self.sliceList\n return self.sliceFrame\n \nclass DeviceQueue:\n def __init__(self,numDev):\n self.queue = []\n\n #create the devices to place in the queue\n #ts and bw could be generated uniquely for each device, currently we are using a placeholder\n self.ts = 11\n self.bw = 66\n \n for i in range(numDev):\n self.queue.append(Device(self.ts,self.bw,i))\n\n print(\"{} devices created\".format(len(self.queue)))\n\n #this is called by the scheduler\n def push(self,dev,frame):\n #push takes in a device object and adds it to the queue\n dev.deschedule(frame)\n self.queue.append(dev) \n\n def peek(self):\n if not self.queue:\n return null\n else:\n return self.queue[0]\n\n def search(self,bw,time):\n for dev in self.queue:\n if dev.getBW <= bw:\n temp = dev\n self.queue.remove(dev)\n return temp\n break\n\n def pop(self):\n if not self.queue:\n return null\n else:\n return self.queue.popleft()\n\n def len(self):\n return len(self.queue)\n\nclass Scheduler:\n def __init__(self,filename,numDev):\n #create spectrum entity\n #create UDQ\n #create devices (inform queue of number of devices to populate)\n \n #implement logging\n \n self.RSL = [] #remainingSpectrumList\n self.ADL = [] #allocatedDeviceList\n\n self.currentFrame = 1\n\n #devices created within queue object\n #self.UDQ = DeviceQueue(numDev) #working around the queue as it does not seem efficient\n\n self.UDL = [] #unallocatedDeviceList\n self.ts = 11\n self.bw = 66\n \n for i in range(numDev):\n self.UDL.append(Device(self.ts,self.bw,i))\n\n print(\"{} devices created\".format(len(self.UDL)))\n\n #slices prepared within SE object\n self.SE = SpectrumEntity(filename)\n \n self.run_list()\n\n #implement operational loop within here\n def run(self):\n #check SE for spectrumstate\n self.spectrumState = self.SE.getNextFrame()\n\n #print(self.spectrumState)\n\n adevs = len(self.ADL)\n adevIdx = 0\n\n udevs = len(self.UDL)\n udevIdx = 0\n\n remainBW = 0\n self.tempADL = self.ADL.copy()\n\n uFlag = False\n noAlloc = False\n\n #compare spectrum state with ADL to ensure allocations still valid\n while not self.spectrumState.empty:\n #print('Frame number: {}'.format(self.currentFrame))\n\n if self.ADL:\n for row in self.spectrumState.itertuples(): #iterate through all of the available spectrum\n #row['frequency'], row['bandwidth']\n\n uBW = row['bandwidth']\n uFreq = row['frequency'] #beginnign of the unallocated frequency for that particular slice\n\n for adev in range(adevIdx,len(self.ADL)): #FIX ALL THESE TO HAVE CORRECT INDEXATION!!!!!!!!!!!\n\n #still need to take into account duration expired cases\n \n #check frequency and bandwidth \n \n #if we are in to the slice and the previous slice was not befitting\n if adev.getFreq + adev.getBW < uFreq and uFlag:\n #device needs to be deallocated\n uFlag = False\n self.UDL.append(adev)\n self.tempADL.remove(adev)\n\n adevIdx = adevIdx + 1\n continue \n\n if adev.getFreq + adev.getBW <= row['frequency'] + row['bandwidth'] and adev.getFreq >= row['frequency']:\n #device exists within slice\n\n #this does not work correctly for fractioned slices methinks.\n if uFreq != adev.getFreq:\n print('Gap in allocation detected. Dev start: {}, window start {}'.format(adev.getFreq,uFreq))\n self.RSL.append([uFreq,uFreq - dev.getFreq], columns=['frequency','bandwidth'])\n uBW = uBW - (uFreq - dev.getFreq)\n \n uFreq = adev.getFreq + adev.getBW #this will be one position after the end of the allocation, i.e. 
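# SpectrumEntity.getNextFrame replays the recording incrementally: windows
# join the active set when their precomputed 'start' frame arrives and drop
# out once frame_no passes. The same bookkeeping in miniature (pd.concat
# standing in for the since-removed DataFrame.append):
import pandas as pd

df = pd.DataFrame({'frequency': [0, 10, 40],
                   'bandwidth': [8, 12, 6],
                   'frame_no':  [3, 5, 4],
                   'start':     [1, 2, 2]})  # start = frame_no - (timescale - 1)
groups = df.groupby('start')

state = pd.DataFrame()
for frame in range(1, 6):
    if frame in groups.groups:
        state = pd.concat([state, groups.get_group(frame)])  # windows opening now
    state = state[state.frame_no > frame]                    # windows already closed
    print(frame, state['frequency'].tolist())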
the next position not conflicting with the current device\n uBW = uBW - adev.getBW\n\n if uBW < 0:\n print('Well, we have a negative bandwidth here, thats not good ....')\n elif uBW == 0:\n print('Slice completely allocated')\n adevIdx = adevIdx + 1\n continue\n else: \n print('Allocation valid')\n \n elif dev.getFreq >= row['frequency'] + row['bandwidth']:\n #device does not exist in this slice, thus remaining slice is free\n print('Device {} does not exist in this slice, remaining slice: {} - validate with uBW: {}'.format(adevIdx,row['frequency']+row['bandwidth']-uFreq,uBW))\n\n adevIdx = adevIdx #this device still needs to be checked\n\n self.RSL.append([uFreq,uBW], columns=['frequency','bandwidth'])\n uFlag = True;\n \n break #currently allocated devices do not exist in this slice\n\n\n #other devices may also not exist within this bound ... probably should not break then ..\n #not if they are sorted.\n\n adevIdx = adevIdx + 1\n #update RSL\n\n #deallocate conflicting devices\n \n #for udev in range(self.UDQ.len):\n else: \n noAlloc = True\n\n print('Allocated devices checked: {}. Devices removed: {}'.format(len(self.ADL), len(self.ADL) - len(self.tempADL)))\n self.ADL = self.tempADL.copy() #I REALLY dislike this, however its annoying to remove elements from a list as you iterate over it\n\n if self.UDL:\n #unallocated devices, interrogate unallocated list then find potential slots for each device waiting\n if noAlloc:\n self.RSL = self.spectrumState.copy()\n\n for udev in range(len(self.UDL)):\n print(udev)\n #to schedule device increment the frequency by the devices bandwidth and subtract the available bandwidth for that opportunity\n print(self.RSL[self.RSL['frequency'] >= self.UDL[udev].getBW()].iloc[0:2,0:2]) \n print(self.RSL[self.RSL['frequency'] >= self.UDL[udev].getBW()].iloc[0:2,0:2].values.tolist()) \n\n\n #generate RSL\n\n #check RSL against UDQ and update RSL accordingly\n\n #perform logging on current state\n\n #sort ADL by frequency to reduce number of comparisons\n\n #loop again until we have exhausted the spectrum\n self.currentFrame = self.currentFrame + 1\n self.spectrumState = self.SE.getNextFrame()\n\n if self.spectrumState.empty:\n print(\"End of spectrum reached!\")\n break\n\n print(\"should output things here, plots etc\")\n #finish up\n print(\"scheduler terminating\")\n\n def run_list(self):\n #check SE for spectrumstate\n self.spectrumState = self.SE.getNextFrame().values.tolist()\n\n #print(self.spectrumState)\n remainBW = 0\n adevIdx = 0\n\n noAlloc = False\n\n removed = 0\n #compare spectrum state with ADL to ensure allocations still valid\n while self.spectrumState:\n #print('Frame number: {}'.format(self.currentFrame))\n\n \n if self.ADL:\n i = 0\n for row in self.spectrumState: #iterate through all of the available spectrum\n #row['frequency'], row['bandwidth']\n\n uFreq = row[0] #beginnign of the unallocated frequency for that particular slice\n uBW = row[1]\n \n while i < len(self.ADL):\n #for adev in islice(self.ADL,adevIdx,None): #FIX ALL THESE TO HAVE CORRECT INDEXATION!!!!!!!!!!!\n\n #check frequency and bandwidth \n #if we are in to the slice and the previous slice was not befitting\n if self.ADL[i].getFreq() + self.ADL[i].getBW() < uFreq:\n #device needs to be descheduled\n print('Deallocating: dev {} from [{},{}]'.format(self.ADL[i].getID(),self.ADL[i].getFreq(),self.ADL[i].getBW()))\n self.ADL[i].deschedule(self.currentFrame)\n self.UDL.append(self.ADL[i])\n del self.ADL[i]\n\n #i = i - 1\n removed = removed + 1\n continue \n\n uFlag = 
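# The uFreq/uBW walk in run() amounts to interval subtraction: each allocated
# device carves a hole out of the current free slice. A hypothetical helper
# (not in the source) capturing that invariant:
def carve_slice(freq, bw, allocations):
    """Subtract sorted, non-overlapping allocations [(freq, bw), ...] from one
    free slice and return the remaining free pieces."""
    free, cursor, end = [], freq, freq + bw
    for a_freq, a_bw in allocations:
        if a_freq >= end:            # allocation lies beyond this slice
            break
        if a_freq > cursor:          # gap before the allocation stays free
            free.append((cursor, a_freq - cursor))
        cursor = max(cursor, a_freq + a_bw)
    if cursor < end:                 # tail of the slice stays free
        free.append((cursor, end - cursor))
    return free

print(carve_slice(0, 100, [(10, 20), (50, 25)]))
# -> [(0, 10), (30, 20), (75, 25)]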
False\n\n #still need to take into account duration expired cases\n if self.ADL[i].getRemianingRun(self.currentFrame) <= 0:\n #device needs to be descheduled\n print('Runtime expired: dev {} from [{},{}]'.format(self.ADL[i].getID(),self.ADL[i].getFreq(),self.ADL[i].getBW()))\n self.ADL[i].deschedule(self.currentFrame)\n self.UDL.append(self.ADL[i])\n del self.ADL[i]\n\n #i = i - 1\n removed = removed + 1\n continue \n\n elif self.ADL[i].getFreq() >= row[0] + row[1]:\n #device does not exist in this slice, thus remaining slice is free\n #print('Device {} does not exist in this slice, remaining bandwidth in slice: {} - validate with uBW: {}'.format(adev.getID(),row[0]+row[1]-uFreq,uBW))\n\n self.RSL.append([uFreq,uBW])\n\n #this device still needs to be checked\n break #currently allocated devices do not exist in this slice\n\n elif self.ADL[i].getFreq() + self.ADL[i].getBW() <= row[0] + row[1] and self.ADL[i].getFreq() >= row[0]:\n #device exists within slice\n\n #this does not work correctly for fractioned slices methinks.\n if uFreq != self.ADL[i].getFreq():\n print('Gap in allocation detected. Dev start: {}, window start {}'.format(self.ADL[i].getFreq(),uFreq))\n self.RSL.append([uFreq,uFreq - self.ADL[i].getFreq()])\n uBW = uBW - (uFreq - self.ADL[i].getFreq())\n \n uFreq = self.ADL[i].getFreq() + self.ADL[i].getBW() #this will be one position after the end of the allocation, i.e. the next position not conflicting with the current device\n uBW = uBW - self.ADL[i].getBW()\n\n if uBW < 0:\n print('Well, we have a negative bandwidth here, thats not good ....')\n elif uBW == 0:\n print('Slice completely allocated!')\n #else: \n #print('Allocation valid')\n\n #end of loop and device is valid, increment i\n i = i + 1\n\n #remaining spectrum is unoccupied\n if i == len(self.ADL):\n self.RSL.append([row[0],row[1]])\n\n #for udev in range(self.UDQ.len):\n\n #sort newly populated RSL\n adevIdx = 0\n #print('Remaining Spectrum List')\n #print(self.RSL)\n self.RSL.sort(key=lambda x: x[0])\n #print(self.RSL)\n else: \n noAlloc = True\n\n allocated = 0\n if self.UDL:\n #unallocated devices, interrogate unallocated list then find potential slots for each device waiting\n if noAlloc:\n self.RSL = self.spectrumState.copy()\n noAlloc = False\n\n #for udev in self.UDL:\n # iterations = iterations + 1\n # #to schedule device increment the frequency by the devices bandwidth and subtract the available bandwidth for that opportunity\n # #print(self.RSL[self.RSL['frequency'] >= self.UDL[udev].getBW()].iloc[0:2,0:2]) \n # #print(self.RSL[self.RSL['frequency'] >= self.UDL[udev].getBW()].iloc[0:2,0:2].values.tolist()) \n # for row in self.RSL:\n # if row[1] >= udev.getBW():\n # #print(row)\n # udev.schedule(row[0],row[1],self.currentFrame)\n # row[1] = row[1] - udev.getBW()\n # row[0] = row[0] + udev.getBW()\n # self.ADL.append(udev)\n # self.UDL.remove(udev) #wont work\n # break \n\n i = 0\n offset = 0\n while i < len(self.UDL):\n #to schedule device increment the frequency by the devices bandwidth and subtract the available bandwidth for that opportunity\n \n #this looks very C and not at all pythonic :)\n for row in self.RSL:\n if row[1] >= self.UDL[i].getBW():\n #print(i)\n #print(offset\n print('Allocating {} to [{},{}]'.format(self.UDL[i].getID(),row[0],row[1]))\n self.UDL[i].schedule(row[0],row[1],self.currentFrame) \n row[0] = row[0] + self.UDL[i].getBW()\n row[1] = row[1] - self.UDL[i].getBW()\n self.ADL.append(self.UDL[i])\n del self.UDL[i] #might work\n #offset = offset + 1\n i = i - 1\n allocated = 
allocated + 1\n break \n i = i + 1\n \n #print('Iterations of self.UDL: {}, self.UDL size: {}'.format(i,len(self.UDL)))\n print('Original spectrum:')\n print(self.spectrumState)\n print('Remaining spectrum for frame {}:'.format(self.currentFrame)) \n print(self.RSL)\n\n #generate RSL\n\n #check RSL against UDQ and update RSL accordingly\n\n #perform logging on current state\n\n #sort ADL by frequency to reduce number of comparisons\n\n #loop again until we have exhausted the spectrum\n self.currentFrame = self.currentFrame + 1\n self.spectrumState = self.SE.getNextFrame().values.tolist()\n\n print('Frame {} - Allocated devices checked: {}. Devices removed: {}. Devices reallocated: {}. Devices unallocated: {}'.format(self.currentFrame-1,len(self.ADL), removed, allocated, len(self.UDL)))\n removed = 0\n del self.RSL[:]\n\n if not self.spectrumState:\n print(\"End of spectrum reached!\")\n break\n\n print(\"should output things here, plots etc\")\n #finish up\n print(\"scheduler terminating\")\n \n\n\n \n\n\ndef dev_sim(filename):\n\n print('Now running Device Simulation')\n\n\n runtest = Scheduler(filename,200)\n\n print('Device simulation ended')\n dataset = pd.read_csv(filename, header=0)#, converters={0: np.int32, 1: np.int32, 2: np.int32, 3: np.int32, 4: np.int32}, dtype=np.int32)\n\n #print(dataset.dtypes)\n\n #cols:[0] timescale, [1] frequency, [2] bandwidth, [3] whitespace, [4] frame_no\n #in a loop (iterator), [0] = index, [1] = timescale ...\n print(dataset.columns)\n cols = dataset.columns\n \n nbins = 131072\n\n #This set of tests are designed to simulate channel occupancy/utilisation of secondary devces accessing the whitespace spectrum, given particular access requirements.\n #These requirements are (initially): Bandwidth, minimum timescale.\n #The tests will be performed accross the number of devices operating and the resulting utilisation of the spectrum\n #Note that complex scheduling algorithms will not be explored (at least initially), as this is an extremely deep topic within itself. \n\n #the resulting plots will be: Spectrum utilisation/total throughput versus number of devices, with a family of curves, each curve will either be a variance on device required timescale or bandwidth. \n #a comparison plot for this will also be generated detailing the per device throughput and how it decays as a function of device density.\n\n #it is also of note that these models do not take into account channel capacity models, instead raw spectrum resources are focused upon and is of the unit resource blocks. Where a resource block is a 1TS * 1BW segment. (The TS may be 5ms, and BW be 12.5kHz) ...\n\n #will also need to perform these analyses as a function of time, as the sectrum changes over time, so classic, throughput vs device plots are not entirely valid here, as there is an additional variable in play. 
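# The UDL loop near the end of run_list() is a plain first-fit allocator over
# the remaining-spectrum list; extracted as a hypothetical standalone sketch:
def first_fit(devices, free_slices):
    """devices: required bandwidths; free_slices: mutable [freq, bw] pairs.
    Each device takes the first slice wide enough, carving off its low edge."""
    placed, waiting = [], []
    for need in devices:
        for piece in free_slices:
            if piece[1] >= need:
                placed.append((piece[0], need))  # device occupies [freq, freq+need)
                piece[0] += need                 # shrink the slice from the front
                piece[1] -= need
                break
        else:
            waiting.append(need)                 # no slice fits this device
    return placed, waiting

print(first_fit([66, 66, 40], [[0, 100], [200, 70]]))
# -> ([(0, 66), (200, 66)], [40])  # leftover pieces are 34 and 4 bins wide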
\n\n framemax = np.max(dataset['frame_no'])\n dev_arry = np.zeros(framemax)\n\n bwmin = 66;\n tsmin = 10; #these will not be required, as the windowing function already uses these basic values as the windows are observed.\n\n dev_bw = 66 #bandwidth for each device\n dev_ts = 11 #timescale for each device\n\n #use dataset, a subset here is not required\n\n dataset = dataset.sort_values(['frequency'], ascending=True)\n dataset['start'] = dataset['frame_no'].subtract(dataset['timescale']-1)\n\n #print('Printing frame 1')\n #print(dataset.loc[dataset['start'] == 1])\n\n ds_grp = dataset.groupby(dataset['start'])\n #ds_grp = ds_grp['frequency'].apply(lambda x: x.sort_values(ascending=False))\n\n #for c in ds_grp.groups: \n # print(c)\n #print(ds_grp.get_group(1))\n\n cur_frame = 1\n spect_df = pd.DataFrame()\n for key, group in ds_grp:\n if key < 13:\n print(key)\n #print(group)\n spect_df = spect_df.append(group)\n spect_df = spect_df[spect_df.frame_no > key] #this is pretty inefficient\n print(spect_df)\n #key is the identifier for the group - this is the start number\n #group becomes the entires within the group, ordered with ascending frequency\n #diff = \n #cur_frame = key\n\n\n\nif __name__ == \"__main__\":\n sys.exit(int(main() or 0))","sub_path":"Analysis_Suite.py","file_name":"Analysis_Suite.py","file_ext":"py","file_size_in_byte":76615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"128686892","text":"#!/usr/bin/env python\n\nfrom optparse import OptionParser\nimport os\nimport pipes\nimport sys\nimport fnmatch\nfrom os import listdir\nfrom os.path import isfile, join\nimport glob\nimport subprocess\n\nsys.path.append(os.path.join('.', 'tools', 'ninja', 'misc'))\nimport ninja_syntax\n\n\nclass cd:\n def __init__(self, newPath):\n self.newPath = newPath\n\n def __enter__(self):\n self.savedPath = os.getcwd()\n os.chdir(self.newPath)\n\n def __exit__(self, etype, value, traceback):\n os.chdir(self.savedPath)\n\nclass BuildSystem:\n instance = None\n def __init__(self, options = []):\n if not BuildSystem.instance:\n BuildSystem.instance = BuildSystem.__BuildSystem(options)\n\n def __getattr__(self, name):\n return getattr(self.instance, name)\n\n class __BuildSystem:\n def __init__(self, options):\n self.options = options\n self.targets = []\n self.buildfile = open('build.ninja', 'w')\n self.ninja = ninja_syntax.Writer(self.buildfile)\n self.ninja.variable('configure_args', ' '.join(sys.argv[1:]))\n\n env_keys = set(['CC', 'CXX', 'AR'])\n configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys)\n config_str = ''\n if configure_env:\n config_str = ' '.join([k + '=' + pipes.quote(configure_env[k]) for k in configure_env])\n self.ninja.variable('configure_env', config_str + '$ ')\n\n self.ninja.variable('cc', configure_env.get('CC', 'gcc'))\n self.ninja.variable('cxx', configure_env.get('CXX', 'g++'))\n self.ninja.variable('ar', configure_env.get('AR', 'ar'))\n\n self.ninja.rule('gcc',\n command='$cc -MMD -MT $out -MF $out.d $cflags -c $in -o $out ',\n depfile='$out.d',\n deps='gcc',\n description='CC $out')\n\n self.ninja.rule('gxx',\n command='$cxx -MMD -MT $out -MF $out.d $cxxflags -c $in -o $out ',\n depfile='$out.d',\n deps='gcc',\n description='CXX $out')\n\n self.ninja.rule('ar', command='$ar rcs $out $in', description='AR $out')\n self.ninja.rule('link', command='$cxx $in $ldflags $include -o $out', description='LINK $out')\n\n def add_target(self, target):\n self.targets.append(target)\n 
self.build(target)\n return target\n\n def build_dir(self, filename):\n return os.path.join(os.path.join('.', 'build'), filename)\n\n def gcc(self, sources, **kwargs) :\n lib_obj = []\n for src in sources:\n lib_obj += self.ninja.build(self.build_dir(src + '.o'), 'gcc', src, **kwargs)\n return lib_obj\n\n def gxx(self, sources, **kwargs) :\n lib_obj = []\n for src in sources:\n lib_obj += self.ninja.build(self.build_dir(src + '.o'), 'gxx', src, **kwargs)\n return lib_obj\n\n def ar(self, lib_name, name, **kwargs):\n self.ninja.build(self.build_dir(lib_name), 'ar', name, **kwargs)\n\n def link(self, outfile, name, **kwargs):\n if GetPlatform() == 'win':\n outfile += '.exe'\n else:\n outfile += '.out'\n self.ninja.build(self.build_dir(outfile), 'link', name, **kwargs)\n\n def build(self, target):\n if self.options.no_os:\n target.NoOS()\n\n if self.options.no_rng:\n target.NoRNG()\n\n if self.options.development:\n target.Development()\n\n if self.options.debug:\n target.Debug()\n else:\n target.Release()\n\n if self.options.coverity:\n target.Coverity()\n\n if self.options.no_os == False:\n if (GetPlatform() == 'win'):\n target.Windows()\n elif(GetPlatform() == 'linux'):\n target.Linux()\n\n target.build()\n target.link()\n\nclass BuildFlags(object):\n def __init__(self):\n self.name = ''\n self.src_cc = []\n self.src_cxx = []\n self.objs = []\n self.cflags = ['-std=c11']\n self.cxxflags = ['-std=gnu++11']\n self.ldflags = []\n self.include = []\n\n def Development(self):\n common_warnings = ['-Wall', '-Wextra', '-Wpedantic']\n self.cflags += ['-Wbad-function-cast', '-Wc99-c11-compat', '-Wc++-compat', '-Wmissing-prototypes', '-Wnested-externs' ] + common_warnings\n self.cxxflags += ['-Wnonnull','-Wmissing-include-dirs', '-Wswitch-default', '-Wunused', '-Wuninitialized',\n '-fstrict-overflow', '-Wstrict-overflow=5',\n '-ftree-vrp', '-Warray-bounds', '-Wtrampolines', '-Wfloat-equal',\n '-Wundef', '-Wshadow', '-Wunsafe-loop-optimizations', '-Wcast-qual', '-Wcast-align', '-Wconversion',\n '-Wzero-as-null-pointer-constant', '-Wuseless-cast', '-Wlogical-op', '-Waggregate-return',\n '-Wmissing-declarations', '-Wmissing-field-initializers', '-Wredundant-decls', '-Winline', '-Wstack-protector'\n ] + common_warnings\n\n def Debug(self):\n debug_flags = ['-g3','-O0']\n self.cflags += debug_flags\n self.cxxflags += debug_flags\n self.name += '_d'\n\n def Release(self):\n release_flags = ['-O3', '-DNDEBUG', '-s']\n self.cflags += release_flags\n self.cxxflags += release_flags\n\n def Coverity(self):\n coverity_flags = ['-fprofile-arcs', '-ftest-coverage']\n self.cflags += coverity_flags\n self.cxxflags += coverity_flags\n self.ldflags += ['-lgcov']\n\n def NoOS(self):\n self.cflags += ['-DNO_OS_DEPENDENCE']\n self.cxxflags += ['-DNO_OS_DEPENDENCE']\n\n def NoRNG(self):\n self.cflags += ['-DOS_RNG_AVAILABLE']\n self.cxxflags += ['-DOS_RNG_AVAILABLE']\n\n def Linux(self):\n self.ldflags += ['-lpthread']\n self.cflags += ['-D_X86INTRIN_H_INCLUDED']\n self.cxxflags += ['-D_X86INTRIN_H_INCLUDED']\n\n def Windows(self):\n self.cflags += ['-D_WIN32_WINNT=0x0501']\n self.cxxflags += ['-D_WIN32_WINNT=0x0501']\n self.ldflags += ['-lws2_32', '-lwsock32']\n\n def build(self):\n self.objs += BuildSystem().gcc(self.src_cc, variables = [('cflags', self.cflags + self.include)])\n self.objs += BuildSystem().gxx(self.src_cxx, variables = [('cxxflags', self.cxxflags + self.include)])\n\nclass BuildCryptoPPLibrary(BuildFlags):\n def __init__(self):\n BuildFlags.__init__(self)\n self.name = 'libcryptopp'\n self.src_cc = 
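# BuildSystem is a thin layer over ninja's bundled ninja_syntax helper: each
# gcc()/gxx() call above emits one build statement per source file. A minimal
# sketch of the generated text, writing to a StringIO instead of build.ninja
# (assumes tools/ninja/misc is on sys.path, as done at the top of this file):
import io
import ninja_syntax

buf = io.StringIO()
n = ninja_syntax.Writer(buf)
n.variable('cc', 'gcc')
n.rule('gcc',
       command='$cc -MMD -MT $out -MF $out.d $cflags -c $in -o $out',
       depfile='$out.d', deps='gcc', description='CC $out')
n.build('build/foo.c.o', 'gcc', 'foo.c',
        variables=[('cflags', ['-std=c11', '-O3'])])
print(buf.getvalue())   # the build.ninja fragment these calls produce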
glob.glob(os.path.join('.', 'src', '*.c')) + glob.glob(os.path.join('.', 'src', '*', '*.c'))\n self.src_cxx = glob.glob(os.path.join('.', 'src', '*.cpp')) + glob.glob(os.path.join('.', 'src', '*', '*.cpp'))\n self.include += ['-I' + os.path.join('.', 'include')]\n\n def link(self):\n BuildSystem().ar(self.name + '.a', self.objs)\n\nclass BuildCryptoPPTests(BuildFlags):\n def __init__(self, libcrypto):\n BuildFlags.__init__(self)\n self.libcrypto = libcrypto\n self.name = 'crypt_test'\n self.src_cc = glob.glob(os.path.join('.', 'tests', 'cryptopp', '*.c')) + glob.glob(os.path.join('.', 'tests', 'cryptopp', '*', '*.c'))\n self.src_cxx = glob.glob(os.path.join('.', 'tests', 'cryptopp', '*.cpp')) + glob.glob(os.path.join('.', 'tests', 'cryptopp', '*', '*.cpp'))\n self.include += self.libcrypto.include\n\n def link(self):\n BuildSystem().link(self.name, self.objs + self.libcrypto.objs, variables = [('ldflags', self.ldflags)])\n\n\nclass BuildCryptoPPTestsUnit(BuildFlags):\n def __init__(self, libcrypto):\n BuildFlags.__init__(self)\n self.libcrypto = libcrypto\n self.name = 'crypt_test_unit'\n self.src_cc = glob.glob(os.path.join('.', 'tests', 'unit_tests', '*.c')) + glob.glob(os.path.join('.', 'tests', 'unit_tests', '*', '*.c'))\n self.src_cxx = glob.glob(os.path.join('.', 'tests', 'unit_tests', '*.cpp')) + glob.glob(os.path.join('.', 'tests', 'unit_tests', '*', '*.cpp'))\n self.include += ['-I' + os.path.join('.', 'tools', 'cppunit', 'include')] + self.libcrypto.include + ['-I' + os.path.join('.', 'tests', 'unit_tests')]\n self.ldflags += ['-L' + os.path.join('.', 'tools', 'cppunit', 'src', 'cppunit', '.libs'), '-Wl,-Bstatic', '-lcppunit', '-Wl,-Bdynamic']\n\n def build(self):\n if not os.path.isfile(os.path.join('.', 'tools', 'cppunit', 'src', 'cppunit', '.libs', 'libcppunit.a')):\n with cd(os.path.join('.', 'tools', 'cppunit')):\n subprocess.call(['./configure'])\n make_process = subprocess.Popen('make', stderr=subprocess.STDOUT)\n if make_process.wait() != 0:\n exit(1)\n BuildFlags.build(self)\n\n def link(self):\n BuildSystem().link(self.name, self.objs + self.libcrypto.objs, variables = [('ldflags', self.ldflags)])\n\ndef GetPlatform():\n if sys.platform.startswith('cygwin') or sys.platform.startswith('win'):\n return 'win'\n elif sys.platform.startswith('darwin'):\n return 'mac'\n elif sys.platform.startswith('linux'):\n return 'linux'\n else:\n raise Error(\"Unknown platform: %s\" % sys.platform)\n\ndef main():\n parser = OptionParser()\n parser.add_option('--debug', action='store_true', help='enable debugging extras', default=False)\n parser.add_option('--no_os', action='store_true', help='remove os dependencies', default=False)\n parser.add_option('--no_rng', action='store_true', help='remove rng', default=False)\n parser.add_option('--build', action='store_true', help='compile code', default=False)\n parser.add_option('--coverity', action='store_true', help='coverity flags', default=False)\n parser.add_option('--development', action='store_true', help='development flags', default=False)\n\n parser.add_option('--coverity_test', action='store_true', help='perform coverity test', default=False)\n\n (options, args) = parser.parse_args()\n if args:\n print('ERROR: extra unparsed command-line arguments:', args)\n sys.exit(1)\n\n if options.build:\n build_path = os.path.join('tools', 'ninja')\n build_script = 'ninja'\n if (GetPlatform() == 'win'):\n build_script += '.exe'\n with cd(build_path):\n if not os.path.isfile(os.path.join(build_script)):\n subprocess.call(['python', 
'configure.py', '--bootstrap'])\n subprocess.call([os.path.join(build_path, build_script)])\n exit(0)\n\n if options.coverity_test:\n gcovr_script = os.path.join('.', 'tools', 'gcovr', 'scripts', 'gcovr')\n subprocess.call(['python', gcovr_script, '-r', '.', '--html', '--html-details', '-o', 'coverity_result/'])\n exit(0)\n\n BuildSystem(options)\n libcryptopp = BuildSystem().add_target(BuildCryptoPPLibrary())\n BuildSystem().add_target(BuildCryptoPPTests(libcryptopp))\n BuildSystem().add_target(BuildCryptoPPTestsUnit(libcryptopp))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":10282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"58006066","text":"# Lambda functions or anonymous functions\n# 1. It is a function without name\n# 2.It should have only one expression\n# 3.It can take multiple arguments\n# def minus(x,y):\n# return x-y\n# print(minus(8,7))\n\n# minus = lambda x,y : x-y #one liner function\n# print(minus(8,7))\n\n# Both are same only.\n\n##################################################################################################\n#Uses of lambda function filter, map and reduce\nnums=[4,5,8,2,7,1,6,9,10]\n# way 1 :\n# def is_even(n):\n# return n%2==0 :\n#\n# iss func ko bss fiter k sath hmlog ko use krna tha ar khi nhi..so y to write such\n# long code jb hame iss reuse nhi krna h to ham lambda func use kr skte h\n\n#way 2:\nis_even = lambda n :n%2==0\n#program to fetch all the even nos from this list:\n#so here we are using an inbuilt method called filter which will take a function (to decide on what basis we need to filter)\n#and a iterable it can be list tuple or anything..(to decide on which item we have to apply the filter)\nevens=list(filter(is_even,nums))\nprint(evens)\n\n#or\n\neven=list(filter(lambda n :n%2==0,nums))\nprint(even)\n########################################################################################\n# 2.map : lets say we got all the even nos and our job is to double the nos\n# in simple words whenever we want to change each value in an iterable(list,tuple..)\n# we use\n# if map:\n#\n# #this is same as big data concept we take a chunk of data and we\n# filter the data , then we map it and try to reduce it.\n\n","sub_path":"lambda funcs.py","file_name":"lambda funcs.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"453876266","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\n\ndef readData(images_file, labels_file):\n x = np.loadtxt(images_file, delimiter=',')\n y = np.loadtxt(labels_file, delimiter=',')\n return x, y\n\n\ndef softmax(x):\n \"\"\"\n Compute softmax function for input. \n Use tricks from previous assignment to avoid overflow\n \"\"\"\n # YOUR CODE HERE\n (m, n) = x.shape\n s = np.zeros(x.shape)\n for i in range(m):\n for j in range(n):\n x_minum = np.exp(x[i] - x[i][j])\n s[i][j] = 1. 
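# The lambda-function notes above describe the map step ("double the nos")
# without showing it; completing the filter -> map -> reduce chain on the
# same list:
from functools import reduce

nums = [4, 5, 8, 2, 7, 1, 6, 9, 10]
evens = list(filter(lambda n: n % 2 == 0, nums))   # [4, 8, 2, 6, 10]

doubles = list(map(lambda n: n * 2, evens))        # apply to every item
print(doubles)                                     # [8, 16, 4, 12, 20]

total = reduce(lambda a, b: a + b, doubles)        # fold down to one value
print(total)                                       # 60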
/ x_minum.sum()\n # END YOUR CODE\n return s\n\n\ndef sigmoid(x):\n \"\"\"\n Compute the sigmoid function for the input here.\n \"\"\"\n # YOUR CODE HERE\n s = 1./ (1 + np.exp(-x))\n # END YOUR CODE\n return s\n\n\ndef forward_prop(data, labels, params):\n \"\"\"\n return hidder layer, output(softmax) layer and loss\n \"\"\"\n W1 = params['W1']\n b1 = params['b1']\n W2 = params['W2']\n b2 = params['b2']\n\n # YOUR CODE HERE\n z1 = data.dot(W1) + b1\n h = sigmoid(z1)\n z2 = h.dot(W2) + b2\n y = softmax(z2)\n cost = compute_cost(y, labels)\n # END YOUR CODE\n return h, y, cost\n\n\ndef backward_prop(data, labels, params):\n \"\"\"\n return gradient of parameters\n \"\"\"\n W1 = params['W1']\n b1 = params['b1']\n W2 = params['W2']\n b2 = params['b2']\n\n # YOUR CODE HERE\n lamda = 0.0001\n (x, h, y) = data\n m = x.shape[0]\n gradb2 = np.sum(y - labels, axis = 0) / m\n\n gradW2 = np.dot(h.transpose(), y-labels) / m + 2*lamda*W2\n \n gradb1 = np.dot(y-labels, W2.transpose()) * (h *(1-h))\n gradW1 = np.dot(x.transpose(), gradb1) / m + 2*lamda*W1\n gradb1 = np.sum(gradb1, axis = 0) / m\n \n # END YOUR CODE\n\n grad = {}\n grad['W1'] = gradW1\n grad['W2'] = gradW2\n grad['b1'] = gradb1\n grad['b2'] = gradb2\n\n return grad\n\n\ndef nn_train(trainData, trainLabels, devData, devLabels):\n (m, n) = trainData.shape\n num_hidden = 300\n num_output = 10\n learning_rate = 5\n params = {}\n\n # YOUR CODE HERE\n\n # initialize params\n W1 = np.random.standard_normal((n, num_hidden))\n W2 = np.random.standard_normal((num_hidden, num_output))\n b1 = np.zeros(num_hidden)\n b2 = np.zeros(num_output)\n params['W1'] = W1\n params['b1'] = b1\n params['W2'] = W2\n params['b2'] = b2\n\n loss_train = []\n accuracy_train = []\n loss_val = []\n accuracy_val = []\n num_epochs = 30\n batch_size = 1000\n num_iters = int(m / batch_size)\n for epoch in range(num_epochs):\n loss_epoch_train = 0\n correct_epoch_train = 0\n for iter in range(num_iters):\n input = trainData[iter * batch_size: (iter + 1) * batch_size]\n labels = trainLabels[iter * batch_size: (iter + 1) * batch_size]\n\n h, output, loss = forward_prop(input, labels, params)\n \n grad = backward_prop((input, h, output), labels, params)\n \n loss_epoch_train += loss * batch_size\n correct_epoch_train += (np.argmax(output, axis=1) == np.argmax(\n labels, axis=1)).sum()\n\n # update params\n W1 = params['W1']\n b1 = params['b1']\n W2 = params['W2']\n b2 = params['b2']\n gradW1 = grad['W1']\n gradW2 = grad['W2']\n gradb1 = grad['b1']\n gradb2 = grad['b2']\n \n W1 = W1 - learning_rate * gradW1\n b1 = b1 - learning_rate * gradb1\n W2 = W2 - learning_rate * gradW2\n b2 = b2 - learning_rate * gradb2\n params['W1'] = W1\n params['b1'] = b1\n params['W2'] = W2\n params['b2'] = b2\n\n\n loss_train.append(loss_epoch_train / m)\n accuracy_train.append(correct_epoch_train / m)\n print(\"Epoch %d: training loss: %f, training accuracy: %f\" % (epoch+1, loss_train[epoch], accuracy_train[epoch]))\n\n # test on validation set\n loss_epoch_val, accuracy_epoch_val = nn_test(devData, devLabels, params)\n loss_val.append(loss_epoch_val)\n accuracy_val.append(accuracy_epoch_val)\n print(\"val loss: %f, val accuracy: %f\" % (loss_val[epoch], accuracy_val[epoch]))\n\n pickle.dump(params, open(\"params_reg.p\", \"wb\" ))\n\n # draw\n epochs = range(1, num_epochs + 1)\n plt.figure(111)\n handles = []\n curve1, = plt.plot(epochs, loss_train, label='training loss')\n curve2, = plt.plot(epochs, loss_val, label='validation loss')\n handles.append(curve1)\n handles.append(curve2)\n 
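# The element-wise softmax above recomputes a row sum for every entry
# (O(m*n^2)). Since 1/sum_k exp(x[i][k] - x[i][j]) == exp(x[i][j])/sum_k exp(x[i][k]),
# an algebraically equivalent, vectorized and numerically stable form could
# replace it:
import numpy as np

def softmax_vectorized(x):
    shifted = x - x.max(axis=1, keepdims=True)  # subtract row max: no overflow
    e = np.exp(shifted)
    return e / e.sum(axis=1, keepdims=True)

x = np.array([[1.0, 2.0, 3.0],
              [10.0, 10.0, 10.0]])
print(softmax_vectorized(x))   # rows sum to 1; second row is uniform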
plt.legend(handles=handles)\n plt.xlabel('number of epochs')\n plt.ylabel('loss')\n plt.title('loss v.s. epochs (regularization)')\n # plt.show()\n plt.savefig(\"p1_3.png\")\n\n plt.figure(112)\n handles = []\n curve1, = plt.plot(epochs, accuracy_train, label='training accuracy')\n curve2, = plt.plot(epochs, accuracy_val, label='validation accuracy')\n handles.append(curve1)\n handles.append(curve2)\n plt.legend(handles=handles)\n plt.xlabel('number of epochs')\n plt.ylabel('accuracy')\n plt.title('accuracy v.s. epochs (regularization)')\n # plt.show()\n plt.savefig('p1_4.png')\n\n # END YOUR CODE\n\n return params\n\n\ndef nn_test(data, labels, params):\n h, output, cost = forward_prop(data, labels, params)\n accuracy = compute_accuracy(output, labels)\n return cost, accuracy\n\n\ndef compute_accuracy(output, labels):\n accuracy = (np.argmax(output, axis=1) == np.argmax(\n labels, axis=1)).sum() * 1. / labels.shape[0]\n return accuracy\n\n\ndef compute_cost(output, labels):\n y_log = np.log(output)\n cost = 0\n for i in range(output.shape[0]):\n cost += -y_log[i].dot(labels[i]).sum()\n return 1. * cost / output.shape[0]\n\n\ndef one_hot_labels(labels):\n one_hot_labels = np.zeros((labels.size, 10))\n one_hot_labels[np.arange(labels.size), labels.astype(int)] = 1\n return one_hot_labels\n\n\ndef main():\n np.random.seed(100)\n trainData, trainLabels = readData('images_train.csv', 'labels_train.csv')\n\n # np.savetxt('images_debug.csv', trainData[:3000], delimiter=',')\n # np.savetxt('labels_debug.csv', trainLabels[:3000], delimiter=',')\n # return\n\n trainLabels = one_hot_labels(trainLabels)\n p = np.random.permutation(60000)\n trainData = trainData[p, :]\n trainLabels = trainLabels[p, :]\n\n devData = trainData[0:10000, :]\n devLabels = trainLabels[0:10000, :]\n trainData = trainData[10000:, :]\n trainLabels = trainLabels[10000:, :]\n\n # trainData, trainLabels = readData('images_debug.csv', 'labels_debug.csv')\n\n # trainLabels = one_hot_labels(trainLabels)\n # p = np.random.permutation(3000)\n # trainData = trainData[p, :]\n # trainLabels = trainLabels[p, :]\n\n # devData = trainData[0:2000, :]\n # devLabels = trainLabels[0:2000, :]\n # trainData = trainData[2000:, :]\n # trainLabels = trainLabels[2000:, :]\n\n mean = np.mean(trainData)\n std = np.std(trainData)\n trainData = (trainData - mean) / std\n devData = (devData - mean) / std\n\n testData, testLabels = readData('images_test.csv', 'labels_test.csv')\n testLabels = one_hot_labels(testLabels)\n testData = (testData - mean) / std\n\n params = nn_train(trainData, trainLabels, devData, devLabels)\n\n # readyForTesting = False\n # if readyForTesting:\n # accuracy = nn_test(testData, testLabels, params)\n # print('Test accuracy: %f' % accuracy)\n\nif __name__ == '__main__':\n main()\n","sub_path":"hw4/nn_starter.py","file_name":"nn_starter.py","file_ext":"py","file_size_in_byte":7436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"173545629","text":"################################################\n# File: hanoi.py\n# Writer: Dor Roter\n# Login: dor.roter\n# Exercise: ------\n# More:\n# Consulted: -----\n# Internet:\n# Notes:\n################################################\nimport sys\n\n\nclass MockHanoi:\n def __init__(self):\n self.counter = 0\n self.moves = list()\n\n def move(self, src, dst):\n self.counter += 1\n self.moves.append((src, dst))\n\n def is_minimum(self, i):\n if i >= 1:\n return self.counter == ((2 ** i) - 1)\n else:\n return self.counter == 0\n\n # returns 
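# compute_cost above loops over examples; the same cross-entropy as a single
# vectorized expression, convenient for sanity-checking the reported losses:
import numpy as np

def compute_cost_vectorized(output, labels):
    # Mean over examples of -sum(labels * log(output)); with one-hot labels
    # this picks out the log-probability assigned to the true class.
    return -np.mean(np.sum(labels * np.log(output), axis=1))

output = np.array([[0.7, 0.2, 0.1],
                   [0.1, 0.8, 0.1]])
labels = np.array([[1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0]])
print(compute_cost_vectorized(output, labels))  # (-ln 0.7 - ln 0.8) / 2 ~= 0.290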
false if any moves where wrong\n def _run(self, disks):\n towers = dict()\n towers['A'] = [i for i in range(disks, 0, -1)]\n towers['B'] = list()\n towers['C'] = list()\n\n FROM = 0\n TO = 1\n for move in self.moves:\n disk = towers[move[FROM]].pop()\n if len(towers[move[TO]]) > 0 and \\\n disk > towers[move[TO]][-1]:\n # larger disk\n return False\n towers[move[TO]].append(disk)\n\n if len(towers['A']) == 0 and (len(towers['B']) == disks or\n len(towers['C']) == disks):\n return True\n return False\n\n def validate_moves(self, disks):\n if not self.moves or disks <= 0:\n return self.counter == 0\n try:\n return self._run(disks)\n except:\n print(sys.exc_info()[0])\n return False\n","sub_path":"hanoi.py","file_name":"hanoi.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"351464537","text":"#!/usr/bin/env python\n########################## 70 ########################################\nfrom kzpy3.vis3 import *\nimport default_values\nimport rospy\nimport torch\nimport torch.autograd\nimport cv_bridge\nfrom sensor_msgs.msg import Image\nimport kzpy3.VT_net2__1June2019.rectangles as rectangles\n\nexec(identify_file_str)\n\nfull_width,full_height = 168,94\nmeta_width,meta_height = 41,23\n\nbridge = cv_bridge.CvBridge()\n\nnum_rectangle_patterns = 4\nRectangles = rectangles.Random_black_white_rectangle_collection(\n num_rectangle_patterns=num_rectangle_patterns\n)\n\ndef Camera_Shot(data): #######################################\n D = {}\n\n img = bridge.imgmsg_to_cv2(data,'rgb8')\n #mci(img)\n if shape(img)[0] > 94:\n img = cv2.resize(img,(full_width,full_height),interpolation=0)\n cr('img = cv2.resize(img,(full_width,full_height),interpolation=0)')\n \n D['img'] = img\n if not default_values.P['MOCK_ARDUINO_VERSION']:\n D['ts'] = data.header.stamp.secs + \\\n data.header.stamp.nsecs / 10.0**9\n D['seq'] = data.header.seq\n else:\n D['ts'] = time.time()\n\n return D\n\n\n\ndef Quartet(name='no_name'):\n D = {}\n D['name'] = name\n for side in ['left','right']:\n D[side] = {}\n for when in ['now','prev']:\n D[side][when] = {}\n for size_ in ['full','small']:\n D[side][when][size_] = None\n\n D['ready'] = True\n\n def _function_display(\n delay_blank=0,\n delay_prev=0,\n delay_now=0,\n scale=4,\n size_=None):\n\n shape_ = np.shape(D['left']['now'][size_])\n\n width,height = shape_[1],shape_[0]\n img_now = np.zeros(\n (height,2*width+int(width/16),3),np.uint8) + 127\n img_prev = img_now.copy()\n img_blank = img_now.copy()\n img_now[:,:width,:] = D['right']['now'][size_]\n img_now[:,-width:,:] = D['left']['now'][size_]\n img_prev[:,:width,:] = D['right']['prev'][size_]\n img_prev[:,-width:,:] = D['left']['prev'][size_]\n img_blank[:,:width,:] = 0*D['right']['prev'][size_]\n img_blank[:,-width:,:] = 0*D['left']['prev'][size_]\n if delay_blank > 0:\n mci(img_blank,\n scale=scale,\n delay=delay_blank,\n title='Quartet '+D['name'])\n if delay_prev > 0:\n mci(img_prev,\n scale=scale,\n delay=delay_prev,\n title='Quartet '+D['name'])\n if delay_now > 0:\n mci(img_now,\n scale=scale,\n delay=delay_now,\n title='Quartet '+D['name'])\n\n def _function_add_rectangles(rectangles_xys,backup_parameter):\n xys4 = rectangles_xys.reshape(len(rectangles_xys)/4,4)\n #cy(xys4)\n xys4_prev = xys4.copy()\n xys4_prev[:,1] += 0.0375\n #cm(-1)\n I = {\n 'now':{\n 'R':D['right']['now']['full'],\n 'L':D['left']['now']['full'],\n },\n 'prev':{\n 'R':D['right']['prev']['full'],\n 'L':D['left']['prev']['full'],\n },\n }\n 
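# MockHanoi records and validates moves but ships without a solver; a classic
# recursive driver (hypothetical, not part of the source) that exercises both
# checks:
def solve(hanoi, n, src='A', dst='C', via='B'):
    # Move n-1 disks aside, move the largest, stack the n-1 back on top.
    if n == 0:
        return
    solve(hanoi, n - 1, src, via, dst)
    hanoi.move(src, dst)
    solve(hanoi, n - 1, via, dst, src)

mock = MockHanoi()
solve(mock, 4)
print(mock.is_minimum(4))      # True: the recursion makes 2**4 - 1 == 15 moves
print(mock.validate_moves(4))  # True: every move legal, tower fully rebuilt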
#cm(0)\n Xys = {\n 'now': xys4,\n 'prev': xys4_prev,\n }\n for when in ['now','prev']:\n rectangles.paste_rectangles_into_drive_images(\n Xys[when],\n I[when],\n Rectangles,\n backup_parameter,\n )\n #cm(1)\n D['right']['now']['full'] = I['now']['R']\n D['left']['now']['full'] = I['now']['L']\n D['right']['prev']['full'] = I['prev']['R']\n D['left']['prev']['full'] = I['prev']['L']\n #mci(I['now']['L'],title='add_rectangles')\n #mci(D['right']['now']['full'],title='add_rectangles')\n\n\n def _function_to_torch(size_='full'):\n listoftensors = []\n for when in ['prev','now']:\n for side in (['left','right']):\n listoftensors.append(\n torch.from_numpy(D[side][when][size_]))\n camera_data = torch.cat(listoftensors, 2)\n camera_data = camera_data.cuda().float()/255. - 0.5\n camera_data = torch.transpose(camera_data, 0, 2)\n camera_data = torch.transpose(camera_data, 1, 2)\n camera_data = camera_data.unsqueeze(0)\n\n return camera_data\n\n def _function_from_torch(net_cuda,channel=0,offset=0):\n net_data = net_cuda.data.cpu().numpy()\n configs = (\n ('left','prev'),\n ('right','prev'),\n ('left','now'),\n ('right','now'),\n )\n for i in rlen(configs):\n a = 3*i\n b = 3*(i+1)-1\n\n \n\n c = net_data[channel,offset+a:offset+b+1,:,:]\n\n ccm(1,shape(c))\n\n assert shape(c) == (3,full_height,full_width) \\\n or shape(c) == (3,meta_height,meta_width)\n\n c = c.transpose(1,2,0) \n\n assert shape(c) == (full_height,full_width,3) \\\n or shape(c) == (meta_height,meta_width,3)\n\n ccm(2,shape(c))\n\n c = z55(c) # now in rgb\n if shape(c)[0] > 30:\n size_ = 'full'\n else:\n size_ = 'small'\n ccm(size_)\n side = configs[i][0]\n when = configs[i][1]\n D[side][when][size_] = c\n \"\"\"\n if shape(c)[0] > 30:\n D['size_'] = 'full'\n else:\n D['size_'] = 'small'\n ccm(D['size_'])\n side = configs[i][0]\n when = configs[i][1]\n D[side][when][D['size_']] = c\n \"\"\"\n D['display'] = _function_display\n D['to_torch'] = _function_to_torch\n D['from_torch'] = _function_from_torch\n D['add_rectangles'] = _function_add_rectangles\n\n return D\n\n\n\ndef ZED(): #######################################\n D={}\n D['full_shape'] = (full_height,full_width)\n D['small_shape'] = (meta_height,meta_width)\n D['left_list'] = []\n D['right_list'] = []\n D['left_ready'] = False\n D['stats'] = {}\n D['stats']['call'] = 0\n D['stats']['success'] = 0\n D['stats']['fail a'] = 0\n D['stats']['fail b'] = 0\n D['stats']['fail c'] = 0\n\n def _function_limit_list_lengths(max_len,min_len):\n for list_side in ['left_list','right_list']:\n if len(D[list_side]) > max_len:\n D[list_side] = D[list_side][-min_len:]\n\n def _function_build_quartet(label_frames=False):\n\n D['stats']['call'] += 1\n\n if False:#default_values.P['MOCK_ARDUINO_VERSION']:\n\n Q = Quartet(name='from ROS')\n\n Q['left']['now']['full'] = \\\n D['left_list'][-1]['img']\n\n Q['right']['now']['full'] = \\\n D['right_list'][-1]['img']\n\n Q['left']['prev']['full'] = \\\n D['left_list'][-2]['img']\n\n Q['right']['prev']['full'] = \\\n D['right_list'][-2]['img']\n\n D['stats']['success']+=1\n\n return Q\n\n else:\n try:\n\n for i in [-1,-2,-3]:\n\n dt_now = D['left_list'][i]['ts'] \\\n - D['right_list'][-1]['ts']\n if (dt_now > -0.01 and dt_now < 0.02) or default_values.P['MOCK_ARDUINO_VERSION']:\n break\n else:\n D['stats']['fail c']+=1\n return None\n\n dt_left = D['left_list'][i]['ts'] \\\n - D['left_list'][i-1]['ts']\n\n dt_right = D['right_list'][-1]['ts'] \\\n - D['right_list'][-2]['ts']\n \n if (dt_left > 0.025 and dt_left < 0.04) or 
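# to_torch above packs the four stereo frames channel-wise into one
# 1x12x94x168 float tensor in [-0.5, 0.5]. The same packing as a CPU-only
# sketch (the .cuda() call dropped; permute replaces the two transposes):
import numpy as np
import torch

frames = [np.random.randint(0, 256, (94, 168, 3), dtype=np.uint8)
          for _ in range(4)]   # prev-left, prev-right, now-left, now-right

x = torch.cat([torch.from_numpy(f) for f in frames], dim=2)  # 94 x 168 x 12
x = x.float() / 255.0 - 0.5                                  # scale to [-0.5, 0.5]
x = x.permute(2, 0, 1).unsqueeze(0)                          # HWC -> 1 x C x H x W
print(x.shape)   # torch.Size([1, 12, 94, 168])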
default_values.P['MOCK_ARDUINO_VERSION']:\n if (dt_right > 0.025 and dt_right < 0.04) or default_values.P['MOCK_ARDUINO_VERSION']:\n\n Q = Quartet(name='from ROS')\n\n\n if label_frames:\n\n Q['left']['now']['full'] = \\\n D['left_list'][i]['img'].copy()\n\n Q['right']['now']['full'] = \\\n D['right_list'][-1]['img'].copy()\n\n Q['left']['prev']['full'] = \\\n D['left_list'][i-1]['img'].copy()\n\n Q['right']['prev']['full'] = \\\n D['right_list'][-2]['img'].copy()\n\n else:\n\n Q['left']['now']['full'] = \\\n D['left_list'][i]['img']\n\n Q['right']['now']['full'] = \\\n D['right_list'][-1]['img']\n\n Q['left']['prev']['full'] = \\\n D['left_list'][i-1]['img']\n\n Q['right']['prev']['full'] = \\\n D['right_list'][-2]['img']\n\n\n if label_frames:\n for side in ['left','right']:\n for when in ['now','prev']:\n color = (0,255,0)\n if when == 'now':\n color = (255,0,0)\n cv2.putText(\n Q[side][when]['full'],\n d2s(side,when),\n (10,20),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1.0,color,2)\n\n\n Q['left']['now']['small'] = \\\n cv2.resize(Q['left']['now']['full'],\n (meta_width,meta_height),interpolation=0)\n Q['right']['now']['small'] = \\\n cv2.resize(Q['right']['now']['full'],\n (meta_width,meta_height),interpolation=0)\n Q['left']['prev']['small'] = \\\n cv2.resize(Q['left']['prev']['full'],\n (meta_width,meta_height),interpolation=0)\n Q['right']['prev']['small'] = \\\n cv2.resize(Q['right']['prev']['full'],\n (meta_width,meta_height),interpolation=0)\n\n D['stats']['success']+=1\n\n return Q\n else:\n D['stats']['fail a']+=1\n return None\n else:\n D['stats']['fail b']+=1\n return None\n\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n file_name = os.path.split(\n exc_tb.tb_frame.f_code.co_filename)[1]\n CS_('Exception!',emphasis=True)\n CS_(d2s(exc_type,file_name,exc_tb.tb_lineno),\n emphasis=False)\n return None\n\n D['limit_list_lengths'] = _function_limit_list_lengths\n D['build_quartet'] = _function_build_quartet\n\n return D\n\n\nZed = ZED()\ndef left_callback(data):\n Zed['left_list'].append(Camera_Shot(data))\n Zed['limit_list_lengths'](6,4)\n Zed['left_ready'] = True\n\n\ndef right_callback(data):\n Zed['right_list'].append(Camera_Shot(data))\n\n\n\n\n\n\n\n\nrospy.Subscriber(\n \"/bair_car/zed/right/image_rect_color\",\n Image,\n right_callback,\n queue_size = 1)\n\nrospy.Subscriber(\n \"/bair_car/zed/left/image_rect_color\",\n Image,\n left_callback,\n queue_size = 1)\n\n\n\nQUIT = False\ndef maintain_quartet_list(Q_list): ##############################\n\n cb('*** starting maintain_quartet_list(Q_list) thread. 
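# build_quartet's pairing test reduces to two timing predicates: the ~30 fps
# per-camera cadence plus a small left/right skew budget. Isolated here as
# hypothetical helpers using the source's constants (seconds):
def frames_in_sync(left_ts, right_ts, dt_min=-0.01, dt_max=0.02):
    # A left/right pair is usable when the left capture time sits within a
    # small window around the right one.
    return dt_min < left_ts - right_ts < dt_max

def cadence_ok(ts_now, ts_prev, lo=0.025, hi=0.04):
    # Consecutive frames from one camera should arrive one ~33 ms period apart.
    return lo < ts_now - ts_prev < hi

print(frames_in_sync(100.015, 100.010))  # True: 5 ms of stereo skew
print(cadence_ok(100.043, 100.010))      # True: 33 ms ~= one 30 fps period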
***')\n hz = Timer(60)\n print_timer = Tr(60)\n timer = Timer()\n while True:\n if rospy.is_shutdown():\n break\n if QUIT == True:\n break\n if True:#try:\n\n if len(Zed['left_list']) > 3 and Zed['left_ready']:\n\n Zed['left_ready'] = False\n Q = Zed['build_quartet']()\n #clp('Q == None',Q == None)\n if Q != None:\n Q_list.append(Q)\n\n while len(Q_list) > 3:\n Q_list.pop(0)\n hz.freq(\" (camera.py) \")\n print_timer.message(\n d2s(\" (camera.py)\",\n dp(timer.time()),'seconds',\n dp(100*Zed['stats']['success']/\n (1.0*Zed['stats']['call'])),'%'))\n else:\n time.sleep(1/10000.)\n else:#except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n file_name = os.path.split(\n exc_tb.tb_frame.f_code.co_filename)[1]\n CS_('Exception!',emphasis=True)\n CS_(d2s(exc_type,file_name,exc_tb.tb_lineno),\n emphasis=False)\n\n cg('\\nExiting maintain_quartet_list thread.\\n')\n\n\n\nQ_list = []\nthreading.Thread(target=maintain_quartet_list,args=[Q_list]).start()\n\n\n\"\"\"\n\nif __name__ == '__main__':\n\n rospy.init_node('camera',anonymous=True,disable_signals=True)\n\n metadata = torch.from_numpy(zeros((1,256,23,41)))\n metadata = metadata.cuda().float()\n metadata = torch.autograd.Variable(metadata)\n\n camera_data = torch.from_numpy(zeros((1,12,23,41)))\n camera_data = camera_data.cuda().float()\n camera_data = torch.autograd.Variable(camera_data)\n\n hz = Timer(10)\n wait = Timer()\n wait2 = Timer(10)\n size_ = 'full'\n\n while True:\n\n if rospy.is_shutdown():\n break\n\n if QUIT == True:\n break\n\n if wait.time() > 10:\n if wait2.check():\n cr('wait.time() =',int(wait.time()))\n wait2.reset()\n try:\n if len(Q_list) > 0:\n Q = Q_list[-1]\n if Q['ready']:\n Q['ready'] = False\n #Q['display'](1000,1000,1000,size_,4)\n hz.freq(' (main) ')\n wait.reset()\n camera_data = Q['to_torch'](size_=size_)\n U = Quartet(name='from torch '+size_)\n if size_ == 'small':\n metadata[0,128+1+4:128+1+4+12,:,:] = camera_data\n offset = 128+1+4 \n U['from_torch'](metadata,offset=offset)\n else:\n offset = 0 \n U['from_torch'](camera_data,offset=offset) #\n\n U['display'](delay_now=1,size_=size_,scale=4)\n continue\n time.sleep(1./100000.)\n \n except KeyboardInterrupt:\n QUIT = True\n cr('\\n\\n*** KeyboardInterrupt ***\\n')\n time.sleep(1)\n\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n file_name = os.path.split(\n exc_tb.tb_frame.f_code.co_filename)[1]\n CS_('Exception!',emphasis=True)\n CS_(d2s(exc_type,file_name,exc_tb.tb_lineno),\n emphasis=False)\n #QUIT = True\n #cr('\\n\\n*** Exception ***\\n')\n #time.sleep(1)\n \n\"\"\"\n \n\n#EOF","sub_path":"Cars/j26June2019__/nodes/network_utils/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":15410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"514156842","text":"# python RNNLM.py train.txt test.txt 1\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nimport numpy as np\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nfrom collections import Counter, OrderedDict\nimport sys\nfrom datetime import datetime\n\nfile1 = \"/exp/data/SEAME/train.txt\"\nfile2 = \"/exp/data/SEAME/dev.txt\"\nfile3 = \"/exp/data/SEAME/test.txt\"\n\nfile4 = \"data/clwe.txt\" # File containing embedding \n\nindex1 = []\nindex2 = []\nindex3 = []\nmodel_init_path = \"/home/vishwajeet/exp/src/models/clwe-rnn_model-5-freeze.ckpt\"\nmodel_save_path = model_init_path\nmodel_restore_path = model_init_path\n\nclass OrderedCounter(Counter, OrderedDict):\n\t'Counter that 
remembers the order elements are first seen'\n\tdef __repr__(self):\n\t\treturn '%s(%r)' % (self.__class__.__name__,\n\t\t\tOrderedDict(self))\n\tdef __reduce__(self):\n\t\treturn self.__class__, (OrderedDict(self),)\n\ndef make_combined_data(filename):\n\n\tfile = open(filename,'r')\n\tdata = []\n\tfor line in file:\n\t\tline = line.rstrip()\n\t\tfor word in line.split():\n\t\t\tdata.append(word)\n\t\tdata.append(\"\")\n\tfile.close()\n\treturn np.array(data)\n\ndef make_wordid_map(data, k):\n\t\"\"\"\n\tk is the number of least frequently occuring words in the training \n\tset that will be treated as so as to facilitate good estimates of words\n\t\"\"\"\n\n\tcounter = OrderedCounter(data)\n\tcommon_words = counter.most_common()\n\ttotal_words = sum(counter.values())\n\t\n\titem_to_id = dict()\n\tleast_word_dict = dict(common_words[:-k-1:-1])\n\titem_to_id[\"\"] = len(item_to_id)\n\ti = 1\n\tfor word in counter:\n\t\tif word not in least_word_dict.keys():\n\t\t\titem_to_id[word] = i\n\t\t\ti += 1\n\t\telse:\n\t\t\titem_to_id[word] = 0\n\t\t\t\n\treturn item_to_id\n\ndef encode(data, wordid_map):\n\n\twordid_list = []\n\tfor word in data:\n\t\tif word in wordid_map.keys():\n\t\t\twordid_list.append(wordid_map[word])\n\t\telse:\n\t\t\twordid_list.append(wordid_map[''])\n\treturn np.array(wordid_list)\n\ndef make_batch(index,data, wordid_map, batch_index, batch_size, num_steps):\n\ttemp_index = [i+batch_index*num_steps for i in index]\n\ttemp_index2 = [i+batch_index*num_steps+1 for i in index]\n\ttotal_batch = [i for i in data[temp_index]]\n\ttotal_batch = encode(total_batch, wordid_map)\n\ttotal_batch_2 = [i for i in data[temp_index2]]\n\ttotal_batch_2 = encode(total_batch_2, wordid_map)\n\tbatch_x = []\n\tbatch_y = []\n\tfor i in range(0,batch_size*num_steps,num_steps):\n\t\ttemp = total_batch[i:i+num_steps]\n\t\ttemp2 = total_batch_2[i:i+num_steps]\n\t\tbatch_x.append(temp)\n\t\tbatch_y.append(temp2)\n\treturn (batch_x,batch_y)\n\ndef get_batch(index,data,wordid_map ,batch_index, batch_size, num_steps):\n\n\treturn make_batch(index,data, wordid_map, batch_index, batch_size, num_steps)\n\ndef initialize_index(batch_size,num_steps,length):\n\tt = length//(batch_size*num_steps)\n\tindex = range(batch_size)\n\ttemp = []\n\t[temp.extend(range(i*t*num_steps,i*t*num_steps+num_steps)) for i in index]\n\treturn temp\n\ndef get_embedding(filename, wordid_map):\n\tembedding = np.zeros([word_vocab_size, rnn_size])\n\tembedding_mask = [[True]*rnn_size for _ in range(word_vocab_size)]\n\n\tword_count = 0\n\tfile = open(filename, 'r')\n\tfor index, line in enumerate(file):\n\t\tline = line.rstrip().split(\" \")\n\t\tword = line[0]\n\t\tline = line[1:]\n\n\t\tif word in wordid_map.keys():\n\t\t\ttry:\n\t\t\t\tembedding[wordid_map[word]] = list(map(float, line))\n\t\t\t\tembedding_mask[wordid_map[word]] = [False]*rnn_size\n\t\t\t\tword_count += 1\n\t\t\texcept ValueError:\n\t\t\t\tprint(word, index)\n\n\tprint(\"The number of words found in the wiki corpus: %d\" % word_count)\n\treturn embedding, embedding_mask\n\nbatch_size = 32\nnum_steps = 32\nnum_hidden_units = 512\nrnn_size = 300\nnum_hidden_layers = 2\ngrad_clip = 5\nmomentum = 0.95\ninit_scale = 0.1\nlearning_rate = 0.001\nepoch = 100\nunk_word_k = 1400\nword_vocab_size = 24635 - unk_word_k + 1 # Total distinct words - the least words not being considered plus \ngradient_flow = False\n\ndata = make_combined_data(file1)\ndev_data = make_combined_data(file2)\ntest_data = make_combined_data(file3)\n\nindex1 = 
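# make_wordid_map collapses the k rarest word types onto a shared id 0 so
# their probability mass is pooled into one estimable <unk> token; the same
# idea in miniature:
from collections import Counter

def unk_vocab(tokens, k):
    counts = Counter(tokens)
    rare = {w for w, _ in counts.most_common()[:-k - 1:-1]}  # k least common
    vocab, next_id = {'<unk>': 0}, 1
    for w in counts:
        if w in rare:
            vocab[w] = 0           # rare words all share the <unk> id
        else:
            vocab[w] = next_id
            next_id += 1
    return vocab

toks = 'the cat sat on the mat the cat'.split()
print(unk_vocab(toks, 2))   # the two rarest types map to 0 alongside <unk>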
initialize_index(batch_size,num_steps,len(data))\nindex2 = initialize_index(batch_size,num_steps,len(dev_data))\nindex3 = initialize_index(batch_size,num_steps,len(test_data))\n\nwordid_map = make_wordid_map(data, unk_word_k)\nembedding_trained, mask = get_embedding(file4, wordid_map)\n\nwith tf.device('/gpu:0'):\n\n\tinput_data = tf.placeholder(tf.int32, shape=[batch_size, num_steps])\n\ttarget = tf.placeholder(tf.int32, shape=[batch_size, num_steps])\n\tkeep_prob = tf.placeholder(tf.float32)\n\tembedding_mask = tf.constant(mask)\n\tembedding_init = tf.constant(embedding_trained, dtype=tf.float32)\n\tembedding = tf.Variable(embedding_init, name=\"embedding\")\n\tinputs = tf.nn.embedding_lookup(embedding, input_data)\n\tdef rnn_cell():\n\t\treturn tf.contrib.rnn.DropoutWrapper(\n\t\t\trnn.BasicLSTMCell(num_hidden_units,reuse=False)\n\t\t\t,output_keep_prob=keep_prob\n\t\t\t,variational_recurrent=True\n\t\t\t,dtype=tf.float32)\n\n\tcells = rnn.MultiRNNCell([rnn_cell() for _ in range(num_hidden_layers)])\n\trnn_initial_state = cells.zero_state(batch_size, dtype=tf.float32)\n\toutputs, final_state = tf.nn.dynamic_rnn(cells,inputs,initial_state=rnn_initial_state,dtype=tf.float32)\n\t\n\toutputs = tf.reshape(tf.concat(outputs,1),[-1,num_hidden_units])\n\tsoftmax_w = tf.get_variable(\"softmax_w\", [num_hidden_units, word_vocab_size])\n\tsoftmax_b = tf.get_variable(\"softmax_b\", [word_vocab_size])\n\n\tlogits = tf.matmul(outputs,softmax_w) + softmax_b\n\tlogits = tf.reshape(logits, [batch_size, num_steps, word_vocab_size])\t\n\n\tloss = tf.contrib.seq2seq.sequence_loss(logits\n\t\t\t\t\t\t\t\t\t\t\t, target\n\t\t\t\t\t\t\t\t\t\t\t, tf.ones([batch_size, num_steps]\n\t\t\t\t\t\t\t\t\t\t\t, dtype=tf.float32)\n\t\t\t\t\t\t\t\t\t\t\t, average_across_timesteps=True\n\t\t\t\t\t\t\t\t\t\t\t, average_across_batch=False)\n\n\tcost = tf.reduce_sum(loss) / num_steps\n\n\ttvars = tf.trainable_variables()\n\tgrads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)\n\n\t# optimizer = tf.train.AdamOptimizer(learning_rate)\n\toptimizer = tf.train.GradientDescentOptimizer(1.0)\n\n\tif not gradient_flow: \n\t\t# Fix some embeddings in the matrix\n\t\tgrad_dash = grads[0] * tf.cast(embedding_mask, dtype=grads[0].dtype)\n\t\tgrads[0] = grad_dash\n\n\ttrain_op = optimizer.apply_gradients(zip(grads, tvars))\n\t\n\tprint (\"Network Created\")\n\tinitializer = tf.random_uniform_initializer(-init_scale, init_scale) \n\tsaver = tf.train.Saver()\n\t\n\n\tprint (\"Training Started\")\n\tinit = tf.global_variables_initializer()\n\n\tconfig=tf.ConfigProto(allow_soft_placement=True)\n\tconfig.gpu_options.allow_growth = True\n\twith tf.Session(config=config) as sess:\n\t\tif not os.path.isfile(model_restore_path+\".meta\"):\n\t\t\tsess.run(init)\n\t\t\tsave_path = saver.save(sess, model_init_path)\n\t\t\tprint(\"Model saved in file: %s\" % save_path)\n\n\t\t\n\t\ttt = 0\n\t\tsaver.restore(sess,model_restore_path)\n\t\twhile tt < epoch :\n\t\t\tprint (\"Epoch %d : \" % tt)\n\t\t\tstep = 0\n\t\t\ttotal_cost = 0.0\n\t\t\tstate = sess.run(rnn_initial_state)\n\t\t\twhile (step+1)*batch_size*num_steps < len(data):\n\t\t\t\tbatch_x, batch_y = get_batch(index1,data, wordid_map ,step, batch_size, num_steps)\n\t\t\t\tstate,train_cost,_ = sess.run([final_state,cost,train_op],\n\t\t\t\t\t\t\t\t\t\t\tfeed_dict = {input_data:batch_x,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttarget:batch_y,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\trnn_initial_state: state,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tkeep_prob :0.4})\n\t\t\t\ttotal_cost += train_cost\n\t\t\t\tstep += 
1\n\t\t\tprint (\"Training perplexity %f\" % np.exp(total_cost/step))\n\t\t\tstep = 0\n\t\t\ttotal_cost = 0.0\n\t\t\tstate = sess.run(rnn_initial_state)\n\t\t\twhile (step+1)*batch_size*num_steps < len(dev_data):\n\t\t\t\tbatch_x, batch_y = get_batch(index2,dev_data, wordid_map ,step, batch_size, num_steps)\n\t\t\t\tstate,dev_cost = sess.run([final_state,cost], \n\t\t\t\t\t\t\t\t\t\t\tfeed_dict = {input_data:batch_x,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttarget:batch_y, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\trnn_initial_state: state,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tkeep_prob : 1.0})\n\t\t\t\ttotal_cost += dev_cost\n\t\t\t\tstep += 1\n\n\t\t\ttotal_cost = np.exp(total_cost/step)\n\t\t\tprint(\"Dev Perplexity %f\" % total_cost)\n\t\t\ttt +=1\n\t\t\tsave_path = saver.save(sess, model_save_path)\n\t\t\t# print(\"Checkpoint at \" + str(datetime.now()))\n\n\t\tstep = 0\n\t\ttotal_cost = 0.0\n\t\tstate = sess.run(rnn_initial_state)\n\t\twhile (step+1)*batch_size*num_steps < len(test_data):\n\t\t\tbatch_x, batch_y = get_batch(index3,test_data, wordid_map ,step, batch_size, num_steps)\n\t\t\tstate, test_cost = sess.run([final_state,cost],\n\t\t\t\t\t\t\t\t\t\tfeed_dict = {input_data:batch_x,\n\t\t\t\t\t\t\t\t\t\t\t\t\ttarget:batch_y,\n\t\t\t\t\t\t\t\t\t\t\t\t\trnn_initial_state: state,\n\t\t\t\t\t\t\t\t\t\t\t\t\tkeep_prob :1.0})\n\n\t\t\ttotal_cost += test_cost\n\t\t\tstep += 1\n\t\tprint (\"Testing perplexity %f\" % np.exp(total_cost/step))\n","sub_path":"server_code/clwe-rnnlm.py","file_name":"clwe-rnnlm.py","file_ext":"py","file_size_in_byte":8412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"148725925","text":"def maxScore(cardPoints, k) -> int:\n n = len(cardPoints)\n now = n-k\n end = n\n if(k>=n):\n return sum(cardPoints)\n card = cardPoints*2\n n*=2\n total = 0\n max_point = 0\n while(now <= end):\n if (total == 0):\n total = sum(card[now:now+k])\n else:\n total = total- card[now-1] + card[now+k-1]\n max_point = max(total,max_point)\n now+=1\n return max_point\nprint(maxScore([1,2,3,4,5,6,1],3))\nprint(maxScore([2,2,2],2))\nprint(maxScore( [1,79,80,1,1,1,200,1],3))\nprint(maxScore( [96,90,41,82,39,74,64,50,30],8))\n# 1 2 3 4 5 6 1 1 2 3 4 5 6 1 ","sub_path":"problemset/1423/1423.py","file_name":"1423.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"233861946","text":"import argparse\nimport collections\nimport csv\n\n# For the meaning of these identifiers, read https://openflights.org/data.html\nAirport = collections.namedtuple('Airport', ['id', 'name', 'city', 'country', 'faa_iata', 'icao', 'lat', 'long', 'alt', 'utc_offset', 'dst', 'tz', 'type', 'source'])\nAirline = collections.namedtuple('Airline', ['id', 'name', 'alias', 'iata', 'icao', 'callsign', 'country', 'active'])\nRoute = collections.namedtuple('Route', ['airline', 'airline_id', 'source_airport', 'source_airport_id', 'dest_airport', 'dest_airport_id', 'codeshare', 'stops', 'equipment'])\n\ndef load_data():\n with open('airports.dat') as f:\n airports = {}\n for line in csv.reader(f):\n airport = Airport._make(line)\n airports[airport.id] = airport\n\n with open('airlines.dat') as f:\n airlines = {}\n for line in csv.reader(f):\n airline = Airline._make(line)\n airlines[airline.id] = airline\n\n with open('routes.dat') as f:\n # top-level keyed by source airport ID, next level keyed by destination airport ID\n routes = collections.defaultdict(lambda: collections.defaultdict(list))\n for line in 
csv.reader(f):\n            route = Route._make(line)\n            routes[route.source_airport][route.dest_airport].append(route)\n\n    return airports, airlines, routes\n\n# def get_adjacent_airports(routes, airport):\n#     return itertools.chain(*(.values() for x in d.values()))\n\ndef find_flights(routes, source_airport, destination_airport, max_segments):\n    # We implement a basic BFS algorithm for following the routes\n    # Taken from http://eddmann.com/posts/depth-first-search-and-breadth-first-search-in-python/\n    queue = [(source_airport, [source_airport])]\n    while queue:\n        airport, path = queue.pop(0)\n        if len(path) > max_segments:\n            return\n        for next_airport in set(routes[airport].keys()) - set(path):\n            if next_airport == destination_airport:\n                yield path + [next_airport]\n            else:\n                queue.append((next_airport, path + [next_airport]))\n\ndef build_parser():\n    parser = argparse.ArgumentParser(description='Find flights.')\n    parser.add_argument('source', help='source airport (e.g. SFO)')\n    parser.add_argument('destination', help='destination airport (e.g. JFK)')\n    parser.add_argument('segments', type=int, help='maximum number of segments')\n    return parser\n\nif __name__ == '__main__':\n    import sys\n    parser = build_parser()\n    args = parser.parse_args(sys.argv[1:])\n\n    _airports, _airlines, routes = load_data()\n    for flight in find_flights(routes, args.source, args.destination, args.segments):\n        print(' -> '.join(flight))\n","sub_path":"solutions/lab-8/flights.py","file_name":"flights.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"133872287","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# from torch.nn import conv, Linear\n# from torch.nn.utils import _pair\n\n# from .utils import *\n\nfrom typing import Tuple\ntry:\n    from .more_ops import *\nexcept (ModuleNotFoundError, ImportError):\n    from more_ops import *\n\n\n@torch.jit.script\ndef channel_shuffle(x, n_group: int):\n    \"\"\"\n    Channel shuffle operation\n    \"\"\"\n    B, C = x.shape[:2]\n    shape2 = list(x.shape[2:])\n    x = x.reshape([B, n_group, C // n_group] + shape2)\n    x = x.transpose(1, 2)\n    x = x.reshape([B, C] + shape2)\n    return x\n\n\n@torch.jit.script\ndef resize_ref(x, shortpoint, method: str = 'bilinear', align_corners: bool = None):\n    \"\"\"\n    :type x: torch.Tensor\n    :type shortpoint: torch.Tensor\n    :type method: str\n    :type align_corners: bool\n    \"\"\"\n    hw = shortpoint.shape[2:4]\n    ihw = x.shape[2:4]\n    if hw != ihw:\n        x = torch.nn.functional.interpolate(x, hw, mode=method, align_corners=align_corners)\n    return x\n\n\n@torch.jit.script\ndef add_coord(x: torch.Tensor):\n    \"\"\"\n    Append two coordinate channels (a y map and an x map in [-1, 1])\n    \"\"\"\n    b, c, h, w = x.shape\n\n    y_coord = torch.linspace(-1, 1, h, dtype=x.dtype, device=x.device)\n    y_coord = y_coord.reshape(1, 1, -1, 1)\n    y_coord = y_coord.repeat(b, 1, 1, w)\n\n    x_coord = torch.linspace(-1, 1, w, dtype=x.dtype, device=x.device)\n    x_coord = x_coord.reshape(1, 1, 1, -1)\n    x_coord = x_coord.repeat(b, 1, h, 1)\n\n    o = torch.cat((x, y_coord, x_coord), 1)\n    return o\n\n\n@torch.jit.script\ndef pixelwise_norm(x, eps: float = 1e-8):\n    \"\"\"\n    Pixelwise feature vector normalization.\n    :param x: input activations volume\n    :param eps: small number for numerical stability\n    :return: y => pixel normalized activations\n    \"\"\"\n    return x * x.pow(2).mean(dim=1, keepdim=True).add(eps).rsqrt()\n\n\n@torch.jit.script\ndef flatten(x):\n    y = x.reshape(x.shape[0], -1)\n    return y\n\n\n
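# --- Added example (illustrative sketch, not from the original file) --------
# channel_shuffle() above interleaves channels across groups, ShuffleNet
# style: with 6 channels in 2 groups, channel order (0,1,2,3,4,5) becomes
# (0,3,1,4,2,5).
_shuffle_demo = torch.arange(6, dtype=torch.float32).reshape(1, 6, 1, 1)
assert channel_shuffle(_shuffle_demo, 2).flatten().tolist() == [0.0, 3.0, 1.0, 4.0, 2.0, 5.0]
# -----------------------------------------------------------------------------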
@torch.jit.script\ndef adaptive_instance_normalization(content_feat, style_feat):\n    assert (content_feat.size()[:2] == style_feat.size()[:2])\n\n    ss = style_feat.shape\n    cs = content_feat.shape\n\n    style_mean = style_feat.mean((2, 3), keepdim=True)\n    style_std = style_feat.reshape(ss[0], ss[1], -1).std(2, unbiased=False).reshape_as(style_mean)\n    content_mean = content_feat.mean((2, 3), keepdim=True)\n    content_std = content_feat.reshape(cs[0], cs[1], -1).std(2, unbiased=False).reshape_as(content_mean)\n\n    normalized_feat = (content_feat - content_mean) / (content_std + 1e-8)\n    return normalized_feat * style_std + style_mean\n\n\n# mod from https://github.com/NVlabs/stylegan/blob/master/training/networks_stylegan.py\n@torch.jit.script\ndef minibatch_stddev(x, group_size: int = 4, num_new_features: int = 1, eps: float = 1e-8):\n    group_size = group_size if group_size < x.shape[0] else x.shape[0]\n    s = x.shape\n    y = x.reshape(group_size, -1, num_new_features, s[1] // num_new_features, s[2], s[3])\n    y = y - y.mean(dim=0, keepdim=True)\n    y = y.pow(2).mean(dim=0)\n    y = (y + eps).sqrt()\n    y = y.mean(dim=(2, 3, 4), keepdim=True)\n    y = y.mean(dim=2)\n    y = y.repeat(group_size, 1, s[2], s[3])\n    return torch.cat((x, y), dim=1)\n\n\n@torch.jit.script\ndef pixelshuffle(x: torch.Tensor, factor_hw: Tuple[int, int]):\n    pH = factor_hw[0]\n    pW = factor_hw[1]\n    y = x\n    B, iC, iH, iW = y.shape\n    oC, oH, oW = iC // (pH * pW), iH * pH, iW * pW\n    y = y.reshape(B, oC, pH, pW, iH, iW)\n    y = y.permute(0, 1, 4, 2, 5, 3) # B, oC, iH, pH, iW, pW\n    y = y.reshape(B, oC, oH, oW)\n    return y\n\n\n@torch.jit.script\ndef pixelshuffle_invert(x: torch.Tensor, factor_hw: Tuple[int, int]):\n    pH = factor_hw[0]\n    pW = factor_hw[1]\n    y = x\n    B, iC, iH, iW = y.shape\n    oC, oH, oW = iC * (pH * pW), iH // pH, iW // pW\n    y = y.reshape(B, iC, oH, pH, oW, pW)\n    y = y.permute(0, 1, 3, 5, 2, 4) # B, iC, pH, pW, oH, oW\n    y = y.reshape(B, oC, oH, oW)\n    return y\n\n\n@torch.jit.script\ndef one_hot(class_array: torch.Tensor, class_num: int, dim: int = -1, dtype: torch.dtype = torch.int32):\n    '''\n    Converts a [D1, D2, D3, ..., DN] class array into a [D1, D2, ..., DN, D(N+1)] one-hot array\n    :param class_array: [D1, D2, ..., DN] class array\n    :param class_num: number of classes\n    :param dim:\n    :param dtype:\n    :return: y => onehot array\n    '''\n    a = torch.arange(class_num, dtype=torch.int32, device=class_array.device)\n    for _ in range(class_array.ndim):\n        a = torch.unsqueeze(a, 0)\n    b = (class_array[..., None] == a).to(dtype)\n    if dim != -1:\n        b = torch.movedim(b, -1, dim)\n    return b\n\n\n@torch.jit.script\ndef one_hot_invert(onehot_array, dim: int = -1, dtype: torch.dtype = torch.int32):\n    '''\n    Inverse of one_hot above\n    Converts a [D1, D2, D3, ..., DN] one-hot array into a [D1, D2, ..., D(N-1)] class array\n    :param onehot_array: [D1, D2, ..., DN] one-hot array\n    :param dim:\n    :param dtype:\n    :return: y => class array\n    '''\n    class_arr = torch.max(onehot_array, dim)[1]\n    class_arr = class_arr.to(dtype)\n    return class_arr\n","sub_path":"model_utils_torch/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":5008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
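# --- Added example (sketch for the model_utils_torch/ops.py record above;
# assumes torch is installed, values are illustrative) ------------------------
import torch

# pixelshuffle and pixelshuffle_invert are exact inverses:
# (B, C*rH*rW, H, W) <-> (B, C, H*rH, W*rW).
t = torch.arange(1 * 8 * 2 * 3, dtype=torch.float32).reshape(1, 8, 2, 3)
up = pixelshuffle(t, (2, 2))             # -> shape (1, 2, 4, 6)
assert up.shape == (1, 2, 4, 6)
assert torch.equal(pixelshuffle_invert(up, (2, 2)), t)
# -----------------------------------------------------------------------------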
{"seq_id":"585269222","text":"\n# training and testing settings\nmodel = dict(\n    train_cfg=dict(\n        assigner=dict(\n            type='GridAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0)),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        conf_thr=0.005,\n        nms=dict(type='nms', iou_threshold=0.45),\n        max_per_img=100))\n","sub_path":"configs/edgeailite/_xbase_/hyper_params/yolov3_config.py","file_name":"yolov3_config.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"126419863","text":"class Solution(object):\n    def sortColors(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: void Do not return anything, modify nums in-place instead.\n        \"\"\"\n        zero = 0\n        sec = len(nums) - 1\n        i = 0\n        while i <= sec:\n            while nums[i] == 2 and i < sec:\n                nums[i], nums[sec] = nums[sec], nums[i]\n                sec -= 1\n            while nums[i] == 0 and i > zero:\n                nums[i], nums[zero] = nums[zero], nums[i]\n                zero += 1\n            i += 1","sub_path":"75_SortColors.py","file_name":"75_SortColors.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
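# --- Added example (illustrative trace for the 75_SortColors.py record above:
# a Dutch-national-flag style three-way partition, done in place) -------------
nums = [2, 0, 2, 1, 1, 0]
Solution().sortColors(nums)   # 2s bubble to the back, 0s to the front
assert nums == [0, 0, 1, 1, 2, 2]
# -----------------------------------------------------------------------------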
{"seq_id":"492992485","text":"# -*- coding: utf-8 -*-\n\"\"\"bugzilla(\\*bugs, skip_when=None, xfail_when=None, unskip={}, ignore=[]): Marker for bugzilla\n\nIntelligent bugzilla integration py.test plugin. Specifically tuned for cfme_tests.\nYou can specify an unlimited number of bugs. Each bug is then examined by machinery that\nresolves dupe bugs and clones in such a way that it generates all possible instances of the bug\nthat can then be checked. This means you don't have to specify all variants of the bug. You just\nspecify one, it doesn't matter whether the original or any clone or dupe, and it will be expanded.\n\nThese conditions apply:\n\n* If the bug is open, the test will be skipped.\n* If POST/MODIFIED and upstream, it checks the build date of the appliance vs. the date of the last change\n  of the bug. If the change came before the appliance build, the test is not skipped.\n* If POST/MODIFIED and downstream, it is skipped.\n\nAfter these checks, custom checks follow. We have three hooks, ``skip_when``, ``xfail_when`` and\n``unskip``. Each of the hooks is executed per-bug and receives variables via parameters. You specify\nparameters, and the test machinery injects them. These are available:\n\n* bugs (all bugs for the test item)\n* appliance_version\n* appliance_downstream\n* bug (current bug)\n\nAlso fixtures (funcargs) for the test are injected into the parameters (if present). Sometimes it is\nnot possible to retrieve them; when you face such a thing, just ping me and I will investigate further.\n\nThe order of function parameters does not matter.\n\nThe ``bug`` objects have the specified version fields (as in cfme_data.yaml) converted to\n:py:class:`utils.version.LooseVersion`. If those fields are specified as \"---\" or \"unspecified\",\nthey return None instead of :py:class:`utils.version.LooseVersion`.\n\nThe ``unskip`` hook is a little bit different. It is a dict of ``bug_id: function``; if a bug\nis marked to be skipped by any of the machinery, it will look in the dict,\nand if it finds the bug id specified there, it then calls the function associated with the ID. If\nthe function returns True, the test will be unmarked as skipped.\n\nxfailing has precedence over skipping.\n\nExample:\n\n.. code-block:: python\n\n    @pytest.mark.parametrize(\"something_parametrized\", [1,2,3])\n    @pytest.mark.bugzilla(\n        1234, 2345, 3456,\n        xfail_when=lambda bug, appliance_version: bug.fixed_in > appliance_version,\n        unskip={\n            # Something easy\n            1234: lambda bug: bug.something == \"foo\",\n            # This works too. Will never be skipped under this bug's conditions.\n            2345: True,\n            # Do not skip if fixture `something_parametrized` is not 1\n            3456: lambda something_parametrized: something_parametrized != 1\n        })\n    def test_something(bugs, something_parametrized):\n        pass\n\n    @pytest.mark.bugzilla  # Needed so far, it stores bugzilla instance into the test for using it\n    def test_something2(bug):\n        if bug(123).status in {\"ON_QA\", \"ON_DEV\", \"ASSIGNED\"}:\n            do_some_workaround()\n        do_tests()\n\nMaintainer and responsible person: mfalesni\n\"\"\"\nimport pytest\nimport xmlrpclib\nfrom random import choice\nfrom urlparse import urlparse\n\nfrom fixtures.terminalreporter import reporter\nfrom utils import kwargify as _kwargify\nfrom utils.bz import Bugzilla\nfrom utils.conf import cfme_data\nfrom utils.log import logger\nfrom utils.version import appliance_is_downstream, current_version\n\n_bugs_cache = {}\n\n\ndef kwargify(f):\n    \"\"\"Convert a function having only positional args to a function taking a dictionary.\n\n    If you pass False or None, a function which always returns False is returned.\n    If you pass True, a function which always returns True is returned.\n    \"\"\"\n    if f is None or f is False:\n        f = lambda: False\n    elif f is True:\n        f = lambda: True\n\n    return _kwargify(f)\n\n\n
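# --- Added sketch (illustrative; the real injection lives in utils.kwargify,
# whose behaviour is assumed here rather than shown): call f with only the
# keyword arguments its signature actually declares, so each hook can ignore
# the rest of the injected environment.
def _kwargify_sketch(f):
    import inspect
    wanted = inspect.getargspec(f).args  # py2-era API, matching this module
    return lambda **kwargs: f(**dict((k, v) for k, v in kwargs.items() if k in wanted))
# e.g. _kwargify_sketch(lambda bug: bug.is_opened)(bug=some_bug, extra=1)
# would call the lambda with just `bug` (some_bug is a hypothetical object).
# -----------------------------------------------------------------------------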
def pytest_configure(config):\n    config.addinivalue_line(\"markers\", __doc__.splitlines()[0])\n\n\n@pytest.mark.trylast\ndef pytest_collection_modifyitems(session, config, items):\n    if not config.getvalue(\"bugzilla\"):\n        return\n\n    terminalreporter = reporter(config)\n    terminalreporter.write(\"\\nChecking bugs in Bugzilla...\\n\")\n    bz = Bugzilla.from_config()\n    progress = (\"-\", \"\\\\\", \"|\", \"/\")  # Very simple eye-candy to not pollute test output\n    progressbar = 0\n    last_line_length = 0\n\n    try:\n        for item in filter(lambda item: item.get_marker(\"bugzilla\") is not None, items):\n            marker = item.get_marker(\"bugzilla\")\n            terminalreporter.write(\"\\r{}\".format(last_line_length * \" \"))\n            terminalreporter.write(\"\\r{}: {}\".format(progress[progressbar], item.name))\n            progressbar = (progressbar + 1) % len(progress)\n            last_line_length = 3 + len(item.name)\n            item._ignore_bugs = set(map(int, marker.kwargs.get(\"ignore\", set([]))))\n            item._bugzilla_bugs = set(\n                filter(lambda b: b is not None, map(\n                    lambda bug_id: bz.resolve_blocker(\n                        bug_id, ignore_bugs=item._ignore_bugs), marker.args)))\n            item._skip_func = kwargify(marker.kwargs.get(\"skip_when\", None))\n            item._xfail_func = kwargify(marker.kwargs.get(\"xfail_when\", None))\n            item._unskip_dict = {}\n            for bug_id, function in marker.kwargs.get(\"unskip\", {}).iteritems():\n                item._unskip_dict[bug_id] = kwargify(function)\n        terminalreporter.write(\"\\n\")\n        terminalreporter.write(\"\\r{} bugs retrieved\\n\".format(bz.bug_count))\n        terminalreporter.write(\"All bugs summary:\\n\")\n        for bug in bz.bugs:\n            terminalreporter.write(\"#{}:{} - {}\\n\".format(bug.id, bug.status, bug.summary))\n    except xmlrpclib.Fault as exception:\n        # It can happen that the user account does not have required rights.\n        if exception.faultCode == 102:\n            terminalreporter.write(\"\\n\\n======= !!!BAILING OUT. NOT ENOUGH RIGHTS!!! =======\\n\")\n            # remove any possible bugzilla markings in the test items so that it does not get tested\n            for item in filter(lambda item: item.get_marker(\"bugzilla\") is not None, items):\n                if hasattr(item, \"_bugzilla_bugs\"):\n                    delattr(item, \"_bugzilla_bugs\")\n            terminalreporter.write(\"======= !!!BUGZILLA INTEGRATION DISABLED!!! =======\\n\")\n\n\n@pytest.mark.tryfirst\ndef pytest_runtest_setup(item):\n    if not hasattr(item, \"_bugzilla_bugs\"):\n        return\n\n    if not item._bugzilla_bugs:\n        return\n\n    skippers = set([])\n    xfailers = set([])\n\n    for bug, forceskip in item._bugzilla_bugs:\n        if forceskip or bug.is_opened:\n            skippers.add(bug)\n        if bug.upstream_bug:\n            if not appliance_is_downstream() and bug.can_test_on_upstream:\n                skippers.discard(bug)\n\n    # Custom skip/xfail handler\n    global_env = dict(\n        bugs=map(lambda b: b[0], item._bugzilla_bugs),\n        appliance_version=current_version(),\n        appliance_downstream=appliance_is_downstream(),\n    )\n    # We will now extend the env with fixtures, so they can be used in the guard functions\n    # We will however add only those that are not in the global_env otherwise we could overwrite\n    # our own stuff.\n    if hasattr(item, \"callspec\"):\n        params = item.callspec.params\n    else:\n        # Some of the test items do not have this, so fall back\n        # This can cause some problems if the fixtures are used in the guards in this case, but\n        # that will tell us where the problem is and we can then find it out properly.\n        params = {}\n    for funcarg, value in params.iteritems():\n        if funcarg not in global_env:\n            global_env[funcarg] = value\n    for bug, _ in item._bugzilla_bugs:\n        local_env = {\"bug\": bug}\n        local_env.update(global_env)\n        if item._skip_func(**local_env):\n            skippers.add(bug)\n        if item._xfail_func(**local_env):\n            xfailers.add(bug.id)\n\n    # Separate loop for unskipping\n    discards = []\n    for root_bug, _ in item._bugzilla_bugs:\n        # Check skippers\n        resolved_bug = root_bug.bugzilla.resolve_blocker(\n            root_bug.id, ignore_bugs=item._ignore_bugs)[0]\n        if resolved_bug not in skippers:\n            continue\n        bug_id = resolved_bug.id\n        # If we can't find the bug id, refer to the original ID (remember, the bug is expanded)\n        if bug_id not in item._unskip_dict:\n            bug_id = root_bug.id\n        if bug_id not in item._unskip_dict:\n            continue\n        local_env = {\"bug\": resolved_bug}\n        local_env.update(global_env)\n        if item._unskip_dict[bug_id](**local_env):\n            discards.append(resolved_bug)\n    for bug in discards:\n        skippers.discard(bug)\n\n    # We now have to resolve what to do with this test item\n    # xfailing takes precedence over skipping (xfail is via custom function)\n    if xfailers:\n        message = \"Marking as xfail due to these bugs: {}\".format(\", \".join(map(str, xfailers)))\n        logger.info(message)\n        item.add_marker(pytest.mark.xfail(reason=message))\n    elif skippers:\n        bz_url = urlparse(choice(list(skippers)).bugzilla.bugzilla.url)\n        message = \"Skipping due to these bugs:\\n{}\".format(\n            \"\\n\".join([\n                \"{}: {} ({}://{}/show_bug.cgi?id={})\".format(\n                    bug.status, bug.summary, bz_url.scheme, bz_url.netloc, bug.id)\n                for bug\n                in set(skippers)\n            ])\n        )\n        logger.info(message)\n        pytest.skip(message)\n    else:\n        logger.info(\"No action required by Bugzilla for {}. All good!\".format(item.nodeid))\n\n\ndef pytest_addoption(parser):\n    group = parser.getgroup('Bugzilla integration')\n    group.addoption('--bugzilla',\n                    action='store_true',\n                    default=cfme_data.get(\"bugzilla\", {}).get(\"enabled\", False),\n                    dest='bugzilla',\n                    help='Enable Bugzilla support.')\n\n\nclass BugMock(object):\n    \"\"\"Class used when Bugzilla integration is off or the fixtures are used on unmarked tests.\"\"\"\n    def __getattr__(self, attr):\n        return False\n\n    def __cmp__(self, other):\n        return False\n\n    def __eq__(self, other):\n        return False\n\n\n
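# --- Added example (illustrative) ---------------------------------------------
# BugMock answers False to every attribute lookup and comparison, so guard
# code written against real bug objects degrades safely when integration is
# off:
assert BugMock().status is False
assert (BugMock() == "ON_QA") is False
# -------------------------------------------------------------------------------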
@pytest.fixture(scope=\"function\")\ndef bug(request):\n    \"\"\"Fixture that, when called, provides a specific bug. No machinery that changes the ID is involved\n\n    Usage:\n\n        @pytest.mark.bugzilla(1234)\n        # or just @pytest.mark.bugzilla if you want no generic skipping and so\n        def test_something(bug):\n            if bug(345).status == \"blabla\":\n                foo()\n                bar()\n            baz()\n\n    It works only on ``bugzilla``-marked tests so far. After I find some neat 'global' store in\n    py.test, I will modify it to be usable everywhere.\n\n    If bugzilla integration is disabled, it returns a BugMock instance which answers False on each\n    comparison, equality or attribute.\n    \"\"\"\n    try:\n        return lambda bug: Bugzilla.from_config().resolve_blocker(bug)[0]\n    except AttributeError:\n        return lambda *args, **kwargs: BugMock()\n","sub_path":"markers/bz.py","file_name":"bz.py","file_ext":"py","file_size_in_byte":11179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"236888022","text":"from decimal import Decimal\n\nfrom recipemd.data import RecipeParser, Recipe, Ingredient, Amount\n\n\ndef extract(url,soup):\n\tif not 'chefkoch.de' in url:\n\t\treturn\n\n\t# title\n\ttitle = soup.find('h1', attrs={'class': 'page-title'}).text\n\tif title == 'Fehler: Seite nicht gefunden' or title == 'Fehler: Rezept nicht gefunden':\n\t\traise ValueError('No recipe found, check URL')\n\t# summary\n\tsummaryTag = soup.find('div', attrs={'class': 'summary'})\n\tsummary = summaryTag.text if summaryTag else None\n\t# servings\n\tservings= soup.find('input', attrs={'id':'divisor'}).attrs['value']\n\tyields=[Amount(Decimal(servings), f'Portion{\"en\" if int(servings) > 1 else \"\"}')]\n\n\t# tags\n\ttags=[]\n\ttagcloud=soup.find('ul', attrs={'class':'tagcloud'})\n\tfor tag in tagcloud.find_all('a'):\n\t\ttags.append(tag.text)\n\t# ingredients\n\ttable = soup.find('table', attrs={'class': 'incredients'})\n\trows = table.find_all('tr')\n\n\tingreds=[]\n\tfor row in rows:\n\t\tcols = row.find_all('td')\n\t\tcols = [s.text.strip() for s in cols]\n\t\tamount = RecipeParser.parse_amount(cols[0])\n\t\tingreds.append(Ingredient(name=cols[1],amount=amount))\n\t# instructions\n\tinstruct = soup.find('div', attrs={'id': 'rezept-zubereitung'}).text # only get text\n\tinstruct = instruct.strip() # remove leading and ending whitespace\n\t# write to file\n\treturn Recipe(title=title, ingredients=ingreds, instructions=instruct, description=summary, tags=tags, yields=yields)\n","sub_path":"recipemd_extract/plugins/chefkoch.py","file_name":"chefkoch.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"486817084","text":"#!/usr/bin/env python3\n\"\"\"\nProvides a `bind` function to plugins so they can simply bind a function to a queue.\n\"\"\"\n\nimport logging\nfrom json import loads, dumps, decoder\nfrom os import environ\nfrom os.path import join\nimport urllib.request\nimport urllib.parse\nimport time\n\nimport pika\nfrom minio import Minio\nfrom minio.error import AccessDenied\n\n\nclass ProcessingError(Exception):\n    \"\"\"Exception raised for errors during processing.\n\n    Attributes:\n        message -- explanation of the error\n    \"\"\"\n\n    def __init__(self, message: str, job_id: str=None, task_id: str=None, extra: dict=None):\n        super(ProcessingError, self).__init__(message)\n        self.message = message\n        self.extra = extra or {}\n        self.meta = {\"job_id\": job_id, \"task_id\": task_id}\n\n\ndef bind(function: callable, name: str, version=\"1.0.0\"):\n    \"\"\"binds a function to the input message queue\"\"\"\n\n    logger = logging.getLogger(\"nanowire-plugin\")\n\n    
parameters = pika.ConnectionParameters(\n host=environ[\"AMQP_HOST\"],\n port=int(environ[\"AMQP_PORT\"]),\n credentials=pika.PlainCredentials(environ[\"AMQP_USER\"], environ[\"AMQP_PASS\"]),\n heartbeat_interval=600)\n\n connection = pika.BlockingConnection(parameters)\n input_channel = connection.channel()\n output_channel = connection.channel()\n\n minio_client = Minio(\n environ[\"MINIO_HOST\"] + \":\" + environ[\"MINIO_PORT\"],\n access_key=environ[\"MINIO_ACCESS\"],\n secret_key=environ[\"MINIO_SECRET\"],\n secure=True if environ[\"MINIO_SCHEME\"] == \"https\" else False)\n minio_client.set_app_info(name, version)\n\n minio_client.list_buckets()\n\n monitor_url = environ[\"MONITOR_URL\"]\n\n logger.info(\"initialised nanowire lib\", extra={\n \"monitor_url\": monitor_url,\n \"minio\": environ[\"MINIO_HOST\"],\n \"rabbit\": environ[\"AMQP_HOST\"]\n })\n\n def send(chan, method, properties, body: str):\n \"\"\"unwraps a message and calls the user function\"\"\"\n\n logger.info(\"consumed message\", extra={\n \"chan\": chan,\n \"method\": method,\n \"properties\": properties})\n\n raw = body.decode(\"utf-8\")\n payload = loads(raw)\n validate_payload(payload, name)\n set_status(monitor_url,\n payload[\"nmo\"][\"job\"][\"job_id\"],\n payload[\"nmo\"][\"task\"][\"task_id\"],\n name + \".consumed\", error)\n\n next_plugin = get_next_plugin(name, payload[\"nmo\"][\"job\"][\"workflow\"])\n if next_plugin is None:\n logger.info(\"this is the final plugin\", extra={\n \"job_id\": payload[\"nmo\"][\"job\"][\"job_id\"],\n \"task_id\": payload[\"nmo\"][\"task\"][\"task_id\"]})\n\n path = join(\n payload[\"nmo\"][\"task\"][\"task_id\"],\n \"input\",\n \"source\",\n payload[\"nmo\"][\"source\"][\"name\"])\n\n if not minio_client.bucket_exists(payload[\"nmo\"][\"job\"][\"job_id\"]):\n raise ProcessingError(\n \"job_id does not have a bucket\",\n job_id=payload[\"nmo\"][\"job\"][\"job_id\"],\n task_id=payload[\"nmo\"][\"task\"][\"task_id\"])\n\n url = minio_client.presigned_get_object(payload[\"nmo\"][\"job\"][\"job_id\"], path)\n\n # calls the user function to mutate the JSON-LD data\n\n result = function(payload[\"nmo\"], payload[\"jsonld\"], url)\n\n # if there are issues, just use the input and carry on the pipeline\n\n if result is None:\n logger.error(\"return value is None\")\n result = payload[\"jsonld\"]\n\n if not isinstance(result, dict):\n logger.error(\"return value must be of type dict, not %s\", type(result))\n result = payload[\"jsonld\"]\n\n if \"jsonld\" in result:\n result = result[\"jsonld\"]\n else:\n result = result\n\n payload[\"jsonld\"] = result\n\n logger.info(\"finished running user code\", extra={\n \"job_id\": payload[\"nmo\"][\"job\"][\"job_id\"],\n \"task_id\": payload[\"nmo\"][\"task\"][\"task_id\"]})\n\n input_channel.basic_ack(method.delivery_tag)\n\n if next_plugin:\n output_channel.queue_declare(\n next_plugin,\n False,\n True,\n False,\n False,\n )\n output_channel.basic_publish(\n \"\",\n next_plugin,\n dumps(payload)\n )\n\n return {\n \"job_id\": payload[\"nmo\"][\"job\"][\"job_id\"],\n \"task_id\": payload[\"nmo\"][\"task\"][\"task_id\"]\n }\n\n logger.info(\"consuming from\", extra={\"queue\": name})\n\n try:\n while True:\n queue_state = input_channel.queue_declare(name, False, True, False, False)\n if queue_state.method.message_count == 0:\n time.sleep(3)\n continue\n\n method_frame, header_frame, body = input_channel.basic_get(name)\n if (method_frame, header_frame, body) == (None, None, None):\n time.sleep(3)\n continue # queue empty\n\n if body is 
None:\n                logger.error(\"body received was empty\")\n                time.sleep(3)\n                continue  # body empty\n\n            meta = {\"job_id\": None, \"task_id\": None}\n            error = \"\"\n\n            try:\n                meta = send(input_channel, method_frame, header_frame, body)\n\n            except ProcessingError as exp:\n                input_channel.basic_reject(method_frame.delivery_tag, False)\n                logger.exception(\"Processing Error: \" + exp.message,\n                                 extra={**exp.meta, **exp.extra})\n\n                error = exp.message\n                meta = exp.meta\n\n            except Exception as exp:\n                input_channel.basic_reject(method_frame.delivery_tag, False)\n                logger.exception(exp)\n\n            finally:\n                if meta[\"job_id\"] is not None and meta[\"task_id\"] is not None:\n                    set_status(monitor_url, meta[\"job_id\"], meta[\"task_id\"], name + \".done\", error)\n\n    except pika.exceptions.RecursionError as exp:\n        connection.close()\n        raise exp\n\n\ndef validate_payload(payload: dict, name: str) -> bool:\n    \"\"\"ensures payload includes the required metadata and this plugin is in there\"\"\"\n\n    if \"nmo\" not in payload:\n        raise ProcessingError(\"no nmo in payload\")\n\n    if \"job\" not in payload[\"nmo\"]:\n        raise ProcessingError(\"no job in nmo\")\n\n    if \"task\" not in payload[\"nmo\"]:\n        raise ProcessingError(\"no task in nmo\")\n\n    if not ensure_this_plugin(name, payload[\"nmo\"][\"job\"][\"workflow\"]):\n        raise ProcessingError(\n            \"declared plugin name does not match workflow\",\n            job_id=payload[\"nmo\"][\"job\"][\"job_id\"],\n            task_id=payload[\"nmo\"][\"task\"][\"task_id\"])\n\n\ndef ensure_this_plugin(this_plugin: str, workflow: list)->bool:\n    \"\"\"ensures the current plugin is present in the workflow\"\"\"\n    for workpipe in workflow:\n        if workpipe[\"config\"][\"name\"] == this_plugin:\n            return True\n    return False\n\n\ndef get_next_plugin(this_plugin: str, workflow: list) -> str:\n    \"\"\"returns the next plugin in the sequence\"\"\"\n    found = False\n    for workpipe in workflow:\n        if not found:\n            if workpipe[\"config\"][\"name\"] == this_plugin:\n                found = True\n        else:\n            return workpipe[\"config\"][\"name\"]\n\n    return None\n\n\ndef set_status(monitor_url: str, job_id: str, task_id: str, name: str, error: str):\n    \"\"\"sends a POST request to the monitor to notify it of task position\"\"\"\n    req = urllib.request.Request(\n        urllib.parse.urljoin(\n            monitor_url,\n            \"/v3/task/status/%s/%s\" % (job_id, task_id)),\n        data=dumps({\n            \"t\": int(time.time()),\n            \"id\": task_id,\n            \"p\": name,\n            \"e\": error\n        }).encode(),\n        headers={\n            \"Content-Type\": \"application/json\"\n        })\n    urllib.request.urlopen(req)\n","sub_path":"nanowire_plugin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
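# --- Added example (toy data; illustrates ensure_this_plugin/get_next_plugin
# from the nanowire_plugin record above; the plugin names are hypothetical) ---
workflow = [
    {"config": {"name": "extract"}},
    {"config": {"name": "classify"}},
    {"config": {"name": "publish"}},
]
assert ensure_this_plugin("classify", workflow) is True
assert get_next_plugin("classify", workflow) == "publish"
assert get_next_plugin("publish", workflow) is None   # final plugin in the chain
# -----------------------------------------------------------------------------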
{"seq_id":"378901586","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\nclass BST(object):\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n    # LC700 https://leetcode.com/problems/search-in-a-binary-search-tree/\n    def searchBST(self, root, val):\n        if root is None:\n            return None \n        if val == root.val: \n            return root \n        elif val < root.val:\n            return self.searchBST(root.left, val)\n        else:\n            return self.searchBST(root.right, val)\n\n    # LC701 https://leetcode.com/problems/insert-into-a-binary-search-tree/\n    def insertIntoBST(self, root, val):\n        if root is None:\n            return TreeNode(val)\n        # if val == root.val:\n        #     a BST usually does not insert a value that already exists\n        elif val < root.val:\n            root.left = self.insertIntoBST(root.left, val)\n        else:\n            root.right = self.insertIntoBST(root.right, val)\n        return root\n    \n    # LC450 https://leetcode.com/problems/delete-node-in-a-bst/\n    # Case 1: A is a leaf; both children are empty, so it can simply be dropped.\n    # Case 2: A has exactly one non-empty child; that child takes over A's position.\n    # Case 3: A has two children. To preserve the BST property, A must be replaced by the largest node of its left subtree, or the smallest node of its right subtree.\n    def deleteNode(self, root, key):\n        if root is None:\n            return None \n        if root.val == key:\n            if root.left is None and root.right is None:\n                return None \n            elif root.left is None:\n                return root.right\n            elif root.right is None:\n                return root.left\n            else: # root.left is not None and root.right is not None:\n                # First find the right most leaf of the left sub-tree\n                rightmost_LeftSubTree = root.left \n                while rightmost_LeftSubTree.right:\n                    rightmost_LeftSubTree = rightmost_LeftSubTree.right\n                rightmost_LeftSubTree.right = root.right\n                return root.left\n        elif key < root.val:\n            root.left = self.deleteNode(root.left, key)\n        else:\n            root.right = self.deleteNode(root.right, key)\n        return root\n\n    # LC98 https://leetcode.com/problems/validate-binary-search-tree\n    def isValidBST(self, root):\n        def helper(root, floor, ceil): \n            if root is None:\n                return True\n            if root.val <= floor or root.val >= ceil:\n                return False\n            if root.left and root.left.val >= root.val:\n                return False \n            if root.right and root.right.val <= root.val:\n                return False\n            else:\n                return helper(root.left, floor, min(ceil, root.val)) and helper(root.right, max(floor, root.val), ceil)\n\n        return helper(root, -2**31-1, 2**31+1)\n","sub_path":"Framework/BST.py","file_name":"BST.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
{"seq_id":"367091899","text":"from unittest import TestCase\nfrom io import BytesIO\nimport copy\n\nfrom vertx import Request\n\n\nclass RequestTestCase(TestCase):\n\n    def test_string_representation(self):\n        env = mock_env()\n        env['REQUEST_METHOD'] = 'POST'\n        env['PATH_INFO'] = '/dashboard/products'\n        request = Request(env)\n        self.assertEqual(str(request), '')\n        self.assertEqual(repr(request), '')\n\n    def test_request_method(self):\n        env = mock_env()\n        request = Request(env)\n        env['REQUEST_METHOD'] = 'POST'\n        request = Request(env)\n        self.assertEqual(request.method, 'POST')\n\n    def test_url(self):\n        env = mock_env()\n        env['wsgi.url_scheme'] = 'https'\n        env['HTTP_HOST'] = 'myserver.com:8080'\n        env['PATH_INFO'] = '/dashboard/products'\n        env['QUERY_STRING'] = 'page=1&order=price'\n        request = Request(env)\n        self.assertEqual(request.url, 'https://myserver.com:8080/dashboard/products?page=1&order=price')\n\n    def test_base_url(self):\n        env = mock_env()\n        env['wsgi.url_scheme'] = 'https'\n        env['HTTP_HOST'] = 'myserver.com:8080'\n        env['PATH_INFO'] = '/dashboard/products'\n        env['QUERY_STRING'] = 'page=1&order=price'\n        request = Request(env)\n        self.assertEqual(request.base_url, 'https://myserver.com:8080')\n\n    def test_scheme(self):\n        env = mock_env()\n        env['wsgi.url_scheme'] = 'https'\n        request = Request(env)\n        self.assertEqual(request.scheme, 'https')\n\n    def test_host(self):\n        env = mock_env()\n        env['HTTP_HOST'] = 'myserver.com:8080'\n        request = Request(env)\n        self.assertEqual(request.host, 'myserver.com:8080')\n\n    def test_path(self):\n        env = mock_env()\n        env['PATH_INFO'] = '/dashboard/products'\n        request = Request(env)\n        self.assertEqual(request.path, '/dashboard/products')\n\n    def test_query_string(self):\n        env = mock_env()\n        env['QUERY_STRING'] = 'page=1&order=price'\n        request = 
Request(env)\n self.assertEqual(request.query_string, 'page=1&order=price')\n\n def test_body(self):\n env = mock_env()\n env['wsgi.input'].write(b'
Hello World
')\n env['wsgi.input'].seek(0)\n request = Request(env)\n self.assertEqual(request.body, b'
Hello World
')\n\n def test_query(self):\n env = mock_env()\n env['QUERY_STRING'] = 'page=1&order=price'\n request = Request(env)\n self.assertEqual(request.query, {'page': '1', 'order': 'price'})\n\n def test_headers(self):\n env = mock_env()\n env['HTTP_AUTH'] = 'token'\n env['HTTP_X_FORWARDED_FOR'] = '203.0.113.195, 70.41.3.18, 150.172.238.178'\n request = Request(env)\n self.assertEqual(request.headers, {\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'accept-encoding': 'gzip, deflate, sdch',\n 'accept-language': 'pt-BR,pt;q=0.8,en-US;q=0.6,en;q=0.4',\n 'auth': 'token',\n 'connection': 'keep-alive',\n 'host': 'localhost:8000',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36',\n 'x-forwarded-for': '203.0.113.195, 70.41.3.18, 150.172.238.178',\n })\n\n def test_header_keys_are_case_insensitive(self):\n env = mock_env()\n env['HTTP_AUTH'] = 'token'\n request = Request(env)\n self.assertEqual(request.headers['auTH'], 'token')\n\n def test_cookies(self):\n env = mock_env()\n env['HTTP_COOKIE'] = 'foo=bar; bar=biz'\n request = Request(env)\n self.assertEqual(request.cookies, {'foo': 'bar', 'bar': 'biz'})\n\n def test_empty_cookies(self):\n env = mock_env()\n request = Request(env)\n self.assertEqual(request.cookies, {})\n\n def test_cookie_with_special_characters(self):\n env = mock_env()\n env['HTTP_COOKIE'] = 'token=\"abc/\\\\073\\\\054~\\\\341\\\\347[\\'!\\\\\"\\\\\"]\"'\n request = Request(env)\n self.assertEqual(request.cookies, {'token': 'abc/;,~áç[\\'!\"\"]'})\n\n def test_ip(self):\n env = mock_env()\n env['REMOTE_ADDR'] = '127.0.0.1'\n request = Request(env)\n self.assertEqual(request.ip, '127.0.0.1')\n\n def test_ip_with_x_forwarded_for_header(self):\n env = mock_env()\n env['HTTP_X_FORWARDED_FOR'] = '203.0.113.195, 70.41.3.18, 150.172.238.178'\n request = Request(env)\n self.assertEqual(request.ip, '203.0.113.195')\n\n def test_referer(self):\n env = mock_env()\n env['HTTP_REFERER'] = 'http://localhost:8000/app/hello'\n request = Request(env)\n self.assertEqual(request.referer, 'http://localhost:8000/app/hello')\n\n def test_empty_referer(self):\n env = mock_env()\n request = Request(env)\n self.assertIsNone(request.referer)\n\n def test_user_agent(self):\n env = mock_env()\n request = Request(env)\n self.assertEqual(request.user_agent, 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36')\n\n def test_empty_user_agent(self):\n env = mock_env()\n env.pop('HTTP_USER_AGENT')\n request = Request(env)\n self.assertEqual(request.user_agent, None)\n\n\n\ndef mock_env():\n return copy.deepcopy(sample_env)\n\n\nsample_env = {\n 'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'HTTP_ACCEPT_ENCODING': 'gzip, deflate, sdch',\n 'HTTP_ACCEPT_LANGUAGE': 'pt-BR,pt;q=0.8,en-US;q=0.6,en;q=0.4',\n 'HTTP_CONNECTION': 'keep-alive',\n 'HTTP_HOST': 'localhost:8000',\n 'HTTP_UPGRADE_INSECURE_REQUESTS': '1',\n 'HTTP_USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36',\n 'PATH_INFO': '',\n 'QUERY_STRING': '',\n 'RAW_URI': '',\n 'REMOTE_ADDR': '127.0.0.1',\n 'REMOTE_PORT': '54130',\n 'REQUEST_METHOD': 'GET',\n 'SCRIPT_NAME': '',\n 'SERVER_NAME': '127.0.0.1',\n 'SERVER_PORT': '8000',\n 'SERVER_PROTOCOL': 'HTTP/1.1',\n 'SERVER_SOFTWARE': 'gunicorn/19.6.0',\n 'wsgi.errors': BytesIO(),\n 
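# --- Added note (illustrative): Request.headers in the record above is
# derived from the HTTP_* keys of a WSGI environ like this sample, roughly:
#
#   headers = {k[5:].replace('_', '-').lower(): v
#              for k, v in env.items() if k.startswith('HTTP_')}
#
# which is why 'HTTP_USER_AGENT' surfaces as headers['user-agent'].
# -----------------------------------------------------------------------------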
'wsgi.file_wrapper': BytesIO(),\n 'wsgi.input': BytesIO(),\n 'wsgi.multiprocess': False,\n 'wsgi.multithread': False,\n 'wsgi.run_once': False,\n 'wsgi.url_scheme': 'http',\n 'wsgi.version': (1, 0),\n}\n","sub_path":"tests/test_request.py","file_name":"test_request.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"542265710","text":"import time\nimport ptah\nimport transaction\nfrom ptah import config\nfrom datetime import datetime\n\nfrom base import Base\n\n\nclass TestContent(Base):\n\n def tearDown(self):\n config.cleanup_system(self.__class__.__module__)\n super(TestContent, self).tearDown()\n\n def _make_app(self):\n global ApplicationRoot\n class ApplicationRoot(ptah.cms.ApplicationRoot):\n __type__ = ptah.cms.Type('app')\n\n ApplicationRoot.__type__.cls = ApplicationRoot\n\n return ApplicationRoot\n\n def test_content_path(self):\n import ptah.cms\n self._setRequest(self._makeRequest())\n\n class MyContent(ptah.cms.Content):\n\n __mapper_args__ = {'polymorphic_identity': 'mycontent'}\n __uri_factory__ = ptah.UriFactory('mycontent')\n\n ApplicationRoot = self._make_app()\n\n factory = ptah.cms.ApplicationFactory(\n ApplicationRoot, '/app1', 'root', 'Root App')\n\n root = factory(self.request)\n\n content = MyContent(__name__='test',\n __parent__ = root,\n __path__ = '%stest/'%root.__path__)\n c_uri = content.__uri__\n ptah.cms.Session.add(content)\n\n self.assertTrue(\n content.__name__ == 'test')\n\n self.assertTrue(\n content.__resource_url__(self.request, {}) == '/app1/test/')\n transaction.commit()\n\n # same content inside same root but in different app factory\n\n factory2 = ptah.cms.ApplicationFactory(\n ApplicationRoot, '/app2', 'root', 'Root App')\n root = factory2(self.request)\n\n c = ptah.cms.Session.query(MyContent).filter(\n MyContent.__uri__ == c_uri).one()\n\n self.assertTrue(\n c.__resource_url__(self.request, {}) == '/app2/test/')\n\n def test_content_events(self):\n import ptah.cms\n\n class MyContent(ptah.cms.Content):\n __mapper_args__ = {'polymorphic_identity': 'mycontent'}\n __uri_factory__ = ptah.UriFactory('mycontent')\n\n content = MyContent()\n\n config.notify(ptah.cms.ContentCreatedEvent(content))\n\n self.assertTrue(isinstance(content.created, datetime))\n self.assertTrue(isinstance(content.modified, datetime))\n time.sleep(0.1)\n\n config.notify(ptah.cms.ContentModifiedEvent(content))\n self.assertTrue(content.modified != content.created)\n\n def test_content_set_owner_on_create(self):\n import ptah, ptah.cms\n\n class MyContent(ptah.cms.Content):\n __mapper_args__ = {'polymorphic_identity': 'mycontent'}\n __uri_factory__ = ptah.UriFactory('mycontent')\n\n content = MyContent()\n\n config.notify(ptah.cms.ContentCreatedEvent(content))\n\n self.assertEqual(content.__owner__, None)\n\n ptah.authService.set_userid('user')\n config.notify(ptah.cms.ContentCreatedEvent(content))\n\n self.assertEqual(content.__owner__, 'user')\n\n def test_content_info(self):\n import ptah, ptah.cms\n\n class MyContent(ptah.cms.Content):\n __mapper_args__ = {'polymorphic_identity': 'mycontent'}\n __uri_factory__ = ptah.UriFactory('mycontent')\n\n content = MyContent()\n config.notify(ptah.cms.ContentCreatedEvent(content))\n\n info = content.info()\n self.assertIn('__name__', info)\n self.assertIn('__type__', info)\n\n class MyContent(ptah.cms.Content):\n __type__ = ptah.cms.Type('mycontent', 'MyContent')\n\n content = MyContent()\n 
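# --- Added note (illustrative sketch, not ptah's real implementation): the
# config.notify(event) calls in these tests follow a plain observer pattern,
# conceptually:
#
#   _subscribers = {}          # event type -> [handler, ...]
#   def notify(event):
#       for handler in _subscribers.get(type(event), []):
#           handler(event)
#
# ContentCreatedEvent handlers are what stamp created/modified/__owner__.
# -----------------------------------------------------------------------------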
config.notify(ptah.cms.ContentCreatedEvent(content))\n\n info = content.info()\n self.assertIn('title', info)\n self.assertIn('description', info)\n\n def test_content_update(self):\n import ptah, ptah.cms\n\n class MyContent(ptah.cms.Content):\n __type__ = ptah.cms.Type('mycontent', 'MyContent')\n\n content = MyContent()\n config.notify(ptah.cms.ContentCreatedEvent(content))\n\n modified = content.modified\n time.sleep(0.1)\n\n content.update(title='Test title')\n info = content.info()\n\n self.assertEqual(info['title'], 'Test title')\n self.assertEqual(content.title, 'Test title')\n self.assertTrue(content.modified > modified)\n\n def test_content_delete(self):\n import ptah, ptah.cms\n\n class MyContent(ptah.cms.Content):\n __type__ = ptah.cms.Type('mycontent', 'MyContent')\n\n class MyContainer(ptah.cms.Container):\n __type__ = ptah.cms.Type('container', 'Container')\n\n content = MyContent()\n\n self.assertRaises(ptah.cms.Error, content.delete)\n\n container = MyContainer()\n container['content'] = content\n\n content.delete()\n self.assertEqual(container.keys(), [])\n","sub_path":"ptah/cms/tests/test_content.py","file_name":"test_content.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
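# --- Added example (standalone sketch of the delete-guard pattern that
# test_content_delete above exercises; Node and Container are hypothetical
# stand-ins, not ptah's API) ---------------------------------------------------
class Node(object):
    def __init__(self):
        self.__parent__ = None
    def delete(self):
        if self.__parent__ is None:
            raise ValueError("cannot delete content without a parent container")
        self.__parent__.children.remove(self)

class Container(object):
    def __init__(self):
        self.children = []
    def add(self, node):
        node.__parent__ = self
        self.children.append(node)

c, n = Container(), Node()
try:
    n.delete()                # no parent yet -> refuses, like ptah.cms.Error
except ValueError:
    pass
c.add(n)
n.delete()
assert c.children == []
# -------------------------------------------------------------------------------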