diff --git "a/6283.jsonl" "b/6283.jsonl" new file mode 100644--- /dev/null +++ "b/6283.jsonl" @@ -0,0 +1,638 @@ +{"seq_id":"7876624255","text":"\"\"\"\n6. 匹配所有合法的电子邮件地址(先写出一个限制比较宽松的正则表达式,然后尽可能加 强限制条件,但要保证功能的正确性)。\nimport re\n\n\ndef main():\n \"\"匹配合法的邮箱名主逻辑\"\"\n # 获取邮箱列表\n email_list = [\"291470550@qq.com\",\"123456@163.com\",\"wangzirangy11@gmail.com\"]\n # 遍历邮箱列表,匹配出满足条件的邮箱\n for email in email_list:\n res = re.match(r'[\\da-zA-Z_]{8,20}@(qq|163|gmail)\\.com$',email)\n if res:\n print(\"满足邮箱命名条件:%s\" % res.group())\n else:\n print(\"不满足邮箱命名条件:%s\" % email)\n\nif __name__ == '__main__':\n main()\n\"\"\"\nimport re\n\n\ndef main():\n mail = input('请输入邮件地址:')\n rule = r'[a-zA-Z0-9_]{8,20}@(gmail|qq|163)\\.com$'\n\n res = re.match(rule, mail)\n if res:\n print('满足邮箱命名条件:%s' % res.group())\n else:\n print('不满足邮箱命名条件:%s' % mail)\n\n\nif __name__ == '__main__':\n main()","repo_name":"Icecarry/learn","sub_path":"code/day08/匹配邮件地址.py","file_name":"匹配邮件地址.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24149523700","text":"from collections import deque\n\nINF = 987654321\n\n\ndef get_abs(side, n):\n return abs(2 * side - n)\n\n\ndef bfs(start, disabled_a, disabled_b, graph, visited):\n q = deque()\n q.append(start)\n visited[start] = True\n\n while q:\n node = q.popleft()\n\n for nxt in graph[node]:\n if nxt == disabled_a or nxt == disabled_b:\n continue\n\n if not visited[nxt]:\n visited[nxt] = True\n q.append(nxt)\n\n\ndef get_adj(n, wires):\n ret = [[] for _ in range(n + 1)]\n for a, b in wires:\n ret[a].append(b)\n ret[b].append(a)\n return ret\n\n\ndef solution(n, wires):\n result = INF\n graph = get_adj(n, wires)\n\n for a, b in wires:\n # 한 쪽을 구하면 나머지 한 쪽은 자동으로 계산된다.\n visited = [False] * (n + 1)\n for i in range(1, n + 1):\n if not visited[i]:\n bfs(i, a, b, graph, visited)\n break\n\n result = min(result, get_abs(len(list(filter(lambda x: x, visited))),\n n))\n\n return result\n","repo_name":"mrbartrns/algorithm-v2","sub_path":"programmers/lv2/전력망을_둘로_나누기.py","file_name":"전력망을_둘로_나누기.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1524081823","text":"from PIL import Image, ImageFont, ImageDraw\nimport shutil\nimport os\n\nfont = ImageFont.truetype(\"/usr/share/fonts/truetype/arphic/uming.ttc\", 25)\n\ndef mark_text(img, overlay_text):\n width, height = img.size\n draw = ImageDraw.Draw(img)\n draw.text((width-40,0), overlay_text, (255,0,0), font=font)\n draw = ImageDraw.Draw(img)\n return img\n\n# Clean output\nshutil.rmtree('output', ignore_errors=True, onerror=None)\nos.mkdir('output')\n\ncounter = 1\nfor filename in sorted(os.listdir('input')):\n img = Image.open('input/' + filename)\n img = mark_text(img, \"#\" + str(counter))\n img.save(\"output/\" + filename)\n counter = counter + 1\n","repo_name":"yillkid/pic_text_overlay","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35460572894","text":"\"\"\"\n\nhttps://leetcode-cn.com/problems/build-array-where-you-can-find-the-maximum-exactly-k-comparisons/solution/dong-tai-gui-hua-by-nks5117/\n\ndp[i][j][k] : the number of ways to build the array[1:i] and max(arr[1:i])=j and search cost is k\n\nif arr[i] is the largest among arr[1:i] => arr[i] = j, 
那么前i-1个元素最大值可以是1,...,j-1任意一个\ndp[i][j][k] += sum(dp[i-1][1..j-1][k-1]), 最大值是j, 前面的可以是任何小于j的数, 就有j中可能性\n\n\nif arr[i] is not the largest among arr[1:i] => arr[i] <= j\ndp[i][j][k] += dp[i-1][1..j-1][k-1] * j\n\n\n----------------------------------------------------------------------------------------------------------------------------\ndp[i][j][k] : the number of ways to build the array[1:i] and max(arr[1:i])-j and search cost is k\n\n\nif arr[i] is the largest among arr[1:i] => arr[i] = j\ndp[i][j][k] += sum(dp[i-1][1..j-1][k-1])\n\nif arr[i] is not the largest among arr[1:i] => arr[i] <= j\ndp[i][j][k] += dp[i-1][1..j-1][k-1]\n\n\n1420.Build-Array-Where-You-Can-Find-The-Maximum-Exactly-K-Comparisons\n乍看没有头绪,不妨将题目中的三个变量都作为dp状态变量的下标试一下。第一版本是:dp[i][j][k]表示对于前i个元素、当nums[i]等于j、总共用了k次cost时,总共有多少种方案。\n\n我们试图来转移dp[i][j][k]到前一个状态dp[i-1][?][?]。考虑假设我们在处理第i个元素的时候动用了一次cost,那么意味着前i-1个元素必须都小于j。但是我们的dp设计里并没有这样的信息。\ndp[i-1][j'][k-1]中的j'表示的仅仅是nums[i]==j',没有合适的状态来表示前i-1个元素的最大值。\n\n所以我们容易想到并改进得到第二个版本:dp[i][j][k]表示对于前i个元素、最大值等于j、总共用了k次cost时,总共有多少种方案。\n\n同样,考虑假设我们在处理第i个元素的时候新增一次cost,那么意味着nums[i]就是前i个元素的最大值,即是j。于是我们需要前i-1个元素的最大值小于j就可以了。\n因此有dp[i][j][k] = dp[i-1][j'][k-1],其中j'=1,2,...,j-1.\n\n考虑假设我们在处理第i个元素的时候没有新增一次cost,那么意味着nums[i]并不是前i个元素的最大值,因此nums[i]的取值可以是1,2,..j.\n而对于前i-1个元素的最大值则必须是j。因此有dp[i][j][k] = dp[i-1][j][k]*j.\n\n这里根据加法原理,dp[i][j][k]应该是上面两种情况之和。\n\n最后的答案是dp[n-1][j][k], j=1,2,..m 的总和。\n\n\"\"\"\n\n\nclass SolutionWisdom:\n def numOfArrays(self, n: int, m: int, k: int) -> int:\n K = k\n dp = [[[0 for i in range(K + 1)] for j in range(m + 1)] for k in range(n)]\n mod = 10 ** 9 + 7\n\n for j in range(1, m + 1):\n dp[0][j][1] = 1\n\n for i in range(1, n):\n for j in range(1, m + 1):\n for k in range(1, K + 1):\n for t in range(1, j):\n dp[i][j][k] += dp[i - 1][t][k - 1]\n dp[i][j][k] %= mod\n dp[i][j][k] += dp[i - 1][j][k] * j\n dp[i][j][k] %= mod\n\n res = sum([dp[n - 1][j][k] for j in range(1, m + 1)])\n res %= mod\n return res\n\n\n\n\n\n\"\"\"\nExplanation\n\nIn this question, searchcost is basically the number of times when the largest number in array increased.\n\nIt's obvious that we should use dynamic programming to approach this problem. Let's define dp(arr_len, lrg_num, search_cost) as follows:\n\narr_len: the length of current array\nlrg_num: the largest number in current array\nsearch_cost: the searchcost of current array\nAnd dp(arr_len, lrg_num, search_cost) means the number of arrays with current length arr_len, largest number lrg_num, \nand the largest number increased by search_cost times so far.\n\nSuppose we are building an array from left to right. The transition function has two cases: the last number contributes to searchcost vs not.\n\nLet's take array [num, 5] as an example. We loop through 1 to m for num.\n\nnum = 5, ..., m: No searchcost happens on last number. 
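A brute-force cross-check of the recurrence above, enumerating all m**n arrays (feasible only for tiny n, m, k; the function names here are illustrative, not part of the record), should agree with the DP totals:

from itertools import product

def search_cost(arr):
    # number of times the running maximum increases while scanning left to right
    cost, cur_max = 0, 0
    for x in arr:
        if x > cur_max:
            cur_max, cost = x, cost + 1
    return cost

def brute_force(n, m, k):
    return sum(1 for arr in product(range(1, m + 1), repeat=n)
               if search_cost(arr) == k)

# brute_force(2, 3, 1) == 6: exactly the pairs whose second element <= the first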
The largest number is unchanged from [num] to [num, 5].\nnum = 1, ..., 4: Searchcost happens on last number.\nAnd we have the following transition function:\n\ndp(arr_len, lrg_num, search_cost) += dp(arr_len - 1, lrg_num, search_cost) for num = lrg_num + 1, ..., m\ndp(arr_len, lrg_num, search_cost) += dp(arr_len - 1, num, search_cost - 1) for num = 1, ..., lrg_num\n\"\"\"\n\nfrom functools import lru_cache\n\nclass SolutionAlan:\n def numOfArrays(self, n: int, m: int, k: int) -> int:\n\n @lru_cache(None)\n def dp(arr_len, lrg_num, search_cost):\n if arr_len == 1:\n return 1 if search_cost == 1 else 0\n if search_cost == 0: # optional\n return 0\n\n # no searchcost happens on last number\n res = dp(arr_len - 1, lrg_num, search_cost) * lrg_num\n\n # searchcost happens on last number\n res += sum(dp(arr_len - 1, num, search_cost - 1) for num in range(1, lrg_num))\n\n return res % 1000000007\n\n return sum([dp(n, num, k) for num in range(1, m + 1)]) % 1000000007\n\n\n\n\n\nclass Solution2:\n def dfs(self, n, i, k):\n if (self.tmp[n][i][k] != -1):\n return self.tmp[n][i][k]\n if n == 0 or k == 0 or i == 0:\n self.tmp[n][i][k] = 0\n return 0\n if n == 1 and k == 1:\n self.tmp[n][i][k] = 1\n return 1\n res = 0\n for j in range(1, i):\n res += self.dfs(n - 1, j, k - 1)\n res %= 1000000007\n res += self.dfs(n - 1, i, k) * i\n res %= 1000000007\n self.tmp[n][i][k] = res\n return res\n\n def numOfArrays(self, n: int, m: int, k: int) -> int:\n self.tmp = [[[-1 for t in range(k + 1)] for j in range(m + 1)] for i in range(n + 1)]\n res = 0\n for i in range(1, m + 1):\n res += self.dfs(n, i, k)\n res %= 1000000007\n return res\n\n\n\n\n\n\n\n","repo_name":"Taoge123/OptimizedLeetcode","sub_path":"LeetcodeNew/python2/LC_1420.py","file_name":"LC_1420.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"23135194809","text":"# Ian McLoughlin\n# A program that displays Fibonacci numbers.\n# Topic 1 Exercise\n# synced to GitHub Repository using Git\n\ndef fib(n):\n \"\"\"This function returns the nth Fibonacci number.\"\"\"\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n \n return i\n\n# Test the function with the following value.\nx = 18+20\nans = fib(x)\nprint(\"Fibonacci number\", x, \"is\", ans)\n\n# Fibonacci number 38 is 39088169\n\n#-----------------------------------------------------------\n#-----------------------------------------------------------\n\n# Ian McLoughlin\n# A program that displays Fibonacci numbers using people's names.\n# Topic 2 Exercise\n\ni = 0\nj = 1\nn = n - 1\n\nwhile n >= 0:\n i, j = j, i + j\n n = n - 1\n \nreturn i\n\nname = \"Higgins\"\nfirst = name[0]\nprint(first)\n\n# H\n\nlast = name[-1]\nprint(last)\n# s\n\nfirstno = ord(first)\nprint(firstno)\n# 72\n\nlastno = ord(last)\nprint(lastno)\n# 115\n\nx = firstno + lastno\n\nans = fib(x)\nprint(\"My surname is\", name)\n# My surname is Higgins\n\nprint(\"The first letter\", first, \"is number\", firstno)\n# The first letter H is number 72\n\nprint(\"The last letter\", last, \"is number\", lastno)\n# The last letter s is number 115\n\nprint(\"Fibonacci number\", x, \"is\", ans)\n# Fibonacci number 187 is 538522340430300790495419781092981030533\n\n\n# H\n# s\n# 72\n# 115\n# My surname is Higgins\n# The first letter H is number 72\n# The last letter s is number 115\n# Fibonacci number 187 is 
538522340430300790495419781092981030533\n","repo_name":"rhiggins2308/G00364712","sub_path":"Week2Task.py","file_name":"Week2Task.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19210759096","text":"a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nnumber = int(input(\"give limit: \"))\n\nb = [x for x in a if x < number]\n\nprint(b)\n\n\n#a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n\nnum = int(input(\"Choose a number: \"))\n\nnew_list = []\n\nfor i in a:\n\tif i < num:\n\t\tnew_list.append(i)\n\nprint (new_list)","repo_name":"ia101/python-files","sub_path":"tests/test03.py","file_name":"test03.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15258527628","text":"'''Interface between DB and program. Uses sqlalchemy session.'''\nimport os\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom .entities import Product, Category, ProductCategory\nfrom .substitute import Substitute as s\n\n\nclass SessionManager():\n '''Instances of this class will hold an engine and a session binded to it,\n it can manage adding, committing and querying db'''\n\n def __init__(self, username, password):\n self.engine = create_engine(f'mysql+pymysql://{username}:{password}@localhost/OpenFoodFacts')\n makesession = sessionmaker(bind=self.engine)\n self.session = makesession()\n\n def append(self, entry):\n '''Add entry to queue'''\n self.session.add(entry)\n\n def commit(self):\n '''commit queue'''\n self.session.commit()\n\n def query(self, queried):\n '''return a list of entry from queried mapped object'''\n return self.session.query(queried)\n\n def cat_to_prod(self, category):\n '''for a Category object, retrieve all Product object related'''\n result = []\n for entry in self.query(ProductCategory).join(Category).\\\n filter(Category.category_name == category.category_name):\n for entrance in self.query(Product).filter(Product.product_url\n == entry.product_url):\n result.append(entrance)\n\n result.sort(key=s.get_product_grade, reverse=True)\n return result\n\n def commit_cache(self, cache):\n '''Upload cache-loaded content to db'''\n if cache.assert_cache:\n all_category = list()\n appended_product = list()\n product_incomplete = 0\n for product_file in cache.load_cache():\n for entry in product_file['products']:\n if entry in appended_product:\n continue\n else:\n appended_product.append(entry)\n try:\n if entry['stores'] == '':\n continue\n product = Product(product_name=entry['product_name'],\n nutrition_grade=entry['nutrition_grades'],\n product_url=entry['url'],\n store=entry['stores']\n )\n self.append(product)\n\n category = entry['categories_hierarchy'][0][3:].\\\n replace(\"-\", \" \").upper()\n\n product_category = ProductCategory(product_url=entry['url'],\n category_name=category\n )\n self.append(product_category)\n if category not in all_category:\n all_category.append(category)\n except KeyError:\n product_incomplete += 1\n continue\n\n for entry in all_category:\n category = Category(category_name=entry)\n self.append(category)\n print(\"Committing to db...\")\n print(f\"{product_incomplete} products not committed because of missing informations\")\n self.commit()\n os.system(\"pause\")\n else:\n print('No file in cache, please download 
data')\n","repo_name":"priandey/projet_5","sub_path":"models/sessionmanager.py","file_name":"sessionmanager.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37129147686","text":"\n\nimport time\nfrom selenium import webdriver\n\n\ndriver = webdriver.Chrome()\nbrowser.implicitly_wait(5)\n\n\ntry:\n driver.get(\"https://stepik.org/lesson/25969/step/12\")\n\n textarea = driver.find_element_by_css_selector(\".textarea\")\n textarea.send_keys(\"get()\")\n\n submit_button = driver.find_element_by_css_selector(\n \".submit-submission\").click()\n\n\nfinally:\n time.sleep(5)\n driver.quit()\n","repo_name":"Escape198/selenium-course","sub_path":"get_method.py","file_name":"get_method.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9001630667","text":"\"\"\"spot URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom portal.views import EmailAutofillSignupView\n\n# Use separate namespaces (where necessary)\n# in order to eliminate any possible collisions\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n\n url(r'^', include('portal.urls', namespace='portal')),\n\n # API\n url(r'^api/v1/', include('dataset.api.urls', namespace='dataset-api')),\n url(r'^api/v1/', include('experiment.api.urls', namespace='experiment-api')),\n\n # Third Party\n url(r'^accounts/signup/', EmailAutofillSignupView.as_view(), name='account_signup'),\n url(r'^accounts/', include('allauth.urls')),\n url(r'^watchman/', include('watchman.urls', namespace='watchman')),\n]\n","repo_name":"mei-chen/beagle","sub_path":"Spot/spot/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"72091417964","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\nfrom sklearn import preprocessing\n\ndataset = pd.read_csv('HR_file.csv')\n\n# CAMBIA LOS VALORES DE TEXTO A NUMERO\nle = LabelEncoder()\ndataset['Departments'] = le.fit_transform(dataset['Departments'])\ndataset['salary'] = le.fit_transform(dataset['salary'])\n\ny=dataset['Quit the Company']\nfeatures = ['Satisfaction Level','Last Evaluation','Number of Projects',\n 'Monthly Hours','Total Time at the Company','Work Accidents',\n 'Quit the Company','Promoted in Last 5 yrs','Departments','salary','Management']\nx=dataset[features]\ns=StandardScaler()\nx=s.fit_transform(x)\n\nx_train,x_test,y_train, y_Test = train_test_split(x,y)\n\nlog = LogisticRegression()\nlog.fit(x_train, y_train)\ny_pred = log.predict(x)\ny_prob = 
log.predict_proba(x)[:,1]\n\ndataset['predictions'] = y_pred\ndataset['probabilities'] = y_prob\n\nprint('Fin del programa')\n\n","repo_name":"anderszzon/Project_Engineer_DataMinning","sub_path":"RegresionLogisticaB_MLPowerBI.py","file_name":"RegresionLogisticaB_MLPowerBI.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12292896781","text":"from jasmin_metrics.scripts.storage_metrics import StorageMetrics\nfrom jasmin_metrics.scripts.users_metrics import UsersMetrics\nfrom django.http import HttpResponse\nimport csv\n\nclass VolumeReport:\n\n def __init__(self):\n self.sm = StorageMetrics()\n\n\n def create_view(self):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"volume_report.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(['Realm', 'Path', 'Used TB', 'Available TB','Size TB'])\n for df in [self.sm.gws_df, self.sm.get_archive_df()]:\n for i,g in df.iterrows():\n line = [g[1],\n g[4],\n g[5]/10**3,\n (g[2]-g[5])/10**3,\n g[2]/10**3]\n writer.writerow(line)\n\n return response\n\nclass GWSUsersReport:\n def __init__(self):\n self.users = UsersMetrics()\n\n def create_view(self):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"gws_users_report.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(['GWS Name', 'Users'])\n for name in self.users.get_list_gws():\n writer.writerow([name, self.users.get_users_gws_active_today(name)])\n\n return response","repo_name":"cedadev/jasmin-metrics","sub_path":"jasmin_metrics/scripts/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73849727403","text":"import math\nimport pygame\nimport sys\n\nMAX_ITER = 80\n\ndef mandelbrot(c, z,cnt):\n # so you can use complex hahahahah i want to cry\n if abs(z) < 2 and cnt < MAX_ITER:\n z = z*z + c\n cnt += 1\n return mandelbrot(c, z,cnt)\n else:\n return cnt\n\npygame.init()\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nBLUE = (0, 0, 225)\nGREEN = (0, 225, 0)\nRED = (255, 0, 0)\nYELLOW = (255, 255, 0)\n\nsize = [900, 600] # 900, 600 later\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Mandelbrot\")\nscreen.fill(WHITE)\n\n# creating array of pixels\npixAr = pygame.PixelArray(screen)\n\nfor a in range (-600, 300):\n for b in range(-300, 300):\n c = complex(a/300, b/300)\n v = mandelbrot(c,0,0)\n if v < MAX_ITER:\n pixAr[a + 600, b + 300] = RED\n else:\n pixAr[a + 600, b + 300] = BLACK\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.quit()\n pygame.display.update()","repo_name":"zhansoft/pythoncourseh200","sub_path":"Assignment11/spaceship.py","file_name":"spaceship.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18739043388","text":"# Copyright (c) 2009 Nokia Corporation\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" 
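Both report classes above follow the same stream-CSV-into-the-response pattern; a minimal standalone version of it (function name and filename are illustrative) looks like:

import csv
from django.http import HttpResponse

def csv_response(rows, filename="report.csv"):
    # rows: iterable of sequences, the first one being the header row
    response = HttpResponse(content_type="text/csv")
    response["Content-Disposition"] = 'attachment; filename="%s"' % filename
    writer = csv.writer(response)  # HttpResponse exposes write(), so csv can target it
    for row in rows:
        writer.writerow(row)
    return response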
BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport test.test_support\r\nimport unittest\r\n# In case the module is not loaded, test_scriptext\r\n# would be skipped - \"No module named scriptext\"\r\nimport scriptext\r\nimport time\r\nimport e32\r\ncount1 = 0\r\ncount2 = 0\r\nevent_type_list = [0, 1, 2, 3, 4]\r\n# Corresponds to EKLogCallEventType,EKLogDataEventType, EKLogFaxEventType,\r\n# EKLogShortMessageEventType, EKLogPacketDataEventType respectively\r\n\r\n\r\nclass SynchronousLoggingTest(unittest.TestCase):\r\n def __init__(self, methodName='runTest'):\r\n unittest.TestCase.__init__(self, methodName=methodName)\r\n self.logging_handle = None\r\n self.logging_handle = scriptext.load('Service.Logging', 'IDataSource') \r\n \r\n def test_getlist(self):\r\n \"\"\" Get list of logs\"\"\"\r\n logging_info = self.logging_handle.call('GetList', {'Type': u'Log',})\r\n for item in logging_info:\r\n event_type = item['EventType']\r\n if event_type not in event_type_list:\r\n self.fail('Event Type is invalid')\r\n remote_party = item['RemoteParty']\r\n if 'Direction' in item:\r\n direction = item['Direction'] \r\n event_time = ['EventTime']\r\n sub = item['Subject']\r\n phone_no = item['PhoneNumber']\r\n description = item['Description']\r\n event_data = item['EventData']\r\n\r\n def test_add(self):\r\n \"\"\" Add a log\"\"\"\r\n log_id = self.logging_handle.call('Add',\r\n {'Type': u'Log',\r\n 'Item': {'EventType': 0,}})\r\n \r\n def test_delete(self):\r\n \"\"\" Delete a log\"\"\"\r\n log_id = self.logging_handle.call('Add', {'Type': u'Log', \r\n 'Item': {'EventType': 3,\r\n 'Direction': 1,\r\n 'EventDuration': 2, \r\n 'DeliveryStatus': 1,\r\n 'PhoneNumber': u'666'}})\r\n self.logging_handle.call('Delete', {'Type': u'Log', \r\n 'Data': {'id': log_id,}}) \r\n\r\nclass AsynchronousLoggingTest(unittest.TestCase):\r\n\r\n def __init__(self, methodName='runTest'):\r\n unittest.TestCase.__init__(self, methodName=methodName)\r\n self.lock = e32.Ao_lock()\r\n self.timer = e32.Ao_timer()\r\n self.async_request_failed = False\r\n \r\n def ao_timer_callback(self):\r\n self.async_request_failed = True\r\n self.output_params['TestFailureReason'] = \"Wait timeout. 
Callback\" + \\\r\n \"function for the async request not hit\"\r\n self.lock.signal()\r\n\r\n def check_error_and_signal(self, output_params):\r\n if 'TestFailureReason' in output_params:\r\n self.async_request_failed = True\r\n self.output_params = output_params\r\n self.timer.cancel()\r\n self.lock.signal()\r\n\r\n def do_load(self):\r\n service_handle = scriptext.load('Service.Logging', 'IDataSource')\r\n return service_handle\r\n\r\n\r\ndef test_main():\r\n test.test_support.run_unittest(SynchronousLoggingTest,\r\n AsynchronousLoggingTest)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test_main()","repo_name":"SymbianSource/oss.FCL.sf.incubator.python","sub_path":"src/ext/scriptext/src/test_scriptext_logging.py","file_name":"test_scriptext_logging.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"7771792257","text":"import re\nclass Solution:\n def isMatch(self, s: str, p: str) -> bool:\n pat = re.compile(p)\n match = re.search(pat, s)\n if not match:\n return False\n else:\n if s == s[match.start(): match.end()]:\n return True\n else:\n return False\n\n","repo_name":"zziri/itudy","sub_path":"zziri/ps/leetcode/problems/10. Regular Expression Matching.py","file_name":"10. Regular Expression Matching.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"74472586604","text":"def sudoku(arr):\n # 가로\n for i in range(9):\n if len(set(arr[i])) != 9:\n return False\n # 세로\n for i in range(9):\n local = set()\n for j in range(9):\n local.add(arr[j][i])\n if len(local) != 9:\n return False\n\n # 각 박스 : 근데 생각해보니까 박스가 안맞으려면 이미 가로나 세로 중 하나는 안맞을 수밖에 없는데, 이거 할필요 없는거 아닌가??\n # for i in range(3):\n # for j in range(3):\n # local = set()\n # for k in range(3*i, 3*i+3):\n # for l in range(3*j, 3*j+3):\n # local.add(arr[l][k])\n # print(local)\n # if len(local) != 9:\n # return False\n\n return True\n\n\narr = []\n\nfor i in range(9):\n arr += [list(map(int, input().split()))]\n\nprint(\"YES\" if sudoku(arr) else \"NO\")\n","repo_name":"ddosang/AlgorithmStudy","sub_path":"sec3/3_10.py","file_name":"3_10.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36378696793","text":"from random import randint\n\nfrom common.enums import Direction, MapCellType, PortalDestination\nfrom common.level import GameLevel\nfrom common.point import Point\nfrom common.game_map import Portal\n\n\nclass Game:\n def __init__(self, levels: list[GameLevel]):\n self._next_level_portal = None\n self._levels = levels\n self._level = None\n self._level_number = 0\n self._is_game_clear = False\n self._food_point = None\n self._previous_direction = None\n self._score = 0\n self._score_on_level = 0\n self._snake = None\n self._is_game_over = not self._next_level()\n\n def _next_level(self) -> bool:\n self._score_on_level = 0\n if len(self._levels) == 0:\n self._is_game_clear = True\n return False\n\n level = self._levels.pop(0)\n self._level_number += 1\n self._level = level\n if not self._snake:\n self._snake = level.snake\n else:\n self._snake.teleport(level.snake.head)\n self._snake.decrease_to_one()\n if not self._previous_direction:\n self._previous_direction = level.start_direction\n self._map = level.map\n self._snake.set_coordinate_limits(\n self._map.width,\n self._map.height)\n self._next_level_portal = None\n 
self._next_food()\n return True\n\n def _get_random_empty_point(self, direction_free: Direction = None) -> Point:\n \"\"\"Returns random point, which is empty on current map. If direction_free is provided, there\n will be an additional requirement for random point to have empty neighbour at provided location\"\"\"\n is_point_generated = False\n candidate = None\n while not is_point_generated:\n x = randint(0, self._map.width - 1)\n y = randint(0, self._map.height - 1)\n candidate = Point(x, y)\n if self.get(x, y) == MapCellType.Empty:\n if isinstance(direction_free, Direction):\n additional_point = candidate + direction_free.value\n additional_point = (self._map_size + additional_point) % self._map_size\n if self.get(additional_point.x, additional_point.y) != MapCellType.Empty:\n continue\n is_point_generated = True\n return candidate\n\n def _next_food(self) -> None:\n self._food_point = self._get_random_empty_point()\n\n @property\n def map_dimensions(self) -> Point:\n return Point(self._map.width, self._map.height)\n\n @property\n def _map_size(self):\n return Point(self._map.width, self._map.height)\n\n @property\n def score(self):\n return self._score\n\n @property\n def is_game_over(self) -> bool:\n return self._is_game_over\n\n @property\n def level_number(self) -> int:\n return self._level_number\n\n @property\n def is_game_clear(self) -> bool:\n return self._is_game_clear\n\n def get(self, x: int, y: int) -> MapCellType:\n \"\"\"Get current map representation for view\"\"\"\n point = Point(x, y)\n\n if self._next_level_portal and point == self._next_level_portal.position:\n return MapCellType.PortalIn\n\n if point == self._food_point:\n return MapCellType.Food\n for candidate in self._snake.get_points():\n candidate_truncated = candidate % self._map_size\n if candidate_truncated == point:\n return MapCellType.Snake\n\n return self._map.get(x, y)\n\n def is_direction_valid(self, direction: Direction) -> bool:\n \"\"\"Check, if submitted direction is valid to move\"\"\"\n return (not self._previous_direction or\n self._previous_direction.value != -direction.value)\n\n def _process_portal(self, portal: Portal) -> None:\n if portal.destination_type == PortalDestination.StaticPoint:\n self._snake.teleport(portal.destination)\n return\n if portal.destination_type == PortalDestination.RandomPoint:\n destination = self._get_random_empty_point(self._previous_direction)\n self._snake.teleport(destination)\n return\n if portal.destination_type == PortalDestination.NextLevel:\n self._is_game_over = not self._next_level()\n return\n\n raise ValueError(f\"Unknown portal destination type at coordinates {portal.position}\")\n\n def move(self, direction: Direction = None) -> None:\n \"\"\"Move snake in specified direction\"\"\"\n if not direction:\n direction = self._previous_direction\n\n if not self.is_direction_valid(direction):\n raise AttributeError(\"Snake can't move on opposite direction\")\n\n if self._is_game_over:\n return\n\n self._previous_direction = direction\n head = self._snake.head % self._map_size\n if self._snake.can_collide_with_itself(direction) or self._map.get(head.x, head.y) == MapCellType.Obstacle:\n self._is_game_over = True\n return\n\n portals = (self._next_level_portal,) + self._map.portals\n portals_starts = {x.position if x else None for x in portals}\n if head in portals_starts:\n for portal in portals:\n if not portal:\n continue\n if portal.position == head:\n self._process_portal(portal)\n break\n else:\n self._snake.move(direction)\n\n if self._snake.head == 
self._food_point:\n self._snake.grow()\n self._score += 1\n self._score_on_level += 1\n if self._score_on_level >= self._level.clear_score and self._next_level_portal is None:\n position = self._get_random_empty_point()\n self._next_level_portal = Portal(position, PortalDestination.NextLevel)\n self._next_food()\n","repo_name":"ngrpv/snake-game-python","sub_path":"common/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36301776320","text":"from rest_framework.response import Response\nfrom rest_framework import status\nfrom django.db.models import Sum\nfrom datetime import datetime\nfrom django.db import transaction\nfrom rest_framework.generics import CreateAPIView\nfrom pytz import utc\nfrom customer.models import KnoxAuthtoken, UserProfile, UserRole, Role, UserAddress\nfrom super_admin.models import Product,variants, CompanyProfile,images\nfrom payments.models import Transaction_table\nfrom order.models import Order,OrderItemHistory\nimport requests, json\nfrom .models import shipment\nfrom Ecomerce_project.settings import SHIPMENT_TOKEN\nfrom rest_framework import status\n\n\nclass CreateReturnAPI(CreateAPIView):\n\n @transaction.atomic()\n def post(self,request,token,oid):\n try:\n token1 = KnoxAuthtoken.objects.get(token_key=token)\n except:\n data = {\"message\" : \"Invalid Access Token\"}\n return Response(data, status=status.HTTP_404_NOT_FOUND)\n \n try:\n o = shipment.objects.get(shipment_order_id=oid)\n except:\n data = {\"message\" : \"Invalid Order Id\"}\n return Response(data, status=status.HTTP_404_NOT_FOUND)\n\n user = token1.user_id\n usertable = UserProfile.objects.get(id=user)\n if (UserRole.objects.filter(user_id=usertable.id).exclude(role_id=4)).exists():\n url = \"https://apiv2.shiprocket.in/v1/external/orders/show/\"+str(o.shipment_order_id)\n payload={}\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': SHIPMENT_TOKEN\n }\n\n response1 = requests.request(\"GET\", url, headers=headers, data=payload)\n j = response1.json()\n if response1.status_code==200 or response1.status==200:\n url = \"https://apiv2.shiprocket.in/v1/external/orders/create/return\"\n\n payload = json.dumps({\n \"order_id\": j['data']['id'],\n \"order_date\": j['data']['order_date'],\n \"channel_id\": j['data']['shipments']['channel_id'],\n \"pickup_customer_name\": j['data']['customer_name'],\n # \"pickup_last_name\": '',\n \"pickup_address\": j['data']['customer_address'],\n \"pickup_address_2\": j['data']['customer_address_2'],\n \"pickup_city\": j['data']['customer_city'],\n \"pickup_state\": j['data']['customer_state'],\n \"pickup_country\": j['data']['customer_country'],\n \"pickup_pincode\": j['data']['customer_pincode'],\n \"pickup_email\": j['data']['customer_email'],\n \"pickup_phone\": j['data']['customer_phone'],\n\n \"shipping_customer_name\": j['data']['pickup_address']['name'],\n \"shipping_address\": j['data']['pickup_address']['address'],\n \"shipping_city\":j['data']['pickup_address']['city'],\n \"shipping_country\": j['data']['pickup_address']['country'],\n \"shipping_pincode\": j['data']['pickup_address']['pin_code'],\n \"shipping_state\": j['data']['pickup_address']['state'],\n \"shipping_email\": j['data']['pickup_address']['email'],\n # \"shipping_isd_code\": \"91\",\n \"shipping_phone\": j['data']['pickup_address']['phone'],\n \"order_items\": [\n {\n \"sku\": j['data']['products'][0]['sku'],\n \"name\": 
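Snake movement above wraps coordinates with point % map_size; the same toroidal wrap on plain tuples (sizes illustrative):

def wrap(pos, size):
    # stepping off one edge re-enters from the opposite edge
    return (pos[0] % size[0], pos[1] % size[1])

assert wrap((-1, 5), (10, 5)) == (9, 0)
assert wrap((10, 3), (10, 5)) == (0, 3)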
j['data']['products'][0]['name'],\n \"units\": j['data']['others']['order_items'][0]['units'],\n \"selling_price\": j['data']['products'][0]['selling_price'],\n \"discount\": j['data']['products'][0]['discount'],\n \"hsn\": j['data']['products'][0]['hsn'],\n \"brand\": j['data']['products'][0]['brand'],\n # \"qc_size\": \"43\"\n }\n ],\n \"payment_method\": j['data']['payment_method'],\n # \"total_discount\": \"0\",\n \"sub_total\": j['data']['net_total'],\n \"length\": j['data']['shipments']['length'],\n \"breadth\": j['data']['shipments']['breadth'],\n \"height\": j['data']['shipments']['height'],\n \"weight\": j['data']['products'][0]['weight']\n })\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': SHIPMENT_TOKEN\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n data=response.json()\n if response.status_code==200 or response.status==200:\n shipment.objects.filter(shipment_order_id=o.shipment_order_id).update(return_shipment_id=data['shipment_id'],return_shipment_order_id=data['order_id'])\n OrderItemHistory.objects.filter(id=o.order_item_id).update(shipment_status=data['status'])\n data={\n \"message\":\"Return Request Successful\",\n \"shipment\":response.json()\n }\n return Response(data,status=status.HTTP_201_CREATED)\n elif response.status_code==400:\n data={\n \"message\":\"Failed To Create Return Request\",\n \"error\" : \"Can't Return Product after Cancelling Request\"\n }\n return Response(data,status=status.HTTP_429_TOO_MANY_REQUESTS)\n else:\n return Response(response.json())\n else:\n return Response(response1.json())\n else:\n data={\n \"message\" :\"Unauthorized to Ship Order\",\n \"error\":\"Admin,SuperAdmin or Vendor is accepted to Start Shipment Process\",\n \"status\":status.HTTP_401_UNAUTHORIZED\n }\n return Response(data, status=status.HTTP_401_UNAUTHORIZED)\n\nclass GenerateReturnAWB(CreateAPIView):\n\n @transaction.atomic()\n def get(self,request,token,oid):\n try:\n token1 = KnoxAuthtoken.objects.get(token_key=token)\n except:\n data = {\"message\" : \"Invalid Access Token\"}\n return Response(data, status=status.HTTP_404_NOT_FOUND)\n \n try:\n o = shipment.objects.get(shipment_order_id=oid)\n except:\n data = {\"message\" : \"Invalid Order Id\"}\n return Response(data, status=status.HTTP_404_NOT_FOUND)\n\n user = token1.user_id\n usertable = UserProfile.objects.get(id=user)\n if (UserRole.objects.filter(user_id=usertable.id).exclude(role_id=4)).exists():\n #Check If order has been shipped already and not cancelled by admin/vendor \n url = \"https://apiv2.shiprocket.in/v1/external/courier/assign/awb\"\n\n payload = json.dumps({\n \"shipment_id\": o.return_shipment_id,\n \"courier_id\": \"\",\n \"status\": \"\",\n \"is_return\": 1\n })\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': SHIPMENT_TOKEN\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n r = response.json()\n if response.status_code == 200:\n print(r['response']['data']['awb_code'])\n shipment.objects.filter(shipment_order_id=o.shipment_order_id).update(return_awb_code=r['response']['data']['awb_code'])\n return Response(r)\n else:\n data={\n \"message\":\"Failed To Get Return Awb From Shiprocket API\",\n \"error\":response.json()\n }\n return Response(data)\n else:\n data={\n \"message\" :\"Unauthorized to Ship Order\",\n \"error\":\"Admin,SuperAdmin or Vendor is accepted to Start Shipment Process\",\n \"status\":status.HTTP_401_UNAUTHORIZED\n }\n return Response(data, 
status=status.HTTP_401_UNAUTHORIZED)","repo_name":"Ecommerce695/Demo","sub_path":"shipment/return_shipping.py","file_name":"return_shipping.py","file_ext":"py","file_size_in_byte":8134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17503206277","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport sys\nsys.path.append('core')\n\nfrom utils.utils import build_module\n\n\nclass Combine(nn.Module):\n def __init__(self, args):\n super().__init__()\n self.loss_names = args.loss\n self.loss_weights = args.loss_weights\n self.loss_num = len(self.loss_names)\n self.loss = []\n for i in range(self.loss_num):\n self.loss.append(build_module(\"core.loss\", self.loss_names[i])(args))\n\n def forward(self, output, target):\n\n loss_all = 0.\n loss_dict = {}\n for i in range(self.loss_num):\n loss_each, loss_metric = self.loss[i](output, target)\n loss_all += loss_each * self.loss_weights[i]\n loss_dict.update(loss_metric)\n\n loss_dict.update({\n \"loss\": loss_all,\n })\n\n return loss_dict\n","repo_name":"danqu130/DCEIFlow","sub_path":"core/loss/Combine.py","file_name":"Combine.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"19"} +{"seq_id":"19774292573","text":"import time\n\nfrom django.conf import settings\nimport requests\n\nfrom base.apps.github.models import Token, User as GithubUser, User\nfrom base.apps.github.utils import get_api_timestamp\nfrom base.apps.user.models import User\n\n\"\"\"\nhttps://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps\n\"\"\"\n\ndef create_user(data):\n defaults = {'login':data['login']}\n user, created = User.objects.update_or_create(defaults,id=data['id'])\n return user\n\ndef create_github_user(data):\n followers_change, following_change = True, True\n try:\n github_user = GithubUser.objects.get(id=data['id'])\n change_data = {}\n if data['followers']!=github_user.followers_count:\n followers_change = True\n if data['following']!=github_user.following_count:\n following_change = True\n new_user = False\n except GithubUser.DoesNotExist:\n followers_change=data['followers']>0\n following_change=data['following']>0\n new_user = True\n defaults = {\n 'login':data['login'],\n 'name':data['name'],\n 'blog':data['blog'],\n 'location':data['location'],\n 'twitter_username':data['twitter_username'],\n 'public_gists_count':data['public_gists'],\n 'followers_count':data['followers'],\n 'following_count':data['following'],\n 'created_at':get_api_timestamp(data['created_at']),\n 'updated_at':get_api_timestamp(data['updated_at'])\n }\n GithubUser.objects.update_or_create(defaults,id=data['id'])\n\ndef create_github_token(user_id,access_token):\n defaults = {\n 'token':access_token,\n 'core_ratelimit_limit':5000,\n 'core_ratelimit_remaining':5000,\n 'core_ratelimit_reset':None,\n 'graphql_ratelimit_limit':5000,\n 'graphql_ratelimit_remaining':5000,\n 'graphql_ratelimit_reset':None,\n 'created_at':int(time.time())\n }\n token,created = Token.objects.update_or_create(defaults,user_id=user_id)\n\ndef create_user(data):\n try:\n user = User.objects.get(login=data['login'])\n if data['login']!=user.login:\n User.objects.filter(id=data['id']).update(login=data['login'])\n except User.DoesNotExist:\n user, created = User(id=data['id'],login=data['login']).save()\n return user\n\ndef get_access_token(code):\n data = {\n 'code': code,\n 'client_id': 
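The Combine module above accumulates weight * loss over dynamically loaded criteria; a self-contained sketch of that pattern with fixed criteria (class name, criteria, and weights are illustrative):

import torch
import torch.nn as nn

class WeightedSum(nn.Module):
    # sums several criteria with per-criterion weights, as Combine does above
    def __init__(self, losses, weights):
        super().__init__()
        self.losses = nn.ModuleList(losses)
        self.weights = weights

    def forward(self, output, target):
        return sum(w * crit(output, target)
                   for crit, w in zip(self.losses, self.weights))

criterion = WeightedSum([nn.L1Loss(), nn.MSELoss()], [1.0, 0.5])
print(criterion(torch.zeros(4), torch.ones(4)))  # tensor(1.5000)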
settings.GITHUB_OAUTH_CLIENT_ID,\n 'client_secret': settings.GITHUB_OAUTH_SECRET,\n 'grant_type': 'authorization_code',\n 'redirect_uri': settings.GITHUB_OAUTH_CALLBACK_URL\n }\n url = 'https://github.com/login/oauth/access_token'\n attempts_count=0\n while True:\n attempts_count+=1\n r = requests.post(url,data=data,timeout=10)\n if r.status_code==200:\n # access_token=XXX&scope=gist&token_type=bearer\n for s in filter(lambda s: 'access_token' in s, r.text.split('&')):\n return s.split('=')[1]\n raise ValueError(r.text)\n else:\n if attempts_count>3:\n r.raise_for_status()\n","repo_name":"andrewp-as-is/gist-list-django-server","sub_path":"views/auth/github/callback/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12280431491","text":"from django.urls import path\n\n\nfrom . views import TourDetailAPIView, TourListCreateAPIView, OtourListCreateAPIView,OtourDetailAPIView, CustomerCreateAPIView, UserRegistrationAPIView, login_view, BookingListCreateAPIView\n\nurlpatterns = [\n path('tours', TourListCreateAPIView.as_view(), name= \"tour-list\"),\n path('customer', CustomerCreateAPIView.as_view(), name= \"customers\"),\n path('tours//', TourDetailAPIView.as_view(), name= \"tour-detail\"),\n path('otours', OtourListCreateAPIView.as_view(), name= \"otour-list\"),\n path('otours//', OtourDetailAPIView.as_view(), name= \"otour-detail\"),\n path('bookings', BookingListCreateAPIView.as_view(), name= \"bookings-list\"),\n path('register/', UserRegistrationAPIView.as_view(), name='user-registration'),\n path('login/', login_view, name='api-login'),\n\n]\n","repo_name":"qamar62/akt_2023","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43323529049","text":"import mediapipe as mp\r\nimport cv2\r\n\r\nmp_drawing= mp.solutions.drawing_utils\r\n# draw the detcted land mark directly on the screen\r\nmp_holistic = mp.solutions.holistic\r\n# detect the holitic model\r\n\r\n\r\n# get real time webcam feed\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\n# using base cv2 repo to open the selfie video \r\nwith mp_holistic.Holistic(min_detection_confidence = 0.5, min_tracking_confidence = 0.5) as holistic:\r\n while cap.isOpened(): \r\n ret, frame = cap.read()\r\n # frame is the image itself\r\n results = holistic.process(frame)\r\n #print(results.face_landmarks)\r\n mp_drawing.draw_landmarks(frame, results.face_landmarks, mp_holistic.FACEMESH_CONTOURS)\r\n mp_drawing.draw_landmarks(frame, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)\r\n mp_drawing.draw_landmarks(frame, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)\r\n mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS)\r\n\r\n # draw the land mark and lineup\r\n \r\n cv2.imshow('Holistic landmark detection', frame)\r\n\r\n if (cv2.waitKey(10) & 0xFF == ord('q')):\r\n break\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n\r\n","repo_name":"saliteta/sribd_blender","sub_path":"creating_csv/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"26800053503","text":"import random\nimport time\nprint('*'*10, '\\033[7:30:47mJOGO DE JOKENPÔ\\033[m', '*'*10)\nescolha = 
input('''\\033[32mJokeeeeeenpôô!\\033[m Digite sua escolha:\n(pedra, papel ou tesoura): ''').upper()\nprint('-'*20)\nlista = ['PEDRA', 'PAPEL', 'TESOURA']\ntime.sleep(1)\njkp = random.choice(lista)\nif escolha == 'PAPEL' and jkp == 'PEDRA':\n print('Parabéns, {} ganha de {}'.format(escolha, jkp))\nelif escolha == 'PAPEL' and jkp == 'TESOURA':\n print('Perdeu! {} ganha de {}.'.format(jkp, escolha))\nelif escolha == 'PEDRA' and jkp == 'PAPEL':\n print('Perdeu! {} ganha de {}.'.format(jkp, escolha))\nelif escolha == 'PEDRA' and jkp == 'TESOURA':\n print('Ganhou!!! {} vence {}.'.format(escolha, jkp))\nelif escolha == 'TESOURA' and jkp == 'PAPEL':\n print('Parabéns!!! {} vence {}.'.format(escolha, jkp))\nelif escolha == 'TESOURA' and jkp == 'PEDRA':\n print('Perdeu! {} vence a {}.'.format(jkp, escolha))\nelse:\n print('Empatamos! Escolhi {} e você {}.'.format(escolha, jkp))\n\n#print(jkp)","repo_name":"SimoesTiago/Python","sub_path":"curso em video/Desafios_Mundo2/Desafios2/def045.py","file_name":"def045.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18671968329","text":"# This files contains your custom actions which can be used to run\n# custom Python code.\n#\n# See this guide on how to implement these action:\n# https://rasa.com/docs/rasa/custom-actions\n\n\n# This is a simple example for a custom action which utters \"Hello World!\"\n\nfrom typing import Any, Text, Dict, List\n\nfrom rasa_sdk import Action, Tracker\nfrom rasa_sdk.executor import CollectingDispatcher\nfrom rasa_sdk.events import SlotSet\n\n\nfrom utilities import zomatopy\nfrom utilities.location_check import LocationCheck\n\nfrom utilities.email_utility import send_email\nimport settings\nglobal restaurants\n\nclass ActionSearchRestaurants(Action):\n def name(self):\n return 'action_search_restaurants'\n\n def run(self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n zomato = zomatopy.initialize_app(settings.ZOMATO_CONFIG)\n try:\n loc = tracker.get_slot('location')\n cuisine = tracker.get_slot('cuisine')\n price = tracker.get_slot('price')\n restaurant_found = 'notfound'\n global restaurants\n restaurants, response = zomato.get_loc_cus_price_results(loc, price, cuisine)\n if response == 'no results':\n response = \"No results found for your search criteria!!!\"\n else:\n top5_restaurants = restaurants.head(5)\n print(restaurants.head())\n # top 5 results to display\n if len(top5_restaurants)>0:\n restaurant_found = 'found'\n\n response = \"We found the following restaurants for you!!!\\n\"\n for index, row in top5_restaurants.iterrows():\n response = response + str(row[\"restaurant_name\"]) + ' (rated ' + row[\n 'restaurant_rating'] + ') in ' + row[\n 'restaurant_address'] + ' and the average budget for two people ' + str(\n row['budget_for2people']) + \"\\n\"\n else:\n response = \"No results found for your search criteria!!!\"\n dispatcher.utter_message(\"-----\\n\" + response)\n return [SlotSet('location', loc), SlotSet('cuisine', cuisine), SlotSet('price', price), SlotSet('restaurant_found', restaurant_found)]\n except Exception as e:\n dispatcher.utter_message(\"-----\\n Sorry we don't serve the specified location. 
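The six-branch if/elif ladder in the jokenpô game above collapses into a single "what beats what" table; a sketch of that refactor (labels kept in Portuguese to match the game):

BEATS = {"PEDRA": "TESOURA", "PAPEL": "PEDRA", "TESOURA": "PAPEL"}

def winner(player, computer):
    # returns who wins a round, or a draw, by looking up the beats-table
    if player == computer:
        return "Empate"
    return "Jogador" if BEATS[player] == computer else "Computador"

assert winner("PAPEL", "PEDRA") == "Jogador"
assert winner("PEDRA", "PEDRA") == "Empate"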
Please specify a different location\")\n return [SlotSet('location', None), SlotSet('cuisine', None), SlotSet('price', None),\n SlotSet('restaurant_found', 'notfound')]\n\n\nclass EmailService(Action):\n def name(self):\n return 'action_email_details'\n\n def run(self, dispatcher, tracker, domain):\n recipient = tracker.get_slot('email')\n print('recipient is {}'.format(recipient))\n try:\n global restaurants\n res = restaurants\n if len(res) > 0:\n top10 = res.head(10)\n print(\"got this correct email is {}\".format(recipient))\n send_email(recipient, top10)\n except Exception as e:\n dispatcher.utter_message(\"We are facing an issue while sending email :(\")\n\n\nclass CheckLocation(Action):\n def name(self):\n return 'action_check_location'\n\n def run(self, dispatcher, tracker, domain):\n print('checking location')\n try:\n loc = tracker.get_slot('location')\n location_check = LocationCheck()\n check = location_check.check_location(loc)\n print(check)\n return [SlotSet('location', check['location']), SlotSet('location_found', check['location_found'])]\n except:\n dispatcher.utter_message(\n \"-----\\n Sorry we don't serve the specified location. Please specify a different location\")\n return [SlotSet('location', None), SlotSet('location_found', 'notfound')]\n\nclass ResetSlots(Action):\n def name(self):\n return 'action_reset_slots'\n\n def run(self, dispatcher, tracker, domain):\n print('reset slots')\n\n return [SlotSet('location', None),\n SlotSet('location_found', None),\n SlotSet('price', None),\n SlotSet('email', None),\n SlotSet('cuisine', None)]","repo_name":"ranitha84/rasa-chatbot","sub_path":"actions/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18288344647","text":"from pyglet.gl import *\nfrom creatures import Wall\nfrom animation_example import Tween, ease_none, ease_in_quad\n\nclass Render(object):\n\n def __init__(self, game_data):\n \"\"\"\n width, height: dimension in tiles\n background: background image to use\n \"\"\"\n self.window = game_data[\"window\"]\n self.game_data = game_data\n self.game = game_data[\"game\"]\n self.width = self.window.width\n self.height = self.window.height\n self.game.add_handler(self)\n self.sprite = pyglet.sprite.Sprite(self.game_data['data']['agents']['Monster01']['animations']['Monster_Up1.png'], 100, 100)\n self.Tween = Tween(self.sprite, \"x\", ease_in_quad, self.sprite.x, self.sprite.x+200, 5, True, False, \"Testobj1\")\n self.Tween2 = Tween(self.sprite, \"y\", ease_none, self.sprite.y, self.sprite.y+100, 10, True, False, \"Testobj2\")\n self.Tween.start()\n self.Tween2.start()\n #def __init__(self, obj, prop, func, begin, finish, duration, use_seconds, looping=False, name=None)\n def on_draw(self):\n glColor3f(1.0, 1.0, 1.0)\n glPushMatrix() \n #Tile.tile_batch.draw()\n #EffectsManager.effects_batch.draw()\n #Bug.bug_batch.draw()\n Wall.object_batch.draw()\n self.sprite.draw()\n #Creature.creature_batch.draw()\n #Add in animation code\n glPopMatrix()\n glLoadIdentity()\n \n","repo_name":"facepalm/CampCannibal","sub_path":"lib/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"42846896106","text":"class Config:\n @staticmethod\n def train_regression():\n datasets_dir = \"C:\\\\Users\\\\Maxim\\\\Documents\\\\Uni\\\\Bachelorarbeit\\\\datasets\"\n\n return {\n 
\"log_dir\": \"S:\\\\Data\\\\pytorch-utils\\\\impress-deaugment\",\n # \"resume\": \"Impress-2021-07-18-02-34\",\n \"name\": \"Impress\",\n \"pre_training_epochs\": 100,\n \"epochs\": 600,\n\n 'batchsize': 16,\n\n 'model': {\n 'input_channels': 1,\n 'num_freq_bands': 4,\n 'encoder_depth': 4,\n 'decoder_depth': 4,\n 'max_freq': 256,\n 'freq_base': 2,\n 'input_axis': 2,\n 'num_latents': 128,\n 'latent_dim': 256,\n 'cross_heads': 1,\n 'latent_heads': 16,\n 'cross_dim_head': 16,\n 'latent_dim_head': 16,\n 'attn_dropout': 0.0,\n 'ff_dropout': 0.0,\n 'weight_tie_layers': False\n },\n\n \"pre_training\": {\n 'worker': 0,\n 'sample_interval': 2,\n 'n_checkpoints': 2,\n 'grad_vis': True,\n\n # \"data\": {\"base\": datasets_dir, \"dataset\": \"Impress_DeAugment\", \"set\": \"unused\", 'cache': True},\n \"data\": {\"base\": datasets_dir, \"dataset\": \"Impress_2\", \"set\": \"clean\", 'cache': False, 'return_path': False, 'shuffle_data': True},\n },\n \"training\": {\n 'worker': 0,\n 'sample_interval': 2,\n 'n_checkpoints': 2,\n 'grad_vis': True,\n\n # \"data\": {\"base\": datasets_dir, \"dataset\": \"Impress_DeAugment\", \"set\": \"unused\", 'cache': True},\n \"data\": {\"base\": datasets_dir, \"dataset\": \"Impress_2\", \"set\": \"clean-L\", 'cache': False, 'return_path': False, 'shuffle_data': True},\n },\n\n \"validation\": {\n 'worker': 0,\n 'sample_interval': 1.5,\n \"data\": {\"base\": datasets_dir, \"dataset\": \"Impress_2\", \"set\": \"clean\", 'cache': True, 'return_path': True},\n },\n\n 'optimizer': {\n 'lr': 0.001,\n 'step_size': 200,\n 'beta1': 0.9,\n 'beta2': 0.999,\n # 'beta2': 0.,\n 'gamma': 0.5\n },\n\n }\n\n def __call__(self):\n args = self.train_regression()\n # args['training']['sample_interval'] = math.floor(args['training']['sample_interval'] / args['batchsize'])\n return args\n\n\nconfig__ = Config()\n","repo_name":"Maximilian-Rieger/unsupervised_representation_learning_for_latent_space_distance_based_search_of_footwear_impressions","sub_path":"configs/config_perceiver_deaugmenting.py","file_name":"config_perceiver_deaugmenting.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42831708278","text":"# -*- coding: utf-8 -*-\n__author__ = \"Antonio Sánchez (asanchez@plutec.net)\"\n__version__ = \"0.6\"\n__copyright__ = \"Copyright (c) 2014 Antonio Sánchez\"\n__license__ = \"GPL2\"\n\nimport bashtask\nimport database\n\n__all__ = ['bashtask']\n\ndef insert(command, priority=None):\n bashtask.insert(command, priority)\n\ndef create_database():\n database.Database().create_db()\n ","repo_name":"plutec/bashtask","sub_path":"bashtask/__init22__.py","file_name":"__init22__.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5509427112","text":"# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport segmentation_models_pytorch as smp\nfrom .activation import Activation\n\n\nclass UpBlock(nn.Module):\n def __init__(self, in_channel, out_channel):\n super(UpBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_channel, out_channel, 3, padding=1)\n self.up = nn.Upsample(scale_factor=2, mode=\"bilinear\")\n self.act = nn.ReLU()\n \n def forward(self, x):\n x = self.conv1(x)\n x = self.up(x)\n x = self.act(x)\n return x\n\n \nclass DownBlock(nn.Module):\n def __init__(self, in_channel, out_channel):\n super(DownBlock, self).__init__()\n 
self.conv1 = nn.Conv2d(in_channel, out_channel, 3, padding=1)\n self.pool = nn.MaxPool2d(2)\n self.act = nn.ReLU()\n \n def forward(self, x):\n x = self.conv1(x)\n x = self.pool(x)\n x = self.act(x)\n return x\n\n\n\n\nclass Decoder(nn.Module):\n def __init__(self, channels=(128, 64, 32), out_channel=1):\n super(Decoder, self).__init__()\n \n blocks = []\n for ix in range(len(channels)-1):\n blocks += [ DownBlock(channels[ix], channels[ix+1]) ]\n blocks += [nn.InstanceNorm2d(channels[ix])]\n \n blocks += [DownBlock(channels[-1], out_channel)]\n self.blocks = nn.Sequential(*blocks)\n \n def forward(self, x):\n return self.blocks(x)\n\n\n\nclass Encoder(nn.Module):\n def __init__(self, in_channel=1, channels=(32, 64, 128)):\n super(Encoder, self).__init__()\n \n blocks = [ UpBlock(in_channel, channels[0]) ]\n for ix in range(len(channels)-1):\n blocks += [nn.InstanceNorm2d(channels[ix])]\n blocks += [UpBlock(channels[ix], channels[ix+1]) ]\n \n \n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, x):\n return self.blocks(x)\n\n\n \nclass OvercompleteAE(nn.Module): # 必要 inherit nn.Module\n def __init__(self, in_channel=1, out_channel=1):\n super().__init__()\n \n self.encoder = Encoder(in_channel=in_channel, channels=(16, 32, 64))\n self.decoder = Decoder(out_channel=out_channel, channels=(64, 32, 16))\n\n def forward(self, x):\n \n x = self.encoder(x)\n x = self.decoder(x)\n\n return x\n\n","repo_name":"junjun1023/cbct_enhancement","sub_path":"codes/overcompleteAE.py","file_name":"overcompleteAE.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"} +{"seq_id":"23937697129","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/4/26 19:55\n# @Author : SeniorZhu1994\n# @Email : SeniorZhu1994@foxmail.com\n# @Site :\n# @File : call_record.py\n# @Software: PyCharm\n\nimport sys\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QCursor, QPalette, QColor\nimport qtawesome\n\nfrom record import Ui_MainWindow\n\n\nclass MyMainWindow(QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n # Python3中的继承只用一个super()就可以了,继承后初始化父类的属性\n super().__init__(parent)\n self.setupUi(self)\n self.ui_custom()\n self.init()\n\n def ui_custom(self):\n self.Btn_Close.setGeometry(QtCore.QRect(30, 20, 30, 30))\n self.Btn_Close.setToolTip(\"关闭窗口\")\n\n self.Btn_Min.setGeometry(QtCore.QRect(80, 20, 30, 30))\n self.Btn_Min.setToolTip(\"最小化\")\n\n self.Btn_Max_Nor.setGeometry(QtCore.QRect(130, 20, 30, 30))\n self.Btn_Max_Nor.setToolTip(\"最大化\")\n\n self.Btn_R.setGeometry(QtCore.QRect(190, 250, 101, 71))\n self.label.setGeometry(QtCore.QRect(80, 60, 351, 70))\n self.label_2.setGeometry(QtCore.QRect(80, 130, 351, 91))\n\n # 设置窗口透明度,隐藏原始边框\n self.setWindowOpacity(0.9) # 设置窗口透明度\n # Ui_MainWindow3.setAttribute(QtCore.Qt.WA_TranslucentBackground) #\n # 设置窗口背景透明\n self.setWindowFlag(QtCore.Qt.FramelessWindowHint) # 隐藏边框\n pe = QPalette()\n self.setAutoFillBackground(True)\n pe.setColor(QPalette.Window, Qt.lightGray) # 设置背景色\n\n # 设置标题和图标\n self.setPalette(pe)\n self.setWindowTitle(\"语音识别\")\n spin_icon = qtawesome.icon('fa5s.microphone-alt', color='black')\n # self.pushButton.setIcon(spin_icon)#设置图标\n self.setWindowIcon(spin_icon)\n\n # 美化左上角的三个按钮。美化的效果就是圆形,红\n # 黄\n # 绿色\n # 悬停时颜色会加深。\n self.Btn_Close.setStyleSheet('''QPushButton{background:#F76677;border-radius:15px;}\n QPushButton:hover{background:red;}''')\n 
self.Btn_Min.setStyleSheet('''QPushButton{background:#F7D674;border-radius:15px;}\n QPushButton:hover{background:yellow;}''')\n self.Btn_Max_Nor.setStyleSheet('''QPushButton{background:#6DDF6D;border-radius:15px;}\n QPushButton:hover{background:green;}''')\n\n # 美化中间靠上的label\n self.label.setStyleSheet(\n '''QLabel{color:white;font-size:40px;font-family:Roman times;}''')\n # 美化中间的label\n self.label_2.setStyleSheet('''QLabel{color:darkGray;background:white;border:2px solid #F3F3F5;border-radius:45px;\n font-size:14pt; font-weight:400;font-family: Roman times;} ''')\n\n # 对于label的设置还有\n # 使字体居中显示。\n self.label_2.setAlignment(Qt.AlignCenter)\n self.label.setAlignment(Qt.AlignCenter)\n\n # 最下面的按钮美化\n spin_icon = qtawesome.icon('fa5s.microphone-alt', color='white')\n self.Btn_R.setIcon(spin_icon) # 设置图标\n self.Btn_R.setIconSize(QtCore.QSize(50, 50))\n self.Btn_R.setStyleSheet('''QPushButton{border:none;}\n QPushButton:hover{color:white;\n border:2px solid #F3F3F5;\n border-radius:35px;\n background:darkGray;}''')\n\n def init(self):\n self.m_drag = False\n self.max_flag = True\n self.Btn_Close.clicked.connect(self.close)\n self.Btn_Min.clicked.connect(self.showMinimized)\n self.Btn_Max_Nor.clicked.connect(self.Max_or_Nor)\n\n def Max_or_Nor(self):\n if self.max_flag == True:\n self.showMaximized()\n self.max_flag = False\n self.Btn_Max_Nor.setToolTip(\"恢复\")\n else:\n self.showNormal()\n self.max_flag = True\n self.Btn_Max_Nor.setToolTip(\"最大化\")\n\n # 重写三个方法使我们的Example窗口支持拖动,上面参数window就是拖动对象\n def mousePressEvent(self, event):\n if event.button() == Qt.LeftButton:\n self.m_drag = True\n self.m_DragPosition = event.globalPos() - self.pos()\n event.accept()\n self.setCursor(QCursor(Qt.OpenHandCursor))\n\n def mouseMoveEvent(self, QMouseEvent):\n if Qt.LeftButton and self.m_drag:\n self.move(QMouseEvent.globalPos() - self.m_DragPosition)\n QMouseEvent.accept()\n\n def mouseReleaseEvent(self, QMouseEvent):\n self.m_drag = False\n self.setCursor(QCursor(Qt.ArrowCursor))\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n myWin = MyMainWindow()\n myWin.show()\n sys.exit(app.exec_())\n","repo_name":"nicedayzhu/pyqt5_uienhanced","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"19"} +{"seq_id":"45275169873","text":"input_file = open(\"input/rosalind_hamm.txt\", \"r\")\nstrands = input_file.read().strip(\"\\n\").split()\ninput_file.close()\n\nmiss = 0\n\nfor i in range(len(strands[0])):\n if strands[0][i] != strands[1][i]:\n miss += 1\n\nprint(miss)\n\noutput = open(\"output/miss.txt\", \"w\") \noutput.write(str(miss)) \noutput.close() \n","repo_name":"Halw-gnun/Rosalind","sub_path":"bioinformatics_stronghold/6_HAMM/6_HAMM.py","file_name":"6_HAMM.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35478084961","text":"from PIL import Image\nimport colorpixel_utils as pts\n\nclass ParseReadyImage(Image.Image):\n def __init__(self, image):\n self.image = image\n return\n\n # Gets basic metadata informations from an image\n def parse_image_metadata(self) -> None:\n meta_dict = {\n 'filename': pts.colored_txt\n (118,226,246,self.image.filename.split('/')[-1]),\n 'width': pts.colored_txt(222,246,118,self.image.width),\n 'height': pts.colored_txt(222,246,118,self.image.height),\n 'format': pts.colored_txt(244,72,209,self.image.format),\n 'pixel-mode': 
pts.rainbow_txt(str(self.image.mode)),\n 'extrainfo': self.image.info\n }\n print(\"*** BASIC METADATA ***\")\n for key in meta_dict: \n if key == 'extrainfo':\n if meta_dict['extrainfo'] == {}:\n continue\n print('*** EXTRA METADATA ***')\n for key in meta_dict['extrainfo']:\n print(pts.colored_txt(158,160,151,f\" {key} -> {meta_dict['extrainfo'][key]}\"))\n return\n print(f\" {pts.colored_txt(158,160,151,key)} -> {meta_dict[key]}\")\n return\n\n # Parsing methods:\n def char_parse(self) -> None:\n width, height = self.image.size\n pixels = self.image.load()\n for y in range(height):\n for x in range(width):\n r = pixels[x,y][0]\n g = pixels[x,y][1]\n b = pixels[x,y][2]\n char = f'{pts.colored_txt(r, g, b, pts.density[pts.get_density_index(pts.rgb_to_luminance(r, g, b))])} ' \n # ^ Extra space at the end for the image to be sized correctly ^\n print(f\"{char}\", end = '')\n print()\n return\n\n\n def solid_parse(self) -> None:\n width, height = self.image.size\n pixels = self.image.load()\n for y in range(height):\n for x in range(width):\n r = pixels[x,y][0]\n g = pixels[x,y][1]\n b = pixels[x,y][2]\n char = f'{pts.get_color(r, g, b)}'\n print(f\"{char}\", end = '')\n print()\n return\n\n # Function to determine and execute stdout methods.\n def parse_image(self, stdout_method: str) -> None:\n if stdout_method == '-c':\n self.char_parse()\n elif stdout_method == '-s':\n self.solid_parse()\n else:\n print(pts.error_msg(f'You did not supply a valid stdout method; here is a template to call commands:', ' ➤ python filename.py width height stdout-method\\n\\nAnd a list of valid stdout methods and their uses.\\n( -c | characters ) (-s | solid pixels)'))\n return\n\n\n def resize_image(self, width, height) -> Image:\n try:\n newimg = self.image.resize((int(width), int(height)))\n return ParseReadyImage(newimg)\n except ValueError:\n print(pts.error_msg('You supplied invalid width/height pairs, width/height are supposed to be integers; here is a template to call commands:', ' ➤ python filename.py width height stdout-method'))\n return\n","repo_name":"spearphishing/TermImage","sub_path":"src/image_parsing.py","file_name":"image_parsing.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"29381786207","text":"import gym\nimport numpy as np\nENV_NAME = 'SpaceInvaders-v0'\nEPISODE = 5000\nSTEP_LIMIT = 3000\n\ndef main():\n env = gym.make(ENV_NAME)\n obervation = env.reset()\n env.render()\n obervation_num = env.observation_space.shape[0]\n print(obervation_num)\n # print(env.observation_space.low)\n # print(env.observation_space.high)\n action = env.action_space.sample()\n print(env.action_space.n)\n # obervation_, reward, done, _ = env.step(action)\n\nif __name__ == '__main__':\n main()","repo_name":"LiuYu0521/RL","sub_path":"gym_demo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"22131612304","text":"\nimport xmlrpclib\n\nclass SafeTransportWithCert(xmlrpclib.SafeTransport):\n __cert_file = '../ssl/client/client.crt'\n __key_file = '../ssl/client/client.key'\n\n def make_connection(self, host):\n host_with_cert = (host, {\n 'key_file': self.__key_file,\n 'cert_file': self.__cert_file\n })\n return xmlrpclib.SafeTransport.make_connection(self, host_with_cert)\n\ntransport = SafeTransportWithCert()\nclient = xmlrpclib.Server('https://trigger-server:9000', 
transport=transport)\n\n# Or use python-requests to do SSL certificate authentication and validation:\n# http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification\n\n\"\"\"\n# From Client host\nimport requests\nurl = 'https://trigger-server:9000'\nr = requests.post(\n url,\n cert=('ssl/client/client.crt',\n 'ssl/client/client.key'),\n verify='certs/rootCA.pem'\n)\nprint r.content\n\"\"\"\n","repo_name":"hollowpoint/hollowpoint","sub_path":"xmlrpc/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"30671315721","text":"from pyautogui import spam\nfrom time import sleep\nlimite_msg=input('Enter n de msgs: ')\nmsg=int(input('Coloque a msg: '))\ni=0\nsleep(2)\nwhile i 새 창 열림\n regist_upload_button = driver.find_element(By.XPATH, '//button[./span[contains(text(), \"상품등록송신\")]]')\n driver.execute_script(\"arguments[0].click();\", regist_upload_button)\n time.sleep(0.2)\n\n try:\n driver.switch_to.window(driver.window_handles[1])\n self.send_regist(store_name)\n\n except Exception as e:\n print(e)\n self.log_msg.emit(f\"{store_name} 작업 실패 {e}\")\n\n finally:\n # 원래 탭으로 돌아오기\n driver.close()\n driver.switch_to.window(driver.window_handles[0])\n time.sleep(0.5)\n\n # 즉시송신\n def send_regist(self, store_name):\n driver = self.driver\n WebDriverWait(driver, 10).until(\n EC.visibility_of_element_located((By.XPATH, '//div[./span[contains(text(), \"상품등록 송신\")]]'))\n )\n time.sleep(0.5)\n\n # 쇼핑몰ID\n # 위메프는 사용할 계정을 선택해야 함\n if store_name == StoreNameEnum.WeMakePrice.value:\n wemakeprice_account_checkbox = driver.find_element(\n By.XPATH, '//label[./span[contains(text(), \"cocoblanc(79459)\")]]//input'\n )\n driver.execute_script(\"arguments[0].click();\", wemakeprice_account_checkbox)\n time.sleep(0.2)\n\n # 판매가 선택\n # 브랜디, 카카오톡 스토어\n if store_name == StoreNameEnum.Brandi.value or store_name == StoreNameEnum.KakaoTalkStore.value:\n use_store_sell_price = driver.find_element(\n By.XPATH, '//label[./span[contains(text(), \"쇼핑몰별 판매가에 등록된 판매가로 전송\")]]//input'\n )\n driver.execute_script(\"arguments[0].click();\", use_store_sell_price)\n time.sleep(0.2)\n\n # 카테고리 매핑선택여부\n # 위메프를 제외한 모든 상점에서 '사방넷 카테고리 매핑적용함' 라디오버튼을 체크함\n if store_name != StoreNameEnum.WeMakePrice.value:\n use_sabangnet_mapping = driver.find_element(\n By.XPATH, '//label[./span[contains(text(), \"사방넷 카테고리 매핑적용함\")]]//input'\n )\n driver.execute_script(\"arguments[0].click();\", use_sabangnet_mapping)\n time.sleep(0.2)\n\n # 상세설명 선택\n # 위메프에서는 '쇼핑몰별 상세설명에 등록된 상세설명으로 전송' 선택\n if store_name == StoreNameEnum.WeMakePrice.value:\n use_detail_note = driver.find_element(\n By.XPATH, '//label[./span[contains(text(), \"쇼핑몰별 상세설명에 등록된 상세설명으로 전송\")]]//input'\n )\n driver.execute_script(\"arguments[0].click();\", use_detail_note)\n time.sleep(0.2)\n\n # 쇼핑몰 부가정보\n # 각 쇼핑몰마다 선택해야하는 라디오버튼이 다름\n if store_name == StoreNameEnum.ElevenStreet.value:\n store_sub_note = driver.find_element(By.XPATH, '//label[./span[./button[./span[text()=\"11번가\"]]]]//input')\n driver.execute_script(\"arguments[0].click();\", store_sub_note)\n time.sleep(0.2)\n elif store_name == StoreNameEnum.WeMakePrice.value:\n store_sub_note = driver.find_element(\n By.XPATH, '//label[./span[./button[./span[text()=\"위메프_1번코드\"]]]]//input'\n )\n driver.execute_script(\"arguments[0].click();\", store_sub_note)\n time.sleep(0.2)\n elif store_name == StoreNameEnum.Cafe24.value:\n store_sub_note = driver.find_element(By.XPATH, 
'//label[./span[./button[./span[text()=\"카페24\"]]]]//input')\n driver.execute_script(\"arguments[0].click();\", store_sub_note)\n time.sleep(0.2)\n elif store_name == StoreNameEnum.Coupang.value:\n store_sub_note = driver.find_element(By.XPATH, '//label[./span[./button[./span[text()=\"쿠팡\"]]]]//input')\n driver.execute_script(\"arguments[0].click();\", store_sub_note)\n time.sleep(0.2)\n elif store_name == StoreNameEnum.Grip.value:\n store_sub_note = driver.find_element(By.XPATH, '//label[./span[./button[./span[text()=\"그립\"]]]]//input')\n driver.execute_script(\"arguments[0].click();\", store_sub_note)\n time.sleep(0.2)\n elif store_name == StoreNameEnum.Brandi.value:\n store_sub_note = driver.find_element(By.XPATH, '//label[./span[./button[./span[text()=\"브랜디\"]]]]//input')\n driver.execute_script(\"arguments[0].click();\", store_sub_note)\n time.sleep(0.2)\n elif store_name == StoreNameEnum.KakaoTalkStore.value:\n store_sub_note = driver.find_element(By.XPATH, '//label[./span[./button[./span[text()=\"카카오\"]]]]//input')\n driver.execute_script(\"arguments[0].click();\", store_sub_note)\n time.sleep(0.2)\n\n # 쇼핑몰 카테고리\n # 위메프에서는 '쉬폰/시스루 블라우스' 선택\n if store_name == StoreNameEnum.WeMakePrice.value:\n store_category = driver.find_element(\n By.XPATH, '//label[./span[./button[./span[text()=\"쉬폰/시스루 블라우스\"]]]]//input'\n )\n driver.execute_script(\"arguments[0].click();\", store_category)\n time.sleep(0.2)\n\n # 즉시송신 클릭\n print(\"즉시송신 클릭 시점\")\n\n upload_now_button = driver.find_element(By.XPATH, '//button[./span[contains(text(), \"즉시송신\")]]')\n driver.execute_script(\"arguments[0].click();\", upload_now_button)\n time.sleep(1)\n\n # 로딩화면\n self.wait_loading()\n\n time.sleep(5)\n\n # 로딩 완료 후 자동으로 창 닫김 -> 다른 새 창 발생\n try:\n driver.switch_to.window(driver.window_handles[0])\n time.sleep(0.5)\n driver.switch_to.window(driver.window_handles[1])\n time.sleep(0.5)\n except Exception as e:\n print(e)\n\n self.wait_loading()\n\n print()\n\n # 상품품절 송신 화면 -> 사방넷 클라이언트가 설치되어있어야 작동한다.\n # $x('//b[contains(text(), \"종료\") and contains(text(), \"송신작업이 완료되었습니다\")]')\n # $x('//b[contains(text(), \"결과\") and contains(text(), \"실패건은 Message 확인후 재송신바랍니다\")]')\n try:\n WebDriverWait(driver, self.default_wait).until(\n EC.visibility_of_element_located(\n (By.XPATH, '//b[contains(text(), \"결과\") and contains(text(), \"실패건은 Message 확인후 재송신바랍니다\")]')\n )\n )\n time.sleep(0.5)\n\n upload_result_message = driver.find_element(\n By.XPATH, '//b[contains(text(), \"결과\") and contains(text(), \"실패건은 Message 확인후 재송신바랍니다\")]'\n ).get_attribute(\"textContent\")\n\n print(upload_result_message)\n\n self.log_msg.emit(upload_result_message)\n\n print(\"송신 성공 시점\")\n\n except Exception as e:\n print(e)\n raise Exception(\"작업 성공 메시지를 발견하지 못했습니다.\")\n\n finally:\n pass\n\n def wait_loading(self):\n driver = self.driver\n # $x('//img[contains(@src, \"loading\") and contains(@src, \".gif\")]')\n loading = True\n wait_count = 1\n driver.implicitly_wait(1)\n try:\n loading_screen = driver.find_element(\n By.XPATH, '//img[contains(@src, \"loading\") and contains(@src, \".gif\")]'\n )\n except Exception as e:\n print(f\"loading finished\")\n\n driver.implicitly_wait(1)\n while loading:\n try:\n print(f\"wait_count: {wait_count}\")\n loading_screen = driver.find_element(\n By.XPATH, '//img[contains(@src, \"loading\") and contains(@src, \".gif\")]'\n )\n wait_count = wait_count + 1\n loading = True\n if wait_count > self.maximum_wait:\n loading = False\n raise Exception(\"무한 로딩\")\n\n except UserWarning as ue:\n print(f\"최대 대기시간 
{self.maximum_wait}초 초과\")\n raise Exception(\"무한 로딩\")\n\n except Exception as e:\n print(f\"로딩 완료\")\n break\n\n finally:\n time.sleep(1)\n\n print(f\"wait_loading finished\")\n driver.implicitly_wait(self.default_wait)\n\n # 전체작업 시작\n def work_start(self):\n print(f\"process: work_start\")\n\n try:\n self.sabangnet_login()\n\n for target_date in self.guiDto.target_date_list:\n self.target_date = target_date\n\n try:\n print(f\"{self.target_date} 작업 시작\")\n self.log_msg.emit(f\"{self.target_date} 작업 시작\")\n\n self.sabangnet_main()\n\n if self.guiDto.is_eleven:\n self.eleven_shop_regist()\n else:\n self.shop_regist()\n\n self.log_msg.emit(f\"{self.target_date} 작업 완료\")\n\n except Exception as e:\n print(e)\n self.log_msg.emit(f\"{self.target_date} 작업 실패 {str(e)}\")\n continue\n\n except Exception as e:\n print(e)\n\n finally:\n self.driver.quit()\n time.sleep(0.2)\n\n\nif __name__ == \"__main__\":\n # process = SabangnetAutoUploaderProcess()\n # process.work_start()\n pass\n","repo_name":"dlgnlwo3/sabangnet-auto-uploader","sub_path":"process/sabangnet_regist_upload_process.py","file_name":"sabangnet_regist_upload_process.py","file_ext":"py","file_size_in_byte":18721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21617886768","text":"from flask import Flask, request, make_response\nfrom flask_cors import CORS\nfrom helper import Server\nimport json\nimport time\napp = Flask(__name__)\nCORS(app)\n\nmyServer = Server()\n\n\n@app.route('/')\ndef hello():\n return 'hello,world'\n\n\n@app.route('/test', methods=['GET'])\ndef testfunc():\n question = str(request.args['ques'])\n query_dict = {\"request_id\": 'ExamServer', \"query\": question}\n try:\n result = myServer.get_result(query_dict)\n return result\n except KeyError:\n return f'输入无效!'\n\n\n# @app.route('/QA', methods=['POST', 'OPTIONS'])\n# def model_server(request):\n# try:\n# json_bytes = request.body\n# json_string = json_bytes.decode('utf-8')\n# json_dict = json.loads(json_string)\n# start_time = time.time()\n# result = myServer.get_result(json_dict)\n# print('耗时:', time.time()-start_time)\n# except Exception as e:\n# result = {\"code\": 400, \"message\": \"预测失败\", \"Error\": e}\n# return make_response(result)\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8080, debug=app.config['DEBUG'])","repo_name":"MachaCroissant/qaRetreivalBot","sub_path":"code/api/flask_server.py","file_name":"flask_server.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"13535585397","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.prev = None\n self.next = None\n\n\nclass DoublyLinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def append(self, data):\n new_node = Node(data)\n if self.head is None:\n self.head = self.tail = new_node\n else:\n new_node.prev = self.tail\n self.tail.next = new_node\n self.tail = new_node\n\n def prepend(self, data):\n new_node = Node(data)\n if self.head is None:\n self.head = self.tail = new_node\n else:\n new_node.next = self.head\n self.head.prev = new_node\n self.head = new_node\n\n def delete(self, data):\n current = self.head\n while current:\n if current.data == data:\n if current.prev:\n current.prev.next = current.next\n else:\n self.head = current.next\n if current.next:\n current.next.prev = current.prev\n else:\n self.tail = current.prev\n return\n current = current.next\n\n def 
insert_after(self, node, data):\n new_node = Node(data)\n if node is None:\n return\n new_node.prev = node\n new_node.next = node.next\n node.next = new_node\n if new_node.next:\n new_node.next.prev = new_node\n else:\n self.tail = new_node\n\n def insert_before(self, node, data):\n new_node = Node(data)\n if node is None:\n return\n new_node.next = node\n new_node.prev = node.prev\n node.prev = new_node\n if new_node.prev:\n new_node.prev.next = new_node\n else:\n self.head = new_node\n\n def delete_at_end(self):\n if self.tail is None:\n return\n if self.head == self.tail:\n self.head = self.tail = None\n return\n self.tail = self.tail.prev\n self.tail.next = None\n\n def print_list(self):\n current = self.head\n while current:\n print(current.data)\n current = current.next\n\n\nfruits = DoublyLinkedList()\nfruits.append('banana')\nfruits.append('manggo')\nfruits.insert_after(fruits.head.next, 'apple')\nfruits.append('grapes')\nfruits.append('orange')\nfruits.insert_before(fruits.tail.prev ,'durian')\nfruits.delete_at_end()\n\nfruits.print_list()\n","repo_name":"ThomasAlberto21/struktur_data","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"50642034433","text":"print('Este algoritmo verifica o dia da semana mais votado, de acordo com o votos inputados.')\n\nprint('A seguir, digite a quantidade de votos de acordo com o dia da semana:')\n\nmonday = int(input('Segunda-feira: '))\ntuesday = int(input('Terça-feira: '))\nwednesday = int(input('Quarta-feira: '))\nthursday = int(input('Quinta-feira: '))\nfriday = int(input('Sexta-feira: '))\n\nmost_voted = 0\nwinner = ''\n\nif monday > most_voted:\n most_voted = monday\n winner = 'Segunda-feira'\n\nif tuesday > most_voted:\n most_voted = tuesday\n winner = 'Terça-feira'\n\nif wednesday > most_voted:\n most_voted = wednesday\n winner = 'Quarta-feira'\n\nif thursday > most_voted:\n most_voted = thursday\n winner = 'Quinta-feira'\n\nif friday > most_voted:\n most_voted = friday\n winner = 'Sexta-feira'\n\nprint('O dia mais votado foi ' + winner + '!')\n","repo_name":"DougAnTr/FIAP","sub_path":"programacao/python/Problemas_Cap2/RM86617_EX03.py","file_name":"RM86617_EX03.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"3537323554","text":"#####################################################################################################################################################################################################\n #3PAM Extract Validator\n #Input : .xlsx file extracted from Chevron 3Pam system\n #Output: Text indicating if columns of input file are correct as required for 432 Data Migration transform script\n#####################################################################################################################################################################################################\n\n\nfrom openpyxl import Workbook, load_workbook\nimport datetime\nimport sys\n\nfilename = sys.argv[1] #Filename is expected to be the first argument on the commandline.\n\ndef checkColumn(ws, checkColumn, expectedValue):\n \"\"\"Given a column number and an expected value return true/false indicating if that value is present at column\"\"\"\n returnValue = False\n if ws.cell(row=1, column=checkColumn).value == expectedValue:\n returnValue = True\n return returnValue\n\n\ndef 
runTest(ws, column, value):\n if checkColumn(ws, column,value):\n print(\"Column \" + numberToLetter(column) + \" is correct.\")\n else:\n print(\"Column \" + numberToLetter(column) + \" is not correct! Expected \" + value)\n\n\n\ndef numberToLetter(colNumber):\n \"\"\"Given a column number return the excel column heading\"\"\"\n ascii = colNumber + 64\n return chr(ascii)\n\n\ndef main():\n #load workbook\n print(\"Loading: \" + filename)\n wb = load_workbook(filename, read_only=True) #We load read only here because we are not modifying data and it is CONSIDERABALLY faster. \n\n #activate sheet\n ws = wb.active\n\n headerContents = {}\n headerContents[1] = \"EntityID\"\n headerContents[2] = \"EntityName\"\n headerContents[3] = \"EntityCountry\"\n headerContents[4] = \"EntityCountryName\"\n headerContents[5] = \"EntityAliasName\"\n headerContents[6] = \"ParentEntityName\"\n headerContents[7] = \"SourcingCompanyName\"\n headerContents[8] = \"SourcingCompanyID\"\n headerContents[9] = \"EntityTechData\"\n headerContents[10] = \"EntitySPData\"\n headerContents[11] = \"EntityIPData\"\n headerContents[12] = \"ResourceID\"\n headerContents[13] = \"ResourceType\"\n headerContents[14] = \"ResourceName\"\n headerContents[15] = \"ResourceCountry\"\n headerContents[16] = \"ResourceDescription\"\n headerContents[17] = \"ResourceURL\"\n headerContents[18] = \"ResourceECCN\"\n headerContents[19] = \"ApprovedAccessOPCO\"\n headerContents[20] = \"ApprovedAccessApprovalDate\"\n headerContents[21] = \"ApprovedAccessSourceInfo\"\n\n\n for c in range(1,22):\n runTest(ws,c,headerContents[c])\n\n\nif (__name__ == \"__main__\"):\n main()\n\n\n\n","repo_name":"jamielacivita/3PAMValidator","sub_path":"threePamOuputValidator_181005-0804.py","file_name":"threePamOuputValidator_181005-0804.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70613295083","text":"# python send_to_db.py 'https://www.encodeproject.org/search/?searchTerm=H3K4ME3&type=Experiment&replication_type=isogenic&assembly=GRCh38&award.rfa=ENCODE4&format=json'\nimport boto3\nimport subprocess\n# import os.path\nfrom os import path\n\nfrom ..models import Correlations\nfrom .lambda_async_s3_uri import filter_complete\n\ndef insert_db(args):\n if path.exists('input.txt'):\n bashCommand = 'rm input.txt'\n process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)\n process.communicate()\n\n if path.exists('output.csv'):\n bashCommand = 'rm output.csv'\n process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)\n process.communicate()\n\n table_values = filter_complete(args)\n\n ordered_table_values = sorted(table_values, key=lambda i: (i['rowNum'], i['colNum']))\n\n for value_set in ordered_table_values:\n new_value_set = Correlations.objects.create(**value_set)\n new_value_set.save()\n\n sqs = boto3.client('sqs')\n sqs.purge_queue(QueueUrl='https://sqs.us-west-2.amazonaws.com/618537831167/jaccard3-success')","repo_name":"ABehal2020/chipseq-visualization","sub_path":"back-end/corr_end/send_values/api/send_to_db.py","file_name":"send_to_db.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"70223192043","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/8/23 2:24\n# @公众号 :Python自动化办公社区 \n# @File : 26_wxpy_用Python玩微信:可视化统计好友地理位置.py\n# @Software: PyCharm\n# @Description:\n\n# pip install wxpy\nfrom wxpy import *\n\n# 
初始化一个机器人对象\n# cache_path缓存路径,给定值为第一次登录生成的缓存文件路径\nbot = Bot()\n# 获取好友列表(包括自己)\nmy_friends = bot.friends(update=False)\n'''\nstats_text 函数:帮助我们简单统计微信好友基本信息\n简单的统计结果的文本 \n:param total: 总体数量 \n:param sex: 性别分布 \n:param top_provinces: 省份分布 \n:param top_cities: 城市分布 \n:return: 统计结果文本\n'''\nprint(my_friends.stats_text())\n","repo_name":"zhaofeng092/python4office","sub_path":"CourseCode/Python自动化办公(2021最新版!有源代码 ,适合小白~)/06机器人/26_wxpy_用Python玩微信:可视化统计好友地理位置.py","file_name":"26_wxpy_用Python玩微信:可视化统计好友地理位置.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"12160893105","text":"## maybe should store C^-1 determinant separately??\n### get lots of fp errors especially in first iteration of the MCMC\n\n\n# normalize to 1 at peak\n# could just use inheritance for a version with unit area...\n\n## nb. at and __call__ work with (individual) arrays of x and y!\n\n## functions for getting the beam at a series of params w/ fixed\n## location?\n\n## parameters are currently a sequence of sequences, converted from an\n## array with 'package' and 'unpackage' functions\n\n## changed: init can now accept 'setup' parameters (maybe should do with metaclass?)\n## so may need to setparams() rather than just __init__ in Likelihood\n\n\n\nfrom numpy import array, exp, asarray, cos, sin, sqrt, float64\nimport math\nfrom .. import Proposal\n\n\nseparate_sigma12 = False\n\nif separate_sigma12:\n print(\"requiring sigma1>sigma2 in prior\")\n\n\nclass GaussianBeamModel2D(object):\n \"\"\"\n model of an unnormalized 2d gaussian beam; parameters\n are location (x, y); and various parameterizations of the shape:\n sigma_major, sigma_minor, angle\n sigma2_x, sigma2_y, rho=corr. coeff.\n \"\"\"\n \n ### class variables\n centerMin = None\n centerMax = None\n sigMax = None\n \n nparam = 5\n \n fmtstring = \"(%.3f %.3f) (%.3f %.3f) %.3f\"\n \n #paramBlocks = [0, 0, 1, 1, 2]\n paramBlocks = [0, 1, 2, 3, 4]\n nBlock = max(paramBlocks)+1\n \n texNames = [r\"x\", r\"y\", r\"$\\sigma_1$\", r\"$\\sigma_2$\", r\"$\\alpha$\", r\"$A$\"]\n \n def setParameters_MajMinAng(self, center, sigmas, angle):\n \"\"\"set the parameters from x, y, sigma_major, sigma_minor, angle\"\"\"\n \n \n self.center = center\n self.sigmas = sigmas\n self.angle = angle % math.pi\n\n \n c = cos(angle); s = sin(angle)\n s12 = sigmas[0]*sigmas[0]\n s22 = sigmas[1]*sigmas[1]\n \n self.sig2_xy = (c*c*s12 + s*s*s22, s*s*s12 + c*c*s22)\n self.rho = c*s*(s22-s12)/sqrt(self.sig2_xy[0]*self.sig2_xy[1]) # sign issue???\n \n self.set_Cinv()\n \n __init__ = setParameters_MajMinAng ## default to these params\n \n @classmethod\n def setsigMax(cls, xmax, ymax=None):\n \"\"\"\n set the maximum sigma (nb. same for x, y)\n \"\"\"\n \n try:\n cls.sigMax = max(max(xmax.x)-min(xmax.x), max(xmax.y)-min(xmax.y))\n except (NameError, AttributeError):\n if ymax is None:\n ymax = xmax\n cls.sigMax = max(max(xmax)-min(xmax), max(ymax)-min(ymax))\n \n @classmethod\n def setxyRange(cls, xrng, yrng=None, scale=None):\n \"\"\"\n set the min and max possible values for the x,y coordinates\n nb. 
this is a classmethod: sets the values for *all* instances!!!\n can set from any of\n a dataset with x and y members\n a sequence's min and max\n a (min, max) tuples\n set the yrange the same as the xrange if the former isn't given\n \"\"\"\n \n try:\n cls.centerMin = min(xrng.x), min(xrng.y)\n cls.centerMax = max(xrng.x), max(xrng.y)\n except NameError:\n if yrng is None:\n yrng = xrng\n cls.centerMin = (min(xrng), min(yrng))\n cls.centerMax = (max(xrng), max(yrng))\n \n if scale is not None:\n xr = 0.5*(cls.centerMax[0] - cls.centerMin[0])\n yr = 0.5*(cls.centerMax[1] - cls.centerMin[1])\n x0 = 0.5*(cls.centerMax[0] + cls.centerMin[0])\n y0 = 0.5*(cls.centerMax[1] + cls.centerMin[1])\n \n cls.centerMin = x0-scale*xr, y0-scale*yr\n cls.centerMax = x0+scale*xr, y0+scale*yr\n \n \n \n def setParameters_XYRho(self, center, sigma_xy, rho):\n \"\"\"set the parameters from x, y, sig_x, sig_y, rho=corr.coeff.\"\"\"\n \n self.center = center\n self.sig2_xy=array(sigma_xy, float64)**2\n self.rho = rho\n self.set_Cinv()\n \n def set_Cinv(self):\n \"\"\"set the packed inverse correlation matrix values\"\"\"\n \n sig2x, sig2y = self.sig2_xy\n det = sig2x * sig2y * (1.0 - self.rho*self.rho)\n \n ## inverse array in a \"packed\" form (xx, xy, yy)\n self.Cinv = array([\n sig2y, -self.rho*sqrt(sig2x*sig2y), sig2x], float64)/det\n \n def get_XYRho(self):\n \"\"\"\n get the parameters tuple((x,y), (s2x, s2y), rho)\n nb. for now we return sigma^2, not sigma!\n \"\"\"\n return self.center, self.sig2_xy, self.rho\n \n def get_MajMinAng(self):\n \"\"\" get the parameters tuple((x,y), (s1, s2), angle)\n \"\"\"\n if self.sigmas is None:\n ## calculate sigma_maj, min; angle from sig_xy, rho\n pass # for now\n return self.center, self.sigmas, self.angle\n \n def at(self, data):\n \"\"\"get the [array of] value[s] of the beam for the given dataset\"\"\"\n return gauss2d(data.x, data.y, self.center[0], self.center[1], *self.Cinv)\n \n def atxy(self, x, y):\n \"\"\"\n get the [array of] value[s] of the beam for the given x, y value[s]\n \"\"\"\n return gauss2d(x, y, self.center[0], self.center[1], *self.Cinv)\n \n __call__ = at\n \n @classmethod\n def prior(cls, center, sigmas, angle):\n \"\"\"get the unnormalized prior for the parameters\n \"\"\"\n \n \n if cls.centerMin is not None and (\n center[0] < cls.centerMin[0] or center[1] < cls.centerMin[1] or\n center[0] > cls.centerMax[0] or center[1] > cls.centerMax[1]):\n return 0\n \n if cls.sigMin is None and (sigmas[0]<0 or sigmas[1]<0):\n return 0\n elif sigmas[0] cls.sigMax:\n return 0 # too restrictive?\n \n if separate_sigma12 and sigma2>sigma1:\n return 0\n \n return 1\n \n ## note that we do (angle % pi) in these [probably really only needed in 'package'?]\n def unpackage(param_seqs):\n \"\"\" convert from structured sequence of parameters to flat array \"\"\"\n xy, sig12, ang = param_seqs\n return array( [ xy[0], xy[1], sig12[0], sig12[1], ang % math.pi], dtype=float64)\n \n def package(params_flat):\n \"\"\" convert from flat array to structured sequence of parameters \"\"\"\n return (tuple(params_flat[0:2]), tuple(params_flat[2:4]),\n params_flat[4] % math.pi)\n \n ## nb. 
an *instance* of proposal; should pass the class [name] to this?\n proposal = Proposal.GenericGaussianProposal(package=package,\n unpackage=unpackage)\n\n## need to do this conversion after we send the methods to the Proposal class\n unpackage=staticmethod(unpackage)\n package=staticmethod(package)\n\n \n def startfrom(self, data, random=None):\n \"\"\"\n generate a set of starting parameters for the model:\n non-random version\n center = , \n sigmas = -^2, -^2\n angle=0\n \"\"\"\n if random is not None:\n dx = (self.centerMin[0], self.centerMax[0])\n dy = (self.centerMin[1], self.centerMax[1])\n \n start_params = ( (uniform(*dx), uniform(*dy)),\n (uniform(0,(dx[1]-dx[0])/5), uniform(0,(dy[1]-dy[0])/5)),\n uniform(0,math.pi/2) )\n else:\n pass\n \n \n\nclass GaussianBeamModel2D_xy(GaussianBeamModel2D):\n \"\"\"like GaussianBeamModel2D, but explicitly use sig_x, sig_y as parameters\"\"\"\n\n texNames = [r\"x\", r\"y\", r\"$\\sigma_x$\", r\"$\\sigma_y$\", r\"$\\rho$\", r\"$A$\"]\n \n def __init__(self, center, sigmas, rho):\n super(GaussianBeamModel2D_xy, self).setParameters_XYRho(center, sigmas, rho)\n\n @classmethod\n def prior(cls, center, sigmas, rho):\n \"\"\"get the unnormalized prior for the parameters\n \"\"\"\n \n if not super(GaussianBeamModel2D_xy, cls).prior(center, sigmas, 0):\n return 0\n \n if rho<-1 or rho>1:\n return 0\n \n return 1\n \n ## note that we do (angle % pi) in these [probably really only needed in 'package'?]\n def unpackage_xy(param_seqs):\n \"\"\" convert from structured sequence of parameters to flat array \"\"\"\n xy, sigxy, rho = param_seqs\n return array( [ xy[0], xy[1], sigxy[0], sigxy[1], rho], dtype=float64)\n\n def package_xy(params_flat):\n \"\"\" convert from flat array to structured sequence of parameters \"\"\"\n return (tuple(params_flat[0:2]), tuple(params_flat[2:4]), params_flat[4])\n\n ## nb. 
an *instance* of proposal; should pass the class [name] to this?\n proposal = Proposal.GenericGaussianProposal(package=package_xy,\n unpackage=unpackage_xy)\n\n## need to do this conversion after we send the methods to the Proposal class\n unpackage=staticmethod(unpackage_xy)\n package=staticmethod(package_xy)\n\n\n def startfrom(self, data, random=None):\n \"\"\"\n generate a set of starting parameters for the model:\n non-random version\n center = , \n sigmas = -^2, -^2\n rho=0\n \"\"\"\n if random is not None:\n dx = (self.centerMin[0], self.centerMax[0])\n dy = (self.centerMin[1], self.centerMax[1])\n\n start_params = ( (uniform(*dx), uniform(*dy)),\n (uniform(0,(dx[1]-dx[0])/5), uniform(0,(dy[1]-dy[0])/5)),\n uniform(-1,1) )\n else:\n pass\n\n\ndef gauss2d(x, y, x0, y0, Cinv_xx, Cinv_xy, Cinv_yy):\n dx = x-x0; dy = y-y0\n return exp(-0.5 * (dx*dx*Cinv_xx + dy*dy*Cinv_yy + 2*dx*dy*Cinv_xy))\n\n ","repo_name":"defjaf/MCMC","sub_path":"MCMC/BeamFit/BeamModel.py","file_name":"BeamModel.py","file_ext":"py","file_size_in_byte":9876,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"13655266186","text":"import turtle\nimport pandas\n\nscreen = turtle.Screen()\nscreen.title(\"U S State Game\")\n# tr=turtle.Turtle()\nimage = \"blank_states_img.gif\"\nscreen.addshape(\"blank_states_img.gif\")\nturtle.shape(image)\n\n# answer_state = screen.textinput(title=\"Guess the State\", prompt=\"What's another state ?\")\n\ndata = pandas.read_csv(\"50_states.csv\")\n# x = data[data['state'] == 'Alabama']\n# x.to_dict()\n\n\n#print(str(x['state'])) Putem da asa loop prin ele \n# x = data[data['state'] == answer_state]['state']\n# if x.item() == answer_state:\n# print(answer_state)\nguess_number = 0\nwhile True:\n\n answer_state = screen.textinput(title=f\"Guess the State {guess_number}/50\", prompt=\"What's another state ?\")\n x = data[data['state'] == answer_state]['state']\n try:\n if x.item() == answer_state:\n print(answer_state)\n turtle.setposition(int(data[data['state'] == answer_state]['x']), int(data[data['state'] == answer_state]['y']))\n turtle.write(f\"{str(x)}\")\n turtle.home()\n guess_number += 1\n \n except ValueError:\n pass\n\n\n\n\nturtle.exitonclick()\n","repo_name":"Maisu09/US-states-names","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6249811861","text":"# -*- coding: utf-8 -*-\n\"\"\"\n!/usr/bin/python3\n@CreateDate : 2019-07-12\n@Author : jet\n@Filename : common_mock_server.py\n@Software : pycharm\n\"\"\"\nimport sys\nimport json\nimport os.path\nfrom flask import Flask, request\nfrom utility.Logger import handler\nfrom views.demo_views_json_datas import demo_views_json_datas\n\ninstance_path = os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__, root_path=instance_path)\napp.logger.addHandler(handler)\napp.debug = True\n\napp.register_blueprint(demo_views_json_datas, url_prefix='/demo_views_json_datas')\n\n\ndef config_app(app):\n app.logger.info('Setting up application...')\n app.config.from_mapping(HOST='0.0.0.0')\n# for handler in logger.handlers:\n# app.logger.addHandler(handler)\n\n @app.before_request\n def before_request():\n app.logger.info(\n 'request url is {0}; method is {1}; remote_addr is {2}'.format(\n request.url,\n request.method,\n request.remote_addr))\n app.logger.info('request data is {0}'.format(request.data))\n\n @app.after_request\n def 
after_request(response):\n app.logger.info('response_status is {0}'.format(response._status))\n if hasattr(response, 'data'):\n data = response.data\n if isinstance(data, dict):\n response.data = json.dumps(data)\n elif not data:\n response.data = \"{}\"\n else:\n response.data = \"{}\"\n app.logger.info('response data is {0}'.format(response.data))\n return response\n\n\ndef dispatch_handlers(app):\n\n @app.errorhandler(403)\n def permission_error(error):\n return json.dumps({'error': str(error), 'errorCode': 403})\n\n @app.errorhandler(404)\n def page_not_found(error):\n return json.dumps({'error': str(error), 'errorCode': 404})\n\n @app.errorhandler(500)\n def page_error(error):\n return json.dumps({'error': str(error), 'errorCode': 500})\n\n\nconfig_app(app)\ndispatch_handlers(app)\n\n\n@app.route('/test')\ndef test():\n return json.dumps({'recode': 0, 'message': 'success'}), 200\n\n\nif __name__ == '__main__':\n port = \"\"\n if len(sys.argv) > 1:\n port = sys.argv[1]\n if not port:\n port = 5000\n app.run(host='0.0.0.0', port=port)\n","repo_name":"joker-i5700/http_server_tools","sub_path":"demo/flask_mock_server/common_mock_server.py","file_name":"common_mock_server.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30759932470","text":"\"\"\"\nTest RetailSite model\n\"\"\"\n\nimport pytest\nfrom basketbot import datamodel as dm\nfrom basketbot import DefaultRuleNotUnique\n\ndef test_retail_site_get_item_rule(db_with_items):\n \"\"\"\n Test that the get_item_rule method on RetailSite object\n correctly provides item exception rule or default rule\n \"\"\"\n rs = dm.RetailSite.query.filter(dm.RetailSite.name==\"Superstore\").scalar()\n sr_1 = dm.ScrapingRule(\n default_rule=True,\n retail_site_id=rs.id,\n parent_elem_id=dm.DOMElem.query.first().id,\n parent_id =\"testing_scraping_rule\",\n class_chain = {}\n )\n sr_2 = dm.ScrapingRule(\n default_rule=False,\n retail_site_id=rs.id,\n parent_elem_id=dm.DOMElem.query.first().id,\n parent_id =\"testing_scraping_rule_2\",\n class_chain = {}\n )\n banana = dm.Item.query.filter(dm.Item.name==\"banana\").scalar()\n apple = dm.Item.query.filter(dm.Item.name==\"apple\").scalar()\n snapple = dm.Item.query.filter(dm.Item.name==\"snapple\").scalar()\n sr_2.items = [banana, apple]\n db_with_items.add_all([sr_1, sr_2])\n db_with_items.commit()\n assert rs.get_item_rule(banana) == sr_2\n assert rs.get_item_rule(apple) == sr_2\n assert rs.get_item_rule(snapple) == sr_1\n\ndef test_retail_site_scraping_attributes(db_with_items):\n \"\"\"\n Test that the RetailSite ORM class attributes returns correct\n default and exception rules\n \"\"\"\n # Test where have both defaults and exceptions\n rs = dm.RetailSite.query.filter(dm.RetailSite.name==\"Superstore\").scalar()\n sr_1 = dm.ScrapingRule(\n default_rule=True,\n retail_site_id=rs.id,\n parent_elem_id=dm.DOMElem.query.first().id,\n parent_id =\"testing_scraping_rule\",\n class_chain = {}\n )\n sr_2 = dm.ScrapingRule(\n default_rule=False,\n retail_site_id=rs.id,\n parent_elem_id=dm.DOMElem.query.first().id,\n parent_id =\"testing_scraping_rule_2\",\n class_chain = {}\n )\n sr_2.items = [\n dm.Item.query.filter(dm.Item.name==\"banana\").scalar(), \n dm.Item.query.filter(dm.Item.name==\"apple\").scalar()\n ]\n db_with_items.add_all([sr_1, sr_2])\n db_with_items.commit()\n\n assert rs.default_rule == sr_1\n assert rs.exception_rules == [sr_2]\n\n # Test for error when have no or multiple default_rules 
associated\n rs2 = dm.RetailSite.query.filter(dm.RetailSite.name==\"Megastore\").scalar()\n assert rs2.exception_rules == []\n with pytest.raises(DefaultRuleNotUnique):\n rs2.default_rule\n db_with_items.rollback()\n sr_2.items = [] # NB: Important to clear items first as autoflush will raise...\n sr_2.default_rule = True # ... DefaultRuleNotUniquen prematurely here otherwise.\n db_with_items.add(sr_2)\n db_with_items.commit() # Associate two default rules with a store\n with pytest.raises(DefaultRuleNotUnique):\n rs.default_rule # This store now has two default scraping rules\n\n@pytest.mark.parametrize(\"url\",\n [\n (\n \"http://www.superstore.com\",\n True\n ), # In DB\n (\n \"http://example.com\",\n True\n ), # In DB\n (\n \"http://www.superstore.com/products/2423452345\",\n True\n ), # In DB (with extra path)\n (\n \"http://www.superstore.com/products/4544?ra=45@la=55\",\n True\n ), # In DB (with extra path and params)\n (\n \"http://www.megastore.com/products/4544?ra=45@la=55\",\n True\n ), # In DB (another entry)\n (\n \"http://megastore.com/products/4544?ra=45@la=55\",\n False\n ), # (lack of) subdomain not in DB \n (\n \"http://www.crinklefunk.com\",\n False\n ), # Domain not in DB\n (\n \"https://www.megastore.com/products/4544?ra=45@la=55\",\n False\n ), # Wrong protocol\n (\n \"https://www.superstore.co.uk/products/4544?ra=45@la=55\",\n False\n ), # Suffix not in DB\n ])\ndef test_get_site_from_url(db_with_items, url):\n \"\"\"\n Check the RetailSite class method for finding sites by url elements\n \"\"\"\n rslt = dm.RetailSite.get_site_from_url(url[0])\n if url[1]:\n assert isinstance(rslt, dm.RetailSite)\n else:\n assert rslt is None\n\n\n","repo_name":"sparrigan/BasketBot","sub_path":"tests/test_retail_site.py","file_name":"test_retail_site.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"75329299239","text":"import sys\n\n\ndef find_unused_ports_from_netstat():\n \"\"\"\n Get a list of ports that are not already taken.\n \n Parameters\n ----------\n input_file\n input file based on netstat output generated e.g.: \n netstat -nlp|grep LISTEN|grep : > used_ports_$1_`hostname`.txt\n v\n verbose if 1.\n \n Notes:\n ------\n | The input file may be the concatenation of files for multiple nodes to \n | find common nodes available on all nodes.\n | \n | This is to overcome the issue of some ports held by previous Hadoop processes.\n |\n |\n | **TO DO:**\n |\n | -Add default ports, checks, etc.\n \"\"\"\n\n start_port_range=20000\n end_port_range=32000\n num_ports=3\n #default_ports=[20000,20001,20002]\n \n input_file=sys.argv[1]\n v=1\n \n with open(input_file,'r') as f:\n ports_v=[]\n for line in f:\n line_short=' '.join(line.strip().split())\n line_split=line_short.split()\n port=line_split[3].split(':')[-1]\n ports_v.append(port)\n \n found_ports=[]\n id_port=0\n for test_port in range(start_port_range,end_port_range):\n if str(test_port) not in ports_v:\n found_ports.append(test_port)\n id_port+=1\n if id_port==num_ports:\n break\n #if v==1: \n # print(\"Used ports: \"+','.join(map(str,ports_v)))\n # print(\"Available ports: \"+','.join(map(str,found_ports)))\n \n str_out=\",nmlocport=\"+str(found_ports[0])+\",nmwebport=\"+str(found_ports[1])+\",shuffleport=\"+str(found_ports[2])\n print(str_out)\n return(str_out)\n\n\nif __name__ == '__main__':\n 
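# Editor's note (added comment, not in the original script): scans the netstat dump for three unused\n    # ports and prints them as ,nmlocport=/,nmwebport=/,shuffleport= parameters (see str_out above).\n    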
find_unused_ports_from_netstat()\n\n\n\n\n","repo_name":"MITHaystack/CorrelX","sub_path":"sh/get_params_ports.py","file_name":"get_params_ports.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"26386670429","text":"# 4) Crie um programa que peça 2 números. \n# Depois crie um menu interativo que peça qual \n# operação matemática deseja realizar (+, -, /, *). \n# Utilize as funções criadas no exercício anterior e\n# mostre o resultado da operação escolhida. \n\nimport os # importa uma opção para limpar a tela\n\nimport time # importa opções de timer \n\ndef fprg_soma(p_valor1 , p_valor2):\n vloc_soma = p_valor1 + p_valor2\n\n return print(f\"Soma entre {p_valor1} e {p_valor2} é: {vloc_soma}\")\n\ndef fprg_subtrai(p_valor1 , p_valor2):\n vloc_subtrai = p_valor1 - p_valor2\n\n return print(f\"Sbtração entre {p_valor1} e {p_valor2} é: {vloc_subtrai}\")\n\ndef fprg_divisao(p_valor1 , p_valor2):\n if p_valor2 == 0:\n print(\"Divisão inválida\")\n else:\n vloc_divisao = p_valor1 / p_valor2\n \n return print(f\"Divisão entre {p_valor1} e {p_valor2} é: {vloc_divisao}\")\n\ndef fprg_multiplica(p_valor1 , p_valor2):\n vloc_multiplica = p_valor1 * p_valor2\n\n return print(f\"Multiplicação entre {p_valor1} e {p_valor2} é: {vloc_multiplica}\")\n\ndef pprg_menu():\n print('''\n *********** MENU *********** \n *** [1] - SOMA ***\n *** [2] - SUBTRAÇÃO ***\n *** [3] - MULTIPLICAÇÃO ***\n *** [4] - DIVISÃO ***\n *** [5] - SAIR ***\n ****************************\n '''\n )\n\n\ndef pprg_tlimpa(): # cria um um procedimento que executa o código abaixo \n \n time.sleep(2) # conseguimos fazer um temporizador de tela\n \n os.system('cls' if os.name == 'nt' else 'clear') # com esse código conseguimos limpar a tela\n\nwhile True:\n \n pprg_menu()\n vprg_menu = input(\"Escolha uma opção: \")\n\n if vprg_menu >= '1' and vprg_menu <= '4' :\n vprg_valor1 = int(input(\"Digite 1º valor: \"))\n vprg_valor2 = int(input(\"Digite 2º valor: \"))\n # elif vprg_menu <= '4':\n # vprg_valor1 = input(\"Digite 1º valor: \")\n # vprg_valor2 = input(\"Digite 2º valor: \")\n\n if vprg_menu == '1':\n fprg_soma(vprg_valor1,vprg_valor2)\n elif vprg_menu == '2':\n fprg_subtrai(vprg_valor1,vprg_valor2)\n elif vprg_menu == '3':\n fprg_multiplica(vprg_valor1,vprg_valor2)\n elif vprg_menu == '4':\n fprg_divisao(vprg_valor1,vprg_valor2)\n elif vprg_menu == '5':\n print(\"Você saiu do programa!\")\n break\n else:\n print(\"Opção inválida!\")\n \n pprg_tlimpa()","repo_name":"botaoap/DesenvolvimentoPython_Botao_2020","sub_path":"Aula 4/Exercicio11/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29137994196","text":"from Analyzing_GUI import *\nfrom Plotting_GUI import *\nfrom Modify_Dataset_GUI import *\nimport nexfile\n\ndef spikeStatistics(*myInput):\n Datas = myInput[0]\n Input = myInput[1]\n DataGroup = myInput[2]\n TimeStamps = myInput[3]\n lock = myInput[4]\n bins = int(Input[0]['DoubleSpinBox'][0])\n fr_bin_sec = Input[0]['DoubleSpinBox'][1]\n dictIsi = {}\n dictFR = {}\n outputTableFR = np.zeros(0, dtype={'names':('Subject','Neuron','Time(sec)','Firing Rate'),\n 'formats':('S100','S30',float,float)})\n log2ISIoutput = np.zeros(0,dtype={'names':('Subject','Neuron','Edge 0','Edge 1','log2(ISI)'),\n 'formats':('S100','S30',float,float,float)})\n for name in DataGroup:\n \n dictIsi[name] = {}\n dictFR[name] = 
{}\n try:\n lock.lockForRead()\n data_spk = deepcopy(Datas.takeDataset(name))\n finally:\n lock.unlock()\n num_vars = data_spk['FileHeader']['NumVars']\n ch_list = []\n num_ch = 0\n for ch in xrange(num_vars):\n if data_spk['Variables'][ch]['Header']['Type'] == 0: # if it is a spike train\n ch_list += [ch]\n num_ch += 1\n rec_dur = data_spk['FileHeader']['End']\n steps = int(rec_dur // fr_bin_sec)\n fr_mat = np.zeros((num_ch,steps))\n histisi_mat = np.zeros((num_ch,bins))\n edgeisi_mat = np.zeros((num_ch,bins+1))\n tmpOutputFR = np.zeros(num_ch*steps,dtype={'names':('Subject','Neuron','Time(sec)','Firing Rate'),\n 'formats':('S100','S30',float,float)})\n tmpOutputFR['Subject'] = name\n tmplog2ISIoutput = np.zeros(num_ch*bins,dtype={'names':('Subject','Neuron','Edge 0','Edge 1','log2(ISI)'),\n 'formats':('S100','S30',float,float,float)})\n tmplog2ISIoutput['Subject'] = name\n nameList = []\n ch_idx = 0\n for ch in ch_list:\n nameList += [name + '\\n' + data_spk['Variables'][ch]['Header']['Name']]\n spk_ts = data_spk['Variables'][ch]['Timestamps']\n log2isi = np.log2(spk_ts[1:] - spk_ts[:-1])\n histisi, edgeisi = np.histogram(log2isi,bins=bins,normed=True)\n firingRate = np.zeros(steps)\n for k in xrange(steps):\n bl = (spk_ts >= k * fr_bin_sec) * (spk_ts < (k + 1) * fr_bin_sec)\n firingRate[k] = np.sum(bl) / fr_bin_sec\n k += 1\n fr_mat[ch_idx,:] = firingRate\n histisi_mat[ch_idx,:] = histisi\n edgeisi_mat[ch_idx,:] = edgeisi\n tmpOutputFR['Neuron'][ch_idx*firingRate.shape[0]: (ch_idx+1)*firingRate.shape[0]] = data_spk['Variables'][ch]['Header']['Name']\n tmpOutputFR['Firing Rate'][ch_idx*firingRate.shape[0]: (ch_idx+1)*firingRate.shape[0]] = firingRate\n tmpOutputFR['Time(sec)'][ch_idx*firingRate.shape[0]: (ch_idx+1)*firingRate.shape[0]] = np.arange(steps) * fr_bin_sec\n tmplog2ISIoutput['Neuron'][ch_idx*bins: (ch_idx+1)*bins] = data_spk['Variables'][ch]['Header']['Name']\n tmplog2ISIoutput['Edge 1'][ch_idx*bins: (ch_idx+1)*bins] = edgeisi[:-1]\n tmplog2ISIoutput['Edge 0'][ch_idx*bins: (ch_idx+1)*bins] = edgeisi[1:]\n tmplog2ISIoutput['log2(ISI)'][ch_idx*bins: (ch_idx+1)*bins] = histisi\n ch_idx += 1\n outputTableFR = np.hstack((outputTableFR,tmpOutputFR))\n log2ISIoutput = np.hstack((log2ISIoutput,tmplog2ISIoutput))\n dictIsi[name]['histISI'] = histisi_mat\n dictIsi[name]['edgeISI'] = edgeisi_mat\n dictIsi[name]['nameISI'] = nameList\n dictFR[name]['firingRate'] = fr_mat\n dictFR[name]['time(Sec)'] = np.arange(steps) * fr_bin_sec\n dictFR[name]['nameFR'] = nameList\n DataDict = {}\n DataDict['Spike Statistics'] = {}\n DataDict['Spike Statistics']['Firing Rate'] = outputTableFR\n DataDict['Spike Statistics']['log2(ISI)'] = log2ISIoutput\n dictPlot = {}\n dictPlot['Fig:Spike Statistics'] = {}\n dictPlot['Fig:Spike Statistics']['Firing Rate'] = dictFR\n dictPlot['Fig:Spike Statistics']['ISI'] = dictIsi\n info = {}\n info['Firing Rate'] = {}\n info['Firing Rate']['Types'] = ['Binned Firing Rate']\n info['log2(ISI)'] = {}\n info['log2(ISI)']['Types'] = ['log2(ISI)']\n return DataDict,dictPlot,info\n","repo_name":"MFalappa/Molabeach3","sub_path":"libraries/custom_Analysis.py","file_name":"custom_Analysis.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"17585380386","text":"# https://leetcode.com/problems/reverse-nodes-in-even-length-groups/\nclass Solution:\n def reverseEvenLengthGroups(self, head: Optional[ListNode]) -> Optional[ListNode]:\n section_id = 2 # total number of groups\n cur = 
head.next\n count = 0 # number of elements in the last group\n \n while cur:\n if count < section_id:\n count += 1\n else:\n section_id += 1\n count = 1\n \n cur = cur.next\n \n if section_id == 2 and count < 2:\n return head\n \n last_section_reverse = False # flag whether the last group needs to be reversed\n if (count & 1) == 0: # the length of last section is even\n last_section_reverse = True\n \n\n cur = head.next\n section_count = 2 # the id of current group\n i = 1\n prev = head # the element ahead of the 1st element in an even-length group\n \n lim = (section_id - 2) if (section_id & 1) == 0 and not last_section_reverse else (section_id - 1)\n \n while section_count <= lim:\n if (section_count & 1) == 0: # if current group is even-length\n end = cur\n left = cur # mark the element on the left\n cur = cur.next\n\n while i < section_count:\n i += 1\n right = cur.next\n cur.next = left # make cur -> left\n left = cur\n cur = right\n \n # when it comes to the end of even group, connect prev -> left ->....-> end -> right\n prev.next = left\n end.next = cur\n cur = end\n \n else: # if current group is odd-length\n while i < section_count:\n i += 1\n cur = cur.next\n\n # if current group is odd-length, mark the last element in current group as prev, \n # which will connect to the last element of the next even group \n prev = cur\n \n # start a new group\n section_count += 1\n i = 1\n cur = cur.next \n\n \n if last_section_reverse:\n # if the second to last group is even-length,\n # mark the last element in the second to last group as prev\n if (section_id & 1):\n prev = end\n \n end = cur\n left = cur\n cur = cur.next\n\n while cur:\n right = cur.next\n cur.next = left\n left = cur\n cur = right\n\n prev.next = left\n end.next = None\n \n return head \n","repo_name":"liangym2014/anchunmao-Leetcode","sub_path":"2074. Reverse Nodes in Even Length Groups - medium.py","file_name":"2074. 
Reverse Nodes in Even Length Groups - medium.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74934536999","text":"# Day 16: Packet Decoder\nimport abc\nimport functools as ft\nimport itertools as it\nimport operator as op\nimport os\nimport sys\n\n# pylint: disable=c-extension-no-member\nsys.path.append(os.path.join(sys.path[0], '..', 'algorithms'))\nimport algorithms as algo # # noqa: E402, pylint: disable=wrong-import-position\n\n\nclass Packet(abc.ABC):\n def __init__(self, length, version, type_id):\n self.length = length\n self.version = version\n self.type_id = type_id\n\n\nclass Literal(Packet):\n def __init__(self, length, version, type_id, value):\n super().__init__(length, version, type_id)\n self.value = value\n\n def __repr__(self):\n return f\"Literal({self.length}, {self.version}, {self.type_id}, {self.value})\"\n\n\nclass Operator(Packet):\n def __init__(self, length, version, type_id, operands):\n super().__init__(length, version, type_id)\n self.operands = operands\n\n def __repr__(self):\n return f\"Operator({self.length}, {self.version}, {self.type_id}, {self.operands})\"\n\n\ndef parse(file_path):\n with open(file_path) as file:\n return file.readline().rstrip()\n\n\ndef part_one(text):\n packet = parse_packet(tokenizer(text))\n answer = sum_version_numbers(packet)\n print(f\"Part one: {answer}\")\n\n\ndef part_two(text):\n packet = parse_packet(tokenizer(text))\n answer = eval_packet(packet)\n print(f\"Part two: {answer}\")\n\n\ndef tokenizer(text):\n binary = algo.hex_to_bin(text)\n for bit in binary:\n yield bit\n\n\ndef extract_binary(lexer, n):\n return ''.join(it.islice(lexer, n))\n\n\ndef extract_int(lexer, n):\n return algo.bin_to_int(extract_binary(lexer, n))\n\n\ndef parse_packet(packet):\n version = extract_int(packet, 3)\n type_id = extract_int(packet, 3)\n packet_length = 6\n if type_id == 4:\n literal, length = parse_literal(packet)\n packet_length += length\n return Literal(packet_length, version, type_id, literal)\n else:\n operator, length = parse_operator(packet)\n packet_length += length\n return Operator(packet_length, version, type_id, operator)\n\n\ndef parse_literal(text):\n value = ''\n length = 0\n while True:\n last = next(text) == '0'\n value += extract_binary(text, 4)\n length += 5\n if last:\n break\n return algo.bin_to_int(value), length\n\n\ndef parse_operator(text):\n mode = next(text)\n length = 1\n packets = []\n if mode == '0':\n subpackets_length = extract_int(text, 15)\n length += 15 + subpackets_length\n while subpackets_length > 0:\n packet = parse_packet(text)\n packets.append(packet)\n subpackets_length -= packet.length\n else:\n assert mode == '1'\n number_of_packets = extract_int(text, 11)\n length += 11\n for _ in range(number_of_packets):\n packet = parse_packet(text)\n packets.append(packet)\n length += packet.length\n return packets, length\n\n\ndef sum_version_numbers(packet):\n if isinstance(packet, Literal):\n return packet.version\n else:\n return packet.version + sum(sum_version_numbers(operand) for operand in packet.operands)\n\n\ndef eval_packet(packet):\n if isinstance(packet, Literal):\n return packet.value\n else:\n assert isinstance(packet, Operator)\n type_to_operation = {0: op.add,\n 1: op.mul,\n 2: min,\n 3: max,\n 5: op.gt,\n 6: op.lt,\n 7: op.eq,\n }\n operands = [eval_packet(operand) for operand in packet.operands]\n return ft.reduce(type_to_operation[packet.type_id], operands)\n\n\ndef main():\n text 
= parse('16.txt')\n part_one(text)\n part_two(text)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fredrik-rose/AdventOfCode2021","sub_path":"adventofcode/day16/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32116451133","text":"\"\"\"Module to add switch pair id information to 'InterSwitch links', 'InterFabric links' customer report tables\"\"\"\n\n\nimport pandas as pd\n\nimport utilities.database_operations as dbop\nimport utilities.dataframe_operations as dfop\nimport utilities.module_execution as meop\nimport utilities.report_operations as report\nimport utilities.servicefile_operations as sfop\n\nfrom .isl_statistics import isl_statistics\n\n\ndef isl_sw_pair_update(isl_aggregated_df, fcredge_aggregated_df, switch_pair_df, project_constants_lst):\n \"\"\"Main function to add switch pair ID to ISL and IFR tables\"\"\"\n\n # imported project constants required for module execution\n project_steps_df, max_title, io_data_names_df, _, report_headers_df, report_columns_usage_sr, *_ = project_constants_lst\n\n # data titles obtained after module execution (output data)\n # data titles which module is dependent on (input data)\n data_names, analyzed_data_names = dfop.list_from_dataframe(io_data_names_df, 'isl_sw_pair_analysis_out', 'isl_sw_pair_analysis_in')\n # module information\n meop.show_module_info(project_steps_df, data_names)\n # read data from database if they were saved on previos program execution iteration\n data_lst = dbop.read_database(project_constants_lst, *data_names)\n \n # force run when any output data from data_lst is not found in database or \n # procedure execution explicitly requested (force_run flag is on) for any output or input data\n force_run = meop.verify_force_run(data_names, data_lst, project_steps_df, \n max_title, analyzed_data_names)\n if force_run:\n # data imported from init file (regular expression patterns) to extract values from data columns\n pattern_dct, _ = sfop.regex_pattern_import('common_regex', max_title)\n \n # current operation information string\n info = f'Updating ISL and IFL tables'\n print(info, end =\" \")\n\n isl_aggregated_df = dfop.dataframe_join(isl_aggregated_df, switch_pair_df, ['switchWwn', 'switchPair_id'], 1)\n if not fcredge_aggregated_df.empty:\n fcredge_aggregated_df = dfop.dataframe_join(fcredge_aggregated_df, switch_pair_df, ['switchWwn', 'switchPair_id'], 1)\n\n isl_statistics_df = isl_statistics(isl_aggregated_df, pattern_dct)\n # after finish display status\n meop.status_info('ok', max_title, len(info)) \n\n isl_report_df = report.generate_report_dataframe(isl_aggregated_df, report_headers_df, report_columns_usage_sr, data_names[3]) \n isl_report_df = report.translate_values(isl_report_df, translate_dct={'Yes': 'Да', 'No': 'Нет'})\n isl_report_df = dfop.drop_column_if_all_na(isl_report_df, columns=['Идентификатор транка', 'Deskew', 'Master', 'Идентификатор IFL'])\n # check if IFL table required\n if not fcredge_aggregated_df.empty:\n # ifl_report_df, = dataframe_segmentation(fcredge_df, [data_names[3]], report_columns_usage_sr, max_title)\n ifl_report_df = report.generate_report_dataframe(fcredge_aggregated_df, report_headers_df, report_columns_usage_sr, data_names[4]) \n else:\n ifl_report_df = fcredge_aggregated_df.copy()\n\n isl_statistics_report_df = isl_statistics_report(isl_statistics_df, report_headers_df, report_columns_usage_sr)\n\n # create list with partitioned 
DataFrames\n data_lst = [isl_aggregated_df, isl_statistics_df, fcredge_aggregated_df, isl_report_df, ifl_report_df, isl_statistics_report_df]\n # writing data to sql\n dbop.write_database(project_constants_lst, data_names, *data_lst) \n # verify if loaded data is empty and replace information string with empty DataFrame\n else:\n data_lst = dbop.verify_read_data(max_title, data_names, *data_lst)\n isl_aggregated_df, isl_statistics_df, *_ = data_lst\n\n # save data to service file if it's required\n for data_name, data_frame in zip(data_names, data_lst):\n report.dataframe_to_excel(data_frame, data_name, project_constants_lst)\n return isl_aggregated_df, isl_statistics_df\n\n\ndef isl_statistics_report(isl_statistics_df, report_headers_df, report_columns_usage_sr):\n \"\"\"Function to create report table out of isl_statistics_df DataFrame\"\"\"\n\n isl_statistics_report_df = pd.DataFrame()\n\n if not isl_statistics_df.empty:\n chassis_column_usage = report_columns_usage_sr.get('chassis_info_usage')\n isl_statistics_report_df = isl_statistics_df.copy()\n # identify columns to drop and drop columns\n drop_columns = ['switchWwn', 'Connected_switchWwn', 'sort_column_1', 'sort_column_2', 'Connection_ID']\n if not chassis_column_usage:\n drop_columns.append('chassis_name')\n drop_columns = [column for column in drop_columns if column in isl_statistics_df.columns]\n isl_statistics_report_df.drop(columns=drop_columns, inplace=True)\n\n # translate values in columns and headers\n translated_columns = [column for column in isl_statistics_df.columns if 'note' in column and isl_statistics_df[column].notna().any()]\n translated_columns.extend(['Fabric_name', 'Trunking_lic_both_switches'])\n isl_statistics_report_df = report.translate_dataframe(isl_statistics_report_df, report_headers_df, \n 'Статистика_ISL_перевод', translated_columns)\n # drop empty columns\n isl_statistics_report_df.dropna(axis=1, how='all', inplace=True)\n # remove zeroes to clean view\n dfop.drop_zero(isl_statistics_report_df)\n return isl_statistics_report_df","repo_name":"KonstantinAlxVlasenko/san_report_automation","sub_path":"san_analysis/isl/isl_sw_pair.py","file_name":"isl_sw_pair.py","file_ext":"py","file_size_in_byte":5678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43191764771","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom time import sleep\nimport json\nfrom os import path\nimport certifi\n\nbase_url = 'https://new.myubam.com'\n\nurls = [base_url]\nproducts = set()\nvisited = set()\n\nif path.exists('product_links.json'):\n with open('product_links.json', 'r') as infile:\n products = json.load(infile)\n print(len(products), \"products listed. 
Delete product_links.json to reset.\")\nelse:\n    for url in urls:\n        if url in visited:\n            continue\n\n        print(\"Visiting:\", url)\n        res = requests.get(url, verify=certifi.where())\n        soup = BeautifulSoup(res.text, features='html.parser')\n\n        visited.add(url)\n\n        for link in soup.find_all('a', href=True):\n            href = link['href']\n            if len(href) > 1 and href[0] == '/':\n                href = base_url + href\n\n            if '/p/' in href:\n                products.add(href)\n            elif '/c/' in href:\n                urls.append(href)\n        \n        sleep(5) # don't DDOS the poor folks\n\n    with open('product_links.json', 'w') as outfile:\n        json.dump([p for p in products], outfile)\n\n    with open('visited_links.json', 'w') as outfile:\n        json.dump([v for v in visited], outfile)\n","repo_name":"mdesenfants/probam","sub_path":"usborne.py","file_name":"usborne.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29158596976","text":"# leetcode-937-重新排列日志文件.py\n# Accepted users: 4\n# Attempting users: 7\n# Accepted submissions: 4\n# Total submissions: 9\n# Difficulty: Easy\n# You are given an array of logs. Each log is a space-delimited string of words.\n\n# For each log, the first word is an alphanumeric identifier. Then, either:\n\n# each word after the identifier consists only of lowercase letters, or;\n# each word after the identifier consists only of digits.\n# We call these two kinds of logs letter-logs and digit-logs. It is guaranteed that each log has at least one word after its identifier.\n\n# Reorder the logs so that all letter-logs come before any digit-log. Letter-logs are sorted lexicographically by their content, ignoring the identifier; the identifier is only used to break ties. Digit-logs keep their original relative order.\n\n# Return the final order of the logs.\n\n \n\n# Example:\n\n# Input: [\"a1 9 2 3 1\",\"g1 act car\",\"zo4 4 7\",\"ab1 off key dog\",\"a8 act zoo\"]\n# Output: [\"g1 act car\",\"a8 act zoo\",\"ab1 off key dog\",\"a1 9 2 3 1\",\"zo4 4 7\"]\n \n\n# Constraints:\n\n# 0 <= logs.length <= 100\n# 3 <= logs[i].length <= 100\n# logs[i] is guaranteed to have an identifier and at least one word after it.\n\n\n\nclass Solution:\n    def reorderLogFiles(self, logs):\n        \"\"\"\n        :type logs: List[str]\n        :rtype: List[str]\n        \"\"\"\n        charlogs = []\n        numlogs = []\n\n        for log in logs:\n            tmp = log.split(' ')\n            if tmp[1].isnumeric():\n                numlogs.append(log)\n            else:\n                charlogs.append((\" \".join(tmp[1:]),log))\n        charlogs.sort()\n\n        return [charlog[1] for charlog in charlogs]+numlogs","repo_name":"ZX1209/gl-algorithm-practise","sub_path":"leetcode-gl-python/leetcode-937-重新排列日志文件.py","file_name":"leetcode-937-重新排列日志文件.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26911284894","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\n\nprint(torch.__version__)\ntry:\n    from torch.hub import load_state_dict_from_url\nexcept ImportError:\n    from torch.utils.model_zoo import load_url as load_state_dict_from_url\n    \n__all__ = ['resnet34']\nmodel_urls = {\n    'resnet34':'https://download.pytorch.org/models/resnet34-333f7ec4.pth'\n}\n# model_path = '../resnet50-19c8e357.pth'\n\nimport sys \nsys.path.append(\"..\") \n\nfrom models.fots import conv, Decoder\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n    # 3x3 convolution with padding\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation,\n                     groups=groups, bias=False, dilation=dilation)\n\ndef conv1x1(in_planes, out_planes, stride=1):\n    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\ndef resnet34(pretrained=False, progress=True, **kwargs):\n    return _resnet('resnet34', BasicBlock, [3,4,6,3], pretrained, progress, **kwargs)\n\ndef _resnet(arch, block, layers, pretrained, progress, **kwargs):\n    model = _ResNet(block, layers, **kwargs)\n    if 
pretrained:\n        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)\n        model.load_state_dict(state_dict)\n    return model\n\nclass BasicBlock(nn.Module):\n    \n    expansion = 1\n    \n    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n                base_width=64, dilation=1, norm_layer=None):\n        super(BasicBlock, self).__init__()\n        if norm_layer is None:\n            norm_layer = nn.BatchNorm2d\n        if groups != 1 or base_width != 64:\n            raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n        if dilation > 1:\n            raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n        self.conv1 = conv3x3(inplanes, planes, stride)\n        self.bn1 = norm_layer(planes)\n        self.relu = nn.ReLU(inplace=False)\n        self.conv2 = conv3x3(planes, planes)\n        self.bn2 = norm_layer(planes)\n        self.downsample = downsample\n        self.stride = stride\n    \n    def forward(self, x):\n        identity = x\n        \n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n        out = self.conv2(out)\n        out = self.bn2(out)\n        if self.downsample is not None:\n            identity = self.downsample(x)\n        out += identity\n        out = self.relu(out)\n        \n        return out\n\nclass _ResNet(nn.Module):\n    def __init__(self, block, layers, num_classes=1000, groups=1, width_per_group=64,\n                replace_stride_with_dilation=None, norm_layer=None):\n        super(_ResNet, self).__init__()\n        if norm_layer is None:\n            norm_layer = nn.BatchNorm2d\n        self._norm_layer = norm_layer\n\n        \"\"\"\n        # Original code\n        self.inplanes = 64\n        \"\"\"\n\n        # Set new inplanes as 32\n        self.inplanes = 32\n        self.dilation = 1\n        if replace_stride_with_dilation is None:\n            replace_stride_with_dilation = [False, False, False]\n        if len(replace_stride_with_dilation) != 3:\n            raise ValueError(\"replace_stride_with_dilation should be None or a 3-element tuple\")\n        self.groups = groups\n        self.base_width = width_per_group\n        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)\n        self.bn1 = norm_layer(self.inplanes)\n        self.relu = nn.ReLU(inplace=False)\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n        \"\"\"\n        # Original code\n        self.layer1 = self._make_layer(block, 64, layers[0])\n        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])\n        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])\n        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])\n        \"\"\"\n\n        # Set layers with new channels\n        self.layer1 = self._make_layer(block, 32, layers[0])\n        self.layer2 = self._make_layer(block, 64, layers[1], stride=2, dilate=replace_stride_with_dilation[0])\n        self.layer3 = self._make_layer(block, 128, layers[2], stride=2, dilate=replace_stride_with_dilation[1])\n        self.layer4 = self._make_layer(block, 256, layers[3], stride=2, dilate=replace_stride_with_dilation[2])\n        self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n        \n        # layer4 now ends at 256 channels (halved widths), so the classifier input must match\n        self.fc = nn.Linear(256 * block.expansion, num_classes)\n        \n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n                nn.init.constant_(m.weight, 1)\n                nn.init.constant_(m.bias, 0)\n    \n    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n        norm_layer = self._norm_layer\n        downsample = None\n        previous_dilation = self.dilation\n        if dilate:\n            self.dilation *= stride\n            stride = 1\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                
conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n \n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, \n previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width,\n dilation=self.dilation, norm_layer=norm_layer))\n return nn.Sequential(*layers)\n \n def _forward_impl(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n \n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n \n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n \n return x\n \n def forward(self, x):\n return self._forward_impl(x)\n\nclass FOTS_pruned(nn.Module):\n def __init__(self, crop_height=640):\n super().__init__()\n self.crop_height = crop_height\n self.resnet = resnet34(pretrained=False)\n self.conv1 = nn.Sequential(\n self.resnet.conv1,\n self.resnet.bn1,\n self.resnet.relu,\n ) # 32\n self.encoder1 = self.resnet.layer1 # 32\n self.encoder2 = self.resnet.layer2 # 64\n self.encoder3 = self.resnet.layer3 # 128\n self.encoder4 = self.resnet.layer4 # 256\n\n self.center = nn.Sequential(\n conv(256, 256, stride=2),\n conv(256, 512)\n )\n self.decoder4 = Decoder(512, 256)\n self.decoder3 = Decoder(512, 128)\n self.decoder2 = Decoder(256, 64)\n self.decoder1 = Decoder(128, 32)\n self.remove_artifacts = conv(64, 64)\n\n \"\"\"\n # Original code \n self.center = nn.Sequential(\n conv(512, 512, stride=2),\n conv(512, 1024)\n )\n self.decoder4 = Decoder(1024, 512)\n self.decoder3 = Decoder(1024, 256)\n self.decoder2 = Decoder(512, 128)\n self.decoder1 = Decoder(256, 64)\n self.remove_artifacts = conv(128, 64)\n \"\"\"\n\n self.confidence = conv(64, 1, kernel_size=1, padding=0, bn=False, relu=False)\n self.distances = conv(64, 4, kernel_size=1, padding=0, bn=False, relu=False)\n self.angle = conv(64, 1, kernel_size=1, padding=0, bn=False, relu=False)\n\n def forward(self, x):\n# print(\"input shape \", x.shape)\n #x = self.conv1(x)\n x = self.resnet.conv1(x)\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = F.max_pool2d(x, kernel_size=2, stride=2)\n \n e1 = self.encoder1(x)\n e2 = self.encoder2(e1)\n e3 = self.encoder3(e2)\n e4 = self.encoder4(e3)\n \n f = self.center(e4)\n\n d4 = self.decoder4(f, e4)\n d3 = self.decoder3(d4, e3)\n d2 = self.decoder2(d3, e2)\n d1 = self.decoder1(d2, e1)\n\n final = self.remove_artifacts(d1)\n\n confidence = self.confidence(final)\n distances = self.distances(final)\n distances = torch.sigmoid(distances) * self.crop_height\n angle = self.angle(final)\n angle = torch.sigmoid(angle) * np.pi / 2\n return confidence, distances, angle","repo_name":"wuzhenyusjtu/LPCVC20-VideoTextSpotting","sub_path":"lpcv_fots/models/pruned_fots.py","file_name":"pruned_fots.py","file_ext":"py","file_size_in_byte":8845,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"41196325551","text":"from __future__ import print_function\r\n\r\nimport math\r\n\r\ntry:\r\n raw_input\r\nexcept:\r\n raw_input = input\r\n\r\nimport numpy as np\r\nfrom matplotlib import gridspec\r\nimport pickle\r\nimport time\r\nimport datetime\r\nimport os\r\nfrom PIL import Image\r\nimport json\r\nimport sys\r\nimport argparse\r\nimport imageio\r\n\r\nimsave = imageio.imsave\r\nimport itertools\r\nimport copy\r\nimport pywt\r\nfrom pywt import dwt2, 
wavedec2\r\nfrom keras.applications.resnet50 import ResNet50\r\nfrom keras.preprocessing import image\r\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\r\n\r\nRESNET_MEAN = np.array([103.939, 116.779, 123.68])\r\n\r\n\r\ndef preprocess(sample_path):\r\n    sample_path = 'imagenet/' + sample_path\r\n    img = image.load_img(sample_path, target_size=(224, 224))\r\n    x = image.img_to_array(img)\r\n    return x\r\n\r\n\r\ndef preprocess_target(sample_path):\r\n    img = image.load_img(sample_path, target_size=(224, 224))\r\n    x = image.img_to_array(img)\r\n    return x\r\n\r\n\r\ndef image_process_for_save(x):\r\n    x = np.expand_dims(x, axis=0)\r\n    x = preprocess_input(x)\r\n    return x\r\n\r\n\r\ndef SLIA(model,\r\n         sample,\r\n         clip_max=1,\r\n         clip_min=0,\r\n         constraint='l2',\r\n         num_iterations=76,\r\n         gamma=1.0,\r\n         target_label=None,\r\n         target_sample=None,\r\n         max_num_evals=1e4,\r\n         init_num_evals=100,\r\n         queries=0,\r\n         verbose=True):\r\n    data_model = 'imagenet+resnet50'\r\n    params = {'clip_max': clip_max, 'clip_min': clip_min,\r\n              'target_label': target_label,\r\n              'constraint': constraint,\r\n              'num_iterations': num_iterations,\r\n              'gamma': gamma,\r\n              'd': int(np.prod(sample.shape)) // 4,\r\n              'max_num_evals': max_num_evals,\r\n              'init_num_evals': init_num_evals,\r\n              'queries': queries,\r\n              'verbose': verbose,\r\n              }\r\n    # Set binary search threshold.\r\n    if params['constraint'] == 'l2':\r\n        params['theta'] = params['gamma'] / params['d']\r\n    else:\r\n        params['theta'] = params['gamma'] / (params['d'] ** 2)\r\n\r\n    params['sample'] = sample.astype('float32') / 255.0\r\n    params['shape'] = sample.shape\r\n    if target_sample is not None:\r\n        params['target_sample'] = target_sample.astype('float32') / 255.0\r\n    else:\r\n        params['target_sample'] = None\r\n    sample = np.expand_dims(sample, axis=0)\r\n    sample = preprocess_input(sample)  # RGB to BGR.\r\n    params['original_label'] = np.argmax(model.predict(sample))  # in order to get the original_label.\r\n    \r\n    # Initialize.\r\n    if params['target_sample'] is None:\r\n        perturbed = initialize(model, params['sample'], params)\r\n    else:\r\n        perturbed = params['target_sample']\r\n    dist = compute_distance(perturbed, params['sample'], constraint)\r\n    print(dist)\r\n    # Project the initialization to the boundary.\r\n    perturbed, dist_post_update = binary_search_batch(params['sample'], np.expand_dims(perturbed, 0),\r\n                                                      model,\r\n                                                      params)\r\n\r\n    dist = compute_distance(perturbed, params['sample'], constraint)\r\n    print(dist)\r\n    for j in np.arange(params['num_iterations']):\r\n        params['cur_iter'] = j + 1\r\n\r\n        # Choose delta.\r\n        delta = select_delta(params, dist_post_update)\r\n\r\n        # Choose number of evaluations (0.25 instead of 1/4 so Python 2 integer division cannot zero the exponent).\r\n        num_evals = int(params['init_num_evals'] * np.power(j + 1, 0.25))\r\n\r\n        # approximate gradient.\r\n        gradf = approximate_gradient_DWT(model, perturbed, num_evals,\r\n                                         delta, params)\r\n        if params['constraint'] == 'linf':\r\n            update = np.sign(gradf)\r\n        else:\r\n            update = gradf\r\n\r\n        # search step size.\r\n        epsilon = geometric_progression_for_stepsize_DWT(perturbed,\r\n                                                         update, dist, model, params)\r\n\r\n        # Update the sample (phi() in the step-size search left the accepted candidate in params['adversarial_example']).\r\n        perturbed = clip_image(params['adversarial_example'],\r\n                               clip_min, clip_max)\r\n\r\n        # Binary search to return to the boundary.\r\n        perturbed, dist_post_update = binary_search_batch(params['sample'],\r\n                                                          perturbed[None], model, params)\r\n\r\n        # compute new distance.\r\n        dist = compute_distance(perturbed, params['sample'], constraint)\r\n        if verbose:\r\n            print('iteration: {:d}, l2 distance {:.4E}, queries: 
{:d}'.format(j + 1, dist, params['queries']))\r\n perturbed_image2 = np.copy(perturbed) * 255\r\n imsave('{}/DWT/{}.jpg'.format(data_model, j+1), perturbed_image2.astype(np.uint8))\r\n\r\ndef approximate_gradient_DWT(model, sample, num_evals, delta, params):\r\n clip_max, clip_min = params['clip_max'], params['clip_min']\r\n # sample_points' shape = (100, 224 ,224, 3)\r\n sample_points_shape = [num_evals] + list((224, 224, 3))\r\n sample_points = np.random.randn(*sample_points_shape)\r\n # low_fre_perturbs' shape = (100, 112 ,112, 3)\r\n low_fre_perturbs_shape = [num_evals] + list((112, 112, 3)) \r\n low_fre_perturbs = np.random.randn(*low_fre_perturbs_shape)\r\n for i in range(3):\r\n\r\n coeffs = dwt2(sample[:, :, i], 'haar')\r\n cA, (cH, cV, cD) = coeffs\r\n noise_shape = [num_evals] + list(cA.shape)\r\n if params['constraint'] == 'l2':\r\n rv = np.random.randn(*noise_shape)\r\n elif params['constraint'] == 'linf':\r\n rv = np.random.uniform(low=-1 * np.mean(cA), high=1 * np.mean(cA), size=noise_shape)\r\n rv = rv / np.sqrt(np.sum(rv ** 2, axis=(1, 2), keepdims=True))\r\n # rv's shape = (100, 112 ,112)\r\n t = rv * np.mean(cA)\r\n new_cA = cA + t\r\n low_fre_perturbs[:, :, :, i] = rv\r\n for j in range(num_evals): \r\n coeffs = new_cA[j], (cH, cV, cD)\r\n sample_points[j, :, :, i] = pywt.idwt2(coeffs, 'haar')\r\n\r\n # query the model.\r\n decisions = decision_function(model, sample_points, params)\r\n params['queries'] += num_evals - 1\r\n decision_shape = [len(decisions)] + [1] * len(params['shape'])\r\n fval = 2 * decisions.astype(float).reshape(decision_shape) - 1.0\r\n\r\n # Baseline subtraction (when fval differs)\r\n if np.mean(fval) == 1.0: # label changes.\r\n gradf = np.mean(low_fre_perturbs, axis=0)\r\n elif np.mean(fval) == -1.0: # label not change.\r\n gradf = - np.mean(low_fre_perturbs, axis=0)\r\n else:\r\n fval -= np.mean(fval)\r\n gradf = np.mean(fval * low_fre_perturbs, axis=0)\r\n\r\n # Get the gradient direction.\r\n gradf = gradf / np.linalg.norm(gradf)\r\n\r\n return gradf\r\n\r\n\r\ndef decision_function(model, image, params):\r\n \"\"\"\r\n Decision function output 1 on the desired side of the boundary,\r\n 0 otherwise.\r\n \"\"\"\r\n params['queries'] += 1\r\n image = clip_image(image, params['clip_min'], params['clip_max'])\r\n image = image.astype('float') * 255.0\r\n image = preprocess_input(image)\r\n prob = model.predict(image)\r\n if params['target_label'] is None:\r\n return np.argmax(prob, axis=1) != params['original_label']\r\n else:\r\n return np.argmax(prob, axis=1) == params['target_label']\r\n\r\n\r\ndef clip_image(image, clip_min, clip_max):\r\n # Clip an image, or an image batch, with upper and lower threshold.\r\n return np.minimum(np.maximum(clip_min, image), clip_max)\r\n\r\n\r\ndef compute_distance(x1, x2, constraint='l2'):\r\n # Compute the distance between two images.\r\n if constraint == 'l2':\r\n return np.linalg.norm(x1 - x2)\r\n elif constraint == 'linf':\r\n return np.max(abs(x1 - x2))\r\n\r\n\r\ndef project(original_image, perturbed_images, alphas, params):\r\n alphas_shape = [len(alphas)] + [1] * len(params['shape'])\r\n alphas = alphas.reshape(alphas_shape)\r\n if params['constraint'] == 'l2':\r\n return (1 - alphas) * original_image + alphas * perturbed_images\r\n elif params['constraint'] == 'linf':\r\n out_images = clip_image(\r\n perturbed_images,\r\n original_image - alphas,\r\n original_image + alphas\r\n )\r\n return out_images\r\n\r\n\r\ndef binary_search_batch(original_image, perturbed_images, model, params):\r\n \"\"\" 
Binary search to approach the boundary. \"\"\"\r\n\r\n    # Compute the distance between each perturbed image and the original image.\r\n    dists_post_update = np.array([\r\n        compute_distance(\r\n            original_image,\r\n            perturbed_image,\r\n            params['constraint']\r\n        )\r\n        for perturbed_image in perturbed_images])\r\n\r\n    # Choose upper thresholds in binary searches based on constraint.\r\n    if params['constraint'] == 'linf':\r\n        highs = dists_post_update\r\n        # Stopping criteria.\r\n        thresholds = np.minimum(dists_post_update * params['theta'], params['theta'])\r\n    else:\r\n        highs = np.ones(len(perturbed_images))\r\n        thresholds = params['theta']\r\n\r\n    lows = np.zeros(len(perturbed_images))\r\n\r\n    # Bisect until the search interval is within the threshold.\r\n    while np.max((highs - lows) / thresholds) > 1:\r\n        # projection to mids.\r\n        mids = (highs + lows) / 2.0\r\n        mid_images = project(original_image, perturbed_images, mids, params)\r\n\r\n        # Update highs and lows based on model decisions.\r\n        mid_images = np.squeeze(mid_images)\r\n        decisions = decision_function(model, mid_images[None], params)\r\n\r\n        lows = np.where(decisions == 0, mids, lows)\r\n        highs = np.where(decisions == 1, mids, highs)\r\n\r\n    out_images = project(original_image, perturbed_images, highs, params)\r\n\r\n    # Compute distance of the output image to select the best choice.\r\n    # (only used when stepsize_search is grid_search.)\r\n    dists = np.array([\r\n        compute_distance(\r\n            original_image,\r\n            out_image,\r\n            params['constraint']\r\n        )\r\n        for out_image in out_images])\r\n    idx = np.argmin(dists)\r\n\r\n    dist = dists_post_update[idx]\r\n    out_image = out_images[idx]\r\n    return out_image, dist\r\n\r\n\r\ndef initialize(model, sample, params):\r\n    sample_point = np.zeros_like(sample)\r\n    low_fre_perturb_shape = list((112, 112)) \r\n    while True:\r\n        for i in range(3):\r\n            coeffs = dwt2(sample[:, :, i], 'haar')\r\n            cA, (cH, cV, cD) = coeffs\r\n            new_cA = np.random.uniform(cA.min(), cA.max(), size=low_fre_perturb_shape)\r\n            coeffs = new_cA, (cH, cV, cD)\r\n            sample_point[:, :, i] = pywt.idwt2(coeffs, 'haar')\r\n\r\n        # query the model.\r\n        decision = decision_function(model, sample_point[None], params)\r\n        if decision:\r\n            print('adversarial..........')\r\n            break\r\n    dist = compute_distance(sample_point, sample, params['constraint'])\r\n    print(dist)\r\n\r\n    low = 0.0\r\n    high = 1.0\r\n    while high - low > 0.001:\r\n        mid = (high + low) / 2.0\r\n        blended = (1 - mid) * sample + mid * sample_point\r\n        success = decision_function(model, blended[None], params)\r\n        if success:\r\n            high = mid\r\n        else:\r\n            low = mid\r\n    perturbed_image = (1 - high) * sample + high * sample_point\r\n    dist = compute_distance(perturbed_image, sample, params['constraint'])\r\n    perturbed_image2 = np.copy(perturbed_image) * 255\r\n    data_model = 'imagenet+resnet50'  # data_model in SLIA() is a local, so define it here before use\r\n    imsave('{}/DWT/{}.jpg'.format(data_model, 0), perturbed_image2.astype(np.uint8))\r\n    print('iteration: {:d}, {:s} distance {:.4E}, queries: {:d}'.format(1, params['constraint'], dist, params['queries']))\r\n    return perturbed_image\r\n\r\ndef geometric_progression_for_stepsize_DWT(x, update, dist, model, params):\r\n    \"\"\"\r\n\tGeometric progression to search for stepsize.\r\n\tKeep decreasing stepsize by half until reaching\r\n\tthe desired side of the boundary.\r\n\t\"\"\"\r\n    epsilon = dist / np.sqrt(params['cur_iter']) * 4\r\n\r\n    def phi(epsilon):\r\n        # new = x + epsilon * update\r\n        new_sample = np.zeros_like(x)\r\n        for i in range(3):\r\n\r\n            cA, (cH, cV, cD) = dwt2(x[:, :, i], 'haar')\r\n\r\n            # coeffs = wavedec2(x[:, :, i], 'haar', level=2)\r\n            # cA2, (cH2, cV2, cD2), (cH1, 
cV1, cD1) = coeffs\r\n            new_cA = cA + epsilon * update[:, :, i]\r\n            \r\n            coeffs = new_cA, (cH, cV, cD)\r\n            new_sample[:, :, i] = pywt.idwt2(coeffs, 'haar')\r\n\r\n        success = decision_function(model, new_sample[None], params)\r\n        params['adversarial_example'] = new_sample\r\n        return success\r\n\r\n    while not phi(epsilon):\r\n        epsilon /= 2.0\r\n\r\n    return epsilon\r\n\r\n\r\ndef select_delta(params, dist_post_update):\r\n    \"\"\"\r\n    Choose the delta at the scale of distance\r\n    between x and perturbed sample.\r\n\r\n    \"\"\"\r\n    if params['cur_iter'] == 1:\r\n        delta = 0.1 * (params['clip_max'] - params['clip_min'])\r\n    else:\r\n        if params['constraint'] == 'l2':\r\n            delta = np.sqrt(params['d']) * params['theta'] * dist_post_update * 4\r\n        elif params['constraint'] == 'linf':\r\n            delta = params['d'] * params['theta'] * dist_post_update * 4\r\n\r\n    return delta\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    model = ResNet50(weights='imagenet')\r\n    target_sample = preprocess_target('images/original/290.00002646.jpg')\r\n    temp_sample = np.copy(target_sample)\r\n    temp_sample = np.expand_dims(temp_sample, axis=0)\r\n    temp_sample = preprocess_input(temp_sample)  # RGB to BGR\r\n    target_label = np.argmax(model.predict(temp_sample))  # get the label of the target image.\r\n    filepath = os.listdir('imagenet')\r\n    for i, sample in enumerate(filepath):\r\n        sample = preprocess(sample)\r\n\r\n        parser = argparse.ArgumentParser()\r\n\r\n        parser.add_argument('--constraint', type=str,\r\n                            choices=['l2', 'linf'],\r\n                            default='l2')\r\n\r\n        parser.add_argument('--attack_type', type=str,\r\n                            choices=['targeted', 'untargeted'],\r\n                            default='untargeted')\r\n\r\n        parser.add_argument('--num_iterations', type=int,\r\n                            default=76)\r\n\r\n        args = parser.parse_args()\r\n        dict_a = vars(args)\r\n        if args.attack_type == 'targeted':\r\n            target_label = target_label\r\n            target_sample = target_sample\r\n        else:\r\n            target_label = None\r\n            target_sample = None\r\n        print('attacking the {}th sample...'.format(i))\r\n        SLIA(model,\r\n             sample,\r\n             clip_max=1,\r\n             clip_min=0,\r\n             constraint=args.constraint,\r\n             num_iterations=args.num_iterations,\r\n             gamma=1.0,\r\n             target_label=target_label,\r\n             target_sample=target_sample,\r\n             max_num_evals=1e4,\r\n             init_num_evals=100,\r\n             queries=0)\r\n\r\n","repo_name":"GZHU-DVL/SLIA","sub_path":"SLIA/SLIA.py","file_name":"SLIA.py","file_ext":"py","file_size_in_byte":15020,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"26023579446","text":"import sys\nimport sexpParser\nimport os\n\n\n# reads file in as RTL by parsing as an s-expression\n# Input: name of file (should be format of .c.212r.expand)\n# Output: file descriptor that can be iterated through\ndef readRTL(rtlFileName):\n    rtlFile = open(rtlFileName, \"r\")\n    rtl = rtlFile.read()\n    rtl = \"(\" + rtl + \")\"\n    rtlInput = sexpParser.parse_sexp(rtl)\n    return rtlInput\n\n\ndef setRegInsn(rtlFileName, line, maxreg, lookupTable, mapTable):\n    regnum = line[5][1][1]\n    if (regnum > 15 and regnum != 100):\n        offset = lookupTable[regnum]\n    else:\n        # hard registers and virtual register 100 both map to offset 0\n        offset = 0\n    if 'const_int' in line[5][2][0]:\n        text = \"mov r0, #\" + str(line[5][2][1])\n        text2 = \"str r0, [fp, #\" + str(offset) + \"]\"\n        writeARMInsn(rtlFileName, text)\n        writeARMInsn(rtlFileName, text2)\n    elif 'mem' in line[5][2][0]:\n        if 'virtual' in line[5][2][1][1][2]:\n            text = \"ldr r0, [fp, #\" + str(\n                lookupTable[mapTable[line[5][2][1][2][1]]]) + \"]\"\n            text2 = \"str r0, [fp, #\" + str(offset) + \"]\"\n            
writeARMInsn(rtlFileName, text)\n writeARMInsn(rtlFileName, text2)\n elif ('plus' in line[5][2][0] or 'minus' in line[5][2][0] or 'mult' in line[5][2][0]):\n arg1 = line[5][2][1][1]\n arg2 = line[5][2][2][1]\n offset1 = lookupTable[arg1]\n text = \"ldr r1, [fp, #\" + str(offset1) + \"]\"\n if (\"const_int\" in line[5][2][2][0]): # adding constants\n text2 = \"mov r2, #\" + str(line[5][2][2][1])\n else: # adding registers\n offset2 = lookupTable[arg2]\n text2 = \"ldr r2, [fp, #\" + str(offset2) + \"]\"\n\n if ('plus' in line[5][2][0]):\n text3 = 'add r0, r1, r2'\n elif ('minus' in line[5][2][0]):\n text3 = 'sub r0, r1, r2'\n else:\n text3 = 'mul r0, r1, r2'\n text4 = \"str r0, [fp, #\" + str(offset) + \"]\"\n writeARMInsn(rtlFileName, text)\n writeARMInsn(rtlFileName, text2)\n writeARMInsn(rtlFileName, text3)\n writeARMInsn(rtlFileName, text4)\n elif 'reg' in line[5][2][0]:\n offset1 = lookupTable[line[5][2][1]]\n text = \"ldr r1, [fp, #\" + str(offset1) + \"]\"\n text2 = \"mov r0, r1\"\n writeARMInsn(rtlFileName, text)\n writeARMInsn(rtlFileName, text2)\n if (\"\" in line[5][1]):\n text3 = \"str r0, [fp, #\" + str(offset) + \"]\"\n writeARMInsn(rtlFileName, text3)\n elif (\"compare\" in line[5][2][0]):\n offset1 = lookupTable[line[5][2][1][1]]\n text = \"ldr r1, [fp, #\" + str(offset1) + \"]\"\n writeARMInsn(rtlFileName, text)\n if (\"const_int\" in line[5][2][2][0]): # if constant\n text2 = \"cmp r1, #\" + str(line[5][2][2][1])\n writeARMInsn(rtlFileName, text2)\n else: # else register\n offset2 = lookupTable[line[5][2][2][1]]\n text2 = \"ldr r2, [fp, #\" + str(offset2) + \"]\"\n text3 = \"cmp r1, r2\"\n writeARMInsn(rtlFileName, text2)\n writeARMInsn(rtlFileName, text3)\n\n\n\ndef jumpInsn(rtlFileName, line):\n if (\"label_ref\" in line[5][2][0]):\n # unconditional jump\n if (str(line[5][2][1]) == \"main\"):\n text = \"b \" + str(line[5][2][1])\n else:\n text = \"b L\" + str(line[5][2][1])\n elif (\"if_then_else\" in line[5][2][0]):\n # conditional jump\n condition = str(line[5][2][1][0])\n if (str(line[5][2][2][1]) == \"main\"):\n text = \"b\" + condition + \" \" + str(line[5][2][2][1])\n else:\n text = \"b\" + condition + \" L\" + str(line[5][2][2][1])\n writeARMInsn(rtlFileName, text)\n\n\ndef codeLabel(rtlFileName, line):\n if (str(line[1]) == \"main\"):\n writeCodeLabel(rtlFileName, str(line[1]))\n else:\n writeCodeLabel(rtlFileName, \"L\" + str(line[1]))\n\n\ndef setMemInsn(rtlFileName, line, maxReg, lookupTable, mapTable):\n offset = line[5][1][1][2][1]\n regToSave = line[5][-1][1]\n text3 = \"\"\n if (offset in mapTable):\n text3 = \"str r1, [fp, #\" + str(lookupTable[mapTable[offset]]) + \"]\"\n else:\n mapTable[offset] = regToSave\n text = \"ldr r1, [fp, #\" + str(lookupTable[regToSave]) + \"]\"\n text2 = \"str r1, [fp, #\" + str(lookupTable[regToSave]) + \"]\"\n writeARMInsn(rtlFileName, text)\n writeARMInsn(rtlFileName, text2)\n if (text3 != \"\"):\n writeARMInsn(rtlFileName, text3)\n\n\n\ndef callInsn(rtlFileName, line):\n func = line[5][2][1][1][1][0]\n text = \"bl \" + str(func)\n writeARMInsn(rtlFileName, text)\n\n\n\n# main parse function to convert RTL into ARM assembly insns\n# Input: name of file (should be format of .c.212r.expand)\n# Output: nothing\ndef parseRTLtoAssembly(rtlFileName, numVirtRegs):\n rtlInput = readRTL(rtlFileName)\n maxReg = numVirtRegs + 104\n lineCount = 0\n\n lookupTable = createLookupTable(maxReg)\n mapTable = {}\n\n for line in rtlInput:\n if (lineCount == 2):\n # create header\n writeHeader(rtlFileName, line)\n saveRegs(rtlFileName)\n 
createStack(rtlFileName, numVirtRegs)\n lineCount += 1\n if type(line) == list:\n print(line)\n if line[0] == 'insn':\n if line[5][0] == 'set':\n if 'reg' in line[5][1][0]:\n setRegInsn(rtlFileName, line, maxReg, lookupTable, mapTable)\n elif 'mem' in line[5][1][0]:\n setMemInsn(rtlFileName, line, maxReg, lookupTable, mapTable)\n elif line[0] == 'call_insn':\n callInsn(rtlFileName, line)\n elif line[0] == 'code_label':\n codeLabel(rtlFileName, line)\n elif line[0] == 'jump_insn':\n jumpInsn(rtlFileName, line)\n freeStack(rtlFileName, numVirtRegs)\n restoreRegs(rtlFileName)\n\n\n# creates the ARM assembly file, and writes a header for it\n# Input: name of file (should be format of .c.212r.expand)\n# Output: nothing\ndef writeHeader(rtlFileName, text):\n writeToFile(rtlFileName, \"\t.arch armv6\", \"w\")\n writeToFile(rtlFileName, \"\t.text\", \"a\")\n writeToFile(rtlFileName, \"\t.global \" + str(text), \"a\")\n writeCodeLabel(rtlFileName, text)\n\n\n# writes a code label to the file\n# Input: name of file (should be format of .c.212r.expand)\n# Output: nothing\ndef writeCodeLabel(rtlFileName, codeLabel):\n writeToFile(rtlFileName, str(codeLabel) + \":\", \"a\")\n\n# returns the number of the largest register used\n# Input: name of the file (should be format of .c.212r.expand)\n# Output: the highest register # used\ndef findMaxReg(rtlFileName):\n rtlInput = readRTL(rtlFileName)\n maxReg = 0\n for l in rtlInput:\n line = str(l)\n if \"reg:SI\" in line:\n regindex = line.index(\"reg:SI\")\n strnum = line[regindex + 8 : regindex + 12]\n try:\n intnum = int(strnum)\n if intnum > maxReg:\n maxReg = intnum\n except ValueError:\n continue\n if \"reg:QI\" in line:\n regindex = line.index(\"reg:QI\")\n strnum = line[regindex + 8 : regindex + 12]\n try:\n intnum = int(strnum)\n if intnum > maxReg:\n maxReg = intnum\n except ValueError:\n continue\n if \"reg/f:SI\" in line:\n regindex = line.index(\"reg/f:SI\")\n strnum = line[regindex + 10 : regindex + 14]\n try:\n intnum = int(strnum)\n if intnum > maxReg:\n maxReg = intnum\n except ValueError:\n continue\n return maxReg\n\n\n# general function for writing to a file with the removal of the extensions (.c.212r.expand)\n# Input: name of file (should be format of .c.212r.expand)\n# text to write\n# file open parameter (\"w\" for write, \"a\" for append)\n# Output: nothing\ndef writeToFile(rtlFileName, text, parameter):\n # checks if the rtl filename is valid\n if (rtlFileName.count(\".\") < 3):\n raise Exception(\"Invalid file type, should be of .c.212r.expand\\n\")\n\n # removes the .expand extension\n fileName212r, fileExtension212r = os.path.splitext(rtlFileName)\n # removes the .212r extension\n fileNameC, fileExtensionC = os.path.splitext(fileName212r)\n # removes the .c extension\n fileNameOnly, fileExtensionOnly = os.path.splitext(fileNameC)\n\n # opens a file and appends the .s extension\n fileOutput = open(fileNameOnly + \".s\", parameter)\n fileOutput.write(str(text) + \"\\n\")\n\n\n# writes an ARM instruction to a file\n# Input: name of file (should be format of .c.212r.expand)\n# text to write\n# Output: nothing\ndef writeARMInsn(rtlFileName, text):\n writeToFile(rtlFileName, \"\t\" + text, \"a\")\n\n\n# writes the ARM instruction to save the caller's registers (fp and lr)\n# Input: name of file (should be format of .c.212r.expand)\n# Output: nothing\ndef saveRegs(rtlFileName):\n writeARMInsn(rtlFileName, \"push {r4, r5, r6, r7, r8, r9, r10, fp, r12, lr}\")\n\n\n# writes the ARM instruction to restore the caller's registers (fp and pc)\n# 
Input: name of file (should be format of .c.212r.expand)\n# Output: nothing\ndef restoreRegs(rtlFileName):\n    writeARMInsn(rtlFileName, \"pop {r4, r5, r6, r7, r8, r9, r10, fp, r12, pc}\")\n\n\n# create a stack to keep track of virtual registers\n# Input: name of file (should be format of .c.212r.expand)\n#        number of virtual registers to allocate\n# Output: nothing\ndef createStack(rtlFileName, size):\n    stackSize = size * 4\n    writeARMInsn(rtlFileName, \"mov fp, sp\")\n    writeARMInsn(rtlFileName, \"sub sp, sp, #\" + str(stackSize))\n\n\n# frees the stack\n# Input: name of file (should be format of .c.212r.expand)\n#        number of virtual registers that have been allocated\n# Output: nothing\ndef freeStack(rtlFileName, size):\n    stackSize = size * 4\n    writeARMInsn(rtlFileName, \"add sp, sp, #\" + str(stackSize))\n    writeARMInsn(rtlFileName, \"mov sp, fp\")\n\n\n#calculates the offset for a given register given the register number and the maximum register number of the program\n#offset is from the stack pointer, the highest register number is stored below the link register\n# SP + OFFSET\n\"\"\" fp\n    lr\n    118\n    117\n    .\n    .\n    .\n    105\n    sp\"\"\"\ndef calc_offset(regnum, maxRegNum):\n    return ((maxRegNum - 104) * 4) - ((maxRegNum - regnum + 1) * 4)\n\n\ndef createLookupTable(maxRegNum):\n    lookupTable = {}\n    offset = -4\n    for i in range(maxRegNum, 104, -1):\n        lookupTable[i] = offset\n        offset -= 4\n    return lookupTable\n\n# main function that is called when the program is run\n# Input: nothing\n# Output: nothing\ndef main():\n    # command line args checker\n    if (len(sys.argv) != 2):\n        sys.stderr.write(\"Usage: python rtlToAssemblyParser.py <file.c.212r.expand>\\n\")\n        exit(1)\n\n    maxReg = findMaxReg(sys.argv[1])\n\n    parseRTLtoAssembly(sys.argv[1], maxReg - 104)\n\nif __name__ == '__main__':\n    main()","repo_name":"aross885/431milestone4","sub_path":"rtlToAssemblyParser.py","file_name":"rtlToAssemblyParser.py","file_ext":"py","file_size_in_byte":11080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20967315730","text":"from firebase import Firebase\n\nconfig = {\n    \"apiKey\": \"AIzaSyCSMzDr9ohvgy4tTk6-5b8eC4A7CG4FgYM\",\n    \"authDomain\": \"ospython-c68c0.firebaseapp.com\",\n    \"databaseURL\": \"https://ospython-c68c0-default-rtdb.firebaseio.com\",\n    \"projectId\": \"ospython-c68c0\",\n    \"storageBucket\": \"ospython-c68c0.appspot.com\",\n    \"messagingSenderId\": \"169713426191\",\n    \"appId\": \"1:169713426191:web:2987a97f43ee1db2ab287a\"\n}\n\n\nclass PersonService:\n    def __init__(self):\n        self.firebase = Firebase(config)\n        self.db = self.firebase.database()\n\n    def get_all_people(self):\n        var = self.db.child(\"OSP\").child(\"People\").get()\n        return var.val()\n\n    def add_person(self,\n                   first_name,\n                   last_name,\n                   is_action_leader,\n                   is_active,\n                   is_driver,\n                   is_section_leader,\n                   phone_number):\n\n        person_data = {\n            \"FirstName\": first_name,\n            \"LastName\": last_name,\n            \"IsActionLeader\": is_action_leader,\n            \"IsActive\": is_active,\n            \"IsDriver\": is_driver,\n            \"IsSectionLeader\": is_section_leader,\n            \"PhoneNumber\": phone_number\n        }\n\n        self.db.child(\"OSP\").child(\"People\").push(person_data)\n\n    def get_drivers(self):\n        peoples = self.get_all_people()\n        drivers = []\n        for key, i in peoples.items():\n            if i is not None:\n                if i.get(\"IsDriver\") and i.get(\"IsActive\"):\n                    drivers.append({\"FirstName\": i.get(\"FirstName\"), \"LastName\": i.get(\"LastName\"), \"PhoneNumber\": i.get(\"PhoneNumber\")})\n        return drivers\n\n    def get_action_leaders(self):\n        peoples = 
self.get_all_people()\n action_leaders = []\n for key, i in peoples.items():\n if i is not None:\n if i.get(\"IsActionLeader\") and i.get(\"IsActive\"):\n action_leaders.append({\"FirstName\": i.get(\"FirstName\"), \"LastName\": i.get(\"LastName\"), \"PhoneNumber\": i.get(\"PhoneNumber\")})\n return action_leaders\n\n def get_section_leaders(self):\n peoples = self.get_all_people()\n section_leaders = []\n for key, i in peoples.items():\n if i is not None:\n if i.get(\"IsSectionLeader\") and i.get(\"IsActive\"):\n section_leaders.append({\"FirstName\": i.get(\"FirstName\"), \"LastName\": i.get(\"LastName\"), \"PhoneNumber\": i.get(\"PhoneNumber\")})\n return section_leaders\n\n def check_person_existence(self, first_name, last_name, phone_number):\n all_people = self.get_all_people()\n person_id = None\n\n for key, i in all_people.items():\n if i is not None:\n if i.get('FirstName') == first_name and i.get('LastName') == last_name and i.get('PhoneNumber') == phone_number:\n person_id = key\n break\n return person_id\n\n def remove_person_from_active(self, id):\n self.db.child(\"OSP\").child(\"People\").child(id).update({\"IsActive\": 0})\n\n def change_person_data(self, id, first_name, last_name, phone_number, is_active, is_driver, is_action_leader, is_section_leader):\n if id is not None:\n self.db.child(\"OSP\").child(\"People\").child(id).update({\n \"FirstName\": first_name,\n \"LastName\": last_name,\n \"IsActionLeader\": is_action_leader,\n \"IsActive\": is_active,\n \"IsDriver\": is_driver,\n \"IsSectionLeader\": is_section_leader,\n \"PhoneNumber\": phone_number\n })\n\n def get_person_by_id(self, id):\n return self.db.child(\"OSP\").child(\"People\").child(id).get().val()\n\n def id_to_text(self, id):\n person = self.get_person_by_id(id)\n person_data = person.get(\"FirstName\") + \" \" + person.get(\"LastName\")\n return person_data\n\n def id_to_box(self, id):\n person = self.get_person_by_id(id)\n person_data = person.get(\"FirstName\") + \",\" + person.get(\"LastName\") + \",\" + str(person.get(\"PhoneNumber\"))\n return person_data\n\n def section_to_string(self, section_table):\n section_string = \"\"\n for id in section_table:\n section_string += self.id_to_text(id) + \"\\n\"\n if section_string == \"\":\n print(\"Empty section\")\n return section_string\n\n # gets id of chosen person from data in UI element, should return None if not found (should not happen in usage)\n def translate_to_id(self, text):\n person_details = text.split(\",\")\n p_len = len(person_details)\n p_num = person_details[p_len - 1]\n p_last = person_details[p_len - 2]\n p_first = \"\"\n for i in range(0, p_len - 2):\n p_first += person_details[i]\n return self.check_person_existence(p_first, p_last, int(p_num))\n\nclass ReportService:\n def __init__(self):\n self.firebase = Firebase(config)\n self.db = self.firebase.database()\n\n def get_all_reports(self):\n var = self.db.child(\"OSP\").child(\"Reports\").get()\n return var.val()\n\n def get_report_data(self, report_id):\n report = self.db.child(\"OSP\").child(\"Reports\").child(report_id).get()\n return report.val()\n\n # 0 means not closed\n def is_report_closed(self, report_id):\n return self.get_report_data(report_id)[\"editable\"]\n\n def get_report_id_by_fields(self, report_string):\n report_details = report_string.split(\",\")\n all_reports = self.get_all_reports()\n report_id = None\n\n for key, i in all_reports.items():\n if i is not None:\n if i.get('at_place_date') == report_details[0] \\\n and i.get('at_place_hour') == report_details[1] 
\\\n                    and i.get('place_name') == report_details[2]:\n                    report_id = key\n                    break\n        return report_id\n\n    def add_report(self, km_to_place, accident_type, at_place_date, at_place_hour, counter_state, depot_hour,\n                   injured, out_date, out_hour, perpetrator, place_name, return_date, return_hour, section_current,\n                   section_leader_id, editable, action_leader_id, driver_id, details):\n        report_data = {\n            \"KM_to_place\": km_to_place,\n            \"accident_type\": accident_type,\n            \"action_leader_id\": action_leader_id,\n            \"at_place_date\": at_place_date,\n            \"at_place_hour\": at_place_hour,\n            \"counter_state\": counter_state,\n            \"depot_hour\": depot_hour,\n            \"driver_id\": driver_id,\n            \"editable\": editable,\n            \"injured\": injured,\n            \"out_date\": out_date,\n            \"out_hour\": out_hour,\n            \"perpetrator\": perpetrator,\n            \"place_name\": place_name,\n            \"return_date\": return_date,\n            \"return_hour\": return_hour,\n            \"section_current\": section_current,\n            \"section_leader_id\": section_leader_id,\n            \"details\": details\n        }\n        self.db.child(\"OSP\").child(\"Reports\").push(report_data)\n\n    def change_report_data(self, report_id, km_to_place, accident_type, at_place_date, at_place_hour, counter_state,\n                           depot_hour, injured, out_date, out_hour, perpetrator, place_name, return_date, return_hour,\n                           section_current, section_leader_id, editable, action_leader_id, driver_id, details):\n        if report_id is not None:\n            self.db.child(\"OSP\").child(\"Reports\").child(report_id).update({\n                \"KM_to_place\": km_to_place,\n                \"accident_type\": accident_type,\n                \"action_leader_id\": action_leader_id,\n                \"at_place_date\": at_place_date,\n                \"at_place_hour\": at_place_hour,\n                \"counter_state\": counter_state,\n                \"depot_hour\": depot_hour,\n                \"driver_id\": driver_id,\n                \"editable\": editable,\n                \"injured\": injured,\n                \"out_date\": out_date,\n                \"out_hour\": out_hour,\n                \"perpetrator\": perpetrator,\n                \"place_name\": place_name,\n                \"return_date\": return_date,\n                \"return_hour\": return_hour,\n                \"section_current\": section_current,\n                \"section_leader_id\": section_leader_id,\n                \"details\": details\n            })\n","repo_name":"kwdrt/OSPython","sub_path":"firebaseUtils.py","file_name":"firebaseUtils.py","file_ext":"py","file_size_in_byte":8424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19608548141","text":"import pandas as pd  # import pandas\nimport numpy as np  # import numpy\nimport jieba  # import jieba (Chinese word segmentation)\nfrom keras.preprocessing import sequence\nfrom keras.optimizers import SGD, RMSprop, Adagrad\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.recurrent import LSTM, GRU\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport re\nfrom collections import Counter, defaultdict\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn import metrics\n\ndef build_dataset(words, vocabulary_size = 5000):\n    count = [['UNK', -1]]\n    count.extend(Counter(words).most_common(vocabulary_size - 1))\n    w_dictionary = {}\n    for word, _ in count:\n        w_dictionary[word] = len(w_dictionary)\n    da = list()\n    unk_count = 0\n    for word in words:\n        if word in w_dictionary:\n            index = w_dictionary[word]\n        else:\n            index = 0\n            unk_count += 1\n        da.append(index)\n    count[0][1] = unk_count\n    # map indices back to words\n    reverse_dictionary = dict(zip(w_dictionary.values(), w_dictionary.keys()))\n    return da, count, w_dictionary, reverse_dictionary\n\ndef rmsel(true_label, pred):\r\n    
rmse = np.sqrt(metrics.mean_squared_error(true_label, pred))\n    return 1 / (1 + rmse)\n\n\ntrain = pd.read_csv('../input/train_first.csv')\npredict = pd.read_csv('../input/predict_first.csv')\npredict['Score'] = -1\n\ndata = pd.concat([train, predict])\ndata.head()\n\n\nstop_word = []\nstop_words_path = '../input/stop_word.txt'\nwith open(stop_words_path,encoding='utf-8') as f:\n    for line in f.readlines():\n        stop_word.append(line.strip())\nstop_word.append(' ')\n\ndef clean_str(stri):\n    stri = re.sub(r'[a-zA-Z0-9]+','',stri)\n    cut_str = jieba.cut(stri.strip())\n    list_str = [word for word in cut_str if word not in stop_word]\n    return list_str\n\ndata['words'] = data['Discuss'].apply(lambda x : clean_str(x))\ndata.head()\n\nd2v_train = data['words'].copy()\nd2v_train.head()\n\nall_words = []\nfor i in d2v_train:\n    all_words.extend(i)\nprint(all_words[0:100])\n\nda, count, w_dictionary, reverse_dictionary = build_dataset(all_words, vocabulary_size = len(all_words))\nprint(count[0:100])\n\n\ndef get_sent(x, dictionary):\n    encode = []\n    for i in x:\n        if i in dictionary:\n            encode.append(dictionary[i])\n        else:\n            encode.append(0)\n    return encode\n\n\ndata['sent'] = data['words'].apply(lambda x: get_sent(x, w_dictionary))\ndata.head()\n\ntrain_df = data[data['Score'] != -1]\npredict_df = data[data['Score'] == -1]\ndel predict_df['Score']\n\ntrain_df.head()\n\nmaxlen = 10\nprint(\"Pad sequences (samples x time)\")\n\ntrain_df['sent'] = list(sequence.pad_sequences(train_df['sent'], maxlen=maxlen))\npredict_df['sent'] = list(sequence.pad_sequences(predict_df['sent'], maxlen=maxlen))\n\nnfolds = 5\n\n\ndef training(train_df, train_label, test_df):\n    X = np.array(list(train_df['sent']))\n    y = np.array(np_utils.to_categorical(train_label))\n    T = np.array(list(test_df['sent']))\n    folds = list(StratifiedKFold(n_splits=nfolds, random_state=2018, shuffle=True).split(X, train_label.values))\n\n    S_train = np.zeros((X.shape[0], 1))  # number of training samples x number of models\n    S_test = np.zeros((T.shape[0], 1))  # number of test samples x number of models\n    S_test_n = np.zeros((T.shape[0], len(folds)))  # number of test samples x n_folds\n\n    error = []\n    for j, (train_idx, test_idx) in enumerate(folds):\n        X_train = X[train_idx]  # training features\n        y_train = y[train_idx]  # training labels\n\n        X_holdout = X[test_idx]  # held-out inputs to predict\n        y_holdout = y[test_idx]\n\n        print('Build model...')\n        model = Sequential()\n        model.add(Embedding(len(w_dictionary) + 1, 256))\n        model.add(LSTM(256))  # try using a GRU instead, for fun\n        model.add(Dropout(0.5))\n        model.add(Dense(6))\n        model.add(Activation('softmax'))\n\n        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n        model.fit(X_train, y_train, batch_size=32, nb_epoch=2, validation_data=(X_holdout, y_holdout))\n\n        y_true = [np.argmax(i) for i in list(y_holdout)]\n        predictions = list(model.predict(X_holdout, batch_size=32))\n        y_pred = [np.sum(i * [0, 1, 2, 3, 4, 5]) for i in predictions]\n        print('rmse: {}'.format(rmsel(y_true, y_pred)))\n        error.append(rmsel(y_true, y_pred))\n\n        submission = list(model.predict(T, batch_size=32))\n        sub_pred = [np.sum(i * [0, 1, 2, 3, 4, 5]) for i in submission]\n\n        S_train[test_idx] = np.array(y_pred).reshape(-1, 1)\n        S_test_n[:, j] = np.array(sub_pred)\n\n    S_test[:] = S_test_n.mean(1).reshape(-1, 1)\n    return S_train, S_test, round(np.mean(error), 5)\n\n\nS_train, S_test, error = training(train_df, train_df['Score'], predict_df)\n\n\ntrain_out = train_df[['Id']]\ntrain_out['lstm_len_10'] = S_train\ntrain_out.to_csv('../models/__models__/train_lstm_len_10.csv', index = False)\n\ntest_out = 
predict_df[['Id']]\ntest_out['lstm_len_10'] = S_test\n\ntest_out.to_csv('../models/__models__/test_lstm_len_10.csv', index = False)\n\nprint('error: {}'.format(error))\n","repo_name":"demonSong/DF_CCF_CONTEST","sub_path":"models/keras_model_lstm.py","file_name":"keras_model_lstm.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"18"} +{"seq_id":"41425728654","text":"# from datetime import datetime\nimport os\nimport pickle\nfrom typing import Dict, List\nimport requests\nimport db\n\n\ndef save_to_var(vars_file: str, obj: List) -> None:\n    with open(vars_file, \"wb\") as f:\n        pickle.dump(obj, f)\n\n\ndef wirte_to_file(path: str, content) -> None:\n    with open(path, \"w\") as f:\n        f.write(str(content))\n\n\ndef load_from_file(vars_file):\n    with open(vars_file, 'rb') as f:\n        return pickle.load(f)\n\n\ndef process_tables(table) -> Dict:\n    # wirte_to_file(\"files/table.html\", table)\n    tbody = table.tbody\n    trs = tbody.find_all('tr')\n    image_url = trs[1].find_all('td')[0].img.attrs['src']\n    download_url = trs[1].find_all('td')[0].a.attrs['href']\n    # Name\n    name = trs[1].select_one('td[colspan]').find('a').text\n    # Author\n    # name = trs[2].select_one('td[colspan]').find('a').text\n    # Series, Periodical\\\n\n    # Publisher, City\n\n    # Year, Edition\n    year_edition = trs[5].find_all('td')\n    year = year_edition[1].text\n    edition = year_edition[3].text\n\n    # Language, Pages\n    # name = trs[6].select_one('td[colspan]').find('a').text\n    lang_pages = trs[6].find_all('td')\n    lang = lang_pages[1].text\n    pages = lang_pages[3].text\n\n    # ISBN, ID\n    # name = trs[7].select_one('td[colspan]').find('a').text\n    isbn_id = trs[7].find_all('td')\n    isbn = isbn_id[1].text\n    id = isbn_id[3].text\n\n    # Time added, Time modified\n    # name = trs[7].select_one('td[colspan]').find('a').text\n\n    # Size, Extension\n    # name = trs[9].select_one('td[colspan]').find('a').text\n    size_extenstion = trs[9].find_all('td')\n    size = size_extenstion[1].text.split()\n    size_in_m = size[0]\n    size_in_b = size[2][1:-1]\n    extention = size_extenstion[3].text\n    return {\n        'name': name,\n        'year': year,\n        'edition': edition,\n        'lang': lang,\n        'pages': pages,\n        'image_url': image_url,\n        'download_url': download_url,\n        'book_id': int(id),\n        'extention': extention,\n        'size_in_b': int(size_in_b),\n    }\n\n\ndef save_thumbnail(image_url: str, book_id: str, location: str = \"files/thumbnails/\"):\n    if not os.path.exists(location):\n        os.makedirs(location)\n    img_data = requests.get(image_url).content\n    image_location = os.path.join(\n        location, str(book_id) + '.' + get_image_extension(image_url))\n    with open(image_location, 'wb') as handler:\n        handler.write(img_data)\n\n\ndef get_image_extension(image_url: str):\n    return image_url.split('.')[-1]\n\n\n# NOTE: relies on get_image_location(), which is commented out below\ndef check_if_image_exists(conn, base_location: str, book_id: int, extension: str) -> bool:\n    image_location = get_image_location(conn, book_id)\n    return os.path.isfile(os.path.join(base_location, image_location, str(book_id) + '.' 
+ extension))\n\n\n# def get_image_location(conn, book_id: int) -> str:\n# cursorObj = conn.cursor()\n# rows = cursorObj.execute(\"\"\"SELECT CREATED_AT\n# FROM BOOKS\n# WHERE BOOK_ID = ?;\"\"\", (book_id,))\n# created_at = rows.fetchone()\n# if not created_at:\n# raise Exception(\"Could not find\")\n# created_at_iso = datetime.fromisoformat(created_at[0])\n# year = created_at_iso.year\n# month = created_at_iso.month\n# return os.path.join(str(year), str(month))\n\ndef parse_tables_from_html(conn, bs):\n tables = bs.find('table', {'class': 'c'})\n tables_all = tables.find_all('table')\n for child in tables_all[::2]:\n res = process_tables(child)\n db.sql_insert(conn, res)\n\n\ndef url_builder(keyword, number_per_page, page):\n return f\"https://libgen.is/search.php?&req={keyword}&phrase=1&view=detailed&res={number_per_page}&column=def&sort=year&sortmode=DESC&page={page}\"","repo_name":"monegim/libgen-scraper","sub_path":"scripts/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37511380465","text":"#!/usr/bin/env python\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration('sharpclaw', parent_package, top_path)\n\n config.add_extension('sharpclaw1',\n ['ClawParams.f90','weno.f90','reconstruct.f90',\n 'evec.f90','workspace.f90','flux1.f90'])\n\n config.add_extension('sharpclaw2',\n ['ClawParams.f90','weno.f90','reconstruct.f90',\n 'evec.f90','workspace.f90','flux2.f90',\n 'flux1.f90'])\n\n config.add_extension('sharpclaw3',\n ['ClawParams.f90','weno.f90','reconstruct.f90',\n 'evec.f90','workspace.f90','flux3.f90',\n 'flux1.f90'])\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n","repo_name":"clawpack/pyclaw","sub_path":"src/pyclaw/sharpclaw/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"18"} +{"seq_id":"41950714440","text":"#def zeller(year, month, day):\n#http://ja.wikipedia.org/wiki/%E3%83%84%E3%82%A7%E3%83%A9%E3%83%BC%E3%81%AE%E5%85%AC%E5%BC%8F\n#\tif month <= 2:\n#\t\tyear -= 1\n#\t\tmonth += 12\n#\tJ = year / 100\n#\tK = year % 100\n#\treturn (day + (month + 1) * 26 / 10 + K + K / 4 + J / 4 + 5 * J) % 7\n\nimport datetime\n\ns = 0\nfor i in xrange(0, 100):\n\tyear = 1901 + i\n\tfor j in xrange(1, 13):\n#\t\tif zeller(year, j, 1) == 1:\n\t\tif datetime.datetime(year, j, 1).weekday() == 6:\n\t\t\ts += 1\n\ns\n","repo_name":"mugenen/Project-Euler","sub_path":"10-20/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"40417153601","text":"#!/usr/bin/python2\n#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport httplib2\nimport shutil\nimport os\nimport bs4\nimport time\nimport sys\nimport json\nfrom hashlib import md5 \nfrom datetime import datetime\n\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\nfrom gdata import gauth\nimport gdata.photos, gdata.photos.service\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\n### GLOBALS ###\n\n# If modifying these 
scopes, delete your previously saved credentials\n# at ~/.credentials/drive-python-quickstart.json\nSCOPES = 'https://picasaweb.google.com/data/ https://www.googleapis.com/auth/drive'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Personal Photo Sync'\n\nPHOTO_ALBUM = \"6316489518140194977\"\nVIDEO_ALBUM = \"0B5QT17osW5Emc1NnUEJHRUtONEU\"\nSOURCE_DIRS = [\"/media/dan/disk/DCIM/\",\n \"/media/dan/disk/PRIVATE/AVCHD/BDMV/STREAM/\"]\nSTAGING_DIR = os.path.join(os.path.expanduser(\"~\"), \"staging\")\nMASTERS_DIR = \"/media/wdmycloud/HomeShare/Photos/masters\"\n\ndef get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'photosync.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\ndef list_albums(http_client):\n (resp_headers, content) = http_client.request(\"https://picasaweb.google.com/data/feed/api/user/default\", \"GET\")\n soup = bs4.BeautifulSoup(content, 'lxml')\n for e in soup.html.body.feed:\n id_soup = e.find(\"gphoto:id\")\n name_soup = e.find(\"gphoto:name\")\n num_soup = e.find(\"gphoto:numphotos\")\n title_soup = e.find(\"media:title\")\n print(\"%s - %s - %s - %s\" % (\"\" if id_soup == None else id_soup.string, \n \"\" if title_soup == None else title_soup.string,\n \"\" if name_soup == None else name_soup.string,\n \"\" if num_soup == None else num_soup.string))\n\ndef post_photo(http_client, album_id, filepath):\n slug = os.path.basename(filepath)\n content_type = 'image/jpeg' \n\n print(\"Loading image into memory... %s\" % (filepath))\n with open(filepath,'rb') as fh:\n data = fh.read()\n url = \"https://picasaweb.google.com/data/feed/api/user/default/albumid/%s\" % (album_id)\n print(\"POSTing %s to %s\" % (slug, url))\n (resp_headers, content) = http_client.request(url, method=\"POST\", body=data, headers={'Content-Type':content_type,'Slug':slug})\n print(\"Finished upload\")\n \n if \"status\" in resp_headers:\n if resp_headers[\"status\"] == '201':\n return True\n\n print(\"====POST ERROR===\")\n print(resp_headers)\n print(content)\n print(\"=================\")\n raise Exception(\"Error occurred while posting\")\n\ndef post_photo_multipart(http_client, album_id, filepath):\n slug = os.path.basename(filepath)\n content_type = 'image/jpeg' \n\n print(\"Loading file into memory... 
%s\" % (filepath))\n with open(filepath,'rb') as fh:\n data = fh.read()\n\n createdTime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(os.path.getmtime(filepath)))\n\n boundary=\"foo_bar_baz\"\n url = \"https://picasaweb.google.com/data/feed/api/user/default/albumid/%s\" % (album_id)\n\n # metadata = {\"name\": slug, \n # \"mimeType\": content_type, \n # \"parents\": [ album_id ], \n # \"createdTime\": createdTime,\n # \"modifiedTime\": createdTime}\n metadata = \"\"\"\n \n %s \n \n %s \n %s \n\"\"\" % (slug, createdTime, createdTime)\n #print(metadata)\n\n post_data = [\n '--%s' % boundary,\n 'Content-Type: application/atom+xml',\n '',\n metadata,\n '--%s' % boundary,\n 'Content-Type: %s' % content_type,\n '',\n data,\n '--%s--' % boundary \n ]\n \n post_body = '\\n'.join(post_data)\n\n headers = {\"Content-Type\":\"multipart/related; boundary=%s\" % boundary, \n \"Content-Length\":len(post_body),\n \"MIME-version\":1.0}\n (resp_headers, content) = http_client.request(url, method='POST', headers=headers, body=post_body)\n print(\"Finished upload\")\n \n if \"status\" in resp_headers:\n if resp_headers[\"status\"] == '201':\n return True\n\n print(\"====POST ERROR===\")\n print(resp_headers)\n print(content)\n print(\"=================\")\n raise Exception(\"Error occurred while posting\")\n\n\ndef post_video_multipart(http_client, album_id, filepath):\n slug = os.path.basename(filepath)\n content_type = \"video/vnd.mts\"\n\n print(\"Loading video into memory... %s\" % (filepath))\n with open(filepath,'rb') as fh:\n data = fh.read()\n\n createdTime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(os.path.getmtime(filepath)))\n\n boundary=\"foo_bar_baz\"\n url = \"https://www.googleapis.com/upload/drive/v3/files/?uploadType=multipart\"\n\n metadata = {\"name\": slug, \n \"mimeType\": content_type, \n \"parents\": [ album_id ], \n \"createdTime\": createdTime,\n \"modifiedTime\": createdTime}\n print(json.dumps(metadata, indent=2))\n\n post_data = [\n '--%s' % boundary,\n 'Content-Type: application/json; charset=UTF-8',\n '',\n json.dumps(metadata),\n '',\n '--%s' % boundary,\n 'Content-Type: %s' % content_type,\n '',\n data,\n '--%s--' % boundary \n ]\n \n post_body = '\\n'.join(post_data)\n\n headers = {\"Content-Type\":\"multipart/related; boundary=%s\" % boundary, \"Content-Length\":len(post_body)}\n (resp_headers, content) = http_client.request(url, method='POST', headers=headers, body=post_body)\n print(\"Finished upload\")\n \n if \"status\" in resp_headers:\n if resp_headers[\"status\"] == '200':\n return True\n\n print(\"====POST ERROR===\")\n print(resp_headers)\n print(content)\n print(\"=================\")\n raise Exception(\"Error occurred while posting\")\n\ndef find(name, path):\n for root, dirs, files in os.walk(path):\n if name in files:\n return os.path.join(root, name)\n return None\n \ndef stage():\n source_dirs = SOURCE_DIRS\n target_dir = STAGING_DIR\n print(\"Pulling files off the media card\")\n if not os.path.exists(target_dir):\n raise Exception(\"ERROR Directory not found: %s\" % target_dir)\n\n known_files = set(get_all_known_files())\n\n for frompath in source_dirs:\n if os.path.exists(frompath):\n\n for subdir, dirs, files in os.walk(frompath):\n print(\">>>> Processing files in: %s\" % (subdir))\n for file in sorted(files):\n filepath = os.path.join(subdir,file)\n filesize = os.stat(filepath).st_size \n (basefilename, fileextn) = os.path.splitext(file)\n newfn = \"%s_%i%s\" % (basefilename, filesize, fileextn)\n newpath = 
os.path.join(target_dir,newfn)\n                    if newfn in known_files:\n                        print(\"Skipping file: %s -> %s\" % (file,newfn)) \n                    else:\n                        shutil.copy2(filepath, newpath)\n                        if os.path.exists(newpath):\n                            print(\"file %s staged successfully...\" % file)\n                        else:\n                            print(\"Copy seems to have failed...\\n%s -> %s\" % (filepath, newpath))\n        else:\n            print(\"Looks like media card not mounted\")\n\ndef main(stg=True, proc=True ):\n    print(\"SOURCE_DIRS: %s\" % \",\".join(SOURCE_DIRS))\n    print(\"STAGING_DIR: %s\" % STAGING_DIR)\n    print(\"MASTERS_DIR: %s\" % MASTERS_DIR)\n\n    ### Check that target directory is mounted correctly, should be 3.5TB\n    if os.statvfs(MASTERS_DIR).f_blocks/1024.0/1024.0/1024.0 < 3.0:\n        raise Exception(\"WD MyCloud does not seem to be mounted.\")\n\n    ### copy media off of camera card\n    if stg:\n        print(\"Staging...\")\n        stage()\n    \n    ### Send to masters and google\n    if proc:\n        print(\"Processing...\")\n        tries = 0\n        while True:\n            try:\n                process()\n                break\n            except Exception as e:\n                if tries >= 10:\n                    print(\"Reached max tries...failing.\")\n                    raise e\n                else:\n                    print(\"Failed %i times...trying again.\" % (tries))\n                    tries = tries + 1\n                    time.sleep(10)\n\ndef process():\n    ### process all files residing in staging directory\n    credentials = get_credentials()\n    http = credentials.authorize(httplib2.Http())\n    \n    #get_albums(http_client=http)\n    #get_photos(http_client=http, album_id=ALBUM)\n    \n    frompath = STAGING_DIR\n\n    if not os.path.exists(frompath):\n        raise Exception(\"ERROR Directory not found: %s\" % frompath)\n\n    print(\">>> BEGIN PROCESSING PHOTOS <<<\")\n    print(\" >> %s <<\" % str(datetime.now()))\n    print(\"Processing files in %s...\" % (frompath))\n\n    for subdir, dirs, files in os.walk(frompath):\n        print(\">>>> Processing files in: %s\" % (subdir))\n        for file in sorted(files):\n            print(\" >> %s <<\" % str(datetime.now()))\n\n            filepath = subdir + os.sep + file\n\n            dt = time.strftime('%Y-%m-%d', time.localtime(os.path.getmtime(filepath)))\n            newdir = MASTERS_DIR + os.sep + dt\n            newpath = newdir + os.sep + file\n            chk = find(file, MASTERS_DIR)\n            if chk:\n                print(\"File already exists in masters: %s\" % (chk))\n            else:\n                print(\" Posting to Google: %s\" % (newpath))\n\n                #Post to google photos\n                credentials.refresh(http)\n\n                if filepath.lower().endswith(('.jpg', '.jpeg')):\n                    post_photo_multipart(http, PHOTO_ALBUM, filepath) \n                    #post_photo(http, PHOTO_ALBUM, filepath) \n                elif filepath.lower().endswith(('.mts')):\n                    post_video_multipart(http, VIDEO_ALBUM, filepath)\n                else:\n                    print(\"====POST ERROR===\")\n                    print(\" Invalid file type: %s\" % (filepath))\n                    raise Exception(\"Invalid file type\")\n\n                #move to masters file share\n                print(\" moving file to masters: %s\" % (newpath))\n                if not os.path.exists(newdir):\n                    print(\"Creating date directory: %s\" % newdir)\n                    os.makedirs(newdir)\n\n                print(\"moving %s to %s\" % (filepath, newdir))\n# os.rename(filepath, newpath)\n                shutil.copy2(filepath, newpath)\n                if os.path.exists(newpath):\n                    os.remove(filepath)\n                    print(\"Move successful...\")\n                else:\n                    print(\"Copy seems to have failed...\\n%s -> %s\" % (filepath, newpath))\n                    raise Exception(\"ERROR Copy failed...\")\n\n    print(\">>> FINISHED PROCESSING PHOTOS <<<\")\n\n    print(\">>> BEGIN DELETE EMPTY DIRECTORIES <<<\")\n    print(\" >> %s <<\" % str(datetime.now()))\n    for subdir, dirs, files in os.walk(frompath):\n        if os.path.samefile(subdir, frompath):\n            pass\n        elif len(dirs) == 0 and len(files) == 0:\n            print(\" >>> Deleting empty directory: %s\" % (subdir))\n            os.rmdir(subdir)\n        else:\n            print(\" >>> Cannot delete because files still 
exist: %s\" % (subdir) )\n \n print(\">>> FINISHED DELETING DIRECTORIES <<<\")\n\ndef get_all_known_files():\n all_files = []\n for subdir, dirs, files in os.walk(MASTERS_DIR):\n all_files.extend(files)\n return all_files\n\nif __name__ == '__main__':\n main(stg=True, proc=True)\n","repo_name":"dhuddl00/photosync","sub_path":"syncwithgoogle.py","file_name":"syncwithgoogle.py","file_ext":"py","file_size_in_byte":11831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39857525905","text":"import matplotlib\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport re,os\nimport numpy as np\nmatplotlib.rcParams.update({'font.size':14,'font.family':'sans-serif'})\nstate_size = 4\nsname = 'test-DDQN-n%s'%(state_size,)\ncmap = cm.get_cmap('jet')\nax1 = plt.figure().add_subplot(projection='3d')\nfig2, ax2 = plt.subplots(1, 1)\nfig3, ax3 = plt.subplots(1, 1)\npattern1 = re.compile(sname+\"-swinging-epoch-([0-9]+).data$\")\npattern2 = re.compile(sname+\"-greedy-epoch-([0-9]+).data$\")\npattern3 = re.compile(sname+\"-DRL-epoch-([0-9]+).data$\")\nfilenames1 = []\nfilenames2 = []\nfilenames3 = []\nepochs1 = []\nepochs2 = []\nepochs3 = []\ndirect = \"data\"\nfor root, dirs, files in os.walk(direct):\n if root == direct:\n for name in files:\n found1 = pattern1.match(name)\n if found1:\n epochs1.append(int(found1.groups()[0]))\n filenames1.append(name)\n print(name)\n found2 = pattern2.match(name)\n if found2:\n epochs2.append(int(found2.groups()[0]))\n filenames2.append(name)\n print(name)\n found3 = pattern3.match(name)\n if found3:\n epochs3.append(int(found3.groups()[0]))\n filenames3.append(name)\n print(name)\n\nR1 = []\nR2 = []\nR3 = []\n\nfiles1 = sorted(zip(epochs1, filenames1))\nfiles2 = sorted(zip(epochs2, filenames2))\nfiles3 = sorted(zip(epochs3, filenames3))\nfiles = zip(files1,files2,files3)\nfor (epch1, filename1), (epch2, filename2), (epch3, filename3) in files:\n Time1 = []\n X1 = []\n Y1 = []\n Z1 = []\n Xr1 = []\n Yr1 = []\n Zr1 = []\n Theta1 = []\n with open(direct+'/'+filename1) as fp1:\n for line in fp1:\n t, x, y, z, tx, ty, tz, nx, ny, nz, bx, by, bz, kappa, tau = [float(item) for item in line.strip().split()]\n # if int(np.round(t/0.01))%20==0:\n # ax.scatter([x,],[y,],c='r',s=5)\n r0 = kappa/(kappa**2+tau**2)\n Time1.append(t)\n X1.append(x)\n Y1.append(y)\n Z1.append(z)\n Xr1.append(x+nx*r0)\n Yr1.append(y+ny*r0)\n Zr1.append(z+nz*r0)\n phi = np.arctan(tau/kappa)\n if phi<0:\n phi+=np.pi\n helix_v = np.array([np.sin(phi)*tx,\n np.sin(phi)*ty,\n np.sin(phi)*tz]) \\\n + np.array([np.cos(phi) * bx,\n np.cos(phi) * by,\n np.cos(phi) * bz])\n theta = np.arccos(np.dot(helix_v, np.array([0,1,0])))/np.pi*180\n Theta1.append(theta)\n \n Time2 = []\n X2 = []\n Y2 = []\n Z2 = []\n Xr2 = []\n Yr2 = []\n Zr2 = []\n Theta2 = []\n with open(direct+'/'+filename2) as fp2:\n for line in fp2:\n t, x, y, z, tx, ty, tz, nx, ny, nz, bx, by, bz, kappa, tau = [float(item) for item in line.strip().split()]\n # if int(np.round(t/0.01))%20==0:\n # ax.scatter([x,],[y,],c='r',s=5)\n Time2.append(t)\n r0 = kappa/(kappa**2+tau**2)\n X2.append(x)\n Y2.append(y)\n Z2.append(z)\n Xr2.append(x+nx*r0)\n Yr2.append(y+ny*r0)\n Zr2.append(z+nz*r0)\n phi = np.arctan(tau/kappa)\n if phi<0:\n phi+=np.pi\n helix_v = np.array([np.sin(phi)*tx,\n np.sin(phi)*ty,\n np.sin(phi)*tz]) \\\n + np.array([np.cos(phi) * bx,\n np.cos(phi) * by,\n np.cos(phi) * bz])\n theta = np.arccos(np.dot(helix_v, np.array([0,1,0])))/np.pi*180\n Theta2.append(theta)\n\n \n X3 
= []\n Y3 = []\n Z3 = []\n Xr3 = []\n Yr3 = []\n Zr3 = []\n Time3 = []\n Theta3 = []\n with open(direct+'/'+filename3) as fp3:\n for line in fp3:\n t, x, y, z, tx, ty, tz, nx, ny, nz, bx, by, bz, kappa, tau = [float(item) for item in line.strip().split()]\n # if int(np.round(t/0.01))%20==0:\n # ax.scatter([x,],[y,],c='r',s=5)\n r0 = kappa/(kappa**2+tau**2)\n Time3.append(t)\n X3.append(x)\n Y3.append(y)\n Z3.append(z)\n Xr3.append(x+nx*r0)\n Yr3.append(y+ny*r0)\n Zr3.append(z+nz*r0)\n phi = np.arctan(tau/kappa)\n if phi<0:\n phi+=np.pi\n helix_v = np.array([np.sin(phi)*tx,\n np.sin(phi)*ty,\n np.sin(phi)*tz]) \\\n + np.array([np.cos(phi) * bx,\n np.cos(phi) * by,\n np.cos(phi) * bz])\n theta = np.arccos(np.dot(helix_v, np.array([0,1,0])))/np.pi*180\n Theta3.append(theta)\n if epch1 % 1 == 0:\n\n X1 = np.array(X1)\n Y1 = np.array(Y1)\n Z1 = np.array(Z1)\n\n Xr1 = np.array(Xr1)\n Yr1 = np.array(Yr1)\n Zr1 = np.array(Zr1)\n\n X2 = np.array(X2)\n Y2 = np.array(Y2)\n Z2 = np.array(Z2)\n\n Xr2 = np.array(Xr2)\n Yr2 = np.array(Yr2)\n Zr2 = np.array(Zr2)\n\n Time3 = np.array(Time3)\n X3 = np.array(X3)\n Y3 = np.array(Y3)\n Z3 = np.array(Z3)\n\n Xr3 = np.array(Xr3)\n Yr3 = np.array(Yr3)\n Zr3 = np.array(Zr3)\n Theta3 = np.array(Theta3)\n if epch1%4==0:\n ax1.plot3D(X1, Y1, Z1, '--', color=cmap(epch1 / np.max(epochs1)), linewidth=1)\n #ax2.plot3D(Xr1, Yr1, Zr1, '--', color=cmap(epch1 / np.max(epochs1)), linewidth=1)\n ax1.scatter3D((X1[-1],),(Y1[-1],),(Z1[-1]),s=10,c='k')\n #ax2.plot(Time1,Theta1,'--', color=cmap(epch1 / np.max(epochs1)), linewidth=1) \n\n ax1.plot3D(X2, Y2, Z2, '-.', color=cmap(epch2 / np.max(epochs2)), linewidth=1)\n #ax2.plot3D(Xr2, Yr2, Zr2, '-.', color=cmap(epch2 / np.max(epochs2)), linewidth=1)\n ax1.scatter3D((X2[-1],),(Y2[-1],),(Z2[-1]),s=10,c='k')\n #ax2.plot(Time2,Theta2,'-.', color=cmap(epch2 / np.max(epochs2)), linewidth=1) \n\n ax1.plot3D(X3, Y3, Z3,'-', color=cmap(epch3 / np.max(epochs3)), linewidth=1)\n #ax2.plot3D(Xr3, Yr3, Zr3, '-', color=cmap(epch3 / np.max(epochs3)), linewidth=1)\n ax1.scatter3D((X3[-1],),(Y3[-1],),(Z3[-1]),s=10,c='k')\n ax2.plot(Time3,Theta3,'-', color=cmap(epch3 / np.max(epochs3)), linewidth=1,alpha=0.5)\n\n R1.append(Y1[-1]-Y1[0])\n R2.append(Y2[-1]-Y2[0])\n R3.append(Y3[-1]-Y3[0])\nax3.plot(np.array(range(len(R1)))+1,R1,'o--', color='C0',label='alternating')\nax3.plot(np.array(range(len(R1)))+1,np.average(R1)+np.zeros_like(R1),'-',color='C0')\nax3.plot(np.array(range(len(R2)))+1,R2,'v--',color='C1',label='short-sighted')\nax3.plot(np.array(range(len(R2)))+1,np.average(R2)+np.zeros_like(R2),'-',color='C1')\nax3.plot(np.array(range(len(R3)))+1,R3,'s--',color='C2',label='DRL')\nax3.plot(np.array(range(len(R3)))+1,np.average(R3)+np.zeros_like(R3),'-',color='C2')\ny0 = 17.2\nax2.plot(Time3,np.zeros_like(Time3)+y0,'k--')\nax2.text(40,y0+2,r'$\\Delta A_\\mathrm{IV}$')\n\n# ax.scatter([0,],[0,],s=10,c='r')\n#ax1.set_aspect('equal')\n#ax1.set_xlim((-10,10))\nax1.set_xlabel('x')\nax1.set_ylabel('y')\n#ax2.set_aspect('equal')\n#ax2.set_xlim((-35,25))\n#ax2.set_xlabel('x')\n#ax2.set_ylabel('y')\nax2.set_xlabel('t')\nax2.set_ylabel(r'$\\theta_h$ (degree)')\nax3.set_xlabel(r'$i$')\nax3.set_ylabel(r'$\\Delta c/k_c$')\n#ax3.set_ylim((10,30))\nax3.legend(loc='upper left',ncol=2)\nax3.set_ylim((-100,200))\nplt.show()\n","repo_name":"mokchie/chemotaxis","sub_path":"direction-reward-tau6.7/comp.py","file_name":"comp.py","file_ext":"py","file_size_in_byte":7848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
+{"seq_id":"26516731835","text":"from typing import Iterator, Optional\n\nfrom acme import core\nfrom acme import specs\nfrom acme.agents.jax import actor_core as actor_core_lib\nfrom acme.agents.jax import actors\nfrom acme.agents.jax import builders\nfrom acme.agents.jax.bve import losses\nfrom acme.agents.jax.bve import networks as bve_networks\nfrom acme.agents.jax.dqn import learning_lib\nfrom acme.jax import networks as networks_lib\nfrom acme.jax import types as jax_types\nfrom acme.jax import utils\nfrom acme.jax import variable_utils\nfrom acme.utils import counting\nfrom acme.utils import loggers\nimport haiku as hk\nimport optax\n\n\nclass BVEBuilder(builders.OfflineBuilder[bve_networks.BVENetworks,\n actor_core_lib.ActorCore,\n utils.PrefetchingSplit]):\n \"\"\"BVE Builder.\"\"\"\n\n def __init__(self, config):\n \"\"\"Build a BVE agent.\n\n Args:\n config: The config of the BVE agent.\n \"\"\"\n self._config = config\n\n def make_learner(self,\n random_key: jax_types.PRNGKey,\n networks: bve_networks.BVENetworks,\n dataset: Iterator[utils.PrefetchingSplit],\n logger_fn: loggers.LoggerFactory,\n environment_spec: specs.EnvironmentSpec,\n counter: Optional[counting.Counter] = None) -> core.Learner:\n del environment_spec\n\n loss_fn = losses.BVELoss(\n discount=self._config.discount,\n max_abs_reward=self._config.max_abs_reward,\n huber_loss_parameter=self._config.huber_loss_parameter,\n )\n\n return learning_lib.SGDLearner(\n network=networks.policy_network,\n random_key=random_key,\n optimizer=optax.adam(\n self._config.learning_rate, eps=self._config.adam_eps),\n target_update_period=self._config.target_update_period,\n data_iterator=dataset,\n loss_fn=loss_fn,\n counter=counter,\n num_sgd_steps_per_step=self._config.num_sgd_steps_per_step,\n logger=logger_fn('learner'))\n\n def make_actor(\n self,\n random_key: jax_types.PRNGKey,\n policy: actor_core_lib.ActorCore,\n environment_spec: specs.EnvironmentSpec,\n variable_source: Optional[core.VariableSource] = None) -> core.Actor:\n \"\"\"Create the actor for the BVE to perform online evals.\n\n Args:\n random_key: prng key.\n policy: The DQN policy.\n environment_spec: The environment spec.\n variable_source: The source of where the variables are coming from.\n\n Returns:\n Return the actor for the evaluations.\n \"\"\"\n del environment_spec\n variable_client = variable_utils.VariableClient(\n variable_source, 'policy', device='cpu')\n return actors.GenericActor(policy, random_key, variable_client)\n\n def make_policy(\n self,\n networks: bve_networks.BVENetworks,\n environment_spec: specs.EnvironmentSpec,\n evaluation: Optional[bool] = False) -> actor_core_lib.ActorCore:\n \"\"\"Creates a policy.\"\"\"\n del environment_spec, evaluation\n\n def behavior_policy(\n params: hk.Params, key: jax_types.PRNGKey,\n observation: networks_lib.Observation) -> networks_lib.Action:\n network_output = networks.policy_network.apply(\n params, observation, is_training=False)\n return networks.sample_fn(network_output, key)\n\n return actor_core_lib.batched_feed_forward_to_actor_core(behavior_policy)\n","repo_name":"deepmind/acme","sub_path":"acme/agents/jax/bve/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":3100,"dataset":"github-code","pt":"18"} +{"seq_id":"26376972809","text":"import logging\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom chart.utils import get_multiple_files, get_specific_tasks, get_system_delays, 
get_completed_task_deadlines, \\\n    get_penalties, REQUEST_INTERVAL_FILE_LIST\n\n# fm = font_manager.json_load(os.path.expanduser(\"~/.cache/matplotlib/fontlist-v300.json\"))\n\ntime_in_sec = 1080\n# plt.rcParams['font.family'] = ['serif']\n# plt.rcParams['font.serif'] = ['Times New Roman']\nplt.rcParams[\"font.size\"] = \"18\"\n\n\n# energy_costs = results[time_in_sec]['energy_cost_matrix']\n# hop_counts = results[time_in_sec]['flow_hop_matrix']\n# steps = np.arange(0, len(energy_costs))\n\n\ndef get_moving_avg_array_skip_zeros(sequence, n=10):\n    it = iter(sequence)\n    moving_avg_array = []\n    window = ()\n    for i in range(n):\n        nextCost = next(it)\n        if nextCost == 0:\n            # guard: fall back to 0 until a non-zero value has seeded the window\n            moving_avg_array.append(moving_avg_array[-1] if moving_avg_array else 0)\n            continue\n        window += (nextCost,)\n        moving_avg_array.append(np.mean(window))\n    for elem in it:\n        if elem == 0:\n            moving_avg_array.append(moving_avg_array[-1])\n            continue\n        window = window[1:] + (elem,)\n        moving_avg_array.append(np.mean(window))\n    return moving_avg_array\n\n\ndef plot_graph(tasks, title=\"\",filename=\"\"):\n    system_delays = get_system_delays(tasks)\n    deadlines = get_completed_task_deadlines(tasks)\n    penalties = get_penalties(tasks)\n\n    steps = np.arange(0, len(system_delays))\n\n    sliding_window = get_moving_avg_array_skip_zeros(system_delays)\n\n    fig, (ax1, ax2) = plt.subplots(2, )\n    fig.subplots_adjust(hspace=0.3)\n    ax1.set_xlabel('Task ID', fontsize=20)\n    ax1.set_ylabel(r\"$t_{total,i}$ (s)\", fontsize=25)\n    ax1b = ax1.twinx()\n    color = 'tab:orange'\n    ax1b.set_ylabel(r'$t_{deadline,i}$ (s)', color=color)  # we already handled the x-label with ax1\n    lns1 = ax1b.scatter(steps, deadlines, marker=\"o\", color=color, label=r'$t_{deadline,i}$ (s)')\n    ax1b.set_ylabel(r'$t_{deadline,i}$ (s)', size=25)\n    ax1b.tick_params(axis='y', labelcolor=color)\n    ax1b.set_ylim(0, 400)\n\n    lns2 = ax1.scatter(steps, system_delays, lw=1, color=\"green\", label=r\"$t_{total,i}$ (s)\")\n    ax1.set_ylabel(r\"$t_{total,i}$ (s)\", size=\"22\")\n    lns3 = ax1.plot(steps, sliding_window, lw=2, color=\"blue\", label=r\"Moving average of $t_{total,i}$(n=10)\")\n\n    fig.legend(loc='upper right', edgecolor=\"black\", bbox_to_anchor=(0.91, 1))\n    # ax1.legend(loc='upper right', edgecolor=\"black\") # ,facecolor=\"wheat\")\n    ax1.set_xlim(0, len(system_delays))\n    ax1.set_ylim(0, 400)\n    ax1.grid()\n    negatives_removed_penalties = [penalty for penalty in penalties if penalty >= 0]\n    ax2.hist(negatives_removed_penalties, bins=16, facecolor='g', alpha=0.75, orientation=\"horizontal\",\n             weights=np.ones_like(negatives_removed_penalties) / float(len(negatives_removed_penalties)))\n    ax2.set_ylabel(r'$t_{total,i}$ (s)', fontsize=22)\n    ax2.set_xlabel('Frequency', fontsize=22)\n    ax2.set_ylim(0, 300)\n    # ax2.set_xlim(0, 0.1)\n    ax2.grid()\n    plt.title(title, fontdict=None, loc='left', pad=75,size=23)\n    fig.subplots_adjust(bottom=0.11, top=0.77, left=0.12, right=0.90,\n                        wspace=0.2, hspace=0.4)\n    plt.savefig(f\"chart/plots/system_delay_{filename}.pdf\", dpi=300, bbox_inches='tight')\n    # plt.show()\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level=getattr(logging, \"INFO\"), format=\"%(asctime)s %(levelname)s -> %(message)s\")\n    result_data = get_multiple_files(REQUEST_INTERVAL_FILE_LIST)\n    adaptive_tasks = get_specific_tasks(result_data, \"Adaptive\", \"15\")\n    adaptive_tasks.sort(key=lambda x: x.no)\n    plot_graph(adaptive_tasks, r\"ADP - Case 1.3: $1/\\lambda = 15$\",filename=\"adp_15\")\n    adaptive_tasks = get_specific_tasks(result_data, \"Adaptive\", \"10\")\n    adaptive_tasks.sort(key=lambda x: x.no)\n    
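# the remaining request-interval cases below reuse the same pipeline; only the case label and the output filename change\n    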
plot_graph(adaptive_tasks, r\"ADP - Case 1.2: $1/\\lambda = 10$\",filename=\"adp_10\")\n adaptive_tasks = get_specific_tasks(result_data, \"Adaptive\", \"5\")\n adaptive_tasks.sort(key=lambda x: x.no)\n plot_graph(adaptive_tasks, r\"ADP - Case 1.1: $1/\\lambda = 5$\",filename=\"adp_5\")\n # aggressive_tasks = get_specific_tasks(result_data, \"Aggressive\", \"5\")\n # adaptive_tasks.sort(key=lambda x: x.no)\n #\n #\n # aggressive_queue_tasks = get_specific_tasks(result_data, \"Aggressive-Wait\", \"5\")\n # adaptive_tasks.sort(key=lambda x: x.no)\n","repo_name":"onurklngc/drone-network-onos-and-mininet","sub_path":"chart/system_delay.py","file_name":"system_delay.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"16632975225","text":"def main():\n with open(\"./input.txt\") as input_file:\n input = input_file.read()\n\n diagnostic = input.split(\"\\n\")\n diagnostic.remove(\"\") # Lol\n diagnostic = [int(str, 2) for str in diagnostic]\n\n gamma = 0\n for i in reversed(range(12)):\n set = 0\n unset = 0\n for n in diagnostic:\n if n >> i & 1:\n set += 1\n else:\n unset += 1\n if set > unset:\n gamma = gamma | (1 << i)\n\n epsilon = ~gamma & 0xFFF # Only 12 bits\n print(gamma * epsilon)\n\nif __name__ == \"__main__\":\n main()","repo_name":"hochbaum/aoc2021","sub_path":"03/day03A.py","file_name":"day03A.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10051995131","text":"from collections import deque\nfrom heapq import heappop, heappush\n\nclass PathfindingAlgorithms:\n def __init__(self, path_data):\n self.path_data = path_data\n self.visited = []\n self.path = []\n \n def update_path_data(self, path_data):\n self.path_data = path_data\n\n\n # This DFS does not guarantee shortest path since the algorithm does not consider the path length while exploring\n def dfs(self, start, end): # Does not give optimal solution (when searching like this) (This is an intended behavior)\n self.visited.clear()\n self.path.clear()\n\n stack = [[start, [start]]]\n while stack:\n node, path = stack.pop()\n\n if node == end:\n self.path = path\n return self.path\n \n if node not in self.visited:\n self.visited.append(node)\n\n neighbors = self.get_neighbours(node)\n for neighbor in neighbors:\n if neighbor not in self.visited and self.path_data[neighbor[0]][neighbor[1]] != 1:\n stack.append([neighbor, path + [neighbor]])\n \n print(\"NO PATH\") # TODO handle if user decided to try mess up the program\n\n def bfs(self, start, end):\n self.visited.clear()\n self.path.clear()\n\n queue = deque([(start, [start])])\n visited_nodes = [] # List to store all visited nodes\n\n while queue:\n node, path = queue.popleft()\n\n if node not in self.visited:\n self.visited.append(node)\n visited_nodes.append(node)\n\n if node == end:\n self.path = path\n return visited_nodes # Return all visited nodes\n\n neighbors = self.get_neighbours(node)\n for neighbor in neighbors:\n if neighbor not in self.visited and self.path_data[neighbor[0]][neighbor[1]] != 1:\n queue.append((neighbor, path + [neighbor]))\n\n print(\"NO PATH\") # TODO handle if user decided to try mess up the program\n\n # For second layering, finding the shortest path with BFS iteratively \n def shortest_path(self, start, end):\n visited = []\n queue = deque([(start, [])])\n\n while queue:\n node, path = queue.popleft()\n path.append(node)\n\n if node == 
end:\n return path\n \n if node not in visited:\n visited.append(node)\n\n nei = []\n directions = [[0,1], [0,-1], [1,0], [-1,0]]\n for dir in directions:\n temp_row = node[0] + dir[0]\n temp_col = node[1] + dir[1]\n\n if 0 <= temp_row < len(self.path_data) and 0 <= temp_col < len(self.path_data[0]):\n if self.path_data[temp_row][temp_col] == 4:\n nei.append([temp_row, temp_col])\n\n for neighbor in nei:\n queue.append((neighbor, list(path))) # Create a copy of the current path\n\n print(\"Cant find any\")\n return None\n\n # Returns the neighbours of node given within self.path_data, global function\n def get_neighbours(self, node):\n neighbours = []\n directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n\n for direction in directions:\n temp_row = node[0] + direction[0]\n temp_col = node[1] + direction[1]\n\n if 0 <= temp_row < len(self.path_data) and 0 <= temp_col < len(self.path_data[0]):\n if self.path_data[temp_row][temp_col] != 1:\n neighbours.append([temp_row, temp_col])\n \n return neighbours\n","repo_name":"allenng321/PathExplora","sub_path":"src/pathfinding_algo.py","file_name":"pathfinding_algo.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"19949642834","text":"import random\nimport comet_ml\nfrom typing import List, Tuple\nimport time\nfrom copy import copy, deepcopy\nfrom comet_ml import Optimizer\nimport subprocess\nimport numpy as np\nimport shlex\nimport click\nimport os\n\n\noptimizer_config = {\n \"algorithm\": \"bayes\",\n \"spec\": {\n \"maxCombo\": 100,\n \"metric\": \"validation_loss\"\n },\n \"parameters\": {\n \"lr\": {\"min\": 2e-3, \"max\": 2e-3, \"type\": \"double\", \"scalingType\": \"loguniform\"},\n \"dropout\": {\"min\": 0., \"max\": 0.0, \"type\": \"double\", \"scalingType\": \"uniform\"},\n \"nonlinearity\": {\"type\": \"categorical\", \"values\": [\"ReLU\"]}, # \"Tanh\"\n \"hidden_dim\": {\"min\": 32, \"max\": 32, \"type\": \"integer\", \"scalingType\": \"uniform\"},\n \"num_hidden\": {\"min\": 7, \"max\": 7, \"type\": \"integer\", \"scalingType\": \"uniform\"},\n \"batch_size\": {\"type\": \"categorical\", \"values\": [\"600\"]},\n \"scheduler_type\": {\"type\": \"categorical\", \"values\": [\"CosineAnnealingLR\"]}, # , \"None\", \"ReduceLROnPlateau\"\n \"loss_function\": {\"type\": \"categorical\", \"values\": [\"CombinatorialLoss\"]},\n \"use_layer_norm\": {\"type\": \"categorical\", \"values\": [\"False\"]}, # \"True\",\n # \"mse\", \"mae\", \"energy_resolution_mse\", \"energy_resolution_sqrt\", \"energy_resolution_mse_shifted\",\n # \"energy_resolution_mse_with_mse\", \"energy_resolution_mae\"\n \"use_swa\": {\"type\": \"categorical\", \"values\": [\"False\"]}, # \"True\",\n \"optimizer_cls\": {\"type\": \"categorical\", \"values\": [\"Adam\"]}, # \"Adagrad\", \"SGD\", \"RMSprop\"\n \"init_type\": {\"type\": \"categorical\", \"values\": [\"normal\"]}, # , \"uniform\", \"orthogonal\"\n \"seed\": {\"min\": 0, \"max\": 1000, \"type\": \"integer\", \"scalingType\": \"uniform\"}\n # \"epochs\": {\"type\": \"categorical\", \"values\": [500, 1000, 2000, 3000, 4000]}\n },\n}\n\n\nbase_slurm_command = \"\"\"#!/bin/bash\nset -x\n{0}\n\"\"\"\n\nbase_command = \"\"\"python train_model.py --project_name {project_name} \\\n--work_space {work_space} --datadir {datadir} \\\n--lr {lr} --hidden_dim {hidden_dim} --num_hidden {num_hidden} \\\n--nonlinearity {nonlinearity} --scheduler_type {scheduler_type} \\\n--batch_size {batch_size} --epochs {epochs} --use_swa 
{use_swa} \\\n--optimizer_cls {optimizer_cls} --use_layer_norm {use_layer_norm} \\\n--init_type {init_type} --train_type {train_type} --loss_function {loss_function} \\\n--coeffs {coeffs} --target_variable {target_variable} --dropout {dropout}\"\"\"\n\ncommand_cluster = \"sbatch -c {0} -t {1} --gpus={2} --job-name={3} run_command.sh\"\n\n\n@click.command()\n@click.option('--slurm', type=bool, default=False)\n@click.option('--algorithm', type=str, default='random') # random bayes\n@click.option('--slurm_username', type=str, default='vbelavin')\n@click.option('--datadir', type=str, default='./')\n@click.option('--project_name', type=str, prompt='Enter project name')\n@click.option('--work_space', type=str, prompt='Enter workspace name')\n@click.option('--max_epochs', type=int, default=3000)\n@click.option('--max_processes_in_parallel', type=int, default=3)\n@click.option('--train_type', type=str, default=\"0\") # 0 20 3 23\n@click.option('--train_nets_on_one_gpu', type=int, default=10) # only for slurm\n@click.option('--target_variable', type=str, default=\"energy\") # energy, vertex\ndef run_optimization(\n project_name, work_space,\n slurm=False, datadir=\"./\", slurm_username=\"vbelavin\",\n algorithm=\"bayes\", max_processes_in_parallel=3,\n train_nets_on_one_gpu=3, max_epochs=5000, train_type=\"0\",\n target_variable=\"energy\"\n):\n optimizer_config[\"algorithm\"] = algorithm\n optimizer = Optimizer(optimizer_config, project_name=project_name)\n\n processes = []\n commands_to_run = []\n for parameters in optimizer.get_parameters():\n x = np.diff(np.sort(np.random.uniform(size=5)))\n x = x / x.sum()\n command_to_run = base_command.format(\n epochs=max_epochs,\n project_name=project_name,\n work_space=work_space,\n datadir=datadir,\n train_type=train_type,\n target_variable=target_variable,\n coeffs=\"{},{},{},{}\".format(x[0], x[1], x[2], x[3]),\n **parameters[\"parameters\"]\n )\n print(command_to_run)\n commands_to_run.append(command_to_run)\n\n # running on slurm\n if slurm:\n pr_count = subprocess.Popen(\"squeue | grep {} | wc -l\".format(slurm_username), shell=True, stdout=subprocess.PIPE)\n out, err = pr_count.communicate()\n while int(out) > max_processes_in_parallel:\n print(\"Waiting... \")\n time.sleep(60)\n pr_count = subprocess.Popen(\"squeue | grep {} | wc -l\".format(slurm_username), shell=True, stdout=subprocess.PIPE)\n out, err = pr_count.communicate()\n if len(commands_to_run) >= train_nets_on_one_gpu:\n with open(\"run_command.sh\", \"w\") as file:\n file.write(base_slurm_command.format(\" &\\n\".join(commands_to_run) + \" &\\nwait\"))\n process = subprocess.Popen(\n command_cluster.format(4, 60 * 30, 1, \"juno_dense_net_opt\"), # 3 cpu, 30 hours, 1 gpu\n shell=True,\n close_fds=True,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL\n )\n processes.append(process)\n commands_to_run = []\n\n # running on local machine via Popen\n else:\n while len(processes) > max_processes_in_parallel:\n print(\"Waiting... 
\")\n time.sleep(60)\n processes_after_cleaning = []\n for process in processes:\n poll = process.poll()\n if poll is not None:\n processes_after_cleaning.append(process)\n processes = processes_after_cleaning\n command_to_run = shlex.split(command_to_run)\n process = subprocess.Popen(\n command_to_run,\n shell=False,\n close_fds=True,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n preexec_fn=os.setsid\n )\n processes.append(process)\n commands_to_run = []\n\n\nif __name__ == \"__main__\":\n run_optimization()\n","repo_name":"SchattenGenie/juno_dense_net","sub_path":"run_optimization.py","file_name":"run_optimization.py","file_ext":"py","file_size_in_byte":6338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30761019312","text":"# for i in range(3):\r\n# print(i)\r\n\r\nimport os\r\nimport numpy as np\r\nimport muluzai as mulu\r\n\r\n\r\ndef bianshuju(path,weidu,guanjianzi,guanjianzi_1):\r\n\r\n for mulu in os.listdir(path):#每个循环要打开一个大文件,C064L,C064R...\r\n\r\n input_dir = os.path.join(path, mulu, guanjianzi)#装特征值的文件夹的地址\r\n\r\n save_path = os.path.join(input_dir, guanjianzi_1)#转换之后的数据都放在这个文件夹里面\r\n\r\n mulu.mkdir(save_path)\r\n\r\n for tezhenzhi in os.listdir(input_dir):#每个循环要处理一个特征值文件\r\n\r\n input_dir_1 = os.path.join(input_dir,tezhenzhi)#特征值的地址\r\n\r\n print(input_dir_1)\r\n\r\n f = open(input_dir_1, 'r')\r\n a = np.loadtxt(f, delimiter=',', skiprows=0, )\r\n\r\n X = a[:, 1:None]\r\n label = a[:, 0:1]\r\n\r\n cishu = int((X.shape[1])/weidu)#\r\n\r\n save_path_1 = os.path.join(save_path, tezhenzhi)#block序号还没有加上去的特征值的地址(转化之后)\r\n\r\n block_index = 1#每个文件会产生很多个block,这个是block序号\r\n\r\n for xinhao in X:#每一循环产生一个大block的数据,每一个大block作为一个单独的数据保存\r\n\r\n banyun = []\r\n start = 0\r\n\r\n for cishu_1 in range(weidu):#每一次循环产生一次傅里叶变换的结果\r\n\r\n print(xinhao[start:start+cishu])\r\n\r\n banyun.append(xinhao[start:start+cishu])\r\n\r\n start = start+cishu\r\n\r\n (filepath, tempfilename) = os.path.split(save_path_1)#为了得到带序号的路径\r\n\r\n save_path_2 = os.path.join(filepath+'_'+ str(block_index) + '.csv')\r\n\r\n np.savez(normalized_feat_file, X_train, Y_train, X_test, Y_test) # 把矩阵保存成npz文件\r\n\r\n # np.savetxt(, newtezheng, delimiter=',')\r\n\r\n banyun = np.array(banyun)\r\n\r\n block_index += 1\r\n\r\n print(banyun)\r\n\r\n print(banyun.shape)\r\n\r\n os.system('pause')\r\n\r\n\r\n\r\nbianshuju(path=r'C:\\Users\\a7825\\Desktop\\新建文件夹\\新建文件夹', weidu = 40, guanjianzi='mizhichuli_biaoqian_pingheng', guanjianzi_1='bianshuju')","repo_name":"shuyuqing/-","sub_path":"ceshi/rang.py","file_name":"rang.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"40320749291","text":"from vinfra.api import base\nfrom vinfra.consts import missing\n\n\nclass AbstractPerClusterMemoryPoliciesManager(base.VinfraApi):\n\n url_template = ''\n\n def __init__(self, api, cluster):\n super(AbstractPerClusterMemoryPoliciesManager, self).__init__(api)\n self.url_arguments = {\n 'cluster_id': base.get_id(cluster),\n }\n\n def base_url(self):\n return self.url_template.format(**self.url_arguments)\n\n def show_params(self):\n return self.client.get(self.base_url())\n\n def reset_params(self):\n return self.client.delete(self.base_url())\n\n def change_params(self, guarantee=missing, swap=missing,\n cache_ratio=missing, cache_minimum=missing,\n cache_maximum=missing):\n\n def cache():\n if cache_ratio is None and cache_minimum is None and cache_maximum is 
None:\n return None\n\n rv = {}\n\n if cache_ratio is not missing:\n rv['ratio'] = cache_ratio\n\n if cache_minimum is not missing:\n rv['minimum'] = cache_minimum\n\n if cache_maximum is not missing:\n rv['maximum'] = cache_maximum\n\n return rv or missing\n\n json = {}\n\n if guarantee is not missing:\n json['guarantee'] = guarantee\n\n if swap is not missing:\n json['swap'] = swap\n\n cache_val = cache()\n if cache_val is not missing:\n json['cache'] = cache_val\n\n return self.client.put(self.base_url(), json=json)\n\n\nclass PerClusterMemoryPoliciesManager(AbstractPerClusterMemoryPoliciesManager):\n\n url_template = '/{cluster_id}/memory-policies/vstorage-services/'\n\n\nclass PerNodeMemoryPoliciesManager(AbstractPerClusterMemoryPoliciesManager):\n\n url_template = '/{cluster_id}/memory-policies/vstorage-services/nodes/{node_id}/'\n\n def __init__(self, api, cluster, node):\n super(PerNodeMemoryPoliciesManager, self).__init__(api, cluster)\n self.url_arguments.update({\n 'node_id': base.get_id(node),\n })\n","repo_name":"acronis/vinfra","sub_path":"vinfra/api/memory_policies/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"27987190671","text":"from django.urls import path\nfrom .views import (categoryGet,\n cityGet,\n userinfoGet,\n addCategory,\n addCity,\n addUserinfo,\n updateCategory,\n updateCity,\n updateUserInfo,\n deleteCategory,\n deleteCity,\n deleteUserInfo,\n)\nurlpatterns = [\n path('category/',categoryGet),\n path('city/',cityGet),\n path('userinfo/',userinfoGet),\n path('add_category',addCategory),\n path('add_city',addCity),\n path('add_userinfo',addUserinfo),\n path('update_category//',updateCategory),\n path('update_city//',updateCity),\n path('update_userinfo//',updateUserInfo),\n path('delete_category//',deleteCategory),\n path('delete_city//',deleteCity),\n path('delete_userinfo//',deleteUserInfo),\n]\n","repo_name":"Nasir-gaayte/Call_book_reat_api","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72141779560","text":"from Application.Simulation import *\nfrom Application.DistributionFactory import *\nimport os\nfrom queue import Queue\nimport sys\nimport argparse\n\nif __name__ == '__main__':\n\n # function callbacks are resolved recursively\n # there can be thousands of function in one interaction\n sys.setrecursionlimit(10**6)\n\n # these path should be changed\n schemaPath = os.path.join(os.path.dirname(__file__), \"./Application/files/profile_schema.json\")\n serviceSchemaPath = os.path.join(os.path.dirname(__file__), \"./Application/files/service_schema.json\")\n\n # parse user input\n parser = argparse.ArgumentParser(description='program name, path to util log, path to network log')\n parser.add_argument('-p', type=str, help='relativ path to app profile', required=True)\n parser.add_argument('-s', type=str, help='relativ path to service definition', required=True)\n parser.add_argument('-m', type=str, help='relativ path to mapping', required=True)\n parser.add_argument('-d', type=str, help='relativ path to distribution request', required=True)\n\n args = parser.parse_args()\n\n servicePath = os.path.join(os.path.dirname(__file__), args.s)\n profilePath = os.path.join(os.path.dirname(__file__), args.p)\n mappingPath = os.path.join(os.path.dirname(__file__), args.m)\n\n # 
the standard seed, so simulations are reproducible\n    # seed is used by Distribution factory\n    seed = 435234\n\n    # resolve distribution requests\n    # only triangle distributions are supported right now\n    # can be expanded analog to triangle\n    p = DistributionFactory.getContentFromFile(os.path.join(os.path.dirname(__file__), args.d))\n    distRequest = []\n    for req in p:\n        if req[\"kind\"] == \"triangle\":\n            dist = (np.random.triangular(req[\"start\"] * 1E9, req[\"highpoint\"] * 1E9, req[\"end\"] * 1E9, req[\"volume\"]),\n                    req[\"scenarioID\"])\n            distRequest.append(dist)\n\n\n    schema = DistributionFactory.getContentFromFile(schemaPath)\n    serviceSchema = DistributionFactory.getContentFromFile(serviceSchemaPath)\n\n    profile = DistributionFactory.getContentFromFile(profilePath)\n    specialMapping = DistributionFactory.getContentFromFile(mappingPath)\n    mapping = DistributionFactory.genMapping(profile, specialMapping)\n    service = DistributionFactory.getContentFromFile(servicePath)\n\n    sim = Simulation(schema, serviceSchema)\n    # https://json-schema-validator.herokuapp.com/\n    sim.main(profile, mapping, service, distRequest)\n    #DistributionFactory.createNetworkGraph(profile, mapping)\n    # save observation queue as json to be visualized in dashboard\n    res = sim.saveOberservations()\n    # plot observation results with matplotlib.pyplot\n    sim.plotResults(res)\n","repo_name":"Askill/DSPS","sub_path":"purePy - Simulation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11235643551","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom .common_layers import *\n\n\ndef embedding(ids, vocab_size, embedding_size, name='embedding', reuse=tf.AUTO_REUSE, pad_id=0, scale_sqrt_depth=True,\n              pretrain_embedding=None, pretrain_trainable=True, word_dropout_rate=0.):\n    \"\"\" embedding \"\"\"\n    # ids 3-D Tensor [batch, length, 1]\n    if pretrain_embedding is None:\n        with tf.variable_scope(name, reuse=reuse):\n            var = tf.get_variable('weights', [vocab_size, embedding_size],  # [vocab,embed]\n                                  initializer=tf.random_normal_initializer(0.0, embedding_size ** -0.5))\n    else:\n        with tf.variable_scope(name, reuse=reuse):\n            var = tf.get_variable('weights', [vocab_size, embedding_size],  # [vocab,embed]\n                                  trainable=pretrain_trainable,\n                                  initializer=tf.constant_initializer(pretrain_embedding, dtype=tf.float32))\n\n    # word level drop out\n    if word_dropout_rate:\n        ids = dropout_no_scaling(ids, 1.0 - word_dropout_rate)  # randomly zero out some ids, i.e. turn those words into pad\n\n    # lookup table\n    embedding = tf.gather(var, ids)  # [batch,length,1,hidden]\n    embedding = tf.squeeze(embedding, axis=-2)  # [batch,length,hidden]\n    if scale_sqrt_depth:\n        embedding *= embedding_size ** 0.5\n    embedding = embedding * tf.to_float(tf.not_equal(ids, pad_id))  # zero out the embeddings of pad positions (id=0)\n    return embedding, var\n\n\ndef proj_logits(outputs, hidden_size, logit_size, name='proj_logits', reuse=tf.AUTO_REUSE):\n    \"\"\" if name = 'embedding', reuse the embedding matrix\n    outputs [batch, length, hidden] or [batch, hidden]\n    \"\"\"\n\n    with tf.variable_scope(name, reuse=reuse):\n        var = tf.get_variable('weights', [logit_size, hidden_size],  # [vocab,hidden]\n                              initializer=tf.random_normal_initializer(0.0, hidden_size ** -0.5))\n\n    outputs_shape = shape_list(outputs)  # [batch, length, hidden]\n    outputs = tf.reshape(outputs, [-1, outputs_shape[-1]])  # [batch*length,hidden]\n    logits = tf.matmul(outputs, var, transpose_b=True)  # x,h * h,l -> x,l\n    logits = tf.reshape(logits, outputs_shape[:-1] + 
[logit_size]) # [batch,length,vocab]\n\n return logits\n","repo_name":"Qznan/QizNLP","sub_path":"qiznlp/common/modules/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"18"} +{"seq_id":"37302908154","text":"n, a = map(int, input().split())\n\nw = []\nx = []\nv = []\n\nfor _ in range(n):\n cw, cx, cv = map(int, input().split())\n w.append(cw)\n x.append(cx)\n v.append(cv)\n\ndef getVal(t, x, v, w, n, a):\n vals = sorted([(x[i] + t * v[i], w[i]) for i in range(n)])\n i = 0\n res = 0\n for j in range(n):\n while vals[j][0] - vals[i][0] > a:\n i += 1\n if vals[j][0] - vals[i][0] <= a:\n curr = 0\n for x in range(i, j + 1):\n curr += vals[x][1]\n res = max(res, curr)\n return res\n\nbeg = 0\nend = 10000\nres = 0\n\nwhile beg < end:\n mid = (beg + end) // 2\n mid1val = getVal(mid, x, v, w, n, a)\n mid2val = getVal(mid + 1, x, v, w, n, a)\n if mid1val >= mid2val:\n res = max(res, mid1val)\n end = mid - 1\n else:\n res = max(res, mid2val)\n beg = mid + 1\n\nprint(res)","repo_name":"theabbie/leetcode","sub_path":"miscellaneous/catch_fish.py","file_name":"catch_fish.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"72655480039","text":"import aocd, itertools as it\n\ndata = aocd.get_data(year=2021, day=20)\n\n# Split up data\nalgo, image = data.replace(\".\", \"0\").replace(\"#\", \"1\").split(\"\\n\\n\")\nimage = image.split(\"\\n\")\n\n# Create a dict representation of the grid\ngrid = {(y, x): image[y][x] for y in range(len(image)) for x in range(len(image))}\n\n# Add border to the grid, so we can expand into it properly\nfor y, x in it.product(range(-102, len(image) + 103), repeat=2): # A double for loop\n grid[(y, x)] = grid.get((y, x), \"0\")\n\n# Get all the values around the given grid point\ndef area(g, y, x):\n return [g.get((y + dy, x + dx), \"0\") for dy in [-1, 0, 1] for dx in [-1, 0, 1]]\n\n\n# Check if the point is part of the end grid we care about\nin_grid = lambda xy: -51 < xy < len(image) + 50\n\n# Count the number of hashes (or 1s in my representation) in the grid\ncount = lambda g: [g[(y, x)] for y, x in g if in_grid(y) and in_grid(x)].count(\"1\")\n\n# Expand the grid\nfor step in range(50):\n # This line:\n # - Gets the area around a point\n # - Turns that into an integer\n # - Gets the new value from the image enhancement algorithm\n # - Places the new value in the updated grid\n grid = {(y, x): algo[int(\"\".join(area(grid, y, x)), 2)] for y, x in grid}\n # Show answers\n print(count(grid)) if step in [1, 49] else None\n","repo_name":"LomaxOnTheRun/advent-of-code","sub_path":"2021/day_20/shortest_code.py","file_name":"shortest_code.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35245074464","text":"import streamlit as st\r\nimport pandas as pd\r\nimport geopandas as gpd\r\nimport mercantile\r\nimport os\r\nfrom shapely.geometry import Polygon, shape\r\nfrom pathlib import Path\r\nfrom zipfile import ZipFile\r\nimport zipfile\r\nimport pathlib\r\nimport tempfile\r\nimport shapely\r\nimport numpy as np\r\nimport fiona\r\n\r\n\r\n\r\n## set up Layout\r\nst.set_page_config(\r\n page_title=\"MICROSOFT OPEN GLOBAL FOOTPRINT DATA\",\r\n layout=\"wide\",\r\n initial_sidebar_state=\"expanded\",)\r\nprmsContainer = 
st.experimental_get_query_params()\r\n\r\n## Function to save an uploaded file\r\ndef Save_Uploaded_File (Uploadedfile, save_folder):\r\n    save_path = Path(save_folder, Uploadedfile.name)\r\n    with open(save_path, mode='wb') as w:\r\n        w.write(Uploadedfile.getbuffer())\r\n    return\r\n## Function to prepare the download file\r\ndef save_shapefile_with_bytesio(dataframe,directory):\r\n    dataframe.to_file(f\"{directory}/Footprint.shp\", driver='ESRI Shapefile')\r\n    zipObj = ZipFile(f\"{directory}/Footprint.zip\", 'w')\r\n    zipObj.write(f\"{directory}/Footprint.shp\",arcname = 'Footprint.shp')\r\n    zipObj.write(f\"{directory}/Footprint.cpg\",arcname = 'Footprint.cpg')\r\n    zipObj.write(f\"{directory}/Footprint.dbf\",arcname = 'Footprint.dbf')\r\n    zipObj.write(f\"{directory}/Footprint.prj\",arcname = 'Footprint.prj')\r\n    zipObj.write(f\"{directory}/Footprint.shx\",arcname = 'Footprint.shx')\r\n    zipObj.close()\r\n    \r\n    \r\nMain = st.container()\r\n\r\ncol1, col2 = st.columns((5,5))\r\n\r\nMain.header(\"GET FOOTPRINT GLOBAL DATA FORM MICROSOFT OPEN SOURCE\")\r\n\r\nif len (prmsContainer) != 0:\r\n    Files = prmsContainer['file'][0]\r\nelse:\r\n    Files = col1.file_uploader(\"Import Research Boundary: \",accept_multiple_files=True)\r\n\r\nif list (Files) == []: col1.write(\"Import Your Boundary (*.shp) !\")\r\nelse:\r\n    with tempfile.TemporaryDirectory() as tmp1: \r\n        if len(prmsContainer) == 0:\r\n            for File in Files:\r\n                Save_Uploaded_File(File, tmp1)\r\n                Name = File.name[0:File.name.find(\".\")]\r\n                End = File.name[File.name.find(\".\")+1:len(File.name)]\r\n                geoFileName = Name + \".shp\"\r\n        else: geoFileName = Files \r\n        geoFile = f\"{tmp1}/{geoFileName}\"\r\n        \r\n        Ranh = gpd.read_file(geoFile)\r\n\r\n        minx = Ranh.bounds.minx[0]\r\n        miny = Ranh.bounds.miny[0]\r\n        maxx = Ranh.bounds.maxx[0]\r\n        maxy = Ranh.bounds.maxy[0]\r\n        \r\n        \r\n        buffer = gpd.GeoSeries([Ranh.loc[0].geometry]).buffer(0.001)\r\n        Ranhbuffer = gpd.GeoDataFrame(geometry = buffer,crs=\"EPSG:4326\")\r\n        \r\n        st.write(\"Boundary coordinates:\", minx, miny, maxx, maxy) \r\n        st.write(\"Boundary :\", Ranhbuffer)\r\n        \r\n        if col2.button (\"Get data\"):\r\n            quad_keys = set()\r\n            \r\n            for tile in list(mercantile.tiles(minx, miny, maxx, maxy, zooms=9)):\r\n                quad_keys.add(int(mercantile.quadkey(tile)))\r\n            quad_keys = list(quad_keys)\r\n            dataset_links = pd.read_csv(\"https://raw.githubusercontent.com/HungThang95/MGFData/main/dataset-links.csv\")\r\n            links = dataset_links[dataset_links.QuadKey.isin (quad_keys)]\r\n\r\n            geotemp = []\r\n            for _, row in links.iterrows():\r\n                df = pd.read_json(row.Url, lines=True)\r\n                df['geometry'] = df['geometry'].apply(shape)\r\n                gdf = gpd.GeoDataFrame(df, crs=4326)\r\n                gdf1 = gpd.overlay(gdf,Ranhbuffer,how='intersection')\r\n                \r\n                for i in range(0,len(gdf1.geometry)):\r\n                    newrow = gpd.GeoSeries([gdf1.loc[i].geometry])\r\n                    if newrow.within(Ranh.geometry).values[0] == True:\r\n                        geotemp.append(newrow.values[0])\r\n                \r\n            CT = gpd.GeoDataFrame(geometry=geotemp, crs=\"EPSG:4326\")\r\n            col2.write(len(CT.geometry))\r\n            col2.success('Success!!', icon=\"✅\")\r\n            with tempfile.TemporaryDirectory() as tmp:\r\n                save_shapefile_with_bytesio(CT,tmp)\r\n                with open(f\"{tmp}/Footprint.zip\", \"rb\") as file:\r\n                    col2.download_button(\r\n                        label=\"Download data\",\r\n                        data=file,\r\n                        file_name='Footprint.zip',\r\n                        mime='application/zip',\r\n                        )\r\n","repo_name":"HungThang95/MGFData","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25625193206","text":"# -*- 
encoding:utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport os\n\nfrom settings import *\n\n# heap analytics tracking for production\nHEAP_APP_ID = '3053705704'\n\nDRIVER_APP_URL = 'http://app.alltaxi.com.s3-website-us-east-1.amazonaws.com'\nOWNER_APP_URL = 'http://owner.alltaxi.com.s3-website-us-east-1.amazonaws.com'\n\nALLOWED_HOSTS = ['alltaxi.herokuapp.com']\n\n# Allow cross origin requests from these domains\nCORS_ORIGIN_WHITELIST = (\n 'app.alltaxi.com.s3-website-us-east-1.amazonaws.com',\n 'owner.alltaxi.com.s3-website-us-east-1.amazonaws.com',\n)\n\nSECRET_KEY = os.getenv('SECRET_KEY')\n\nQUEUE_IMPLEMENTATION = 'RealQueue'\nSMS_IMPLEMENTATION = 'TwilioRestClient'\nTLC_DATA_IMPLEMENTATION = 'Socrata'\n\n# We're not using SSL at the moment. No payments happening.\nSSLIFY_DISABLE = True\nCSRF_COOKIE_SECURE = False # if True, only sends the CSRF token over HTTPS\nSESSION_COOKIE_SECURE = False # if True, only sends session cookie over HTTPS\n\nDEFAULT_FROM_EMAIL = 'drivers@alltaxiny.com'\nOPS_EMAIL = 'drivers@alltaxiny.com'\n\nPAYMENT_GATEWAY_NAME = 'braintree'\nBRAINTREE_BASE_URL = 'www.braintreegateway.com'\n","repo_name":"JeremyParker/idlecars-backend","sub_path":"idlecars/production_settings.py","file_name":"production_settings.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"18061628673","text":"import psycopg2\nfrom time import time\nfrom typing import Any\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport pandas as pd\n\n\n\ndef time_decorator(func):\n \"\"\"compute performance in s\"\"\"\n def inner(*args: Any, **kwds: Any):\n \"\"\"inner function of perf function\"\"\"\n \"\"\"eliminate args\"\"\"\n init = time()\n result = func(*args)\n end = time()\n total = end - init\n print(f\"==>Function {func.__name__} took : {total}s\")\n return result\n return inner\n\n\ndef is_table(cur, name) -> bool:\n \"\"\"check if name is already a table in DB\"\"\"\n cur.execute(\"\"\"\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = 'public';\n \"\"\")\n data_tables = cur.fetchall()\n for elem in data_tables:\n if elem.count(name) != 0:\n print(f\"{name} already exists\")\n return True\n return False\n\n\ndef extract_print_data(cur):\n \"\"\"function that show average spent/customers\"\"\"\n cur.execute(\"\"\"select price\nfrom customers\nwhere event_type = 'purchase' ;\n\"\"\")\n datas = cur.fetchall()\n data = pd.DataFrame(datas)\n \n quartiles = data[0].quantile([0.25, 0.5, 0.75])\n print()\n print(f\"count {data[0].count()}\")\n print(f\"mean {data[0].mean()}\")\n print(f\"std\t{data[0].std()}\")\n print(f\"min\t{data[0].min()}\")\n print(f\"25%\t{quartiles[0.25]}\")\n print(f\"50%\t{quartiles[0.50]}\")\n print(f\"75%\t{quartiles[0.75]}\")\n print(f\"max\t{data[0].max()}\")\n return data[0]\n\n\ndef show_boxplot_prices(data):\n plt.boxplot(data, vert=False)\n plt.xlabel('price')\n plt.show()\n \n plt.boxplot(data, vert=False)\n plt.xlabel('price')\n plt.xlim(0, 13)\n plt.show()\n return\n\n\ndef show_avg_boxplt(cur):\n cur.execute(\"\"\"select avg(subquery.total_sum), user_id from (\n\tselect sum(price) as total_sum, user_id, user_session\n\tfrom customers\n\twhere event_type = 'purchase'\n\tgroup by user_id, user_session\n) as subquery\ngroup by user_id\n\"\"\")\n datas = cur.fetchall()\n data = pd.DataFrame(datas)\n data_avg = data[0]\n\n print(data_avg.describe())\n\n plt.boxplot(data_avg, vert=False)\n 
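# horizontal box plot of each user's average basket value; the x-axis is limited to 0-70 below for readability\n    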
plt.xlabel('price')\n    plt.xlim(0, 70)\n    plt.show()\n    return\n\n\n@time_decorator\ndef main():\n    \"\"\"Main function of the program\"\"\"\n    try:\n        conn = psycopg2.connect(\n            host=\"localhost\",\n            database=\"piscineds\",\n            user=\"nlesage\",\n            password=\"mysecretpassword\")\n        print('Connected to DB')\n        conn.autocommit = True\n        cur = conn.cursor()\n        if is_table(cur, 'customers') is True:\n            # data = extract_print_data(cur)\n            # show_boxplot_prices(data)\n            show_avg_boxplt(cur)\n\n        cur.close()\n    except (Exception, psycopg2.DatabaseError) as error:\n        print(error)\n        conn = None\n    finally:\n        if conn is not None:\n            conn.close()\n            print('Database connection closed.')\n    return\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"nico6106/PiscineDataScience","sub_path":"Data Science/DataScience02/ex02/mustache.py","file_name":"mustache.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37013625628","text":"import PISM\nimport math\nfrom PISM import util, model\n\n# Conversion from command-line arguments to classes of SSA solver.\nSSAAlgorithms = {\"fem\": PISM.SSAFEM, \"fd\": PISM.SSAFD}\n\nclass SSARun(object):\n\n    \"\"\"Mediates solving PISM's SSA model from a minimal set of data, without the construction of an :cpp:class:`iceModel`.\n    It codifies the steps needed to put together the data for an SSA run; subclasses do the work of\n    implementing the steps in :meth:`_setFromOptions`, :meth:`_initGrid`, etc. Uses include:\n\n    * Running SSA test cases.\n    * Running the SSA in standalone mode (e.g. via :command:`ssaforward.py`)\n    * The SSA inversion code.\n\n    Usage: After construction (of a subclass),\n\n    1. Call :meth:`setup` to run through the various\n       steps needed to set up an environment for solving the SSA.\n    2. Solve the SSA with :meth:`solve`.\n    3. Optionally write the model vectors and solution to a file with :meth:`write`.\"\"\"\n\n    def __init__(self):\n        \"\"\"Do little constructor. Real work is done by :meth:`setup` which should be called prior to :meth:`solve`.\"\"\"\n        self.grid = None  #: The computation grid; will be set by :meth:`_initGrid`\n        self.config = None  #: Placeholder for config dictionary; set indirectly by :meth:`_constructModelData`\n\n        #: Instance of :class:`PISM.model.ModelData` that stores all data needed for solving the SSA. Much of the work of\n        #: the :class:`SSARun` is involved in setting up this object. Tasks include setting up :cpp:class:IceModelVec\n        #: variables as well as model physics (e.g. :cpp:class:`EnthalpyConverter`).\n        self.modeldata = None\n        self.ssa = None  #: Subclass of :cpp:class:`SSA` that solves the SSA.\n\n    def setup(self):\n        \"\"\"Orchestrates the steps of setting up an environment for running the SSA. The following methods\n        are called in order, and should be implemented by a subclass.\n\n        1. :meth:`_setFromOptions` to set any parameters from command-line options\n        2. :meth:`_initGrid` to determine the computation grid, to be stored as :attr:`grid`\n        3. :meth:`_constructModelData` provide a :class:`ModelData` object (a default implementation is provided)\n        4. :meth:`_initPhysics` to set the non-vec members of the :class:`ModelData`, e.g. the :cpp:class:`EnthalpyConverter`.\n        5. :meth:`_constructSSA` to build the actual subclass of :cpp:class:`SSA` that will be used to solve the SSA\n        6. :meth:`_initSSACoefficients` enter all of the vecs needed for solving the SSA into the :class:`ModelData`.\n        7. 
:meth:`_initSSA` initialize the :cpp:class:`SSA` returned in step 5\n \"\"\"\n self._setFromOptions()\n\n self._initGrid()\n if self.grid is None:\n raise RuntimeError(\"SSARun failed to provide a grid.\")\n\n self.modeldata = self._constructModelData()\n if self.modeldata is None:\n raise RuntimeError(\"SSARun._constructModelData failed to provide a ModelData.\")\n self.config = self.modeldata.config\n\n self._initPhysics()\n if self.modeldata.enthalpyconverter is None:\n raise RuntimeError(\"SSARun._initPhysics failed to initialize the physics of the underlying SSA solver.\")\n\n self.ssa = self._constructSSA()\n if self.ssa is None:\n raise RuntimeError(\"SSARun._constructSSA failed to provide an SSA.\")\n\n self._initSSACoefficients()\n # FIXME: is there a reasonable check to do here?\n\n self._initSSA()\n\n def solve(self):\n \"\"\"Solve the SSA by calling the underlying PISM :cpp:class:`SSA`'s\n :cpp:member:`update` method. Returns the solution vector (owned by\n self.ssa, but you should not need to know about ownership).\n\n \"\"\"\n vecs = self.modeldata.vecs\n\n # make sure vecs is locked!\n self.ssa.init()\n\n melange_back_pressure = PISM.IceModelVec2S(self.grid, \"melange_back_pressure\",\n PISM.WITHOUT_GHOSTS)\n melange_back_pressure.set_attrs(\"diagnostic\",\n \"melange back pressure fraction\", \"1\", \"1\", \"\", 0)\n melange_back_pressure.set(0.0)\n\n PISM.verbPrintf(2, self.grid.com, \"* Solving the SSA stress balance ...\\n\")\n\n full_update = True\n\n inputs = PISM.StressBalanceInputs()\n inputs.melange_back_pressure = melange_back_pressure\n inputs.geometry = self.geometry\n inputs.enthalpy = vecs.enthalpy\n inputs.basal_yield_stress = vecs.tauc\n if vecs.has('vel_bc'):\n inputs.bc_mask = vecs.vel_bc_mask\n inputs.bc_values = vecs.vel_bc\n\n self.ssa.update(inputs, full_update)\n\n return self.ssa.velocity()\n\n def write(self, filename, append=False):\n \"\"\"Saves all of :attr:`modeldata`'s vecs (and the solution) to an\n output file.\"\"\"\n grid = self.grid\n vecs = self.modeldata.vecs\n\n if not append:\n pio = util.prepare_output(filename)\n pio.close()\n\n # Save time & command line\n util.writeProvenance(filename)\n\n vel_ssa = self.ssa.velocity()\n vecs.add(vel_ssa)\n\n velbar_mag = model.createCBarVec(self.grid)\n PISM.compute_magnitude(vel_ssa, velbar_mag)\n PISM.apply_mask(vecs.thk, util.convert(-0.01, \"m/year\", \"m/second\"),\n velbar_mag)\n vecs.add(velbar_mag)\n\n taud = PISM.SSA_taud(self.ssa).compute()\n vecs.add(taud)\n\n try:\n nuH = PISM.SSAFD_nuH(self.ssa).compute()\n vecs.add(nuH)\n except:\n pass\n\n taud_mag = PISM.SSA_taud_mag(self.ssa).compute()\n vecs.add(taud_mag)\n\n vecs.writeall(filename)\n\n def _setFromOptions(self):\n \"\"\"Optionally override to set any data from command line variables.\"\"\"\n pass\n\n def _constructModelData(self):\n \"\"\"Optionally override to return a custom :class:`PISM.model.ModelData` instance.\"\"\"\n return model.ModelData(self.grid)\n\n def _initGrid(self):\n \"\"\"Override to return the computation grid.\"\"\"\n raise NotImplementedError()\n\n def _initPhysics(self):\n \"\"\"Override to set the non-var parts of :attr:`modeldata` (e.g. 
the basal yield stress model and the enthalpy converter)\"\"\"\n raise NotImplementedError()\n\n def _allocStdSSACoefficients(self):\n \"\"\"Helper method that allocates the standard :cpp:class:`IceModelVec` variables used to solve the SSA and stores them\n in :attr:`modeldata` ``.vecs``:\n\n * ``surface``\n * ``thickness``\n * ``bed``\n * ``tauc``\n * ``enthalpy``\n * ``mask``\n * ``age`` if -age is given\n\n Intended to be called from custom implementations of :meth:`_initSSACoefficients` if desired.\"\"\"\n vecs = self.modeldata.vecs\n grid = self.grid\n\n self.geometry = PISM.Geometry(grid)\n geometry = self.geometry\n\n vecs.add(geometry.ice_surface_elevation)\n vecs.add(geometry.ice_thickness)\n vecs.add(geometry.bed_elevation)\n vecs.add(geometry.sea_level_elevation)\n vecs.add(geometry.cell_type)\n vecs.add(model.createYieldStressVec(grid), 'tauc')\n vecs.add(model.createEnthalpyVec(grid), 'enthalpy')\n\n # The SIA model might need the \"age\" field\n if grid.ctx().config().get_flag(\"age.enabled\"):\n vecs.add(model.createAgeVec(grid), \"age\")\n\n def _allocateBCs(self, velname='_bc', maskname='vel_bc_mask'):\n \"\"\"Helper method that allocates standard Dirichlet data\n :cpp:class:`IceModelVec` variables and stores them in\n :attr:`modeldata` ``.vecs``:\n\n * ``vel_bc``\n * ``vel_bc_mask``\n\n \"\"\"\n vecs = self.modeldata.vecs\n vecs.add(model.create2dVelocityVec(self.grid,\n name=velname,\n desc='SSA velocity boundary condition',\n intent='intent'),\n \"vel_bc\")\n vecs.add(model.createBCMaskVec(self.grid, name=maskname),\n \"vel_bc_mask\")\n\n def _initSSACoefficients(self):\n \"\"\"Override to allocate and initialize all :cpp:class:`IceModelVec` variables in :attr:`modeldata` ``.vecs``\n needed for solving the SSA.\"\"\"\n raise NotImplementedError()\n\n def _constructSSA(self):\n \"\"\"Optionally override to return an instance of :cpp:class:`SSA` (e.g. :cpp:class:`SSAFD` or :cpp:class:`SSAFEM`)\n that will be used for solving the SSA.\"\"\"\n md = self.modeldata\n return SSAAlgorithms[md.config.get_string(\"stress_balance.ssa.method\")](md.grid)\n\n def _initSSA(self):\n \"\"\"Optionally perform any final initialization of :attr:`ssa`.\"\"\"\n pass\n\n\nclass SSAExactTestCase(SSARun):\n\n \"\"\"Base class for implementation of specific SSA test cases. Provides a mechanism for comparing\n computed and exact values. Simply construct with a grid size and then call :meth:`run`\"\"\"\n\n def __init__(self, Mx, My):\n \"\"\"Initialize with a grid of the specified size.\"\"\"\n SSARun.__init__(self)\n self.Mx = Mx\n self.My = My\n\n # For convenience, provide a grid. 
It will get initialized later\n # on when _initGrid is called by our setup method.\n self.grid = None\n\n def run(self, output_file):\n \"\"\"Main command intended to be called by whatever code executes the test case.\n Calls :meth:`setup`, :meth:`solve`, :meth:`report`, and :meth:`write`.\"\"\"\n self.setup()\n self.solve()\n self.report()\n\n config = self.modeldata.config\n if config.get_string(\"output.size\") != \"none\":\n self.write(output_file)\n\n def report(self):\n \"\"\"Compares computed and exact solution values and displays a summary report.\"\"\"\n grid = self.grid\n\n ssa_stdout = self.ssa.stdout_report()\n PISM.verbPrintf(3, grid.com, ssa_stdout)\n\n maxvecerr = 0.0\n avvecerr = 0.0\n avuerr = 0.0\n avverr = 0.0\n maxuerr = 0.0\n maxverr = 0.0\n\n if (self.config.get_flag(\"basal_resistance.pseudo_plastic.enabled\") and\n self.config.get_number(\"basal_resistance.pseudo_plastic.q\") != 1.0):\n PISM.verbPrintf(1, grid.com, \"WARNING: numerical errors not valid for pseudo-plastic till\\n\")\n PISM.verbPrintf(1, grid.com, \"NUMERICAL ERRORS in velocity relative to exact solution:\\n\")\n\n vel_ssa = self.ssa.velocity()\n\n vel_ssa.begin_access()\n\n exactvelmax = 0\n gexactvelmax = 0\n for (i, j) in self.grid.points():\n x = grid.x(i)\n y = grid.y(j)\n (uexact, vexact) = self.exactSolution(i, j, x, y)\n exactnormsq = math.sqrt(uexact * uexact + vexact * vexact)\n exactvelmax = max(exactnormsq, exactvelmax)\n solution = vel_ssa[i, j]\n uerr = abs(solution.u - uexact)\n verr = abs(solution.v - vexact)\n avuerr += uerr\n avverr += verr\n maxuerr = max(maxuerr, uerr)\n maxverr = max(maxverr, verr)\n vecerr = math.sqrt(uerr * uerr + verr * verr)\n maxvecerr = max(maxvecerr, vecerr)\n avvecerr = avvecerr + vecerr\n\n vel_ssa.end_access()\n\n N = grid.Mx() * grid.My()\n gexactvelmax = PISM.GlobalMax(grid.com, exactvelmax)\n gmaxuerr = PISM.GlobalMax(grid.com, maxuerr)\n gmaxverr = PISM.GlobalMax(grid.com, maxverr)\n gavuerr = PISM.GlobalSum(grid.com, avuerr) / N\n gavverr = PISM.GlobalSum(grid.com, avverr) / N\n gmaxvecerr = PISM.GlobalMax(grid.com, maxvecerr)\n gavvecerr = PISM.GlobalSum(grid.com, avvecerr) / N\n\n sys = grid.ctx().unit_system()\n\n m_year = PISM.UnitConverter(sys, \"m / second\", \"m / year\")\n\n if abs(gexactvelmax) > 0.0:\n relative_vel_error = (gavvecerr / gexactvelmax) * 100.0\n else:\n relative_vel_error = 0.0\n\n PISM.verbPrintf(1, grid.com, \"velocity : maxvector prcntavvec maxu maxv avu avv\\n\")\n PISM.verbPrintf(1, grid.com,\n \" %11.4f%13.5f%10.4f%10.4f%10.4f%10.4f\\n\",\n m_year(gmaxvecerr),\n relative_vel_error,\n m_year(gmaxuerr),\n m_year(gmaxverr),\n m_year(gavuerr),\n m_year(gavverr))\n PISM.verbPrintf(1, grid.com, \"NUM ERRORS DONE\\n\")\n\n def exactSolution(self, i, j, xi, xj):\n \"\"\"Override to provide the exact value of the solution at grid index (``i``, ``j``) with\n coordinates (``xi``, ``xj``).\"\"\"\n raise NotImplementedError()\n\n def write(self, filename):\n \"\"\"Override of :meth:`SSARun.write`. 
Does all of the above, and saves a copy of the exact solution.\"\"\"\n SSARun.write(self, filename)\n\n grid = self.grid\n exact = model.create2dVelocityVec(grid, name=\"_exact\", desc=\"SSA exact solution\", intent=\"diagnostic\")\n exact.begin_access()\n for (i, j) in grid.points():\n exact[i, j] = self.exactSolution(i, j, grid.x(i), grid.y(j))\n exact.end_access()\n exact.write(filename)\n\n\nclass SSAFromInputFile(SSARun):\n\n \"\"\"Class for running the SSA based on data provided in an input file.\"\"\"\n\n def __init__(self, boot_file):\n SSARun.__init__(self)\n self.grid = None\n self.config = PISM.Context().config\n self.boot_file = boot_file\n self.phi_to_tauc = False\n self.is_regional = False\n\n def _setFromOptions(self):\n self.phi_to_tauc = PISM.OptionBool(\"-phi_to_tauc\",\n \"Recompute pseudo yield stresses from till friction angles.\")\n self.is_regional = PISM.OptionBool(\"-regional\", \"enable 'regional' mode\")\n\n def _initGrid(self):\n \"\"\"Override of :meth:`SSARun._initGrid`.\"\"\"\n # FIXME: allow specification of Mx and My different from what's\n # in the boot_file.\n\n if self.is_regional and (self.config.get_string(\"stress_balance.ssa.method\") == \"fem\"):\n registration = PISM.CELL_CORNER\n else:\n registration = PISM.CELL_CENTER\n\n ctx = PISM.Context().ctx\n\n pio = PISM.File(ctx.com(), self.boot_file, PISM.PISM_NETCDF3, PISM.PISM_READONLY)\n self.grid = PISM.IceGrid.FromFile(ctx, pio, \"enthalpy\", registration)\n pio.close()\n\n def _initPhysics(self):\n \"\"\"Override of :meth:`SSARun._initPhysics` that sets the physics based on command-line flags.\"\"\"\n config = self.config\n\n enthalpyconverter = PISM.EnthalpyConverter(config)\n\n if PISM.OptionString(\"-ssa_glen\", \"SSA flow law Glen exponent\").is_set():\n config.set_string(\"stress_balance.ssa.flow_law\", \"isothermal_glen\")\n config.scalar_from_option(\"flow_law.isothermal_Glen.ice_softness\", \"ice_softness\")\n else:\n config.set_string(\"stress_balance.ssa.flow_law\", \"gpbld\")\n\n self.modeldata.setPhysics(enthalpyconverter)\n\n def _allocExtraSSACoefficients(self):\n \"\"\"Allocate storage for SSA coefficients.\"\"\"\n vecs = self.modeldata.vecs\n if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_x'):\n vecs.add(model.createDrivingStressXVec(self.grid))\n\n if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_y'):\n vecs.add(model.createDrivingStressYVec(self.grid))\n\n no_model_mask = None\n # For a regional run we'll need no_model_mask, usurfstore, thkstore\n if self.is_regional:\n no_model_mask = model.createNoModelMaskVec(self.grid)\n vecs.add(no_model_mask, 'no_model_mask')\n vecs.add(model.createIceSurfaceStoreVec(self.grid))\n vecs.add(model.createIceThicknessStoreVec(self.grid))\n\n if self.config.get_flag('stress_balance.ssa.dirichlet_bc'):\n vecs.add(model.create2dVelocityVec(self.grid, name='_bc',\n desc='SSA velocity boundary condition',\n intent='intent'),\n \"vel_bc\")\n\n if self.is_regional:\n vecs.add(no_model_mask, 'vel_bc_mask')\n else:\n vecs.add(model.createBCMaskVec(self.grid), 'vel_bc_mask')\n\n if self.phi_to_tauc:\n vecs.add(PISM.model.createBasalMeltRateVec(self.grid))\n vecs.add(PISM.model.createTillPhiVec(self.grid))\n vecs.add(PISM.model.createBasalWaterVec(self.grid))\n\n def _initSSACoefficients(self):\n \"\"\"Override of :meth:`SSARun._initSSACoefficients` that initializes variables from the\n contents of the input file.\"\"\"\n # Build the standard thickness, bed, etc\n self._allocStdSSACoefficients()\n 
self._allocExtraSSACoefficients()\n\n vecs = self.modeldata.vecs\n\n thickness = vecs.land_ice_thickness\n bed = vecs.bedrock_altitude\n enthalpy = vecs.enthalpy\n mask = vecs.mask\n surface = vecs.surface_altitude\n sea_level = vecs.sea_level\n\n sea_level.set(0.0)\n\n # Read in the PISM state variables that are used directly in the SSA solver\n for v in [thickness, bed, enthalpy]:\n v.regrid(self.boot_file, True)\n\n # The SIA model might need the age field.\n if self.config.get_flag(\"age.enabled\"):\n vecs.age.regrid(self.boot_file, True)\n\n # variables mask and surface are computed from the geometry previously read\n\n gc = PISM.GeometryCalculator(self.config)\n gc.compute(sea_level, bed, thickness, mask, surface)\n\n if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_x'):\n vecs.ssa_driving_stress_x.regrid(self.boot_file, critical=True)\n\n if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_y'):\n vecs.ssa_driving_stress_y.regrid(self.boot_file, critical=True)\n\n # For a regional run we'll need no_model_mask, usurfstore, thkstore\n if self.is_regional:\n vecs.no_model_mask.regrid(self.boot_file, True)\n\n if util.fileHasVariable(self.boot_file, 'usurfstore'):\n vecs.usurfstore.regrid(self.boot_file, True)\n else:\n vecs.usurfstore.copy_from(vecs.surface_altitude)\n\n if util.fileHasVariable(self.boot_file, 'thkstore'):\n vecs.thkstore.regrid(self.boot_file, True)\n else:\n vecs.thkstore.copy_from(vecs.land_ice_thickness)\n\n # Compute yield stress from PISM state variables\n # (basal melt rate, tillphi, and basal water height)\n grid = self.grid\n\n if self.phi_to_tauc:\n for v in [vecs.bmr, vecs.tillphi, vecs.bwat]:\n v.regrid(self.boot_file, True)\n vecs.add(v)\n\n if self.is_regional:\n yieldstress = PISM.RegionalDefaultYieldStress(self.modeldata.grid)\n else:\n yieldstress = PISM.MohrCoulombYieldStress(self.modeldata.grid)\n\n # make sure vecs is locked!\n yieldstress.init()\n yieldstress.set_till_friction_angle(vecs.tillphi)\n yieldstress.update(0, 1)\n vecs.tauc.copy_from(yieldstress.basal_material_yield_stress())\n else:\n vecs.tauc.regrid(self.boot_file, True)\n\n if self.config.get_flag('stress_balance.ssa.dirichlet_bc'):\n has_u_bc = util.fileHasVariable(self.boot_file, 'u_bc')\n has_v_bc = util.fileHasVariable(self.boot_file, 'v_bc')\n\n if (not has_u_bc) or (not has_v_bc):\n PISM.verbPrintf(2, grid.com,\n \"Input file '%s' missing Dirichlet boundary data u/v_bc;\"\n \" using zero default instead.\" % self.boot_file)\n vecs.vel_bc.set(0.0)\n else:\n vecs.vel_bc.regrid(self.boot_file, True)\n\n if not self.is_regional:\n bc_mask_name = vecs.vel_bc_mask.metadata().get_string(\"short_name\")\n if util.fileHasVariable(self.boot_file, bc_mask_name):\n vecs.vel_bc_mask.regrid(self.boot_file, True)\n else:\n PISM.verbPrintf(2, grid.com,\n \"Input file '%s' missing Dirichlet location mask '%s'.\"\n \" Default to no Dirichlet locations.\" % (self.boot_file, bc_mask_name))\n vecs.vel_bc_mask.set(0)\n\n def _constructSSA(self):\n \"\"\"Constructs an instance of :cpp:class:`SSA` for solving the SSA based on command-line flags ``-regional`` and ``-ssa_method``\"\"\"\n md = self.modeldata\n if self.is_regional and (md.config.get_string(\"stress_balance.ssa.method\") == \"fd\"):\n algorithm = PISM.SSAFD_Regional\n else:\n algorithm = SSAAlgorithms[md.config.get_string(\"stress_balance.ssa.method\")]\n return 
algorithm(md.grid)\n","repo_name":"pism/pism","sub_path":"site-packages/PISM/ssa.py","file_name":"ssa.py","file_ext":"py","file_size_in_byte":21344,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"18"} +{"seq_id":"28628489528","text":"import socket\n\n# Private ip Address\n# It's going to give you the virtual box ip address\n# host = socket.gethostbyname(socket.gethostname())\n\n\n\ndef main():\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n HOST = 'localhost'\n PORT = 8080\n result = server.connect_ex((HOST, PORT))\n print(\"Result is {}\".format(result))\n server.close()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Kelvin-Charles/CyberSec","sub_path":"Socket/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"14337980200","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = np.loadtxt(f\"matlab/matlab_pde.txt\")\nx, t, theta_sup, exact, perf, tissue = data[:, 0:1].T, data[:, 1:2].T, data[:, 2:3].T, data[:, 3:4].T, data[:, 4:5].T, data[:, 5:].T\nX = np.vstack((x, t, theta_sup)).T\ny1 = exact.flatten()[:, None]\ny2 = perf.flatten()[:, None]\ny3 = tissue.flatten()[:, None]\n\nclass SReLU(tf.keras.layers.Layer):\n def __init__(self, tr_init=0.4, ar_init=2.0, tl_init=-0.4, al_init=0.4):\n super(SReLU, self).__init__()\n self.tr = tf.Variable(tr_init, trainable=True, name='tr')\n self.ar = tf.Variable(ar_init, trainable=True, name='ar')\n self.tl = tf.Variable(tl_init, trainable=True, name='tl')\n self.al = tf.Variable(al_init, trainable=True, name='al')\n\n def call(self, inputs):\n s_greater_tr = tf.where(inputs > self.tr, self.tr + self.ar * (inputs - self.tr), inputs)\n s_between_tl_tr = tf.where(tf.logical_and(inputs > self.tl, inputs < self.tr), inputs, s_greater_tr)\n s_less_tl = tf.where(inputs <= self.tl, self.tl + self.al * (inputs - self.tl), s_between_tl_tr)\n return s_less_tl\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n# Create an instance of the SReLU layer\nsrelu_layer = SReLU()\n\n# Create a range of values for plotting\nx = np.linspace(-1, 1, 400)\nx_tensor = tf.constant(x, dtype=tf.float32)\n\n# Calculate the SReLU activation and its derivative\nwith tf.GradientTape(persistent=True) as tape:\n tape.watch(x_tensor)\n srelu_activation = srelu_layer(x_tensor)\n\nsrelu_derivative = tape.gradient(srelu_activation, x_tensor)\n\n# Plot the SReLU activation function and its derivative\nplt.figure(figsize=(12, 5))\nplt.subplot(1, 2, 1)\nplt.plot(x, srelu_activation, label='SReLU Activation')\nplt.title('SReLU Activation Function')\nplt.xlabel('Input')\nplt.ylabel('Output')\n\nplt.subplot(1, 2, 2)\nplt.plot(x, srelu_derivative, label='SReLU Derivative')\nplt.title('SReLU Derivative')\nplt.xlabel('Input')\nplt.ylabel('Derivative')\nplt.tight_layout()\nplt.savefig(f\"SReLU.png\", dpi=300, bbox_inches='tight')\nplt.show()\n\n\n# Exercise 3 (6 points): Continual learning\n\n\n# Define metrics here:\nclass MeanAccuracy(tf.keras.metrics.Metric):\n def __init__(self, name=\"mean_accuracy\", **kwargs):\n super().__init__(name=name, **kwargs)\n self.total = self.add_weight(name=\"total\", initializer=\"zeros\")\n self.count = self.add_weight(name=\"count\", initializer=\"zeros\")\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n y_pred = tf.round(y_pred)\n matches = tf.equal(y_true, y_pred)\n 
self.total.assign_add(tf.reduce_sum(tf.cast(matches, tf.float32)))\n self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))\n\n def result(self):\n return self.total / self.count\n\n\nclass BackwardTransfer(tf.keras.metrics.Metric):\n def __init__(self, name=\"backward_transfer\", **kwargs):\n super(BackwardTransfer, self).__init__(name=name, **kwargs)\n self.initial_performance = self.add_weight(name=\"initial_perf\", initializer=\"zeros\")\n self.new_performance = self.add_weight(name=\"new_perf\", initializer=\"zeros\")\n\n def reset_states(self):\n self.initial_performance.assign(0.)\n self.new_performance.assign(0.)\n\n def update_initial_performance(self, value):\n self.initial_performance.assign(value)\n\n def update_new_performance(self, value):\n self.new_performance.assign(value)\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n pass\n\n def result(self):\n return self.new_performance - self.initial_performance\n\n# Define the multi-head model\n# Tip: the functional API is pretty good here\n\n\n# Shared Base\ninput_layer = tf.keras.layers.Input(shape=(3,))\nx = tf.keras.layers.Dense(128, activation=SReLU())(input_layer)\nx = tf.keras.layers.Dense(128, activation=SReLU())(x)\n\n# Head for Temperature (Regression)\ntemp_out = tf.keras.layers.Dense(1, name='temperature')(x)\n\n# Head for Perfusion (Regression)\nperfusion_out = tf.keras.layers.Dense(1, name='perfusion')(x)\n\n# Head for Tissue Type (Classification, assuming 2 types for simplicity)\ntissue_out = tf.keras.layers.Dense(2, activation='softmax', name='tissue_type')(x)\n\nmodel = tf.keras.models.Model(inputs=input_layer, outputs=[temp_out, perfusion_out, tissue_out])\n\n# Print a summary of the model architecture\nmodel.summary()\n\n\nclass EWCLoss(tf.keras.losses.Loss):\n def __init__(self, model, task_id, lambda_ewc=1e4):\n super().__init__()\n self.model = model\n self.task_id = task_id\n self.lambda_ewc = lambda_ewc\n self.fisher = {}\n self.star_vars = {}\n\n def compute_fisher_information(self, x, y):\n with tf.GradientTape() as tape:\n y_preds = self.model(x)\n y_pred = y_preds[self.task_id]\n if self.task_id in [0, 1]: # regression tasks\n loss = tf.keras.losses.mean_squared_error(y, y_pred)\n else: # classification task\n loss = tf.keras.losses.sparse_categorical_crossentropy(y, y_pred, from_logits=True)\n\n # Compute gradients\n grads = tape.gradient(loss, self.model.trainable_variables)\n\n # Compute Fisher information\n for var, grad in zip(self.model.trainable_variables, grads):\n if grad is not None:\n if var.name in self.fisher:\n self.fisher[var.name] += grad ** 2 / len(x)\n else:\n self.fisher[var.name] = grad ** 2 / len(x)\n\n def update_star_vars(self):\n for var in self.model.trainable_variables:\n self.star_vars[var.name] = var.numpy()\n\n def call(self, y_true, y_pred_list):\n y_pred = y_pred_list[self.task_id]\n\n if self.task_id in [0, 1]: # regression tasks\n mse_loss = tf.keras.losses.mean_squared_error(y_true, y_pred)\n else: # classification task\n mse_loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)\n\n ewc_loss = 0.0\n for var in self.model.trainable_variables:\n if var.name in self.fisher:\n ewc_loss += tf.reduce_sum(self.fisher[var.name] * (var - self.star_vars[var.name]) ** 2)\n\n return mse_loss + self.lambda_ewc * ewc_loss\n\n\noptimizer = tf.keras.optimizers.Adam(learning_rate=0.001)\n\n# Task 1 (y1)\newc_loss = EWCLoss(model, task_id=0)\nmodel.compile(optimizer=optimizer, loss=ewc_loss, metrics=['mae'])\nmodel.fit(X, y1, epochs=2, 
batch_size=32, validation_split=0.2)\newc_loss.compute_fisher_information(X, y1)\newc_loss.update_star_vars()\n\n# Task 2 (y2)\newc_loss = EWCLoss(model, task_id=1)\nmodel.compile(optimizer=optimizer, loss=ewc_loss, metrics=['mae'])\nmodel.fit(X, y2, epochs=2, batch_size=32, validation_split=0.2)\newc_loss.compute_fisher_information(X, y2)\newc_loss.update_star_vars()\n\nfrom tensorflow.keras.utils import get_custom_objects\n\nget_custom_objects().update({\"MeanAccuracy\": MeanAccuracy})\n\n# Task 3 (y3)\newc_loss = EWCLoss(model, task_id=2)\nmodel.compile(optimizer=optimizer, loss=ewc_loss, metrics=['MeanAccuracy'])\ny3_squeezed = tf.squeeze(y3, axis=-1)\nmodel.fit(X, y3_squeezed, epochs=4, batch_size=32, validation_split=0.2)\n\n","repo_name":"gcappellini/pinns_bioheat","sub_path":"NNDS/nnds_homework_2_cappellini_2021.py","file_name":"nnds_homework_2_cappellini_2021.py","file_ext":"py","file_size_in_byte":7232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"22786264048","text":"import statistics\r\nimport numpy as np\r\nimport scipy\r\n\r\nclass Calc:\r\n    def __init__(self,dados):\r\n        self.dados_tratado = sorted(dados)\r\n\r\n    def mediana(self):\r\n        valor = len(self.dados_tratado)\r\n\r\n        if valor % 2 == 0:\r\n            metade_d = valor//2\r\n            metade_e = metade_d - 1\r\n            mediana = (self.dados_tratado[metade_e] + self.dados_tratado[metade_d])/2\r\n            resposta = (f'The median is: {mediana}')\r\n        else:\r\n            metade = valor//2\r\n            mediana = self.dados_tratado[metade]\r\n            resposta = (f'The median is: {mediana}')\r\n\r\n        return resposta\r\n\r\n    def media(self):\r\n        calc = statistics.mean(self.dados_tratado)\r\n        media = print(f'The mean is: {calc:.2f}')\r\n\r\n        return media\r\n\r\n    def moda(self):\r\n        moda = statistics.multimode(self.dados_tratado)\r\n        modar = print(f'The mode of the list is: {moda}')\r\n\r\n        return modar\r\n\r\n    def valor_max(self):\r\n        maior = max(self.dados_tratado)\r\n        resposta = print(f'The largest value in the list is {maior}')\r\n\r\n        return resposta\r\n\r\n    def valor_min(self):\r\n        menor = min(self.dados_tratado)\r\n        resposta = print(f'The smallest value in the list is {menor}')\r\n\r\n        return resposta\r\n\r\n    def variancia(self):\r\n        var = np.var(self.dados_tratado)\r\n        resposta = print(f'The variance is: {var:.2f}')\r\n\r\n        return resposta\r\n\r\n    def desvio_padrao(self):\r\n        des = np.std(self.dados_tratado)\r\n        resposta = print(f'The standard deviation is: {des:.2f}')\r\n\r\n        return resposta\r\n\r\n    def amplitude(self):\r\n        maior = max(self.dados_tratado)\r\n        menor = min(self.dados_tratado)\r\n        amplitude = maior - menor\r\n\r\n        resposta = print(f'The range of this list is: {amplitude}')\r\n\r\n        return resposta\r\n\r\n    def coeficiente_de_variacao(self):\r\n        dp = statistics.stdev(self.dados_tratado)\r\n        media = statistics.mean(self.dados_tratado)\r\n        # coefficient of variation = standard deviation divided by the mean, as a percentage\r\n        cv = (dp/media)*100\r\n\r\n        resposta = print(f'The coefficient of variation is: {cv:.2f}%')\r\n\r\n        return resposta\r\n\r\n\r\n\r\n# Put your own list here, this is just an example\r\nlista = [15,15,15,15,20,30,300,40,40,50,10,2,1]\r\n\r\nvl = Calc(lista)\r\n\r\nvl.media()\r\nvl.mediana()\r\nvl.moda()\r\nvl.variancia()\r\nprint()\r\n\r\nvl.valor_max()\r\nvl.valor_min()\r\nprint()\r\n\r\nvl.amplitude()\r\nvl.desvio_padrao()\r\nvl.coeficiente_de_variacao()\r\nprint()","repo_name":"C0d3x1ng/Calculadora-para-Estatistica-Descritiva","sub_path":"E.D.py","file_name":"E.D.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"15542475950","text":"import graphsurgeon as gs\nimport tensorflow as tf\nimport uff\nimport sys\n\nif len(sys.argv) < 2:\n print('Usage: python ' + sys.argv[0] + ' /path/to/model.pb')\n exit(0)\n\n# load graph\nfilename_pb = sys.argv[1]\ndynamic_graph = gs.DynamicGraph(filename_pb)\nnodes = list(dynamic_graph.as_graph_def().node)\n\nprint('Converting...')\n# create input node\ninput_node = gs.create_node(\"images\",\n op=\"Placeholder\",\n dtype=tf.float32,\n shape=[None, 128, 64, 3]\n )\n\n# remove nodes in DeepSORT's re-identification model not supported by TensorRT,\n# and connect with input node\nfor node in nodes:\n if \"map\" in node.name or \"images\" == node.name or \"Cast\" == node.name:\n dynamic_graph.remove(node)\n elif \"conv1_1/Conv2D\" == node.name:\n node.input.insert(0, \"images\")\n\n# add input node to graph\ndynamic_graph.append(input_node)\n\n# create uff file\ntrt_graph = uff.from_tensorflow(dynamic_graph.as_graph_def())\nfilename_uff = filename_pb[:filename_pb.rfind('.')] + '.uff'\nprint('Writing to disk...')\nwith open(filename_uff, 'wb') as f:\n f.write(trt_graph)\nprint('Saved as ' + filename_uff)\n","repo_name":"WOM89757/dpstream_ext","sub_path":"sources/tracker_DeepSORT/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"24412401249","text":"from gpiozero import Servo\nimport time\n\nclass Camera():\n \n \n def __init__(self, servoPin):\n \n self.pin = servoPin\n\n def FaceForward(self):\n s = Servo(self.pin, min_pulse_width = 0.5/1000, max_pulse_width = 10/1000, frame_width = 20/1000)\n s.max()\n time.sleep(0.3)\n s.detach()\n #time.sleep(1)\n\n def FaceBackward(self):\n s = Servo(self.pin, min_pulse_width = 0.5/1000, max_pulse_width = 10/1000, frame_width = 20/1000)\n s.min()\n time.sleep(0.3)\n s.detach()\n #time.sleep(1)\n\n\n def FaceRight(self):\n s = Servo(self.pin, min_pulse_width = 1.1/1000, max_pulse_width = 10/1000, frame_width = 20/1000)\n s.min()\n time.sleep(0.3)\n s.detach()\n #time.sleep(1)\n\n def FaceLeft(self):\n s = Servo(self.pin, min_pulse_width = 1/1000, max_pulse_width = 10.6/1000, frame_width = 20/1000)\n s.max()\n time.sleep(0.3)\n s.detach()\n #time.sleep(1)\n \ndef main():\n c = Camera(4)\n time.sleep(2)\n c.FaceRight()\n time.sleep(2)\n c.FaceForward()\n time.sleep(2)\n c.FaceLeft()\n \nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"witherellt21/Capstone-Robotics","sub_path":"Capstone Data Gathering/CameraServo.py","file_name":"CameraServo.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"546384998","text":"import random\n\n\ndef setup(ent):\n print(\"New entity \\\"wizard\\\" with EID {0} at {1},{2},{3} \".format(str(ent.eid),\n str(ent.x),\n str(ent.y),\n str(ent.layer)))\n \n Driftwood.tick.register(move, delay=2.0, message=ent)\n\n Driftwood.light.insert(\"lightmap_circle1.png\", 0, 0, 0, 64, 64, \"FFFFFFAA\", entity=ent.eid, layermod=-1)\n\n\ndef kill(ent):\n Driftwood.tick.unregister(move)\n\n\ndef move(seconds_past, ent):\n rand = random.randint(1, 8)\n if rand == 1:\n ent.walk(0, -1, dont_stop=False, stance=\"walk_up\", end_stance=\"face_up\")\n elif rand == 2:\n ent.walk(0, 1, dont_stop=False, stance=\"walk_down\", end_stance=\"face_down\")\n elif rand == 3:\n ent.walk(-1, 0, dont_stop=False, stance=\"walk_left\", end_stance=\"face_left\")\n elif rand == 4:\n 
ent.walk(1, 0, dont_stop=False, stance=\"walk_right\", end_stance=\"face_right\")\n","repo_name":"Driftwood2D/blue","sub_path":"wizard.py","file_name":"wizard.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"10139847843","text":"import logging\n\nfrom telegram.utils.request import Request\nfrom telegram.utils.types import JSONDict\n\nfrom tests.utils import make_post_response\n\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO\n)\nlogger = logging.getLogger(__name__)\nold_post = Request.post\n\n\ndef substitute_post(self, url: str, data: JSONDict, *args, **kwargs):\n method = url.split('/')[-1]\n result = old_post(self, url, data, *args, **kwargs)\n expected_response = make_post_response(url, data)\n if method in ('setMyCommands', 'getMe', 'deleteWebhook'):\n assert result == expected_response, (method, expected_response, result)\n return result\n elif method == 'sendMessage':\n for key in expected_response:\n assert key in ('message_id', 'date') or expected_response[key] == result[key], ('sendMessage', key, expected_response[key], result[key])\n for key in result:\n assert key in ('entities') or key in expected_response, ('sendMessage', key, expected_response.keys(), result.keys())\n return result\n# elif method == 'getUpdates':\n# return old_post(self, url, *args, **kwargs)\n# return [{'update_id': 821428716, 'message': {'message_id': 33, 'from': {'id': 5000566356, 'is_bot': False, 'first_name': 'Dlavrukhin', 'last_name': 'Test', 'username': 'eldies', 'language_code': 'ru'}, 'chat': {'id': 5000566356, 'first_name': 'Dlavrukhin', 'last_name': 'Test', 'username': 'eldies', 'type': 'private'}, 'date': 1662231889, 'text': '/start', 'entities': [{'offset': 0, 'length': 6, 'type': 'bot_command'}]}}]\n\n    logger.info('post {}'.format(url))\n    logger.info(result)\n    return result\n","repo_name":"Eldies/telegram_coffee_bot","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"14496117062","text":"\nfrom django.shortcuts import render,get_object_or_404\nfrom .models import Hospital,Doctor,Ambulance\ndef doctor_list(request,hospital_slug=None):\n hospital=None\n hospitals=Hospital.objects.all()\n doctors=Doctor.objects.filter(available=True)\n if hospital_slug:\n hospital=get_object_or_404(Hospital,slug=hospital_slug)\n doctors=Doctor.objects.filter(category=hospital)\n context={\n 'hospital':hospital,\n 'hospitals':hospitals,\n 'doctors':doctors\n }\n return render(request,'list.html',context)\n\ndef doctor_detail(request,id,slug):\n doctor=get_object_or_404(Doctor,id=id,slug=slug)\n context={\n 'doctor':doctor\n }\n return render(request,'detail.html',context)\n\n\ndef ambulance_detail(request,id,slug):\n ambulance=get_object_or_404(Ambulance,id=id,slug=slug)\n context={\n 'ambulance':ambulance\n }\n return render(request,'ambulance_detail.html',context)\n","repo_name":"pankaj610/Paytm-Hackathon-Project","sub_path":"paytm_project/hospital/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"17956568199","text":"#Tags - list append\n#https://leetcode.com/problems/running-sum-of-1d-array/\n#beats 90%\nclass Solution:\n def runningSum(self, nums: List[int]) -> List[int]:\n lst_ = 
[]\n sum_ = 0\n for i in range(len(nums)):\n sum_ = nums[i] + sum_\n lst_.append(sum_)\n return lst_","repo_name":"dhananjay93/leetcode","sub_path":"python/leetcode1480.py","file_name":"leetcode1480.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"37013617558","text":"import PISM\nimport time\n\nkError = 1\nkWarning = 2\nkMessage = 2\nkDebug = 4\nkPrattle = 5\n\n_loggers = []\n\ndef clear_loggers():\n \"\"\"Removes all members from the global list of loggers.\"\"\"\n global _loggers\n _loggers = []\n\n\ndef add_logger(logger):\n \"\"\"Appends a new logger to the global list of loggers.\"\"\"\n global _loggers\n _loggers.append(logger)\n\n\ndef log(message, verbosity):\n \"\"\"Logs a message with the specified verbosity\"\"\"\n for l in _loggers:\n l(message, verbosity)\n\n\ndef logError(message):\n \"\"\"Convenience function for logging a message at the level of ``kError``\"\"\"\n log(message, kError)\n\n\ndef logWarning(message):\n \"\"\"Convenience function for logging a message at the level of ``kWarning``\"\"\"\n log(message, kWarning)\n\n\ndef logMessage(message):\n \"\"\"Convenience function for logging a message at the level of ``kMessage``\"\"\"\n log(message, kMessage)\n\n\ndef logDebug(message):\n \"\"\"Convenience function for logging a message at the level of ``kDebug``\"\"\"\n log(message, kDebug)\n\n\ndef logPrattle(message):\n \"\"\"Convenience function for logging a message at the level of ``kPrattle``\"\"\"\n log(message, kPrattle)\n\n\ndef print_logger(message, verbosity):\n \"\"\"Implements a logger that forwards messages to :cpp:func:`verbPrintf`.\"\"\"\n com = PISM.Context().com\n msg = str(message)\n PISM.verbPrintf(verbosity, com, msg)\n\n# The global list of loggers.\n_loggers = [print_logger]\n\n\nclass CaptureLogger(object):\n\n \"\"\"Implements a logger that appends log messages as they occur\n to an attribute of an :file:`.nc` file.\"\"\"\n\n def __init__(self, filename, attribute='pism_log', verbosity_threshold=2):\n \"\"\":param filename: Name of :file:`.nc` file to save the log to.\n :param attribute: Attribute name to save the log as.\"\"\"\n self.com = PISM.Context().com\n self.rank = PISM.Context().rank\n self.log = \"\"\n self.filename = filename\n self.attr = attribute\n self.verbosity_threshold = verbosity_threshold\n\n def __call__(self, message, verbosity):\n \"\"\"Saves the message to our internal log string and writes the string out to the file.\"\"\"\n if verbosity <= self.verbosity_threshold:\n timestamp = time.strftime('%Y-%m-%d %H:%M:%S')\n self.log = \"%s%s: %s\" % (self.log, timestamp, message)\n d = PISM.File(PISM.Context().com, self.filename, PISM.PISM_NETCDF3, PISM.PISM_READWRITE)\n d.redef()\n d.write_attribute(\"PISM_GLOBAL\", self.attr, self.log)\n d.close()\n\n def readOldLog(self):\n \"\"\"If the :file:`.nc` file we are logging to already has a log,\n read it in to the log we are about to make so that we append to it rather\n than overwriting it.\"\"\"\n d = PISM.File(PISM.Context().com, self.filename, PISM.PISM_NETCDF3, PISM.PISM_READONLY)\n self.log += d.read_text_attribute(\"PISM_GLOBAL\", self.attr)\n d.close()\n\n def write(self, filename=None, attribute=None):\n \"\"\"Save a copy of our log to the specified file and attribute.\"\"\"\n if filename is None:\n filename = self.filename\n if attribute is None:\n attribute = self.attr\n d = PISM.File(PISM.Context().com, filename, PISM.PISM_NETCDF3, PISM.PISM_READWRITE)\n 
d.redef()\n d.write_attribute(\"PISM_GLOBAL\", attribute, self.log)\n d.close()\n\nimport termios\nimport sys\nimport os\nTERMIOS = termios\n\n\ndef getkey():\n \"\"\"Helper function for grabbing a single key press\"\"\"\n fd = sys.stdin.fileno()\n c = None\n if os.isatty(fd):\n old = termios.tcgetattr(fd)\n new = termios.tcgetattr(fd)\n new[3] = new[3] & ~TERMIOS.ICANON & ~TERMIOS.ECHO\n new[6][TERMIOS.VMIN] = 1\n new[6][TERMIOS.VTIME] = 0\n termios.tcsetattr(fd, TERMIOS.TCSANOW, new)\n try:\n c = os.read(fd, 1)\n finally:\n termios.tcsetattr(fd, TERMIOS.TCSAFLUSH, old)\n else:\n # FIXME: The following is here for multi-processor runs.\n # Termios is not available and I don't know a better solution.\n c = sys.stdin.read(1)\n return c\n\n\ndef pause(message_in=None, message_out=None):\n \"\"\"Prints a message and waits for a key press.\n\n :param message_in: Message to display before waiting.\n :param message_out: Message to display after waiting.\"\"\"\n com = PISM.Context().com\n if not message_in is None:\n PISM.verbPrintf(1, com, message_in + \"\\n\")\n _ = getkey()\n if not message_out is None:\n PISM.verbPrintf(1, com, message_out + \"\\n\")\n","repo_name":"pism/pism","sub_path":"site-packages/PISM/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"18"} +{"seq_id":"22522722061","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom src.dependencies import database\n\n# Router\nfrom src.routes import main\n\napp = FastAPI(\n title='PUSRI Digifert API',\n docs_url='/',\n)\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\n 'https://teman.pusri.co.id',\n 'https://temanpusri.pusri.co.id'\n ],\n allow_credentials=True,\n allow_headers=['*'],\n allow_methods=['*'],\n)\napp.include_router(main.router)","repo_name":"revanmd/digifert-api","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16149366849","text":"import numpy as np\nimport tensorflow as tf\n\nclass Shallow_CNN(object):\n\n def __init__(self, inputT, output_dim,\n trainable=False, reuse=False,\n act_type='relu', pool_type='maxpool'):\n\n print(\" [*] Constructing Shallow_CNN ... \")\n\n \"\"\"\n :param inputT: 4D tensor; has to be provided\n :param output_dim: has to be provided\n :param act_type: activation, default to relu\n :param pool_type: pooling, default to maxpool\n :param trainable: are the weights trainable? 
default to false\n\n \"\"\"\n\n self.inputT = inputT\n self.output_dim = output_dim\n\n self.act_type = act_type\n self.pool_type = pool_type\n self.trainable = trainable\n self.reuse = reuse\n\n print(\"Output dim = {}\".format(self.output_dim))\n print(\"Reuse = {}, (T)Testing/(F)Training\".format(self.reuse))\n print(\"Shallow CNN trainable = {}\".format(self.trainable))\n\n self.layers_dic = {}\n self.layers_dic['Shallow_CNN_input'] = self.inputT\n self.num_channel = self.inputT.get_shape().as_list()[-1]\n\n with tf.variable_scope(\"Shallow_CNN\") as scope:\n\n if self.reuse:\n scope.reuse_variables()\n\n self.convlayers()\n self.fc_layers()\n\n self.logits = self.fc2\n self.probs = tf.nn.softmax(self.logits)\n\n def act(self, tensor):\n\n if self.act_type == 'relu':\n return tf.nn.relu(tensor)\n\n if self.act_type == 'softplus':\n return tf.nn.softplus(tensor)\n\n def pool(self, tensor):\n\n if self.pool_type == 'maxpool':\n\n return tf.nn.max_pool(tensor,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n if self.pool_type == 'avgpool':\n\n return tf.nn.avg_pool(tensor,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n def convlayers(self):\n\n # conv1_1\n with tf.variable_scope('Shallow_CNN_conv1_1', reuse=self.reuse) as scope:\n\n kernel = tf.get_variable(name='w', shape=[2, 2, self.num_channel, 256], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=1e-1), trainable=self.trainable)\n\n biases = tf.get_variable(name='b', shape=[256], dtype=tf.float32,\n initializer=tf.constant_initializer(value=0.0), trainable=self.trainable)\n\n conv = tf.nn.conv2d(self.inputT, kernel, [1, 1, 1, 1], padding='SAME')\n out = tf.nn.bias_add(conv, biases)\n\n self.conv1_1 = self.act(tensor=out)\n\n self.layers_dic['Shallow_CNN_conv1_1'] = self.conv1_1\n\n # conv1_2\n with tf.variable_scope('Shallow_CNN_conv1_2', reuse=self.reuse) as scope:\n\n kernel = tf.get_variable(name='w', shape=[2, 2, 256, 256], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=1e-1), trainable=self.trainable)\n\n biases = tf.get_variable(name='b', shape=[256], dtype=tf.float32,\n initializer=tf.constant_initializer(value=0.0), trainable=self.trainable)\n\n conv = tf.nn.conv2d(self.conv1_1, kernel, [1, 1, 1, 1], padding='SAME')\n out = tf.nn.bias_add(conv, biases)\n\n self.conv1_2 = self.act(tensor=out)\n\n self.layers_dic['Shallow_CNN_conv1_2'] = self.conv1_2\n\n # pool1\n self.pool1 = self.pool(tensor=self.conv1_2)\n self.layers_dic['Shallow_CNN_pool1'] = self.pool1\n\n def fc_layers(self):\n\n # fc1\n with tf.variable_scope('Shallow_CNN_fc1', reuse=self.reuse) as scope:\n\n shape = int(np.prod(self.pool1.get_shape()[1:]))\n\n fc1w = tf.get_variable(name='w', shape=[shape, 1024], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=1e-1), trainable=self.trainable)\n\n fc1b = tf.get_variable(name='b', shape=[1024], dtype=tf.float32,\n initializer=tf.constant_initializer(value=0.0), trainable=self.trainable)\n\n pool1_flat = tf.reshape(self.pool1, [-1, shape])\n\n fc1l = tf.nn.bias_add(tf.matmul(pool1_flat, fc1w), fc1b)\n\n self.fc1 = self.act(tensor=fc1l)\n\n self.layers_dic['Shallow_CNN_fc1'] = self.fc1\n\n # fc2\n with tf.variable_scope('Shallow_CNN_fc2', reuse=self.reuse) as scope:\n\n fc2w = tf.get_variable(name='w', shape=[1024, self.output_dim], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=1e-1), trainable=self.trainable)\n\n fc2b = tf.get_variable(name='b', shape=[self.output_dim], dtype=tf.float32,\n 
initializer=tf.constant_initializer(value=0.0), trainable=self.trainable)\n\n self.fc2 = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)\n\n self.layers_dic['Shallow_CNN_logits'] = self.fc2\n\n def init(self, sess):\n sess.run(tf.global_variables_initializer())","repo_name":"coolclear/Research","sub_path":"CNN/Deep_Models/Shallow_CNN.py","file_name":"Shallow_CNN.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6232716897","text":"import datetime\nimport json\nimport logging\nimport sys\nimport traceback\nfrom inspect import istraceback\n\nPYTHON_MAJOR_VERSION = sys.version_info[0]\n\nif PYTHON_MAJOR_VERSION == 2:\n str = str\n\nCRITICAL = 50\nFATAL = CRITICAL\nERROR = 40\nWARNING = 30\nWARN = WARNING\nINFO = 20\nDEBUG = 10\nNOTSET = 0\n\n_levelToName = {\n CRITICAL: 'CRITICAL',\n ERROR: 'ERROR',\n WARNING: 'WARNING',\n INFO: 'INFO',\n DEBUG: 'DEBUG',\n NOTSET: 'NOTSET',\n}\n\nDEFAULT_LOG_RECORD_FIELDS = {\n 'name',\n 'msg',\n 'args',\n 'levelname',\n 'levelno',\n 'pathname',\n 'filename',\n 'module',\n 'exc_info',\n 'exc_class',\n 'exc_msg',\n 'exc_traceback',\n 'exc_text',\n 'stack_info',\n 'lineno',\n 'funcName',\n 'created',\n 'msecs',\n 'relativeCreated',\n 'thread',\n 'threadName',\n 'processName',\n 'process',\n}\n\n\nclass SimpleJsonFormatter(logging.Formatter):\n level_to_name_mapping = _levelToName\n\n def __init__(self, fmt=None, datefmt=None, style='%', serializer=json.dumps):\n super(SimpleJsonFormatter, self).__init__()\n self.serializer = serializer\n\n @staticmethod\n def _default_json_handler(obj):\n if isinstance(obj, (datetime.date, datetime.time)):\n return str(obj.isoformat())\n elif istraceback(obj):\n tb = ''.join(traceback.format_tb(obj))\n return tb.strip()\n elif isinstance(obj, Exception):\n return \"Exception: {}\".format(str(obj))\n return str(obj)\n\n def format(self, record):\n msg = {\n 'name': str(record.name),\n 'timestamp': str(datetime.datetime.now().isoformat()),\n 'line_number': record.lineno,\n 'function': str(record.funcName),\n 'module': str(record.module),\n 'level': str(self.level_to_name_mapping[record.levelno]),\n 'path': str(record.pathname),\n 'thread_id': str(record.thread),\n 'process_id': str(record.process),\n 'thread_name': str(record.threadName),\n 'process_name': str(record.processName),\n }\n\n for field, value in list(record.__dict__.items()):\n if field not in DEFAULT_LOG_RECORD_FIELDS:\n msg[field] = str(value)\n\n if isinstance(record.msg, dict):\n msg.update(record.msg)\n elif '%' in record.msg and len(record.args) > 0:\n try:\n msg['msg'] = record.msg % record.args\n except ValueError:\n msg['msg'] = record.msg\n else:\n msg['msg'] = record.msg\n\n if record.exc_info:\n msg['exc_class'], msg['exc_msg'], msg['exc_traceback'] = record.exc_info\n\n return str(self.serializer(msg, default=self._default_json_handler))\n","repo_name":"Tencent/bk-base","sub_path":"src/datamgr/metadata/metadata/vendor/simple_json_log_formatter/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"18"} +{"seq_id":"1215848065","text":"import pygame\n\nSIZE = (800, 600)\nTITLE = \"Rock, Sissors and Cody\"\nSCORESIZE = (150, 60)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nSIZEBOTONP = (250, 60)\nSIZEBOTONS = (250, 60)\nSIZEBOTONQ = (200, 60)\nSIZEBOTONSA = (50, 50)\nSIZEBOTONRO = (70, 70)\nSIZEBOTONSI = (70, 70)\nSIZEBOTONPA = (70, 
70)\nSIZEBOTONLI = (70, 70)\nSIZEBOTONSP = (70, 70)\nMUSICGAME = \"music/musicajuego.wav\"\nROCKSD = \"music/rock.wav\"\nSISSORSSD = \"music/tijeras.wav\"\nPAPERSD = \"music/paper.wav\"\nCODYSD = \"music/cody.wav\"\nSPOKSD = \"music/spok.wav\"","repo_name":"Pedroff83/ProyectoBtcamp","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"71183696681","text":"#coding: utf-8\n\n\"\"\"\nThis module provides classes for storing, manipulating and\nrendering tabular structures.\nInspired by Philippe Lagadec's HTML.py project, website: http://www.decalage.info/python/html\n\"\"\"\n\ndef attrs2str(attrs):\n\t\"\"\"Takes a dictionary of attributes and returns an attribute string like\n\t'border=\"1\", cellpadding=\"1\", cellspacing=\"1\", class=\"tablesorter\"'\n\t\"\"\"\n\tif attrs:\n\t\treturn \" \" + \" \".join(['%s=\"%s\"'%(k,v) for k,v in attrs.iteritems()])\n\telse:\n\t\treturn \"\"\n\nclass TableCell(object):\n\t\"\"\"Stores the information of a single cell, even a non-existent one.\"\"\"\n\tdef __init__(self, text=\"&nbsp;\", th=False, attrs={}):\n\t\t\"\"\"Initializes a table cell object.\n\t\tArguments:\n\t\ttext - the cell content, a non-breaking space by default;\n\t\tth - render with a th tag;\n\t\tattrs - attributes rendered directly in the <td>/<th> tag,\n\t\t\"\"\"\n\t\tself.text = text\n\t\tself.th = th\n\t\tself.attrs = attrs\n\n\tdef __unicode__(self):\n\t\t\"\"\"Returns the HTML code of the table cell as a unicode string\"\"\"\n\t\tif not self.th:\n\t\t\treturn \" <td%s>%s</td>\\n\" % (attrs2str(self.attrs), self.text)\n\t\telse:\n\t\t\treturn \" <th%s>%s</th>\\n\" % (attrs2str(self.attrs), self.text)\n\n\nclass TableRow(object):\n\t\"\"\"Stores a single table row.\"\"\"\n\tdef __init__(self, cells=[], th=False, attrs={}):\n\t\t\"\"\"Initializes a table row object\n\t\tArguments:\n\t\tcells - the cells. Must be instances of the TableCell class\n\t\tth - when rendering html, use <th> tags instead of <td>.\n\t\tattrs - attributes rendered directly in the <tr> tag,\n\t\t\"\"\"\n\t\tself.th = th\n\t\tself.attrs = attrs\n\t\tself.cells = []\n\t\tfor cell in cells:\n\t\t\tself.add_cell(cell)\n\t\n\n\tdef __getitem__(self, index):\n\t\treturn self.cells[index]\n\n\tdef __setitem__(self, index, value):\n\t\tif isinstance(value, TableCell):\n\t\t\tself.cells[index] = value\n\t\telse:\n\t\t\tself.cells[index].text = unicode(value)\n\n\tdef add_cell(self, cell=\"&nbsp;\", th=None, attrs={}):\n\t\t\"\"\"Appends a cell object to self.cells. The cell object must be an instance of the TableCell class.\n\t\tArguments:\n\t\tcell - the cell to append to the row. 
If it is not an instance of the\n\t\t TableCell class, it has to be converted first\n\t\tth - Render with a th tag.\n\t\t\t\tOnly used if cell is not an instance of the TableCell class.\n\t\tattrs - attributes rendered directly in the <td>/<th> tag,\n\t\t\t\tOnly used if cell is not an instance of the TableCell class.\n\t\t\"\"\"\n\t\tif th == None:\n\t\t\tth = self.th\n\t\tif not isinstance(cell, TableCell):\n\t\t\t# Create a TableCell object\n\t\t\t# assume the first argument is either a string or a number\n\t\t\ttext = unicode(cell)\n\t\t\tcell = TableCell(text, th, attrs)\n\t\tself.cells.append(cell)\n\n\n\tdef __unicode__(self):\n\t\t\"\"\"Returns the HTML code of the table row as a unicode string\"\"\"\n\t\t# open the row\n\t\tresult = \" <tr%s>\\n\" % attrs2str(self.attrs)\n\t\t# add the cells\n\t\tfor cell in self.cells:\n\t\t\tresult += unicode(cell)\n\t\t# close the row\n\t\tresult += \" </tr>\\n\"\n\n\t\treturn result\n\n\nclass Table(object):\n\t\"\"\"A class for storing and rendering tabular data.\n\tIt should make it easy and efficient to access the data both row by row (to build html tables)\n\tand column by column (to build a JSON-formatted string).\n\tIt should be aware of the existence of structures such as thead, tbody and tfoot.\n\tIt should handle the rowspan and colspan parameters, as well as the other attributes\n\tthat html tables and their rows and cells can carry.\n\n\tThe information is stored in the self.rows attribute, which is essentially a list\n\twhose elements are instances of the TableRow class.\n\tFor convenient access to columns, the TableRow objects must contain the same number of cells.\n\tCells that do not exist because of rowspan or colspan must be represented by TableCell objects\n\twith empty content.\"\"\"\n\n\tdef __init__(self, thead_rows=[], rows=[], tfoot_rows=[], caption=\"\", attrs={}):\n\t\t\"\"\"Initializes a table object.\n\t\tAs a rule, it is created empty and filled with the add_row method.\n\t\tArguments:\n\t\tthead_rows - rows defined inside the <thead> tag. Must be TableRow instances\n\t\trows - ordinary rows. Defined inside the <tbody> tag. Must be TableRow instances\n\t\ttfoot_rows - rows defined inside the <tfoot> tag. Must be TableRow instances\n\t\tattrs - attributes rendered directly in the <table> tag.\n\t\t\"\"\"\n\t\tself.caption = caption\n\t\tself.attrs = attrs\n\t\tself.thead_rows = []\n\t\tfor row in thead_rows:\n\t\t\tself.add_row(row, group=\"thead\")\n\t\tself.tfoot_rows = []\n\t\tfor row in tfoot_rows:\n\t\t\tself.add_row(row, group=\"tfoot\")\n\t\tself.rows = []\n\t\tfor row in rows:\n\t\t\tself.add_row(row)\n\n\n\tdef add_row(self, row=None, group=\"tbody\", th=None, attrs={}):\n\t\t\"\"\"Appends a row object to self.rows. The row object must be an instance of the TableRow class.\n\t\tArguments:\n\t\trow - the row to append to the table. If it is not an instance of the\n\t\t TableRow class, it has to be converted before being appended.\n\t\tgroup - the group the row belongs to (thead, tbody, tfoot). 
Rows belonging to\n\t\t the thead and tfoot groups are rendered before the ordinary rows in the html output.\n\t\tth - when rendering html, use <th> tags 
instead of <td>.\n\t\t\t\tOnly used if row is not an instance of the TableRow class.\n\t\tattrs - attributes rendered directly in the <tr> tag.\n\t\t\"\"\"\n\t\tif not isinstance(row, TableRow):\n\t\t\t# create a TableRow object\n\t\t\t# If row is some iterable object, but not a string,\n\t\t\t# each element is treated as a cell.\n\t\t\tif th == None:\n\t\t\t\tif group in [\"thead\", \"tfoot\"]:\n\t\t\t\t\tth = True\n\t\t\t\telse:\n\t\t\t\t\tth = False\n\t\t\tif hasattr(row, \"__iter__\"):\n\t\t\t\trow = TableRow(cells=row, th=th, attrs=attrs)\n\t\t\telse:\n\t\t\t\t# wrong type -> empty row\n\t\t\t\trow = TableRow(cells=[], th=th, attrs=attrs)\n\t\t# put the row object into the matching group\n\t\tif group == 'thead':\n\t\t\tself.thead_rows.append(row)\n\t\telif group == 'tfoot':\n\t\t\tself.tfoot_rows.append(row)\n\t\telse:\n\t\t\tself.rows.append(row)\n\n\n\tdef __unicode__(self):\n\t\t\"\"\"Returns the HTML code of the table as a unicode string\"\"\"\n\t\t# open the table\n\t\tresult = \"<table%s>\\n\" % attrs2str(self.attrs)\n\t\t# Add the caption, if any\n\t\tif self.caption:\n\t\t\tresult += '<caption>%s</caption>\\n' % self.caption\n\t\t# First add the header rows, if any\n\t\tif self.thead_rows:\n\t\t\tresult += \" <thead>\\n\"\n\t\t\tfor row in self.thead_rows:\n\t\t\t\tresult += unicode(row)\n\t\t\tresult += \" </thead>\\n\"\n\t\t# Then add the table footer\n\t\tif self.tfoot_rows:\n\t\t\tresult += \" <tfoot>\\n\"\n\t\t\tfor row in self.tfoot_rows:\n\t\t\t\tresult += unicode(row)\n\t\t\tresult += \" </tfoot>\\n\"\n\t\t# Then add the table body\n\t\tresult += \" <tbody>\\n\"\n\t\tfor row in self.rows:\n\t\t\tresult += unicode(row)\n\t\tresult += \" </tbody>\\n\"\n\t\t# Close the table\n\t\tresult += '</table>
'\n\n\t\treturn result\n","repo_name":"andbar-ru/traceyourself.appspot.com","sub_path":"src/lib/Table.py","file_name":"Table.py","file_ext":"py","file_size_in_byte":9419,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"3687223627","text":"\"\"\" Unit tests for image Taylor terms\n\n\"\"\"\nimport logging\nimport os\nimport unittest\n\nimport astropy.units as u\nimport numpy\nfrom astropy.coordinates import SkyCoord\n\nfrom rascil.processing_components import (\n create_low_test_skycomponents_from_gleam,\n create_low_test_image_from_gleam,\n smooth_image,\n)\nfrom rascil.processing_components.skycomponent.taylor_terms import (\n calculate_skycomponent_list_taylor_terms,\n find_skycomponents_frequency_taylor_terms,\n)\n\nlog = logging.getLogger(\"rascil-logger\")\n\nlog.setLevel(logging.WARNING)\n\n\nclass TestSkycomponentTaylorTerm(unittest.TestCase):\n def setUp(self):\n self.persist = os.getenv(\"RASCIL_PERSIST\", False)\n\n def test_calculate_taylor_terms(self):\n phasecentre = SkyCoord(\n ra=+15.0 * u.deg, dec=-45.0 * u.deg, frame=\"icrs\", equinox=\"J2000\"\n )\n frequency = numpy.linspace(0.9e8, 1.1e8, 9)\n sc = create_low_test_skycomponents_from_gleam(\n phasecentre=phasecentre, frequency=frequency, flux_limit=10.0\n )[0:10]\n\n taylor_term_list = calculate_skycomponent_list_taylor_terms(sc, nmoment=3)\n assert len(taylor_term_list) == 10\n\n def test_find_skycomponents_frequency_taylor_terms(self):\n phasecentre = SkyCoord(\n ra=+15.0 * u.deg, dec=-45.0 * u.deg, frame=\"icrs\", equinox=\"J2000\"\n )\n frequency = numpy.linspace(0.9e8, 1.1e8, 9)\n im_list = [\n create_low_test_image_from_gleam(\n cellsize=0.001,\n npixel=512,\n phasecentre=phasecentre,\n frequency=[f],\n flux_limit=10.0,\n )\n for f in frequency\n ]\n im_list = [smooth_image(im, width=2.0) for im in im_list]\n\n for moment in [1, 2, 3]:\n sc_list = find_skycomponents_frequency_taylor_terms(\n im_list, nmoment=moment, component_threshold=20.0\n )\n assert len(sc_list) == 9\n assert len(sc_list[0]) == 3\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"NLESC-quantum/rascil","sub_path":"tests/processing_components/test_skycomponent_taylor_terms.py","file_name":"test_skycomponent_taylor_terms.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15555110886","text":"import xbmcgui\nfrom resources.lib.controller.basecontroller import BaseController, route\n\n\nclass AudioController(BaseController):\n def __init__(self, core, audio_manager, config_helper):\n self.core = core\n self.audio_manager = audio_manager\n self.config_helper = config_helper\n\n @route(name=\"select\")\n def select_audio_device(self):\n device_list = [dev.get_name() for dev in self.audio_manager.devices]\n device_list.append('sysdefault')\n audio_device = xbmcgui.Dialog().select('Choose Audio Device', device_list)\n\n if audio_device != -1:\n device_name = device_list[audio_device]\n device = self.audio_manager.get_device_by_name(device_name)\n if device:\n self.core.set_setting('audio_device', device.handler)\n self.core.set_setting('audio_device_name', device.name)\n\n return\n","repo_name":"wackerl91/luna","sub_path":"resources/lib/controller/audiocontroller.py","file_name":"audiocontroller.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"18"} +{"seq_id":"33978450211","text":"from 
transformers.utils import logging\nimport math\nimport copy\nfrom typing import Optional, List\nimport torch\nfrom torch import nn\nfrom models import xsum\nfrom models.clip.model import LayerNorm\nfrom models.mit import MultiframeIntegrationTransformer\n# from models.modeling_bart import BartLearnedPositionalEmbedding\n\nlogger = logging.get_logger(__name__)\n\n\nclass ImageTransformerEncoder(nn.Module):\n \"\"\"\"\n input:\n d_model:input dimension\n num_layers: num layer\n num_heads: Multiple Attention Mechanisms head number\n dim_feedforward:feed-forward neural network input dimension\n \"\"\"\n\n def __init__(self, d_model, num_layers, num_heads, dim_feedforward=2048, backbone=None):\n super(ImageTransformerEncoder, self).__init__()\n self.d_model = d_model\n self.multimodal_model, _ = xsum.load(None, 'ViT-B/32',\n device=\"cpu\", jit=False,\n T=8,\n droppath=0,\n use_checkpoint=False,\n use_cache=True,\n logger=logger\n )\n\n self.prompts_visual_ln = LayerNorm(self.multimodal_model.vision_width)\n self.prompts_visual_proj = nn.Parameter(\n torch.randn(self.multimodal_model.vision_width, self.multimodal_model.embed_dim))\n self.mit = self.multimodal_model.mit\n\n # encoder_layer = nn.TransformerEncoderLayer(\n # d_model=d_model, nhead=num_heads, dim_feedforward=dim_feedforward)\n # self.encoder = _TransformerEncoder(\n # encoder_layer, num_layers=num_layers)\n\n # 加载CLIP模型,只保留视觉部分\n\n # # Multimodal Model: image-text match model\n # if backbone in ['LinProj', 'CLIP-RN50', 'CLIP-ViT']:\n # # multimodal_model, self.image_preprocess = clip.load(\n # # self.args[\"clip_path\"], device=self.device, jit=False)\n # multimodal_model, self.image_preprocess = clip.load(\n # 'ViT-B/32', device='cpu', jit=False)\n # self.visual = copy.deepcopy(multimodal_model.visual)\n # del multimodal_model\n\n # # 这里选择投影方式\n # if backbone == 'LinProj':\n # # 只对图像进行投影嵌入,删除其他没用的参数\n # del self.visual.ln_pre\n # del self.visual.transformer\n # del self.visual.ln_post\n # del self.visual.proj\n\n # elif backbone.endswith('RN50'):\n # del self.visual.attnpool\n # for param in self.visual.parameters():\n # param.requires_grad = False\n\n # # Image Post Linear Proj\n # self.image_post_linproj = torch.nn.Sequential(\n # torch.nn.LayerNorm(2048),\n # torch.nn.Linear(2048, 768),\n # )\n\n # # Image [CLS] Embedding\n # self.visual_class_embedding = torch.nn.Parameter(\n # torch.zeros(self.generator.config.d_model, device=self.device))\n # self.visual_class_embedding.data.normal_(mean=0.0, std=0.02)\n\n # elif backbone.endswith('ViT'):\n # del self.visual.ln_post\n # del self.visual.proj\n # for param in self.visual.parameters():\n # param.requires_grad = False\n\n # # Image Post Linear Proj\n # self.image_post_linproj = torch.nn.Sequential(\n # torch.nn.LayerNorm(self.generator.config.hidden_size),\n # torch.nn.Linear(self.generator.config.hidden_size,\n # self.generator.config.hidden_size),\n # # torch.nn.Sigmoid(),\n # )\n\n # # Image Position Embedding 对图像位置进行嵌入\n # # self.visual_embed_positions = BartLearnedPositionalEmbedding(\n # # self.generator.config.max_position_embeddings,\n # # # 这里的d_model是transformer模型中的超参数,表示模型中嵌入和层的layer的大小\n # # self.generator.config.d_model,\n # # )\n # # self.visual_embed_positions.weight.data.normal_(mean=0.0, std=0.02)\n\n # # if self.args['use_image_score']:\n # # # Image Extractor Head 图像特征抽取\n # # self.image_classifier_head = torch.nn.Sequential(\n # # torch.nn.LayerNorm(self.generator.config.hidden_size),\n # # torch.nn.Linear(self.generator.config.hidden_size, 1),\n # # )\n # # 
for n, p in self.image_classifier_head.named_parameters():\n # # if 'weight' in n:\n # # p.data.normal_(mean=0.0, std=0.02)\n # # elif 'bias' in n:\n # # p.data.zero_()\n # # else:\n # # self.image_preprocess = None\n\n # self.pos_encoder = PositionalEncoding(d_model, dropout=0.1)\n # # self.clip, state_dict = xsum.load(None, 'ViT-B/32',\n # # device=\"cpu\", jit=False,\n # # T=8,\n # # droppath=0,\n # # use_checkpoint=False,\n # # use_cache=True,\n # # )\n\n def forward(self, image: torch.Tensor, lens: Optional[List[int]] = None):\n b = image.shape[0]\n video_features, img_features = self.encode_video(image)\n img_features = img_features.mean(dim=1, keepdim=False)\n video_features = video_features / \\\n video_features.norm(dim=-1, keepdim=True)\n return video_features\n\n # if lens is not None:\n # max_len = max(lens)\n\n # mask = [([False] * l + [True] * (max_len - l)) for l in lens]\n # mask = torch.tensor(mask).to(device=inputs.device)\n # else:\n # mask = None\n\n # # input = batch\n # inputs = inputs.permute(1, 0, 2)\n\n # inputs = inputs * math.sqrt(self.d_model)\n # inputs = self.pos_encoder(inputs)\n\n # # (seq_len, bs, dim)\n # outputs = self.clip(image=inputs)\n\n # outputs = self.encoder(src=inputs, src_key_padding_mask=mask)\n # # outputs = self.clip(image=inputs)\n\n # return [o.permute(1, 0, 2) for o in outputs]\n\n # [batch_size, channel, W, H]\n # VICR\n\n def encode_image(self, image):\n return self.multimodal_model.visual(image)\n\n def encode_video(self, image):\n # b:batch-size t:time-step c:channels h:height w:width\n b, t, c, h, w = image.size()\n\n # Merge the first two dimensions (batch and time)\n image = image.reshape(-1, c, h, w)\n\n # cls here denotes the class-token features\n cls_features, img_features = self.encode_image(\n image) # (16,512) (16,49,769)\n img_features = self.prompts_visual_ln(img_features)\n img_features = img_features @ self.prompts_visual_proj # (16,49,512)\n\n cls_features = cls_features.view(b, t, -1) # (2,8,512)\n img_features = img_features.view(\n b, t, -1, cls_features.shape[-1]) # (2,8,49,512)\n # MIT aggregates the frames: shape goes from [2,8,512] -> [2,512]\n video_features = self.multimodal_model.mit(cls_features) # 2,512\n\n return video_features, img_features\n\n\ndef padTensor(t: torch.Tensor, targetLen: int) -> torch.Tensor:\n oriLen, dim = t.size()\n return torch.cat((t, torch.zeros(targetLen - oriLen, dim).to(t.device)), dim=0)\n\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\nclass _TransformerEncoder(nn.Module):\n def __init__(self, encoder_layer, num_layers, norm=None):\n super(_TransformerEncoder, self).__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n def forward(self, src: torch.Tensor, mask: Optional[torch.Tensor] = None, src_key_padding_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n outputs = [src]\n\n for mod in self.layers:\n output = mod(outputs[-1], src_mask=mask,\n src_key_padding_mask=src_key_padding_mask)\n outputs.append(output)\n\n if self.norm is not None:\n outputs[-1] = self.norm(outputs[-1])\n\n return outputs[1:]\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(\n 0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = 
torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n","repo_name":"zhengzehong331/VG-SUM","sub_path":"src/models/img_transformer.py","file_name":"img_transformer.py","file_ext":"py","file_size_in_byte":9211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3904734499","text":"from deep_translator import GoogleTranslator\nimport os\n\n# Specify the source file name and the translated file name\nsource_file = \"english_document.txt\"\ntranslated_file = \"japanese_document.txt\"\n\n# Specify the name of the new text folder to create\noutput_folder = \"translated_texts\"\n\n# Create the folder if it does not exist\nif not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n# Build the path of the translated file\ntranslated_file_path = os.path.join(output_folder, translated_file)\n\n# Define the target language (Japanese in this case)\ntarget_language = \"ja\"\n\n# Open the source file\nwith open(source_file, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n\n# Create a list to hold the translated text\ntranslated_lines = []\n\n# Translate each line and append it to the list\nfor line in lines:\n translated_text = GoogleTranslator(source='en', target=target_language).translate(text=line)\n translated_lines.append(translated_text)\n\n# Write the translated text to the output file\nwith open(translated_file_path, 'w', encoding='utf-8') as f:\n for line in translated_lines:\n f.write(line + os.linesep)\n","repo_name":"fukashin/translator","sub_path":"file.write.py","file_name":"file.write.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17048475221","text":"\r\n\r\nf = open(\"pretransform.txt\", \"r\", encoding=\"utf8\")\r\nnf1=open(\"fra8.txt\", \"w\", encoding=\"utf8\")\r\n#nf2=open(\"Y.txt\", \"w\", encoding=\"utf8\")\r\n\r\nX = \"\"\r\n#Y = \"\"\r\n\r\n\r\n\"\"\"Modify the code to predict substitutions, deletions, etc.\"\"\"\r\n\r\n\r\n\r\ndef clean(line):\r\n line = line.split(\";\")\r\n line = \" \".join(line)\r\n \"\"\"line = line.split(\"\")\r\n line = \" \".join(line)\"\"\"\r\n line = line.split()\r\n line = \" \".join(line)\r\n return line\r\n\r\n\r\nlines = f.readlines()\r\ni = 0\r\nwhile i < len(lines):\r\n i += 1\r\n line = clean(lines[i])\r\n X += line + \"\\t\"\r\n i += 2\r\n line = clean(lines[i])\r\n X += line + \"\\t_\\n\"\r\n i += 2\r\n\r\nnf1.write(X)\r\n#nf2.write(Y)\r\n\r\nf.close()\r\nnf1.close()\r\n#nf2.close()\r\n","repo_name":"thibault-roux/ASR","sub_path":"data/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30963022078","text":"def Artist():\n import matplotlib.pyplot as plt\n artist = [\"rihahna\", \"sia\", \"ariana\", \"eminem\", \"lil pump\", \"lil wayne\", \"lil uzi vert\",\n \"lil wayne\", \"lil pump\", \"eminem\", \"ariana\", \"sia\", \"rihahna\"] # 13 entries, matching sales and colors\n sales = [19500, 2909, 36500, 44455, 5999, 645,\n 709, 890, 945, 1000, 16771, 1209, 13787]\n colors = [\"red\", \"blue\", \"green\", \"yellow\", \"black\", \"pink\", \"orange\",\n \"purple\", \"brown\", \"grey\", \"white\", \"cyan\", \"magenta\"] # color list\n\n fig, ax = plt.subplots(nrows=2, ncols=3)\n\n fig.suptitle(\"Artist and Sales\", fontsize=20)\n\n ax[0, 0].set_title(\"Artist\")\n ax[0, 0].set_xlabel(\"Artist\")\n ax[0, 0].set_ylabel(\"Sales\")\n ax[0, 0].bar(artist, sales, color=colors)\n # pie chart\n explode = [0.1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ax[0, 1].set_title(\"Artist\")\n ax[0, 1].pie(sales, explode=explode, labels=artist, colors=colors,\n autopct=\"%1.1f%%\", shadow=True, startangle=90)\n # line chart\n ax[1, 1].plot(artist, sales, color=\"blue\")\n ax[1, 1].set_title(\"line chart\")\n\n # scatter graph\n sizes = [sales[i]/100 for i in range(len(sales))]\n ax[1, 0].scatter(artist, sales, color=colors, s=sizes, alpha=0.5,\n marker='o', edgecolors='black')\n ax[1, 0].set_xlabel('Artists')\n ax[1, 0].set_ylabel('Sales')\n ax[1, 0].set_title('Scatter graph')\n # histogram\n ax[0, 2].hist(sales, color='b', edgecolor='black', linewidth=1.5)\n ax[0, 2].set_xlabel('Sales')\n ax[0, 2].set_ylabel('Count')\n ax[0, 2].set_title('Histogram graph')\n\n # density graph\n ax[1, 2].plot(artist, sales, 'o')\n ax[1, 2].set_xlabel('Artists')\n ax[1, 2].set_ylabel('Sales')\n ax[1, 2].set_title('Density plot')\n\n plt.show()\n\n\nArtist()\n","repo_name":"yasinmillers/CITT","sub_path":"visualise.py","file_name":"visualise.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"3864107259","text":"from PyQt5 import QtGui\r\nimport csv\r\n\r\n\r\ndef format(colour, is_bold, is_italic):\r\n \"\"\"\r\n The function joins colour, bold and italic settings into one QTextCharFormat\r\n :param colour: (str) HEX-format colour (example: #ff00ff)\r\n :param is_bold: (bool) if True QColor will be bold else QColor won't\r\n :param is_italic: (bool) if True QColor will be italic else QColor won't\r\n :return: (QTextCharFormat)\r\n \"\"\"\r\n _color = QtGui.QColor()\r\n _color.setNamedColor(colour)\r\n\r\n _format = QtGui.QTextCharFormat()\r\n _format.setForeground(_color)\r\n if is_bold:\r\n _format.setFontWeight(QtGui.QFont.Bold)\r\n if is_italic:\r\n _format.setFontItalic(True)\r\n\r\n return _format\r\n\r\n\r\ndef reading_csv_linter(name_of_file):\r\n \"\"\"\r\n The function reads a csv table with linter styles and turns it into a table\r\n :param name_of_file: (str) name of csv table\r\n :return: (list) list of dicts with linter settings\r\n \"\"\"\r\n with open(file=name_of_file) as csvfile:\r\n design = csv.DictReader(csvfile, delimiter=';', quotechar='\"')\r\n\r\n return list(design)\r\n\r\n\r\ndef making_styles(data_):\r\n \"\"\"\r\n Create a dict where keys are linter elements and\r\n values are the settings of those elements\r\n :param data_: (list) list of dicts from the csv table\r\n :return: (dict) dict of QTextCharFormat\r\n \"\"\"\r\n STYLES = dict()\r\n for dct in data_:\r\n is_bold = True if dct['bold'] == 'True' else False\r\n is_italic = True if dct['italic'] == 'True' else False\r\n STYLES[dct['name']] = format(dct['HEX-colour'], is_bold, is_italic)\r\n return STYLES\r\n\r\n\r\ndef all_together(name):\r\n \"\"\"\r\n The function joins the two functions into one to be called from another module\r\n :param name: name of csv table with linter settings\r\n :return: (dict) dict of QTextCharFormat\r\n \"\"\"\r\n return making_styles(reading_csv_linter(name))\r\n\r\n\r\nif __name__ == '__main__':\r\n data = reading_csv_linter('linter_csv_table.csv')\r\n print(making_styles(data))\r\n","repo_name":"UserGit-bug/PyEditor","sub_path":"styles.py","file_name":"styles.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41679414416","text":"#!/usr/bin/python3\n\nfrom __future__ import annotations\nfrom typing import Dict\nfrom requests 
import get\nfrom json import dumps\nfrom os.path import join, abspath, dirname, exists, realpath\nfrom os import mkdir\n\n\ndef build(target_file: str = abspath(join(dirname(realpath(__file__)), '../data/country.json'))) -> Dict[str, str]:\n '''\n Builds country info dataset.\n\n First it fetches data from GeoNames data dumping site, then processes text data and converts to JSON. Finally stores it in provided file `/data/country.json`.\n\n On success returns\n `{'success': 'true'}`\n else \n `{'error': ' ... '}`\n\n '''\n code = {'error': 'incomplete'}\n try:\n if(not exists(dirname(target_file))):\n # creates target data store directory, if it doesn't exist already\n mkdir(dirname(target_file))\n with open(target_file, mode='w') as fd:\n fd.write(dumps(\n {\n 'countries': [{'iso': country[0], 'iso3': country[1], 'isoNumeric': country[2], 'fips': country[3], 'country': country[4], 'capital': country[5], 'area(in sq km)': country[6], 'population': country[7], 'continent': country[8], 'tld': country[9], 'currencyCode': country[10], 'currencyName': country[11], 'phone': country[12], 'postalFormat': country[13], 'postalRegex': country[14], 'languages': country[15].split(','), 'geonameid': country[16], 'neighbours': country[17].split(','), 'equivalentFips': country[18]} for country in (line.split('\\t') for line in get(\n 'http://download.geonames.org/export/dump/countryInfo.txt').text.split('\\n') if(line and (not line.startswith('#'))))]\n }, indent=4, ensure_ascii=False))\n code = {'success': 'true'}\n except Exception as e:\n code = {'error': str(e)}\n return code\n\n\nif __name__ == '__main__':\n try:\n print(build())\n except KeyboardInterrupt:\n print('\\n[!]Terminated :/')\n finally:\n exit(0)\n","repo_name":"itzmeanjan/countryAndWeather","sub_path":"fetch/country.py","file_name":"country.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"39291571423","text":"import machine # Used for deepsleep\r\nimport time # Used for sleep\r\nimport json # Used for config file\r\nimport system # Used for voltage\r\nimport pycom # Used for the RGB LED\r\nfrom machine import Pin\r\nfrom dht import DHT\r\n\r\nresetReason = machine.reset_cause()\r\nwakeReason = machine.wake_reason()[0] # (wake_reason, gpio_list)\r\n\r\n### SETTINGS ###\r\npycom.rgbled(0xFFFF00) # Yellow\r\n\r\n# Sensor used for measuring temperature\r\n# DHT11: 0, DHT22: 1\r\nDHTType = 1\r\nDHTPin = 'P23'\r\n\r\n# Reed switch used to wake the device\r\nreedPin = 'P22'\r\n\r\n# Internal voltage divider used for measuring battery voltage\r\n# These values are from Pycom Extension Board v3.1\r\nbatteryPin = 'P16'\r\nresistorR1 = 1000\r\nresistorR2 = 1000\r\nbatteryAttn = 6.0\r\n\r\n# Deep sleep settings\r\n# Mode can be machine.WAKEUP_ALL_LOW or machine.WAKEUP_ANY_HIGH\r\n# enable_pull decides if pull up / down resistors should be enabled during sleep\r\nsecondsToSleep = 60 * 60 * 4\r\nwakePins = ['P22']\r\nwakeMode = machine.WAKEUP_ANY_HIGH\r\nwakePull = True\r\n\r\n### NETWORK ###\r\nwith open('config.json') as file:\r\n config = json.load(file)\r\n\r\nif config['USE_LORA']:\r\n from network import LoRa\r\n import socket\r\n import ubinascii\r\n import struct\r\n\r\n # Initialise LoRa in LORAWAN mode.\r\n lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.EU868)\r\n\r\n if resetReason == machine.DEEPSLEEP_RESET:\r\n print('Woke from deepsleep...')\r\n lora.nvram_restore()\r\n else:\r\n print('Connecting to LoRa...')\r\n app_eui = ubinascii.unhexlify(config['APP_EUI'])\r\n 
app_key = ubinascii.unhexlify(config['APP_KEY'])\r\n\r\n lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)\r\n while not lora.has_joined():\r\n print('Not yet joined...')\r\n pycom.rgbled(0xcc00ff)\r\n time.sleep(1)\r\n pycom.rgbled(0x000000)\r\n time.sleep(0.5)\r\n\r\n print(\"Joined network\")\r\n \r\n # Create a LoRa socket\r\n s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\r\n\r\n # Set the LoRaWAN data rate\r\n s.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)\r\n\r\nelif config['USE_WIFI']:\r\n print('Connecting to WiFi...')\r\n from network import WLAN\r\n import urequests as requests\r\n try:\r\n wlan = WLAN(mode=WLAN.STA)\r\n nets = wlan.scan()\r\n for net in nets:\r\n if net.ssid == config['SSID']:\r\n print('Network found!')\r\n wlan.connect(net.ssid, auth=(net.sec, config['SSID_PASS']), timeout=5000)\r\n while not wlan.isconnected():\r\n machine.idle() # Save power while waiting\r\n print('WLAN connection succeeded!')\r\n break\r\n except:\r\n print('Was not able to connect to WiFi...')\r\n\r\n### SENSOR SETUP ###\r\nprint('Initiating sensors...')\r\npycom.rgbled(0x0000FF) # Blue\r\n\r\ndhtsensor = DHT(Pin(DHTPin, mode=Pin.OPEN_DRAIN), DHTType)\r\nreedsensor = Pin(reedPin, mode=Pin.IN, pull=Pin.PULL_UP)\r\nsysvolt = system.SystemVoltage(batteryPin, resistorR1, resistorR2, batteryAttn)\r\n\r\nmachine.pin_sleep_wakeup(pins=wakePins, mode=wakeMode, enable_pull=wakePull)\r\n\r\n### SENSOR HANDLING ###\r\nif wakeReason == machine.PIN_WAKE:\r\n print('Pin triggered!')\r\n pycom.rgbled(0xFF0000) # Red\r\n \r\n # Send notification\r\n if config['USE_LORA']:\r\n print('Sending over LoRa...')\r\n s.setblocking(True)\r\n s.bind(2) # Define port\r\n s.send(bytes([0x01]))\r\n s.setblocking(False)\r\n\r\n elif config['USE_WIFI'] and wlan.isconnected():\r\n print('Sending over WiFi...')\r\n try:\r\n payload = { \"password\": config['HTTP_PASSWORD'], \"triggered\": True }\r\n headers = { \"Content-Type\": \"application/json\" }\r\n res = requests.post(config['HTTP_URL'], headers = headers, json = payload)\r\n res.close()\r\n except:\r\n print('An error occurred when trying to send data...')\r\n\r\n # Avoid sending multiple notifications\r\n while reedsensor.value():\r\n machine.idle() # Save power while waiting\r\n\r\nelif wakeReason == machine.RTC_WAKE or resetReason == machine.WDT_RESET or resetReason == machine.PWRON_RESET:\r\n print('Sleep time ended')\r\n print('Fetching and sending data...')\r\n pycom.rgbled(0x00FF00) # Green\r\n\r\n # Fetch data from DHT sensor\r\n dhtresult = dhtsensor.read()\r\n while not dhtresult.is_valid():\r\n time.sleep(.5)\r\n dhtresult = dhtsensor.read()\r\n\r\n # Fetch values\r\n tempValue = dhtresult.temperature\r\n humiValue = dhtresult.humidity\r\n battValue = sysvolt.read()\r\n\r\n # Print values\r\n print('Temp: ', tempValue)\r\n print('RH: ', humiValue)\r\n print('Volt: ', battValue)\r\n\r\n # Send values\r\n if config['USE_LORA']:\r\n print('Sending over LoRa...')\r\n s.setblocking(True)\r\n s.bind(1) # Define port\r\n # Multiply with 100 to avoid having to send floats\r\n payload = struct.pack('>h', int(tempValue * 100))\r\n payload += struct.pack('>H', int(humiValue * 100))\r\n payload += struct.pack('>H', int(battValue * 100))\r\n s.send(payload)\r\n s.setblocking(False)\r\n\r\n elif config['USE_WIFI'] and wlan.isconnected():\r\n print('Sending over WiFi...')\r\n try:\r\n payload = { \"password\": config['HTTP_PASSWORD'], \"temperature\": tempValue, \"humidity\": humiValue, \"voltage\": battValue }\r\n headers = { \"Content-Type\": 
\"application/json\" }\r\n res = requests.post(config['HTTP_URL'], headers = headers, json = payload)\r\n res.close()\r\n except:\r\n print('An error occured when trying to send data...')\r\n\r\n### GO TO SLEEP ###\r\nprint('Preparing deepsleep...')\r\npycom.rgbled(0x000000) # Turn off\r\n\r\nif (config['USE_LORA']):\r\n lora.nvram_save()\r\nelif config['USE_WIFI']:\r\n wlan.deinit() # Avoid getting wifi timeout next cycle\r\n\r\nsleepInterval = 0\r\nremainingSleepTime = machine.remaining_sleep_time() # Milliseconds\r\nif (remainingSleepTime > 0):\r\n sleepInterval = remainingSleepTime\r\nelse:\r\n sleepInterval = int(secondsToSleep * 1000)\r\n\r\nmachine.deepsleep(sleepInterval)","repo_name":"nilste/mailbox","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30583377088","text":"import pandas as pd\nfrom sklearn import model_selection\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier as KNN\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import label_binarize\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nfrom plot_confusion_matrix import plot_confusion_matrix\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom plot_ROC_curve import plot_ROC_curve\nfrom sklearn.model_selection import GridSearchCV\n\n\nwith open(\"UCI HAR Dataset/features.txt\",\"r\") as ft:\n features = [line.split()[1] for line in ft.readlines()]\n\nX_train = pd.read_csv(\"UCI HAR Dataset/train/X_train.txt\", sep = '\\s+', names = features)\ny_train = pd.read_csv(\"UCI HAR Dataset/train/y_train.txt\", sep = '\\s+', names = ['activity'])\nX_test = pd.read_csv(\"UCI HAR Dataset/test/X_test.txt\", sep = '\\s+', names = features)\ny_test = pd.read_csv(\"UCI HAR Dataset/test/y_test.txt\", sep = '\\s+', names = ['label'])\n\n# convert dataframe to numpy matrix\nX_train_t = X_train.as_matrix()\ny_train_t = y_train.as_matrix()\nX_test_t = X_test.as_matrix()\ny_test_t = y_test.as_matrix()\n\n# binarize labels\ny_train_bin = label_binarize(y_train_t, classes=[1,2,3,4,5,6])\ny_test_bin = label_binarize(y_test_t, classes=[1,2,3,4,5,6])\nn_classes = y_test_bin.shape[1]\n\nclass_names = ['WALKING', 'WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS', 'SITTING', 'STANDING', 'LAYING']\n#kfold = model_selection.KFold(n_splits=10)\nkfold = model_selection.StratifiedKFold(n_splits=10)\nestm1 = []\nclf1 = LogisticRegression(random_state=20)\nestm1.append(('logistic', clf1))\nclf2 = SVC(kernel='linear', probability=True, random_state=20)\nestm1.append(('svc', clf2))\nclf3 = KNN(n_neighbors=10)\nestm1.append(('knn', clf3))\n\neclf = VotingClassifier(estm1, n_jobs=-1, voting='soft', weights=[1.6, 3, 1])\n# multiclass classifer\nclassifier = OneVsRestClassifier(eclf)\n# train ensemble\neclf.fit(X_train, y_train['activity'].ravel())\n# predict probabilities of each class\ny_score = classifier.fit(X_train_t, y_train_bin).predict_proba(X_test_t)\ny_pred = eclf.predict(X_test).tolist()\ny_true = y_test['label'].values.tolist()\n#score_cv = model_selection.cross_val_score(eclf, X_train, y_train['activity'].ravel(), cv=kfold)\n# y_pred_cv = model_selection.cross_val_predict(eclf, X_train, y_train['activity'].ravel(), cv=kfold)\nprint(accuracy_score(y_true, 
y_pred))\n#print(score_cv)\nprint(classification_report(y_true, y_pred))\n\n# TODO: try more classifier combinations\nestimators2 = []\n\nroc_fig = plt.figure()\nplot_ROC_curve(y_test_bin, y_score, 6, 'ROC curve for ensemble of logistic, KNN, and SVM')\nroc_fig.savefig('./Plots/voting_ensemble_ROC_curve.png')\n\n# # Compute confusion matrix\ncnf_matrix = confusion_matrix(y_true, y_pred)\nnp.set_printoptions(precision=2)\n\n# Plot non-normalized confusion matrix\ncnf_fig = plt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix for voting classifier')\n\n# Plot normalized confusion matrix\n# plt.figure()\n# plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n# title='Normalized confusion matrix')\nplt.show()\ncnf_fig.savefig('./Plots/ensemble_cnf_matrix.png')","repo_name":"NicolaiHerforth/human-action-recognition","sub_path":"voting_ensemble.py","file_name":"voting_ensemble.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"28147509369","text":"from pymongo import MongoClient\n\nclient = MongoClient(\n \"mongodb+srv://nin187:mongolia@cluster0.bx9tr.mongodb.net/myFirstDatabase?retryWrites=true&w=majority\"\n)\ndb = client.dbsparta\n\n# Fetch the rating of the movie titled '매트릭스' (The Matrix)\nmovie = db.movies.find_one({\"title\": \"매트릭스\"}, {\"_id\": False})\nprint(f'A1. 매트릭스의 평점: {movie[\"point\"]}\\n')\n\n# Fetch the titles of movies whose rating equals the rating of '매트릭스'\nmovie = db.movies.find_one({\"title\": \"매트릭스\"}, {\"_id\": False})\nprint(f'A2. 평점이 {movie[\"point\"]}인 영화들')\ntmp_point = movie[\"point\"]\nmovies = list(db.movies.find({\"point\": tmp_point}, {\"_id\": False}))\nfor tmp_movie in movies:\n print(tmp_movie[\"title\"])\n# print(\"\\n\")\n\n# Set the rating of '매트릭스' to 0\ndb.movies.update_one({\"title\": \"매트릭스\"}, {\"$set\": {\"point\": 0}})\nmovie = db.movies.find_one({\"title\": \"매트릭스\"}, {\"_id\": False})\nprint(f'\\nA3. 
{movie[\"title\"]}의 평점을 {movie[\"point\"]}으로 변경했습니다.')\nmovie = db.movies.find_one({\"title\": \"매트릭스\"}, {\"_id\": False})\ndb.movies.update_one({\"title\": \"매트릭스\"}, {\"$set\": {\"point\": tmp_point}})\nprint(f'{movie[\"title\"]}의 평점을 다시 {tmp_point}으로 변경했습니다.')","repo_name":"nin187/sparta","sub_path":"pythonprac/db_quiz.py","file_name":"db_quiz.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20270783274","text":"import ipywidgets as widgets\nfrom IPython.display import display\n\n\nclass FilePickerWithOutput:\n \"\"\"FilePickerWithOutput: A widget that displays a file picker and an output widget that displays the selected file path.\"\"\"\n\n def __init__(self, label, message='Selected file: \"{}\"'):\n self.file_picker = widgets.FileUpload(description=label)\n self.output = widgets.Output()\n self.value = None\n\n def update_output(change):\n with self.output:\n self.output.clear_output()\n if len(change[\"new\"]) > 0:\n print(change[\"new\"])\n self.value = change[\"new\"]\n print(message.format(str(self.value)))\n\n self.file_picker.observe(update_output, names=\"value\")\n display(widgets.VBox([self.file_picker, self.output]))\n","repo_name":"lsternlicht/ipywidgets_gallery","sub_path":"ipywidgets_gallery/filepicker.py","file_name":"filepicker.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32354080257","text":"import logging\n\nfrom odoo import models\n\n_logger = logging.getLogger(__name__)\n\n\nclass MassReconcileSimple(models.AbstractModel):\n _name = \"mass.reconcile.simple\"\n _inherit = \"mass.reconcile.base\"\n _description = \"Mass Reconcile Simple\"\n\n # has to be subclassed\n # field name used as key for matching the move lines\n _key_field = None\n\n def rec_auto_lines_simple(self, lines):\n if self._key_field is None:\n raise ValueError(\"_key_field has to be defined\")\n count = 0\n res = []\n while count < len(lines):\n for i in range(count + 1, len(lines)):\n if lines[count][self._key_field] != lines[i][self._key_field]:\n break\n check = False\n if lines[count][\"credit\"] > 0 and lines[i][\"debit\"] > 0:\n credit_line = lines[count]\n debit_line = lines[i]\n check = True\n elif lines[i][\"credit\"] > 0 and lines[count][\"debit\"] > 0:\n credit_line = lines[i]\n debit_line = lines[count]\n check = True\n if not check:\n continue\n reconciled, dummy = self._reconcile_lines(\n [credit_line, debit_line], allow_partial=False\n )\n if reconciled:\n res += [credit_line[\"id\"], debit_line[\"id\"]]\n del lines[i]\n if (\n self.env.context.get(\"commit_every\", 0)\n and len(res) % self.env.context[\"commit_every\"] == 0\n ):\n # new cursor is already open in cron\n self.env.cr.commit() # pylint: disable=invalid-commit\n _logger.info(\n \"Commit the reconciliations after %d groups\", len(res)\n )\n break\n count += 1\n return res\n\n def _simple_order(self, *args, **kwargs):\n return \"ORDER BY account_move_line.%s\" % self._key_field\n\n def _action_rec(self):\n \"\"\"Match only 2 move lines, do not allow partial reconcile\"\"\"\n select = self._select_query()\n select += \", account_move_line.%s \" % self._key_field\n where, params = self._where_query()\n where += \" AND account_move_line.%s IS NOT NULL \" % self._key_field\n\n where2, params2 = self._get_filter()\n query = \" \".join(\n (select, self._from_query(), where, where2, self._simple_order())\n )\n 
self.flush()\n self.env.cr.execute(query, params + params2)\n lines = self.env.cr.dictfetchall()\n return self.rec_auto_lines_simple(lines)\n\n\nclass MassReconcileSimpleName(models.TransientModel):\n _name = \"mass.reconcile.simple.name\"\n _inherit = \"mass.reconcile.simple\"\n _description = \"Mass Reconcile Simple Name\"\n\n # has to be subclassed\n # field name used as key for matching the move lines\n _key_field = \"name\"\n\n\nclass MassReconcileSimplePartner(models.TransientModel):\n _name = \"mass.reconcile.simple.partner\"\n _inherit = \"mass.reconcile.simple\"\n _description = \"Mass Reconcile Simple Partner\"\n\n # has to be subclassed\n # field name used as key for matching the move lines\n _key_field = \"partner_id\"\n\n\nclass MassReconcileSimpleReference(models.TransientModel):\n _name = \"mass.reconcile.simple.reference\"\n _inherit = \"mass.reconcile.simple\"\n _description = \"Mass Reconcile Simple Reference\"\n\n # has to be subclassed\n # field name used as key for matching the move lines\n _key_field = \"ref\"\n","repo_name":"OCA/account-reconcile","sub_path":"account_mass_reconcile/models/simple_reconciliation.py","file_name":"simple_reconciliation.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"18"} +{"seq_id":"33746273542","text":"import aws_cdk.aws_ec2 as ec2\nfrom aws_cdk import Stack\nfrom aws_cdk.aws_certificatemanager import Certificate\nimport aws_cdk.aws_ecs as ecs\nimport aws_cdk.aws_ecs_patterns as ecs_patterns\nfrom aws_cdk.aws_ecr_assets import DockerImageAsset\nfrom aws_cdk.aws_iam import PolicyStatement\nfrom aws_cdk.aws_logs import RetentionDays, LogGroup\nfrom aws_cdk.aws_ssm import StringParameter\nfrom constructs import Construct\nimport aws_cdk.aws_route53 as route53\nimport aws_cdk.aws_route53_targets as targets\nimport aws_cdk.aws_iam as aws_iam\nimport aws_cdk.aws_eks as aws_eks\n\nfrom airflow_stack.rds_elasticache_stack import RdsElasticacheEfsStack\n\nDB_PORT = 5432\nAIRFLOW_WORKER_PORT=8793\nREDIS_PORT = 6379\n\ndef get_cluster_name(deploy_env):\n return f\"AirflowCluster-{deploy_env}\"\n\ndef get_webserver_service_name(deploy_env):\n return f\"AirflowWebserver-{deploy_env}\"\n\ndef get_webserver_taskdef_family_name(deploy_env):\n return f\"AirflowWebTaskDef-{deploy_env}\"\n\ndef get_scheduler_service_name(deploy_env):\n return f\"AirflowSchedulerSvc-{deploy_env}\"\n\ndef get_scheduler_taskdef_family_name(deploy_env):\n return f\"AirflowSchedulerTaskDef-{deploy_env}\"\n\ndef get_worker_service_name(deploy_env):\n return f\"AirflowWorkerSvc-{deploy_env}\"\n\ndef get_worker_taskdef_family_name(deploy_env):\n return f\"AirflowWorkerTaskDef-{deploy_env}\"\n\nclass AirflowStack(Stack):\n\n def __init__(self, scope: Construct, id: str, deploy_env: str, vpc:ec2.Vpc, db_redis_stack: RdsElasticacheEfsStack,\n config: dict, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n self.config = config\n self.deploy_env = deploy_env\n self.db_port = DB_PORT\n # cannot map volumes to Fargate task defs yet - so this is done via Boto3 since CDK does not\n # support it yet: https://github.com/aws/containers-roadmap/issues/825\n #self.efs_file_system_id = db_redis_stack.efs_file_system_id\n cluster_name = get_cluster_name(deploy_env)\n self.cluster = ecs.Cluster(self, cluster_name, cluster_name=cluster_name, vpc=vpc)\n pwd_secret = ecs.Secret.from_ssm_parameter(StringParameter.from_secure_string_parameter_attributes(self, f\"dbpwd-{deploy_env}\",\n version=1, 
parameter_name=\"postgres_pwd\"))\n self.secrets = {\"POSTGRES_PASSWORD\": pwd_secret}\n self.vpc = vpc\n environment = {\"EXECUTOR\": \"Celery\", \"POSTGRES_HOST\" : db_redis_stack.db_host,\n \"POSTGRES_PORT\": str(self.db_port), \"POSTGRES_DB\": \"airflow\", \"POSTGRES_USER\": self.config[\"dbadmin\"],\n \"REDIS_HOST\": db_redis_stack.redis_host,\n \"VISIBILITY_TIMEOUT\": str(self.config[\"celery_broker_visibility_timeout\"])}\n image_asset = DockerImageAsset(self, \"AirflowImage\", directory=\"build\")\n self.image = ecs.ContainerImage.from_docker_image_asset(image_asset)\n # web server - this initializes the db so must happen first\n self.web_service = self.airflow_web_service(environment)\n # https://github.com/aws/aws-cdk/issues/1654\n self.web_service_sg().connections.allow_to_default_port(db_redis_stack.postgres_db, 'allow PG')\n redis_port_info = ec2.Port(protocol=ec2.Protocol.TCP, string_representation=\"allow to redis\",\n from_port=REDIS_PORT, to_port=REDIS_PORT)\n worker_port_info = ec2.Port(protocol=ec2.Protocol.TCP, string_representation=\"allow to worker\",\n from_port=AIRFLOW_WORKER_PORT, to_port=AIRFLOW_WORKER_PORT)\n redis_sg = ec2.SecurityGroup.from_security_group_id(self, id=f\"Redis-SG-{deploy_env}\",\n security_group_id=db_redis_stack.redis.vpc_security_group_ids[0])\n self.web_service_sg().connections.allow_to(redis_sg, redis_port_info, 'allow Redis')\n self.web_service_sg().connections.allow_to_default_port(db_redis_stack.efs_file_system)\n # scheduler\n self.scheduler_service = self.create_scheduler_ecs_service(environment)\n # worker\n self.worker_service = self.create_worker_service(environment)\n self.scheduler_sg().connections.allow_to_default_port(db_redis_stack.postgres_db, 'allow PG')\n self.scheduler_sg().connections.allow_to(redis_sg, redis_port_info, 'allow Redis')\n self.scheduler_sg().connections.allow_to_default_port(db_redis_stack.efs_file_system)\n\n self.worker_sg().connections.allow_to_default_port(db_redis_stack.postgres_db, 'allow PG')\n self.worker_sg().connections.allow_to(redis_sg, redis_port_info, 'allow Redis')\n self.worker_sg().connections.allow_to_default_port(db_redis_stack.efs_file_system)\n # When you start an airflow worker, airflow starts a tiny web server\n # subprocess to serve the workers local log files to the airflow main\n # web server, who then builds pages and sends them to users. This defines\n # the port on which the logs are served. 
It needs to be unused, and open\n # visible from the main web server to connect into the workers.\n self.web_service_sg().connections.allow_to(self.worker_sg(), worker_port_info, 'web service to worker')\n self.setup_eks_cluster()\n\n def web_service_sg(self):\n return self.web_service.service.connections.security_groups[0]\n\n def scheduler_sg(self):\n return self.scheduler_service.connections.security_groups[0]\n\n def worker_sg(self):\n return self.worker_service.connections.security_groups[0]\n\n def airflow_web_service(self, environment):\n service_name = get_webserver_service_name(self.deploy_env)\n family = get_webserver_taskdef_family_name(self.deploy_env)\n task_def = ecs.FargateTaskDefinition(self, family, cpu=512, memory_limit_mib=1024, family=family)\n task_def.add_container(f\"WebWorker-{self.deploy_env}\", image=self.image, environment=environment,\n secrets=self.secrets, logging=ecs.LogDrivers.aws_logs(stream_prefix=family,\n log_retention=RetentionDays.ONE_DAY))\n task_def.default_container.add_port_mappings(ecs.PortMapping(container_port=8080, host_port=8080,\n protocol=ec2.Protocol.TCP))\n # we want only 1 instance of the web server so when new versions are deployed max_healthy_percent=100\n # you have to manually stop the current version and then it should start a new version - done by deploy task\n lb_security_group = ec2.SecurityGroup(self, f\"lb-sec-group-{self.deploy_env}\", vpc=self.vpc)\n service = ecs_patterns.ApplicationLoadBalancedFargateService(\n self, service_name,\n cluster=self.cluster, # Required\n service_name=service_name,\n platform_version=ecs.FargatePlatformVersion.VERSION1_4,\n cpu=512, # Default is 256\n desired_count=1, # Default is 1\n task_definition=task_def,\n memory_limit_mib=2048, # Default is 512\n public_load_balancer=True,\n security_groups=[lb_security_group],\n certificate=Certificate.from_certificate_arn(self, f\"lb-cert-{self.deploy_env}\",\n certificate_arn=self.config[\"lb_certificate_arn\"]),\n max_healthy_percent=100\n )\n service.target_group.configure_health_check(path=\"/health\")\n # restrict access to the load balancer to only VPN\n lb_security_group.connections.allow_from(ec2.Peer.ipv4(self.config[\"lb_vpn_addresses\"]),\n ec2.Port.tcp(443))\n # configure DNS alias for the load balancer\n route53.ARecord(self, f\"lb-record-{self.deploy_env}\",\n zone=route53.HostedZone.from_hosted_zone_attributes(\n self,\n f\"Zone-{self.deploy_env}\",\n zone_name=f\"Zone-{self.deploy_env}\",\n hosted_zone_id=self.config[\"route53_zone_id\"]\n ),\n record_name = self.config[\"lb_dns_name\"],\n target=route53.RecordTarget.from_alias(targets.LoadBalancerTarget(service.load_balancer)))\n return service\n\n def create_worker_service(self, environment):\n family = get_worker_taskdef_family_name(self.deploy_env)\n service_name = get_worker_service_name(self.deploy_env)\n service = self.create_service(service_name, family, f\"WorkerCont-{self.deploy_env}\", environment, \"worker\",\n desired_count=self.config[\"num_airflow_workers\"], cpu=self.config[\"cpu\"],\n memory=self.config[\"memory\"], max_healthy_percent=200)\n service.task_definition.add_to_task_role_policy(self.athena_access_policy())\n return service\n\n def athena_access_policy(self):\n return PolicyStatement(\n resources=[\"*\"],\n actions=[\"athena:*\"]\n )\n\n def create_scheduler_ecs_service(self, environment) -> ecs.FargateService:\n task_family = get_scheduler_taskdef_family_name(self.deploy_env)\n service_name = get_scheduler_service_name(self.deploy_env)\n # we want only 1 
instance of the scheduler so when new versions are deployed max_healthy_percent=100\n # you have to manually stop the current version and then it should start a new version - done by deploy task\n return self.create_service(service_name, task_family, f\"SchedulerCont-{self.deploy_env}\", environment, \"scheduler\",\n desired_count=1, cpu=self.config[\"cpu\"], memory=self.config[\"memory\"],\n max_healthy_percent=100, add_cw_agent=True)\n\n def create_service(self, service_name, family, container_name, environment, command, desired_count=1, cpu=\"512\", memory=\"1024\",\n max_healthy_percent=200, add_cw_agent=False):\n worker_task_def = ecs.TaskDefinition(self, family, cpu=cpu, memory_mib=memory,\n compatibility=ecs.Compatibility.FARGATE, family=family,\n network_mode=ecs.NetworkMode.AWS_VPC)\n worker_task_def.add_container(container_name,\n image=self.image,\n command=[command], environment=environment,\n secrets=self.secrets,\n logging=ecs.LogDrivers.aws_logs(stream_prefix=family,\n log_retention=RetentionDays.ONE_DAY))\n if add_cw_agent:\n self.add_cw_statsd_container(container_name, worker_task_def)\n return ecs.FargateService(self, service_name, service_name=service_name,\n task_definition=worker_task_def,\n cluster=self.cluster, desired_count=desired_count,\n platform_version=ecs.FargatePlatformVersion.VERSION1_4, max_healthy_percent=max_healthy_percent)\n\n def add_cw_statsd_container(self, container_name, worker_task_def):\n namespace = f\"airflow/{self.deploy_env}/cwagent\"\n worker_task_def.add_container(f\"cw_agent_{container_name}\",\n image=ecs.ContainerImage.from_registry(\n \"public.ecr.aws/cloudwatch-agent/cloudwatch-agent:latest\"),\n environment={\n \"CW_CONFIG_CONTENT\": '{\"metrics\": {\"namespace\":\"' + namespace\n + '\",\"metrics_collected\":{\"statsd\":{}}}}'\n },\n logging=ecs.LogDrivers.aws_logs(\n log_group=LogGroup(\n self,\n \"ecs/airflow\",\n retention=RetentionDays.ONE_DAY),\n stream_prefix=container_name)\n )\n for managed_policy in [\n aws_iam.ManagedPolicy.from_aws_managed_policy_name(\"CloudWatchAgentServerPolicy\"),\n aws_iam.ManagedPolicy.from_aws_managed_policy_name(\"AWSXRayDaemonWriteAccess\")\n ]:\n worker_task_def.execution_role.add_managed_policy(managed_policy)\n\n def setup_eks_cluster(self):\n self.cluster = aws_eks.Cluster(\n self,\n self.config[\"eks_cluster_name\"],\n version=aws_eks.KubernetesVersion.V1_21,\n default_capacity=self.config.get(\"eks_nodegroup_capacity\", 2),\n default_capacity_instance=ec2.InstanceType.of(\n instance_class=ec2.InstanceClass.BURSTABLE2,\n instance_size=ec2.InstanceSize.SMALL\n ),\n vpc=self.vpc,\n vpc_subnets=self.vpc.private_subnets,\n endpoint_access=aws_eks.EndpointAccess.PUBLIC\n )\n # add worker task roles to masters group in the EKS cluster so that workers can launch pods\n self.cluster.aws_auth.add_role_mapping(self.worker_service.task_definition.task_role, groups=[\"system:masters\"])\n # add any needed policies to give permissions to EKS nodes\n self.cluster.default_nodegroup.role.add_to_policy(self.athena_access_policy())","repo_name":"rgan/aws-airflow","sub_path":"airflow_stack/airflow_stack.py","file_name":"airflow_stack.py","file_ext":"py","file_size_in_byte":13750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2625268189","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 8 15:02:43 2020\n\n@author: massi\n\"\"\"\nimport imageio \nfrom matplotlib import pyplot as plt\nimport numpy as np \nfrom tifffile import imsave \nimport 
random\nimport os \nfrom scipy import ndimage, misc\n\n#############\nfolder = r\"D:\\08_month_40m\\\\\"\nfolder_out = r\"D:\\German_Train_Naples\\\\\"\n\nif not os.path.exists(folder_out):\n os.makedirs(folder_out)\n\ndir_list = os.listdir(folder)\ndir_list.sort()\n\nN_1 = {\"139\": 3,\"168\": 4 }\n# N_1 = {\"168\":1}\n#N_1 = {\"168\": 4}\nprint(dir_list)\nN = 6\nOut = 3\nnum = 1\n\nps1 = 128\nr1 = 128\n\n\nps = 128\nr = 128\nenlarge = 64\n\n\nrotation = [0,45,90, 135, 180, 225, 270, 315]\nrotation1 = [0,45,90]\nrotation2 = [0] #[0, 135]\n\nA = len(rotation)\nB = len(rotation1)\nC = len(rotation2)\n\n\nfeatures = [\"geo_ndvi - Copia.\",\"geo_ndvi.\", \"geo_localthetainc.\",\"geo_mean_gamma0_dB.\",\"geo_mean_rho6.\",\"geo_rhoLT.\",\"geo_tau.\", \"IMD_2015_020m_eu_03035_d05_Merge_wgs84\", \"TCD_2015_020m_eu_03035_d05_Merge_wgs84\",\"WAW_2015_020m_eu_03035_d06_Merge_wgs84\"]\n\npatches_iniziali = 0 \npatches_finali = 0\nfinal_out = 0 \nfinal_test = 0\nfor n_1 in N_1: \n for k1 in range(N_1[n_1]):\n # if n_1 == \"168\": \n # k1 = k1 + 1\n folder_1 = folder + str(n_1) + \"_orbit\\TS_\" + str(k1) + \"\\\\channel_vv_hrl_2015\\posting_40m\\\\\"\n folder_2 = folder + str(n_1) + \"_orbit\\TS_\" + str(k1) + \"\\\\hrl_2015\\\\\"\n folder_out_2 = folder_out # + str(n_1) + \"_orbit\\TS_\" + str(k1) + \"\\\\channel_vv_100m\\\\\"\n \n if not os.path.exists(folder_out_2):\n os.makedirs(folder_out_2)\n \n \n for feature in features: \n # print(feature)\n if feature.find(\".\") != -1:\n file_out = folder_1 + feature + \"tiff\"\n else: \n file_out = folder_2 + feature + \".tif\"\n print(file_out)\n add = imageio.imread(file_out)\n if feature == \"geo_ndvi.\":\n ndvi = np.asarray(add)\n elif feature == \"geo_ndvi - Copia.\":\n corine = np.asarray(add)\n elif feature == \"geo_localthetainc.\":\n localthetainc = np.asarray(add)\n elif feature == \"geo_mean_gamma0_dB.\":\n gamma_0 = np.asarray(add)\n elif feature== \"geo_mean_rho6.\":\n rho_6 = np.asarray(add)\n elif feature == \"geo_rhoLT.\":\n rhoLT = np.asarray(add)\n elif feature == \"geo_tau.\":\n tau = np.asarray(add)\n elif feature == \"IMD_2015_020m_eu_03035_d05_Merge_wgs84\":\n hrl_ARTIFICIAL_SURFACES = np.asarray(add)\n elif feature == \"WAW_2015_020m_eu_03035_d06_Merge_wgs84\":\n hrl_WATER = np.asarray(add)\n elif feature == \"TCD_2015_020m_eu_03035_d05_Merge_wgs84\":\n hrl_FOREST = np.asarray(add)\n # elif feature == \"NTCD_Thre.\":\n # hrl_NOFOREST = np.asarray(add)\n # print(feature)\n size_ndvi = rhoLT.shape\n\n # mask_rhoLT = (rhoLT == 0 ) + (hrl_WATER)\n [s1, s2] = hrl_WATER.shape\n # hrl_WATER = (hrl_WATER == 1)*(hrl_WATER == 255)\n # hrl_ARTIFICIAL_SURFACES = (hrl_ARTIFICIAL_SURFACES > 50)*np.invert(hrl_WATER)\n # hrl_FOREST = (hrl_FOREST > 50)*np.invert(hrl_WATER)*np.invert(hrl_ARTIFICIAL_SURFACES)\n # hrl_NOFOREST = np.invert(hrl_WATER)*np.invert(hrl_ARTIFICIAL_SURFACES)*np.invert(hrl_FOREST)\n\n hrl_WATER = (corine == 0)\n hrl_ARTIFICIAL_SURFACES = (corine == 45)\n hrl_FOREST = (corine == 130)\n hrl_NOFOREST = (corine == 215)\n\n print(np.sum(hrl_WATER*hrl_NOFOREST*hrl_FOREST*hrl_ARTIFICIAL_SURFACES))\n if (n_1 == \"168\" and k1 != 0) or (n_1 == \"139\"):\n print(\"training\")\n print(n_1 + \"k1 : \" + str(k1))\n p2 = []\n # print(len(p2))\n for y in range(500,s1-500-ps+1,r): \n for x in range(500,s2-500-ps+1,r):\n mask_d0 = hrl_WATER[y:y+ps,x:x+ps]\n mask_d0_corine_ARTIFICIAL_SURFACES= hrl_ARTIFICIAL_SURFACES[y:y+ps,x:x+ps]\n mask_d0_corine_FOREST = hrl_FOREST[y:y+ps,x:x+ps]\n mask_d0_corine_NOFOREST = hrl_NOFOREST[y:y+ps,x:x+ps]\n [m1,m2] 
= mask_d0.shape\n s_0 = mask_d0.sum()\n s_123 = [mask_d0_corine_ARTIFICIAL_SURFACES.sum(), mask_d0_corine_FOREST.sum(), mask_d0_corine_NOFOREST.sum() ]\n materials = np.where(s_123 == np.max(s_123))[0][0]\n if s_0 == 0:\n p2.append([y,x,materials])\n p_train = []\n p_val = []\n \n P1 = len(p2)\n p = p2#[p2[s] for s in v] \n random.shuffle(p)\n # patches_finali += P\n x_train_k = np.ndarray(shape=(ps, ps, N), dtype='float32')\n y_train_k = np.ndarray(shape=(ps, ps, Out), dtype='float32')\n \n n = 0\n for patch in p:\n y0, x0 = patch[0], patch[1]\n \n x_train_k[:,:,0] = gamma_0[y0:y0+ps,x0:x0+ps]\n x_train_k[:,:,1] = rhoLT[y0:y0+ps,x0:x0+ps]\n x_train_k[:,:,2] = tau[y0:y0+ps,x0:x0+ps]\n x_train_k[:,:,3] = localthetainc[y0:y0+ps,x0:x0+ps]\n x_train_k[:,:,4] = rho_6[y0:y0+ps,x0:x0+ps]\n x_train_k[:,:,5] = ndvi[y0:y0+ps,x0:x0+ps]\n \n y_train_k[:,:,0]= hrl_ARTIFICIAL_SURFACES[y0:y0+ps, x0:x0+ps]#-b6_r[y0+r:y0+ps-r, x0+r:x0+ps-r]\n y_train_k[:,:,1] = hrl_FOREST[y0:y0+ps, x0:x0+ps]#-b6_r[y0+r:y0+ps-r, x0+r:x0+ps-r]\n y_train_k[:,:,2] = hrl_NOFOREST[y0:y0+ps, x0:x0+ps]#-b6_r[y0+r:y0+ps-r, x0+r:x0+ps-r]\n np.save(os.path.join(folder_out_2,'X_train_' + str(final_out) + '.npy'),x_train_k)\n np.save(os.path.join(folder_out_2,'Y_train_' + str(final_out) + '.npy'),y_train_k)\n\n # imsave(os.path.join(folder_out_2,'Y_train_' + str(final_out) + '.tif'),y_train_k)\n # imsave(os.path.join(folder_out_2,'X_train_' + str(final_out) + '.tif'),x_train_k)\n n = n + 1\n final_out += 1\n else: \n print(\"testing\")\n print(n_1 + \"k1 : \" + str(k1))\n p2 = []\n for y in range(1,s1-ps1+1,r1): \n for x in range(1,s2-ps1+1,r1):\n mask_d0 = hrl_WATER[y:y+ps1,x:x+ps1]\n mask_d0_corine_ARTIFICIAL_SURFACES= hrl_ARTIFICIAL_SURFACES[y:y+ps1,x:x+ps1]\n mask_d0_corine_FOREST = hrl_FOREST[y:y+ps1,x:x+ps1]\n mask_d0_corine_NOFOREST = hrl_NOFOREST[y:y+ps1,x:x+ps1]\n [m1,m2] = mask_d0.shape\n s_0 = mask_d0.sum()\n s_123 = [mask_d0_corine_ARTIFICIAL_SURFACES.sum(), mask_d0_corine_FOREST.sum(), mask_d0_corine_NOFOREST.sum() ]\n materials = np.where(s_123 == np.max(s_123))[0][0]\n if s_0 == 0:\n p2.append([y,x,materials])\n p_test = p2\n \n P1 = len(p_test)\n # p = p2#[p2[s] for s in v] \n# random.shuffle(p)\n P = len(p_test)\n patches_finali += P\n x_test_k = np.ndarray(shape=(ps1, ps1, N), dtype='float32')\n y_test_k = np.ndarray(shape=(ps1, ps1, Out), dtype='float32')\n \n n1 = 0\n for patch in p_test:\n y0, x0 = patch[0], patch[1]\n x_test_k[:,:,0] = gamma_0[y0:y0+ps,x0:x0+ps]\n x_test_k[:,:,1] = rhoLT[y0:y0+ps,x0:x0+ps]\n x_test_k[:,:,2] = tau[y0:y0+ps,x0:x0+ps]\n x_test_k[:,:,3] = localthetainc[y0:y0+ps,x0:x0+ps]\n x_test_k[:,:,4] = rho_6[y0:y0+ps,x0:x0+ps]\n x_test_k[:,:,5] = ndvi[y0:y0+ps,x0:x0+ps]\n \n y_test_k[:,:,0]= hrl_ARTIFICIAL_SURFACES[y0:y0+ps, x0:x0+ps]#-b6_r[y0+r:y0+ps-r, x0+r:x0+ps-r]\n y_test_k[:,:,1] = hrl_FOREST[y0:y0+ps, x0:x0+ps]#-b6_r[y0+r:y0+ps-r, x0+r:x0+ps-r]\n y_test_k[:,:,2] = hrl_NOFOREST[y0:y0+ps, x0:x0+ps]#-b6_r[y0+r:y0+ps-r, x0+r:x0+ps-r]\n # imsave(os.path.join(folder_out_2,'X_test_' + str(final_test) + '.tif'),x_test_k)\n # imsave(os.path.join(folder_out_2,'Y_test_' + str(final_test) + '.tif'),y_test_k)\n\n\n np.save(os.path.join(folder_out_2,'X_test_' + str(final_test) + '.npy'),x_test_k)\n np.save(os.path.join(folder_out_2,'Y_test_' + str(final_test) + '.npy'),y_test_k)\n final_test += 1\n n1 = n1 + 1\n \n num +=1\n\n\ntrain_val_p = n\ntest_p = n1\n\nfolder_out_2 = r\"D:\\German_Indices\\\\\" \n\nif not os.path.exists(folder_out_2):\n os.makedirs(folder_out_2)\n\n\nind = 
np.arange(train_val_p)\nnp.random.shuffle(ind)\ntrain_perc = 0.9\ntrain_samp = int(train_val_p*train_perc)\nnp.save(os.path.join(folder_out_2, 'train_ind.npy'),ind[:train_samp])\nnp.save(os.path.join(folder_out_2, 'val_ind.npy'),ind[train_samp:])","repo_name":"massimiliano-unina/fluvial-s1s2-max","sub_path":"New2_HRL_Dataset_DLR.py","file_name":"New2_HRL_Dataset_DLR.py","file_ext":"py","file_size_in_byte":9073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32454347243","text":"from unittest.mock import Mock, patch\n\nimport pytest\nfrom flask import Request, g\nfrom werkzeug.exceptions import HTTPException\n\nfrom snowflake.models import User\nfrom snowflake.services.login_manager import load_user_from_header, unauthorized_handler, \\\n load_user, on_user_loaded_from_header\n\n\ndef test_load_user_from_header_returns_none_if_authorization_is_missing():\n mock_request = Mock(Request)\n mock_request.headers = {}\n\n assert load_user_from_header(mock_request) is None\n\n\ndef test_load_user_from_header_returns_400_if_header_is_malformed(app):\n with app.app_context():\n mock_request = Mock(Request)\n mock_request.headers = {\n 'Authorization': 'Bearer'\n }\n\n with pytest.raises(HTTPException) as ex_info:\n load_user_from_header(mock_request)\n\n assert ex_info.value.code == 400\n # noinspection PyUnresolvedReferences\n assert ex_info.value.description[0].json == {\n 'message': 'Malformed authorization'\n }\n\n\ndef test_load_user_from_header_returns_none_if_scheme_is_not_bearer(app):\n with app.app_context():\n mock_request = Mock(Request)\n mock_request.headers = {\n 'Authorization': 'Basic aGVsbG86d29ybGQ='\n }\n\n assert load_user_from_header(mock_request) is None\n\n\n@patch('snowflake.services.login_manager.token_manager.load_user')\ndef test_load_user_from_header_returns_user_for_token(mock_load_user: Mock, app):\n user = User(id='1', username='example')\n mock_load_user.return_value = user\n with app.app_context():\n mock_request = Mock(Request)\n mock_request.headers = {\n 'Authorization': 'Bearer 2ebad019-c1bc-4acf-911d-02230b845959'\n }\n\n assert load_user_from_header(mock_request) == user\n # assert_called_with raises on mismatch; the original `assert mock.is_called_with(...)` was a no-op\n mock_load_user.assert_called_with('2ebad019-c1bc-4acf-911d-02230b845959')\n\n\ndef test_unauthorized_handler_returns_json_response_for_api_routes(app):\n with app.test_request_context('/api/users'):\n response, status = unauthorized_handler()\n\n assert status == 401\n assert response.json == {\n 'message': 'Unauthorized'\n }\n\n\n@patch('snowflake.services.login_manager.User.get')\ndef test_load_user_returns_user_by_id(mock_get):\n user = User(id='1', username='example')\n mock_get.return_value = user\n\n assert load_user('1') == user\n mock_get.assert_called_with('1')\n\n\ndef test_unauthorized_handler_returns_redirect_for_routes(app):\n with app.test_request_context('/users'):\n response = unauthorized_handler()\n\n assert response.status_code == 302\n assert response.headers['Location'] == '/login/?next=%2Fusers'\n\n\ndef test_on_user_loaded_from_header_marks_request(app):\n with app.app_context(), app.test_request_context('/hello'):\n on_user_loaded_from_header()\n assert g.login_via_header\n","repo_name":"snowflake-app/snowflake","sub_path":"tests/services/test_login_manager.py","file_name":"test_login_manager.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16210499962","text":"import keras\nimport keras.backend 
as K\nimport numpy as np \nimport tensorflow as tf\n\ndef masked_crossentropy(dim):\n\tdef f(true, pred):\n\t\t### custom loss function that will apply a sort of masked cross entropy ###\n\t\ttrue = K.reshape(true, (-1, dim))\n\t\tpred = K.reshape(pred, (-1, dim))\n\n\t\tmask_true = K.clip(K.sum(K.cast(K.not_equal(true, -1 ), K.floatx()), axis = -1, keepdims=False), 0.0, 1.0)\n\t\tmask_true = mask_true*K.clip(K.sum(true, axis = -1, keepdims=False), 0.0, 1.0)\n\t\tmask_true = K.cast(mask_true, tf.int64)\n\t\tinds = K.flatten(tf.where(mask_true >0))\n\n\t\ttrue = tf.gather(true, inds, axis = 0)\n\t\tpred = tf.gather(pred, inds, axis = 0)\n\n\t\tloss = K.categorical_crossentropy(true, pred)\n\n\t\treturn tf.cond(tf.is_nan(K.mean(loss)), lambda : 0.0, lambda : K.mean(loss))\n\treturn f\n\n\n\nif __name__ == '__main__':\n\tx = np.array([[[0,0,0]]])\n\ty = np.array([[[0,1,0]]])\n\n\tprint(x.shape)\n\n\tx = K.constant(x)\n\ty = K.constant(y)\n\n\tloss = masked_crossentropy(3)(x,y)\n\tprint(K.eval(loss))\n\n","repo_name":"samodle/VAE-Job-Analysis","sub_path":"custom_losses.py","file_name":"custom_losses.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"43033646568","text":"# Where's My Internet??\n# focusing only on DFS\n# subtle bugs from the live demo fixed here\n\ndef dfs(u):\n vis[u] = True\n for v in AL[u]: # for each neighbor v of u\n if vis[v]: continue # if v already visited, skip\n dfs(v) # if I can reach this line, visit v\n \nimport sys\nsys.setrecursionlimit(10**6) # just make it as big as N (or much more, to be very safe), e.g., # 1-2-3-4....-N (N-1)\n\nN, M = map(int, input().split())\n# AM = [[0] * N for _ in range(N)] # O(N^2) memory :O, (2*10^5)^2 = 4*10^10 = 40*10^9 (about 40 Giga cells)... 
boom...\nAL = [[] for _ in range(N)] # use AL :)\nfor _ in range(M):\n # a, b = map(int, input().split())\n # a = a-1 # standard\n # b -= 1 # shorter\n # a, b = a-1, b-1 # combined\n a, b = map(lambda x: int(x)-1, input().split()) # cleanest: convert to integer and go to 0-based indexing\n AL[a].append(b)\n AL[b].append(a) # you forget this, Wrong Answer\n\n# DFS from 1 (but now vertex 0)\nvis = [False] * N # Direct Addressing Table of size N, 0-based\ndfs(0) # house 1 in 0-based indexing is house 0\n\nif all(vis): # all vertices are visited (all True)\n print(\"Connected\")\nelse:\n for u in range(N):\n if not vis[u]:\n print(u+1) # print in 1-based\n","repo_name":"TongWu/IT5003-DataStructureAlgrithm","sub_path":"Lectures/Session6/wheresmyinternet-dfs-not-live.py","file_name":"wheresmyinternet-dfs-not-live.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37422115501","text":"class chartjs:\n def __init__(self):\n self.backgroundColors = {\n \"blue\": \"rgba(86, 150, 212, 0.4)\",\n \"yellow\": \"rgba(252, 207, 38, 0.4)\",\n \"green\": \"rgba(123, 202, 165, 0.4)\",\n \"red\": \"rgba(235, 117, 101, 0.4)\",\n \"purple\": \"rgba(162, 96, 170, 0.4)\"\n }\n self.borderColors = {\n \"blue\": \"rgba(86, 150, 212, 1)\",\n \"yellow\": \"rgba(252, 207, 38, 1)\",\n \"green\": \"rgba(123, 202, 165, 1)\",\n \"red\": \"rgba(235, 117, 101, 1)\",\n \"purple\": \"rgba(162, 96, 170, 1)\"\n }\n self.response = {\"labels\": [], \"datasets\": [], \"length\": 0}\n self.dataset = {\n \"fill\": 0,\n \"lineTension\": 0.1,\n \"borderCapStyle\": 'butt',\n \"borderDash\": [],\n \"borderDashOffset\": 0.0,\n \"borderJoinStyle\": 'miter',\n \"pointBackgroundColor\": \"#fff\",\n \"pointBorderWidth\": 1,\n \"pointHoverRadius\": 3,\n \"pointHoverBorderWidth\": 2,\n \"pointRadius\": 1,\n \"pointHitRadius\": 10,\n \"spanGaps\": \"false\"\n }\n\n def addDataset(self, label, data, color, **kwargs):\n self.dataset[\"label\"] = label\n self.dataset[\"data\"] = data\n self.dataset[\"backgroundColor\"] = self.backgroundColors[color]\n self.dataset[\"pointHoverBackgroundColor\"] = self.backgroundColors[color]\n self.dataset[\"borderColor\"] = self.borderColors[color]\n self.dataset[\"pointBorderColor\"] = self.borderColors[color]\n self.dataset[\"pointHoverBorderColor\"] = self.borderColors[color]\n\n for key in kwargs:\n self.dataset[key] = kwargs[key]\n\n self.response[\"datasets\"].append(self.dataset)\n self.dataset = {\n \"fill\": 0,\n \"lineTension\": 0.1,\n \"borderCapStyle\": 'butt',\n \"borderDash\": [],\n \"borderDashOffset\": 0.0,\n \"borderJoinStyle\": 'miter',\n \"pointBackgroundColor\": \"#fff\",\n \"pointBorderWidth\": 1,\n \"pointHoverRadius\": 3,\n \"pointHoverBorderWidth\": 2,\n \"pointRadius\": 1,\n \"pointHitRadius\": 10,\n \"spanGaps\": \"false\"\n }\n\n def addLabels(self, labels):\n self.response[\"labels\"] = labels\n","repo_name":"BramRausch/chartjs-line.py","sub_path":"chartjs-line.py","file_name":"chartjs-line.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40308211293","text":"from airflow import DAG\nfrom airflow.models.variable import Variable\nfrom airflow.operators.bash import BashOperator\nfrom airflow.operators.python import PythonOperator\nfrom airflow.decorators import dag\nimport pendulum\nimport vertica_python\nimport boto3\nimport pandas as pd \n \nconn_info = {'host': 
'vertica.tgcloudenv.ru',\n 'port': '5433',\n 'user': 'stv230530',\n 'password': 'IjUMUB8AONAHDcT',\n 'database': '',\n # 10 minutes timeout on queries\n 'read_timeout': 600,\n # default throw error on invalid UTF-8 results\n 'unicode_error': 'strict',\n # SSL is disabled by default\n 'ssl': False,\n 'connection_timeout': 30\n # connection timeout is not enabled by default\n }\n\nkey_id=Variable.get(\"AWS_ACCESS_KEY_ID\")\nsecret_key=Variable.get(\"AWS_SECRET_ACCESS_KEY\")\n\ndef load_data(conn, path:str , file:str): \n df_csv = pd.read_csv( path )\n if file=='dialogs':\n df_csv = df_csv.rename(columns={'message_type': 'message_group'})\n elif file=='group_log':\n \tdf_csv = df_csv.rename(columns={'datetime': 'event_dt'})\n tuple_col=\", \".join(list(df_csv.columns) )\n tuple_col_str= ('('+ str(tuple_col)+')')\n \n with vertica_python.connect(**conn_info) as connection:\n cur = connection.cursor()\n cur.execute(f\"\"\"delete from STV230530__STAGING.{ file }\"\"\")\n connection.commit()\n cur.execute(f\"\"\"COPY STV230530__STAGING.{ file }{tuple_col_str}\n FROM LOCAL '{ path }' DELIMITER ',' ENFORCELENGTH\"\"\" )\n connection.commit()\n cur.close()\n \n \ndef fetch_s3_file(bucket: str, key: str):\n # the file-download code from the standalone script goes here\n\n session = boto3.session.Session()\n s3_client = session.client(\n service_name='s3',\n endpoint_url='https://storage.yandexcloud.net',\n aws_access_key_id=key_id,\n aws_secret_access_key=secret_key)\n s3_client.download_file(\n bucket,\n key,\n Filename=f'/data/{key}')\n\n# this command needs to be adjusted so that it prints\n# the first ten lines of each file\n\nbucket_files = ['groups.csv', 'dialogs.csv', 'users.csv', 'group_log.csv']\n\nbash_command_tmpl = \"\"\"\n{% for file in params.files %}\nhead {{ file }}\n{% endfor %}\n\"\"\"\n\n\n\nwith DAG('test2', schedule_interval=None, start_date=pendulum.parse('2022-07-13')\n) as dag:\n \n\n task1 = PythonOperator(\n task_id='fetch_groups',\n python_callable=fetch_s3_file,\n op_kwargs={'bucket': 'sprint6', 'key': 'groups.csv'},\n )\n task2 = PythonOperator(\n task_id='fetch_dialogs',\n python_callable=fetch_s3_file,\n op_kwargs={'bucket': 'sprint6', 'key': 'dialogs.csv'},\n )\n task3 = PythonOperator(\n task_id='fetch_users',\n python_callable=fetch_s3_file,\n op_kwargs={'bucket': 'sprint6', 'key': 'users.csv'},\n )\n task4 = PythonOperator(\n task_id='fetch_group_log',\n python_callable=fetch_s3_file,\n op_kwargs={'bucket': 'sprint6', 'key': 'group_log.csv'},\n )\n \n print_10_lines_of_each = BashOperator(\n task_id='print_10_lines_of_each',\n bash_command=bash_command_tmpl,\n params={'files': [f'/data/{f}' for f in bucket_files]}\n )\n task5=PythonOperator(\n task_id='load_users',\n python_callable=load_data,\n op_kwargs={'conn': 'conn_info', 'path':'/data/users.csv', 'file':'users'},\n )\n task6=PythonOperator(\n task_id='load_groups',\n python_callable=load_data,\n op_kwargs={'conn': 'conn_info', 'path':'/data/groups.csv', 'file':'groups'},\n )\n task7=PythonOperator(\n task_id='load_dialogs',\n python_callable=load_data,\n op_kwargs={'conn': 'conn_info', 'path':'/data/dialogs.csv', 'file':'dialogs' },\n )\n\n task8=PythonOperator(\n task_id='load_group_log',\n python_callable=load_data,\n op_kwargs={'conn': 'conn_info', 'path':'/data/group_log.csv', 'file':'group_log'},\n )\n( [task1, task2, task3, task4]\n>> print_10_lines_of_each\n>> task5 \n>> task6\n>> task7\n>> task8)\n 
\n","repo_name":"Voltschok/de-project-sprint-6","sub_path":"src/dags/sprint6.py","file_name":"sprint6.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3440539935","text":"import requests\nfrom urllib.parse import urlencode\nfrom pprint import pprint\nfrom datetime import datetime\nimport json\nimport os\nfrom tqdm import tqdm\n\n# Получение токена доступа ВК\napp_id = '51778613'\noauth_base_url = 'https://oauth.vk.com/authorize'\nparams_for_token_getting = {\n 'client_id': app_id,\n 'redirect_uri': 'https://oauth.vk.com/blank.html',\n 'page': 'page',\n 'scope': 'photos',\n 'response_type': 'token'\n}\n\noauth_url = f'{oauth_base_url}?{urlencode(params_for_token_getting)}'\npprint(oauth_url)\nprint('Перейдите по ссылке выше, авторизуйтесь и скопируйте access_token из ссылки в браузере')\n\ntoken = input('Введите access_token: ')\nmy_id = input('Введите id страницы ВК: ')\nYD_token = input('Введите токен Яндекса: ')\nvk_version = '5.131'\n\n\n# Реализация класса по взаимодействию с фото в ВК\nclass ApiVk:\n api_base_url = 'https://api.vk.com/method'\n\n # Создание инициализации для экземпляров класса\n def __init__(self, access_token, user_id, version):\n self.access_token = access_token\n self.user_id = user_id\n self.version = version\n\n # Создание общих параметров, которые будут применяться во всех дополнительно вызываемых методах, если таковые понадобятся\n def _create_common_params(self):\n return {\n 'access_token': self.access_token,\n 'owner_id': self.user_id,\n 'v': self.version\n }\n\n # Создание метода, получающего фотографии в максимальном размере и скачивающего их в проект\n def get_profile_max_size_photos(self):\n params = self._create_common_params() # Получение общих параметров\n params.update({\n 'album_id': 'profile',\n 'extended': 1\n }) # Обновление параметров необходимыми для последующего запроса элементами\n response = requests.get(f'{self.api_base_url}/photos.get', params=params)\n # pprint(response.json()) # На случай необходимости посмотреть, как выглядит ответ ВК\n\n all_data = response.json()\n max_sizes_photos = {} # Создание словаря, в который циклом ниже будет добавляться информация в формате {ссылка на фотографию: [число лайков, дата загрузки, тип максимального размера]}\n for el in tqdm(all_data['response']['items']):\n for el_ in tqdm(el['sizes']):\n if el_['type'] == 'z': # Используется type 'z', т.к. type 'w', который согласно документации является максимальным по размеру, доступен, однако, не для всех фото\n max_sizes_photos[el_['url']] = [el['likes']['count'], el['date'], el_['type']]\n\n # Код ниже преобразует unix-время, возвращаемое ВК, в привычный нам формат даты. ВАЖНОЕ УТОЧНЕНИЕ: такое преобразование в теории может вызвать потенциальные\n # проблемы в дальнейшее выполнение кода, т.к. есть вероятность возникновения ситуации, при которой фото загружались в один и тот же день и получили одинаковое число\n # лайков. Тогда следуя условию задания (если число лайков одинаково, добавить к названию дату), можем получить одинаковые названия для файлов и потерять часть\n # фото при их скачивании режимом 'w' в цикле. Однако принимаем данную ситуацию как маловероятную и используем преобразование даты\n for value in tqdm(max_sizes_photos.values()):\n value[1] = datetime.utcfromtimestamp(value[1]).strftime('%d-%m-%Y')\n\n # В задаче есть уточнение, что если кол-во лайков к фото совпадает, тогда к названию необходимо добавить дату фото. 
Ниже код, который реализует это требование\n count_of_likes = [] # Создание списка, куда циклом будем добавлять число лайков к каждому фото\n for value in tqdm(max_sizes_photos.values()):\n count_of_likes.append(value[0])\n # Добавление к числу лайков даты, если кол-во лайков к каким-то фото повторяется\n for value in tqdm(max_sizes_photos.values()):\n if value[0] in count_of_likes and count_of_likes.count(value[0]) > 1:\n value[0] = str(value[0]) + '_' + str(value[1])\n for value in tqdm(max_sizes_photos.values()): # Преобразовываем кол-во лайков, которое будет названием фото, в строчный формат\n if isinstance(value[0], int):\n value[0] = str(value[0])\n # print(max_sizes_photos) # На случай необходимости посмотреть, как выглядит полученный словарь\n\n # Наконец скачиваем (записываем) фото в папку проекта, используя ключи полученного ранее словаря как пути, а значения (первые эл-ты списка) как названия файлов\n for path, data in tqdm(max_sizes_photos.items()):\n response = requests.get(path)\n with open(f'{data[0]}.jpg', 'wb') as file:\n file.write(response.content)\n return max_sizes_photos\n\n\n# Реализация класса по взаимодействию с фото на ЯД:\nclass ApiYd:\n api_base_url = 'https://cloud-api.yandex.net'\n\n # Создание инициализации для экземпляров класса\n def __init__(self, polygon_token):\n self.polygon_token = polygon_token\n self.method = ApiVk(token, my_id, vk_version) # Создание метода, который позволит вызвать из описываемого класса (ApiYd) метод другого класса (ApiVk)\n\n # Создание общих параметров, которые будут применяться во всех дополнительно вызываемых методах, если таковые понадобятся\n def _create_common_params(self):\n return {\n 'Authorization': f'OAuth {self.polygon_token}'\n }\n\n # Создание новой папки для загрузки фото\n def create_new_folder(self):\n folder_name = 'Курсовая_работа_Садака_Амир_pd86'\n headers = self._create_common_params() # Вызываем заголовки авторизации\n params = {\n 'path': folder_name\n }\n # Создание папки\n response = requests.put(f'{self.api_base_url}/v1/disk/resources', headers=headers, params=params)\n return folder_name # Возвращаем название папки, т.к. 
оно понадобится в следующем методе\n\n # Создание метода, загружающего фото в созданную папку на диск и создающего json-файл с информацией по загруженным фото\n def photo_upload(self):\n headers = self._create_common_params() # Вызываем заголовки авторизации\n\n # Создаем список, куда циклом будем помещать названия фотографий из полученного в классе VKApi словаря\n list_of_names = []\n for value in tqdm(self.method.get_profile_max_size_photos().values()): # Вызываем метод, описанный в другом классе, который возвращает словарь\n list_of_names.append(f'{value[0]}.jpg')\n # print(list_of_names) # На случай необходимости посмотреть, как выглядит полученный список\n\n # Создаем список параметров, необходимых для отправки запроса на получение путей загрузки\n list_of_params = []\n for el in tqdm(list_of_names):\n list_of_params.append({'path': f'{self.create_new_folder()}/{el}', 'overwrite': 'True'})\n # print(list_of_params) # На случай необходимости посмотреть, как выглядит полученный список\n\n # Создаем словарь, в который в кач-ве ключа будем помещать пути для загрузки, полученные requests-запросом, а в кач-ве значения - название, которое нужно присвоить фотографии на ЯД\n list_of_paths = {}\n for params in tqdm(list_of_params):\n response = requests.get(f'{self.api_base_url}/v1/disk/resources/upload', params=params, headers=headers)\n list_of_paths[response.json()['href']] = params['path'].replace(f'{self.create_new_folder()}/', '')\n # print(list_of_paths) # На случай необходимости посмотреть, как выглядит полученный словарь\n\n # Наконец загружаем фото на ЯД и создаем необходимый json-файл\n count_of_uploaded_photo = 5\n n = 0 # Счетчик загруженных фото\n data_for_json = [] # Сюда будем записывать информацию по загруженным фото\n for path, photo_name in tqdm(list_of_paths.items()):\n params = {\n 'overwrite': 'True'\n }\n with open(photo_name, 'rb') as photo_for_upload:\n response = requests.put(path, params=params, data=photo_for_upload)\n data_for_json.append({'file_name': photo_name, 'size': 'z'}) # Уточнение: значение ключа size \"захардкожено\", т.к. 
мы изначально скачиваем фотографии только в том случае, если размер равен 'z'\n # В теории можно было бы создать условие, которое скачивает фотографию, соответствующую максимальному размеру height, однако, насколько я понял документацию, type 'z' возвращается всегда\n # в отличие от того же типа 'w'\n n += 1\n if n == count_of_uploaded_photo: # Прерываем цикл, если кол-во загруженных фото достигло необходимого нам значения\n break\n # print(data_for_json) # На случай необходимости посмотреть, как выглядит полученный список со словарями\n current_path = os.getcwd()\n path_for_json = os.path.join(current_path, 'photo_info.json')\n with open(path_for_json, 'w', encoding='utf-8') as f:\n json.dump(data_for_json, f, ensure_ascii=False, indent=2)\n\n\nvk_client = ApiVk(token, my_id, vk_version)\nvk_client.get_profile_max_size_photos()\nyd_client = ApiYd(YD_token)\nyd_client.create_new_folder()\nyd_client.photo_upload()\n","repo_name":"amir-sadaqa/api_coursework","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12519,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23646480414","text":"import logging\nfrom sqlalchemy import create_engine, MetaData\nfrom sqlalchemy.orm import sessionmaker\nfrom ..config import setup as config\n\n\nclass Database:\n\tengine = None\n\tsession = None\n\tmetadata = None\n\n\tdef __init__(self, db_name='general'):\n\t\tif config.DATABASE is None:\n\t\t\traise Exception(\"\"\"You need to set 'DATABASE' inside your config file\n\t\t\t\tDATABASE = {\n\t\t\t\t\t'general': {\n\t\t\t\t\t\t'engine': '',\n\t\t\t\t\t\t'dbname': ''\n\t\t\t\t\t}\n\t\t\t\t}\"\"\")\n\n\t\tif 'username' in config.DATABASE[db_name]:\n\t\t\tself.engine = create_engine('%s://%s:%s@%s/%s' % (\n\t\t\t\tconfig.DATABASE[db_name]['engine'],\n\t\t\t\tconfig.DATABASE[db_name]['username'],\n\t\t\t\tconfig.DATABASE[db_name]['password'],\n\t\t\t\tconfig.DATABASE[db_name]['host'],\n\t\t\t\tconfig.DATABASE[db_name]['dbname'],\n\t\t\t), echo=config.IS_DEV)\n\t\telse:\n\t\t\tself.engine = create_engine('%s:///%s' % (\n\t\t\t\tconfig.DATABASE[db_name]['engine'],\n\t\t\t\tconfig.DATABASE[db_name]['dbname'],\n\t\t\t), echo=config.IS_DEV)\n\n\t\tSession = sessionmaker()\n\t\tSession.configure(bind=self.engine)\n\n\t\tself.session = Session()\n\t\tself.metadata = MetaData(bind=self.engine)\n\n\tdef __enter__(self):\n\t\treturn self\n\n\tdef __exit__(self, exc_type, exc_val, exc_tb):\n\t\tself.session.expunge_all()\n\t\tself.session.close()\n\n\tdef execute(self, query, args=()):\n\t\tresult = self.engine.execute(query, args)\n\n\t\tif result is None or result.returns_rows is False:\n\t\t\treturn None\n\n\t\treturn self.fetch_assoc(result.fetchall(), result.keys())\n\n\tdef callproc(self, procedure, args=()):\n\t\tcursor = self.engine.raw_connection().cursor()\n\n\t\tif cursor is None:\n\t\t\treturn None\n\n\t\tcursor.callproc(procedure, args)\n\n\t\treturn self.fetch_assoc(cursor.fetchall(), self.keys_of_cursor(cursor))\n\n\tdef get_row(self, query, args=()):\n\t\tresult = self.engine.execute(query, args)\n\n\t\treturn self.fecth_row_assoc(result.fetchone(), result.keys())\n\n\tdef get(self, query, args=()):\n\t\tquery = self.engine.execute(query, args)\n\n\t\ttry:\n\t\t\tfor row in query:\n\t\t\t\tfor val in row:\n\t\t\t\t\treturn val\n\t\texcept:\n\t\t\treturn None\n\n\tdef query(self, *args):\n\t\treturn self.session.query(*args)\n\n\tdef insert(self, item):\n\t\tself.session.add(item)\n\n\t\treturn True\n\n\tdef 
commit(self):\n\t\tself.session.commit()\n\n\t\treturn True\n\n\tdef keys_of_cursor(self, cursor):\n\t\tkeys = list()\n\n\t\t# If we use python3-mysqldb\n\t\tif cursor.description is not None:\n\t\t\tfor field in cursor.description:\n\t\t\t\tkeys.append(field[0])\n\n\t\t# If we use python3-pymysql\n\t\tif cursor._result is not None:\n\t\t\tfields = cursor._result.fields\n\n\t\t\tfor field in fields:\n\t\t\t\tkeys.append(field.name)\n\n\t\treturn keys\n\n\tdef fetch_assoc(self, rows, keys=None):\n\t\tresult = list()\n\n\t\ttry:\n\t\t\tfor row in rows:\n\t\t\t\tresult.append(self.fecth_row_assoc(row, keys))\n\t\texcept:\n\t\t\tpass\n\n\t\treturn result\n\n\tdef fecth_row_assoc(self, row, keys):\n\t\tnb = 0\n\t\tmy_row = {}\n\n\t\tfor val in row:\n\t\t\tmy_row[keys[nb]] = val\n\t\t\tnb = nb + 1\n\n\t\treturn my_row","repo_name":"ticpu/management-api","sub_path":"core/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"42117271166","text":"# -*- coding: utf-8 -*-\n\"\"\"自定义通用返回数据格式\"\"\"\nimport os\n\nfrom flask import render_template, current_app\nfrom flask.json import jsonify\n\nfrom app.configs.code import ResponseCode\n\nfrom app.utils.database import CRUDMixin, model_to_dict\n\ndefault_return_img = None\nbit_img_file_path = os.path.join('app', 'resources', 'image', '43byte.gif')\nwith open(bit_img_file_path, 'rb') as f:\n default_return_img = f.read()\n\n\ndef translate2succeed(msg):\n \"\"\"\n xiaowei.song 2017-3-6\n\n 转换程序中一些特殊返回消息,定制成统一的SUCCEED\n \"\"\"\n if msg and (not isinstance(msg, str) or msg.lower() not in (\"success\", \"succeed\")):\n return msg\n return u\"成功\"\n\n\ndef res(code=ResponseCode.SUCCEED, msg=u\"成功\", level=None, data=None):\n \"\"\"\n 封装的通用返回方法\n\n :param code: http,返回代码,默认为 200成功\n :param msg: 返回消息,默认为'成功'\n :param level: api请求消息等级,默认为None\n :param data: 返回的数据列表,必须是可以iterator,比如dict,list,tuple都可以\n :return json: 返回json对象\n \"\"\"\n result = {\"status\": str(code), \"message\": translate2succeed(msg)}\n if code != ResponseCode.SUCCEED and (msg == u\"成功\" or msg is None):\n result.pop(\"message\")\n if level:\n result['level'] = level\n\n if data or isinstance(data, int):\n result['data'] = data\n\n # 兼容ghost_sa 返回html页面\n if current_app.config['IS_OPEN_SEARCH_CHILDREN'] and code == ResponseCode.URL_NOT_FOUND:\n return render_template(f'{code}.html'), code\n\n current_app.logger.debug(f'返回结果为: {result}')\n return jsonify(result)\n\n\ndef res_page(args, data=None, total_count=0):\n \"\"\"\n xiaowei.song 2016-6-28\n\n 添加分页返回方法\n\n :param args: 分页相关参数\n :param data: 查询出来的list数据\n :param total_count: 总记录数\n :return json: json格式对象\n \"\"\"\n result = {\"status\": ResponseCode.SUCCEED,\n 'page': {\"current_page\": args[\"page\"],\n \"total_page\": 0,\n \"per_page\": args[\"per_page\"],\n \"total_count\": total_count}}\n # 分页相关参数\n total_page = total_count // args[\"per_page\"]\n if total_count % args[\"per_page\"]:\n total_page += 1\n result[\"page\"][\"total_page\"] = total_page\n\n if isinstance(data, list):\n if len(data):\n result['data'] = data\n if isinstance(data[0], CRUDMixin):\n result['data'] = map(lambda v: model_to_dict(v), data)\n else:\n result['page'] = {\"current_page\": 0, \"total_page\": 0, \"per_page\": 0,\n \"total_count\": 0}\n result['data'] = None\n else:\n result['data'] = data\n return 
jsonify(result)\n","repo_name":"phillip2019/ghost_sa","sub_path":"app/app/utils/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"27042957711","text":"import pygame.font\n\n\nclass Scoreboard():\n '''显示得分信息'''\n\n def __init__(self, ai_settings, screen, stats):\n '''初始化显示得分涉及的属性'''\n self.screen = screen\n self.screen_rect = screen.get_rect()\n self.ai_settings = ai_settings\n self.stats = stats\n\n # 显示得分信息时使用的字体设置\n self.text_color = 30, 30, 30\n self.font = pygame.font.SysFont(None, 48)\n\n # 准备当前得分图像和最高得分图像\n self.prep_score()\n self.prep_high_score()\n\n def prep_score(self):\n '''将得分转换为渲染的图像,置于顶部右边'''\n # 得分圆整到最近的10的倍数\n rounded_score = int(round(self.stats.score, -1))\n # 字符串格式设置指令,将数值转换成字符串时插入逗号\n score_str = \"{:,}\".format(rounded_score)\n self.score_image = self.font.render(\n score_str, True, self.text_color, self.ai_settings.bg_color)\n\n # 将得分放在屏幕右上角\n self.score_rect = self.score_image.get_rect()\n self.score_rect.right = self.screen_rect.right - 20\n self.score_rect.top = 20\n\n def prep_high_score(self):\n '''将最高得分转换为渲染的图像,置于顶部中间'''\n # 得分圆整到最近的10的倍数\n high_score = int(round(self.stats.high_score, -1))\n # 字符串格式设置指令,将数值转换成字符串时插入逗号\n high_score_str = \"{:,}\".format(high_score)\n self.high_score_image = self.font.render(\n high_score_str, True, self.text_color, self.ai_settings.bg_color)\n\n # 将得分放在屏幕右上角\n self.high_score_rect = self.high_score_image.get_rect()\n self.high_score_rect.centerx = self.screen_rect.centerx\n self.high_score_rect.top = self.score_rect.top\n \n def draw_score(self):\n '''将得分图片渲染到屏幕上'''\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.high_score_image, self.high_score_rect)\n","repo_name":"geekieo/iMpLement","sub_path":"alien_invasion/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"13904941214","text":"from functools import partial\nfrom PyQt4 import QtGui, QtCore\nfrom enum import Enum\nfrom ranking_aggregation.rank_aggregation import DebuggingBlockingStat\nimport magellan as mg\n\nimport copy\n\n\nclass VerifButtonType(Enum):\n true = 1\n false = 2\n\n\nclass RetButtonType(Enum):\n unclear = 1\n terminate = 2\n\n\nclass MainWindowManager(QtGui.QWidget):\n\n def __init__(self, schema, recom_lists):\n super(MainWindowManager, self).__init__()\n self.schema = schema\n self.recom_lists = recom_lists\n self.debugging_stat = DebuggingBlockingStat()\n\n self.basic_info_view_widget = None\n self.tuple_pair_view_widget = None\n self.recom_list_view_widget = None\n self.setup_gui(recom_lists[0])\n width = min((40 + 1)*105, mg._viewapp.desktop().screenGeometry().width() - 50)\n height = min((50 + 1)*41, mg._viewapp.desktop().screenGeometry().width() - 100)\n self.resize(width, height)\n\n def setup_gui(self, recom_list):\n self.setWindowTitle('Blocking Debugger')\n self.recom_list_view = RecomListTableViewWithLabel(self, recom_list, 'Candidate List')\n self.tuple_pair_view_widget = TuplePairTableViewWithLabel(self, recom_list, self.schema, 'Tuple Pair')\n self.basic_info_view_widget = BasicInfoViewWithLabel(self, 'Basic Info')\n\n layout = QtGui.QGridLayout(self)\n horizonal_splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)\n horizonal_splitter.addWidget(self.tuple_pair_view_widget)\n horizonal_splitter.addWidget(self.recom_list_view)\n 
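        # stretch factors below: the tuple-pair detail pane (index 0) gets twice the width of the candidate list (index 1)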
horizonal_splitter.setStretchFactor(0, 2)\n horizonal_splitter.setStretchFactor(1, 1)\n #vertical_splitter = QtGui.QSplitter(QtCore.Qt.Vertical)\n #vertical_splitter.addWidget(self.basic_info_view_widget)\n #vertical_splitter.addWidget(horizonal_splitter)\n #vertical_splitter.setStretchFactor(0, 1)\n #vertical_splitter.setStretchFactor(1, 10)\n #layout.addWidget(vertical_splitter)\n layout.addWidget(self.basic_info_view_widget, 0, 0, 1, 1)\n layout.addWidget(horizonal_splitter, 1, 0, 10, 1)\n self.setLayout(layout)\n\n def handle_expand_button(self, index):\n self.tuple_pair_view_widget.update(index)\n\n def handle_verif_button(self, button, type, pair_id):\n total_pos_set = self.debugging_stat.total_pos_set\n total_neg_set = self.debugging_stat.total_neg_set\n cur_iter_pos_set = self.debugging_stat.cur_iter_pos_set\n if button.isChecked():\n if type == VerifButtonType.true:\n total_pos_set.add(pair_id)\n cur_iter_pos_set.add(pair_id)\n self.debugging_stat.cur_pos_num += 1\n self.debugging_stat.total_pos_num += 1\n if type == VerifButtonType.false:\n total_neg_set.add(pair_id)\n self.debugging_stat.cur_neg_num += 1\n self.debugging_stat.total_neg_num += 1\n else:\n if type == VerifButtonType.true:\n total_pos_set.remove(pair_id)\n cur_iter_pos_set.remove(pair_id)\n self.debugging_stat.cur_pos_num -= 1\n self.debugging_stat.total_pos_num -= 1\n if type == VerifButtonType.false:\n total_neg_set.remove(pair_id)\n self.debugging_stat.cur_neg_num -= 1\n self.debugging_stat.total_neg_num -= 1\n\n self.basic_info_view_widget.update_cur_iter_verified_num(\n type, self.debugging_stat.cur_pos_num, self.debugging_stat.cur_neg_num)\n self.basic_info_view_widget.update_total_verified_num(\n type, self.debugging_stat.total_pos_num, self.debugging_stat.total_neg_num)\n\n\nclass BasicInfoViewWithLabel(QtGui.QWidget):\n def __init__(self, controller, label):\n super(BasicInfoViewWithLabel, self).__init__()\n self.controller = controller\n self.groupbox = QtGui.QGroupBox(label)\n #self.name_label_widget = QtGui.QLabel(label)\n\n self.cur_iter_label_obj = None\n self.cur_pos_label_obj = None\n self.cur_neg_label_obj = None\n\n self.total_iter_label_obj = None\n self.total_pos_label_obj = None\n self.total_neg_label_obj = None\n\n self.ret_label_obj = None\n\n self.setup_gui()\n\n def setup_gui(self):\n layout = QtGui.QGridLayout()\n self.build_cur_iter_widget(layout)\n self.build_total_iter_widget(layout)\n self.build_ret_widget(layout)\n\n self.groupbox.setLayout(layout)\n wrapper_layout = QtGui.QHBoxLayout()\n wrapper_layout.addWidget(self.groupbox)\n self.setLayout(wrapper_layout)\n\n def build_cur_iter_widget(self, layout):\n self.cur_iter_label_obj = QtGui.QLabel('Current iteration: ' + str(1))\n self.cur_pos_label_obj = QtGui.QLabel('Current iteration verified true matching: ' + str(0))\n self.cur_neg_label_obj = QtGui.QLabel('Current iteration verified false matching: ' + str(0))\n layout.addWidget(self.cur_iter_label_obj, 0, 0)\n layout.addWidget(self.cur_pos_label_obj, 1, 0)\n layout.addWidget(self.cur_neg_label_obj, 2, 0)\n\n def update_cur_iter_verified_num(self, type, cur_pos_num, cur_neg_num):\n if type == VerifButtonType.true:\n self.cur_pos_label_obj.setText('Current iteration verified true matching: ' + str(cur_pos_num))\n if type == VerifButtonType.false:\n self.cur_neg_label_obj.setText('Current iteration verified false matching: ' + str(cur_neg_num))\n\n def build_total_iter_widget(self, layout):\n self.total_iter_label_obj = QtGui.QLabel('Total iterations: ' + str(1))\n 
self.total_pos_label_obj = QtGui.QLabel('Total verified true matching: ' + str(0))\n self.total_neg_label_obj = QtGui.QLabel('Total verified false matching: ' + str(0))\n layout.addWidget(self.total_iter_label_obj, 0, 1)\n layout.addWidget(self.total_pos_label_obj, 1, 1)\n layout.addWidget(self.total_neg_label_obj, 2, 1)\n\n def update_total_verified_num(self, type, total_pos_num, total_neg_num):\n if type == VerifButtonType.true:\n self.total_pos_label_obj.setText('Total verified true matching: ' + str(total_pos_num))\n if type == VerifButtonType.false:\n self.total_neg_label_obj.setText('Total verified false matching: ' + str(total_neg_num))\n\n def build_ret_widget(self, layout):\n self.ret_label_obj = QtGui.QLabel('I\\'ve finished verifying the current iteration:')\n layout.addWidget(self.ret_label_obj, 0, 2)\n\n unclear_button = QtGui.QRadioButton('Give me a set of new tuple pairs')\n terminate_button = QtGui.QRadioButton('Terminate debugging blocking')\n finish_button = QtGui.QPushButton('Confirm', self)\n layout.addWidget(unclear_button, 1, 2)\n layout.addWidget(terminate_button, 1, 3)\n layout.addWidget(finish_button, 2, 2)\n\n\nclass RecomListTableViewWithLabel(QtGui.QWidget):\n\n def __init__(self, controller, recom_list, label):\n super(RecomListTableViewWithLabel, self).__init__()\n self.controller = controller\n self.recom_list = recom_list\n self.label = label\n self.label_widget = None\n self.table_view_widget = None\n self.setup_gui()\n\n def setup_gui(self):\n label = QtGui.QLabel(self.label)\n table_view = RecomListTableView(self.controller, self.recom_list)\n self.label_widget = label\n self.table_view_widget = table_view\n layout = QtGui.QVBoxLayout()\n layout.addWidget(self.label_widget)\n layout.addWidget(self.table_view_widget)\n self.setLayout(layout)\n\n\nclass RecomListTableView(QtGui.QTableWidget):\n\n def __init__(self, controller, recom_list):\n super(RecomListTableView, self).__init__()\n self.controller = controller\n self.recom_list = recom_list\n #self.checkbox_list = []\n self.setup_gui()\n\n def setup_gui(self):\n nrows = len(self.recom_list)\n self.setRowCount(nrows)\n ncols = 4\n self.setColumnCount(ncols)\n\n headers = ['Left Tuple ID', 'Right Tuple ID', 'Expand', 'Verification Result']\n self.setHorizontalHeaderLabels(headers)\n self.horizontalHeader().setStretchLastSection(True)\n self.resizeRowsToContents()\n self.verticalHeader().setVisible(True)\n\n if nrows > 0:\n for i in range(nrows):\n #checkbox_pair = [QtGui.QRadioButton('True Matching'), QtGui.QRadioButton('False Matching')]\n for j in range(ncols):\n if j < 2:\n self.setItem(i, j, QtGui.QTableWidgetItem(str(self.recom_list[i][j])))\n self.item(i, j).setFlags(QtCore.Qt.ItemIsEnabled)\n if j == 2:\n button = QtGui.QPushButton('Expand', self)\n self.setCellWidget(i, j, button)\n button.clicked.connect(partial(self.controller.handle_expand_button, i))\n if j == 3:\n layout = QtGui.QVBoxLayout()\n true_button = QtGui.QRadioButton('True Matching')\n false_button = QtGui.QRadioButton('False Matching')\n true_button.toggled.connect(partial(self.controller.handle_verif_button, true_button,\n VerifButtonType.true, (self.recom_list[i][0], self.recom_list[i][1])))\n false_button.toggled.connect(partial(self.controller.handle_verif_button, false_button,\n VerifButtonType.false, (self.recom_list[i][0], self.recom_list[i][1])))\n layout.addWidget(true_button)\n layout.addWidget(false_button)\n #checkbox_pair[0].toggled.connect(partial(self.controller.handle_verif_button, i, 0))\n 
#checkbox_pair[1].toggled.connect(partial(self.controller.handle_verif_button, i, 1))\n #self.checkbox_list.append(checkbox_pair)\n #layout.addWidget(checkbox_pair[0])\n #layout.addWidget(checkbox_pair[1])\n cellWidget = QtGui.QWidget()\n cellWidget.setLayout(layout)\n\n self.setCellWidget(i, j, cellWidget)\n\n self.resizeRowsToContents()\n self.resizeColumnsToContents()\n\n\nclass TuplePairTableViewWithLabel(QtGui.QWidget):\n\n def __init__(self, controller, recom_list, schema, label):\n super(TuplePairTableViewWithLabel, self).__init__()\n self.controller = controller\n self.recom_list = recom_list\n self.schema = schema\n self.label = label\n\n self.label_widget = None\n self.table_view_widget = None\n\n self.setup_gui()\n\n def setup_gui(self):\n label = QtGui.QLabel(self.label)\n table_view = TuplePairTableView(self.controller, self.schema)\n self.label_widget = label\n self.table_view_widget = table_view\n layout = QtGui.QVBoxLayout()\n layout.addWidget(self.label_widget)\n layout.addWidget(self.table_view_widget)\n self.setLayout(layout)\n\n def update(self, index):\n tuple_view = self.table_view_widget\n nrows = tuple_view.rowCount()\n ltuple = self.recom_list[index][2]\n rtuple = self.recom_list[index][3]\n for i in range(nrows):\n tuple_view.setItem(i, 0, QtGui.QTableWidgetItem(str(ltuple[i])))\n tuple_view.item(i, 0).setFlags(QtCore.Qt.ItemIsEnabled)\n tuple_view.setItem(i, 1, QtGui.QTableWidgetItem(str(rtuple[i])))\n tuple_view.item(i, 1).setFlags(QtCore.Qt.ItemIsEnabled)\n\n tuple_view.resizeRowsToContents()\n\n\nclass TuplePairTableView(QtGui.QTableWidget):\n\n def __init__(self, controller, schema):\n super(TuplePairTableView, self).__init__()\n self.controller = controller\n self.schema = schema\n\n self.setup_gui()\n\n def setup_gui(self):\n nrows = len(self.schema)\n self.setRowCount(nrows)\n ncols = 2\n self.setColumnCount(ncols)\n\n headers = ['Left Tuple Value', 'Right Tuple Value']\n self.setHorizontalHeaderLabels(headers)\n self.horizontalHeader().setStretchLastSection(True)\n self.verticalHeader().setVisible(True)\n self.setVerticalHeaderLabels(self.schema)\n self.horizontalHeader().setResizeMode(1)\n # 0 represents \"Disable selection\"\n self.setSelectionMode(0)\n\n\ndef read_wrapped_recom_list(file, K):\n recom_lists = []\n infile = open(file, 'r')\n line = infile.readline()\n schema = line.split('@_@_@_@')\n for i in range(len(schema)):\n schema[i] = schema[i].rstrip('\\n')\n lines = infile.readlines()\n start = 0\n while (start < len(lines)):\n recom_list = []\n for i in range(K):\n ltuple = []\n rtuple = []\n for j in range(len(schema)):\n ltuple.append(lines[start + 2 + j].rstrip('\\n'))\n rtuple.append(lines[start + 2 + len(schema) + j].rstrip('\\n'))\n recom_list.append((lines[start].rstrip('\\n'), lines[start + 1].rstrip('\\n'), ltuple, rtuple))\n start += 2 + 2 * len(schema)\n recom_lists.append(copy.deepcopy(recom_list))\n return schema, recom_lists\n\n\nif __name__ == \"__main__\":\n schema, recom_lists = read_wrapped_recom_list('../Misc/new_recom_list.txt', 100)\n app = mg._viewapp\n window = MainWindowManager(schema, recom_lists)\n window.show()\n app.exec_()","repo_name":"hanli91/Debugging_blocking","sub_path":"gui/debug_blocking_gui.py","file_name":"debug_blocking_gui.py","file_ext":"py","file_size_in_byte":13664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33724393159","text":"import torch\nimport os\nimport sys\nimport numpy as np\nfrom time import sleep\nfrom IPython import 
embed\nfrom src.rl.General.Board import Board\nfrom src.rl.General.NN import QNet, DuelingNet\nfrom src.utils.writecsv import CSV\n\npath = 'weights/'\nassert len(sys.argv)>1, \"Introduce model version\"\nassert os.path.exists(os.path.join(path,sys.argv[1])),\"Path doesn't exist\"\nvpath = os.path.join(path, sys.argv[1])\n\n\n''' We'll be able to evaluate just one iteration of model's weights, or\n\tall the different iterations trained\n'''\nif len(sys.argv)>2:\n\titerations = [sys.argv[2]]\nelse:\n\titerations = os.listdir(vpath)\n\titerations.sort(key=lambda x: int(x.split('.')[0]))\nprint(iterations)\nboard = Board()\nif 'dueling' in sys.argv[1]:\n\tQ = DuelingNet(board)\nelse:\n\tQ = QNet(board)\n\ndef eval_step(q_fn, state):\n\tboard_state = torch.from_numpy(board.getEnvironment(state).astype(np.float32)).type(dtype)\n\tq_values = q_fn.predict(board_state.unsqueeze(0))\n\treturn board.actions[q_values.max(1)[1][0]]\n\nfor it in iterations:\n\n\t''' WRITE ALL STEPS AND TERMINAL STATES INTO THE CSV '''\n\tcsv = CSV(\"coords_{}\".format(it.split('.')[0]), sys.argv[1])\n\n\tweights = torch.load(os.path.join(vpath, it))\n\tQ.load_state_dict(weights)\n\tdtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor\n\tQ = Q.type(dtype)\n\n\tnum_episodes = 100\n\tsave_episodes = 2\n\tlost = 0\n\tmvs = []\n\trwr = []\n\n\tfor i in range(num_episodes):\n\t\tinitState = board.resetInitRandomly()\n\t\tdone = False\n\t\twhile not done:\n\t\t\t# board.printBoard(initState)\n\t\t\taction = eval_step(Q,initState)\n\t\t\treward, nextState, done = board.takeAction(initState, action)\n\t\t\tinitState = nextState\n\t\t\tif board.movements > board.maxSteps:\n\t\t\t\tlost += 1\n\t\t\t\tbreak\n\t\tmvs.append(board.movements)\n\t\trwr.append(board.totalreward)\n\tavg_mvs = sum(mvs)/num_episodes\n\tavg_rwr = sum(rwr)/num_episodes\n\tmessage = \"ITERATION: {}\\nVICTORIES: {}\\nDEFEATS: {}\\nAVERAGE REWARD: {}\\nAVERAGE MOVEMENTS: {}\".format(it,(num_episodes-lost), lost, avg_rwr, avg_mvs)\n\tprint(message)\n\tcsv.write([it.split('.')[0],(num_episodes-lost)/num_episodes*100],[round(avg_rwr,2),avg_mvs])\n\n\tfor i in range(save_episodes):\n\t\tinitState = board.resetInitRandomly()\n\t\tcsv.write(initState,board.terminalState)\n\t\tdone = False\n\t\twhile not done:\n\t\t\taction = eval_step(Q,initState)\n\t\t\treward, nextState, done = board.takeAction(initState, action)\n\t\t\tcsv.write(nextState,board.terminalState)\n\t\t\tinitState = nextState\n\t\t\tif board.movements > board.maxSteps:\n\t\t\t\tbreak\n\tcsv.close()\n","repo_name":"juanjo3ns/starwarsRL","sub_path":"src/rl/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"16601873345","text":"#Author: Gerson Scheffer\n#Class: Manha\n\n#######################################################################################################\n# Import\n\nfrom classfile.Athlete import *\nimport statistics as st\n\n#######################################################################################################\n# List\n\nlistAtleta = []\nlistClube = []\n\n#######################################################################################################\n# Function\ndef firu(text):\n a = 20\n print(\"=\"*a)\n print(\"{}\".format(str(text)))\n print(\"=\"*a)\n#---------------------------------------------------------------------------\n# Na opção 1 – Deve apenas cadastrar o atleta(nome).\n\ndef 
op1():\n \"\"\" Cadastro dos atletas\n Tambem confere se o atleta ja esta cadastrado na listAtleta[]\n \"\"\"\n while True:\n print(\"Para encerrar cadastro digite [end]\")\n atletaIn = input(\"Nome do atleta: \").lower()\n if len(atletaIn) == 0:\n print(\"Valor invalido\")\n return op1()\n if atletaIn == \"end\":\n break\n else:\n test = True\n for i in listAtleta:\n if i.name == atletaIn:\n test = False\n firu(\"Atleta ja cadastrado no banco de dados\")\n if test:\n atletaName = atleta(atletaIn)\n listAtleta.append(atletaName)\n firu(\"Atleta cadastrado\")\n\n#---------------------------------------------------------------------------\n# Na opção 2 – Deves cadastrar os saltos do atleta. (Localiza o atleta e digita o salto.)\n\ndef check_jumpN():\n \"\"\" Valida se o objeto do salto esta correto\n \"\"\"\n try:\n jumpN = int(input(\"Digite o numero do salto: \"))\n if (jumpN >= 1) and (jumpN <= 5):\n return jumpN\n else:\n print(\"valor invalido\")\n return check_jumpN()\n except:\n print(\"valor invalido\")\n return check_jumpN()\n\ndef check_jumpV():\n \"\"\" Valida se o valor do salto esta correto\n \"\"\"\n try:\n jumpV = float(input(\"Digite o valor do salto: \"))\n if jumpV < 0:\n print(\"Valor invalido\")\n return check_jumpV()\n else:\n return jumpV\n except:\n print(\"valor invalido\")\n return check_jumpV()\n\ndef op2():\n \"\"\" Registra o salto de um atleta. jumpN numero do salto e jumpV a distancia do salto\n \"\"\"\n locAtleta = input(\"Localizar {}: \".format(\"Atleta\"))\n test = True\n for i in listAtleta:\n if i.name == locAtleta:\n test = False\n print(\"Localizado: {}\".format(locAtleta))\n firu(\"Registro de salto\")\n print(\n \"\"\"\n| {:^31} | {} | {} | {} | {} | {} |\n\"\"\".format(\n \"Nome\",\n \"Salto 1\",\n \"Salto 2\",\n \"Salto 3\",\n \"Salto 4\",\n \"Salto 5\"\n ), end=\"\")\n print(\n \"\"\"\n|+ {:30} | {:^7} | {:^7} | {:^7} | {:^7} | {:^7} |\n\"\"\".format(\n i.name,\n i.salto1,\n i.salto2,\n i.salto3,\n i.salto4,\n i.salto5,\n ), end=\"\")\n jumpN = check_jumpN()\n jumpV = check_jumpV()\n i.setSalto(jumpN, jumpV)\n if test:\n firu(\"{} nao Localizado\".format(\"Atleta\"))\n return op2()\n \n#---------------------------------------------------------------------------\n# Na opção 3 – Gerar o relatório com todos os atletas e os seus saltos.\n\ndef op3():\n \"\"\" Gera relatorio dos atletas com os 5 respectivos saltos\n \"\"\"\n print(\n \"\"\"\n| {:^31} | {} | {} | {} | {} | {} |\n\"\"\".format(\n \"Nome\",\n \"Salto 1\",\n \"Salto 2\",\n \"Salto 3\",\n \"Salto 4\",\n \"Salto 5\"\n ), end=\"\")\n\n for i in listAtleta:\n print(\n \"\"\"\n|+ {:30} | {:^7} | {:^7} | {:^7} | {:^7} | {:^7} |\n\"\"\".format(\n i.name,\n i.salto1,\n i.salto2,\n i.salto3,\n i.salto4,\n i.salto5,\n ), end=\"\")\n \n#---------------------------------------------------------------------------\n# Na opção 4 – Deves cadastrar o clube e vincular o atleta ao clube.\n\ndef op4():\n \"\"\" Cadastra um clube na listClube[], depois cadastra atletas da listAtleta[] dentro do clube, se o atleta nao existir ja faz o cadastro do atleta\n \"\"\"\n while True:\n print(\"Para encerrar cadastro digite [end]\")\n clubeReg = check_clube()\n if clubeReg == \"end\":\n break\n while True:\n print(\"Para encerrar cadastro digite [end]\")\n print(\"Nome dos Atletas cadastrados: \")\n print(\" | {:^30} | {:^30s} |\".format(\"Nome\", \"Clube\"))\n count = 0\n for i in listAtleta:\n count += 1\n test3 = True\n for j in listClube:\n for k in j.atletasClube:\n if i.name == k.name:\n test3 = False\n print(\"{:2} 
| {:30} | {:30} |\".format(count, i.name, j.name))\n if test3:\n print(\"{:2} | {:30} | {:30} |\".format(count, i.name, \"Empty\"))\n loc = input(\"Nome do atleta no clube [{}]: \".format(clubeReg.name)).lower()\n if loc == \"end\":\n break\n test1 = True\n for i in listAtleta:\n if i.name == loc:\n test1 = False\n test2 = True\n for j in listClube:\n for k in j.atletasClube:\n if k.name == i.name:\n test2 = False\n firu(\"Atleta [{}] ja pertence ao clube [{}]\".format(i.name, j.name))\n if test2:\n clubeReg.regAtletaClube(i)\n firu(\"Atleta [{}] cadastrado no clube [{}]\".format(i.name,clubeReg.name))\n if test1:\n firu(\"Atleta nao cadastrado\")\n while True:\n sn = input(\"Deseja cadastrar esse atleta? S ou N: \").lower()\n if sn == \"s\":\n atletaReg = atleta(loc)\n listAtleta.append(atletaReg)\n print(\"Atleta cadastrado\")\n break\n if sn == \"n\":\n pass\n break\n else:\n print(\"Valor invalido\")\n \ndef check_clube():\n \"\"\" Confere se o clube ja existe, se nao ja cadastra o novo clube\n \"\"\"\n clubeIn = input(\"Nome do clube: \").lower()\n if len(clubeIn) == 0:\n print(\"Valor invalido\")\n return check_clube()\n test = True\n for i in listClube:\n if i.name == clubeIn:\n firu(\"Clube localizado\")\n test = False\n return i\n if clubeIn == \"end\":\n test = False\n return \"end\"\n if test:\n firu(\"Clube nao localizado\")\n while True:\n sn = input(\"Deseja cadastrar esse novo clube? S ou N: \").lower()\n if sn == \"s\":\n clubeReg = Clube(clubeIn)\n listClube.append(clubeReg)\n firu(\"Clube cadastrado\")\n return clubeReg\n if sn == \"n\":\n return check_clube()\n else:\n print(\"Valor invalido\")\n\n#---------------------------------------------------------------------------\n# Na opção 5 – Gerar o relatório mostrando o nome do clube e os atletas vinculados ao clube.\n\ndef op5():\n \"\"\" Gera relatorio com nome do clube e atleta associado\n \"\"\"\n for i in listClube:\n print(\"Clube: {}\".format(i.name))\n count1 = 0\n for j in i.atletasClube:\n count1 += 1\n print(\" {} | {:30} |\".format(count1, j.name))\n\n#---------------------------------------------------------------------------\n# Na opção 6 – Deves gerar o relatório mostrando a média de cada atleta. 
A média deve ser calculada desconsiderando o pior e o melhor resultado.\n\ndef op6():\n \"\"\" Gera relatorio das media dos saltos de cada atleta, desconsiderando o pior e melhor resultado\n \"\"\"\n print(\"| {:^30} | {:^15} |\".format(\"Atleta\", \"Media dos saltos [m]\"))\n for i in listAtleta:\n avg = [i.salto1, i.salto2, i.salto3, i.salto4, i.salto5]\n avg = sorted(avg)\n avg = st.mean(avg[1:4])\n print(\"| {:30} | {:^20.2f} |\".format(i.name, avg))\n\n\ndef MainScript():\n \"\"\" Menu de opcoes\n \"\"\"\n print(\n \"\"\"\n =================================\n MENU\n =================================\n 0- Finaliza\n 1-\tCadastra o Atleta\n 2-\tCadastra os Saltos do Atleta\n 3-\tRelatório de Geral Atleta\n 4-\tCadastra Clube do Atleta\n 5-\tRelatório de Atletas por Clube\n 6-\tRelatório de Final\n =================================\n \"\"\"\n )\n try:\n op = int(input(\"Escolha uma opcao: \"))\n if op > 6 or op < 0:\n firu(\"Valor invalido\")\n else:\n if op == 1:\n op1()\n if op == 2:\n op2()\n if op == 3:\n op3()\n if op == 4:\n op4()\n if op == 5:\n op5()\n if op == 6:\n op6()\n if op == 0:\n return 0\n \n \n except:\n firu(\"Opcao invalida\")\n return MainScript()\n\n############################################################\n# Main\nwhile True:\n MainScript()\n if MainScript() == 0:\n break\nfiru(\"Programa finalizado\")\n","repo_name":"devscheffer/SenacRS-Algoritmos-Programacao-1","sub_path":"04 - 2019-05-30 - Atletas/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":8541,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71937912681","text":"from colorteller import teller\nfrom colorteller.utils import benchmark\nfrom colorteller.visualize import BenchmarkCharts, ApplicationCharts\nfrom loguru import logger\nfrom nose import tools as _tools\n\n\ndef test__visualize___Charts():\n hex_strings = [\"#8de4d3\", \"#344b46\", \"#74ee65\", \"#238910\", \"#a6c363\", \"#509d99\"]\n\n ct = teller.ColorTeller(hex_strings=hex_strings)\n\n c = teller.Colors(colorteller=ct)\n\n m = c.metrics(\n methods=[benchmark.PerceptualDistanceBenchmark, benchmark.LightnessBenchmark]\n )\n\n charts = BenchmarkCharts(metrics=m, save_folder=\".\")\n\n charts.distance_matrix(show=False)\n\n charts.noticable_matrix(show=False)\n\n\ndef test__visualize___Charts():\n hex_strings = [\"#8de4d3\", \"#344b46\", \"#74ee65\", \"#238910\", \"#a6c363\", \"#509d99\"]\n\n ct = teller.ColorTeller(hex_strings=hex_strings)\n\n c = teller.Colors(colorteller=ct)\n\n ac = ApplicationCharts(colors=c, save_folder=\".\")\n\n # ac.bar_chart(show=False)\n # ac.line_chart(show=False)\n # ac.scatter_chart(show=False)\n # ac.donut_chart(show=True)\n ac.charts()\n","repo_name":"kausalflow/colorteller-package","sub_path":"tests/test_visualize.py","file_name":"test_visualize.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"9216524004","text":"import math\nimport random\nfrom Tkinter import *\nfrom PIL import Image, ImageTk\n\nclass Asteroid(object):\n \n @staticmethod\n def init():\n Asteroid.image = Image.open(\"images/asteroids2.png\")\n \n maxSpeed = 7\n minSize = 2\n maxSize = 7\n \n def __init__(self, x, y, level = None):\n if level is None:\n level = random.randint(Asteroid.minSize, Asteroid.maxSize)\n self.level = level\n \n self.x = x\n self.y = y\n self.r = self.power = 7*self.level\n \n if self.r > 50: self.r = 50\n \n self.image = [Asteroid.image, 
ImageTk.PhotoImage(Asteroid.image)]\n \n PILimg = self.image[0]\n width = height = self.r * 3\n factor = (self.level*1.) / Asteroid.maxSize\n \n width = height = int(width * factor)\n \n PILimg = baseImg = PILimg.resize((width, height), Image.ANTIALIAS)\n self.r = width/2.\n self.image = [PILimg, baseImg, ImageTk.PhotoImage(PILimg)]\n \n self.angle = 0\n self.angleSpeed = random.randint(-10, 10)\n if self.angleSpeed == 0: self.angleSpeed += 5\n \n vx = random.randint(-Asteroid.maxSpeed, Asteroid.maxSpeed)\n vy = random.randint(-Asteroid.maxSpeed, Asteroid.maxSpeed)\n if vx == 0 and vy == 0:\n vx += 2\n vy += 2\n self.velocity = (vx, vy)\n \n def __repr__(self):\n vx, vy = self.velocity\n return \"Asteroid at (%d, %d) going (%d, %d)\" % (self.x, self.y, vx, vy)\n \n def update(self, data):\n # rotate asteroid\n self.angle += self.angleSpeed\n PILimg = self.image[0]\n baseImg = self.image[1]\n PILimg = baseImg.rotate(self.angle)\n self.image = [PILimg, baseImg, ImageTk.PhotoImage(PILimg)]\n \n vx, vy = self.velocity\n \n self.x += vx\n self.y += vy\n \n if ((self.x + self.r > data.fieldSizeW) or \\\n (self.x - self.r < 0)):\n self.velocity = (-1 * vx, vy)\n \n if ((self.y + self.r > data.fieldSizeH) or \\\n (self.y - self.r < 0)):\n self.velocity = (vx, -1 * vy)\n \n def breakApart(self):\n if self.level <= Asteroid.minSize:\n return []\n else:\n ast1 = Asteroid(self.x, self.y, self.level - 1)\n ast2 = Asteroid(self.x, self.y, self.level - 1)\n return [ast1, ast2]\n \n def draw(self, canvas, data):\n x = self.x - data.scrollX\n y = self.y - data.scrollY\n canvas.create_image(x,y,image = self.image[2])\n \n \n ","repo_name":"zaberfire/TermProject","sub_path":"Asteroid.py","file_name":"Asteroid.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36579212151","text":"from pymongo import MongoClient\nfrom pymongo.database import Database\n\nfrom market_app.models.db_models.mongodb_models import Owner\nfrom market_app.repositories.mongo_db.mongo_db_owner_repository import MongoDbOwnerRepository\n\n\ndef get_database() -> Database:\n # Provide the mongodb atlas url to connect python to mongodb using pymongo\n CONNECTION_STRING = \"mongodb://root:example@localhost:27017\"\n\n # Create a connection using MongoClient. 
You can import MongoClient or use pymongo.MongoClient\n client = MongoClient(CONNECTION_STRING)\n\n # Create the database for our example (we will use the same database throughout the tutorial\n return client['market_app_db']\n\n\nclient = get_database()\nowner = Owner(surname=\"Smith\", phone_number=\"123-456-7890\",\n address=\"123 Main St.\", email_address=\"smith@example.com\", company_name=\"Acme Inc.\")\n\n\nrepository = MongoDbOwnerRepository(client)\n\n# Insert the owner into the database\nowner_id = repository.create(owner)\n\n# Update the owner's company name\nowner.company_name = \"Acme Corp.\"\nrepository.update(owner)\n\n# Find the owner by ID\nfound_owner = repository.get_by_id(owner_id)\nprint(found_owner)\n\n# Find all owners\nall_owners = repository.get_all()\n\nfor owner in all_owners:\n print(owner.surname, owner.id)\n\n# # Find owners by company name\nowners_by_company_name = repository.find_by_company_name(owner.company_name)\nprint(len(owners_by_company_name))\n\nrepository.delete(owner_id)","repo_name":"karolina-kuna/PK_ZTBD_PROJ_1","sub_path":"implementation/tests/databases/mongo_db/test_mongo_repositories.py","file_name":"test_mongo_repositories.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30422184224","text":"# Implementations of functions for loading data from PDB database and\n# transformation of 3D structure data into format of PB sequences\n\n# necessary imports\nimport os, pathlib\nimport read_config_data as rcd\n\n# removing 'Z' characters from begin and end of PB sequence\ndef transform_seq_line(seq):\n seq = seq.strip()\n seq = seq.rstrip()\n while seq[0] == 'Z':\n seq = seq[1:]\n\n while seq[-1] == 'Z':\n seq = seq[:-1]\n\n return seq\n\n# converts PDB from file on path PDBFilePath into PB format and saves this into\n# FASTA file whose path is returned\ndef convertPDBToPB(PDBFilePath):\n currentPath = os.getcwd()\n FASTA_DIRECTORY = rcd.get_path('FASTA_FILES_DIRECTORY')\n\n pos = PDBFilePath.rindex(\".\")\n pos1 = PDBFilePath[:pos].rindex(\"/\")\n pdb_id = PDBFilePath[pos1 + 1:pos]\n fasta_name = pdb_id + \".PB.fasta\"\n fasta_path = os.path.join(FASTA_DIRECTORY, fasta_name)\n\n if os.path.exists(fasta_path):\n return fasta_path\n\n os.chdir(FASTA_DIRECTORY)\n\n commandPBXplore = \"PBassign -p \" + PDBFilePath + \" -o \" + pdb_id\n os.system(commandPBXplore)\n os.chdir(currentPath)\n\n fasta_name = pdb_id + \".PB.fasta\"\n fasta_path = os.path.join(FASTA_DIRECTORY, fasta_name)\n return fasta_path\n\n# loading pdb from PDB database and transformation of 3D data from PDB file\n# into PB format\ndef load_pb_format(pdb_id):\n pdbFileName = pdb_id + \".pdb\"\n PDB_DIRECTORY = rcd.get_path('PDB_FILES_DIRECTORY')\n pathPdbFileName = os.path.join(PDB_DIRECTORY, pdbFileName)\n\n if not os.path.exists(pathPdbFileName):\n commandDownload = \"wget -P \" + PDB_DIRECTORY + \" https://files.rcsb.org/view/\"\n commandDownload += pdbFileName\n os.system(commandDownload)\n\n currentPath = os.getcwd()\n FASTA_DIRECTORY = rcd.get_path('FASTA_FILES_DIRECTORY')\n fasta_name = pdb_id + \".PB.fasta\"\n fasta_path = os.path.join(FASTA_DIRECTORY, fasta_name)\n\n if os.path.exists(fasta_path):\n return fasta_path\n\n os.chdir(FASTA_DIRECTORY)\n\n commandPBXplore = \"PBassign -p \" + pathPdbFileName + \" -o \" + pdb_id\n os.system(commandPBXplore)\n os.chdir(currentPath)\n\n fasta_name = pdb_id + \".PB.fasta\"\n fasta_path = os.path.join(FASTA_DIRECTORY, fasta_name)\n 
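    # PBassign -o <pdb_id> is expected to have written <pdb_id>.PB.fasta into FASTA_DIRECTORY by this point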
return fasta_path\n\n\n# function which passes through collection composed of pdb id-s, pdb files and\n# fasta files (with pb sequences) and finds pb sequences for proteins with pdb id-s or from pdb-s\n# and store them into fasta files\ndef generate_fastas(sources):\n fasta_files = []\n\n for source in sources:\n if source[1] == 'fasta':\n fasta_files.append(source[0])\n\n if source[1] == 'id':\n fasta_path = load_pb_format(source[0])\n fasta_files.append(fasta_path)\n\n if source[1] == 'pdb':\n fasta_path = convertPDBToPB(source[0])\n fasta_files.append(fasta_path)\n\n return fasta_files","repo_name":"kate-97/Lotos_Alignment_Application-","sub_path":"src/load_pb_data.py","file_name":"load_pb_data.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"44544907397","text":"from typing import Dict, Any, List\nfrom pathlib import Path\nimport yaml\nimport torch\nimport numpy as np\nimport pytorch_lightning as pl\nfrom torch.utils.data import DataLoader\nfrom jina import Flow\nfrom docarray import DocumentArray, Document\n\n\nclass DIETClassifierDataModule(pl.LightningDataModule):\n def __init__(\n self,\n featurizer_host: str ='featurizer',\n featurizer_port: int = 8888,\n filename: str = 'nlu.yml',\n batch_size: int = 32,\n ) -> None:\n super().__init__()\n self.flow = Flow().add(host=featurizer_host, port=featurizer_port, external=True)\n self.filename = filename\n self.batch_size = batch_size\n self.read_nlu_file()\n\n def read_nlu_file(self):\n nlu_file = open(Path(self.filename).resolve(), 'r')\n nlu: Dict[str, Any] = yaml.load(nlu_file, Loader=yaml.Loader)\n self.nlu_intents: List[Dict[str, Any]] = nlu.get('nlu', [])\n self.num_intents = len(self.nlu_intents)\n\n def setup(self, stage: str):\n self.flow.start()\n self.read_nlu_file()\n intent_data_raw = []\n for i, intent in enumerate(self.nlu_intents):\n examples: List[str] = intent.get('examples', [])\n example_docs = [Document(text=sentence) for sentence in examples]\n for doc in example_docs:\n intent_data_raw.append((doc, i))\n\n self.intent_dataset = []\n examples_da = DocumentArray([d[0] for d in intent_data_raw])\n features_da: DocumentArray = self.flow.post('/', inputs=examples_da, show_progress=True, request_size=10)\n\n for i, feature in enumerate(features_da):\n src = torch.from_numpy(np.array(feature.chunks[2].embedding))\n tgt = torch.tensor(intent_data_raw[i][1])\n self.intent_dataset.append((src, tgt))\n\n def train_dataloader(self):\n return DataLoader(self.intent_dataset, batch_size=self.batch_size, shuffle=True)\n\n def teardown(self, stage: str):\n self.flow.close()\n\nif __name__ == '__main__':\n data_module = DIETClassifierDataModule()\n data_module.prepare_data()\n","repo_name":"botisan-ai/diet-classifier-pytorch","sub_path":"diet_classifier/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"19416248284","text":"class Solution:\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n slist = []\n tlist = []\n if len(s) != len(t):\n return False\n for i in range(len(s)):\n slist.append(s[i])\n for i in range(len(t)):\n tlist.append(t[i])\n slist.sort()\n tlist.sort()\n if slist != tlist:\n return False\n else:\n return 
True","repo_name":"lilyandcy/python3","sub_path":"leetcode/isAnagram.py","file_name":"isAnagram.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26397524486","text":"\nimport sys\nsys.stdin = open('color_input.txt','r')\n# 첫 줄에 테스트 케이스 T\nT = int(input())\nfor tc in range(1,T+1):\n N = int(input())\n #색상 1, 2의 위치를 저장하는 리스트, 내부값은 셋이다.\n position = [set({}), set({})]\n for i in range(N):\n x1, y1, x2, y2, color = map(int, input().split())\n #x, y를 돌면서 set에 넣는다.\n for a in range(x1, x2+1):\n for b in range(y1, y2+1):\n position[color-1].add((a, b))\n #교집합을 이용하여 겹치는 부분만 꺼낸다.\n result = position[0] & position[1]\n #겹치는 만큼 출력\n print(f'#{tc} {len(result)}')\n\n","repo_name":"gangnamssal/honey_man_space","sub_path":"개인 공부/SWEA/알고리즘 수업 hw/220811/색칠하기.py","file_name":"색칠하기.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35553773483","text":"from pyspark.sql import SparkSession\nimport pyspark.sql.functions as f \n\nif __name__ == '__main__':\n \n spark = SparkSession.builder.appName('cass').config('spark.connection.host', '127.0.0.1').getOrCreate()\n readUsers = spark.read\\\n .format('org.apache.spark.sql.cassandra')\\\n .options(table = 'imdb', keyspace = 'ks_imdb')\\\n .load()\n\n language_average = readUsers\\\n .groupBy('original_language')\\\n .agg(f.round(f.avg('vote_average'),2))\\\n .withColumnRenamed('round(avg(vote_average), 2)', 'vote_average')\n \n count_language = readUsers\\\n .groupBy('original_language')\\\n .count()\n language_average.join(count_language, ['original_language'], 'inner').sort(f.desc('count')).show()\n \n spark.stop()\n\n\n","repo_name":"AquilaMS/hadoop-studies","sub_path":"cassandra/script_spark.py","file_name":"script_spark.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31226722783","text":"import os\nimport collections\nimport numpy as np\n\nclass BalanceBot(object):\n \n def __init__(self, \n pybullet_client, \n time_step, \n action_repeat, \n control_latency):\n\n self._p = pybullet_client\n\n self._motor_strength = {\"torso_l_wheel\": 0.0, \"torso_r_wheel\": 0.0}\n self._motor_direction = {\"torso_l_wheel\": -1, \"torso_r_wheel\": 1}\n \n self._action_repeat = action_repeat\n self._max_pwm = (5000. / 3200.) 
* (2.0 * 3.1415926)\n self._max_force = 0.4\n\n self._time_step = time_step\n self._control_latency = control_latency\n self._observation_history = collections.deque(maxlen=5)\n self._delayed_observation = []\n\n\n def _buildJointNameToIdDict(self):\n\n num_joints = self._p.getNumJoints(self.balancebot_id)\n self._joint_name_to_id = {}\n for i in range(num_joints):\n joint_info = self._p.getJointInfo(self.balancebot_id, i)\n self._joint_name_to_id[joint_info[1].decode(\"UTF-8\")] = joint_info[0]\n \n\n def _buildUrdfIds(self):\n\n self._base_link_id = -1\n self._wheel_link_id = [0, 1]\n self._wheel_joint_id = [0, 1]\n\n def reset(self):\n \n random_init_angle = [np.random.uniform(-0.05, 0.05),\n np.random.uniform(-0.01, 0.01),\n np.random.uniform(-0.05, 0.05)]\n \n random_init_orient = self._p.getQuaternionFromEuler(random_init_angle)\n\n urdf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"balancebot_simple.urdf\")\n self.balancebot_id = self._p.loadURDF(urdf_file, \n basePosition=[0, 0, 0.01],\n baseOrientation=random_init_orient)\n\n self._buildJointNameToIdDict()\n self._buildUrdfIds()\n\n for wheel in ['torso_l_wheel', 'torso_r_wheel']:\n wheelid = self._joint_name_to_id[wheel]\n self._p.setJointMotorControl2(self.balancebot_id,wheelid,self._p.VELOCITY_CONTROL,targetVelocity=0,force=0)\n\n self._observation_history.clear()\n self.receiveObservation()\n \n return None\n\n\n def step(self, action):\n\n for _ in range(self._action_repeat):\n self.applyAction(action)\n self._p.stepSimulation()\n self.receiveObservation()\n\n def _getTrueObservation(self):\n\n basePos, baseOrn = self._p.getBasePositionAndOrientation(self.balancebot_id)\n baseRPY = self._p.getEulerFromQuaternion(baseOrn)\n baseLinVel, baseAngVel = self._p.getBaseVelocity(self.balancebot_id)\n observation = [basePos, baseLinVel, baseRPY, baseAngVel]\n \n return observation\n\n def receiveObservation(self):\n\n self._observation_history.appendleft(self._getTrueObservation())\n self._delayed_observation = self._getDelayedObservation(self._control_latency)\n\n\n def _getDelayedObservation(self, latency):\n \n if latency <= 0 or len(self._observation_history) == 1:\n observation = self._observation_history[0]\n else:\n n_steps_ago = int(latency / self._time_step)\n if n_steps_ago + 1 >= len(self._observation_history):\n return self._observation_history[-1]\n remaining_latency = latency - n_steps_ago * self._time_step\n blend_alpha = remaining_latency / self._time_step\n observation = (\n (1.0 - blend_alpha) * np.array(self._observation_history[n_steps_ago])\n + blend_alpha * np.array(self._observation_history[n_steps_ago + 1]))\n return observation\n\n\n def applyAction(self, motor_cmd):\n\n assert type(motor_cmd) == np.ndarray\n\n motor_cmd = np.clip(motor_cmd[0], -1.0, 1.0)\n motor_name_list = ['torso_l_wheel', 'torso_r_wheel']\n \n for motor_name in motor_name_list:\n\n self._motor_strength[motor_name] = motor_cmd\n\n self._actual_write_cmd = np.clip(self._motor_strength[motor_name], -1.0, 1.0)\n self._actual_motor_pwm = self._actual_write_cmd * self._max_pwm * self._motor_direction[motor_name] \n self._setMotorVelocityById(self._joint_name_to_id[motor_name], self._actual_motor_pwm)\n \n\n def _setMotorVelocityById(self, motor_id, motor_vel):\n self._p.setJointMotorControl2(\n bodyUniqueId=self.balancebot_id, \n jointIndex=motor_id,\n controlMode=self._p.VELOCITY_CONTROL,\n targetVelocity=motor_vel,\n force=self._max_force)\n\n def getBasePosition(self):\n delayed_position = 
np.array(self._delayed_observation[0])\n return delayed_position\n\n def getBaseLinVelocity(self):\n delayed_lin_vel = np.array(self._delayed_observation[1])\n return delayed_lin_vel\n\n def getBaseRollPitchYaw(self):\n delayed_roll_pitch_yaw = np.array(self._delayed_observation[2])\n return delayed_roll_pitch_yaw\n\n def getMotorStrength(self):\n\n l_motor = self._motor_strength[\"torso_l_wheel\"]\n r_motor = self._motor_strength[\"torso_r_wheel\"]\n\n return np.array([l_motor, r_motor])\n\n\n \n\n","repo_name":"sk413025/pybullet-example","sub_path":"env/balancebot.py","file_name":"balancebot.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5901117919","text":"# <문제> 회의실 배정\n# 한 개의 회의실이 있는데 이를 사용하고자 하는 N개의 회의에 대하여 회의실 사용표를 만들려고 한다. 각 회의 I에 대해 시작시간과 끝나는 시간이 주어져 있고,\n# 각 회의가 겹치지 않게 하면서 회의실을 사용할 수 있는 회의의 최대 개수를 찾아보자. 단, 회의는 한번 시작하면 중간에 중단될 수 없으며 한 회의가 끝나는 것과 동시에 다음 회의가 시작될 수 있다.\n# 회의의 시작시간과 끝나는 시간이 같을 수도 있다. 이 경우에는 시작하자마자 끝나는 것으로 생각하면 된다.\n# <입력> 첫째 줄에 회의의 수 N(1 ≤ N ≤ 100,000)이 주어진다. 둘째 줄부터 N+1 줄까지 각 회의의 정보가 주어지는데 이것은 공백을 사이에 두고 회의의 시작시간과 끝나는 시간이 주어진다.\n# <출력> 첫째 줄에 최대 사용할 수 있는 회의의 최대 개수를 출력한다.\n\n# <내 풀이>\n# 회의시간 짧은 순으로 정렬 후 빠른 시간으로 정렬해서 배정\nn = int(input())\nmeeting = (list(map(int, input().split())) for i in range(n))\n\nresult = 0\nmeeting = sorted(meeting, key=lambda x: (x[0], x[1]-x[0]))\nstTm = 0\nedTm = 0\nfor i in meeting:\n if stTm <= i[0] and edTm > i[1]:\n stTm = i[0]\n edTm = i[1]\n elif i[0] >= edTm:\n stTm = i[0]\n edTm = i[1]\n result += 1\n\nprint(result)\n\n# <다른 사람 풀이 참고>\nn = int(input())\ntime = sorted([tuple(map(int, input().split())) for _ in range(n)], key=lambda x:(x[1], x[0]))\nresult = end = 0\n\nfor s, e in time:\n if s >= end:\n result += 1\n end = e\n\nprint(result)\n\n# <추가> 속도가 10배 넘게 차이 난다. 내 풀이도 보면 끝나는 시간을 기준으로 +1 이 되는데 전혀 생각하지 못했다. 
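The faster solution in the meeting-room record above is the classic interval-scheduling greedy: sort by end time, then take every meeting that starts at or after the previous end. Condensed into one function, checked against a standard sample instance (expected answer 4):

```python
def max_meetings(times):
    count = end = 0
    for s, e in sorted(times, key=lambda t: (t[1], t[0])):  # earliest end first
        if s >= end:          # meeting fits after the last accepted one
            count += 1
            end = e
    return count

assert max_meetings([(1, 4), (3, 5), (0, 6), (5, 7), (3, 8), (5, 9),
                     (6, 10), (8, 11), (8, 12), (2, 13), (12, 14)]) == 4
```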
좀 더 나은 생각을 해야 할 텐데,","repo_name":"Hyeoni18/coding-test-study","sub_path":"python_source/greedy-algorithm/baekjoon/AssignMeetingRoom.py","file_name":"AssignMeetingRoom.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19424764833","text":"from dotenv import load_dotenv\nfrom fastapi import FastAPI, Header, Request, HTTPException\nimport http\nimport os\nimport hmac\nimport hashlib\nload_dotenv()\napp = FastAPI()\nWEBHOOK_SECRET = os.getenv('WEBHOOK_SECRET')\n\n\ndef generate_hash_signature(\n secret: bytes,\n payload: bytes,\n digest_method=hashlib.sha1\n):\n return hmac.new(secret, payload, digest_method).hexdigest()\n\n\n@app.post(\"/webhook\", status_code=http.HTTPStatus.OK)\nasync def webhook(request: Request, x_hub_signature: str = Header(None)):\n payload = await request.body()\n # secert = WEBHOOK_SECRET.encode(\"utf-8\")\n # signature = generate_hash_signature(secert, payload)\n # if x_hub_signature != f\"sha1={signature}\":\n # raise HTTPException(status_code=401, detail=\"Authentication error\")\n # else:\n print(payload)\n return {}\n","repo_name":"martianwei/fastAPI_webhook","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"660928067","text":"#from juser.models import User\n#from jasset.models import Asset\nfrom jumpserver.api import *\nfrom ops_auth.auth import OpsAuthBackend\nfrom jperm.perm_api import Rule,Asset\n\n\ndef name_proc(request):\n user_id = request.user.id\n role_id = {'SU': 2, 'GA': 1, 'CU': 0}.get(request.user.web_role, 0)\n # role_id = 'SU'\n asset = Rule(request.user)\n host_total_num = len(asset.assets)\n host_active_num = len(asset.assets)\n asset_group_total_num = len(asset.asset_group)\n request.session.set_expiry(3600)\n request.session.role_id = role_id\n\n info_dic = {'session_user_id': user_id,\n 'session_role_id': role_id,\n 'host_total_num': host_total_num,\n 'host_active_num': host_active_num,\n 'asset_group_total_num' : asset_group_total_num,\n }\n\n return info_dic\n\n","repo_name":"Aroundight/jump-jump","sub_path":"jumpserver/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22410242504","text":"from collections import defaultdict, namedtuple\n\nfrom xldlib.export import dataframes\nfrom xldlib.objects import protein\nfrom xldlib.qt.objects import base\nfrom xldlib.resources.parameters import reports\nfrom xldlib.utils import xictools\n\n\n# OBJECTS\n# -------\n\nCrosslink = namedtuple(\"Crosslink\", \"linkname linktype\")\n\n\nclass Memoizer(namedtuple(\"Memoizer\", \"seen frozen\")):\n '''Subclass for default, mutable arguments'''\n\n def __new__(cls, seen=None, frozen=None):\n if seen is None:\n seen = {}\n if frozen is None:\n frozen = set()\n return super(Memoizer, cls).__new__(cls, seen, frozen)\n\n\n# HELPERS\n# -------\n\n\ndef getseen(labels):\n '''Returns a Seen instance from the spreadsheet data'''\n\n profile = labels.get_document().profile\n seen = dataframes.Seen.fromspreadsheet(labels.spreadsheet)\n\n for population, counts in labels.sequenced_population.items():\n crosslinker = profile.getheader(population)\n yield seen._replace(crosslinker=crosslinker), counts\n\n\ndef getintegrated(spreadsheet, headers, counts):\n '''Returns 
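The FastAPI webhook record above recomputes the SHA-1 HMAC of the raw request body and (in its commented-out branch) compares it to the `X-Hub-Signature` header with `!=`. A small sketch of the same check; `hmac.compare_digest` is a constant-time comparison, a common hardening over the plain inequality:

```python
import hashlib
import hmac

def verify_signature(secret: bytes, payload: bytes, header: str) -> bool:
    # Recompute "sha1=<hexdigest>" exactly as the webhook does, then compare
    # in constant time to avoid timing side channels.
    expected = "sha1=" + hmac.new(secret, payload, hashlib.sha1).hexdigest()
    return hmac.compare_digest(expected, header)
```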
the integrated dataset from the header list'''\n\n integrated = []\n for header in headers:\n count = counts.get(header)\n val = xictools.IntegralData.fromspreadsheet(spreadsheet, header, count)\n integrated.append(val)\n\n return tuple(integrated)\n\n\n# DATAFRAMES\n# -----------\n\n\nclass Dataframe(dataframes.QuantitativeDataframe):\n '''Export dataframe for the integrated parameters'''\n\n # GETTERS\n\n def getamplitudes(self, labelslist):\n '''Maps each linkage string to a a set of integrated data'''\n\n amplitudes = dataframes.Amplitudes(self.ratios)\n\n for labels in labelslist:\n spreadsheet = labels.spreadsheet\n headers = self.getheaders(labels)\n counts = labels.getusedheaders()\n integrated = getintegrated(spreadsheet, headers, counts)\n\n # get the key\n linkage = spreadsheet.getlinkage()\n filename = spreadsheet.getsearch()\n file_obj = amplitudes.checknew(filename)\n file_obj[linkage].add(integrated)\n\n return amplitudes.filter()\n\n def getheaders(self, labels):\n # TODO: make property\n return [self.profile.getheader(i.populations) for i in labels]\n\n\nclass HierarchicalDataframe(dataframes.HierarchicalDataframe):\n '''Export dataframe with hierarchical header definitions'''\n\n\n# WORKSHEETS\n# ----------\n\n\nclass ProteinsDummy(namedtuple(\"ProteinsDummy\", \"mapping\")):\n '''Dummy proteins object to match the SQLite one'''\n\n def __new__(cls, mapping=None, **kwds):\n '''Initializes a mapping data structure for fake data lookups'''\n\n if mapping is None:\n mapping = {i: {} for i in protein.PROTEIN_FIELDS}\n mapping.update(kwds)\n\n return super(ProteinsDummy, cls).__new__(cls, mapping)\n\n\nclass WorksheetExport(base.BaseObject):\n '''Inheritable methods for dataframe export'''\n\n proteins = ProteinsDummy()\n\n # GETTERS\n\n def getcrosslinks(self, document):\n '''Returns a unique list of Crosslink instances in the document'''\n\n crosslinks = defaultdict(list)\n with document.togglememory():\n for transition_file in document:\n for labels in transition_file:\n\n attrs = (getattr(labels, i) for i in Crosslink._fields)\n item = Crosslink(*attrs)\n crosslinks[item].append(labels)\n\n return crosslinks\n\n def getsheets(self, crosslinks, sheet='quantitative'):\n '''Sets the sheets included for the report'''\n\n independent = [reports.Independent(sheet=sheet, *(i))\n for i in sorted(crosslinks)]\n return [reports.Sheet.fromorder(i) for i in independent]\n\n def getlinkages(self, labels_list):\n '''Generates the linkage from a labels list'''\n\n linkages = []\n memoizer = Memoizer()\n\n for index, labels in enumerate(labels_list):\n for seen, count in getseen(labels):\n linkage = self.__getlinkage(seen, memoizer, labels, count)\n linkages.append(linkage)\n\n return [dataframes.Linkage.fromdict(i) for i in linkages]\n\n def __getlinkage(self, seen, memoizer, *args):\n '''Branched logic to internally return or increment a new linkage'''\n\n if seen in memoizer.seen:\n return self.increment(seen, memoizer, *args)\n else:\n return self.newlinkage(seen, memoizer, *args)\n\n # HELPERS\n\n def increment(self, seen, memoizer, labels, count):\n '''Increments the counts on a singular linkage item'''\n\n linkage = memoizer.seen[seen]\n linkage['count'] += count\n\n if labels.frozen not in memoizer.frozen and bool(count):\n memoizer.frozen.add(labels.frozen)\n linkage['unique'] += 1\n\n return linkage\n\n def newlinkage(self, seen, memoizer, labels, count):\n '''Instantiate a new linkage instance'''\n\n linkage = seen.tolinkage(self.proteins, labels.frozen, count=count)\n 
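The `Memoizer` subclass above exists to give a namedtuple fresh mutable defaults per instance; plain defaults in the `__new__` signature would be evaluated once and shared, like ordinary mutable default arguments. The pattern in isolation:

```python
from collections import namedtuple

class Memo(namedtuple("Memo", "seen frozen")):
    """Namedtuple whose container fields default to fresh objects."""

    def __new__(cls, seen=None, frozen=None):
        if seen is None:
            seen = {}
        if frozen is None:
            frozen = set()
        return super().__new__(cls, seen, frozen)

a, b = Memo(), Memo()
a.seen["k"] = 1
assert b.seen == {}   # each instance got its own dict
```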
memoizer.seen[seen] = linkage\n memoizer.frozen.add(labels.frozen)\n\n return linkage\n","repo_name":"Alexhuszagh/XLDiscoverer","sub_path":"xldlib/export/transitions/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12595809611","text":"\"\"\"\n시작 시간: 2022년 3월 3일 오후 2시 30분\n소요 시간: 1시간 30분\n풀이 방법:\n 다음 링크 참고해서 풀이 확인\n https://velog.io/@seanlion/boj2110\n 거리를 하나하나 탐색하면서 라우터를 몇 개 설치할 수 있을지 확인해야 하는 문제\n 거리를 고정시키지 않아서 풀이 착안하지 못함\n 거리를 선형적으로 확인하지 말고, 지수적으로 확인할 수 있을지 생각해보기\n\"\"\"\nN, C = map(int, input().split())\ndata = []\nfor i in range(N):\n data.append(int(input()))\n \ndata.sort()\n \nstart = 1 # 공유기 사이 거리 최솟값\nend = data[-1] - data[0] # 공유기 사이 거리 최댓값\nans = []\n \nwhile start <= end:\n prev = data[0] # 첫번째 집의 위치값\n mid = (start + end) // 2 # 중간 간격\n count = 1 # 라우터 개수. 첫번째 집에 라우터 설치\n for i in range(1, N):\n if prev + mid <= data[i]: # 이전집에서 중간간격만큼 떨어진것보다 바깥쪽에 있다면\n prev = data[i] # 라우터 추가\n count += 1\n if count >= C: # 원하던 개수보다 크게 설치할 수 있다면\n start = mid + 1 # 중간 간격을 하나 더 키워\n ans.append(mid)\n else:\n end = mid - 1\n \nprint(max(ans))","repo_name":"yuna1212/algorithm","sub_path":"백준/이분 탐색/공유기 설치.py","file_name":"공유기 설치.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36657706285","text":"# la lectura por teclado almacena una cadena o string\n# para almacenar en otro tipo de datos\n# se requiere convertir los valores\ncantidad=input(\"Ingrese un valor numerico \")\n\n# print(f\"{cantidad}/2={cantidad/2}\")\n# esta operación genera un error por la diferencia del tipo de datos\n# para convertir una cadena en un entero\ncantidad=int(cantidad)\nprint(f\"{cantidad}/2={cantidad/2}\")\n\n# para almacenar un valor decimal se utiliza la función float\n\n# decimal=int(input(\"Ingrese un valor decimal\"))\ndecimal=float(input(\"Ingrese un valor decimal \"))\nprint(f\"El valor decimal es {decimal}\")\n\n\n","repo_name":"cjsarasty456/EjerciciosSena","sub_path":"Estudio/python/ejercicios/4conversionTipos.py","file_name":"4conversionTipos.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73086960362","text":"import json\nimport os\nfrom pathlib import Path\n\nimport pytest\n\n\n@pytest.fixture(scope=\"module\")\ndef api_client():\n from rest_framework.test import APIClient\n\n return APIClient()\n\n\n@pytest.fixture(autouse=True)\ndef force_authenticate(request, api_client):\n \"\"\"Automatically authenticate generated requests.\n\n Check ongoing test for the `as_user` or `as_other_user` marks. 
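The router-placement record above is "binary search on the answer": fix a candidate minimum gap, greedily check how many routers fit, and halve the search range each round instead of scanning gaps linearly (the trap the record's own comment describes). Condensed sketch, checked against the BOJ 2110 sample (expected answer 3):

```python
def max_min_gap(houses, c):
    houses.sort()
    lo, hi, best = 1, houses[-1] - houses[0], 0
    while lo <= hi:
        mid = (lo + hi) // 2            # candidate minimum gap
        count, prev = 1, houses[0]      # greedily place routers left to right
        for h in houses[1:]:
            if h - prev >= mid:
                count += 1
                prev = h
        if count >= c:                  # feasible: try an even larger gap
            best, lo = mid, mid + 1
        else:
            hi = mid - 1
    return best

assert max_min_gap([1, 2, 8, 4, 9], 3) == 3
```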
To use those marks,\n `user` and `other_user` must be available in the test scope.\n \"\"\"\n if request.node.get_closest_marker(\"as_user\"):\n user = request.getfixturevalue(\"user\")\n api_client.force_authenticate(user)\n elif request.node.get_closest_marker(\"as_other_user\"):\n other_user = request.getfixturevalue(\"other_user\")\n api_client.force_authenticate(other_user)\n\n\ndef load_export_data(path: Path) -> dict:\n with open(path) as f:\n return json.loads(f.read())\n\n\nMIMIC_CREDENTIALS = {\n \"host\": os.environ.get(\"MIMIC_HOST\", \"mimic\"),\n \"port\": int(os.environ.get(\"MIMIC_PORT\", 5432)),\n \"database\": os.environ.get(\"MIMIC_DB\", \"mimic\"),\n \"login\": os.environ.get(\"MIMIC_LOGIN\", \"mimic\"),\n \"password\": os.environ.get(\"MIMIC_PASSWORD\", \"mimic\"),\n \"model\": \"POSTGRES\",\n}\n\n\n@pytest.fixture\ndef mimic_credentials():\n return MIMIC_CREDENTIALS\n\n\ndef load_mapping(path: Path) -> dict:\n data = load_export_data(path)\n data[\"credential\"] = {**data[\"credential\"], **MIMIC_CREDENTIALS}\n return data\n","repo_name":"arkhn/fhir-river","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"19"} +{"seq_id":"6352454115","text":"import openai\n\n\ndef gpt_response(msg: str, length: int = 2048, temperature: float = 0.5) -> str:\n \"\"\"\n The function which provides connection with GPTChat and send back response\n\n :param str msg: your question\n :param int length: length of response gptchat, defaults to 2048\n :return str: response from gptchat\n \"\"\"\n try:\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=msg,\n temperature=temperature,\n max_tokens=length,\n )\n\n return response[\"choices\"][0][\"text\"]\n except Exception as e:\n return str(e)\n","repo_name":"bigbossguru/AI-HelperChatTelegramBot","sub_path":"aichattelegrambot/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"24650199950","text":"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\n#Exercice Non Linear\r\n\r\n\"\"\" ODE function \"\"\"\r\ndef f(t,y):\r\n return 1+y*y\r\n\r\n\"\"\" Euler Algorithm\"\"\"\r\ndef Euler(y0,a,b,nb):\r\n h=(b-a)/nb\r\n X=[a]\r\n Y=[y0]\r\n for i in range(1,nb):\r\n\r\n y0=Y[i-1]\r\n t0=X[i-1]\r\n t1=t0+h\r\n y1=y0+h*f(t0,y0)\r\n Y.append(y1)\r\n X.append(t1)\r\n return X,Y\r\n\r\ndef Runge2(y0,a,b,nb):\r\n h=(b-a)/nb\r\n t=a\r\n X=[a]\r\n Y=[y0]\r\n for i in range(1,nb):\r\n \r\n t=X[i-1]+h\r\n k1=Y[i-1]+(h/2)*f(X[i-1],Y[i-1])\r\n k2=f(X[i-1]+(h/2),k1)\r\n y2=Y[i-1]+h*k2\r\n \r\n Y.append(y2)\r\n X.append(t)\r\n return X,Y\r\n\r\n\r\ndef Runge4(y0,a,b,nb):\r\n h=(b-a)/nb\r\n t=a\r\n X=[a]\r\n Y=[y0]\r\n for i in range(1,nb):\r\n \r\n t=X[i-1]+h\r\n k1=f(X[i-1],Y[i-1])\r\n k2=f(X[i-1]+(h/2),k1*(h/2)+Y[i-1])\r\n k3=f(X[i-1]+(h/2),k2*(h/2)+Y[i-1])\r\n k4=f(X[i-1]+h,Y[i-1]+h*k3)\r\n y2=Y[i-1]+(h/6)*(k1+2*k2+2*k3+k4)\r\n \r\n Y.append(y2)\r\n X.append(t)\r\n return X,Y\r\n\r\ndef Adam(y0,a,b,nb):\r\n h=(b-a)/nb\r\n t1=a\r\n X=[a]\r\n Y=[y0]\r\n y1=y0\r\n #On doit calculer avec une méthode explicite la première itération\r\n y2=y1+h*f(X[0],Y[0])\r\n Y.append(y2)\r\n X.append(a+h)\r\n for i in range(2,nb):\r\n \r\n t=X[i-1]+h\r\n \r\n y3=Y[i-1]+(3*h/2)*f(X[i-1],Y[i-1])-(1/2)*h*f(X[i-2],Y[i-2])\r\n Y.append(y3)\r\n X.append(t)\r\n 
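The IVP solved in this record, y' = 1 + y² with y(0) = 0, has the closed-form solution y(t) = tan(t), so the integrators can be sanity-checked directly. A minimal version of the record's Euler loop against that exact solution:

```python
import math

def euler(f, y0, a, b, n):
    h = (b - a) / n
    t, y = a, y0
    for _ in range(n):
        y += h * f(t, y)
        t += h
    return y

approx = euler(lambda t, y: 1 + y * y, 0.0, 0.0, 1.0, 1000)
print(approx, math.tan(1.0))   # Euler's first-order O(h) error leaves a small gap
```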
return X,Y\r\n\r\n#Initialisation des paramètres\r\ny0= 0.0\r\na = 0.0\r\nb = 1.0\r\nniter = 150\r\n\r\n#Euler\r\nx,y = Euler(y0,a,b,niter)\r\nplt.plot(x,y,'r', label=r\"Euler method\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\n\r\n#RK2\r\nx,y = Runge2(y0,a,b,niter)\r\nplt.plot(x,y,'b', label=r\"Runge Kutta2\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\n\r\n#RK4\r\nx,y = Runge4(y0,a,b,niter)\r\nplt.plot(x,y,'g', label=r\"Runge Kutta4\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\n\r\n#Adams\r\nx_a,y_a = Adam(y0,a,b,niter)\r\nplt.plot(x_a,y_a,'k', label=r\"Adams\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\nplt.title(\"Comparaison des méthodes Equation Linéaire\")\r\nplt.grid()\r\nplt.show()\r\n\r\n \r\n\r\n#-----------Exercice Numerical Instability--------------\r\n\r\ndef Runge2_f(y0,a,b,nb,f): #Fonction avec une fonction en paramètre\r\n h=(b-a)/nb\r\n t=a\r\n X=[a]\r\n Y=[y0]\r\n y1=y0\r\n for i in range(1,nb):\r\n \r\n \r\n t=X[i-1]\r\n y1=Y[i-1]\r\n k1=y1+(h/2)*f(t,y1)\r\n k2=f(t+(h/2),k1)\r\n y2=y1+h*k2\r\n \r\n Y.append(y2)\r\n X.append(t+h)\r\n \r\n return X,Y\r\n\r\n\"\"\" ODE function \"\"\"\r\ndef f_1(t,y):\r\n return 3*y-4*math.exp(-t)\r\n\r\ny0= 1.0\r\na = 0.0\r\nb = 10\r\nniter = 100\r\n\r\nx_num,y_num = Runge2_f(y0,a,b,niter,f_1)\r\nplt.plot(x_num,y_num,'r', label=r\"Runge\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\n\r\ny_th=[y0]\r\nt_th=np.linspace(0,10,niter)\r\nfor j in range(1,len(t_th)):\r\n y_th.append(math.exp(-t_th[j]))\r\n\r\nplt.plot(t_th,y_th,'g', label=r\"Théo\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\nplt.title(\"Comparaison théorique/numérique exercice non linear 2\")\r\nplt.grid()\r\nplt.show()\r\n\r\n\r\n\"\"\"On observe que la méthode ne converge pas, en effet il nous faut augmenter fortement le nombre d'itération pour un même intervalle\r\npour que cette méthode converge (meme chose pour RK4)\r\nL'erreur peut aussi venir de la précision de nos flottants qui peuvent être trop faible\"\"\"\r\n\r\nerror=[]\r\nfor m_p in range(0,len(y_th)):\r\n error.append(math.exp(-x_num[m_p])-y_num[m_p])\r\n \r\nplt.semilogy(x_num,error,'g', label=r\"Théo\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\nplt.title(\"Comparaison théorique/numérique exercice non linear 2\")\r\nplt.grid()\r\nplt.show()\r\n#-------------- Exercice Non Linear 2------------------------\r\n\r\n\r\n\"\"\" Euler Vector Algorithm\"\"\"\r\n\r\ndef F(t,V,m,l,g,A):\r\n L=[V[1], (A*math.sin(t)/(m*l*l))-(g/l)*math.sin(V[0])]\r\n return np.array(L)\r\n\r\ndef Euler_1(V0,a,b,nb,m,l,g,A):\r\n h=(b-a)/nb\r\n t=a\r\n X=[a]\r\n Y=[V0[0]]\r\n V1=V0\r\n for i in range(1,nb):\r\n \r\n V2=V1+h*F(t,V1,m,l,g,A)\r\n t=t+h\r\n Y.append(V2[0])\r\n X.append(t)\r\n V1=V2\r\n return X,Y\r\n\r\ndef Runge2_1(V0,a,b,nb,m,l,g,A):\r\n h=(b-a)/nb\r\n t=a\r\n X=[a]\r\n Y=[V0[0]]\r\n V1=V0\r\n for i in range(1,nb):\r\n \r\n k1=V1+(h/2)*F(t,V1,m,l,g,A)\r\n k2=F(t+h/2,V1,m,l,g,A)\r\n V2=V1+h*F(t+h/2,V1+(h/2)*F(t,V1,m,l,g,A),m,l,g,A)\r\n t=t+h\r\n Y.append(V2[0])\r\n X.append(t)\r\n V1=V2\r\n return X,Y\r\n\r\ndef Runge4_1(V0,a,b,nb,m,l,g,A):\r\n h=(b-a)/nb\r\n t=a\r\n X=[a]\r\n Y=[V0[0]]\r\n 
V1=V0\r\n for i in range(1,nb):\r\n \r\n k1=F(t,V1,m,l,g,A)\r\n k2=F(t+h/2,V1+(h/2)*k1,m,l,g,A)\r\n k3=F(t+h/2,V1+(h/2)*k2,m,l,g,A)\r\n k4=F(t+h,V1+h*k2,m,l,g,A)\r\n \r\n V2=V1+(h/6)*(k1+2*k2+2*k3+k4)\r\n t=t+h\r\n Y.append(V2[0])\r\n X.append(t)\r\n V1=V2\r\n return X,Y\r\n\r\n#Initialisation des paramètres\r\nteta0=0\r\nteta_p0=1\r\nT=20\r\nm=0.1\r\nl=0.3\r\nA=0.1\r\ng=9.81\r\n\r\n#Initialisation du Vecteur\r\nV0=[teta0,teta_p0]\r\n\r\na = 0.0\r\nb = T\r\nniter = 1000\r\n\r\n\r\n#Tracer des données\r\nx,y = Euler_1(V0,a,b,niter,m,l,g,A)\r\nplt.plot(x,y,'k', label=r\"Pendule_Euler\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\n\r\nx,y = Runge2_1(V0,a,b,niter,m,l,g,A)\r\nplt.plot(x,y,'r', label=r\"Pendule_Runge2\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\n\r\nx,y = Runge4_1(V0,a,b,niter,m,l,g,A)\r\nplt.plot(x,y,'b', label=r\"Pendule_Runge4\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\n\r\nplt.ylim(-1,1)\r\nplt.xlim(0,20)\r\nplt.title(\"Comparaison des méthodes pendule\")\r\nplt.grid()\r\nplt.show()\r\n\r\n\"\"\" En augmentant le nombre d'itération, on peut permettre à la méthode d'Euler de converger\"\"\"\r\n\r\n\r\n#----------------Exercice Mass Spring -------------------\r\n\r\n\"\"\" Euler Vector Algorithm\"\"\"\r\n\r\ndef F2(t,V,m,l,g,mu,k,V_p):\r\n if V_p<0:\r\n L=[V[1], mu*g-(k/m)*V[0]]\r\n else:\r\n L=[V[1], -mu*g-(k/m)*V[0]] \r\n return np.array(L)\r\n\r\ndef Runge2_2(V0,a,b,nb,m,l,g,mu,k):\r\n h=(b-a)/nb\r\n t=a\r\n X=[a]\r\n Y=[V0[0]]\r\n V_p=V0[0]\r\n V1=V0\r\n for i in range(1,nb):\r\n \r\n k1=V1+(h/2)*F2(t,V1,m,l,g,mu,k,V_p)\r\n k2=F2(t+h/2,V1,m,l,g,mu,k,V_p)\r\n \r\n V2=V1+h*F2(t+h/2,V1+(h/2)*F2(t,V1,m,l,g,mu,k,V_p),m,l,g,mu,k,V_p)\r\n t=t+h\r\n Y.append(V2[0])\r\n X.append(t)\r\n V1=V2\r\n V_p=V2[1]\r\n return X,Y\r\n\r\n#Initialisation des paramètres\r\nY_0=0.1\r\nY_p0=0\r\nk=3000\r\nmu=0.5\r\nm=6.0\r\nV0=[Y_0,Y_p0]\r\na = 0.0\r\nb = 5\r\nniter = 10000\r\n\r\nxms,yms = Runge2_2(V0,a,b,niter,m,l,g,mu,k)\r\nplt.plot(xms,yms,'b', label=r\"Mass Spring\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\nplt.title(\"Déplacement en fonction du temps - Mass Spring\")\r\nplt.grid()\r\nplt.show()\r\n#On trouve que le prochain pic se trouve à y(562)=0,06076 or nous avions bien y0-4mu*m*g/k=0,06076\r\n\r\n\r\n\r\n#------- Iron Block-------------------------\r\n\r\n\"\"\" ODE function \"\"\"\r\n\r\ndef F3(t,V,c,m,k):\r\n L=[V[1], -(k/m)*V[0]+c/(m*V[0]*V[0])]\r\n return np.array(L)\r\n\r\ndef Runge2_3(V0,a,b,nb,c,m,k):\r\n h=(b-a)/nb\r\n t=a\r\n T=[a]\r\n X=[V0[0]]\r\n V1=V0\r\n for i in range(1,nb):\r\n \r\n k1=V1+(h/2)*F3(t,V1,c,m,k)\r\n k2=F3(t+h/2,V1,c,m,k)\r\n \r\n V2=V1+h*F3(t+h/2,V1+(h/2)*F3(t,V1,c,m,k),c,m,k)\r\n t=t+h\r\n X.append(V2[0])\r\n T.append(t)\r\n V1=V2\r\n return T,X\r\n\r\n\r\n#Initialisation \r\nk=120\r\nm=1.0\r\nL=0.2\r\nc=5.0\r\nx0=L\r\nv_p0=0\r\nV0=[x0,v_p0]\r\n\r\n\r\na = 0.0\r\nb = 1\r\nniter = 10000\r\n\r\n#Traçage des données\r\nt,x = Runge2_3(V0,a,b,niter,c,m,k)\r\nplt.plot(t,x,'b', label=r\"Iron block\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\nplt.title(\"Déplacement en fonction du temps - Iron Block\")\r\nplt.grid()\r\nplt.show()\r\n\r\n\r\n#Amplitude=0.35\r\nAmp=max(x)-min(x)\r\n\r\n#Période : On trouve T=0.33s\r\n\r\n\r\n\r\n#---------- 
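A generic RK4 step over a state vector is the scheme the pendulum solvers above apply to V = [θ, θ']. (Note in passing that `Runge4_1`'s k4 stage feeds `k2` back into F where the classical tableau uses `k3`, so it is not the textbook fourth-order scheme.) A compact reference version:

```python
import numpy as np

def rk4_step(f, t, v, h):
    k1 = f(t, v)
    k2 = f(t + h / 2, v + (h / 2) * k1)
    k3 = f(t + h / 2, v + (h / 2) * k2)
    k4 = f(t + h, v + h * k3)            # classical RK4 uses k3 here, not k2
    return v + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)

# Undamped pendulum theta'' = -(g/l)*sin(theta), state v = [theta, theta_dot]
g, l, h = 9.81, 0.3, 0.02
f = lambda t, v: np.array([v[1], -(g / l) * np.sin(v[0])])
v = np.array([0.0, 1.0])
for i in range(1000):
    v = rk4_step(f, i * h, v, h)
```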
Magnus--------------------\r\n\r\n\r\n\"\"\" Fonction pour les vitesses \"\"\"\r\n\r\ndef F4x(t,vx,vy,vz,k,P,W):\r\n return -k*math.sqrt(vx*vx+vy*vy+vz*vz)*vx-P*W[2]*vy\r\n\r\ndef F4y(t,vx,vy,vz,k,P,W,m,g):\r\n return -k*math.sqrt(vx*vx+vy*vy+vz*vz)*vy+P*W[2]*vz-m*g\r\n\r\ndef F4z(t,vx,vy,vz,k,P,W,m,g):\r\n return -k*math.sqrt(vx*vx+vy*vy+vz*vz)*vz\r\n\r\n\"\"\" Fonction pour les déplacements \"\"\"\r\n\r\ndef f4(t,vx):\r\n return vx\r\n\r\n#RK4\r\n \r\ndef Runge4_mag(V0,a,b,nb,k,P,W,m,g,x0):\r\n \r\n #Initialisation des paramètres\r\n h=(b-a)/nb\r\n t=a\r\n T=[a]\r\n Vx=[V0[0]]\r\n Vy=[V0[1]]\r\n Vz=[V0[2]]\r\n x=[x0[0]]\r\n y=[x0[0]]\r\n z=[x0[0]]\r\n \r\n #On va résoudre axe par axe, pour ensuite les réutiliser en incrémentant\r\n \r\n for i in range(1,nb):\r\n \r\n t=T[i-1]+h\r\n \r\n k1=F4x(T[i-1],Vx[i-1],Vy[i-1],Vz[i-1],k,P,W)\r\n k2=F4x(T[i-1]+(h/2),k1*(h/2)+Vx[i-1],Vy[i-1],Vz[i-1],k,P,W)\r\n k3=F4x(T[i-1]+(h/2),k2*(h/2)+Vx[i-1],Vy[i-1],Vz[i-1],k,P,W)\r\n k4=F4x(T[i-1]+h,Vx[i-1]+h*k3,Vy[i-1],Vz[i-1],k,P,W)\r\n Vx2=Vx[i-1]+(h/6)*(k1+2*k2+2*k3+k4)\r\n \r\n \r\n k1=F4y(T[i-1],Vx[i-1],Vy[i-1],Vz[i-1],k,P,W,m,g)\r\n k2=F4y(T[i-1]+(h/2),Vx[i-1],k1*(h/2)+Vy[i-1],Vz[i-1],k,P,W,m,g)\r\n k3=F4y(T[i-1]+(h/2),Vx[i-1],k2*(h/2)+Vy[i-1],Vz[i-1],k,P,W,m,g)\r\n k4=F4y(T[i-1]+h,Vx[i-1],Vy[i-1]+h*k3,Vz[i-1],k,P,W,m,g)\r\n Vy2=Vy[i-1]+(h/6)*(k1+2*k2+2*k3+k4)\r\n \r\n \r\n k1=F4z(T[i-1],Vx[i-1],Vy[i-1],Vz[i-1],k,P,W,m,g)\r\n k2=F4z(T[i-1]+(h/2),Vx[i-1],Vy[i-1],k1*(h/2)+Vz[i-1],k,P,W,m,g)\r\n k3=F4z(T[i-1]+(h/2),Vx[i-1],Vy[i-1],k2*(h/2)+Vz[i-1],k,P,W,m,g)\r\n k4=F4z(T[i-1]+h,Vx[i-1],Vy[i-1],Vz[i-1]+h*k3,k,P,W,m,g)\r\n Vz2=Vz[i-1]+(h/6)*(k1+2*k2+2*k3+k4)\r\n \r\n Vz.append(Vz2)\r\n Vx.append(Vx2)\r\n Vy.append(Vy2)\r\n T.append(t)\r\n \r\n #on détermine la position en réalisant à nouveau RK4\r\n \r\n k1=f4(T[i-1],Vx[i-1])\r\n k2=f4(T[i-1]+(h/2),k1*(h/2)+Vx[i-1])\r\n k3=f4(T[i-1]+(h/2),k2*(h/2)+Vx[i-1])\r\n k4=f4(T[i-1]+h,Vx[i-1]+h*k3)\r\n x2=x[i-1]+(h/6)*(k1+2*k2+2*k3+k4)\r\n x.append(x2)\r\n \r\n k1=f4(T[i-1],Vy[i-1])\r\n k2=f4(T[i-1]+(h/2),k1*(h/2)+Vy[i-1])\r\n k3=f4(T[i-1]+(h/2),k2*(h/2)+Vy[i-1])\r\n k4=f4(T[i-1]+h,Vy[i-1]+h*k3)\r\n y2=y[i-1]+(h/6)*(k1+2*k2+2*k3+k4)\r\n y.append(y2)\r\n \r\n k1=f4(T[i-1],Vz[i-1])\r\n k2=f4(T[i-1]+(h/2),k1*(h/2)+Vz[i-1])\r\n k3=f4(T[i-1]+(h/2),k2*(h/2)+Vz[i-1])\r\n k4=f4(T[i-1]+h,Vz[i-1]+h*k3)\r\n z2=z[i-1]+(h/6)*(k1+2*k2+2*k3+k4)\r\n z.append(z2)\r\n \r\n return T,Vx,Vy,Vz,x,y,z\r\n\r\n#Adams\r\n \r\n\r\ndef Adams4_mag(V0,a,b,nb,k,P,W,m,g,x0):\r\n \r\n #Initialisation des paramètres\r\n h=(b-a)/nb\r\n t=a\r\n T=[a,a+h]\r\n Vx=[V0[0]]\r\n Vy=[V0[1]]\r\n Vz=[V0[2]]\r\n x=[x0[0]]\r\n y=[x0[0]]\r\n z=[x0[0]]\r\n \r\n #On calcul le deuxième termes à l'aide de la méthode RK4 pour obtenir la meilleure précision et ainsi les comparer\r\n k1=F4x(T[0],Vx[0],Vy[0],Vz[0],k,P,W)\r\n k2=F4x(T[0]+(h/2),k1*(h/2)+Vx[0],Vy[0],Vz[0],k,P,W)\r\n k3=F4x(T[0]+(h/2),k2*(h/2)+Vx[0],Vy[0],Vz[0],k,P,W)\r\n k4=F4x(T[0]+h,Vx[0]+h*k3,Vy[0],Vz[0],k,P,W)\r\n Vx2=Vx[0]+(h/6)*(k1+2*k2+2*k3+k4) \r\n Vx.append(Vx2)\r\n \r\n k1=F4y(T[0],Vx[0],Vy[0],Vz[0],k,P,W,m,g)\r\n k2=F4y(T[0]+(h/2),Vx[0],k1*(h/2)+Vy[0],Vz[0],k,P,W,m,g)\r\n k3=F4y(T[0]+(h/2),Vx[0],k2*(h/2)+Vy[0],Vz[0],k,P,W,m,g)\r\n k4=F4y(T[0]+h,Vx[0],Vy[0]+h*k3,Vz[0],k,P,W,m,g)\r\n Vy2=Vy[0]+(h/6)*(k1+2*k2+2*k3+k4)\r\n Vy.append(Vy2)\r\n \r\n k1=F4z(T[0],Vx[0],Vy[0],Vz[0],k,P,W,m,g)\r\n k2=F4z(T[0]+(h/2),Vx[0],Vy[0],k1*(h/2)+Vz[0],k,P,W,m,g)\r\n k3=F4z(T[0]+(h/2),Vx[0],Vy[0],k2*(h/2)+Vz[0],k,P,W,m,g)\r\n 
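Both the scalar `Adam()` earlier in this record and the `Adams4_mag` routine being assembled here use the explicit two-step Adams-Bashforth update, y[n+1] = y[n] + h·(3/2·f(t[n], y[n]) − 1/2·f(t[n−1], y[n−1])), which needs one bootstrap step (Euler in `Adam`, RK4 in `Adams4_mag`) before the multistep recurrence can start. In general form:

```python
def adams_bashforth2(f, y0, a, b, n):
    h = (b - a) / n
    ts, ys = [a, a + h], [y0, y0 + h * f(a, y0)]   # Euler bootstrap for y[1]
    for i in range(1, n):
        ys.append(ys[i] + h * (1.5 * f(ts[i], ys[i])
                               - 0.5 * f(ts[i - 1], ys[i - 1])))
        ts.append(ts[i] + h)
    return ts, ys
```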
k4=F4z(T[0]+h,Vx[0],Vy[0],Vz[0]+h*k3,k,P,W,m,g)\r\n Vz2=Vz[0]+(h/6)*(k1+2*k2+2*k3+k4)\r\n Vz.append(Vz2)\r\n \r\n #Même chose pour les positions\r\n\r\n \r\n #On va résoudre axe par axe, pour ensuite les réutiliser en incrémentant\r\n \r\n for i in range(2,nb):\r\n \r\n t=T[i-1]+h\r\n Vx3=Vx[i-1]+(3*h/2)*F4x(T[i-1],Vx[i-1],Vy[i-1],Vz[i-1],k,P,W)-(1/2)*h*F4x(T[i-2],Vx[i-2],Vy[i-2],Vz[i-2],k,P,W)\r\n Vy3=Vy[i-1]+(3*h/2)*F4y(T[i-1],Vx[i-1],Vy[i-1],Vz[i-1],k,P,W,m,g)-(1/2)*h*F4y(T[i-2],Vx[i-2],Vy[i-2],Vz[i-2],k,P,W,m,g)\r\n Vz3=Vz[i-1]+(3*h/2)*F4z(T[i-1],Vx[i-1],Vy[i-1],Vz[i-1],k,P,W,m,g)-(1/2)*h*F4z(T[i-2],Vx[i-2],Vy[i-2],Vz[i-2],k,P,W,m,g)\r\n \r\n Vz.append(Vz3)\r\n Vx.append(Vx3)\r\n Vy.append(Vy3)\r\n T.append(t)\r\n \r\n return T,Vx,Vy,Vz\r\n\r\n\r\n#Initialiation des paramètres\r\nm=0.430\r\nrho=1.2\r\nR=0.11\r\nalpha=(math.pi)/2\r\nCf=0.45\r\nP=1/(2*m)*math.pi*rho*R**3*math.sin(alpha)\r\nk=1/(2*m)*Cf*math.pi*R**2*rho\r\nVx0=0\r\nVy0=0\r\nVz0=0\r\nV0=[Vx0,Vy0,Vy0]\r\nWx=0\r\nWy=0\r\nWz=10\r\nW=[Wx,Wy,Wz]\r\nX0=[0,0,0]\r\n\r\na = 0.0\r\nb = 10\r\nniter = 10000\r\n\r\n#Comparaison des deux méthodes\r\n\r\nt,Vx,Vy,Vz,x,y,z = Runge4_mag(V0,a,b,niter,k,P,W,m,g,X0)\r\nplt.plot(t,Vx,'r', label=r\"Vx RK4\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\n\r\nt,Vx,Vy,Vz = Adams4_mag(V0,a,b,niter,k,P,W,m,g,X0)\r\nplt.plot(t,Vx,'b', label=r\"Vx Adams\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\nplt.title(\"Comparaison des méthodes - MAgnus\")\r\nplt.grid()\r\nplt.show()\r\n\r\n#On observe que les deux méthodes semblent converger de la même manière\r\n\r\n#On regarde la NOrme\r\n\r\nOM=[]\r\nfor j in range(0,len(x)):\r\n OM.append(math.sqrt(x[j]*x[j]+y[j]*y[j]+z[j]*z[j]))\r\n \r\nplt.plot(t,OM,'m', label=r\"Norme des vecteurs déplacements RK4\", linestyle='--')\r\nplt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.95),fancybox=True, shadow=True)\r\nplt.title(\"Déplacement en fonction du temps - MAgnus\")\r\nplt.grid()\r\n\r\nplt.show()\r\n","repo_name":"Yannis-98/Problem-solving-method","sub_path":"EDO and EDP/MONTREUIL_YANNIS_TS22.py","file_name":"MONTREUIL_YANNIS_TS22.py","file_ext":"py","file_size_in_byte":14343,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6598495039","text":"# created by Sonder on 2020.09.19\n\ndef insertion_sort_gap(li, gap):\n for i in range(gap, len(li)): # i: the index of the new number\n temp = li[i]\n j = i - gap\n while j >= 0 and li[j] > temp:\n li[j + gap] = li[j]\n j = j - gap\n li[j + gap] = temp\n return li\n\ndef shell_sort(li):\n d = len(li)\n while d >= 1:\n insertion_sort_gap(li, d)\n d = d // 2\n\nif __name__ == '__main__':\n lis = [3,2,4,1,5,7,9,6,8]\n shell_sort(lis)\n print(lis)\n","repo_name":"Sonder-y/python100","sub_path":"Algorithm/Sort/Shell_sort.py","file_name":"Shell_sort.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27073053440","text":"import time\nimport logging\nfrom sputnik import sputnik_init\n\ndebug_logging_config = {\n 'log_slow' : False,\n 'log_slow_time' : 500,\n 'log_function' : {\n 'all' : True,\n 'flowpath' : {\n 'all' : True,\n 'flowpath' : True,\n 'logic' : True,\n 'service' : True,\n 'db' : True,\n 'cache' : True\n },\n 'perf' : {\n 'all' : True,\n 'perf' : True,\n 'func' : True,\n 'service' : True,\n 'db' : True,\n 'cache' : True\n }\n }\n 
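The shell sort record above runs gapped insertion sort with Shell's original sequence (note its first call uses gap = len(li), which makes `range(gap, len(li))` empty, so the first effective gap is len//2); the final gap-1 pass is plain insertion sort, which is what guarantees a fully sorted result. A compact self-contained version with a check:

```python
def shell_sort(li):
    gap = len(li) // 2
    while gap >= 1:
        for i in range(gap, len(li)):        # insertion sort over each gap-chain
            temp, j = li[i], i - gap
            while j >= 0 and li[j] > temp:
                li[j + gap] = li[j]
                j -= gap
            li[j + gap] = temp
        gap //= 2

data = [3, 2, 4, 1, 5, 7, 9, 6, 8]
shell_sort(data)
assert data == [1, 2, 3, 4, 5, 6, 7, 8, 9]
```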
}\n\n\nsputnik_init(debug_logging_config)\n\nlogging.getLogger().setLevel(logging.DEBUG)\n\nfrom SpuCacheMonitor import SpuCacheMMConsumer, SpuCacheMonitor\n\ncm = SpuCacheMonitor('tcp://*:2222', SpuCacheMMConsumer('tcp://*:5555'))\ncm.start_monitor_thread()\n\ni = 0\nwhile True:\n logging.debug('main %s', i)\n i += 1\n time.sleep(1)\n","repo_name":"errord/sputnik","sub_path":"example/cache_monitor/cache_monitor_sub.py","file_name":"cache_monitor_sub.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38388186048","text":"def divisors(num):\n divisors = []\n for i in range(1, num + 1):\n if num % i == 0:\n divisors.append(i)\n return divisors\n\n\ndef run():\n try:\n num = int(input(\"Ingresa un número: \")) #user type a number\n assert num > 0, \"Debes ingresar un número positivo\"\n print(divisors(num))\n except AssertionError as AE:\n print(AE)\n run()\n\nif __name__ == '__main__':\n run()","repo_name":"MiguelR93/Python","sub_path":"divisors.py","file_name":"divisors.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19841854706","text":"import os\nimport json\nfrom Validation.evaluation import extract_routers_by_node\n\n\ndef evaluate_rounds(rounds, rounds_dir):\n for round_file in os.listdir(rounds_dir):\n if os.stat(rounds_dir + round_file).st_size == 0:\n continue\n split = round_file.split(\"_\")\n prefix = \"\"\n for i in range(0, len(split) - 1):\n prefix += split[i]\n prefix += \"_\"\n iteration = int(split[-1])\n if iteration not in rounds:\n rounds[iteration] = []\n\n i = 1\n next_iteration_file = prefix + str(iteration + i)\n is_no_alias = False\n while os.path.exists(rounds_dir + next_iteration_file):\n if os.stat(rounds_dir + next_iteration_file).st_size == 0:\n is_no_alias = True\n break\n i += 1\n next_iteration_file = prefix + str(iteration + i)\n if is_no_alias:\n continue\n predicted_aliases = 0\n with open(rounds_dir + round_file) as f:\n for line in f:\n predicted_aliases += 1\n if iteration > 3:\n iteration = 3\n # if iteration == 3:\n # continue\n rounds[iteration].append(predicted_aliases)\n\n\nif __name__ == \"__main__\":\n\n \"\"\"\n This script evaluates the number of rounds necessary on Limited Ltd. 
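The `divisors()` in the record above trial-divides every integer up to num, which is O(n); the standard refinement walks only to √num and records both members of each factor pair, keeping the output sorted:

```python
def divisors(num):
    small, large = [], []
    i = 1
    while i * i <= num:
        if num % i == 0:
            small.append(i)
            if i != num // i:           # avoid duplicating a perfect-square root
                large.append(num // i)
        i += 1
    return small + large[::-1]

assert divisors(36) == [1, 2, 3, 4, 6, 9, 12, 18, 36]
```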
algorithm.\n \"\"\"\n\n rounds_dir_v4 = \"resources/results/survey/aliases/v4/ple41.planet-lab.eu/\"\n rounds_dir_v6 = \"resources/results/survey/aliases/v6/ple2.planet-lab.eu/\"\n\n rounds = {}\n evaluate_rounds(rounds, rounds_dir_v4)\n # evaluate_rounds(rounds, rounds_dir_v6)\n\n print(rounds)\n node = \"ple41.planet-lab.eu\"\n midar_routers = extract_routers_by_node(\n \"/home/kevin/mda-lite-v6-survey/resources/midar/batch2/routers/\"\n )[node]\n rounds[\"midar\"] = []\n for router_id, router in midar_routers.items():\n rounds[\"midar\"].append(len(router))\n with open(\n \"/home/kevin/icmp-rate-limiting-paper/resources/multiple_rounds.json\", \"w\"\n ) as f:\n json.dump(rounds, f)\n","repo_name":"dioptra-io/icmp-rate-limiting-classifier","sub_path":"Validation/multiple_rounds.py","file_name":"multiple_rounds.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5659937825","text":"# Importing necessary libraries\nimport zipfile\nimport re\n\n# Now the hint in the source code was zip.\n# So, when we change the url to channel.zip we get a\n# zip file downloaded which contains many txt files each\n# linking to another file. Now the target is to open the\n# first txt file and follow the numbers till we reach the end.\n# Read the readme.txt file for instructions.\n\n# This is the first file we start with (from readme.txt)\nnextNothing = \"90052\"\ncurStr = \"\"\n\n# Assigning the open zipfile to a variable\narchive = zipfile.ZipFile(\"external/channel.zip\", \"r\")\n\n# Collecting the comments in an array (Read further for clarification)\ncomments = []\n\n# Accessing every file till we get to the end\nwhile True:\n try:\n # Opening the txt file in the archive and decoding into a string\n curStr = archive.read(nextNothing + \".txt\").decode(\"utf-8\")\n print(curStr)\n\n # Getting the comment for every file and appending into a list.\n # The reason for getting the comment is because the last\n # file said to collect all the comments. Every zip file\n # can contain a comment for the file in archive. 
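Condensing the archive walk described above into one loop: read a member, follow the trailing number to the next member, and collect each member's ZIP comment along the way. The `channel.zip` path and the `90052` start value come from the record itself; the record's broad `except:` is narrowed here to the `KeyError` that `ZipFile.read` raises for a missing member:

```python
import zipfile

with zipfile.ZipFile("external/channel.zip") as archive:
    nothing, comments = "90052", []
    while True:
        name = nothing + ".txt"
        try:
            text = archive.read(name).decode("utf-8")
        except KeyError:                 # no such member: the chain has ended
            break
        comments.append(archive.getinfo(name).comment.decode("utf-8"))
        nothing = text.split()[-1]       # last token points to the next file
print("".join(comments))
```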
So we get\n # the comment and append to a list\n curComment = archive.getinfo(nextNothing + \".txt\").comment\n comments.append(curComment.decode(\"utf-8\"))\n # Getting the next file path\n nextNothing = curStr.split()[-1]\n except:\n # We break the file search when we reach the end which is\n # signified by the file not being found when we try to open it\n break\n\n# Since the obained comments look like a design, we append and print\nprint(\"\".join(comments))\n","repo_name":"Coolchirutha/python-challenge","sub_path":"6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34397595178","text":"# -*- coding: UTF-8 -*-\n\"\"\"Citrix Codec - citrix password encoding.\n\nThis codec:\n- en/decodes strings from str to str\n- en/decodes strings from bytes to bytes\n- decodes file content to str (read)\n- encodes file content from str to bytes (write)\n\nReference: https://crypto.interactive-maths.com/atbash-cipher.html\n\"\"\"\nfrom ..__common__ import *\n\n\n__examples__ = {\n 'enc(citrix-ctx0)': None,\n 'enc(citrix|citrix-1|citrix_ctx1)': {'this is a test': \"NBBMNAAGIDEPJJBMNIFNIMEMJKEL\"},\n}\n__guess__ = [\"citrix-ctx1\"]\n\n\n_dec = lambda g: ((ord(g[0]) - 0x41) & 0xf) ^ ((((ord(g[1]) - 0x41) & 0xf) << 4) & 0xf0)\n_enc = lambda o: chr(((o >> 4) & 0xf) + 0x41) + chr((o & 0xf) + 0x41)\n\n\ndef citrix_encode(t):\n def encode(text, errors=\"strict\"):\n l = len(text)\n r, x = \"\", 0\n for c in text:\n x = ord(c) ^ 0xa5 ^ x\n r += _enc(x)\n return r, l\n return encode\n\n\ndef citrix_decode(t):\n def decode(text, errors=\"strict\"):\n l = len(text)\n text = text[::-1]\n r = \"\"\n for i in range(0, l, 2):\n x = 0 if i + 2 >= l else _dec(text[i+2:i+4])\n x ^= _dec(text[i:i+2]) ^ 0xa5\n r += chr(x)\n return r[::-1], l\n return decode\n\n\nadd(\"citrix\", citrix_encode, citrix_decode, r\"citrix(|[-_]?(?:ctx)?1)$\", entropy=4., printables_rate=1.,\n expansion_factor=2.)\n\n","repo_name":"dhondta/python-codext","sub_path":"src/codext/crypto/citrix.py","file_name":"citrix.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":239,"dataset":"github-code","pt":"19"} +{"seq_id":"3165675750","text":"from django.db import connections\nfrom django.db.migrations.executor import MigrationExecutor\nfrom django.http import HttpResponse\n\n\ndef health_check(request, **kwargs):\n \"\"\"\n Returns 503 if any database has a migration that has not been executed,\n returns 200 on any other case.\n \"\"\"\n plans = [\n executor.migration_plan(executor.loader.graph.leaf_nodes())\n for executor in [\n MigrationExecutor(connections[connection_name])\n for connection_name in connections\n ]\n ]\n status = 503 if any(plans) else 200\n return HttpResponse(status=status)\n","repo_name":"Arcoirisky/Capstone_Backend","sub_path":"backend/common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"33224271152","text":"from osv import fields,osv,orm\nfrom openerp import netsvc\nfrom openerp.addons.dm_base.utils import set_seq_o2m\nimport openerp.addons.decimal_precision as dp\n\nclass sale_order(osv.osv):\n _inherit=\"sale.order\"\n \n def _order_line_with_configs(self, cr, uid, ids, field_names, args, context=None):\n res = {}\n for order in self.browse(cr, uid, ids, context=context):\n design_line_ids = [line.id for line in 
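The Citrix codec above chains each plaintext byte through x = ord(c) ^ 0xA5 ^ x_prev and spells the result as two letters, one per nibble, offset from 'A'. A standalone encoder reproducing the record's own declared test vector:

```python
def ctx1_encode(text):
    out, x = "", 0
    for c in text:
        x = ord(c) ^ 0xA5 ^ x                 # chain: each byte mixes in the last
        out += chr(((x >> 4) & 0xF) + 0x41)   # high nibble -> 'A'..'P'
        out += chr((x & 0xF) + 0x41)          # low nibble  -> 'A'..'P'
    return out

assert ctx1_encode("this is a test") == "NBBMNAAGIDEPJJBMNIFNIMEMJKEL"
```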
order.order_line if line.mto_design_id]\n res[order.id] = design_line_ids\n return res\n \n _columns={\n #used by PDF \n 'order_line_with_config': fields.function(_order_line_with_configs, type='one2many', relation='sale.order.line', fields_id='order_id', string='Lines with Configuration')\n } \n \nclass sale_order_line(osv.osv):\n _inherit = 'sale.order.line'\n _columns = {\n 'mto_design_id': fields.many2one('mto.design', 'Configuration'), \n #config changing dummy field\n 'config_changed':fields.function(lambda *a,**k:{}, type='boolean',string=\"Config Changed\",), \n }\n \n def copy_data(self, cr, uid, id, default=None, context=None):\n if not default:\n default = {}\n default.update({\n 'mto_design_id': None,\n }) \n return super(sale_order_line, self).copy_data(cr, uid, id, default, context=context) \n \n def onchange_config(self, cr, uid, ids, config_id, context=None):\n val= {}\n if config_id:\n config = self.pool.get('mto.design').browse(cr, uid, config_id, context=context)\n val = {'product_id':config.product_id.id,\n 'price_unit':config.list_price,\n 'th_weight':config.weight,\n 'name':'%s(%s)'%(config.product_id.name, config.name),\n 'config_changed':True} \n return {'value':val}\n \n def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,\n uom=False, qty_uos=0, uos=False, name='', partner_id=False,\n lang=False, update_tax=True, date_order=False, packaging=False,\n fiscal_position=False, flag=False, context=None):\n\n res=super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty,\n uom, qty_uos, uos, name, partner_id,\n lang, update_tax, date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)\n \n if context.get('config_changed'):\n #if the product changing is triggered by the config changing, then do not change the price and weight\n fields_remove = ['price_unit', 'th_weight', 'name']\n for field in fields_remove:\n if res['value'].has_key(field):\n res['value'].pop(field)\n res['value']['config_changed'] = False\n \n return res\n \n def create(self, cr, uid, vals, context=None):\n new_id = super(sale_order_line, self).create(cr, uid, vals, context)\n #auto copy the common mto design to a new design\n line = self.browse(cr, uid, new_id, context=context)\n if line.mto_design_id and line.mto_design_id.type == 'common':\n config_obj = self.pool.get('mto.design')\n name = '%s-%s-%s'%(line.mto_design_id.name, line.order_id.name, line.sequence)\n config_new_id = config_obj.copy(cr, uid, line.mto_design_id.id, context=context)\n config_obj.write(cr, uid, config_new_id, {'name':name, 'type':'sale'}, context=context)\n self.write(cr, uid, line.id, {'mto_design_id': config_new_id}, context=context)\n return new_id\n \n def write(self, cr, uid, ids, vals, context=None):\n if isinstance(ids, (int, long)):\n ids = [ids]\n config_old_datas = self.read(cr, uid, ids, ['mto_design_id'], context=context) \n resu = super(sale_order_line, self).write(cr, uid, ids, vals, context=context)\n #deal the mto_design_id \n if 'mto_design_id' in vals:\n lines = self.browse(cr, uid, ids, context)\n config_olds = {}\n for config in config_old_datas:\n config_old_id = config['mto_design_id'] and config['mto_design_id'][0] or None\n config_olds[config['id']] = config_old_id\n config_obj = self.pool.get('mto.design')\n for line in lines:\n config_old_id = config_olds[line.id]\n #clear config, #assign new config, #change config\n if not line.mto_design_id or not config_old_id or line.mto_design_id.id != config_old_id:\n if 
config_old_id:\n #if old config is for sale, then delete it\n config_old_type = config_obj.read(cr, uid, config_old_id, ['type'], context=context) \n if config_old_type['type'] == 'sale':\n config_obj.unlink(cr, uid, config_old_id, context=context)\n #if new config is common, then do copy\n if line.mto_design_id and line.mto_design_id.type == 'common': \n context['default_type'] = 'sale'\n config_new_id = config_obj.copy(cr, uid, line.mto_design_id.id, context=context)\n name = '%s-%s-%s'%(line.mto_design_id.name, line.order_id.name, line.sequence)\n config_obj.write(cr, uid, config_new_id, {'name':name, 'type':'sale'}, context=context) \n self.write(cr, uid, line.id, {'mto_design_id': config_new_id})\n \n return resu\n \n def edit_config(self, cr, uid, ids, context=None):\n if not context.get('config_id'):\n return False\n return self.pool.get('mto.design').open_designment(cr, uid, context.get('config_id'), context=context) \n \nclass mto_design(osv.osv):\n _inherit = \"mto.design\"\n def _so_line_id(self, cr, uid, ids, field_names, args, context=None):\n res = dict.fromkeys(ids,None)\n for id in ids:\n so_ids = self.pool.get('sale.order.line').search(cr, uid, [('mto_design_id','=',id)])\n if so_ids:\n res[id] = so_ids[0]\n return res\n _columns = {'so_line_id': fields.function(_so_line_id, string='SO Line', type='many2one', relation='sale.order.line', store=True)}\n ","repo_name":"newmooncn/dm","sub_path":"dmp_sale_mto/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73623324204","text":"from django.contrib import admin\nfrom django.contrib.admin.views.main import ChangeList\n\nfrom teams.admin import ResultsAdminMixin\n\nfrom .models import *\nfrom teams.models import Team\n\n\n@admin.register(Robotics1, Robotics2, Robotics3)\nclass RoboticScoreAdmin(admin.ModelAdmin, ResultsAdminMixin):\n list_display = ['verbose_team_name', 'get_points', 'get_detail']\n readonly_fields = ['time', 'get_points']\n ordering = ['team']\n\n class CustomChangeList(ChangeList):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.title = \"Sélectionnez le score à modifier\"\n\n def get_changelist(self, request, **kwargs):\n return self.CustomChangeList\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n \"\"\" Removes teams who have already participated to this round from the selector field. \"\"\"\n if db_field.name == 'team':\n related_name = self.model.__name__.lower()\n kwargs['queryset'] = Team.objects.filter(**{'%s__isnull' % related_name: True})\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n def get_fields(self, request, obj=None):\n \"\"\" Removes the team selector field when updating and existing score. 
\"\"\"\n fields = super().get_fields(request, obj)\n return fields if not obj else [f for f in fields if f != 'team']\n","repo_name":"pobot/pjc-mc-django","sub_path":"apps/match/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42115619439","text":"\"\"\"Contains the TextUI class.\"\"\"\nfrom collections import defaultdict\n\nfrom datetime import datetime\nfrom psutil import cpu_percent, virtual_memory, Process\n\nimport mpf._version\nfrom mpf.core.delays import DelayManager\nfrom mpf.core.mpf_controller import MpfController\n\ntry:\n from asciimatics.scene import Scene\n from asciimatics.widgets import Frame, Layout, Label, Divider, PopUpDialog, Widget\n from asciimatics.widgets.utilities import THEMES\n from asciimatics.screen import Screen\nexcept ImportError:\n Scene = None\n Frame = None\n Layout = object\n THEMES = None\n Label = None\n Divider = None\n PopUpDialog = None\n Widget = None\n Screen = None\n\nMYPY = False\nif MYPY: # pragma: no cover\n from typing import List, Tuple, Dict # pylint: disable-msg=cyclic-import,unused-import\n from mpf.core.machine import MachineController # pylint: disable-msg=cyclic-import,unused-import,ungrouped-imports\n from mpf.devices.ball_device.ball_device \\\n import BallDevice # pylint: disable-msg=cyclic-import,unused-import,ungrouped-imports\n from mpf.devices.switch import Switch # pylint: disable-msg=cyclic-import,unused-import,ungrouped-imports\n\n\nclass MpfLayout(Layout):\n\n \"\"\"Add clear function.\"\"\"\n\n def __init__(self, columns, fill_frame=False):\n \"\"\"Store max_height.\"\"\"\n self._columns = []\n super().__init__(columns, fill_frame)\n self.max_height = None\n\n def clear_columns(self):\n \"\"\"Clear all columns.\"\"\"\n self._columns = [[] for _ in self._columns]\n\n def set_max_height(self, max_height):\n \"\"\"Set max height.\"\"\"\n self.max_height = max_height\n\n def fix(self, start_x, start_y, max_width, max_height):\n \"\"\"Limit height.\"\"\"\n if self.max_height:\n return min(super().fix(start_x, start_y, max_width, max_height), self.max_height)\n\n return super().fix(start_x, start_y, max_width, max_height)\n\n\n# pylint: disable-msg=too-many-instance-attributes\nclass TextUi(MpfController):\n\n \"\"\"Handles the text-based UI.\"\"\"\n\n config_name = \"text_ui\"\n\n __slots__ = [\"start_time\", \"machine\", \"_tick_task\", \"screen\", \"mpf_process\", \"ball_devices\", \"switches\",\n \"config\", \"_pending_bcp_connection\", \"_asset_percent\", \"_player_widgets\", \"_machine_widgets\",\n \"_bcp_status\", \"frame\", \"layout\", \"scene\", \"footer_memory\", \"switch_widgets\", \"mode_widgets\",\n \"ball_device_widgets\", \"footer_cpu\", \"footer_mc_cpu\", \"footer_uptime\", \"delay\", \"_layout_change\"]\n\n def __init__(self, machine: \"MachineController\") -> None:\n \"\"\"Initialize TextUi.\"\"\"\n super().__init__(machine)\n self.delay = DelayManager(machine)\n self.config = machine.config.get('text_ui', {})\n\n self.screen = None\n\n if not machine.options['text_ui'] or not Scene:\n self.log.debug(f\"Text UI is disabled. 
TUI option setting: {machine.options['text_ui']}, Asciimatics loaded: {Scene}\")\n return\n\n # hack to add themes until https://github.com/peterbrittain/asciimatics/issues/207 is implemented\n THEMES[\"mpf_theme\"] = defaultdict(\n lambda: (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLACK),\n {\n \"active_switch\": (Screen.COLOUR_BLACK, Screen.A_NORMAL, Screen.COLOUR_GREEN),\n \"pf_active\": (Screen.COLOUR_GREEN, Screen.A_NORMAL, Screen.COLOUR_BLACK),\n \"pf_inactive\": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLACK),\n \"label\": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLACK),\n \"title\": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_RED),\n \"title_exit\": (Screen.COLOUR_BLACK, Screen.A_NORMAL, Screen.COLOUR_RED),\n \"footer_cpu\": (Screen.COLOUR_CYAN, Screen.A_NORMAL, Screen.COLOUR_BLACK),\n \"footer_path\": (Screen.COLOUR_YELLOW, Screen.A_NORMAL, Screen.COLOUR_BLACK),\n \"footer_memory\": (Screen.COLOUR_GREEN, Screen.A_NORMAL, Screen.COLOUR_BLACK),\n \"footer_mc_cpu\": (Screen.COLOUR_MAGENTA, Screen.A_NORMAL, Screen.COLOUR_BLACK),\n })\n\n self.start_time = datetime.now()\n self.machine = machine\n\n self.mpf_process = Process()\n self.ball_devices = list() # type: List[BallDevice]\n\n self.switches = {} # type: Dict[str, Switch]\n\n self.machine.events.add_handler('init_phase_2', self._init)\n # self.machine.events.add_handler('init_phase_3', self._init2)\n self.machine.events.add_handler('loading_assets',\n self._asset_load_change)\n self.machine.events.add_handler('bcp_connection_attempt',\n self._bcp_connection_attempt)\n self.machine.events.add_handler('asset_loading_complete',\n self._asset_load_complete)\n self.machine.events.add_handler('bcp_clients_connected',\n self._bcp_connected)\n self.machine.events.add_handler('shutdown', self.stop)\n self.machine.add_crash_handler(self.stop)\n self.machine.events.add_handler('player_number', self._update_player)\n self.machine.events.add_handler('player_ball', self._update_player)\n self.machine.events.add_handler('player_score', self._update_player)\n self.machine.events.add_handler('ball_ended',\n self._update_player)\n\n self._pending_bcp_connection = False\n self._asset_percent = 0\n self._bcp_status = (0, 0, 0) # type: Tuple[float, int, int]\n self.switch_widgets = [] # type: List[Widget]\n self.mode_widgets = [] # type: List[Widget]\n self.ball_device_widgets = [] # type: List[Widget]\n self._machine_widgets = [] # type: List[Widget]\n self._player_widgets = [] # type: List[Widget]\n self.footer_memory = None\n self.footer_cpu = None\n self.footer_mc_cpu = None\n self.footer_uptime = None\n self._layout_change = True\n\n self._tick_task = self.machine.clock.schedule_interval(self._tick, 1)\n self._create_window()\n self._draw_screen()\n\n def _init(self, **kwargs):\n del kwargs\n for mode in self.machine.modes.values():\n self.machine.events.add_handler(\"mode_{}_started\".format(mode.name), self._mode_change)\n self.machine.events.add_handler(\"mode_{}_stopped\".format(mode.name), self._mode_change)\n\n self.machine.switch_controller.add_monitor(self._update_switches)\n self.machine.register_monitor(\"machine_vars\", self._update_machine_vars)\n self.machine.variables.machine_var_monitor = True\n self.machine.bcp.interface.register_command_callback(\n \"status_report\", self._bcp_status_report)\n\n for bd in [x for x in self.machine.ball_devices.values() if not x.is_playfield()]:\n self.ball_devices.append(bd)\n\n self.ball_devices.sort()\n\n self._update_switch_layout()\n 
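The THEMES workaround above leans on `defaultdict`'s factory argument so that any widget style name without an explicit palette entry falls back to a default tuple. The same idea in miniature (the color strings are placeholders, not asciimatics constants):

```python
from collections import defaultdict

WHITE_ON_BLACK = ("white", "normal", "black")
palette = defaultdict(lambda: WHITE_ON_BLACK,
                      {"title": ("white", "normal", "red")})

assert palette["title"] == ("white", "normal", "red")
assert palette["anything_else"] == WHITE_ON_BLACK   # unknown names fall back
```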
self._schedule_draw_screen()\n\n async def _bcp_status_report(self, client, cpu, rss, vms):\n del client\n self._bcp_status = cpu, rss, vms\n\n def _update_stats(self):\n # Runtime\n rt = (datetime.now() - self.start_time)\n mins, sec = divmod(rt.seconds + rt.days * 86400, 60)\n hours, mins = divmod(mins, 60)\n self.footer_uptime.text = 'RUNNING {:d}:{:02d}:{:02d}'.format(hours, mins, sec)\n\n # System Stats\n self.footer_memory.text = 'Free Memory (MB): {} CPU:{:3d}%'.format(\n round(virtual_memory().available / 1048576),\n round(cpu_percent(interval=None, percpu=False)))\n\n # MPF process stats\n self.footer_cpu.text = 'MPF (CPU RSS/VMS): {}% {}/{} MB '.format(\n round(self.mpf_process.cpu_percent()),\n round(self.mpf_process.memory_info().rss / 1048576),\n round(self.mpf_process.memory_info().vms / 1048576))\n\n # MC process stats\n if self._bcp_status != (0, 0, 0):\n self.footer_mc_cpu.text = 'MC (CPU RSS/VMS) {}% {}/{} MB '.format(\n round(self._bcp_status[0]),\n round(self._bcp_status[1] / 1048576),\n round(self._bcp_status[2] / 1048576))\n else:\n self.footer_mc_cpu.text = \"\"\n\n def _update_switch_layout(self):\n num = 0\n self.switch_widgets = []\n self.switches = {}\n self.switch_widgets.append((Label(\"SWITCHES\"), 1))\n self.switch_widgets.append((Divider(), 1))\n self.switch_widgets.append((Label(\"\"), 2))\n self.switch_widgets.append((Divider(), 2))\n\n for sw in sorted(self.machine.switches.values()):\n if sw.invert:\n name = sw.name + '*'\n else:\n name = sw.name\n\n col = 1 if num <= int(len(self.machine.switches) / 2) else 2\n\n switch_widget = Label(name)\n if sw.state:\n switch_widget.custom_colour = \"active_switch\"\n\n self.switch_widgets.append((switch_widget, col))\n self.switches[sw.name] = (sw, switch_widget)\n\n num += 1\n\n self._schedule_draw_screen()\n\n def _update_switches(self, change, *args, **kwargs):\n del args\n del kwargs\n try:\n sw, switch_widget = self.switches[change.name]\n except KeyError:\n return\n if sw.state:\n switch_widget.custom_colour = \"active_switch\"\n else:\n switch_widget.custom_colour = \"label\"\n\n self._schedule_draw_screen()\n\n def _draw_switches(self):\n \"\"\"Draw all switches.\"\"\"\n for widget, column in self.switch_widgets:\n self.layout.add_widget(widget, column)\n\n def _mode_change(self, *args, **kwargs):\n # Have to call this on the next frame since the mode controller's\n # active list isn't updated yet\n del args\n del kwargs\n self.mode_widgets = []\n self.mode_widgets.append(Label(\"ACTIVE MODES\"))\n self.mode_widgets.append(Divider())\n try:\n modes = self.machine.mode_controller.active_modes\n except AttributeError:\n modes = None\n\n if modes:\n for mode in modes:\n self.mode_widgets.append(Label('{} ({})'.format(mode.name, mode.priority)))\n else:\n self.mode_widgets.append(Label(\"No active modes\"))\n\n # empty line at the end\n self.mode_widgets.append(Label(\"\"))\n\n self._layout_change = True\n self._schedule_draw_screen()\n\n def _draw_modes(self):\n for widget in self.mode_widgets:\n self.layout.add_widget(widget, 0)\n\n def _draw_ball_devices(self):\n for widget in self.ball_device_widgets:\n self.layout.add_widget(widget, 3)\n\n def _update_ball_devices(self, **kwargs):\n del kwargs\n # TODO: do not create widgets. 
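The footer in `_update_stats` above is driven by a handful of psutil calls; isolated, they look like this (dividing by 1048576 converts bytes to MiB, as in the record):

```python
from psutil import Process, cpu_percent, virtual_memory

proc = Process()   # no pid argument means the current process
print("system CPU: {}%".format(round(cpu_percent(interval=None))))
print("free memory: {} MB".format(round(virtual_memory().available / 1048576)))
info = proc.memory_info()
print("process RSS/VMS: {}/{} MB".format(round(info.rss / 1048576),
                                         round(info.vms / 1048576)))
```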
just update contents\n self.ball_device_widgets = []\n self.ball_device_widgets.append(Label(\"BALL COUNTS\"))\n self.ball_device_widgets.append(Divider())\n\n try:\n for pf in self.machine.playfields.values():\n widget = Label('{}: {} '.format(pf.name, pf.balls))\n if pf.balls:\n widget.custom_colour = \"pf_active\"\n else:\n widget.custom_colour = \"pf_inactive\"\n self.ball_device_widgets.append(widget)\n\n except AttributeError:\n pass\n\n for bd in self.ball_devices:\n widget = Label('{}: {} ({})'.format(bd.name, bd.balls, bd.state))\n if bd.balls:\n widget.custom_colour = \"pf_active\"\n else:\n widget.custom_colour = \"pf_inactive\"\n\n self.ball_device_widgets.append(widget)\n\n self.ball_device_widgets.append(Label(\"\"))\n\n self._layout_change = True\n self._schedule_draw_screen()\n\n def _update_player(self, **kwargs):\n del kwargs\n self._player_widgets = []\n self._player_widgets.append(Label(\"CURRENT PLAYER\"))\n self._player_widgets.append(Divider())\n\n try:\n player = self.machine.game.player\n self._player_widgets.append(Label('PLAYER: {}'.format(player.number)))\n self._player_widgets.append(Label('BALL: {}'.format(player.ball)))\n self._player_widgets.append(Label('SCORE: {:,}'.format(player.score)))\n except AttributeError:\n self._player_widgets.append(Label(\"NO GAME IN PROGRESS\"))\n return\n\n player_vars = player.vars.copy()\n player_vars.pop('score', None)\n player_vars.pop('number', None)\n player_vars.pop('ball', None)\n\n names = self.config.get('player_vars', player_vars.keys())\n for name in names:\n try:\n self.machine.events.replace_handler('player_' + name, self._update_player)\n except ValueError:\n pass\n self._player_widgets.append(Label(\"{}: {}\".format(name, player_vars[name])))\n\n self._layout_change = True\n self._schedule_draw_screen()\n\n def _draw_player(self, **kwargs):\n del kwargs\n for widget in self._player_widgets:\n self.layout.add_widget(widget, 3)\n\n def _update_machine_vars(self, **kwargs):\n \"\"\"Update machine vars.\"\"\"\n del kwargs\n self._machine_widgets = []\n self._machine_widgets.append(Label(\"MACHINE VARIABLES\"))\n self._machine_widgets.append(Divider())\n machine_vars = self.machine.variables.machine_vars\n # If config defines explict vars to show, only show those. 
Otherwise, all\n names = self.config.get('machine_vars', machine_vars.keys())\n for name in names:\n self._machine_widgets.append(Label(\"{}: {}\".format(name, machine_vars[name]['value'])))\n self._layout_change = True\n self._schedule_draw_screen()\n\n def _draw_machine_variables(self):\n \"\"\"Draw machine vars.\"\"\"\n for widget in self._machine_widgets:\n self.layout.add_widget(widget, 0)\n\n def _create_window(self):\n self.screen = Screen.open()\n self.frame = Frame(self.screen, self.screen.height, self.screen.width, has_border=False, title=\"Test\")\n self.frame.set_theme(\"mpf_theme\")\n\n title_layout = Layout([1, 5, 1])\n self.frame.add_layout(title_layout)\n\n title_left = Label(\"\")\n title_left.custom_colour = \"title\"\n title_layout.add_widget(title_left, 0)\n\n title = 'Mission Pinball Framework v{}'.format(mpf._version.__version__) # noqa\n title_text = Label(title, align=\"^\")\n title_text.custom_colour = \"title\"\n title_layout.add_widget(title_text, 1)\n\n exit_label = Label(\"< CTRL + C > TO EXIT\", align=\">\")\n exit_label.custom_colour = \"title_exit\"\n\n title_layout.add_widget(exit_label, 2)\n\n self.layout = MpfLayout([1, 1, 1, 1], fill_frame=True)\n self.frame.add_layout(self.layout)\n\n footer_layout = Layout([1, 1, 1])\n self.frame.add_layout(footer_layout)\n self.footer_memory = Label(\"\", align=\">\")\n self.footer_memory.custom_colour = \"footer_memory\"\n self.footer_uptime = Label(\"\", align=\">\")\n self.footer_uptime.custom_colour = \"footer_memory\"\n self.footer_mc_cpu = Label(\"\")\n self.footer_mc_cpu.custom_colour = \"footer_mc_cpu\"\n self.footer_cpu = Label(\"\")\n self.footer_cpu.custom_colour = \"footer_cpu\"\n footer_path = Label(self.machine.machine_path)\n footer_path.custom_colour = \"footer_path\"\n footer_empty = Label(\"\")\n footer_empty.custom_colour = \"footer_memory\"\n\n footer_layout.add_widget(footer_path, 0)\n footer_layout.add_widget(self.footer_cpu, 0)\n footer_layout.add_widget(footer_empty, 1)\n footer_layout.add_widget(self.footer_mc_cpu, 1)\n footer_layout.add_widget(self.footer_uptime, 2)\n footer_layout.add_widget(self.footer_memory, 2)\n\n self.scene = Scene([self.frame], -1)\n self.screen.set_scenes([self.scene], start_scene=self.scene)\n\n # prevent main from scrolling out the footer\n self.layout.set_max_height(self.screen.height - 2)\n\n def _schedule_draw_screen(self):\n # schedule the draw in 10ms if it is not scheduled\n self.delay.add_if_doesnt_exist(10, self._draw_screen, \"draw_screen\")\n\n def _draw_screen(self):\n if not self.screen:\n # probably drawing during game end\n return\n\n if self._layout_change:\n self.layout.clear_columns()\n self._draw_modes()\n self._draw_machine_variables()\n self._draw_switches()\n self._draw_ball_devices()\n self._draw_player()\n self.frame.fix()\n self._layout_change = False\n\n self.screen.force_update()\n self.screen.draw_next_frame()\n\n def _tick(self):\n if self.screen.has_resized():\n self._create_window()\n\n self._update_ball_devices()\n self._update_stats()\n\n self._schedule_draw_screen()\n\n self.machine.bcp.transport.send_to_clients_with_handler(handler=\"_status_request\",\n bcp_command=\"status_request\")\n\n def _bcp_connection_attempt(self, name, host, port, **kwargs):\n del name\n del kwargs\n self._pending_bcp_connection = PopUpDialog(self.screen,\n 'WAITING FOR MEDIA CONTROLLER {}:{}'.format(host, port), [])\n self.scene.add_effect(self._pending_bcp_connection)\n self._schedule_draw_screen()\n\n def _bcp_connected(self, **kwargs):\n del kwargs\n 
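`_schedule_draw_screen` above coalesces bursts of updates through `DelayManager.add_if_doesnt_exist`, so many events within the 10 ms window trigger a single redraw. A framework-free stand-in for that "schedule only if nothing is pending" behavior, using `threading.Timer` (an assumption for illustration; MPF's own DelayManager is driven by the machine clock, not threads):

```python
import threading

class Debouncer:
    """Coalesce repeated requests into one deferred call."""

    def __init__(self, delay_s, fn):
        self.delay_s, self.fn = delay_s, fn
        self._timer = None

    def request(self):
        # Only arm a new timer when none is pending, mirroring add_if_doesnt_exist.
        if self._timer is None or not self._timer.is_alive():
            self._timer = threading.Timer(self.delay_s, self.fn)
            self._timer.start()
```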
self.scene.remove_effect(self._pending_bcp_connection)\n self._create_window() # The MC will write any SDL or other messages on top of the TUI, so recreate it to get rid of that stuff\n self._schedule_draw_screen()\n\n def _asset_load_change(self, percent, **kwargs):\n del kwargs\n if self._asset_percent:\n self.scene.remove_effect(self._asset_percent)\n self._asset_percent = PopUpDialog(self.screen, 'LOADING ASSETS: {}%'.format(percent), [])\n self.scene.add_effect(self._asset_percent)\n self._schedule_draw_screen()\n\n def _asset_load_complete(self, **kwargs):\n del kwargs\n self.scene.remove_effect(self._asset_percent)\n self._schedule_draw_screen()\n\n def stop(self, **kwargs):\n \"\"\"Stop the Text UI and restore the original console screen.\"\"\"\n del kwargs\n if self.screen:\n self.machine.clock.unschedule(self._tick_task)\n self.screen.close(True)\n self.screen = None\n","repo_name":"missionpinball/mpf","sub_path":"mpf/core/text_ui.py","file_name":"text_ui.py","file_ext":"py","file_size_in_byte":18906,"program_lang":"python","lang":"en","doc_type":"code","stars":192,"dataset":"github-code","pt":"19"} +{"seq_id":"35598642691","text":"from rest_framework.decorators import api_view\nfrom Driver.models import Driver \nfrom Rider.models import Rider\nfrom Request.models import Request\nfrom rest_framework.response import Response\nfrom .serializers import DriverSerializer, RiderSerializer, RequetSerealizer, RequetSerializerAll, FinishRequetSerealizer\nfrom Driver.views import post_locations_driver, post_locations_rider\nfrom rest_framework import status\nfrom general.calculate_distance import get_distance\nfrom .locations import get_location\nfrom .calculate_cost import get_cost\n\n\n\n\n\n#MODEL DRIVER\n#get all driver\n@api_view(['GET','POST'])\ndef get_driver_all(request):\n\n if request.method == 'GET':\n driver = Driver.objects.all()#.filter(driver_approved=True)\n driver_serializer = DriverSerializer(driver,many=True)\n return Response(driver_serializer.data, status = status.HTTP_200_OK)\n \n elif request.method == 'POST':\n driver_serializer = DriverSerializer(data = request.data)\n if driver_serializer.is_valid():\n driver_serializer.save()\n data = {\n 'id': driver_serializer.data['id'],\n 'city': driver_serializer.data['city'],\n 'reference_point': driver_serializer.data['reference_point']\n }\n post_locations_driver(data)\n return Response(driver_serializer.data, status = status.HTTP_201_CREATED)\n return Response(driver_serializer.errors)\n \n\n#get driver approved\n@api_view(['GET'])\ndef get_driver_approved(request):\n\n if request.method == 'GET':\n driver = Driver.objects.all().filter(driver_approved=True)\n driver_serializer = DriverSerializer(driver,many=True)\n #post_locations(driver)\n return Response(driver_serializer.data, status = status.HTTP_200_OK)\n\n#get driver active\n@api_view(['GET'])\ndef get_driver_active(request):\n if request.method == 'GET':\n driver = Driver.objects.all().filter(driver_active=True)\n driver_serializer = DriverSerializer(driver,many=True)\n return Response(driver_serializer.data, status = status.HTTP_200_OK)\n \n \n#get driver for identification_card\n@api_view(['GET','PUT','DELETE'])\ndef get_driver(request,identification_card):\n driver = Driver.objects.filter(identification_card=identification_card).first()\n\n if driver:\n if request.method == 'GET':\n driver_serealizer = DriverSerializer(driver)\n return Response(driver_serealizer.data, status = status.HTTP_200_OK)\n \n elif request.method == 'PUT':\n driver_serealizer = 
DriverSerializer(driver,data = request.data)\n if driver_serealizer.is_valid():\n driver_serealizer.save()\n return Response(driver_serealizer.data, status = status.HTTP_200_OK)\n return Response(driver_serealizer.errors, status = status.HTTP_400_BAD_REQUEST)\n \n elif request.method == 'DELETE':\n driver.delete()\n return Response({'message':'user deleted successfully!'},status = status.HTTP_200_OK)\n\n return Response({'message':'user does not exist'},status = status.HTTP_400_BAD_REQUEST) \n\n\n#MODEL RIDER\n#get all riders\n@api_view(['GET','POST'])\ndef get_rider_all(request):\n\n if request.method == 'GET':\n rider = Rider.objects.all()\n rider_serializer = RiderSerializer(rider,many=True)\n return Response(rider_serializer.data, status = status.HTTP_200_OK)\n \n elif request.method == 'POST':\n rider_serializer = RiderSerializer(data = request.data)\n if rider_serializer.is_valid():\n rider_serializer.save()\n data = {\n 'id': rider_serializer.data['id'],\n 'city': rider_serializer.data['city'],\n 'reference_point': rider_serializer.data['reference_point']\n }\n post_locations_rider(data)\n return Response(rider_serializer.data, status = status.HTTP_201_CREATED)\n return Response(rider_serializer.errors) \n\n\n\n#MODEL REQUEST\n#request\n#upper bound used when searching for the nearest driver\ncont = 1000000000\n@api_view(['GET','POST'])\ndef request(request):\n if request.method == 'GET':\n request_qs = Request.objects.all()\n request_serializer = RequetSerializerAll(request_qs,many=True)\n return Response(request_serializer.data, status = status.HTTP_200_OK)\n elif request.method == 'POST':\n test_data = {\n 'id_rider': request.data['id_rider'],\n 'requet_active': '1',\n 'ponit_start': request.data['ponit_start'],\n 'ponit_finish': request.data['ponit_finish'],\n 'city': request.data['city'],\n }\n test_request = RequetSerealizer(data = test_data)\n if test_request.is_valid():\n test_request.save()\n #get the distance between the start and finish points\n data_ini = get_location(request.data['ponit_start'],request.data['city'])\n data_fin = get_location(request.data['ponit_finish'],request.data['city'])\n latitude_ini = data_ini['latitude']\n longitude_ini = data_ini['longitude']\n latitude_fin = data_fin['latitude']\n longitude_fin = data_fin['longitude']\n distance = get_distance(latitude_ini,longitude_ini,latitude_fin,longitude_fin)\n\n #update the distance in the db\n Request.objects.filter(id_rider=request.data['id_rider']).filter(ponit_start=request.data['ponit_start']).filter(ponit_finish=request.data['ponit_finish']).update(distance_travel=distance)\n\n #search for the nearest active driver in the same city\n drivers = Driver.objects.all().filter(driver_active=True).filter(city=request.data['city'])\n best_distance = cont\n driver_select = None\n for driver in drivers:\n latitude_rider = data_ini['latitude']\n longitude_rider = data_ini['longitude']\n data_driver = get_location(driver.reference_point,driver.city)\n latitude_driver = data_driver['latitude']\n longitude_driver = data_driver['longitude']\n distance = get_distance(latitude_rider,longitude_rider,latitude_driver,longitude_driver)\n if distance < best_distance:\n best_distance = distance\n driver_select = driver.id\n if driver_select is not None:\n Request.objects.filter(id_rider=request.data['id_rider']).filter(ponit_start=request.data['ponit_start']).filter(ponit_finish=request.data['ponit_finish']).update(id_driver=driver_select)\n return Response(test_request.data) \n return Response(test_request.errors) \n\n@api_view(['GET'])\ndef request_active(request):\n if request.method == 'GET':\n request_qs = Request.objects.all().filter(requet_active=True)\n request_serializer = RequetSerializerAll(request_qs,many=True)\n return Response(request_serializer.data, status = status.HTTP_200_OK)\n\n\n@api_view(['GET','PUT'])\ndef finish_request(request, id):\n if request.method == 'GET':\n request_qs = Request.objects.all().filter(id=id)\n request_serializer = RequetSerializerAll(request_qs,many=True)\n return Response(request_serializer.data, status = status.HTTP_200_OK)\n\n elif request.method == 'PUT':\n request_qs = Request.objects.all().filter(id=id).first()\n request_serializer = RequetSerializerAll(request_qs,data = request.data)\n if request_serializer.is_valid():\n request_serializer.save()\n cost = get_cost(request_serializer['time_travel'].value,request_serializer['distance_travel'].value)\n #update the cost in the db\n Request.objects.filter(id=id).update(cost=round(cost))\n return Response(request_serializer.data, status = status.HTTP_200_OK)\n return Response(request_serializer.errors, status = status.HTTP_400_BAD_REQUEST)\n\n\n","repo_name":"diegocordoba96/Backed-Tras","sub_path":"general/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"75031259883","text":"# Hang Man Game \n\n# Step 1: Select a random word from the list provided\n# Step 2: Display game interface\n# Step 3: Ask user for input\n# Step 4: Match input with letters in the selected word\n # Step 4.1: If correct, fill in the blank\n # Step 4.2: If incorrect, fill the hang man\n# Step 5: Ask user for the next input, repeat Step 4\n# Step 6: Check if hang man or the selected word has been completed\n# Step 7: Ask user if they want to restart the game\n \nfrom ascii_art import hang_man_stages, logo\nfrom my_list import word_list\nimport os\nimport random\n\nend_game = False\nwhile not end_game:\n selected_word = random.choice(word_list)\n already_guessed = []\n guess_blank = ['_'] * len(selected_word)\n guess_fill = guess_blank[:]\n guess_blank_str = ' '.join(guess_blank)\n \n print(logo)\n print(f\"There are {len(selected_word)} letters in the secret word.\")\n print(guess_blank_str)\n print(hang_man_stages[0])\n limit = 6\n count = 0\n\n while not end_game: \n user_guess = input('Enter a guess: ').upper()\n os.system('cls') # clearing the screen (Windows-specific)\n if len(user_guess) == 1:\n if user_guess.isalpha():\n\n if user_guess in already_guessed:\n print(f'You have already guessed letter {user_guess}')\n print(' '.join(guess_fill))\n print(hang_man_stages[count])\n continue\n else:\n already_guessed += user_guess\n\n if user_guess in selected_word:\n num = selected_word.count(user_guess)\n # reveal every position where the guessed letter occurs\n for i in range(1, len(selected_word) + 1):\n if selected_word[i - 1] == user_guess:\n guess_fill[i - 1] = user_guess\n print(\"There are {number} letter {guessed_letter} in the secret word.\".format(number = num, guessed_letter = user_guess))
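# get_distance() is imported from general.calculate_distance and its source is not
# shown in this file; a plausible implementation for the nearest-driver search above
# is the haversine great-circle distance (result in kilometres), sketched here:
import math

def haversine_km(lat1, lon1, lat2, lon2):
    """Great-circle distance between two (lat, lon) points in kilometres."""
    earth_radius_km = 6371.0
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    d_phi = math.radians(lat2 - lat1)
    d_lambda = math.radians(lon2 - lon1)
    a = math.sin(d_phi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(d_lambda / 2) ** 2
    return 2 * earth_radius_km * math.asin(math.sqrt(a))

# haversine_km(4.60971, -74.08175, 4.65, -74.05) -> roughly 5.7 km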
\n print(\" \".join(guess_fill))\n print(hang_man_stages[count])\n if '_' not in guess_fill:\n print('You Win! Congratulations!')\n\n else: \n count += 1 \n print(\"Wrong guess. \" + str(limit - count) + \" guesses remaining\")\n print(\" \".join(guess_fill))\n print(hang_man_stages[count])\n if count < 6:\n continue\n else:\n print('You Lose!')\n\n if count == 6 or '_' not in guess_fill:\n print(f\"The secret word is {selected_word}\")\n option = ['Y','y','N','n']\n restart = input(\"Do you want to play again? 'Y' or 'N' \")\n while restart not in option:\n restart = input(\"Do you want to play again? 'Y' or 'N' \")\n if restart == option[0] or restart == option[1]:\n os.system('cls')\n break # break the inner loop \n else:\n print('See you later!')\n end_game = True\n\n else: # Input is not a letter\n print('Single letter input only, please try again!')\n print(\" \".join(guess_fill))\n print(hang_man_stages[count])\n continue\n\n else: # Input length is longer than 1 character\n print('Single letter input only, please try again!')\n print(\" \".join(guess_fill))\n print(hang_man_stages[count])\n continue","repo_name":"terry109011/My-Python-Projects","sub_path":"List Practice (P21 - P26)/Project 26 - Hang Man/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"42115350439","text":"\"\"\"Command dispatcher of the Mission Pinball Framework.\"\"\"\nimport sys\nimport mpf.commands\n\n\ndef main(args=None):\n \"\"\"Dispatch commands to our handlers.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n mpf.commands.run_from_command_line(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"missionpinball/mpf","sub_path":"mpf/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":192,"dataset":"github-code","pt":"19"}
{"seq_id":"35736053103","text":"import scrapy\nfrom scrapy import Selector\n\nfrom Bingo.items import MoveItem\n\n\nclass DoubanSpider(scrapy.Spider):\n name = 'douban'\n allowed_domains = ['movie.douban.com']\n start_urls = ['http://movie.douban.com/top250']\n\n def parse(self, response, *args, **kwargs):\n se = Selector(response)\n list_items = se.css('#content > div > div.article > ol > li')\n for item in list_items:\n move_item = MoveItem()\n move_item['title'] = item.css('span.title::text').extract_first()\n move_item['subject'] = item.css('span.inq::text').extract_first()\n move_item['score'] = item.css('span.rating_num::text').extract_first()\n yield move_item\n","repo_name":"xulei131401/python-guide","sub_path":"crawl/Bingo/Bingo/spiders/douban.py","file_name":"douban.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"7602696251","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('waste_notifier', '0004_auto_20170412_1449'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='subscriber',\n            name='created_at',\n            field=models.DateTimeField(blank=True, verbose_name='Time of initial subscription', null=True),\n        ),\n        migrations.AlterField(\n            model_name='subscriber',\n            name='service_type',\n            
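# The hangman loop above reveals every occurrence of a correct guess by scanning
# indices with range(1, len(word) + 1); the same step written with enumerate():
def reveal(secret, blanks, guess):
    """Fill blanks (a list of '_' placeholders) wherever guess occurs in secret."""
    for i, letter in enumerate(secret):
        if letter == guess:
            blanks[i] = guess
    return blanks

# reveal("BANANA", ["_"] * 6, "A") -> ['_', 'A', '_', 'A', '_', 'A']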
field=models.CharField(max_length=32, default='all', verbose_name='Service', help_text='(comma-delimited combination of any of the following: all, bulk, hazardous, recycling, recycle here, street sweeper, trash, transfer station, yard waste)'),\n ),\n ]\n","repo_name":"vernitgarg22/django_apps","sub_path":"waste_notifier/migrations/0005_auto_20170503_1632.py","file_name":"0005_auto_20170503_1632.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36675468423","text":"from typing import List, Optional\n\nfrom sirius_sdk.agent.wallet.abstract.anoncreds import AbstractAnonCreds, AnonCredSchema\nfrom sirius_sdk.agent.connections import AgentRPC\n\n\nclass AnonCredsProxy(AbstractAnonCreds):\n\n def __init__(self, rpc: AgentRPC):\n self.__rpc = rpc\n\n async def issuer_create_schema(\n self, issuer_did: str, name: str, version: str, attrs: List[str]\n ) -> (str, AnonCredSchema):\n schema_id, body = await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/issuer_create_schema',\n params=dict(issuer_did=issuer_did, name=name, version=version, attrs=attrs)\n )\n return schema_id, AnonCredSchema(**body)\n\n async def issuer_create_and_store_credential_def(\n self, issuer_did: str, schema: dict, tag: str, signature_type: str = None, config: dict = None\n ) -> (str, dict):\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/issuer_create_and_store_credential_def',\n params=dict(issuer_did=issuer_did, schema=schema, tag=tag, signature_type=signature_type, config=config)\n )\n\n async def issuer_rotate_credential_def_start(self, cred_def_id: str, config: dict = None) -> dict:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/issuer_rotate_credential_def_start',\n params=dict(cred_def_id=cred_def_id, config=config)\n )\n\n async def issuer_rotate_credential_def_apply(self, cred_def_id: str):\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/issuer_rotate_credential_def_apply',\n params=dict(cred_def_id=cred_def_id)\n )\n\n async def issuer_create_and_store_revoc_reg(\n self, issuer_did: str, revoc_def_type: Optional[str], tag: str,\n cred_def_id: str, config: dict, tails_writer_handle: int\n ) -> (str, dict, dict):\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/issuer_create_and_store_revoc_reg',\n params=dict(\n issuer_did=issuer_did, revoc_def_type=revoc_def_type, tag=tag,\n cred_def_id=cred_def_id, config=config, tails_writer_handle=tails_writer_handle\n )\n )\n\n async def issuer_create_credential_offer(self, cred_def_id: str) -> dict:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/issuer_create_credential_offer',\n params=dict(cred_def_id=cred_def_id)\n )\n\n async def issuer_create_credential(\n self, cred_offer: dict, cred_req: dict, cred_values: dict,\n rev_reg_id: str = None, blob_storage_reader_handle: int = None\n ) -> (dict, Optional[str], Optional[dict]):\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/issuer_create_credential',\n params=dict(\n cred_offer=cred_offer, cred_req=cred_req, cred_values=cred_values,\n rev_reg_id=rev_reg_id, blob_storage_reader_handle=blob_storage_reader_handle\n )\n )\n\n async def issuer_revoke_credential(\n self, 
blob_storage_reader_handle: int, rev_reg_id: str, cred_revoc_id: str\n ) -> dict:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/issuer_revoke_credential',\n params=dict(\n blob_storage_reader_handle=blob_storage_reader_handle,\n rev_reg_id=rev_reg_id, cred_revoc_id=cred_revoc_id\n )\n )\n\n async def issuer_merge_revocation_registry_deltas(self, rev_reg_delta: dict, other_rev_reg_delta: dict) -> dict:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/issuer_merge_revocation_registry_deltas',\n params=dict(rev_reg_delta=rev_reg_delta, other_rev_reg_delta=other_rev_reg_delta)\n )\n\n async def prover_create_master_secret(self, master_secret_name: str = None) -> str:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_create_master_secret',\n params=dict(master_secret_name=master_secret_name)\n )\n\n async def prover_create_credential_req(\n self, prover_did: str, cred_offer: dict, cred_def: dict, master_secret_id: str\n ) -> (dict, dict):\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_create_credential_req',\n params=dict(\n prover_did=prover_did, cred_offer=cred_offer, cred_def=cred_def, master_secret_id=master_secret_id\n )\n )\n\n async def prover_set_credential_attr_tag_policy(\n self, cred_def_id: str, tag_attrs: Optional[dict], retroactive: bool\n ) -> None:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_set_credential_attr_tag_policy',\n params=dict(cred_def_id=cred_def_id, tag_attrs=tag_attrs, retroactive=retroactive)\n )\n\n async def prover_get_credential_attr_tag_policy(self, cred_def_id: str) -> dict:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_get_credential_attr_tag_policy',\n params=dict(cred_def_id=cred_def_id)\n )\n\n async def prover_store_credential(\n self, cred_id: Optional[str], cred_req_metadata: dict, cred: dict, cred_def: dict, rev_reg_def: dict = None\n ) -> str:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_store_credential',\n params=dict(\n cred_id=cred_id, cred_req_metadata=cred_req_metadata,\n cred=cred, cred_def=cred_def, rev_reg_def=rev_reg_def\n )\n )\n\n async def prover_get_credential(self, cred_id: str) -> dict:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_get_credential',\n params=dict(cred_id=cred_id)\n )\n\n async def prover_delete_credential(self, cred_id: str) -> None:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_delete_credential',\n params=dict(cred_id=cred_id)\n )\n\n async def prover_get_credentials(self, filters: dict) -> List[dict]:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_get_credentials',\n params=dict(filters=filters)\n )\n\n async def prover_search_credentials(self, query: dict) -> List[dict]:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_search_credentials',\n params=dict(query=query)\n )\n\n async def prover_get_credentials_for_proof_req(self, proof_request: dict) -> dict:\n return await self.__rpc.remote_call(\n 
msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_get_credentials_for_proof_req',\n params=dict(proof_request=proof_request)\n )\n\n async def prover_search_credentials_for_proof_req(\n self, proof_request: dict, extra_query: dict = None, limit_referents: int = 1\n ) -> dict:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_search_credentials_for_proof_req',\n params=dict(\n proof_request=proof_request, extra_query=extra_query, limit_referents=limit_referents\n )\n )\n\n async def prover_create_proof(\n self, proof_req: dict, requested_credentials: dict, master_secret_name: str,\n schemas: dict, credential_defs: dict, rev_states: dict\n ) -> dict:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/prover_create_proof',\n params=dict(\n proof_req=proof_req, requested_credentials=requested_credentials,\n master_secret_name=master_secret_name, schemas=schemas,\n credential_defs=credential_defs, rev_states=rev_states\n )\n )\n\n async def verifier_verify_proof(\n self, proof_request: dict, proof: dict, schemas: dict,\n credential_defs: dict, rev_reg_defs: dict, rev_regs: dict\n ) -> bool:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/verifier_verify_proof',\n params=dict(\n proof_request=proof_request, proof=proof, schemas=schemas,\n credential_defs=credential_defs, rev_reg_defs=rev_reg_defs, rev_regs=rev_regs\n )\n )\n\n async def create_revocation_state(\n self, blob_storage_reader_handle: int, rev_reg_def: dict,\n rev_reg_delta: dict, timestamp: int, cred_rev_id: str\n ) -> dict:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/create_revocation_state',\n params=dict(\n blob_storage_reader_handle=blob_storage_reader_handle, rev_reg_def=rev_reg_def,\n rev_reg_delta=rev_reg_delta, timestamp=timestamp, cred_rev_id=cred_rev_id\n )\n )\n\n async def update_revocation_state(\n self, blob_storage_reader_handle: int, rev_state: dict, rev_reg_def: dict,\n rev_reg_delta: dict, timestamp: int, cred_rev_id: str\n ) -> dict:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/update_revocation_state',\n params=dict(\n blob_storage_reader_handle=blob_storage_reader_handle, rev_state=rev_state,\n rev_reg_def=rev_reg_def, rev_reg_delta=rev_reg_delta, timestamp=timestamp, cred_rev_id=cred_rev_id\n )\n )\n\n async def generate_nonce(self) -> str:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/generate_nonce'\n )\n\n async def to_unqualified(self, entity: str) -> str:\n return await self.__rpc.remote_call(\n msg_type='did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/to_unqualified',\n params=dict(entity=entity)\n )\n","repo_name":"Sirius-social/sirius-sdk-python","sub_path":"sirius_sdk/agent/wallet/impl/anoncreds.py","file_name":"anoncreds.py","file_ext":"py","file_size_in_byte":10794,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"} +{"seq_id":"41322252441","text":"from django.urls import path\n\nfrom Edu_Mark.views import enter_mark_page, final_report, view_student_report, edit_mark_page, \\\n enter_mark_choose_course_page, enter_mark_choose_student_page, edit_mark_choose_course_page, \\\n edit_mark_choose_student_page\n\nurlpatterns = [\n path('add-mark-cc', enter_mark_choose_course_page, 
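# Every method of AnonCredsProxy above has the same shape: forward the call as a
# namespaced msg_type plus keyword parameters to AgentRPC.remote_call(). That shape
# can be captured generically; a minimal sketch with a stubbed transport (the stub
# and GenericProxy are hypothetical, not part of sirius_sdk):
import asyncio

MSG_NS = 'did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/sirius_rpc/1.0/'

class StubRPC:
    async def remote_call(self, msg_type, params=None):
        # a real transport would serialize the request and await the agent's reply
        return {'msg_type': msg_type, 'params': params or {}}

class GenericProxy:
    def __init__(self, rpc):
        self._rpc = rpc

    def __getattr__(self, name):
        async def call(**params):
            return await self._rpc.remote_call(msg_type=MSG_NS + name, params=params)
        return call

# asyncio.run(GenericProxy(StubRPC()).generate_nonce())
#   -> {'msg_type': '...sirius_rpc/1.0/generate_nonce', 'params': {}}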
name='enter-mark-choose-course'),\n path('add-mark-cs/', enter_mark_choose_student_page, name='enter-mark-choose-student'),\n path('add-mark//', enter_mark_page, name='enter-mark'),\n path('final-report/', final_report, name='final-report'),\n path('view-student-report', view_student_report, name='view_student_report'),\n path('edit-mark-cc', edit_mark_choose_course_page, name='edit-mark-choose-course'),\n path('edit-mark-cs/', edit_mark_choose_student_page, name='edit-mark-choose-student'),\n path('edit-mark//', edit_mark_page, name='edit-mark'),\n\n]","repo_name":"nwasya/EducationalApp","sub_path":"Edu_Mark/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"6522475558","text":"import torch\nimport torchvision\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nfrom VAE_dataset import train_dataset , normalize\nfrom VAE_model import Encoder , Decoder\nfrom VAE_loss import Reconstruction_Loss , VAE_ELBO_Loss\n\nfrom time import perf_counter\n\nclass VAE_trainer():\n\n def __init__(self,args):\n\n self.lr = args.lr\n self.batch_size = args.batch_size\n self.epochs = args.epochs\n self.optim = args.optimizer\n self.Lambda = args.Lambda\n self.normalize = args.normalize\n self.resume = args.resume\n self.use_log = args.log\n\n self.start_epoch = 0\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n \n print(f\"[!] torch version: {torch.__version__}\")\n print(f\"[!] computation device: {self.device}\")\n\n\n def load_data(self):\n\n print(\"[!] Data Loading...\")\n\n\n self.train_dataset = train_dataset()\n self.data_statistics = self.train_dataset.get_statistics()\n\n\n transforms_list = [torchvision.transforms.Resize(64)]\n if self.normalize: \n transforms_list += [normalize(self.data_statistics[0] , self.data_statistics[1])]\n\n self.train_dataset.set_transforms(\n torchvision.transforms.Compose(transforms_list)\n )\n \n\n self.train_loader = DataLoader(dataset = self.train_dataset,\n batch_size = self.batch_size,\n shuffle = True,\n num_workers = 1)\n\n print(\"[!] Data Loading Done.\")\n\n\n def setup(self):\n\n print(\"[!] Setup...\")\n\n self.data_for_log = torch.randn(64 , 128).to(self.device)\n self.log_writer = SummaryWriter('logs') if self.use_log else None\n \n # define our model, loss function, and optimizer\n self.Encoder = Encoder().to(self.device)\n self.Decoder = Decoder().to(self.device)\n\n parameter_list = list(self.Encoder.parameters()) + list(self.Decoder.parameters())\n\n if self.optim.lower() == \"adam\":\n self.optimizer = torch.optim.Adam(parameter_list, lr=self.lr)\n else:\n self.optimizer = torch.optim.RMSprop(parameter_list, lr = self.lr)\n\n\n self.reconstruction_loss = Reconstruction_Loss().to(self.device)\n self.elbo_loss = VAE_ELBO_Loss(self.Lambda).to(self.device)\n\n\n # load checkpoint file to resume training\n if self.resume:\n print(f\"[!] Resume training from the file : {self.resume}\")\n checkpoint = torch.load(self.resume)\n self.Encoder.load_state_dict(checkpoint['model_state'][0])\n self.Decoder.load_state_dict(checkpoint[\"model_state\"][1])\n try:\n self.start_epoch = checkpoint['epoch']\n except:\n pass\n\n print(\"[!] Setup Done.\")\n\n\n def train(self):\n\n print(\"[!] 
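# Routes above such as 'add-mark//' and 'edit-mark-cs/' appear to have lost their
# angle-bracket path converters during text extraction; the original converter names
# cannot be recovered here. For reference, Django captures URL parameters like this
# (a generic example with hypothetical names, not the original routes):
from django.urls import path

def demo_enter_mark(request, course_id, student_id):
    ...

urlpatterns_demo = [
    # '<int:course_id>/<int:student_id>/' captures two integers and passes them
    # to the view as keyword arguments
    path('add-mark/<int:course_id>/<int:student_id>/', demo_enter_mark, name='demo-enter-mark'),
]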
Model training...\")\n avg_time = 0\n n_total_steps = len(self.train_loader)\n\n for epoch in range(self.start_epoch, self.epochs):\n\n st = perf_counter()\n total_loss = 0\n running_loss = 0\n\n self.Encoder.train()\n self.Decoder.train()\n for i , data in enumerate(self.train_loader):\n\n # access data and noise\n real_images = data.to(self.device)\n noise = torch.randn(real_images.shape[0] , 128).to(self.device)\n\n # feedforward\n mean , log_var_square = self.Encoder(real_images)\n # reparameterization trick\n latent_code = torch.multiply(torch.sqrt(torch.exp(log_var_square)) , noise) + mean\n reconstruction = self.Decoder(latent_code)\n\n # compute loss\n reconstruct_loss = self.reconstruction_loss(real_images , reconstruction)\n elbo_loss = self.elbo_loss(mean , log_var_square)\n loss = reconstruct_loss + elbo_loss\n\n # updates\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n total_loss += loss.item() / n_total_steps\n running_loss += loss.item() / n_total_steps\n\n # tensorboard: track training process\n if (i+1) % 50 == 0:\n with torch.no_grad():\n self.Decoder.eval()\n\n image_log = self.Decoder(self.data_for_log)\n## image_log = (image_log * self.data_statistics[1] + self.data_statistics[0]) if self.normalize else image_log\n image_grid = torchvision.utils.make_grid(image_log , nrow = 8 , normalize = True) # if normalize = True, then we don't need to manually normalize the image to value 0~1\n # otherwise, we need to manually normalize it by using image_log = image_log * 0.5 + 0.5\n\n print(f\"[!] Epoch : [{epoch+1}], step : [{i+1} / {n_total_steps}], Running Loss: {running_loss:.6f}\")\n running_loss = 0\n if self.use_log:  # guard: log_writer is None when logging is disabled\n self.log_writer.add_image(\"VAE Generated Image\" , image_grid , epoch * n_total_steps + i + 1)\n self.Decoder.train()\n\n print(\"-------------------------------------------\")\n avg_time = avg_time + (perf_counter() - st - avg_time) / (epoch+1)\n print(f\"[!] Epoch : [{epoch+1}/{self.epochs}] done. Average Training Time: {avg_time:.3f}, \"\n f\"Loss: {total_loss:.6f}\\n\") \n if self.use_log:\n self.log_writer.add_scalar('training loss', total_loss, epoch)\n\n if self.use_log:\n self.log_writer.close()\n\n print(\"[!] Training Done.\\n\")\n\n \n def save(self):\n\n print(\"[!] Model saving...\")\n checkpoint = {\"model_state\": [self.Encoder.state_dict() , self.Decoder.state_dict()]}\n torch.save(checkpoint , \"checkpoint.pth\")\n print(\"[!] 
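# The training step above samples the latent code with the reparameterization trick,
# z = mu + sigma * eps with eps ~ N(0, I), so gradients can flow through mu and
# log(sigma^2). The matching closed-form KL term of the ELBO for a diagonal Gaussian
# against N(0, I) is sketched below (VAE_ELBO_Loss is defined elsewhere in the repo;
# this shows the standard formula, not necessarily its exact code):
import torch

def reparameterize(mean, log_var_square):
    eps = torch.randn_like(mean)
    return mean + torch.exp(0.5 * log_var_square) * eps

def kl_divergence(mean, log_var_square):
    # KL(N(mu, sigma^2) || N(0, I)), summed over latent dims, averaged over the batch
    kl = -0.5 * torch.sum(1 + log_var_square - mean.pow(2) - log_var_square.exp(), dim=1)
    return kl.mean()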
Saving Done.\")\n","repo_name":"ninechi143/Pytorch-VAE","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34534648161","text":"from flask import Flask, request\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/sum2/', methods=['POST'])\r\ndef hello_world():\r\n data = request.json['data']\r\n total = 0\r\n for i in range(len(data)):\r\n total += data[i]\r\n return {'sum': total}\r\n\r\n\r\napp.run(host='0.0.0.0', port=5000)","repo_name":"baranka29/-","sub_path":"pythonProject/22/1.1.py","file_name":"1.1.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26393796150","text":"from PIL import Image, ImageDraw, ImageFont\n\n\nclass TextImage:\n def __init__(\n self, SCRIPT_PATH, timetable_text, rel_font_path, background_image_path\n ):\n self.SCRIPT_PATH = SCRIPT_PATH\n self.timetable_text = timetable_text\n self.font_path = f\"{SCRIPT_PATH}/{rel_font_path}\"\n\n self.background_image = Image.open(background_image_path)\n\n def crop_image(self, img, new_w, new_h):\n w, h = img.size\n\n return img.crop(\n ((w - new_w) / 2, (h - new_h) / 2, (w + new_w) / 2, (h + new_h) / 2)\n )\n\n def make_timetable_image(\n self,\n color=(255, 255, 255),\n text_hight=15,\n ):\n\n lines_count = len(self.timetable_text.split(\"\\n\"))\n image_size = (400, 30 + 18 * lines_count)\n\n self.background_image = (\n self.crop_image(self.background_image, *image_size)\n ).point(lambda pixel: pixel * 0.5)\n\n drawer = ImageDraw.Draw(self.background_image)\n drawer.multiline_text(\n (15, 15),\n self.timetable_text,\n fill=color,\n font=ImageFont.truetype(self.font_path, text_hight),\n )\n\n return self.background_image\n","repo_name":"alex7186/timetable_bot","sub_path":"back/image_manager.py","file_name":"image_manager.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"39127583576","text":"import tkinter as tk\nfrom functions import cancel_report_preparing, clearout_prepared_report, get_report_status, downloading\nfrom data import sites_dict\nfrom user_input_functions import get_sites, get_target, get_dates\n\nclass SiteNumberWindow():\n def __init__(self):\n self.text = tk.Text(height=40, width=25, wrap= tk.NONE)\n self.text.grid(column=1, row=5)\n self.text.insert(tk.END, '\\n'.join(sites_dict.keys()))\n \n self.label = tk.Label(text='Номера счетчиков сайтов', \n width=30, \n justify=tk.LEFT)\n self.label.grid(column=0, row=4)\n\nclass SiteNamesWindow():\n def __init__(self):\n self.text = tk.Text(height=40, width=25,wrap=tk.NONE)\n self.text.insert(tk.END, '\\n'.join(sites_dict.values()))\n \n self.label = tk.Label(text='Названия сайтов', \n width=30, \n justify=tk.LEFT)\n \n self.text.grid(column=0, row=5)\n self.label.grid(column=1, row=4)\n\nclass OutputWindow():\n def __init__(self):\n self.label = tk.Label(text='Окно вывода', \n width=30, \n justify=tk.LEFT)\n self.text = tk.Text(height=40, width=30, wrap=tk.WORD)\n self.scrollbar = tk.Scrollbar(orient=tk.VERTICAL, command=self.text.yview, width=50)\n self.scrollbar.place(x=705, y=200, relheight=0.65)\n self.text['yscrollcommand'] = self.scrollbar.set\n \n self.label.grid(column=2, row=4)\n self.text.grid(column=2, row=5)\n \n def get_text(self):\n return self.text\n\nclass Button():\n def __init__(self, 
text, command, column_grid, row_grid):\n self.button = tk.Button(text=text, command=command)\n self.button.grid(column=column_grid, row=row_grid)\n \nclass Checkbox():\n def __init__(self, text, variable, command, onvalue, offvalue, column_grid, row_grid):\n self.checkbox = tk.Checkbutton(text=text, \n variable=variable, \n command=command, \n onvalue=onvalue, \n offvalue=offvalue)\n self.checkbox.grid(column=column_grid, row=row_grid)\n\nclass InputWindow():\n def __init__(self, text, label_column_grid, label_row_grid, window_column_grid, window_row_grid):\n self.label = tk.Label(text=text, \n width=30, \n justify=tk.LEFT, \n wraplength=200)\n self.label.grid(column=label_column_grid, row=label_row_grid)\n \n self.window = tk.Entry(width=30)\n self.window.grid(column=window_column_grid, row=window_row_grid)\n \n def get_value(self):\n return self.window.get()\n \nclass MainInterface(tk.Tk):\n \n def __init__(self, *args, **kwargs): \n tk.Tk.__init__(self, *args, **kwargs)\n self.geometry(\"1000x600\") \n self.title('Yandex-Metrika-LogsAPI')\n self.sitenumberwindow = SiteNumberWindow()\n self.sitenameswindow = SiteNamesWindow()\n self.outputwindow = OutputWindow()\n \n AUTO_MODE = tk.StringVar()\n self.checkbox = Checkbox(text='Выгрузить все сайты?', \n variable=AUTO_MODE, \n command=lambda:checkbox_changed(AUTO_MODE), \n onvalue='да', \n offvalue='нет',\n column_grid=1, \n row_grid=2)\n \n self.label_help = tk.Label(text='Если не копируется название/номер, переключите раскладку на клавиатуре', \n wraplength=200, \n justify=tk.LEFT)\n self.label_help.grid(column=0, row=2)\n \n self.site_inputwindow = InputWindow(text='Введите название сайта для получения статуса, либо введите название(я) сайта(ов) через запятую для выгрузки:',\n label_column_grid=2,\n label_row_grid=0,\n window_column_grid=2,\n window_row_grid=1)\n \n self.date1_inputwindow = InputWindow(text='Введите первую дату интервала формата YYYY-MM-DD для выгрузки:',\n label_column_grid=3,\n label_row_grid=0,\n window_column_grid=3,\n window_row_grid=1)\n \n self.date2_inputwindow = InputWindow(text='Введите вторую дату интервала формата YYYY-MM-DD для выгрузки:',\n label_column_grid=3,\n label_row_grid=2,\n window_column_grid=3,\n window_row_grid=3)\n \n self.target_inputwindow = InputWindow(text='Введите цель выгрузки (Просмотры/Визиты):',\n label_column_grid=2,\n label_row_grid=2,\n window_column_grid=2,\n window_row_grid=3)\n \n self.cancel_report_button = Button(text='Очистить неподготовленный репорт', \n command=lambda:cancel_report_preparing(self.site_inputwindow.get_value(), self.outputwindow.get_text()), \n column_grid=0,\n row_grid=0)\n \n self.clearout_prepared_report_button = Button(text='Очистить подготовленный репорт', \n command=lambda:clearout_prepared_report(self.site_inputwindow.get_value(), self.outputwindow.get_text()),\n column_grid=0,\n row_grid=1)\n \n self.get_status_button = Button(text='Получить статусы выполнения', \n command=lambda:get_report_status(self.site_inputwindow.get_value(), self.outputwindow.get_text()), \n column_grid=1,\n row_grid=0)\n \n self.download_button = Button(text='Выгрузить', \n command=lambda:downloading(list(sites_dict.keys()) if AUTO_MODE.get() == 'да' else get_sites(self.site_inputwindow.get_value(), self.outputwindow.get_text(), self),\n get_target(self.target_inputwindow.get_value(), self.outputwindow.get_text(), self), \n get_dates(self.date1_inputwindow.get_value(), self.date2_inputwindow.get_value(), self.outputwindow.get_text(), self)),\n column_grid=1,\n row_grid=1)\n 
self.mainloop() \n \ndef checkbox_changed(toggle):\n return toggle.get()\n \ndef main():\n app = MainInterface()\n \nif __name__ == '__main__':\n main()\n","repo_name":"wtrmlln/Yandex-Metrika-LogsAPI","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7851,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"29103669963","text":"import logging\nfrom pathlib import Path\nfrom datetime import datetime\nimport time\nimport pandas as pd\n\nclass Runner:\n \"\"\"For data collection.\"\"\" #pylint: disable=too-many-instance-attributes\n def __init__(self, project_name='', output_type='csv', **kwargs):\n self.project_name = project_name\n self.outdir = f'../../output/{project_name}'\n self.inputdir = f'../../input/{project_name}'\n self.timestamp = datetime.now().strftime('%Y%m%d')\n self.prefix = f\"{project_name}_{self.timestamp}\"\n self.output_subdir = project_name\n self.checkpoint = None\n self.__output_type = output_type\n self.headers = ['PDF', 'Company', 'Owner Name', 'Owner Email', 'Order Type', 'Line Item Number',\n 'SKU Name', 'SKU Sub Name', 'SKU Price', 'SKU Quantity', 'SKU Discount', 'SKU Total',\n 'SKU Term in Months', 'Order Subtotal', 'Order Tax', 'Order Total', 'Currency', 'Renewal',\n 'Payment Terms', 'Special Terms', 'Start Date', 'Duration', 'End Date', 'Buyer Address',\n 'Buyer Name', 'Buyer Email', 'Seller Name', 'Seller Email', 'sku_format_index', 'date_format_accuracy']\n\n\n def run(self, **kwargs):\n \"\"\" Run the process \"\"\"\n start_time = time.time()\n\n logging.info(\"Creating the output directory if it does not exist\")\n\n Path(f'{self.outdir}').mkdir(parents=True, exist_ok=True)\n\n\n logging.info(\"Get raw data\")\n raw = self.get_raw(**kwargs)\n\n logging.info(\"Save raw data\")\n self.save_raw(raw,**kwargs)\n\n logging.info(\"Normalize data\")\n data = self.normalize(raw, **kwargs)\n\n logging.info(\"Save normalized data to output\")\n self.save_output(data, **kwargs)\n\n logging.info(\"Clean up\")\n self.cleanup()\n\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return data\n\n def get_raw(self, **kwargs):\n \"\"\"Get raw data from the source\"\"\"\n raise ImplementationException('get_raw')\n\n def normalize(self,raw,**kwargs):\n \"\"\"Normalize raw and return final result\"\"\"\n logging.info('Implement normalize() in your sub-class if needed, otherwise return raw: raw=%s, kwargs=%s',type(raw),kwargs)\n data_frame = pd.DataFrame(raw, columns=self.headers)\n data_frame.replace(to_replace=[r\"\\\\t|\\\\n|\\\\r\", \"\\t|\\n|\\r\"],\n value=[\"\", \"\"], regex=True, inplace=True)\n return data_frame\n\n def save_raw(self,raw,**kwargs):\n \"\"\"Save raw data to file\"\"\"\n logging.info('Implement save_raw() yourself if needed: raw=%s, kwargs=%s', type(raw),kwargs)\n\n def save_output(self, data, **kwargs):\n \"\"\"Save final data to output file\"\"\"\n if self.__output_type == 'csv':\n func = self.save_output_csv\n elif self.__output_type == 'json':\n func = self.save_output_json\n elif self.__output_type == 'excel':\n func = self.save_output_excel\n else:\n raise UnsupportedOutputTypeException(self.__output_type)\n return func(data,**kwargs)\n\n def save_output_csv(self, data, index=False, **kwargs):\n \"\"\"Save data into csv file\n :param data: pandas DataFrame\n \"\"\"\n file = self.get_output_file('csv')\n logging.info(\"Save final output: file=%s, index=%s, kwargs=%s\",file,index,kwargs)\n data.to_csv(file, index=index, encoding='utf-8', **kwargs)\n 
return file\n\n def save_output_json(self, data, index=False, **kwargs):\n \"\"\"Save data into json file\n :param data: pandas DataFrame\n \"\"\"\n file = self.get_output_file('json')\n logging.info(\"Save final output: file=%s, index=%s, kwargs=%s\",file,index,kwargs)\n data.to_json(file, orient=\"split\")\n return file\n\n def save_output_excel(self, data, index=False, **kwargs):\n \"\"\" Save final data to output file\n :param data: pandas DataFrame\n \"\"\"\n file = self.get_output_file('xlsx')\n logging.info(\"Save final output: file=%s, index=%s, kwargs=%s\", file,index,kwargs)\n data.to_excel(file,index=index, encoding='utf-8', **kwargs)\n return file\n\n def get_output_file(self,suffix):\n \"\"\"Return output file path\"\"\"\n _path = self.outdir\n _path = f'{_path}/{self.project_name}_{datetime.now().strftime(\"%Y%m%d\")}.{suffix}'\n return _path\n\n def cleanup(self):\n \"\"\"Clean up\"\"\"\n if self.checkpoint:\n self.checkpoint.clean()\n\n\n\nclass UnsupportedOutputTypeException(Exception):\n '''Exception raised for an unsupported output type'''\n def __init__(self,output_type):\n self.output_type = output_type\n super().__init__(f'Supported output types are [csv,json,excel]: output_type={self.output_type}. '\n 'Implement save_output() in your sub-class')\n\nclass ImplementationException(Exception):\n \"\"\"Exception raised when a required method is not implemented\"\"\"\n def __init__(self,method):\n self.method = method\n super().__init__(f'Please implement {self.method}() in your sub-class')\n","repo_name":"jaspernf/langchain_test","sub_path":"pdfscraper/wrapper/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"14636876765","text":"from django import forms\nfrom django.test import Client, TestCase\nfrom django.urls import reverse\n\nfrom posts.models import Post, Group, User\n\n\nclass PostPagesTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username=\"NoName\")\n cls.group = Group.objects.create(\n title=\"Титул\",\n slug=\"slug\",\n description=\"описание\",\n )\n\n cls.post = Post.objects.create(\n author=cls.user,\n text=\"Текст\",\n group=cls.group,\n )\n\n cls.templates_pages_names = {\n reverse(\"posts:index\"): \"posts/index.html\",\n reverse(\"posts:group_progect\", kwargs={\"slug\": cls.group.slug}):\n \"posts/group_progect.html\",  # not working\n reverse(\n \"posts:profile\", kwargs={\"username\": cls.post.author}\n ): \"posts/profile.html\",\n reverse(\n \"posts:post_detail\", kwargs={\"post_id\": cls.post.id}\n ): \"posts/post_detail.html\",\n reverse(  # not working\n \"posts:post_edit\", kwargs={\"post_id\": cls.post.id}\n ): \"posts/create_post.html\",\n reverse(\"posts:post_create\"): \"posts/create_post.html\",  # not working\n }\n\n def setUp(self):\n self.guest_client = Client()\n self.authorized_client = Client()\n self.authorized_client.force_login(PostPagesTests.user)\n\n def test_address(self):\n \"\"\"Each address uses the corresponding template.\"\"\"\n for template, reverse_name in self.templates_pages_names.items():\n with self.subTest(reverse_name=reverse_name):\n response = self.authorized_client.get(template)\n self.assertTemplateUsed(response, reverse_name)\n\n def test_index(self):\n \"\"\"index matches the expected context.\"\"\"\n expected = list(Post.objects.all()[:10])\n response = self.guest_client.get(reverse(\"posts:index\"))\n self.assertEqual(list(response.context[\"page_obj\"]), expected)\n\n def test_group(self):\n \"\"\"group_progect matches the expected context.\"\"\"\n expected = list(Post.objects.filter(group_id=self.group.id)[:10])\n response = self.guest_client.get(\n reverse(\"posts:group_progect\", kwargs={\"slug\": self.group.slug})\n )\n self.assertEqual(list(response.context[\"page_obj\"]), expected)\n\n def test_profile(self):\n \"\"\"profile matches the expected context.\"\"\"\n response = self.guest_client.get(\n reverse(\"posts:profile\", args=(self.post.author,))\n )\n expected = list(Post.objects.filter(author_id=self.user.id)[:10])\n self.assertEqual(list(response.context[\"page_obj\"]), expected)\n\n def test_post_detail(self):\n \"\"\"post_detail matches the expected context.\"\"\"\n response = self.guest_client.get(\n reverse(\"posts:post_detail\", kwargs={\"post_id\": self.post.id})\n )\n self.assertEqual(response.context.get(\"post\").group, self.post.group)\n self.assertEqual(response.context.get(\"post\").author, self.post.author)\n self.assertEqual(response.context.get(\"post\").text, self.post.text)\n\n def test_post_edit(self):\n \"\"\"post_edit matches the expected context.\"\"\"\n response = self.authorized_client.get(\n reverse(\"posts:post_edit\", kwargs={\"post_id\": self.post.id})\n )\n form_fields = {\n \"text\": forms.fields.CharField,\n \"group\": forms.models.ModelChoiceField,\n }\n for value, expected in form_fields.items():\n with self.subTest(value=value):\n form_field = response.context[\"form\"].fields[value]\n self.assertIsInstance(form_field, expected)\n\n def test_create(self):\n \"\"\"create matches the expected context.\"\"\"\n response = self.authorized_client.get(reverse(\"posts:post_create\"))\n form_fields = {\n \"text\": forms.fields.CharField,\n \"group\": forms.models.ModelChoiceField,\n }\n\n for value, expected in form_fields.items():\n with self.subTest(value=value):\n form_field = response.context[\"form\"].fields[value]\n self.assertIsInstance(form_field, expected)\n\n def test_group_test(self):\n \"\"\"Check that a post appears on the pages of its selected group.\"\"\"\n form_fields = {\n reverse(\"posts:index\"): Post.objects.get(group=self.post.group),\n reverse(\n \"posts:profile\", kwargs={\"username\": self.post.author}\n ): Post.objects.get(group=self.post.group),\n reverse(\n \"posts:group_progect\", kwargs={\"slug\": self.group.slug}\n ): Post.objects.get(group=self.post.group),\n }\n for value, expected in form_fields.items():\n with self.subTest(value=value):\n response = self.authorized_client.get(value)\n form_field = response.context[\"page_obj\"]\n self.assertIn(expected, form_field)\n\n def test_group_not_in_post(self):\n \"\"\"Check that the post did not end up in a group it was not intended for.\"\"\"\n form_fields = {\n reverse(\n \"posts:group_progect\", kwargs={\"slug\": self.group.slug}\n ): Post.objects.exclude(group=self.post.group),\n }\n for value, expected in form_fields.items():\n with self.subTest(value=value):\n response = self.authorized_client.get(value)\n form_field = response.context[\"page_obj\"]\n self.assertNotIn(expected, form_field)\n","repo_name":"nanotest676/5-sprint","sub_path":"yatube/posts/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":6066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"39793753999","text":"import os\nimport shutil\nfrom typing import Optional\n\nimport SimpleITK as 
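# Runner above is a template-method base class: run() drives the pipeline and a
# subclass must supply get_raw() (normalize()/save_raw() are optional overrides).
# A minimal hypothetical subclass showing the intended extension point:
import pandas as pd

class DemoRunner(Runner):  # Runner as defined in the record above
    def get_raw(self, **kwargs):
        # a real collector would download or scrape something here
        return [{'PDF': 'a.pdf', 'Company': 'Acme'}]

    def normalize(self, raw, **kwargs):
        return pd.DataFrame(raw)

# DemoRunner(project_name='demo').run() would write ../../output/demo/demo_<YYYYMMDD>.csv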
sitk\nimport numpy as np\nfrom skimage import measure, morphology\n\nfrom pipeline.constants import DirectoryStructure as ds, FileNames as fn\nfrom pipeline.logger import log\nfrom pipeline.util import sitk_largest_connected_components, sitk_bbox, sitk_extract_slice, convert_mm_to_voxels, \\\n largest_connected_components\n\n\ndef create_body_mask(n_otsu: int = 4, processed: bool = True) -> None:\n dir = ds.nifti if processed else ds.tmp_unprocessed\n vol_ip = sitk.ReadImage(dir.path(fn.ip.value))\n vol_mask_initial = sitk.ReadImage(dir.path(fn.mask.value))\n\n log.info('Calculating fat and water percentages')\n vol_ip = sitk.Mask(vol_ip, vol_mask_initial)\n vol_ip_otsu = sitk.OtsuMultipleThresholds(sitk.CurvatureFlow(vol_ip, numberOfIterations=8),\n numberOfThresholds=n_otsu, valleyEmphasis=True)\n if not os.path.exists(ds.tmp.value):\n os.makedirs(ds.tmp.value)\n sitk.WriteImage(vol_ip_otsu, ds.tmp.path('ip.otsu.nii.gz'))\n\n vol_fat = sitk.ReadImage(dir.path(fn.fat.value))\n vol_water = sitk.ReadImage(dir.path(fn.water.value))\n vol_fat_percent = sitk.Threshold(sitk.Mask(vol_fat / (vol_fat + vol_water), vol_ip_otsu))\n vol_water_percent = sitk.Threshold(sitk.Mask(vol_water / (vol_fat + vol_water), vol_ip_otsu))\n\n log.info('Creating binary mask for the whole body')\n vol_ip_percent = sitk.Mask(vol_fat_percent + vol_water_percent, vol_mask_initial)\n x, y, z = vol_ip_percent.GetSize()\n spacing = vol_ip_percent.GetSpacing()\n vol_mask_np = np.zeros((z, y, x), dtype='uint8')\n\n for s in range(z):\n img_mask_initial = sitk.Extract(vol_mask_initial, (x, y, 0), (0, 0, s))\n if np.sum(sitk.GetArrayFromImage(img_mask_initial)) < 64:\n continue\n img_ip_percent = sitk.Mask(sitk.Extract(vol_ip_percent, (x, y, 0), (0, 0, s)),\n sitk.BinaryErode(img_mask_initial, [3]*img_mask_initial.GetDimension()))\n img_add = sitk.ConnectedThreshold(img_ip_percent, [(1, 1)], upper=0.0) \\\n + sitk.ConnectedThreshold(img_ip_percent, [(x - 2, y - 2)], upper=0.0)\n img_mask = img_add <= 0\n if np.sum(sitk.GetArrayFromImage(img_mask)) < 1:\n continue\n vol_mask_np[s, :, :] = sitk.GetArrayFromImage(img_mask)\n\n vol_mask = sitk.GetImageFromArray(largest_connected_components(vol_mask_np).astype('uint8'))\n vol_mask.CopyInformation(vol_ip_percent)\n vol_mask = sitk.BinaryMorphologicalClosing(vol_mask, convert_mm_to_voxels((2.5, 2.5, 6), spacing))\n\n if processed:\n if not os.path.exists(ds.analysis.value):\n os.makedirs(ds.analysis.value)\n dir = ds.analysis if processed else ds.tmp_unprocessed\n sitk.WriteImage(vol_fat_percent, dir.path(fn.fat_percent.value))\n sitk.WriteImage(vol_water_percent, dir.path(fn.water_percent.value))\n sitk.WriteImage(vol_mask, dir.path(fn.body_mask.value))\n\n\ndef crop_body_mask(processed: bool = True, number_of_slices: int = 3) -> None:\n log.info(f'Cropping the body mask, top/bottom {number_of_slices} slices')\n if processed:\n dir = ds.analysis\n else:\n dir = ds.tmp_unprocessed\n file_name = dir.path(fn.body_mask.value)\n mask = sitk.ReadImage(file_name)\n shutil.move(file_name, ds.tmp.path(fn.body_mask.value.replace('.nii', '_original.nii')))\n x, y, z = mask.GetSize()\n zero_slices = sitk.Image(x, y, number_of_slices, mask.GetPixelID())\n mask = sitk.Paste(mask, zero_slices, zero_slices.GetSize(), destinationIndex=[0, 0, 0])\n mask = sitk.Paste(mask, zero_slices, zero_slices.GetSize(), destinationIndex=[0, 0, z - number_of_slices])\n sitk.WriteImage(mask, dir.path(fn.body_mask.value))\n\n\ndef create_left_right_mask(processed=True) -> None:\n mask = 
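# largest_connected_components() is imported from pipeline.util and not shown here;
# a plausible implementation keeps only the biggest foreground blob of a binary
# mask using skimage labelling:
import numpy as np
from skimage import measure

def largest_cc(mask_np):
    """Return a binary mask containing only the largest connected component."""
    labels = measure.label(mask_np > 0)
    if labels.max() == 0:  # empty mask: nothing to keep
        return np.zeros_like(mask_np)
    sizes = np.bincount(labels.ravel())
    sizes[0] = 0  # ignore the background label
    return (labels == sizes.argmax()).astype(mask_np.dtype)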
sitk.ReadImage(ds.nifti.path(fn.mask.value) if processed else ds.tmp_unprocessed.path(fn.mask.value))\n x, _, _ = mask.GetSize()\n boundary, _ = leg_boundary(mask)\n if not processed:\n sitk.WriteImage(boundary, ds.tmp_unprocessed.path('leg_boundary.nii.gz'))\n mask_left_right = {\n 'left': sitk.ConnectedThreshold(boundary, [(x - 1, 1, 1)], upper=0.0),\n 'right': sitk.ConnectedThreshold(boundary, [(1, 1, 1)], upper=0.0, replaceValue=2)\n }\n mask_left_right['right'] = mask_left_right['right'] + 2 * (boundary > 0)\n mask_left_right = mask_left_right['left'] + mask_left_right['right']\n if processed:\n if not ds.analysis.exists():\n os.makedirs(ds.analysis.value)\n sitk.WriteImage(mask_left_right, ds.analysis.path(fn.left_right_mask.value))\n if ds.analysis.exists(fn.body_mask.value):\n sitk.WriteImage(sitk.ReadImage(ds.analysis.path(fn.body_mask.value)) * mask_left_right,\n ds.analysis.path(fn.left_right_body_mask.value))\n else:\n sitk.WriteImage(mask_left_right, ds.tmp_unprocessed.path(fn.left_right_mask.value))\n if ds.tmp_unprocessed.exists(fn.body_mask.value):\n sitk.WriteImage(sitk.ReadImage(ds.tmp_unprocessed.path(fn.body_mask.value)) * mask_left_right,\n ds.tmp_unprocessed.path(fn.left_right_body_mask.value))\n\n\ndef leg_boundary(mask: sitk.Image, start: Optional[int] = None) -> (sitk.Image, int):\n mask = sitk.Cast(mask, sitk.sitkUInt8)\n x, y, z = mask.GetSize()\n spacing = mask.GetSpacing()\n\n img_midpoint_np = np.zeros((z, x), int)  # builtin int; the np.int alias was removed in NumPy 1.24\n midpoint = midpoint_previous = start\n for s in range(z):\n img_mask = sitk.BinaryErode(sitk.Extract(mask, [x, y, 0], [0, 0, int(s)]),\n convert_mm_to_voxels((2 * 2.5, 4 * 2.5), spacing[:2]),\n sitk.sitkBall, 0.0, 1.0, False)\n if np.sum(sitk.GetArrayFromImage(img_mask)) == 0:\n continue\n diff_rowsgt0 = np.diff((sitk.GetArrayFromImage(img_mask).sum(axis=0) > 0).astype('int'))\n if np.sum(diff_rowsgt0 > 0) == 1:\n img_mask = sitk.BinaryErode(sitk.Extract(mask, [x, y, 0], [0, 0, int(s)]),\n convert_mm_to_voxels((4 * 2.5, 8 * 2.5), spacing[:2]),\n sitk.sitkBall, 0.0, 1.0, False)\n diff_rowsgt0 = np.diff((sitk.GetArrayFromImage(img_mask).sum(axis=0) > 0).astype('int'))\n if np.sum(diff_rowsgt0 > 0) == 1:\n img_mask = sitk.BinaryErode(sitk.Extract(mask, [x, y, 0], [0, 0, int(s)]),\n convert_mm_to_voxels((8 * 2.5, 16 * 2.5), spacing[:2]),\n sitk.sitkBall, 0.0, 1.0, False)\n diff_rowsgt0 = np.diff((sitk.GetArrayFromImage(img_mask).sum(axis=0) > 0).astype('int'))\n if np.sum(diff_rowsgt0 > 0) == 1:\n img_mask = sitk.BinaryErode(sitk.Extract(mask, [x, y, 0], [0, 0, int(s)]),\n convert_mm_to_voxels((16 * 2.5, 32 * 2.5), spacing[:2]),\n sitk.sitkBall, 0.0, 1.0, False)\n diff_rowsgt0 = np.diff((sitk.GetArrayFromImage(img_mask).sum(axis=0) > 0).astype('int'))\n if np.sum(diff_rowsgt0 > 0) == 2:\n label_mask = measure.label(sitk.GetArrayFromImage(img_mask), connectivity=1)\n props_label_mask = measure.regionprops(label_mask)\n midpoint_new = (props_label_mask[0].centroid[1] + props_label_mask[1].centroid[1]) // 2\n if not midpoint:\n midpoint = int(midpoint_new)\n else:\n if midpoint_new > midpoint_previous:\n midpoint += 1\n elif midpoint_new < midpoint_previous:\n midpoint -= 1\n img_midpoint_np[s, midpoint] = 16\n midpoint_previous = midpoint\n else:\n log.debug(f'Leg boundary terminated at slice {s}')\n break\n img_midpoint_np[np.arange(s, z), midpoint] = 16\n vol_midpoint_np = np.stack((img_midpoint_np,) * y, axis=1)\n vol_midpoint_sitk = sitk.GetImageFromArray(vol_midpoint_np.astype('uint8'))\n vol_midpoint_sitk.CopyInformation(mask)\n\n return 
vol_midpoint_sitk, midpoint\n\n\ndef segment_visceral_fat(segmentation_directory: str, file_prefix: str = 'otsu_prob_argmax_',\n fat_threshold: float = 0.5) -> None:\n if os.path.exists(os.path.join(segmentation_directory, f'{file_prefix}abdominal_cavity.nii.gz')):\n abdominal_cavity = sitk.ReadImage(os.path.join(segmentation_directory, f'{file_prefix}abdominal_cavity.nii.gz'))\n else:\n log.error('Visceral fat segmentation failed, abdominal cavity not segmented!')\n return\n abdominal_cavity = sitk.Cast(abdominal_cavity, sitk.sitkUInt16)\n\n log.info('Visceral fat identified by isolating the abdominal cavity')\n fat_percent = sitk.Mask(sitk.ReadImage(ds.analysis.path(fn.fat_percent.value)),\n sitk.ReadImage(ds.analysis.path(fn.body_mask.value)))\n\n try:\n visceral_fat = sitk.Mask(abdominal_cavity, fat_percent > fat_threshold)\n except:\n log.warning('Segmentations shifted')\n origin_fat_percent = fat_percent.GetOrigin()\n abdominal_cavity.SetOrigin(origin_fat_percent)\n visceral_fat = sitk.Mask(abdominal_cavity, fat_percent > fat_threshold)\n\n log.debug('Identify and remove voxels with small signal intensity in the in-phase volume')\n ip = sitk.Mask(sitk.ReadImage(ds.nifti.path(fn.ip.value)), abdominal_cavity)\n ip10 = np.percentile(sitk.GetArrayFromImage(ip)[sitk.GetArrayFromImage(visceral_fat) > 0], 10)\n log.debug(f'10th percentile of the in-phase signal (abdominal cavity only) = {ip10:4.2f}')\n visceral_fat = sitk.MaskNegated(visceral_fat, sitk.Cast(ip <= ip10, sitk.sitkUInt16))\n\n log.debug('Removing smallest components from visceral fat')\n visceral_fat_rsc = morphology.remove_small_objects(sitk.GetArrayFromImage(visceral_fat), min_size=2 ** 10)\n visceral_fat = sitk.GetImageFromArray(visceral_fat_rsc)\n visceral_fat.CopyInformation(abdominal_cavity)\n visceral_fat = sitk.VotingBinaryIterativeHoleFilling(visceral_fat, (1, 1, 1), maximumNumberOfIterations=5)\n\n log.debug('Exclude tissue associated with abdominal organs')\n exclude_organs = {\n 'Liver': {'mask': f'{file_prefix}liver.nii.gz'},\n 'Spleen': {'mask': f'{file_prefix}spleen.nii.gz'},\n 'Left Kidney': {'mask': f'{file_prefix}kidney_left.nii.gz'},\n 'Right Kidney': {'mask': f'{file_prefix}kidney_right.nii.gz'},\n }\n for organ, fns in exclude_organs.items():\n if os.path.exists(os.path.join(segmentation_directory, fns['mask'])):\n exclude = sitk.ReadImage(os.path.join(segmentation_directory, fns['mask']))\n else:\n log.error(f'{organ} has not been segmented, visceral fat mask failed...')\n return\n if exclude:\n log.info(f'{organ} removed from the visceral fat mask')\n try:\n visceral_fat = sitk.MaskNegated(visceral_fat, sitk.Cast(exclude, sitk.sitkUInt16))\n except:\n log.info('Segmentations shifted')\n exclude.SetOrigin(fat_percent.GetOrigin())\n visceral_fat = sitk.MaskNegated(visceral_fat, sitk.Cast(exclude, sitk.sitkUInt16))\n sitk.WriteImage(visceral_fat, os.path.join(segmentation_directory, fn.visceral_fat_mask.value))\n\n\ndef segment_abdominal_subcutaneous_fat(segmentation_directory: str, file_prefix: str = 'otsu_prob_argmax_',\n fat_threshold: float = 0.7) -> None:\n if os.path.exists(os.path.join(segmentation_directory, f'{file_prefix}body_cavity.nii.gz')):\n body_cavity = sitk.ReadImage(os.path.join(segmentation_directory, f'{file_prefix}body_cavity.nii.gz'))\n else:\n log.error('Abdominal subcutaneous fat segmentation failed, body cavity not segmented!')\n return\n\n if os.path.exists(os.path.join(segmentation_directory, f'{file_prefix}abdominal_cavity.nii.gz')):\n abdominal_cavity = 
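# segment_visceral_fat() above drops voxels whose in-phase signal falls at or below
# the 10th percentile computed inside the current fat mask. The numpy core of that
# step, on plain arrays:
import numpy as np

def percentile_floor(signal, mask, q=10):
    """Zero out mask voxels whose signal is <= the q-th percentile inside the mask."""
    threshold = np.percentile(signal[mask > 0], q)
    return np.where(signal > threshold, mask, 0), threshold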
sitk.ReadImage(os.path.join(segmentation_directory, f'{file_prefix}abdominal_cavity.nii.gz'))\n else:\n log.error('Abdominal subcutaneous fat segmentation failed, abdominal cavity not segmented!')\n return\n\n log.info('Abdominal subcutaneous fat identified by excluding the body cavity')\n body_mask = sitk.ReadImage(ds.analysis.path(fn.body_mask.value))\n try:\n subcutaneous_fat = sitk.MaskNegated(body_mask, sitk.Cast(body_cavity, sitk.sitkUInt8))\n except:\n log.warning('Segmentations shifted')\n body_cavity.SetOrigin(body_mask.GetOrigin())\n subcutaneous_fat = sitk.MaskNegated(body_mask, sitk.Cast(body_cavity, sitk.sitkUInt8))\n\n fat_percent = sitk.Mask(sitk.ReadImage(ds.analysis.path(fn.fat_percent.value)), body_mask)\n subcutaneous_fat = sitk.VotingBinaryIterativeHoleFilling(sitk.Mask(subcutaneous_fat, fat_percent > fat_threshold),\n (1, 1, 1), maximumNumberOfIterations=5)\n x, y, z = subcutaneous_fat.GetSize()\n abdominal_mask = sitk.Image(x, y, z, subcutaneous_fat.GetPixelID())\n abdominal_mask.CopyInformation(subcutaneous_fat)\n _, _, z_min, _, _, z_max = sitk_bbox(abdominal_cavity)\n vol_zero = sitk.Image(x, y, int(z_max - z_min), subcutaneous_fat.GetPixelID())\n abdominal_mask = sitk.Paste(abdominal_mask, vol_zero + 1, vol_zero.GetSize(), destinationIndex=[0, 0, int(z_min)])\n abdominal_subcutaneous_fat = sitk.Mask(abdominal_mask, subcutaneous_fat)\n abdominal_subcutaneous_fat = sitk_largest_connected_components(abdominal_subcutaneous_fat)\n\n sitk.WriteImage(abdominal_subcutaneous_fat,\n os.path.join(segmentation_directory, fn.abdominal_subcutaneous_fat_mask.value))\n\n\ndef segment_multiecho_pancreas(organ_mask: str) -> None:\n if os.path.exists(organ_mask):\n vol, seg = sitk_extract_slice(sitk.ReadImage(ds.nifti.path(fn.pancreas_norm.value)),\n sitk.ReadImage(organ_mask),\n sitk.ReadImage(ds.nifti.path(fn.pancreas_mag.value)))\n sitk.WriteImage(vol, ds.analysis.path(fn.multiecho_pancreas_t1w.value))\n sitk.WriteImage(seg, ds.analysis.path(fn.multiecho_pancreas_mask.value))\n else:\n log.error('Mask for the pancreas is not found for this subject')\n\n\ndef extract_organ(organ: str, single_slice: str, segmentation_directory: str,\n file_prefix: str = 'otsu_prob_argmax_') -> None:\n if organ in ['liver', 'spleen', 'kidney_left', 'kidney_right']:\n ref_vol_3d_file_name = fn.water.value\n if not ds.nifti.exists(ref_vol_3d_file_name):\n log.error(f\"Reference volume '{single_slice}' not found\")\n return\n ref_vol_3d_in_2d_file_name = f'water.{organ}_from_{single_slice}.nii.gz'\n mask_2d_file_name = f'mask.{organ}_from_{single_slice}'\n elif organ == 'body':\n ref_vol_3d_file_name = fn.ip.value\n if not ds.nifti.exists(ref_vol_3d_file_name):\n log.error(f\"Reference volume '{ref_vol_3d_file_name}' not found\")\n return\n ref_vol_3d_in_2d_file_name = f'ip.{organ}_from_{single_slice}.nii.gz'\n mask_2d_file_name = f'mask.{organ}_from_{single_slice}.nii.gz'\n else:\n log.error(\"Only 'liver', 'spleen', 'kidney_left' 'kidney_right' organs\")\n return\n if single_slice in ['multiecho_pancreas', 'multiecho_liver', 'ideal_liver']:\n ref_vol_2d_file_name = f'{single_slice}_magnitude.nii.gz'\n if not ds.nifti.exists(ref_vol_2d_file_name):\n log.error(f\"Single-slice file '{single_slice}' not found\")\n return\n else:\n log.error(\"Only 'multiecho_pancreas', 'multiecho_liver' or 'ideal_liver' single-slice files\")\n return\n if organ == 'body':\n mask_3d_file_name = ds.analysis.path(fn.body_mask.value)\n else:\n mask_3d_file_name = os.path.join(segmentation_directory, 
f'{file_prefix}{organ}.nii.gz')\n    if not os.path.exists(mask_3d_file_name):\n        log.error(f\"Segmentation file '{mask_3d_file_name}' not found\")\n        return\n\n    vol, seg = sitk_extract_slice(sitk.ReadImage(ds.nifti.path(ref_vol_3d_file_name)),\n                                  sitk.ReadImage(mask_3d_file_name),\n                                  sitk.ReadImage(ds.nifti.path(ref_vol_2d_file_name)))\n    if organ == 'body':\n        seg = sitk_largest_connected_components(seg)\n    sitk.WriteImage(vol, os.path.join(segmentation_directory, ref_vol_3d_in_2d_file_name))\n    sitk.WriteImage(seg, os.path.join(segmentation_directory, mask_2d_file_name))\n","repo_name":"recoh/pipeline","sub_path":"pipeline/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":16681,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"18"}
+{"seq_id":"12450014367","text":"\"\"\"\nFinal project: classification\nmembers: Yuchen Yao, Jiahao Li, Nuozhou Tang, Qingxi Liu, Shiying Li\n\"\"\"\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.ensemble import VotingClassifier\nfrom nltk.corpus import stopwords\nfrom sklearn import svm\nfrom time import time\nimport nltk\nimport csv\nimport re\n\n\ndef loadData(files: list):\n    \"\"\"\n    read the reviews and their polarities from the given files\n    :param files: the paths of the review files\n    :return: train reviews, train labels, test reviews and test labels\n    \"\"\"\n    reviews, labels = [], []\n    for file in files:\n        f = open(file)\n        reader = csv.reader(f)\n        for line in reader:\n            review, tittle = line[0].strip().replace('\\n', ' ').replace('\\t', ''), line[1]\n            reviews.append(review.lower())\n            labels.append(int(float(tittle)))\n        f.close()\n    return reviews, labels\n\n\ndef Filter(reviews):\n    \"\"\"\n    decrease the dimension of the dataset\n    :param reviews: reviews from the dataset\n    :return: reviews without stop words\n    \"\"\"\n    ans = []\n    for review in reviews:\n        temp = []\n        review = re.sub(r'[^\\w\\s]', ' ', review)\n        review = re.sub('[^a-z]', ' ', review)\n        # re.I must be passed via flags=; the fourth positional argument of re.sub is count\n        review = re.sub('data sci[a-z]+', ' ', review, flags=re.I)\n        review = re.sub('data eng[a-z]+', ' ', review, flags=re.I)\n        review = re.sub('software eng[a-z]+', ' ', review, flags=re.I)\n        review = re.sub('\\[.*?\\]', '', review)\n        review = re.sub('https?://\\S+|www\\.\\S+', '', review)\n        review = re.sub('<.*?>+', '', review)\n        review = re.sub('\\n', '. 
', review)\n review = re.sub('\\w*\\d\\w*', '', review)\n review = re.sub(r'@[A-Za-z0-9]+', '', review)\n review = re.sub(r'#', '', review)\n review = re.sub(r'RT[\\s]+', '', review)\n review = re.sub(r'[^\\w]', ' ', review)\n\n ps = nltk.stem.porter.PorterStemmer()\n\n new_review = []\n for word in review.split():\n word = ps.stem(word)\n if word == '':\n continue # ignore empty words and stopwords\n else:\n new_review.append(word)\n temp.append(' '.join(new_review))\n ans += temp\n return ans\n\n\ndef vt(predictors, counts_val, counts_train, lab_train):\n \"\"\"\n Voting Classifier with different classification algorithms\n :param predictors: different classification algorithms\n :param counts_val: the transformed testing data\n :param counts_train: the transformed training data\n :param lab_train: the training labels\n :return: a array of predicted label\n \"\"\"\n VT = VotingClassifier(predictors, voting='hard')\n VT.fit(counts_train, lab_train)\n predicted = VT.predict(counts_val)\n return predicted\n\n\ndef lgr_classifier(counts_train, lab_train):\n \"\"\"\n Logistic regression classifier\n :param counts_train: the transformed training data\n :param lab_train: the training labels\n :return: An object for grid search\n \"\"\"\n clf = LogisticRegression(solver='liblinear')\n LGR_grid = [{'penalty': ['l1', 'l2'], 'C': [0.5, 1, 1.5, 2, 3, 5, 10]}]\n gridsearchLGR = GridSearchCV(clf, LGR_grid, cv=5)\n return gridsearchLGR.fit(counts_train, lab_train)\n\n\ndef rf_classifier(counts_train, lab_train):\n \"\"\"\n Random forest classifier\n :param counts_train: the transformed training data\n :param lab_train: the training labels\n :return: An object for grid search\n \"\"\"\n clf = RandomForestClassifier(random_state=150, max_depth=600, min_samples_split=160)\n RF_grid = [{'n_estimators': [50, 100, 150, 200, 300, 500, 800, 1200, 1600, 2100],\n 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2']}]\n gridsearchRF = GridSearchCV(clf, RF_grid, cv=5)\n return gridsearchRF.fit(counts_train, lab_train)\n\n\ndef knn_classifier(counts_train, lab_train):\n \"\"\"\n K-nearnest-neighbor classifier\n :param counts_train: the transformed training data\n :param lab_train: the training labels\n :return: An object for grid search\n \"\"\"\n clf = KNeighborsClassifier()\n KNN_grid = [{'n_neighbors': [1, 3, 5, 7, 9, 11, 13, 15, 17],\n 'weights': ['uniform', 'distance'], 'algorithm': ['auto', 'brute']}]\n gridsearchKNN = GridSearchCV(clf, KNN_grid, cv=5)\n return gridsearchKNN.fit(counts_train, lab_train)\n\n\ndef dt_classifier(counts_train, lab_train):\n \"\"\"\n Decision tree classifier\n :param counts_train: the transformed training data\n :param lab_train: the training labels\n :return: An object for grid search\n \"\"\"\n clf = DecisionTreeClassifier()\n DT_grid = [{'max_depth': [3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'criterion': ['gini', 'entropy'],\n 'splitter': ['best', 'random']}]\n gridsearchDT = GridSearchCV(clf, DT_grid, cv=5)\n return gridsearchDT.fit(counts_train, lab_train)\n\n\ndef nb_classifier(counts_train, lab_train):\n \"\"\"\n Naive Bayes classifier\n :param counts_train: the transformed training data\n :param lab_train: the training labels\n :return: An object for grid search\n \"\"\"\n clf = MultinomialNB()\n NB_grid = [{'alpha': [0.0001, 0.001, 0.01, 0.1, 0.8, 1, 10], 'fit_prior': [True, False]}]\n gridsearchNB = GridSearchCV(clf, NB_grid, cv=5)\n return gridsearchNB.fit(counts_train, lab_train)\n\n\ndef svm_classifier(counts_train, lab_train):\n \"\"\"\n Support Vector 
Machine classifier\n    :param counts_train: the transformed training data\n    :param lab_train: the training labels\n    :return: An object for grid search\n    \"\"\"\n    clf = svm.SVC()\n    SVM_grid = [{'C': [0.0001, 0.001, 0.01, 0.1, 0.8, 1, 10], 'kernel': ['linear', 'poly', 'rbf', 'sigmoid']}]\n    gridsearchSVM = GridSearchCV(clf, SVM_grid, cv=5)\n    return gridsearchSVM.fit(counts_train, lab_train)\n\n\ndef load_test(file):\n    \"\"\"\n    load the reviews from the test file\n    :param file: the path of the test file\n    :return: test reviews\n    \"\"\"\n    reviews = []\n    f = open(file)\n    reader = csv.reader(f)\n    for line in reader:\n        review = line[0].strip().split('\\t')\n        reviews.append(review[0].lower())\n    f.close()\n    return reviews\n\n\ndef write_test_file(file, labels):\n    \"\"\"\n    Write the predicted answers onto the test file\n    :param file:\n    :param labels:\n    :return:\n    \"\"\"\n    f = open(file)\n    reader = csv.reader(f)\n    des = []\n    for line in reader:\n        des.append(line[0])\n    f.close()\n\n    f = open(file, 'w')\n    writer = csv.writer(f)\n    for i in range(len(labels)):\n        if labels[i] == 1:\n            label = 'Data Scientist'\n        elif labels[i] == 2:\n            label = 'Software Engineer'\n        else:\n            label = 'Data Engineer'\n        writer.writerow([des[i], label])\n    f.close()\n\n\ndef test_case(test_file):\n    \"\"\"\n    test if this code could do the job, with less data and fewer models, so it runs faster\n    :param test_file:\n    :return:\n    \"\"\"\n    files = ['New+York_data+scientist.csv', 'SE_NY.csv', 'New+York_data+engineer.csv']\n\n    start = time()\n    print('start training...')\n\n    rev_train, lab_train = loadData(files=files)\n    rev_test = load_test(test_file)\n    print(f\"loading data finished, run time: {time() - start}\")\n\n    # remove the noise\n    rev_train = Filter(rev_train)\n    rev_test = Filter(rev_test)\n\n    # Build a counter based on the training dataset\n    counter = CountVectorizer(stop_words=stopwords.words('english'))\n    counter.fit(rev_train)\n\n    # count the number of times each term appears in a document and transform each doc into a count vector\n    counts_train = counter.transform(rev_train)  # transform the training data\n    counts_test = counter.transform(rev_test)  # transform the testing data\n\n    # fit the models\n    lgr_time = time()\n    lgr_classifier(counts_train, lab_train)\n    print(f\"Logistic regression finished, run time: {time() - lgr_time}\")\n\n    nb_time = time()\n    nb_classifier(counts_train, lab_train)\n    print(f\"Naive Bayes finished, run time: {time() - nb_time}\")\n\n    predictors = [('lreg', LogisticRegression()), ('nb', MultinomialNB())]\n\n    ans = vt(predictors, counts_test, counts_train, lab_train)\n    print(f\"type of model: {type(ans)}\\nmodel: \\n{ans}\")\n\n    write_test_file(test_file, labels=ans)\n    print(f\"all finished, run time: {time() - start}\")\n\n\ndef main(test_file):\n    # {city}_{tittle}.csv -- every training file must carry the .csv extension or loadData's open() will fail\n    files = ['New+York_data+scientist.csv', 'New+York_software+engineer.csv', 'New+York_data+engineer.csv',\n             'Seattle_data+scientist.csv', 'Seattle_software+engineer.csv', 'Seattle_data+engineer.csv',\n             'Palo+Alto_data+scientist.csv', 'Palo+Alto_software+engineer.csv', 'Palo+Alto_data+engineer.csv']\n\n    start = time()\n    print('start training...')\n\n    rev_train, lab_train = loadData(files=files)\n    rev_test = load_test(test_file)\n    print(f\"loading data finished, run time: {time() - start}\")\n\n    # remove the noise\n    rev_train = Filter(rev_train)\n    rev_test = Filter(rev_test)\n\n    # Build a counter based on the training dataset\n    counter = CountVectorizer(stop_words=stopwords.words('english'))\n    counter.fit(rev_train)\n\n    # count the number of times each term appears in a document and 
transform each doc into a count vector\n counts_train = counter.transform(rev_train) # transform the training data\n counts_test = counter.transform(rev_test) # transform the testing data\n\n # fit the models\n lgr_time = time()\n lgr_classifier(counts_train, lab_train)\n print(f\"Logistic regression finished, run time: {time() - lgr_time}\")\n\n rf_time = time()\n rf_classifier(counts_train, lab_train)\n print(f\"Random Forest finished, run time: {time() - rf_time}\")\n\n knn_time = time()\n knn_classifier(counts_train, lab_train)\n print(f\"KNN finished, run time: {time() - knn_time}\")\n\n dt_time = time()\n dt_classifier(counts_train, lab_train)\n print(f\"Decision tree finished, run time: {time() - dt_time}\")\n\n nb_time = time()\n nb_classifier(counts_train, lab_train)\n print(f\"Naive Bayes finished, run time: {time() - nb_time}\")\n\n svm_time = time()\n svm_classifier(counts_train, lab_train)\n print(f\"SVM finished, run time: {time() - svm_time}\")\n\n predictors = [('lreg', LogisticRegression()), ('rf', RandomForestClassifier()), ('knn', KNeighborsClassifier()),\n ('dt', DecisionTreeClassifier()), ('nb', MultinomialNB()), ('svm', svm.SVC())]\n\n ans = vt(predictors, counts_test, counts_train, lab_train)\n\n write_test_file(test_file, labels=ans)\n print(f\"all finished, run time: {time() - start}\")\n\n\nif __name__ == '__main__':\n test_file = 'test.csv' # put your test file here\n # test_case(test_file=test_file)\n main(test_file=test_file)\n","repo_name":"alvinyoo/BIA-660-WS","sub_path":"final project/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":10939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19172835388","text":"import pandas as pd\nfrom pandas import Series\nfrom pandas import DataFrame\nfrom pandas import concat\nfrom matplotlib import pyplot\nfrom statsmodels.tsa.ar_model import AR\nfrom sklearn.metrics import mean_squared_error as mse\n\ndata = pd.read_csv('./ECE180-final-project/Update/BTC_last_month_data.csv')\n# series = Series.from_csv('../CryptoCurrency/bitcoin.csv', index_col= 3)\nseries = data['close']\n# print(series)\n# split dataset\nX = series.values\n# print(type(X))\n# print(X[0])\n# train, test = X[1:len(X)-40], X[len(X)-40:]\n# # train autoregression\n# model = AR(train)\n# model_fit = model.fit()\n# window = model_fit.k_ar\n# coef = model_fit.params\n# # walk forward over time steps in test\n# history = train[len(train)-window:]\n# history = [history[i] for i in range(len(history))]\n# predictions = list()\n# for t in range(len(test)):\n# \tlength = len(history)\n# \tlag = [history[i] for i in range(length-window,length)]\n# \tyhat = coef[0]\n# \tfor d in range(window):\n# \t\tyhat += coef[d+1] * lag[window-d-1]\n# \tobs = test[t]\n# \tpredictions.append(yhat)\n# \thistory.append(yhat)\n# \tprint('predicted=%f, expected=%f' % (yhat, obs))\n\ndef AutoRegressive(data, testSize=30, test=True):\n\t# Autoregressive model used for time-series predictions\n\t# if test= True, then select the last testSize points as test set\n\t# else predict for a period of testSize\n\t# date = np.array(date)\n\tif test:\n\t\ttrainData = data[:-testSize]\n\t\ttestData = data[-testSize:]\n\telse:\n\t\ttrainData = data\n\n\tmodel = AR(trainData)\n\tmodelFit = model.fit()\n\twinSize, coeff = modelFit.k_ar, modelFit.params\n\n\tpredData = list(trainData[-winSize:])\n\tpred = []\n\tfor i in range(testSize):\n\t\tx = list(predData[-winSize:])\n\t\ty = coeff[0]\n\t\t# use 
winSize number of data to predict future value\n\t\tfor n in range(winSize):\n\t\t\ty += coeff[n + 1] * x[winSize - (n + 1)]\n\t\tif test:\n\t\t\t# use test data to predict future value\n\t\t\tpredData.append(testData[i])\n\t\telse:\n\t\t\t# use predicted value to predict future value\n\t\t\tpredData.append(y)\n\t\tpred.append(y)\n\n\tif test:\n\t\terror = mse(testData, pred)\n\t\treturn pred, error, testData\n\telse:\n\t\terror = None\n\t\treturn pred, error\n\npred, error, testData = AutoRegressive(X)\n# error = mean_squared_error(test, predictions)\nprint('Test MSE: %.3f' % error)\n# plot\npyplot.plot(testData)\npyplot.plot(pred, color='red', label = 'predict')\npyplot.legend()\npyplot.show()\n","repo_name":"michaelshum123/ECE180-final-project","sub_path":"Update/autoregressive.py","file_name":"autoregressive.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"14224806430","text":"import json\nimport os.path\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport torch\nfrom model import AlexNet\nimport torchvision.transforms as transforms\n\ndevice=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(\"using {} device\".format(device))\n\ndata_transform=transforms.Compose([\n transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))\n])\n\nimg=Image.open('./flower.jpeg')\nplt.imshow(img)\nimg=data_transform(img)\nimg=torch.unsqueeze(img,dim=0)\n\njson_path=\"./class_indices.json\"\nassert os.path.exists(json_path),\"file: '{}' does not exist\".format(json_path)\n\njson_file=open(json_path,\"r\")\nclass_indict=json.load(json_file)\n\nmodel=AlexNet(num_classes=5).to(device)\nweight_path=\"./AlexNet.pth\"\nassert os.path.exists(weight_path),\"file: '{}' does not exist\".format(weight_path)\nmodel.load_state_dict(torch.load(weight_path))\n\nmodel.eval()\nwith torch.no_grad():\n output=torch.squeeze(model(img))\n predict=torch.softmax(output,dim=0)\n predict_cla=torch.argmax(predict).numpy()\n print_res=\"class: {} prob:{:.3f}\".format(class_indict[str(predict_cla)],predict[predict_cla].numpy())\n plt.title(print_res)\n\n for i in range(len(predict)):\n print(\"class: {:10} prob: {:.3f}\".format(class_indict[str(i)],\n predict[i].numpy()))\n plt.show()\n","repo_name":"nuaa030710312/Image_Classification","sub_path":"pytorch/AlexNet/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"32946012164","text":"import re\nimport requests\nimport sys\nimport ssl\n\nimport xbmc\nimport xbmcaddon\n\ntry: # Python 3\n from http.server import BaseHTTPRequestHandler\nexcept ImportError: # Python 2\n from BaseHTTPServer import BaseHTTPRequestHandler\n\ntry: # Python 3\n from socketserver import TCPServer\nexcept ImportError: # Python 2\n from SocketServer import TCPServer\n\naddon = xbmcaddon.Addon(id='plugin.video.vikir')\n\nrequests.packages.urllib3.disable_warnings()\nrequests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL'\n\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n pass\nelse:\n ssl._create_default_https_context = _create_unverified_https_context\n\nPY3 = sys.version_info >= (3, 0, 0)\n\n\nclass SimpleHTTPRequestHandler(BaseHTTPRequestHandler):\n\n def do_GET(self):\n result = 
requests.get(url=self.path.split(\"url=\")[-1]).text\n newMPD = re.search(r'thumbnail_tile\".+?\\s*(.+?)<', result)\n if newMPD:\n self.path = newMPD.group((1))\n tempres = requests.get(url=self.path).text\n\n getSub = re.search(r\"thumbnail_tile.+?Representation>(.*)\", result, re.MULTILINE | re.DOTALL).group((1))\n data = re.findall('(.+?)<', tempres)\n for d in data:\n tempres = tempres.replace(d, \"https://m-content-viki.s.llnwi.net/\" + self.path.split(\"/\")[3] + \"/dash/\" + d)\n tempres = tempres.rsplit(\"\\n\", 4)[0]\n result = tempres + getSub\n\n self.send_response(200)\n self.end_headers()\n self.wfile.write(result.encode(\"utf-8\"))\n\n\naddress = '127.0.0.1' # Localhost\n\nport = 4920\n\nserver_inst = TCPServer((address, port), SimpleHTTPRequestHandler)\n# The follow line is only for test purpose, you have to implement a way to stop the http service!\nserver_inst.serve_forever()","repo_name":"Arias800/plugin.video.vikir","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"29799868693","text":"import os \n\n\n# ['n_jets', '\\\"jets multiplicity\\\"'],\n#['met', '\\\"met\\\"'],\nobservable = [\n ['m_dilep', '\\\"dilepton mass\\\"'],\n ['pt_lead', '\\\"pt leading lepton\\\"'],\n ['pt_sublead', '\\\"pt subleading lepton\\\"'],\n ['n_bjets', '\\\"b-jets multiplicity\\\"'],\n ['pt_elec', '\\\"pt electron\\\"'],\n ['pt_muon', '\\\"pt muon\\\"'],\n ['j1_pt', '\\\"pt leading jet\\\"'],\n ['b1_pt', '\\\"pt leading b-jet\\\"'],\n ['eta_elec', '\\\"eta electron\\\"'],\n ['eta_muon', '\\\"eta muon\\\"'],\n ['j1_eta', '\\\"eta leading jet\\\"'],\n ['b1_eta', '\\\"eta leading b-jet\\\"']\n]\n\nyear = [\n '2016',\n '2017'\n]\n\nfor y in year:\n for o in observable:\n cmd = 'python ./bin/data_mc_comparaison.py '+o[0]+' '+y+' '+o[1]\n os.system(cmd)","repo_name":"aureliencarle/pyplot-framework-DEPRECATED","sub_path":"scripts/control_plots.py","file_name":"control_plots.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36747633143","text":"from NNFullModel import *\nfrom models import *\nfrom lockdin_tools import *\nfrom setcreation import *\nfrom accuracy_calculator import *\nfrom featuremap import *\n\nimport argparse\nimport os\n\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torchsummary import summary\n#-----------------------------------------------------------------------------------------------------------# \n\ndef main():\n \n #--------------------- Hyperparameters ---------------------# \n batch_size = 30\n epochs = 50\n learning_rate = 0.01\n sample_rate = 1\n seed = 10\n \n #--------------------- Data Proccessing ---------------------#\n train_data, valid_data, test_data, overfit_data = setcreation(seed, batch_size)\n \n #--------------------- Model Initialization ---------------------#\n torch.manual_seed(seed)\n NN_model = CNN_2(15, 10)\n optimizer = torch.optim.SGD(NN_model.parameters(),lr=learning_rate)\n loss_function = torch.nn.MSELoss()\n acc = accuracy_calculator\n df = decision_function\n \n Full_Model = NNFullModel(NN_model, loss_function, optimizer, acc, decision_function)\n \n #--------------------- Running Training ---------------------# \n architecture = lockdin_tools(Full_Model)\n \n architecture.regular_training(epochs, train_data, valid_data, 
test_data, sample_rate)\n    \n    #--------------------- Display Results ---------------------# \n    \n    architecture.display_results()\n    architecture.confusion_matrix(test_data)\n    \n    #--------------------- Save Model ---------------------#\n    architecture.save_model()\n    # baseline, 100 epochs, 0.001 learning rate, batch size 15\n    # CNN1(40,25), 100 epochs, batch size 15\n    \n#main()\n\ndef extra():\n    batch_size = 30\n    epochs = 44\n    learning_rate = 0.01\n    sample_rate = 1\n    seed = 10\n    \n    train_data, valid_data, test_data, overfit_data = setcreation(seed, batch_size)\n    \n    model = torch.load('saved_models/CNN_2Layer_1.pt')\n    #feature_maps1(model)\n    #feature_maps2(model)\n    \n    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n    loss_function = torch.nn.BCELoss()\n    acc = accuracy_calculator\n    df = decision_function\n    \n    Full_Model = NNFullModel(model, loss_function, optimizer, acc, decision_function)\n    architecture = lockdin_tools(Full_Model)\n    architecture.print_incorrect_predictions(test_data)\n\n#extra()\n\ndef demo():\n    \n    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n    mydata = torchvision.datasets.ImageFolder('./finaldataset', transform=transform)\n    \n    for image, label in mydata:\n        img = image / 2 + 0.5\n        npimg = img.numpy()\n        plt.imshow(np.transpose(npimg, (1, 2, 0)))\n        NN_model = torch.load('saved_models/CNN_2Layer_1.pt')\n        output = NN_model(image.unsqueeze(0))\n        \n        print(output)\n\n        activation = {}\n        def get_activation(name):\n            def hook(model, input, output):\n                activation[name] = output.detach()\n            return hook\n\n        NN_model.conv2.register_forward_hook(get_activation('conv2'))\n        image.unsqueeze_(0)  # add a batch dimension in place before the forward pass\n        output = NN_model(image)\n        act = activation['conv2'].squeeze()\n        fig, axarr = plt.subplots(5, 2)\n        for j in range(0, 5):\n            for idx in range(0, 2):\n                axarr[j][idx].imshow(act[j*2+idx])\n        plt.show()\n    \ndemo()\n\n","repo_name":"ece324-2020/lockdin","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"29873459192","text":"from collections import deque\n\nN, K = map(int, input().split())\nck = [1] * 100001\nck[N] = 0\nqu = deque([(N, 0)])\nwhile qu:\n    n, sec = qu.popleft()\n    if n == K:\n        print(sec)\n        break\n\n    if n-1 >= 0 and ck[n-1]:\n        ck[n-1] = 0\n        qu.append((n-1, sec+1))\n    if n+1 < 100001 and K > n and ck[n+1]:\n        ck[n+1] = 0\n        qu.append((n+1, sec+1))\n    if n*2 < 100001 and K > n and ck[n*2]:\n        ck[n*2] = 0\n        qu.append((n*2, sec+1))\n","repo_name":"essk13/Algorithm","sub_path":"01_problem/python/2021/BAEKJOON/BAEKJOON_1697/1697_BAEKJOON.py","file_name":"1697_BAEKJOON.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"28123856295","text":"#!/usr/bin/env python3\n\n#standard library imports\nimport os\n#os allows for low level system commands\nimport zipfile\n#tools to create, read, write, append and list a zip file\n\n#function to search for all files in a directory and add them to our zip file\ndef zipdir(dirpath, zipfileobj):\n    \"\"\"does the work of writing data into our zipfile\"\"\"\n    #os.walk() returns a 3-tuple, or in layman's terms, 3 things\n    #always in the order root,dirs,files\n    for root,dirs,files in os.walk(dirpath):\n        for file in files:# we only want to loop across the file component\n            print(os.path.join(root,file))\n            zipfileobj.write(os.path.join(root, 
file))\n    return None #when we are done, no need to return any value\n\ndef main():\n    \"\"\"called at runtime\"\"\"\n    dirpath = input(\"What directory are we archiving today? \")\n\n    ## if the directory exists then we can start archiving\n    if os.path.isdir(dirpath):\n        zippedfn = input(\"What should we call the finished archive? \")\n        ##zippedfn is the zipped file for the archive: what we want to call it when it is done\n        with zipfile.ZipFile(zippedfn, \"w\", zipfile.ZIP_DEFLATED) as zipfileobj:\n            #create a zip file object -- we are making a new zip file\n            zipdir(dirpath, zipfileobj) #call to our function (see def zipdir above)\n    else:\n        print(\"Run the script again when you have a valid directory to zip.\\n\")\nmain()\n","repo_name":"AaronElizondo/Mycode","sub_path":"quickzippy/createarchive.py","file_name":"createarchive.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"43494960051","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSplit or filter a FASTA file.\n\"\"\"\n\nimport re\nimport io\nimport sys\nimport operator\nimport argparse\nimport itertools\nfrom pathlib import Path\n\nfrom tqdm import tqdm\nfrom nard.utils import FASTAReader\n\nclear = f\"\\r{100 * ' '}\\r\"\nFASTA_STOP_CODON = '*'\n\ndef _valid_condition(cond):\n    condition_structure = r\"(<=|>=|<|>)(\\d+)\"  # longest operators first, as a raw string\n    match = re.match(condition_structure, cond)\n    if match:\n        op, operand = match.groups()\n        return op, int(operand)\n    else:\n        raise ValueError(f\"Invalid condition: {cond}\")\n\ndef _construct_conditional(conditions, domain_file):\n    if domain_file is not None:\n        with open(domain_file, 'r') as domf:\n            domains = set(map(lambda line: line.strip(), domf))\n        def seq_id_is_good(seq_id):\n            return seq_id in domains\n    else:\n        def seq_id_is_good(seq_id):\n            return True\n\n    if not conditions:\n        def conditional(seq_id, sequence):\n            return seq_id_is_good(seq_id)\n    else:\n        op2op = {'>': operator.gt, '<': operator.lt, '>=': operator.ge, '<=': operator.le}\n        operations, operands = zip(*conditions)\n        operations = list(map(lambda op: op2op[op], operations))\n        operands = list(map(int, operands))\n        \n        def conditional(seq_id, sequence):\n            evaluated = [op(len(sequence), operand) for op, operand in zip(operations, operands)]\n            evaluated.append(seq_id_is_good(seq_id))\n            return all(evaluated)\n\n    return conditional\n\ndef arguments():\n    parser = argparse.ArgumentParser(description=__doc__)\n    parser.add_argument(\"-i\", help=\"Input filename\", type=Path, metavar=\"INPUT\", dest='input', required=True)\n    parser.add_argument(\"-o\", help=\"Output fasta file\", type=Path, metavar=\"OUTPUT\", dest='output')\n    parser.add_argument(\"-d\", help=\"Filter by provided domains\", dest='domain_file')\n    parser.add_argument(\"--split\", default=\" \", help=\"sequence header delimiter to split on\")\n    parser.add_argument(\"-s\", \"--include-stops\", help=\"Include sequences with stop codons\",\n                        default=False, action='store_true', dest='allow_stop_codons')\n    parser.add_argument(\"-v\", \"--verbose\", action='store_true', default=False, help=\"Verbose output\")\n    parser.add_argument(\"--assert\", dest='assertion',\n                        type=_valid_condition,\n                        nargs='+',\n                        help=\"Condition for sequences of the form '[>|<|>=|<=]\\\\d+'\",\n                        default=None)\n\n    args = parser.parse_args()\n    args.condition = _construct_conditional(args.assertion, args.domain_file)\n    \n    args.input = open(args.input, 'r')\n    args.output = sys.stdout if args.output is None else 
open(args.output, 'w')\n\n return args\n\nif __name__ == \"__main__\":\n args = arguments()\n spliterator = FASTAReader(args.input, preprocess_header=lambda h: h.lstrip(\">\").split(args.split)[0])\n if not args.allow_stop_codons:\n spliterator = itertools.filterfalse(lambda tup: FASTA_STOP_CODON in tup[1], spliterator)\n\n if args.verbose:\n spliterator = tqdm(spliterator, desc=\"filter-fasta\", ascii=True)\n\n total = 0\n satisfied = 0\n for header, sequence in spliterator:\n total += 1 \n if args.condition(header, sequence):\n record = f\">{header}\\n{sequence}\\n\"\n args.output.write(record)\n satisfied += 1\n\n args.output.close()\n \n print(f\"Extracted {satisfied}/{total} sequences\", file=sys.stderr)\n","repo_name":"djberenberg/useful_scripts","sub_path":"filter-fasta.py","file_name":"filter-fasta.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"31630846953","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCode Challenge\n Name: \n Exploratory Data Analysis - Automobile\n Filename: \n automobile.py\n Dataset:\n Automobile.csv\n Problem Statement:\n Perform the following task :\n 1. Handle the missing values for Price column\n 2. Get the values from Price column into a numpy.ndarray\n 3. Calculate the Minimum Price, Maximum Price, Average Price and Standard Deviation of Price\n\"\"\"\n\n# Importing pandas and numpy module\nimport pandas as pd\nimport numpy as np\n\n# Reading the csv file using pandas and storing it in a variable named df\ndf = pd.read_csv(\"Automobile.csv\")\n\n# Handling the missing values and filing them with the max value of that column\ndf = df.fillna(df.max())\n\n# getting the price and converting it into numpy array and storing it in price_arr\nprice_arr = np.array(df['price'])\n\n# Calculating the max, min ,average, standard deviation from price_arr\nmax_price = np.max(price_arr)\nmin_price = np.min(price_arr)\navg_price = np.mean(price_arr)\nstd_dev_price = np.std(price_arr)\n\n# Printing the prices\nprint(\"Maximum price:\", max_price)\nprint(\"Minimum price:\", min_price)\nprint(\"Average price:\", avg_price)\nprint(\"Standard Deviation of price:\", std_dev_price)\n","repo_name":"piyush546/Machine-Learning-Bootcamp","sub_path":"Data preprocessing and analytics/Numpy programs/Automobile.py","file_name":"Automobile.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"698232809","text":"import os\nfrom argparse import ArgumentParser\nfrom pprint import pprint\n\nfrom pytorch_lightning import LightningModule, Trainer\nfrom pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom pytorch_lightning.utilities.seed import seed_everything\n\nfrom src.model.efficient_el import EfficientEL\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n\n parser.add_argument(\"--dirpath\", type=str, default=\"models\")\n parser.add_argument(\"--save_top_k\", type=int, default=10)\n parser.add_argument(\"--seed\", type=int, default=0)\n\n parser = EfficientEL.add_model_specific_args(parser)\n parser = Trainer.add_argparse_args(parser)\n\n args, _ = parser.parse_known_args()\n pprint(args.__dict__)\n\n seed_everything(seed=args.seed)\n\n logger = TensorBoardLogger(args.dirpath, name=None)\n\n callbacks = [\n ModelCheckpoint(\n mode=\"max\",\n monitor=\"micro_f1\",\n 
dirpath=os.path.join(logger.log_dir, \"checkpoints\"),\n save_top_k=args.save_top_k,\n filename=\"model-epoch={epoch:02d}-micro_f1={micro_f1:.4f}-ed_micro_f1={ed_micro_f1:.4f}\",\n ),\n LearningRateMonitor(\n logging_interval=\"step\",\n ),\n ]\n\n trainer = Trainer.from_argparse_args(args, logger=logger, callbacks=callbacks)\n\n model = EfficientEL(**vars(args))\n\n trainer.fit(model)\n","repo_name":"nicola-decao/efficient-autoregressive-EL","sub_path":"scripts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"18"} +{"seq_id":"26116037512","text":"import requests\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef get_channel_Ids(youtube_url):\r\n try:\r\n response = requests.get(youtube_url)\r\n except Exception as e:\r\n print(e)\r\n return 'INVALID'\r\n if response.ok:\r\n doc = BeautifulSoup(response.text, 'html.parser')\r\n else:\r\n youtube_url = youtube_url.replace(\"user\", \"c\")\r\n response = requests.get(youtube_url)\r\n doc = BeautifulSoup(response.text, 'html.parser')\r\n channelId = doc.find('link', rel=re.compile(\"canonical\"))['href'].split('/')[-1]\r\n return channelId","repo_name":"mdFaizz/iNeuronYoutubeChallenge","sub_path":"Data_Collection/get_channel_Ids.py","file_name":"get_channel_Ids.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"26829234438","text":"# Exception Handling\n\n# Error:\n\n# Syntax Error and Exception is different\n\n# print(10 / 0) this is an exception\n\n\n'''try:\n\tn1 = int(input())\n\tn2 = int(input())\n\tprint(n1 / n2)\t# risky code\nexcept ZeroDivisionError:\n\tprint('Check It seems like zeor in denominator')\nexcept ValueError:\n\tprint('Check the numbers')\nprint('Hi')'''\n\n# Runtime Error --> Exception\n\n# In python, Everything is Object\n\n# whenever exception occurs, the corresponding exception object will give / throw to the end user\n\n# Object - memory reference of a class / instance of a class\n\n# 1. Every Exception in python is a class\n# 2. All Exception classes are child / Sub classes of BaseException class\n# 3. 
During runtime, if exception occurs, Python will throw us the exception class name and stops the program immediately / abruptly.\n\n\n# Exception Handling\n'''\ndef division():\n\ttry :\n\t\ta = int(input())\n\t\tb = int(input())\n\t\tprint(a / b)\n\texcept ValueError:\t# Handling code\n\t\tprint('Something went wrong')\n\t\t#division()\n\texcept:\n\t\tprint('An Error Occured')\n\t\t#division()\n\tfinally:\t\t# cleanup code\n\t\tprint('Check I am in finally')\n\t\t\n\ndivision()\n'''\n# Nested Exception Handling\ntry:\n\ta = int(input())\n\tb = int(input())\n\ttry:\n\t\tprint(a / b)\n\texcept ZeroDivisionError:\n\t\tprint('Zero is in the denominator')\n\tfinally:\n\t\tprint('Inner Finally')\nexcept ValueError:\n\tprint('Check the numbers')\nfinally:\n\tprint('Outer Finally')\n\n# Using else in try and except\nprint()\ntry:\n\tprint('Try Block')\n\tprint(int(input())/ int(input()))\nexcept:\n\tprint('Exception Occurs')\nelse:\t\t# Else part gets executed when no except\n\tprint('Else part')\nfinally:\n\tprint('In Finally')\n\t\n\t\n# User Defined Exception\n'''class InsufficientBalanceException(Exception):\t\n\tdef __init__(self):\n\t\tprint('Check your balacne')\n\t\nbalance = 1000\namount = int(input('Enter amount to withdraw:'))\nif amount > balance:\n\traise InsufficientBalanceException()\n\t\n'''\nclass InsufficientBalanceException(Exception):\t\n\tdef __init__(self, message):\n\t\t#self.msg = message\n\t\tpass\n\t\nbalance = 1000\namount = int(input('Enter amount to withdraw:'))\nif amount > balance:\n\traise InsufficientBalanceException('Check your balance')\n\t\n'''class InsufficientBalanceException(Exception):\t\n\tdef __init__(self, msg):\n\t\tself.msg = msg\n\ntry:\t\t\n\tbalance = 1000\n\tamount = int(input('Enter amount to withdraw:'))\n\tif amount > balance:\n\t\traise InsufficientBalanceException('Check your balance')\nexcept InsufficientBalanceException:\n\tprint('Insufficient Balance in your Account')\n\tprint('Enter an amount less than your balance in multiples of 100')'''","repo_name":"pravin-asp/Python-Learnings","sub_path":"ExceptionHandling.py","file_name":"ExceptionHandling.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10683929934","text":"builtins_whitelist = set((\n 'ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException', 'BlockingIOError', 'BrokenPipeError',\n 'BufferError', 'BytesWarning', 'ChildProcessError', 'ConnectionAbortedError', 'ConnectionError',\n 'ConnectionRefusedError', 'ConnectionResetError', 'DeprecationWarning', 'EOFError', 'Ellipsis', 'EnvironmentError',\n 'Exception', 'False', 'FileNotFoundError', 'FloatingPointError', 'FutureWarning', 'GeneratorExit', 'IOError',\n 'ImportError', 'ImportWarning', 'IndentationError', 'IndexError', 'InterruptedError', 'IsADirectoryError',\n 'KeyError', 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError', 'None', 'NotADirectoryError',\n 'NotImplemented', 'NotImplementedError', 'OSError', 'OverflowError', 'PendingDeprecationWarning', 'PermissionError',\n 'ProcessLookupError', 'RecursionError', 'ReferenceError', 'ResourceWarning', 'RuntimeError', 'RuntimeWarning',\n 'StopAsyncIteration', 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError',\n 'TimeoutError', 'True', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError',\n 'UnicodeError', 'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning', 
'ZeroDivisionError',\n    '__IPYTHON__', '__build_class__', '__debug__', '__doc__', '__import__', '__loader__', '__name__', '__package__',\n    '__spec__', 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',\n    'compile', 'complex', 'copyright', 'credits', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval',\n    'exec', 'filter', 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'help', 'hex', 'id',\n    'input', 'int', 'isinstance', 'issubclass', 'iter', 'len', 'license', 'list', 'locals', 'map', 'max', 'memoryview',\n    'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print', 'property', 'range', 'repr', 'reversed', 'round',\n    'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', 'vars', 'zip'\n))\n\ndef _safe_import(__import__, module_whitelist):\n    def safe_import(module_name, globals={}, locals={}, fromlist=[], level=-1):\n        if module_name in module_whitelist:\n            return __import__(module_name, globals, locals, fromlist, level)\n        else:\n            # raise ImportError(\"Blocked import of %s\" % (module_name,))\n            return __import__(module_name, globals, locals, fromlist, level)\n\n    return safe_import\n\n\nclass ReadOnlyBuiltins(dict):\n    # each mutator must raise the ValueError; merely constructing the exception has no effect\n    def clear(self):\n        raise ValueError(\"Read-Only\")\n\n    def __delitem__(self, key):\n        raise ValueError(\"Read-Only\")\n\n    def pop(self, key, default=None):\n        raise ValueError(\"Read-Only\")\n\n    def popitem(self):\n        raise ValueError(\"Read-Only\")\n\n    def setdefault(self, key, value):\n        raise ValueError(\"Read-Only\")\n\n    def __setitem__(self, key, value):\n        raise ValueError(\"Read-Only\")\n\n    def update(self, dict, **kw):\n        raise ValueError(\"Read-Only\")\n\nclass Sandbox(object):\n    def __init__(self):\n        import sys\n        from types import FunctionType\n        original_builtins = sys.modules[\"__main__\"].__dict__[\"__builtins__\"].__dict__\n        keys_to_delete = []\n        for builtin in original_builtins.keys():\n            if builtin not in builtins_whitelist:\n                # keys_to_delete.append(builtin)\n                continue\n        for key in keys_to_delete:\n            del original_builtins[key]\n        original_builtins[\"__import__\"] = _safe_import(__import__, [\"string\", \"re\"])\n        safe_builtins = ReadOnlyBuiltins(original_builtins)\n        sys.modules[\"__main__\"].__dict__[\"__builtins__\"] = safe_builtins\n        function_dict = {}\n        for name, value in vars(FunctionType).items():\n            function_dict[name] = value\n        attributes_to_remove = [\"__bases__\", \"__subclasses__\"]\n        for attr in attributes_to_remove:\n            function_dict.pop(attr, None)\n\n    def execute(self, code_string):\n        exec(code_string)\n","repo_name":"obekediamond/DIssertation_V1","sub_path":"quiz/sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"38428452743","text":"from .autogamer_bindings import *\n\n__all__ = [\n    \"Game\",\n    \"Level\",\n    \"PhysicsEngine\",\n    \"Entity\",\n    \"TileMap\",\n    \"CharacterSpritesheet\",\n\n    \"Shape\",\n    \"ShapeRect\",\n\n    \"CollisionGroups\",\n    \"GROUND_COLLISION_GROUPS\",\n    \"PLAYER_COLLISION_GROUPS\",\n    \"ENEMY_COLLISION_GROUPS\",\n\n    # Components\n    \"Player\",\n    \"Position\",\n    \"PhysicsBody\",\n    \"PhysicsCollider\",\n    \"Sprite\",\n    \"CharacterSprites\",\n    \"PlatformerControls\",\n    \"Health\",\n    \"ViewportTarget\",\n    
\"Wallet\",\n]\n","repo_name":"sunjay/autogamer","sub_path":"pyautogamer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"8341289635","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"Manage AWS CloudWatch Log Groups and Streams.\"\"\"\n\n# pylint: disable=invalid-name,dangerous-default-value\n\nfrom typing import Dict\n\nimport boto3\n\n\ndef create_cw_logs_group_stream(\n cw_logs_group_name: str, firehose_stream_name: str, aws_region: str\n) -> Dict:\n \"\"\"Create CloudWatch Logging Group.\"\"\"\n cw_logs_client = boto3.client(\"logs\", region_name=aws_region)\n cw_log_creation_response = cw_logs_client.create_log_group(\n logGroupName=cw_logs_group_name\n )\n cw_stream_creation_response = cw_logs_client.create_log_stream(\n logGroupName=cw_logs_group_name, logStreamName=firehose_stream_name\n )\n return [cw_log_creation_response, cw_stream_creation_response]\n\n\ndef delete_cw_log_group_stream(\n cw_log_group_name: str, firehose_stream_name: str, aws_region: str\n) -> Dict:\n \"\"\"Delete CloudWatch Logging Group and Stream.\"\"\"\n cw_logs_client = boto3.client(\"logs\", region_name=aws_region)\n cw_log_deletion_response = cw_logs_client.delete_log_group(\n logGroupName=cw_log_group_name\n )\n # cw_stream_deletion_response = cw_logs_client.delete_log_stream(\n # logGroupName=cw_log_group_name,\n # logStreamName=firehose_stream_name,\n # )\n # print(cw_log_deletion_response)\n # print(cw_stream_deletion_response)\n return {\n \"log_group\": cw_log_deletion_response,\n # \"log_stream\": cw_stream_deletion_response,\n }\n\n\ndef check_cw_log_group_deletion(cw_logs_group_name: str, aws_region: str):\n \"\"\"Verify Deletion of CloudWatch Logging Group.\"\"\"\n cw_logs_client = boto3.client(\"logs\", region_name=aws_region)\n # Get CW Log Groups\n cw_log_group_response = cw_logs_client.describe_log_groups(\n logGroupNamePrefix=cw_logs_group_name,\n )\n # Get CW Log Group Names\n cw_log_group_names = [\n cw_log_group_response_name[\"logGroupName\"]\n for cw_log_group_response_name in cw_log_group_response[\"logGroups\"]\n ]\n # Verify that deleted CW Log Group name is not in list of Log Group Names\n assert cw_logs_group_name not in cw_log_group_names\n\n\ndef check_cw_log_stream_deletion(\n cw_logs_group_name: str, firehose_stream_name: str, aws_region: str\n):\n \"\"\"Verify Deletion of CloudWatch Logging Stream.\"\"\"\n cw_logs_client = boto3.client(\"logs\", region_name=aws_region)\n # Get names of CW Log Streams in CW Log Group\n try:\n cw_log_stream_response = cw_logs_client.describe_log_streams(\n logGroupName=cw_logs_group_name,\n )\n # Get CW Log Stream Names\n cw_log_stream_names = [\n cwlog_stream_response_name[\"logStreamName\"]\n for cwlog_stream_response_name in cw_log_stream_response[\n \"logStreams\"\n ]\n ]\n msg_check = firehose_stream_name in cw_log_stream_names\n msg = (\n f\"Found streams [{', '.join(cw_log_stream_names)}] in CW log \"\n f\"group {cw_logs_group_name}. 
Specified stream \"\n f\"{firehose_stream_name} in Log Group = {msg_check}\"\n )\n except cw_logs_client.exceptions.ResourceNotFoundException:\n msg = f\"Did not find CloudWatch Log Group {cw_logs_group_name}\"\n # If CW Log Group is deleted, then CW Log Streams should not exist\n # and describing streams within the CW Log Group should raise an exception\n assert msg == f\"Did not find CloudWatch Log Group {cw_logs_group_name}\"\n","repo_name":"sparsh-ai/recohut","sub_path":"docs/12-capstones/other/topic-modeling-pipeline/src/cw/cloudwatch_logs.py","file_name":"cloudwatch_logs.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"18"} +{"seq_id":"8321566553","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport falcon\nimport pytest\nfrom falcon import testing\nfrom oas.exceptions import UnmarshalError\n\nfrom falcon_oas.problems import http_error_handler\nfrom falcon_oas.problems import Problem\nfrom falcon_oas.problems import serialize_problem\nfrom falcon_oas.problems import unmarshal_error_handler\nfrom falcon_oas.problems import UNMARSHAL_PROBLEM_TYPE_URI\n\n\ndef test_problem():\n title = 'title'\n description = 'description'\n code = 42\n http_error = falcon.HTTPBadRequest(\n title=title, description=description, code=code\n )\n problem = Problem.from_http_error(http_error)\n\n assert isinstance(problem, falcon.HTTPError)\n assert problem.status == falcon.HTTP_BAD_REQUEST\n assert problem.title == title\n assert problem.description == description\n assert problem.code == code\n assert problem.to_dict() == {\n 'title': title,\n 'status': 400,\n 'detail': description,\n 'code': code,\n }\n\n\ndef test_problem_to_dict_without_optional():\n http_error = falcon.HTTPBadRequest()\n problem = Problem.from_http_error(http_error)\n\n assert problem.to_dict() == {'title': 'Bad Request', 'status': 400}\n\n\ndef test_serialize_problem():\n environ = testing.create_environ()\n req = falcon.Request(environ)\n resp = falcon.Response()\n problem = Problem.from_http_error(falcon.HTTPBadRequest())\n\n serialize_problem(req, resp, problem)\n\n assert resp.data == b'{\"title\": \"Bad Request\", \"status\": 400}'\n assert resp.content_type == 'application/problem+json'\n assert resp.get_header('Vary') == 'Accept'\n\n\ndef test_serialize_problem_accept_json():\n environ = testing.create_environ(headers={'Accept': 'application/json'})\n req = falcon.Request(environ)\n resp = falcon.Response()\n problem = Problem.from_http_error(falcon.HTTPBadRequest())\n\n serialize_problem(req, resp, problem)\n\n assert resp.content_type == 'application/json'\n\n\ndef test_serialize_problem_accept_html():\n environ = testing.create_environ(headers={'Accept': 'text/html'})\n req = falcon.Request(environ)\n resp = falcon.Response()\n problem = Problem.from_http_error(falcon.HTTPBadRequest())\n\n serialize_problem(req, resp, problem)\n\n assert resp.content_type == 'application/json'\n\n\ndef test_http_error_handler():\n http_error = falcon.HTTPBadRequest()\n req = falcon.Request(testing.create_environ())\n resp = falcon.Response()\n params = {}\n\n with pytest.raises(Problem):\n http_error_handler(http_error, req, resp, params)\n\n\ndef test_unmarshal_error_handler():\n unmarshal_error = UnmarshalError()\n req = falcon.Request(testing.create_environ())\n resp = falcon.Response()\n params = {}\n\n with 
pytest.raises(Problem) as excinfo:\n unmarshal_error_handler(unmarshal_error, req, resp, params)\n\n assert excinfo.value.type_uri == UNMARSHAL_PROBLEM_TYPE_URI\n assert excinfo.value.status == falcon.HTTP_BAD_REQUEST\n assert excinfo.value.title == 'Unmarshal Error'\n assert excinfo.value.additional_members == unmarshal_error.to_dict()\n","repo_name":"grktsh/falcon-oas","sub_path":"tests/test_problems.py","file_name":"test_problems.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"42490957898","text":"import sqlalchemy as sa\nfrom sqlalchemy import orm\n\nfrom geojson import Feature\n\nfrom mapfishsample.model import meta\nfrom mapfish.sqlalchemygeom import Geometry\n\ndef init_model(engine):\n \"\"\"Call me before using any of the tables or classes in the model\"\"\"\n ## Reflected tables must be defined and mapped here\n #global reflected_table\n #reflected_table = sa.Table(\"Reflected\", meta.metadata, autoload=True,\n # autoload_with=engine)\n #orm.mapper(Reflected, reflected_table)\n\n sm = orm.sessionmaker(autoflush=True, autocommit=False, bind=engine)\n\n meta.engine = engine\n meta.Session = orm.scoped_session(sm)\n\n\nnodes_table = sa.Table('nodes2', meta.metadata,\n sa.Column('node_id', sa.types.Integer, primary_key=True),\n sa.Column('room', sa.types.String, unique=True),\n sa.Column('level', sa.types.Integer),\n sa.Column('geom', Geometry))\n\nclass Node(object):\n __table__ = nodes_table\n def toFeature(self):\n return Feature(id=int(self.node_id), geometry=self.geom,\n properties={'room': str(self.room), 'floor': str(self.level)})\n\norm.mapper(Node, nodes_table)\n\nlines_table = sa.Table('lines2', meta.metadata,\n sa.Column('gid', sa.types.Integer, primary_key=True),\n sa.Column('length', sa.types.Float),\n sa.Column('geom', Geometry))\n\nclass Line(object):\n def toFeature(self):\n return Feature(id=int(self.gid), geometry=self.geom,\n properties={'distance': float(self.length)})\n\norm.mapper(Line, lines_table)\n\n## Classes for reflected tables may be defined here, but the table and\n## mapping itself must be done in the init_model function\n#reflected_table = None\n#\n#class Reflected(object):\n# pass\n","repo_name":"mapfish/mapfish-archives","sub_path":"MapFishSample/mapfishsample/model/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"26293717485","text":"import os\nfrom subprocess import run\nimport sys\n\nfrom Bio import Seq, SeqIO, SeqRecord\n\ndirec = sys.argv[1]\n\nif direc[-1] != '/':\n direc += '/'\n\nif 'rocker' in direc:\n options = f'-A {direc}uniref.aln --notextw --noali -N 1 --cpu 20' \\\n ' -E 10000 --max --domE 10000 --incE 10000 --incdomE 10000'\n db = 'run_data/uniref100.fasta'\nelse:\n options = f'-A {direc}uniref.aln --notextw --noali -N 1 --cpu 20'\n db = 'run_data/uniref90.fasta'\n\ncmd = f'jackhmmer {options} {direc}ref_seq.fasta {db}'\nrun(cmd, shell=True)\n\nwith open(f'{direc}uniref.aln') as f:\n for line in f:\n if line[0] != '#' and len(line) > 10:\n _, ref = line.split()\n ref_ind = [i for i, c in enumerate(ref) if c.isupper()]\n break\n\n\ndef clean(seq):\n return ''.join(seq[i] for i in ref_ind)\n\n\ncov_cut = 0.5\nseqs = []\nwith open(f'{direc}uniref.aln') as f:\n for i, line in enumerate(f):\n if line[0] != '#' and len(line) > 10:\n name, seq = line.split()\n seq = clean(seq)\n cov = 1 - 
seq.count('-') / len(seq)\n            if cov > cov_cut and seq not in seqs:\n                seqs.append((name, seq))\n\n# add bounds to reference sequence\nref_name, ref_seq = seqs[0]\nref_name += f'/1-{len(ref_seq)}'\nseqs[0] = ref_name, ref_seq\n\nseqs = [SeqRecord.SeqRecord(Seq.Seq(seq), id=name, description='')\n        for name, seq in seqs]\n\nSeqIO.write(seqs, f'{direc}aln_filtered.fasta', 'fasta')\n\nos.remove(f'{direc}uniref.aln')\n","repo_name":"RomeroLab/PU-learning-paper-analysis","sub_path":"code/program_comparison/run_alignment.py","file_name":"run_alignment.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"38683515756","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Automatically generate mental-arithmetic practice problems\nimport random\n\n\n# Addition and subtraction\nquestion = []\ni=0\nMaxNum=10000\nMinNum=1\nwhile i < 1000 :\n    n1 = random.randint(MinNum,MaxNum)\n    n2 = random.randint(MinNum,MaxNum)\n    question.append((f\"{n1}+{n2}\",n1+n2))\n    question.append((f\"{n1}-{n2}\",n1-n2))\n    i += 2\n    print(\"*\",end=\"\")\n\n\n\n# Multiplication\n\ni=0\nMaxNum=1000\nMinNum=1\n\nwhile i < 1000 :\n    n1 = random.randint(MinNum,MaxNum)\n    n2 = random.randint(MinNum,MaxNum)\n    question.append((f\"{n1}×{n2}\",n1*n2))\n    i += 1\n    print(\"*\",end=\"\")\n\n\n# Division\ni=0\nMaxNum=10000\nMinNum=2\n# A composite number is an integer greater than 1 that is divisible not only by 1 and itself but also by some other number (0 excluded).\nnonPrimeNums=[(x,int(y/x),y) for y in range(MinNum,MaxNum) for x in range(2,y) if y%x==0]\ntimu=[]\nfor n1,n2,nonPrimeNumber in nonPrimeNums:\n    timu.append((f\"{nonPrimeNumber}÷{n1}\",n2))\nquestion=question+random.sample(timu,1000)\n\nprint()\nexerciseQuestion=random.sample(question,250)\ni = 0\nwhile i < len(exerciseQuestion):\n    print(f\"Question {i}: {exerciseQuestion[i][0]}\")\n    i += 1\n\ni = 0\nwhile i < len(exerciseQuestion):\n    print(f\"The answer to question {i} is: {exerciseQuestion[i][1]}\")\n    i += 1\n","repo_name":"spxai/interestingPython","sub_path":"src/learn-dd.py","file_name":"learn-dd.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"31075778731","text":"from data.data_utils import chord_to_binary, pitch_num_to_id_np, id_to_pitch_num_np, note_chord_to_midi\nfrom config import config, device, IDS, LEN\n\nimport numpy as np\nimport music21\nfrom music21 import harmony\nimport os\n\nfrom reward.reward import find_key_simple, find_key_simple_root_type\nfrom reward.chord2scale import get_scale_suggestions, UNKNOWN_KEY\nfrom reward.scales import PITCH_NUM_2_NAME\n\n\ndef chord_multi_hot_to_pitch_nums(chord_multi_hot):\n    pitch_nums = []\n    for i in range(len(chord_multi_hot)):\n        if chord_multi_hot[i]:\n            pitch_nums.append(i)\n    return pitch_nums\n\ndef rectify_scale(scale):\n    prev = 0\n    for i in range(len(scale)):\n        while scale[i] < prev:\n            scale[i] += 12\n        prev = scale[i]\n    return scale\n    \nclass ScalePolicyBasic:\n    def __init__(self):\n        self.reset()\n    \n    def reset(self):\n        self.this_chord_mem = []\n        self.next_chord_mem = []\n        self.key = UNKNOWN_KEY\n        self.scale_notes = [0]\n        self.scale_weights = [0]\n        self.scale_pointer = 0\n        self.octave_bias = 5*12\n        self.prev_progress_percent = 0\n        self.duration_id = config.duration_2_id_gen[12]\n        self.rest_id = pitch_num_to_id_np(-1)\n        self.direction = 1\n    \n    def forward(self, observation):\n        this_chord = list(observation[IDS[\"chord\"]])\n        next_chord = list(observation[IDS[\"next_chord\"]])\n        progress_percent = observation[IDS[\"progress_percent\"]]\n        if abs(progress_percent - self.prev_progress_percent) > 0.9:\n            self.reset()\n        
self.prev_progress_percent = progress_percent\n if len(this_chord) == 0 or sum(this_chord) == 0:\n return [self.rest_id, self.duration_id]\n if not (self.this_chord_mem == this_chord and self.next_chord_mem == next_chord):\n prev_scale_notes = self.scale_notes[:]\n \n self.direction = 1\n self.this_chord_mem = this_chord[:]\n self.next_chord_mem = next_chord[:]\n \n this_chord = chord_multi_hot_to_pitch_nums(this_chord)\n next_chord = chord_multi_hot_to_pitch_nums(next_chord)\n \n c1 = music21.chord.Chord(this_chord)\n root1 = c1.root().midi\n symbol1, type1 = harmony.chordSymbolFigureFromChord(c1, includeChordType=True)\n if len(next_chord) == 0:\n self.key = UNKNOWN_KEY\n else:\n c2 = music21.chord.Chord(next_chord)\n root2 = c2.root().midi\n symbol2, type2 = harmony.chordSymbolFigureFromChord(c2, includeChordType=True)\n self.key = find_key_simple_root_type(root1, type1, root2, type2)\n \n names, self.scale_weights, self.scale_notes = get_scale_suggestions(\n this_chord, self.key, root_midi_num = root1, chord_type = type1\n )\n self.scale_notes = rectify_scale(self.scale_notes[0])\n# prev_pitch = prev_scale_notes[self.scale_pointer%len(prev_scale_notes)]\n# prev_register = int(np.floor(self.scale_pointer/len(prev_scale_notes)))\n# self.scale_pointer = int(np.argmin(abs(np.array(self.scale_notes)-prev_pitch))) + len(self.scale_notes)*prev_register\n self.scale_pointer = 0\n \n octave_bonus = 12*int(np.floor(self.scale_pointer/len(self.scale_notes)))\n if octave_bonus >= 12:\n octave_bonus = 12\n self.direction = -1\n elif octave_bonus <= -12:\n octave_bonus = -12\n self.direction = 1\n pitch = self.octave_bias + self.scale_notes[self.scale_pointer%len(self.scale_notes)] + octave_bonus\n if pitch > config.pitch_max:\n pitch = config.pitch_max\n if pitch < config.pitch_min:\n pitch = config.pitch_min\n self.scale_pointer += 1 * self.direction\n return [pitch_num_to_id_np(pitch), self.duration_id]\n\n\ndef push_duration(this_duration, prev_durations):\n prev_durations[:-1] = prev_durations[1:]\n prev_durations[-1] = this_duration\n return prev_durations\n \nclass ScalePolicyMarkov:\n def __init__(self):\n self.reset()\n \n def reset(self):\n self.prev_progress_percent = 0\n self.this_chord_mem = []\n self.next_chord_mem = []\n self.key = UNKNOWN_KEY\n self.scale_notes = [0]\n self.scale_weights = [2]\n self.prev_pitch = 0\n self.direction = 1\n self.scale_pointer = 0\n self.octave_bias = 5*12\n self.prev_durations = np.ones(4, dtype=int) * 12\n self.duration_template = np.zeros(16, dtype=int)\n self.direction_template = np.zeros(16, dtype=int)\n self.step_template = np.zeros(16, dtype=int)\n self.template_pointer = 0\n self.using_template = False\n self.template_obtained = False\n self.piece_start = False\n self.rest_id = pitch_num_to_id_np(-1)\n \n def forward(self, observation):\n this_chord = list(observation[IDS[\"chord\"]])\n next_chord = list(observation[IDS[\"next_chord\"]])\n beat_pos = float(observation[IDS[\"beat_pos\"]])\n time_signature = config.id_2_time_signature[int(observation[IDS[\"time_signature\"]])]\n progress_percent = observation[IDS[\"progress_percent\"]]\n if abs(progress_percent - self.prev_progress_percent) > 0.9:\n self.reset()\n self.prev_progress_percent = progress_percent\n \n bar_length = time_signature[0]*4/time_signature[1]*24\n if time_signature[0]%4==0 or time_signature[0]%6==0:\n time_to_next_beat = round((0.5-beat_pos)%0.5*bar_length)\n elif time_signature[0]%3 == 0:\n time_to_next_beat = round((1/3-beat_pos)%(1/3)*bar_length)\n else:\n time_to_next_beat 
= round((1-beat_pos)%1*bar_length)\n \n # randomly determine the next duration\n if time_to_next_beat==12 and np.random.uniform() < 0.8:\n duration = 12\n elif time_to_next_beat==24 and np.random.uniform() < 0.5:\n duration = 24\n elif np.random.uniform() > 0.4:\n duration = self.prev_durations[-1]\n else:\n duration_id = np.random.randint(0, len(config.possible_durations_gen))\n duration = config.id_2_duration_gen[duration_id]\n \n # exception: no chord\n if len(this_chord) == 0 or sum(this_chord) == 0:\n duration = 12\n duration_id = config.duration_2_id_gen[duration]\n return [self.rest_id, duration_id]\n \n # decide the scale by chords\n if not (self.this_chord_mem == this_chord and self.next_chord_mem == next_chord):\n prev_scale_len = len(self.scale_notes[:])\n \n self.this_chord_mem = this_chord[:]\n self.next_chord_mem = next_chord[:]\n this_chord = chord_multi_hot_to_pitch_nums(this_chord)\n next_chord = chord_multi_hot_to_pitch_nums(next_chord)\n c1 = music21.chord.Chord(this_chord)\n root1 = c1.root().midi\n symbol1, type1 = harmony.chordSymbolFigureFromChord(c1, includeChordType=True)\n if len(next_chord) == 0:\n self.key = UNKNOWN_KEY\n else:\n c2 = music21.chord.Chord(next_chord)\n root2 = c2.root().midi\n symbol2, type2 = harmony.chordSymbolFigureFromChord(c2, includeChordType=True)\n self.key = find_key_simple_root_type(root1, type1, root2, type2)\n \n names, _, scale_notes = get_scale_suggestions(\n this_chord, self.key, root_midi_num = root1, chord_type = type1\n )\n self.scale_notes = rectify_scale(scale_notes[0])\n self.scale_weights = np.ones_like(self.scale_notes)\n self.chord_notes = c1.normalOrder\n for i in range(len(self.scale_notes)):\n if self.scale_notes[i]%12 in self.chord_notes:\n self.scale_weights[i] += 1\n self.scale_pointer = int(self.scale_pointer * len(self.scale_notes) / prev_scale_len)\n \n \n \n if self.using_template and self.template_obtained:\n duration = self.duration_template[self.template_pointer]\n self.direction = self.direction_template[self.template_pointer]\n step = self.step_template[self.template_pointer]\n self.template_pointer += 1\n if self.template_pointer == len(self.duration_template):\n self.using_template = False\n self.template_pointer = 0\n else:\n # randomly invert the direction\n if np.random.uniform() < 0.25:\n self.direction = -1 * self.direction\n if self.scale_pointer > len(self.scale_notes) and np.random.uniform() < 0.8:\n self.direction = -1\n elif self.scale_pointer < -len(self.scale_notes) and np.random.uniform() < 0.8:\n self.direction = 1\n \n # randomly determine the step of next note\n tmp_rand = np.random.uniform()\n if tmp_rand > 0.6:\n step = 1\n elif tmp_rand > 0.2:\n step = 2\n elif tmp_rand > 0.1:\n step = 3\n else:\n step = 0\n \n if self.template_obtained:\n beat_pos_good = (beat_pos < 0.001)\n if np.random.uniform() > 0.8 and beat_pos_good:\n self.using_template = True\n self.template_pointer = 0\n \n self.scale_pointer += step * self.direction\n \n # consider bounds of pitches\n octave_bonus = 12*int(self.scale_pointer/len(self.scale_notes))\n if octave_bonus > 12:\n self.direction = -1\n self.scale_pointer -= self.scale_pointer%len(self.scale_notes) - 1\n elif octave_bonus < -12:\n self.direction = 1\n self.scale_pointer += len(self.scale_notes) - self.scale_pointer%len(self.scale_notes) + 1\n \n # consider the important notes\n if np.random.uniform()<0.5 or abs(beat_pos)<=0.01 or progress_percent >= 0.95:\n note_weight = self.scale_weights[self.scale_pointer%len(self.scale_notes)]\n while note_weight < 
max(self.scale_weights):\n                if self.direction == -1:\n                    self.scale_pointer += -1\n                else:\n                    self.scale_pointer += 1\n                note_weight = self.scale_weights[self.scale_pointer%len(self.scale_notes)]\n\n        # log the rhythm template and scale template\n        if beat_pos <= 0.001:\n            self.piece_start = True\n        if self.piece_start and not self.template_obtained:\n            self.duration_template[self.template_pointer] = duration\n            self.direction_template[self.template_pointer] = self.direction\n            self.step_template[self.template_pointer] = step\n            self.template_pointer += 1\n            if self.template_pointer == len(self.duration_template):\n                self.template_obtained = True\n\n        # finally decide the pitch and duration\n        octave_bonus = 12*int(np.floor(self.scale_pointer/len(self.scale_notes)))\n        pitch = self.octave_bias + self.scale_notes[self.scale_pointer%len(self.scale_notes)] + octave_bonus\n        if pitch > config.pitch_max:\n            pitch = config.pitch_max\n        if pitch < config.pitch_min:\n            pitch = config.pitch_min\n        self.prev_pitch = pitch\n        self.prev_durations = push_duration(duration, self.prev_durations)\n        duration_id = config.duration_2_id_gen[duration]\n        return [pitch_num_to_id_np(pitch), duration_id]\n\n\npolicies = []\ndef serial_scale_policy(observations, mode = \"basic\"):\n    N_observation = len(observations)\n    if N_observation != len(policies):\n        for i in range(N_observation):\n            if mode==\"basic\":\n                policies.append(ScalePolicyBasic())\n            elif mode==\"markov\":\n                policies.append(ScalePolicyMarkov())\n    actions = [0]*N_observation\n    for i in range(N_observation):\n        actions[i] = policies[i].forward(observations[i])\n    return np.array(actions)\n\ndef serial_scale_markov_policy(observations):\n    return serial_scale_policy(observations, mode = \"markov\")\n\ndef serial_scale_basic_policy(observations):\n    return serial_scale_policy(observations, mode = \"basic\")\n\ndef scale_markov_policy(observation):\n    return serial_scale_policy([observation], mode = \"markov\")[0]\n\ndef scale_basic_policy(observation):\n    return serial_scale_policy([observation], mode = \"basic\")[0]\n","repo_name":"lucainiaoge/Chord-conditioned-Melody-Generation-Using-RL---Minimalist-Model","sub_path":"rule_based_policy.py","file_name":"rule_based_policy.py","file_ext":"py","file_size_in_byte":12808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"7252985403","text":"from django.urls import path\n\nfrom api.post.views import (\n    PostListCreateView,\n    PostRetrieveUpdateDestroyView,\n    PostListMeView,\n    PostRetrieveMeView,\n    PostCommentListCreateView,\n    PostListMeLikesView,\n    PostLikeView,\n)\n\nurlpatterns = [\n    path(\"\", PostListCreateView.as_view()),\n    path(\"<uuid:uuid>/\", PostRetrieveUpdateDestroyView.as_view(lookup_field=\"uuid\")),\n    path(\"me/\", PostListMeView.as_view()),\n    path(\"me/<uuid:uuid>/\", PostRetrieveMeView.as_view(lookup_field=\"uuid\")),\n    path(\"<uuid:uuid>/comment/\", PostCommentListCreateView.as_view()),\n    path(\"me/liked/\", PostListMeLikesView.as_view()),\n    path(\n        \"<uuid:uuid>/like/\",\n        PostLikeView.as_view(lookup_field=\"uuid\"),\n        {\"will_like\": True},\n    ),\n    path(\n        \"<uuid:uuid>/unlike/\",\n        PostLikeView.as_view(lookup_field=\"uuid\"),\n        {\"will_like\": False},\n    ),\n]\n","repo_name":"theyoungastronauts/houston-old","sub_path":"service/api/post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"17884984439","text":"from specialpoints import Specialpoints
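\n# Euler line check: the centroid G divides segment HO with HG : GO = 2 : 1, so the ratios printed below should come out to 2/3 and 2.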
\nimport math\n\ndef distance_two_point(point1, point2):\n    x_square = (point2[0] - point1[0]) ** 2\n    y_square = (point2[1] - point1[1]) ** 2\n    return math.sqrt(x_square + y_square)\n\npoints = [[-3, 1], [2, 2], [-3, 15]]\n\nspecial_points = Specialpoints(points)\n\nH = special_points.orthorcenter()\nO = special_points.circumcenter()\nG = special_points.centroid()\n\nif H is not None and O is not None and G is not None:\n    print(f\"The orthocenter H: {H}\\nThe circumcenter O: {O}\\nThe centroid G: {G}\")\n    HG = distance_two_point(H, G)\n    HO = distance_two_point(H, O)\n    GO = distance_two_point(G, O)\n    print(HG / HO)  # expected: 2/3\n    print(HG / GO)  # expected: 2.0\n","repo_name":"nhh979/Hi_Im_newbie","sub_path":"Triangle_special_points/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"34836135190","text":"# don't forget the dp[i-1] case (skipping the current wine entirely)\nimport sys\nsys.setrecursionlimit(100001)\n\ndef Solve(n,wines):\n    dp = []\n    if n == 1:\n        return print(wines[-1])\n    if n == 2:\n        return print(sum(wines))\n    dp.append(wines[0]);dp.append(wines[0]+wines[1])\n    if n >= 3:\n        for i in range(2,n):\n            if i == 2:\n                dp.append(max(dp[1],dp[0]+wines[2],wines[1]+wines[2]))\n            else:\n                # three cases: skip wine i-2 and drink i-1 and i; skip wine i-1 and drink i; or skip wine i\n                dp.append(max(dp[i-3]+wines[i-1]+wines[i],dp[i-2]+wines[i],dp[i-1]))\n    return print(max(dp))\n\ndef Input():\n    n = int(input())\n    wine = []\n    for i in range(n):\n        wine.append(int(input()))\n\n    return n, wine\n\n\nnumber, wines = Input()\nSolve(number,wines)","repo_name":"doublejy715/Problem-Solve","sub_path":"BaekJoon_Online/Dynamic_Programing/No_2156.py","file_name":"No_2156.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"9666208786","text":"\"\"\"\nNetwork Profile implements the Profile class.\nThis class provides all the members and functions necessary to\nmodel, compose, and analyze network profiles for applications\nand systems.\n\"\"\"\n\nimport copy, sys\nimport utils\nfrom collections import OrderedDict\nfrom decimal import *\n\nclass Profile:\n    \"\"\"\n    Profile contains the information about a single network profile.\n    A network profile has a kind (e.g. 'provided'), a period (in seconds),\n    and lists of relevant data vs. time series (e.g. bandwidth, latency, data, etc.).\n    \"\"\"\n\n    #: Separates fields in a line in a profile file\n    field_delimeter = ','\n    #: Denotes headers (profile properties) in a profile file\n    header_delimeter = '#'\n    #: Denotes comments in the profile file\n    comment_delimeter = '%'\n    #: Splits lines in a profile file\n    line_delimeter = '\\n'\n    #: Strip lines starting with these delimeters to get just profile data\n    special_delimeters = [header_delimeter, comment_delimeter]\n    #: Which profiles are interpolated between points\n    interpolated_profiles = ['data','latency']\n\n    def __init__(self, kind = None, period = 0, priority = 0,\n                 node = 0, flow_type = None, num_periods = 1, sender_names = []):\n        \"\"\"\n        :param string kind: what kind of profile is it?\n        :param double period: what is the periodicity (in seconds) of the profile\n        :param int priority: what is the priority of the flow in the system\n        :param int node: the ID of the node on which this profile originates\n        :param string flow_type: the flow type used to match sender and receiver profiles\n        :param int num_periods: how many periods of the profile to consider\n        :param list sender_names: the names of the sender profiles associated with this profile\n        \"\"\"\n        self.kind = kind #: The kind of this profile, e.g. 'required'
\n        self.period = period #: The length of one period of this profile\n        self.priority = priority #: The priority of the profile; relevant for 'required' profiles\n        self.node_id = node #: The node ID which is the source of this profile\n        self.flow_type = flow_type #: This flow is the receiver for which sender flows?\n        self.entries = OrderedDict() #: Dictionary of 'type name' : 'list of [x,y] points' k:v pairs\n\n    def ParseHeader(self, header):\n        \"\"\"\n        Parses information from the profile's header if it exists:\n\n        * period\n        * priority\n        * node ID\n        * flow_type (for matching senders <--> receivers)\n        * profile kind (provided, required, receiver, output, leftover)\n\n        A profile header is at the top of the file and has the following syntax::\n\n            # <property name> = <value>\n\n        \"\"\"\n        if header:\n            for line in header:\n                line = line.strip('#')\n                prop, value = line.split('=')\n                if \"period\" in prop:\n                    self.period = Decimal(value)\n                elif \"priority\" in prop:\n                    self.priority = int(value)\n                elif \"node ID\" in prop:\n                    self.node_id = value.strip()\n                elif \"flow type\" in prop:\n                    self.flow_type = value.strip()\n                elif \"kind\" in prop:\n                    self.kind = value.strip()\n\n    def ParseFromFile(self, prof_fName):\n        \"\"\"\n        Builds the entries from a properly formatted CSV file.\n        Internally calls :func:`Profile.ParseFromString`.\n        \"\"\"\n        prof_str = None\n        try:\n            with open(prof_fName, 'r+') as f:\n                prof_str = f.read()\n        except:\n            print >> sys.stderr, \"ERROR: Couldn't find/open {}\".format(prof_fName)\n            return -1\n        if prof_str is None:\n            return -1\n        return self.ParseFromString(prof_str)\n\n    def ParseFromString(self, prof_str):\n        \"\"\"\n        Builds the entries from a string (a newline-delimited list of CSV lines,\n        each formatted as per :func:`ParseEntriesFromLine`).\n        \"\"\"\n        if not prof_str:\n            print >> sys.stderr, \"ERROR: String contains no profile spec!\"\n            return -1\n        lines = prof_str.split(self.line_delimeter)\n        header = [l for l in lines if self.header_delimeter in l]\n        self.ParseHeader(header)\n        p = copy.copy(lines)\n        for s in self.special_delimeters:\n            p = [l for l in p if s not in l]\n        for line in p:\n            if self.ParseEntriesFromLine(line):\n                return -1\n        self.EntriesRemoveDegenerates()\n        self.EntriesStartFill()\n        return 0\n\n    def ParseEntriesFromLine(self, line_str):\n        \"\"\"\n        Builds the [time, value] list for each type of value into entries:\n\n        * slope\n        * max slope\n        * latency\n\n        These values are formatted in the csv as::\n\n