\n\nThis module is an implementation of the K-means algorithm, to compare it with our\nimplementation of the DBSCAN one.\n\"\"\"\n\n# -- Imports\nimport sys\nimport random\nimport operator\nfrom pandas import DataFrame\nfrom pathlib import Path\nfrom datas import (read_dataset, dataframe_to_points, display_clusters, Center,\n Cluster)\n\n\n# -- Classes\nclass Kmean(object):\n \"\"\"The class representation of our implementation of Kmean.\"\"\"\n\n def __init__(self, dataset, k, precision=1):\n \"\"\"Initialization function, called when creating a new object.\"\"\"\n # Type checking the dataset\n if not isinstance(dataset, DataFrame) or dataset.empty:\n raise TypeError(\n 'Dataset given to Kmean class has to be a non-empty '\n 'pandas.DataFrame instance'\n )\n\n # If asking for more clusters than the number of points (rows)\n if k > len(dataset):\n raise ValueError(\n 'k cannot be greater than the number of points (> %d)' % len(dataset)\n )\n\n # Initialize private attributes\n self._k = k\n self._precision = precision\n self._points = []\n self._clusters = []\n self._neighbour_counter = {}\n\n # Create the Point objects from the DataFrame one\n self._points = dataframe_to_points(dataset)\n\n # Initialize the neighbour counter\n for point in self._points:\n self._neighbour_counter[point] = 0\n\n # DEBUG: Display initial state of the algorithm\n # display_clusters(self._clusters, self._points)\n\n def _turn(self):\n \"\"\"Run a turn of the algorithm till we reach the convergence point.\"\"\"\n # Variable set to False only so that we enter the loop the first time\n converged = False\n nb_loop = 0\n\n # While we still haven't reached the point of convergence\n while not converged:\n\n # DEBUG: Display the state at each loop\n # display_clusters(self._clusters)\n\n # Put the converged value back to True, if a point changes its\n # cluster, we will know that we still haven't converged\n converged = True\n\n # For every point (we assume that they are already into a cluster)\n for p in self._points:\n\n # The closest is the current cluster of the point\n closest = p.cluster\n curr_dist = p.dist(closest.center)\n\n # Parse all the other clusters\n for cluster in self._clusters:\n\n # If one is closer than the current one\n if p.dist(cluster.center) < curr_dist:\n closest = cluster\n curr_dist = p.dist(closest.center)\n\n # If the closest cluster is different than the current one,\n # assign this point to this cluster and we know that we still\n # haven't converged\n if p.cluster != closest:\n closest.assign(p)\n converged = False\n\n # Reassign the center of the clusters\n self._update_cluster_center()\n\n # Simple counter\n nb_loop += 1\n\n # Return the number of loops that this turn took\n return nb_loop\n\n def run(self):\n \"\"\"Run the algorithm 'precision' times.\"\"\"\n # Do 'precision' turns\n nb_loop = 0\n for _ in range(self._precision):\n\n # Initialization with random centers\n self._initialization()\n\n # Execute the turn and count its number of loops\n nb_loop += self._turn()\n\n # Count the number of neighbour points of each point\n self._count_neighbours()\n\n # Execute the last turn with optimized centers\n opt_loop = self._optimized_turn()\n\n # At the end, print the final convergence time\n print('%d, %d, %d' % (self._k, nb_loop // self._precision, opt_loop))\n\n # Display the final state of the clusters\n display_clusters(self._clusters)\n # for c in self._clusters:\n # print(c)\n\n def _optimized_turn(self):\n \"\"\"Optimized turn to get the 'best' centers for clusters.\"\"\"\n # Get k points with the max neighbours which will make better centers\n best_centers = []\n for i in range(self._k):\n\n # Get the point with the maximum neighbours (better center)\n new_max_point = max(\n self._neighbour_counter.items(),\n key=operator.itemgetter(1)\n )[0]\n\n # For every point in the cluster of the maximum one, reset its\n # counter so that we do not select two centers from the same cluster\n cluster = new_max_point.cluster\n # closest = cluster.points[0]\n closest = new_max_point\n for point in cluster.points:\n # if point.dist(cluster.center) < closest.dist(cluster.center):\n # closest = point\n self._neighbour_counter[point] = 0\n\n # Just add the created center into the center list\n best_centers.append(Center(i, closest.x, closest.y))\n\n # Clear the clusters\n self._clear_clusters()\n\n # Create the clusters with their optimized centers\n for center in best_centers:\n c = Cluster()\n c.center = center\n self._clusters.append(c)\n\n # Assign each point to its closest cluster\n self._assign_point_to_closest_cluster()\n\n # Reassign the center of the clusters\n self._update_cluster_center()\n\n # Execute the final and optimized turn and count its number of loops\n return self._turn()\n\n def _count_neighbours(self):\n \"\"\"Count the number of neighbours of each point.\"\"\"\n for point in self._points:\n self._neighbour_counter[point] += len(point.cluster.points)\n\n def _initialization(self):\n \"\"\"Initialization part of the algorithm.\n\n Note that the cluster centers are initialized from randomly chosen\n points and that every point is then assigned to its nearest cluster.\n \"\"\"\n # Clear the clusters\n self._clear_clusters()\n\n # Initialize the clusters\n self._init_clusters()\n\n # Assign each point to its closest cluster\n self._assign_point_to_closest_cluster()\n\n # Reassign the center of the clusters\n self._update_cluster_center()\n\n def _update_cluster_center(self):\n \"\"\"Update each cluster's center.\"\"\"\n # Update the center of each cluster if there are points into it\n for cluster in self._clusters:\n\n # Get the number of points into this cluster\n nb_points = len(cluster.points)\n if nb_points > 0:\n\n # TODO: Update the way of getting sums and centers for 3D points\n\n # Add all x and y values of each point of this cluster\n x_sum, y_sum = 0, 0\n for point in cluster.points:\n x_sum += point.x\n y_sum += point.y\n\n # Reassign the center of this cluster by getting the mean\n cluster.center.x = x_sum / nb_points\n cluster.center.y = y_sum / nb_points\n\n # DEBUG: Display the new centers approximations\n # print(\n # 'center.x=%s and center.y=%s' %\n # (cluster.center.x, cluster.center.y)\n # )\n\n def _clear_clusters(self):\n \"\"\"Clear the clusters between each turn.\"\"\"\n for point in self._points:\n point.cluster = None\n self._clusters.clear()\n\n def _init_clusters(self):\n \"\"\"Initialize the clusters.\"\"\"\n # Select randomly k points and put them as cluster centers\n for index in range(self._k):\n\n # Select a random point\n random_point = random.choice(self._points)\n\n # TODO: Update what is needed for 3D centers using 3D points\n\n # Create a new cluster with this random point as its center\n c = Cluster()\n c.center = Center(index, random_point.x, random_point.y)\n self._clusters.append(c)\n\n def _assign_point_to_closest_cluster(self):\n \"\"\"Assign each point to its closest cluster.\"\"\"\n for p in self._points:\n\n # The closest is the first cluster in the list (for the moment)\n closest = self._clusters[0]\n curr_dist = p.dist(closest.center)\n\n # Parse all the other clusters\n for cluster in self._clusters[1:]:\n\n # If one is closer than the current one\n if p.dist(cluster.center) < curr_dist:\n closest = cluster\n curr_dist = p.dist(closest.center)\n\n # Assign this point to its closest cluster\n closest.assign(p)\n\n\n# -- Private functions\ndef __get_params(argv):\n \"\"\"Function to manage input parameters.\"\"\"\n # Correct syntax\n syntax = '%s filename k [precision]' % argv[0]\n\n # Not enough parameters\n if len(argv) not in (3, 4):\n print('Usage: %s' % syntax)\n exit()\n\n # Get the parameter k\n try:\n k = int(argv[2])\n if k < 1:\n raise ValueError\n except ValueError:\n print(\n 'Parameter k (%s) is invalid, it must be a positive integer'\n % argv[2]\n )\n exit()\n\n # Get the filename after checking that the file exists and is a .csv\n f = Path(argv[1])\n if not f.is_file() or f.suffix != '.csv':\n print('The file %s was not found' % argv[1])\n exit()\n\n # Get the precision value\n try:\n precision = int(argv[3])\n if precision < 1:\n raise ValueError\n except IndexError:\n precision = 1\n except ValueError:\n print(\n 'Parameter precision (%s) is invalid, it must be a positive integer'\n % argv[3]\n )\n exit()\n\n # Return the parameters\n return argv[1], k, precision\n\n\nif __name__ == \"__main__\":\n \"\"\"Main function to be launched when this script is called.\"\"\"\n\n # -- Normal operation\n # Get parameters and execute the K-means algorithm\n dataset, k, precision = __get_params(sys.argv)\n Kmean(read_dataset(dataset), k, precision).run()\n\n # -- Convergence measurement gives 3 columns csv file\n # => (k | normal convergence time | optimized version convergence time)\n # datasets = [\n # 'carnet2.csv',\n # 'carnet_bis.csv',\n # 'circles.csv',\n # 'density_gap.csv',\n # 'example.csv',\n # 'stats_reseaux_ping_download.csv'\n # ]\n #\n # from contextlib import redirect_stdout\n # for ds in datasets:\n # with open('../Report/convergences/' + ds, 'w') as f:\n # with redirect_stdout(f):\n # print('k, convergence_time')\n # try:\n # for k in range(1, 100):\n # Kmean(read_dataset('../datasets/' + ds), k).run()\n # except ValueError:\n # pass\n","repo_name":"tandriamil/clustering-based-ids","sub_path":"src/kmean.py","file_name":"kmean.py","file_ext":"py","file_size_in_byte":11302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
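The k sanity check in the record above hinges on a pandas subtlety: DataFrame.size counts cells (rows x columns), while len(df) counts rows, i.e. points. A minimal, self-contained illustration (assuming only pandas is installed):

```python
import pandas as pd

# A frame of 3 two-dimensional points: 3 rows, 2 columns.
df = pd.DataFrame({'x': [0, 1, 2], 'y': [3, 4, 5]})

print(df.size)  # 6 -> rows * columns, NOT the number of points
print(len(df))  # 3 -> number of rows, i.e. the number of points
```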
+{"seq_id":"6169882868","text":"#!/usr/bin/python2\n\n__version__ = \"$Id:$\"\n__docformat__ = \"reStructuredText\"\n\nimport sys,math\n\nsys.dont_write_bytecode = 1\n\nimport pygame\nfrom pygame.locals import *\nfrom pygame.color import *\n\nimport pymunk\nfrom pymunk.vec2d import Vec2d\nfrom pymunk.pygame_util import draw, from_pygame, to_pygame\n\nfrom scene import *\nfrom entities import *\nfrom actor import *\nfrom damage import *\n\n# --------------------------------------------------------\n\nwidth, height = 700,400\nfps = 60\ndt = 1./fps\n\n# --------------------------------------------------------\n\ndef main():\n ### PyGame init\n pygame.init()\n screen = pygame.display.set_mode((width,height))\n\n clock = pygame.time.Clock()\n running = True\n font = pygame.font.SysFont(\"Arial\", 16)\n\n ### Physics stuff\n space = pymunk.Space()\n space.gravity = 0,-1000\n # box walls\n static = [pymunk.Segment(space.static_body, (10, 50), (690, 50), 5)\n , pymunk.Segment(space.static_body, (690, 50), (690, 370), 5)\n , pymunk.Segment(space.static_body, (690, 370), (10, 370), 5)\n , pymunk.Segment(space.static_body, (10, 370), (10, 50), 5)\n ]\n\n for s in static:\n s.friction = 1.\n s.group = 1\n s.elasticity = 0.9\n\n space.add(static)\n space.elasticIterations = 10\n\n scene = SceneGraph(screen, space)\n\n body = Actor()\n body.position = 100,100\n\n ball = Ball()\n ball.position = 150,90\n\n scene.register(ball)\n scene.register(body)\n\n\n while running:\n screen.fill(pygame.color.THECOLORS[\"black\"])\n\n scene.update(dt)\n\n for event in pygame.event.get():\n if event.type == QUIT or \\\n event.type == KEYDOWN and (event.key in [K_ESCAPE, K_q]):\n running = False\n else:\n body.controller.onEvent(event)\n\n scene.draw(screen)\n screen.blit(font.render(\"fps: \" + str(clock.get_fps()), 1, THECOLORS[\"white\"]), (0,0))\n screen.blit(font.render(\"%d%%\" % ball.damage, 1, THECOLORS[\"white\"]), (600,0))\n pygame.display.flip()\n\n space.step(dt)\n clock.tick(fps)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"isovector/smashball","sub_path":"smashball.py","file_name":"smashball.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"11656476814","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom blog.forms import *\nfrom django.http import HttpResponseRedirect\nfrom blog.models import Post, Comment\n# Create your views here.\n@login_required\ndef create_post(request):\n if request.method == 'POST':\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return HttpResponseRedirect('/')\n else:\n form = PostForm()\n return render(request,\n 'post.html',\n {'form': form}\n )\n\n@login_required\ndef post_page(request, id):\n try:\n post = Post.objects.get(pk=id)\n comments = post.comments.all()\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.com = post\n comment.save()\n\n else:\n form = CommentForm()\n return render(request,\n 'post_page.html',\n {'form': form, 'post': post, 'comments':comments}\n )\n\n except Exception as e:\n print(e)\n return render(request,\n 'post_page.html',\n {'alert': \"Такого поста нет\"}\n )\n\n\n@login_required\ndef feed(request):\n posts = Post.objects.all()\n print(posts)\n return render(request, 'posts.html', {'posts':posts})\n","repo_name":"Archelunch/tasks","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"19905773241","text":"# importing required packages\n'''\nHow to initiate bokeh server using this file:\n\ntype bokeh serve --port 5002 widgets_part2.py in your terminal\nThen you may go to the FirstStop landing page to click the bidding price link\n\n'''\n\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import layout\nfrom bokeh.models.widgets import Button, TextInput, Paragraph, Div\nimport numpy as np\n\nLOGO = Div(text=\"\"\"
\"\"\")\n\n# Create Input Controls\nLISTPRICE = TextInput(title=\"enter list price/predict price here ($)\")\nZIPCODE = TextInput(title=\"enter zipcode here\")\n\nBUTTON_1 = Button(label=\"Submit\")\nBUTTON_2 = Button(label=\"Reset\")\nOUTPUT = Paragraph(width=600, height=300) #or use pretext, for a tag in html\n\nHOTTEST = [98004, 98006, 98007, 98008, 98112, 98033, 98034, 98039, 98040,\n 98052, 98053, 98074, 98075, 98077, 98103, 98112, 98177, 98115, 98117]\nMEDIUM_HOT = [98001, 98005, 98023, 98027, 98028, 98029, 98056, 98059, 98105,\n 98107, 98116, 98118, 98119, 98122, 98125, 98133, 98155, 98199]\n\ndef bidding_price(zipcode, list_price):\n \"\"\"\n This function implements a mathematical model to calculate bidding price of a house\n :param zipcode: Zipcode of house entered by user\n :param list_price: List price of house entered by user\n :return: returns the estimated bidding price\n \"\"\"\n if zipcode in HOTTEST:\n add_price = (np.random.randint(12, 18, None, int)/100)*list_price\n bid_price = list_price + add_price\n elif zipcode in MEDIUM_HOT:\n add_price = (np.random.randint(5, 10, None, int)/100)*list_price\n bid_price = list_price + add_price\n else:\n add_price = (np.random.randint(5, 10, None, int) / 100) * list_price\n bid_price = list_price - add_price\n\n return bid_price\n\ndef submit():\n \"\"\"\n these are made up coefficients for now\n \"\"\"\n value = bidding_price(float(ZIPCODE.value), float(LISTPRICE.value))\n OUTPUT.text = 'Your suggested bidding price is: ' + str(int(value)) + ' $'\n\ndef reset():\n \"\"\"\n This function resets the output\n \"\"\"\n OUTPUT.text = None\n\nBUTTON_1.on_click(submit)\nBUTTON_2.on_click(reset)\n\nLAY_OUT = layout(children=[[LOGO], [LISTPRICE, ZIPCODE], [BUTTON_1], [BUTTON_2], [OUTPUT]],\n sizing_mode='fixed')\ncurdoc().add_root(LAY_OUT)\ncurdoc().title = \"Predict the bidding price of your first home\"\n","repo_name":"sliwhu/UWHousingTeam","sub_path":"UWHousingTeam/Scripts/part2_bid_price.py","file_name":"part2_bid_price.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
+{"seq_id":"1406078477","text":"class Solution(object):\n \n # brute force: O(N) time, O(1) space, TLE\n def checkPerfectNumber(self, num):\n \"\"\"\n :type num: int\n :rtype: bool\n \"\"\"\n sum, i = 0, 1\n while i < num:\n sum += i if num % i == 0 else 0\n i += 1\n return sum == num\n \n # better brute force: O(sqrt(N)) time, O(1) space, TLE\n def checkPerfectNumber(self, num):\n if num == 1: return False\n sum, i = 1, 2\n while i ** 2 <= num:\n if num % i == 0:\n sum += (num / i) + i\n i += 1\n return sum == num\n ","repo_name":"haomingchan0811/Leetcode","sub_path":"507. Perfect Number.py","file_name":"507. Perfect Number.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"27638896538","text":"#!/usr/bin/env python\n# @Date : 2020-07-04\n# @Author : Bright Li (brt2@qq.com)\n# @Link : https://gitee.com/brt2\n# @Version : 0.1.3\n\nimport os\nimport shutil\nfrom pathlib import Path\nimport json\nimport xmlrpc.client\nfrom time import sleep\n\nfrom fmt_md import MarkdownFormatter, format_one_doc\nfrom db_mgr import DocumentsMgr\n\ntry:\n from utils.log import getLogger\nexcept ImportError:\n from logging import getLogger\nlogger = getLogger()\n\n\nTIME_FOR_FREQUENCE_LIMIT = 5\nTESTING = False\nif TESTING:\n print(\"\\n\" + \"#\"*49)\n print(\"注意:当前为模拟上传环境\")\n print(\"#\"*49 + \"\\n\")\n\n\nclass PostidNotUnique(Exception):\n \"\"\" 获取到postid不唯一,可能是存在同名title的文档 \"\"\"\n\nclass CnblogManager:\n def __init__(self, path_cnblog_account):\n self.dict_conf = {\n # \"blog_url\": \"\",\n # \"blog_id\" : \"\",\n # \"app_key\" : \"\",\n # \"user_id\" : \"\",\n # \"username\": \"\",\n # \"password\": \"\",\n # \"repo_dir\": \"\"\n }\n self.load_cnblog_conf(path_cnblog_account)\n self.cnblog_server = xmlrpc.client.ServerProxy(self.dict_conf[\"blog_url\"])\n self.mime = None\n\n self.md_fmt = MarkdownFormatter()\n self.md_fmt.set_ignore_websites([\"cnblogs.com/blog/\" + self.dict_conf[\"user_id\"]])\n\n repo_dir = self.get_repodir()\n assert os.path.isabs(repo_dir), \"[repo_dir]必须为绝对路径\"\n assert repo_dir, \"请先为配置文件指定操作的repo目录...\"\n self.db_mgr = DocumentsMgr(repo_dir)\n\n def get_repodir(self):\n repo_dir = self.dict_conf[\"repo_dir\"]\n if isinstance(repo_dir, dict):\n from platform import system\n repo_dir = repo_dir[system()]\n return repo_dir\n\n def load_cnblog_conf(self, path_conf):\n with open(path_conf, \"r\") as fp:\n dict_conf = json.load(fp)\n for key, value in dict_conf.items():\n self.dict_conf[key] = value\n\n # def load_repo_conf(self, path_conf):\n\n def get_postid(self, path=None, title=None):\n # if path.isdecimal():\n # return path # just the postid\n if path:\n if os.path.abspath(path):\n path = os.path.relpath(path, self.get_repodir())\n return self.db_mgr.get_postid_by_path(path)\n elif title:\n return self.db_mgr.get_postid_by_title(title)\n\n def get_user_info(self):\n \"\"\" return a list of user-info \"\"\"\n user_info = self.cnblog_server.blogger.getUsersBlogs(\n self.dict_conf[\"blog_url\"],\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"])\n return user_info\n\n def pull_img(self, path_md):\n self.md_fmt.load_file(path_md)\n\n if self.md_fmt.get_images(\"http\"):\n self.md_fmt.download_img()\n\n def _upload_img(self, path_img):\n if TESTING:\n return \"https://img2020.cnblogs.com/blog/2039866/202005/2039866-20200525195318772-1131646535.jpg\"\n\n file_name = os.path.basename(path_img)\n # from download_img_link import format_ext\n # file_name = format_ext(file_name)\n _, suffix = os.path.splitext(file_name)\n\n try:\n type_ = self.mime[suffix]\n except KeyError:\n logger.error(f\"未定义的扩展名类型【{suffix}】,使用默认值'image/jpeg'\")\n type_ = \"image/jpeg\"\n\n with open(path_img, 'rb') as fp:\n file = {\n \"bits\": fp.read(),\n \"name\": file_name,\n \"type\": type_\n }\n url_new = self.cnblog_server.metaWeblog.newMediaObject(\n self.dict_conf[\"blog_id\"],\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"],\n file)\n return url_new[\"url\"]\n\n def _load_mime(self):\n with open(\"mime.json\", \"r\") as fp:\n self.mime = json.load(fp)\n\n def _new_blog(self, struct_post):\n if TESTING: # 模拟博客上传\n postid = \"12960953\"\n else:\n postid = self.cnblog_server.metaWeblog.newPost(\n self.dict_conf[\"blog_id\"],\n 
self.dict_conf[\"username\"],\n self.dict_conf[\"password\"],\n struct_post, True)\n print(f\">> 完成blog的上传:【{postid}】\")\n self.db_mgr.add_doc(self.md_fmt, str(postid))\n\n def _repost_blog(self, postid, struct_post):\n \"\"\" 重新发布 \"\"\"\n if TESTING: # 模拟博客上传\n status = True\n else:\n status = self.cnblog_server.metaWeblog.editPost(\n postid,\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"],\n struct_post, True)\n print(f\">> 完成blog的更新:【{status}】\")\n self.db_mgr.modify_doc(self.md_fmt)\n\n def _is_article(self, path_md):\n abspath_article = os.path.join(self.db_mgr.repo_dir, self.db_mgr.data[\"dir_article\"])\n return path_md.find(abspath_article) >= 0\n\n def _update_categories(self, path_md):\n assert os.path.isabs(path_md)\n assert path_md.find(os.path.abspath(self.db_mgr.repo_dir)) == 0\n\n # 通过相对路径\n def get_categories(key_dirname):\n # path_dir = Path(os.path.dirname(path_md)).as_posix()\n path_parts = Path(os.path.dirname(path_md)).parts # tuple\n assert key_dirname in path_parts, f\"Error: {key_dirname} not in {path_parts}\"\n index = path_parts.index(key_dirname)\n return list(path_parts[index +1:])\n\n # categories = get_categories(article_dirname if self._is_article(path_md) else essay_dirname)\n categories = get_categories(self.db_mgr.data[\"dir_essay\"])\n if self.md_fmt.metadata[\"categories\"] != categories:\n self.md_fmt.metadata[\"categories\"] = categories\n self.md_fmt.update_meta()\n return True\n else:\n return False # 无需更新\n\n def _rebuild_images(self, path_md):\n dir_img = path_md[:-3] # 同名文件夹\n has_dir = os.path.exists(dir_img)\n\n md_parser = self.md_fmt\n\n # 上传图片\n dict_images_relpath = md_parser.get_images(\"local\", force_abspath=False)\n if not has_dir:\n assert not dict_images_relpath, f\"Markdown文档引用的图像未存储在同名文件夹下: {dict_images_relpath}\"\n md_parser.unlock_text()\n return False\n\n # 删除未被引用的(多余)图像\n list_dir = os.listdir(dir_img)\n dict_images_backup = md_parser.get_images(\"backup\", force_abspath=False)\n dict_images_local = {**dict_images_relpath, **dict_images_backup}\n if not dict_images_local:\n md_parser.unlock_text()\n logger.warning(f\"Markdown文档并未引用本地图像,同名dir内容如下: {list_dir}\")\n if input(\"是否清除同名文件夹? 
[Y/n]: \").lower() != \"n\":\n shutil.rmtree(dir_img)\n logger.warning(f\"已清除未引用文件夹:【{dir_img}】\")\n return False\n\n set_redundant = set(list_dir) - {os.path.basename(i) for i in dict_images_local.values()}\n str_redundant = '\\n'.join(set_redundant)\n if set_redundant and input(f\"\"\"################ 是否删除多余图片文件:\n{str_redundant}\n################ [Y/n]:\"\"\").lower() != \"n\":\n for file in set_redundant:\n os.remove(os.path.join(dir_img, file))\n\n # 将图像链接地址改写为cnblog_link\n dict_images = {}\n dir_md = os.path.dirname(path_md)\n # if dict_images_relpath:\n for line_idx, rel_path in dict_images_relpath.items():\n dict_images[line_idx] = os.path.join(dir_md, rel_path)\n md_parser.process_images(dict_images, self._upload_img)\n\n # 备注原本地图像链接\n text_lines = md_parser.get_text()\n # if dict_images_relpath:\n for line, url_local in dict_images_relpath.items():\n # path_rel = os.path.relpath(url_local, md_parser.file_name)\n md_parser.modify_text(line, f\"{text_lines[line].rstrip()} \")\n return True\n\n def post_blog(self, path_md):\n md_parser = self.md_fmt\n\n if self.mime is None:\n self._load_mime()\n\n # md_parser读取文档,并初步格式化\n format_one_doc(md_parser, path_md)\n # 图片的处理\n self._rebuild_images(path_md)\n # 更新category\n self._update_categories(path_md)\n # 保存修改url的Markdown\n md_parser.overwrite()\n\n # if self._is_article(path_md):\n # # 貌似没有用 ??\n # md_parser.metadata[\"categories\"] = [\"[文章分类]\"] + md_parser.metadata[\"categories\"]\n\n blog_title = self.md_fmt.make_title()\n struct_post = {\n \"title\": blog_title,\n \"categories\": [\"[Markdown]\"] + md_parser.metadata[\"categories\"],\n \"description\": \"\".join(md_parser.get_text()),\n 'mt_keywords': \",\".join(md_parser.metadata[\"tags\"])\n }\n\n postid = self.get_postid(path=self.md_fmt.file_path)\n if postid:\n self._repost_blog(postid, struct_post)\n else:\n while True:\n try:\n self._new_blog(struct_post)\n except xmlrpc.client.Fault as e:\n err_type = str(e).split(':', 1)[0]\n if err_type == \"\n print(f\"cnblog限制了发送频率,请静候{TIME_FOR_FREQUENCE_LIMIT}s\\n程序正在后台运行,请勿退出...\")\n sleep(TIME_FOR_FREQUENCE_LIMIT)\n elif err_type == \"'等类似标签字符?\")\n else:\n raise Exception(f\"未知的上传问题: {e}\")\n else:\n break\n\n def download_blog(self, title_or_postid, ignore_img=True):\n if not ignore_img:\n raise Exception(\"尚未开发,敬请期待\")\n\n postid = title_or_postid if title_or_postid.isdecimal() else self.get_postid(title=title_or_postid)\n if not postid:\n logger.error(f\"本地数据库未存储blog: 【{title_or_postid}】,\\\n但不确定博客园服务器状态。如有必要,请指定postid值,重新查询。\")\n return\n\n dict_data = self.cnblog_server.metaWeblog.getPost(\n postid,\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"])\n\n dir_download = \"cnblog_bak\"\n if not os.path.exists(dir_download):\n os.makedirs(dir_download)\n path_save = f\"{dir_download}/{postid}.md\"\n with open(path_save, \"w\", encoding=\"utf8\") as fp:\n fp.write(dict_data['description'])\n print(f\">> 已下载blog:【{path_save}】\")\n\n def delete_blog(self, path_file):\n \"\"\" postid: str_id or path_file \"\"\"\n # if not postid.isdecimal():\n postid = self.get_postid(path=path_file)\n\n try:\n self.cnblog_server.blogger.deletePost(\n self.dict_conf[\"app_key\"],\n postid,\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"],\n True)\n except xmlrpc.client.Fault:\n # logger.error(e) # \n title = self.db_mgr.get_title_by_postid(postid)\n logger.error(f\"Web操作失败,请手动删除博客【{title}】\")\n else:\n print(f\">> 已删除blog:【{postid}】\")\n\n path_rel = self.db_mgr.data[\"postids\"][postid]\n dir_md = path_file[:-3]\n if 
os.path.exists(dir_md):\n os.rmdir(dir_md)\n self.db_mgr.remove_doc(path_rel)\n\n def move_blog(self, path_from, path_to):\n # 无需cnblog变更\n self.db_mgr.move_doc(path_from, path_to)\n\n def get_recent_post(self, num=9999):\n \"\"\"\n return: [{\n 'dateCreated': ,\n 'description': '...',\n 'title': 'Python数据结构',\n 'categories': ['[随笔分类]33-python', '[随笔分类]3-syntax'],\n 'enclosure': {'length': 0},\n 'link': 'https://www.cnblogs.com/brt2/p/12944353.html',\n 'permalink': 'https://www.cnblogs.com/brt2/p/12944353.html',\n 'postid': '12944353',\n 'source': {},\n 'userid': '-2'\n }, ...]\n \"\"\"\n recent_post = self.cnblog_server.metaWeblog.getRecentPosts(\n self.dict_conf[\"blog_id\"],\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"],\n num)\n return recent_post\n","repo_name":"brt2cv/md2blog","sub_path":"cnblog_mgr.py","file_name":"cnblog_mgr.py","file_ext":"py","file_size_in_byte":13260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"17044611304","text":"import requests\nimport threading\nfrom lxml import etree\n# 解析网页,并得到网页中的IP代理\ndef get_proxy(html):\n selector = etree.HTML(html)\n proxies = []\n for each in selector.xpath(\"//tr[@class='odd']\"):\n ip = each.xpath(\"./td[2]/text()\")[0]\n port = each.xpath(\"./td[3]/text()\")[0]\n # 拼接IP地址,端口号6\n proxy = ip + \":\" + port\n proxies.append(proxy)\n test_proxies(proxies)\n\ndef thread_write_proxy(proxy):\n with open(\"./ip_proxy.txt\", 'a+') as f:\n f.write(proxy + '\\n')\n# 验证已得到IP的可用性\ndef thread_test_proxy(proxy):\n url = \"http://www.baidu.com/\"\n header = {\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36\",\n }\n try:\n response = requests.get(url, headers=header, proxies={\"http\": proxy}, timeout=1)\n if response.status_code == 200:\n thread_write_proxy(proxy)\n except Exception:\n pass\n# 添加线程模式\ndef test_proxies(proxies):\n proxies = proxies\n for proxy in proxies:\n test = threading.Thread(target=thread_test_proxy, args=(proxy,))\n test.start()\ndef get_html(url):\n header = {\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\",\n }\n\n response = requests.get(url,headers=header)\n get_proxy(response.text)\nif __name__ == \"__main__\":\n url = \"http://www.xicidaili.com/nn/\"\n for i in range(1,30):\n get_html(url+str(i))\n\n","repo_name":"zyq914014125/spider","sub_path":"the end test/ip_set.py","file_name":"ip_set.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"3628233543","text":"import pya\nimport re\nimport os\t\n\nWIDTH = 2048\nHEIGHT = 2048\n\napp = pya.Application.instance()\nwin = app.main_window()\n\n# Load technology file\ntech = pya.Technology()\ntech.load(tech_file)\nlayoutOptions = tech.load_layout_options\nlayoutOptions.text_enabled = False\n\n# Load def file in the main window\ncell_view = win.load_layout(input_layout, layoutOptions, 0)\nlayout_view = cell_view.view()\nlayout_view.grid_visible = False\n\nlayout_view.max_hier()\n# layout_view.clear_layers()\n\n# Hide layers with these purposes\nhidden_purposes = [0, 4, 5]\n\nli = layout_view.begin_layers()\nwhile not li.at_end():\n lp = li.current()\n if lp.source_datatype in hidden_purposes:\n new_lp = lp.dup()\n new_lp.visible = False\n layout_view.set_layer_properties(li, new_lp)\n\n li.next()\n\nlayout_view.save_image(os.path.splitext(input_layout)[0]+'.png', WIDTH, HEIGHT)\n\napp.exit(0)\n","repo_name":"efabless/foss-asic-tools","sub_path":"images/foss-asic-tools/addons/sak/klayout/scrotLayout.py","file_name":"scrotLayout.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"76"}
+{"seq_id":"18954912183","text":"def part1(file_path):\n gamma_rate_bin = \"0b\"\n epsilon_rate_bin = \"0b\"\n with open(file_path) as file:\n lines = file.readlines()\n line_char = [[int(x) for x in line.strip()] for line in lines]\n col_number = len(line_char[0])\n for index in range(0, col_number):\n count = {0: 0, 1: 0}\n for x in line_char:\n count[x[index]] += 1\n most_common = max(count.items(), key=lambda x: x[1])[0]\n less_common = min(count.items(), key=lambda x: x[1])[0]\n gamma_rate_bin += str(most_common)\n epsilon_rate_bin += str(less_common)\n return int(gamma_rate_bin, 2) * int(epsilon_rate_bin, 2)\n\n\ndef part2(file_path):\n o2_rating = \"0b\"\n co2_rating = \"0b\"\n with open(file_path) as file:\n lines = file.readlines()\n line_char = [[int(x) for x in line.strip()] for line in lines]\n col_number = len(line_char[0])\n\n keep_most = line_char\n keep_less = line_char\n\n for index in range(0, col_number):\n count = {0: 0, 1: 0}\n for x in keep_most:\n count[x[index]] += 1\n most_common = max(count.items(), key=lambda x: x[1])[0] if count[0] != count[1] else 1\n keep_most = list(filter(lambda x: x[index] == most_common, keep_most))\n if len(keep_most) == 1:\n break\n\n for index in range(0, col_number):\n count = {0: 0, 1: 0}\n for x in keep_less:\n count[x[index]] += 1\n less_common = min(count.items(), key=lambda x: x[1])[0] if count[0] != count[1] else 0\n keep_less = list(filter(lambda x: x[index] == less_common, keep_less))\n if len(keep_less) == 1:\n break\n\n o2_rating += \"\".join(map(str, keep_most[0]))\n co2_rating += \"\".join(map(str, keep_less[0]))\n\n return int(o2_rating, 2) * int(co2_rating, 2)\n\n\nif __name__ == '__main__':\n print(part1(\"../../data/03-part1\"))\n print(part2(\"../../data/03-part2\"))\n","repo_name":"ashleycaselli/AdventOfCode2021","sub_path":"src/day03/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"71580540726","text":"import torch\nimport torch.nn as nn\n\n\nclass Network(nn.Module):\n def __init__(self, nx, nu, n_joints):\n super(Network, self).__init__()\n\n self.nx = nx\n self.nu = nu\n self.n_joints = n_joints\n\n self.layer1 = nn.Sequential(\n nn.Linear(\n in_features=nx,\n out_features=16,\n ),\n nn.ReLU(),\n )\n\n self.layer2 = nn.Sequential(\n nn.Linear(\n in_features=16,\n out_features=32,\n ),\n nn.ReLU(),\n )\n\n self.layer3 = nn.Sequential(\n nn.Linear(\n in_features=32,\n out_features=64,\n ),\n nn.ReLU(),\n )\n\n self.layer4 = nn.Sequential(\n nn.Linear(\n in_features=64,\n out_features=64,\n ),\n nn.ReLU(),\n )\n\n # # Three jointed pendulum:\n\n # self.layer5 = nn.Sequential(\n # nn.Linear(\n # in_features=64,\n # out_features=128,\n # ),\n # nn.ReLU(),\n # )\n\n # Split into n_joints heads\n self.last_layers = nn.ModuleList()\n for _ in range(self.n_joints):\n self.last_layers.append(\n nn.Linear(\n in_features=64,\n out_features=nu,\n ),\n )\n\n self.init_weights()\n\n def init_weights(self):\n # Recommended weights initialization if using ReLU activation functions\n nn.init.kaiming_normal_(self.layer1[0].weight)\n nn.init.kaiming_normal_(self.layer2[0].weight)\n nn.init.kaiming_normal_(self.layer3[0].weight)\n nn.init.kaiming_normal_(self.layer4[0].weight)\n\n # # Three jointed pendulum:\n # nn.init.kaiming_normal_(self.layer5[0].weight)\n\n for _, layer in enumerate(self.last_layers):\n nn.init.kaiming_normal_(layer.weight)\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n # # Three jointed pendulum:\n # x = self.layer5(x)\n\n # Get data from each (n_joints) head layer\n y = []\n for _, layer in enumerate(self.last_layers):\n y.append(layer(x))\n\n # Dimension is: n_joints, batch_size, nu\n \n # Reshape dimension to get: batch_size, j_joints, nu\n y = torch.stack(y, dim=1)\n\n return y\n","repo_name":"lorenzinigiovanni/orc-project","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"34230467228","text":"from typing import List\n\n\ndef check_shift(a: str, b: str) -> bool:\n offset = None\n for ca, cb in zip(a, b):\n if offset is None:\n offset = ord(ca) - ord(cb)\n else:\n if offset != ord(ca) - ord(cb):\n return False\n\n return True\n\n\nclass Solution:\n def groupStrings(self, strings: List[str]) -> List[List[str]]:\n\n if len(strings) == 0:\n return []\n\n groups = dict()\n for elem in strings:\n group_list = groups.get(len(elem), [])\n group_list.append(elem)\n groups[len(elem)] = group_list\n\n sol = []\n for str_group_len, group_list in groups.items():\n group_sol = dict()\n for elem in group_list:\n if len(group_sol) == 0:\n group_sol[elem] = [elem]\n else:\n group = elem\n for possible_group, group_list in group_sol.items():\n if check_shift(possible_group, elem):\n group = possible_group\n\n the_group = group_sol.get(group, [])\n the_group.append(elem)\n group_sol[group] = the_group\n\n sol.extend(list(group_sol.values()))\n\n return sol\n\n\ndin = [\n [\"abc\",\"bcd\",\"acef\",\"xyz\",\"az\",\"ba\",\"a\",\"z\"]\n]\n\nexpected_out = [\n [[\"acef\"],[\"a\",\"z\"],[\"abc\",\"bcd\",\"xyz\"],[\"az\",\"ba\"]]\n]\n\nfor i, expected in zip(din, expected_out):\n s = Solution()\n actual = s.groupStrings(i)\n print(actual)\n print(expected)\n assert actual == expected","repo_name":"DarioBernardo/hackerrank_exercises","sub_path":"strings/group_shifted_strings.py","file_name":"group_shifted_strings.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"21110395587","text":"import pandas as pd\nfrom pathlib import Path\n\ndataDir = Path(__file__).parent.absolute()\n\nfake_c = pd.read_csv(f\"{dataDir}/fake_counterfactuals.csv\", header=None)\nfake_d_c = pd.read_csv(f\"{dataDir}/fake_delta_counterfactuals.csv\", header=None)\n\np_c = fake_c.iloc[1:,:]\np_c = p_c / 100\npos = p_c[(p_c[0]>= 0.5) & (p_c[1]>= 0.5) & (p_c[2]>= 0.5) & (p_c[3]>= 0.5)].count()\nneg = p_c[(p_c[0]<= -0.5) & (p_c[1]<= -0.5) & (p_c[2]<= -0.5) & (p_c[3]<= -0.5)].count()\npos_pos = p_c[(p_c[0]<= 0.5) & (p_c[1]<= 0.5) & (p_c[2]<= 0.5) & (p_c[3]<= 0.5)].count()\nneg_neg = p_c[(p_c[0]>= -0.5) & (p_c[1]>= -0.5) & (p_c[2]>= -0.5) & (p_c[3]>= -0.5)].count()\nprint(pos)\nprint(neg)\nprint(pos_pos)\nprint(neg_neg)\nprint(len(p_c))\n\n# pos = 0\n# neg = 0\n# for v in p_c[0]:\n# if v >= 0.5:\n# # print(\"if\", v)\n# pos += 1\n# pass\n# elif v <= -0.5:\n# # print(\"elif\", v)\n# neg += 1\n# pass\n# else:\n# # print(\"else\", v)\n# pass\n\n# print(pos)\n# print(neg)\n","repo_name":"Paalar/friendly-enigma","sub_path":"data/fake/cchvae_revert.py","file_name":"cchvae_revert.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"}
+{"seq_id":"33292918768","text":"from app import app\nfrom mongoengine.queryset import DoesNotExist\ndb = app.extensions['mongoengine']\n\nclass RefreshToken(db.Document):\n meta = {'collection': 'applications',\n 'indexes': [\n {'fields': ['client_id', 'refresh_token'], 'unique': True},\n ]\n }\n\n client_id = db.StringField(required=True)\n refresh_token = db.StringField(required=True)\n data = db.StringField(required=True)\n\n @classmethod\n def delete(cls, client_id, refresh_token):\n cls.objects(db.Q(client_id = client_id) & db.Q(refresh_token = refresh_token)).remove()\n\n @classmethod\n def find(cls, client_id, refresh_token):\n return RefreshToken.objects.get(db.Q(client_id=client_id) & db.Q(refresh_token=refresh_token))\n\n @classmethod\n def save(cls, client_id, refresh_token, data):\n token, created = RefreshToken.objects.get_or_create(client_id=client_id, refresh_token=refresh_token)\n token.data = data\n\n token.save()\n\nclass AccessKey(db.Document):\n meta = {'collection': 'applications',\n 'indexes': [\n {'fields': ['client_id', 'user_id'], 'unique': False},\n ]\n }\n\n client_id = db.StringField(required=True)\n user_id = db.StringField(required=True)\n access_key = db.StringField(required=True)\n token = db.ReferenceField(RefreshToken, required=True)\n\n @classmethod\n def has_access(cls, client_id, user_id):\n try:\n return cls.objects.get(db.Q(client_id = client_id) & db.Q(user_id = user_id)) != None\n except DoesNotExist:\n return None\n\n @classmethod\n def delete(cls, client_id, user_id):\n cls.objects(db.Q(client_id = client_id) & db.Q(user_id = user_id)).remove()\n\n @classmethod\n def save(cls, client_id, user_id, access_key, token):\n access, created = AccessKey.objects.get_or_create(client_id=client_id, user_id=user_id)\n access.access_key = access_key\n access.token = token\n\n access.save()","repo_name":"Labgoo/redshift-goog-datasource","sub_path":"models/refresh_token.py","file_name":"refresh_token.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
+{"seq_id":"34811902518","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 13 15:19:47 2018\n\n@author: loganwu\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\n\nx = np.arange(0, 100)\n\nmu = [35, 60]\nsd = [5, 3]\n\nvar1 = sd[0]**2\nvar2 = sd[1]**2\nprec1=1/var1\nprec2=1/var2\nprec3=prec1+prec2\nvar3=1/prec3\nmu3=(mu[0]*prec1+mu[1]*prec2)/(prec1+prec2)\nsd3=np.sqrt(var3)\n\ndprior = norm.pdf(x, mu[0], sd[0])\ndlikelihood = norm.pdf(x, mu[1], sd[1])\ndposterior = norm.pdf(x, mu3, sd3)\n\n\nfig, ax = plt.subplots(figsize=[2, 1])\nplt.fill_between(x, dprior, alpha=0.5, label=\"Prior\")\nplt.fill_between(x, dlikelihood, alpha=0.5, label=\"Likelihood\")\nplt.fill_between(x, dposterior, alpha=0.5, label=\"Posterior\")\n#plt.xlabel(\"Parameter estimate\")\n#plt.legend()\n\n# Hide the right and top spines\nax.spines['left'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.spines['bottom'].set_visible(False)\n\nplt.yticks([])\nplt.xticks([])\n\nplt.savefig(\"../media/bayesexample.png\", bbox_inches=\"tight\", dpi=144, transparent=True)\n","repo_name":"loganbwu/geothermal","sub_path":"src/presentation_plots.py","file_name":"presentation_plots.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"17960203838","text":"import cv2 \nimport numpy as np\nimport time\nbasePath = \"Computer-Vision-py/DATA/\"\n\n\"\"\"\nUsing an existing video file\n\"\"\"\n\n# create a cap obj but just provide the file path instead of a 0.... for the capture device \ncap = cv2.VideoCapture(basePath + \"hand_move.mp4\")\nframeRate = 0x00000014\n\n# print(f\"{frameRate} fps\\n{cv2.CAP_PROP_FRAME_COUNT // 20}\")\n\n# check if you where able to open the file \nif ( not cap.isOpened() ):\n print(\"Unable to open video file\")\n\nwhile ( cap.isOpened() ):\n ret, frame = cap.read()\n\n # 20 fps || dont delay unless you want to view the frames\n time.sleep(1/frameRate)\n if ( ret ):\n cv2.imshow(\"frame\", frame)\n\n if ( cv2.waitKey(1) & 0xFF == 27 ):\n break\n\n else:\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"Abukar-1000/myCompVision","sub_path":"videoBasics/vidFile.py","file_name":"vidFile.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"23217628608","text":"import csv\nfrom datetime import datetime\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.core.exceptions import ValidationError\nfrom measure_snow import models\n\nclass Command(BaseCommand):\n args = ' '\n help = '''Imports snowfall measures from a CSV file.\n\nEach row of CSV data should be:\n,\n\nFor example:\nmm/dd/yy,inches with one decimal place\n11/12/11,4.5'''\n\n def handle(self, *args, **options):\n season_name = args[0]\n try:\n season = models.SnowSeason.objects.get(name=season_name)\n except models.SnowSeason.DoesNotExist:\n raise CommandError(\"SnowSeason with name %s does not exist\" % season_name)\n\n csv_filename = args[1]\n try:\n csv_fh = open(csv_filename, 'rb')\n except IOError:\n raise CommandError(\"Could not open %s\" % csv_filename)\n\n csv_reader = csv.reader(csv_fh)\n for row in csv_reader:\n try:\n timestamp = datetime.strptime(row[0], \"%m/%d/%y\")\n except ValueError:\n self.stderr.write('The row(%r) has a date that cannot be parsed. Skipping this row...\\n' % row)\n continue\n\n measure = models.SnowfallMeasure(timestamp=timestamp, season=season, inches=row[1])\n try:\n measure.save()\n self.stdout.write('Successfully created SnowfallMeasure.\\n')\n except ValidationError:\n self.stderr.write('The row(%r) likely has a value for inches that cannot be parsed. Skipping this row...\\n' % row)\n continue\n\n csv_fh.close()\n","repo_name":"jpwoodbu/measure_snow","sub_path":"management/commands/importsnowcsv.py","file_name":"importsnowcsv.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
+{"seq_id":"10022600263","text":"class Solution:\r\n def isMatch(self, s, p):\r\n dp = [[False for _ in range(len(p) + 1)] for i in range(len(s) + 1)]\r\n # dp = [[False]*(len(p)+1)]*(len(s)+1)\r\n print(dp)\r\n dp[0][0] = True\r\n for j in range(1, len(p) + 1):\r\n if p[j - 1] != '*':\r\n break\r\n dp[0][j] = True\r\n\r\n for i in range(1, len(s) + 1):\r\n for j in range(1, len(p) + 1):\r\n if p[j - 1] in {s[i - 1], '?'}:\r\n dp[i][j] = dp[i - 1][j - 1]\r\n elif p[j - 1] == '*':\r\n dp[i][j] = dp[i - 1][j] or dp[i][j - 1]\r\n print(dp)\r\n return dp[-1][-1]\r\n\r\na= Solution()\r\np=\"*a*b\"\r\ns=\"adceb\"\r\nprint(a.isMatch(s,p))\r\n\r\ndp = [[False for _ in range(len(p) + 1)] for i in range(len(s) + 1)]\r\ndp1 = [[False]*(len(p)+1)]*(len(s)+1)\r\nprint(dp==dp1)","repo_name":"heloo311/luyen-tap-giai-thuat","sub_path":"dynamic programing/44. Wildcard Matching.py","file_name":"44. Wildcard Matching.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"17992755526","text":"import sys\nimport time\nimport os\n\n\nfichiers = [\"out\" + str(k * 10) + \".txt\" for k in range(0, 867)]\n\nfor fichier in fichiers:\n f = open(fichier, 'r')\n # sys.stdout.write(\"\\n\"*10)\n # \\n pas nécessaire\n sys.stdout.write(f.read())\n f.close()\n time.sleep(0.17)\n os.system('clear')","repo_name":"TikSL/ytb_mp4_ascii_converter","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"11212911881","text":"import argparse\nfrom libs.phonebooter import PhoneBooter\n\n\ndef main(targetNum, threads, bootLength, wav):\n\n booter = PhoneBooter()\n booter.launch (targetNum, threads, bootLength, wav)\n\n\nif __name__ == '__main__':\n banner = \"\"\"\n\n _____ _ ____ _ \n | __ \\| | | _ \\ | | \n | |__) | |__ ___ _ __ ___| |_) | ___ ___ | |_ ___ _ __ \n | ___/| '_ \\ / _ \\| '_ \\ / _ \\ _ < / _ \\ / _ \\| __/ _ \\ '__|\n | | | | | | (_) | | | | __/ |_) | (_) | (_) | || __/ | \n |_| |_| |_|\\___/|_| |_|\\___|____/ \\___/ \\___/ \\__\\___|_| \n\n\n FUCKING MICROSOFT SUPPORT SCAMMING PIECES OF SHIT!\n ex. usage: phonebooter.py -p -l 600 -s ducktales\n \"\"\"\n print(banner)\n parser = argparse.ArgumentParser(description='PhoneBooter CLI')\n parser.add_argument('-p', '--phonenumber', action='store', dest='targetNum', required=True,\n help='Specify the target phone number to attack. Example: 18001234567')\n parser.add_argument('-l', '--length', action='store', dest='bootLength', required=True,\n help='Length of time in seconds to run the phone')\n parser.add_argument('-s', '--sound', action='store', dest='wav',\n help='Specify the *.ulaw file to play. Store it under /usr/share/asterisk/sounds. '\n 'Do not include the extension', default='hello-world')\n parser.add_argument('-t', '--threads', action='store', dest='threads',\n help='Number of async processes to kick off. Default is 2. '\n '2 is sufficient for cellular devices in most cases.', default=2)\n args = parser.parse_args()\n\n if args.wav is not None:\n main(args.targetNum, int(args.threads), int(args.bootLength), args.wav)\n\n else:\n main(args.targetNum, int(args.threads), int(args.bootLength), 'hello-world')\n","repo_name":"BraveLittleRoaster/PhoneBooter","sub_path":"booter-cli.py","file_name":"booter-cli.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"127349178","text":"from fides.api.schemas.masking.masking_configuration import NullMaskingConfiguration\nfrom fides.api.service.masking.strategy.masking_strategy_nullify import (\n NullMaskingStrategy,\n)\n\n\ndef test_mask_with_value():\n request_id = \"123\"\n config = NullMaskingConfiguration()\n masker = NullMaskingStrategy(configuration=config)\n assert masker.mask([\"something else\"], request_id)[0] is None\n\n\ndef test_mask_with_multi_value():\n request_id = \"123\"\n config = NullMaskingConfiguration()\n masker = NullMaskingStrategy(configuration=config)\n masked = masker.mask([\"something else\", \"some more\"], request_id)\n assert masked[0] is None\n assert masked[1] is None\n\n\ndef test_mask_no_value():\n request_id = \"123\"\n config = NullMaskingConfiguration()\n masker = NullMaskingStrategy(configuration=config)\n assert masker.mask(None, request_id) is None\n","repo_name":"ethyca/fides","sub_path":"tests/ops/service/masking/strategy/test_masking_strategy_null.py","file_name":"test_masking_strategy_null.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":302,"dataset":"github-code","pt":"76"}
+{"seq_id":"25859418570","text":"from zope.component import adapts\nfrom zope.interface import implements\n\nfrom themecontainer import FSThemeContainer\nfrom Products.GenericSetup.utils import XMLAdapterBase\nfrom Products.GenericSetup.utils import PropertyManagerHelpers\nfrom Products.GenericSetup.utils import exportObjects\nfrom Products.GenericSetup.utils import importObjects\n\nfrom Products.GenericSetup.interfaces import IBody\nfrom Products.GenericSetup.interfaces import ISetupEnviron\nfrom interfaces import IThemeContainer\n_marker = object()\n\nNAME = 'designer_themes'\n\nROOT_THEMES = '.cps_themes'\n\ndef importRootThemesContainer(context):\n \"\"\"Create the root themes container.\n \"\"\"\n logger = context.getLogger(NAME)\n logger.info(\"Creating the root themes container\")\n\n site = context.getSite()\n thc = getattr(site, ROOT_THEMES, None)\n if thc is None:\n thc = FSThemeContainer(ROOT_THEMES)\n thc.manage_changeProperties(title='Root themes')\n site._setObject(ROOT_THEMES, thc)\n thc = getattr(site, ROOT_THEMES)\n importObjects(thc, '', context)\n\n\ndef exportRootThemesContainer(context):\n \"\"\"Export the root themes container\n \"\"\"\n site = context.getSite()\n thc = getattr(site, ROOT_THEMES, None)\n if thc is None:\n logger = context.getLogger(NAME)\n logger.info(\"Nothing to export.\")\n return\n exportObjects(thc, '', context)\n\nclass ThemeContainerXMLAdapter(XMLAdapterBase, PropertyManagerHelpers):\n \"\"\"XML importer and exporter for theme containers\n \"\"\"\n\n adapts(IThemeContainer, ISetupEnviron)\n implements(IBody)\n\n _LOGGER_ID = NAME\n name = NAME\n\n def _exportNode(self):\n \"\"\"Export the object as a DOM node.\n \"\"\"\n node = self._getObjectNode('object')\n node.appendChild(self._extractProperties())\n self._logger.info(\"Root themes container exported.\")\n return node\n\n def _importNode(self, node):\n \"\"\"Import the object from the DOM node.\n \"\"\"\n if self.environ.shouldPurge():\n self._purgeProperties()\n self._initProperties(node)\n self._logger.info(\"Root theme container imported.\")\n\n","repo_name":"nuxeo-cps/products--CPSDesignerThemes","sub_path":"exportimport.py","file_name":"exportimport.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"21936680200","text":"def main():\n costs = [[259, 770], [448, 54], [926, 667], [184, 139], [840, 118], [577, 469]]\n min_cost = 0\n diff = {}\n for element in costs:\n min_cost += element[0]\n diff.update({costs.index(element): element[0] - element[1]})\n sorted_diff = {k: v for k, v in sorted(diff.items(), key=lambda item: item[1], reverse=True)}\n for i in range(int(len(costs) / 2)):\n min_cost -= costs[list(sorted_diff)[i]][0]\n min_cost += costs[list(sorted_diff)[i]][1]\n print(min_cost)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"meghakorade16/leetcode-solution","sub_path":"leetcode-June-challenges/two-city-sheduling.py","file_name":"two-city-sheduling.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"73115404724","text":"'''\nGiven an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.\n\nNote: The solution set must not contain duplicate triplets.\n\nFor example, given array S = [-1, 0, 1, 2, -1, -4],\n\nA solution set is:\n[\n [-1, 0, 1],\n [-1, -1, 2]\n]\nConclusion:\nThe key steps:\n1.Try to sort the list firstly. Because it will reduce the time of iterating\n2.Then in terms of the iterating pattern, we should pick each number as an answer,\nand then go search search the following potential pair such that the sum is zero.\n3.The last thing is to take care of the duplicated answer, we only care about the number we meet\nat the first time, then skip those duplicated numbers\n'''\nclass Solution(object):\n def threeSum(self, nums):\n sort_nums = sorted(nums)\n ans = []\n for i in range(len(nums)-2):\n # Use the first number of those duplicated sequence\n if i>0 and sort_nums[i]==sort_nums[i-1]:\n continue\n left,right = i+1,len(nums)-1\n while left < right:\n s = sort_nums[i] + sort_nums[left] + sort_nums[right]\n # we increase the left(min) value\n if s < 0 :\n left = left + 1\n # we decrease the right(max) value\n elif s > 0 :\n right = right -1\n # we get the pair such that the sum are zero\n else:\n ans.append([sort_nums[i],sort_nums[left],sort_nums[right]])\n left = left + 1\n right = right -1\n # Skip duplicated sequence\n while left < right and sort_nums[left] == sort_nums[left - 1]:\n left = left + 1\n while right > left and sort_nums[right] == sort_nums[right + 1]:\n right = right - 1\n\n\n return ans\ns1 = [-1,0,1,2,-1,-4]\ns2 = [0,0,0]\ns3 = [-4,-2,-2,-2,0,1,2,2,2,3,3,4,4,6,6]\nsolution = Solution()\n\nprint (solution.threeSum(s3))\n\n\n'''\n list_len = len(nums)\n if list_len<3: return []\n pair_i = 0\n pair_j = 1\n ans = []\n while(pair_j < list_len-1):\n pair_sum = nums[pair_i]+nums[pair_j]\n for walker in range(pair_j+1,list_len,1):\n print walker\n if pair_sum + nums[walker] == 0: ans.append([nums[pair_i],nums[pair_j],nums[walker]])\n pair_i = pair_i + 1\n pair_j = pair_j + 1\n'''","repo_name":"AlphaGarden/LeetCodeProblems","sub_path":"Medium/3Sum.py","file_name":"3Sum.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"72300200564","text":"\"\"\"tests for bubble.\n\nBubble is identical to Sphere except for the name. If Sphere works Bubble will also work.\n\"\"\"\nfrom honeybee_radiance.geometry import Bubble\n\n\ndef test_cup():\n geo = Bubble('test_bubble')\n assert geo.identifier == 'test_bubble'\n assert geo.to_radiance(\n minimal=True) == 'void bubble test_bubble 0 0 4 0.0 0.0 0.0 10.0'\n","repo_name":"ladybug-tools/honeybee-radiance","sub_path":"tests/geometry_bubble_test.py","file_name":"geometry_bubble_test.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"76"}
+{"seq_id":"72024859124","text":"filenames = ['document', 'report', 'presentation']\n# Copy-paste the above list in a .py file and extend the code, so it prints out the output below:\n\nfor index, i in enumerate(filenames, 1):\n print(f'{index}-{i.capitalize()}.txt')\n\n#########################################################\nips = ['100.122.133.105', '100.122.133.111']\n# Copy-paste the ips list in a .py file and extend the program so it:\n# 1. Prompts the user to input an index (e.g, 0 or 1).\n# 2. Returns the IP address that has that index.\n# Here is how the program would behave when executed:\nchose_ip = int(input('Enter the index of the IP you want: '))\nprint(f'You chose {ips[chose_ip]}')\n\n\n","repo_name":"ChrisSamHarris/todoapp","sub_path":"ExerciseDir/enumerate.py","file_name":"enumerate.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"23271813418","text":"import streamlit as st\nimport pandas as pd\nimport plotly.express as px\nfrom PIL import Image\n\nst.set_page_config(\n page_title=\"Visualize Your Connections\", \n page_icon=\"💽\", \n layout=\"wide\",\n initial_sidebar_state=\"collapsed\")\n\ninstructions = Image.open('images/inst.png')\n\n# \\\\\\ Sidebar /// #\n\n@st.cache_data\ndef load_data(csv, dataset):\n if csv is not None: # if file is uploaded\n df = pd.read_csv(csv, skiprows=3, parse_dates=['Connected On'])\n df['year'] = df['Connected On'].dt.year\n df['Company'] = df['Company'].fillna('No Company Data')\n df['Position'] = df['Position'].fillna('No Position Data')\n\n else: # if no file is uploaded or removed\n df = pd.read_csv(f'data/{dataset}.csv', skiprows=3, parse_dates=['Connected On'])\n df['year'] = df['Connected On'].dt.year\n df['Company'] = df['Company'].fillna('No Company Data')\n df['Position'] = df['Position'].fillna('No Position Data')\n\n return df\n\ndef bar_px(df):\n year = df['year'].value_counts().reset_index()\n\n bar = px.bar(\n year,\n y='year',\n x='count',\n orientation='h',\n text_auto=True,\n color='count',\n height=200,\n color_continuous_scale=px.colors.sequential.Aggrnyl,\n labels={'year':'','count':''}\n )\n bar.update_traces(textfont_size=14, textposition='outside', \n marker_line_width=0, hovertemplate=None, hoverinfo='skip')\n\n bar.update_layout(margin=dict(t=0, l=0, r=0, b=0),\n plot_bgcolor='rgba(0,0,0,0)',\n paper_bgcolor='rgba(0,0,0,0)')\n \n bar.update_coloraxes(showscale=False)\n\n bar.update_xaxes(color='#03b5aa',\n gridcolor='white',\n linecolor='rgba(0,0,0,0)')\n\n bar.update_yaxes(color='#03b5aa',\n linecolor='rgba(0,0,0,0)',\n dtick=1)\n\n return bar \n\ndef treemap_px(df, px_height):\n fig = px.treemap(\n df,\n height=px_height,\n path=['Company','Position'],\n color='Company',\n color_discrete_sequence=px.colors.sequential.Aggrnyl\n )\n fig.update_layout(margin=dict(t=0, l=0, r=0, b=0), \n font=dict(family='Arial', size=14),\n plot_bgcolor='rgba(0,0,0,0)')\n\n fig.update_traces(root_color='rgba(0,0,0,0)', # to match background color of app\n marker=dict(cornerradius=10),\n hovertemplate='%{value} Connection(s)
at %{label}')\n \n return fig\n\ndef polar_px(df):\n df['month'] = df['Connected On'].dt.month_name()\n month = df['month'].value_counts().reset_index()\n month_order = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']\n\n chart = px.bar_polar(\n month,\n theta='month',\n r='count',\n color='count',\n template='plotly_dark',\n color_discrete_map=px.colors.sequential.Redor,\n category_orders={'month': month_order})\n\n return chart\n\n# \\\\\\ Header /// #\n\nst.title(\"LinkedIn connections\")\n\nwith st.container():\n left, right = st.columns((3, 2))\n with left:\n st.subheader(\"the visual: \")\n st.write(\"\"\"\n \n\n my goal was to make this app interactive and allow users to create their own visualization by using their data\n \"\"\")\n st.subheader(\"couple notes:\")\n st.write(\"\"\"\n big thanks to my brother [alberto](https://www.linkedin.com/in/albertoreyes2021/) for letting me use his data \n \n and want to give credit to [isaac](https://www.linkedin.com/in/tuckerrasbury/) and his project that I took inspiration from\n \"\"\")\n with right:\n st.subheader(\"\")\n st.write(\"\")\n dataset = st.selectbox('choose a sample dataset ', ('diego','alberto'))\n csv_file = st.file_uploader('upload your file here 👇 ')\n df = load_data(csv_file, dataset)\n tree_height = st.slider(\"increase the size of the chart 🔍\", 500, 2000, 1000)\n\nwith st.container():\n\n left, right = st.columns((3, 2))\n with left:\n st.subheader(\"how to get your own data\")\n how_to = st.expander(\"steps: \")\n how_to.write(\"\"\"\n [click on this link](https://www.linkedin.com/mypreferences/d/download-my-data) and select \"request archive\" of your data\n\n then, you will receive an email in about 5 minutes with a link to download your data\n\n after that, just extract the file from the zipped folder and you are ready to visualize your connections! \n \"\"\")\n how_to.image(instructions, width=500, use_column_width='auto', output_format='PNG')\n \n\nst.write(\"##\") \n\n# \\\\\\ Treemap /// #\n\ntreemap = treemap_px(df, tree_height)\n\nwith st.container():\n st.plotly_chart(treemap, use_container_width=True)\n\n# \\\\\\ Bar Chart /// #\n\nst.write(\"##\")\n\nst.subheader(\"break it down! 🤸\")\n\nbar = bar_px(df)\n\nwith st.container():\n st.write(\"by year:\")\n st.plotly_chart(bar, use_container_width=True)\n","repo_name":"donutdiego/linkedin","sub_path":"linkedin.py","file_name":"linkedin.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"28031563976","text":"def collatz(number):\n if number % 2:\n print(3 * number + 1)\n return 3 * number + 1\n\n else:\n print(number // 2)\n return number // 2\n\nif __name__ == \"__main__\":\n print(f'Type an integer.')\n number = input()\n\n try:\n number = int(number)\n while number != 1:\n number = collatz(number)\n except ValueError:\n print(\"You must enter an integer.\")","repo_name":"diascarolina/book-automate-the-boring-stuff","sub_path":"chapter03_functions/collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"14793651578","text":"import sys\nsys.setrecursionlimit(10**7)\n\nn, m = map(int, input().split())\n\ngraph = [[] for i in range(n+1)]\nvisited = [False] * (n+1)\nresult = 0\n\nfor i in range(m):\n u, v = map(int, input().split())\n graph[u].append(v)\n graph[v].append(u)\n\n\ndef dfs(graph, start, visited):\n visited[start] = True\n for i in graph[start]:\n if not visited[i]:\n dfs(graph, i, visited)\n\nfor i in range(1,n+1):\n if not visited[i]:\n dfs(graph, i, visited)\n result += 1\n\nprint(result)\n\n\n","repo_name":"wxxhyeong/Algorithm_wh","sub_path":"Algorithm_Python/boj11724.py","file_name":"boj11724.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"9601642230","text":"from django.conf.urls import url,include\nfrom django.contrib import admin\nfrom pvd import views\n\nurlpatterns = [\n url(r'^annotations',views.annotations,name='buckets'),\n url(r'^runvision',views.runvision,name='runvision'),\n url(r'^storage',views.uploadstorage,name='uploadstorage'),\n url(r'^uploadimage',views.uploadimage,name='uploadimage'),\n url(r'^$',views.index),\n url(r'^admin/', admin.site.urls),\n]\n","repo_name":"umang-t-patel/Master-Project","sub_path":"pythonvision/pythonvision/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"33916907376","text":"\"\"\"\nImplement next permutation, which rearranges numbers into the lexicographically \nnext greater permutation of numbers.\n\nIf such an arrangement is not possible, it must rearrange it as the lowest possible order \n(i.e., sorted in ascending order).\n\nThe replacement must be in place and use only constant extra memory.\n\nExample 1:\n\n Input: nums = [1,2,3]\n Output: [1,3,2]\n\nExample 2:\n\n Input: nums = [3,2,1]\n Output: [1,2,3]\n\nExample 3:\n\n Input: nums = [1,1,5]\n Output: [1,5,1]\n\nExample 4:\n\n Input: nums = [1]\n Output: [1]\n\nConstraints:\n 1. 1 <= nums.length <= 100\n 2. 0 <= nums[i] <= 100\n\"\"\"\n\n\nclass Solution:\n def nextPermutation(self, nums) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n n = len(nums)\n i = n - 1\n while i > 0 and nums[i] <= nums[i-1]:\n i -= 1\n \n if i == 0:\n nums.sort()\n else:\n j = i + 1\n while j < n and nums[i-1] < nums[j]:\n j += 1\n \n nums[i-1], nums[j-1] = nums[j-1], nums[i-1]\n a, b = i, n-1\n while a < b:\n nums[a], nums[b] = nums[b], nums[a]\n a += 1\n b -= 1\n","repo_name":"chaosWsF/Python-Practice","sub_path":"leetcode/0031_next_permutation.py","file_name":"0031_next_permutation.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"12970557805","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.contrib.auth.models import User\nfrom Atreya.appointments.models import * \nfrom Atreya.appointments.serializers import *\nfrom rest_framework.authtoken.models import Token\nfrom .data.appointment_types import appointment_types\nfrom .data.appointments import appointments\nfrom .data.pre_appointment_questions import pre_appointment_questions\n# from .data.pre_appointment_responses import pre_appointment_responses\n\nclass Command(BaseCommand):\n help = 'create/delete sample data'\n def add_arguments(self, parser):\n parser.add_argument('command', type=str, help='create/delete sample data')\n\n def handle(self, *args, **options):\n if options['command']== 'create':\n try:\n print('Appointment Types')\n for appointment_type in appointment_types:\n serializer = AppointmentTypeSerializer(None, data=appointment_type)\n if serializer.is_valid():\n appointment_type = serializer.save()\n else:\n print(serializer.errors)\n raise CommandError('failure in creating sample data')\n\n print('Pre Appointment Questions')\n for pre_appointment_question in pre_appointment_questions:\n serializer = PreAppointmentQuestionSerializer(None, data=pre_appointment_question)\n if serializer.is_valid():\n pre_appointment_question = serializer.save()\n else:\n print(serializer.errors)\n raise CommandError('failure in creating sample data')\n\n print('Appointments')\n for appointment in appointments:\n for response in appointment.get('pre_appointment_responses',[]):\n # import pdb; pdb.set_trace()\n try:\n response['question'] = PreAppointmentQuestion.objects.get(appointment_type=appointment['appointment_type'],question=response['question']).id\n except PreAppointmentQuestion.DoesNotExist:\n print('\\n\\n\\nHERE\\n\\n\\n')\n # import pdb; pdb.set_trace()\n print(response)\n serializer = AppointmentSerializer(None, data=appointment)\n if serializer.is_valid():\n appointment = serializer.save()\n else:\n print(serializer.errors)\n raise CommandError('failure in creating sample data')\n\n # print('Pre appointment Responses') \n # for pre_appointment_response in pre_appointment_responses:\n # serializer = PreAppointmentResponseSerializer(None, data=pre_appointment_response)\n # if serializer.is_valid():\n # pre_appointment_response = serializer.save()\n # else:\n # print(serializer.errors)\n # raise CommandError('failure in creating sample data')\n\n except Exception as e:\n print(e)\n raise CommandError('failure in creating sample data')\n self.stdout.write(self.style.SUCCESS('Successfully created data'))\n elif options[\"command\"] == \"delete\":\n try:\n PreAppointmentResponse.objects.all().delete()\n Appointment.objects.all().delete()\n PreAppointmentQuestion.objects.all().delete()\n AppointmentType.objects.all().delete()\n\n except Exception as e:\n print(e)\n raise CommandError('failure in deleting sample data')\n\n self.stdout.write(self.style.SUCCESS('Successfully deleted data'))\n else:\n raise CommandError(\"not a valid command\") \n ","repo_name":"Seva-Solutions/MyClinic_Backend","sub_path":"Atreya/appointments/management/commands/appointments.py","file_name":"appointments.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"43457936572","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nfrom tkinter import *\r\nimport tkinter as tk\r\nfrom tkinter import colorchooser, ttk,filedialog,Entry\r\nfrom PIL import ImageTk,Image,ImageDraw,ImageOps\r\nimport cv2\r\nimport io\r\nimport subprocess\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\n\r\nclass main(object):\r\n\r\n def __init__(self, master):\r\n self.master = master\r\n self.master.title=\"Button Example\"\r\n self.color_fg = \"black\"\r\n self.color_bg = \"white\"\r\n self.old_x = None\r\n self.old_y = None\r\n self.pen_width = 20\r\n self.darwWidget()\r\n self.c.bind()\r\n self.c.bind('', self.paint)\r\n self.c.bind(\"\", self.reset)\r\n # self.button1=tk.Button(master,text=\"clear\",command=self.clearcanvas)\r\n # self.button1.pack()\r\n # self.button2=tk.Button(master,text=\"exit\",command=self.master.destroy)\r\n # self.button2.pack()\r\n self.button3=tk.Button(master,text=\"learn\",command=self.savefile)\r\n self.label = tk.Label(master, text=\"Enter answer:\")\r\n self.label.pack()\r\n self.lrn_input=Entry(master)\r\n self.lrn_input.pack()\r\n self.button3.pack()\r\n self.button5=tk.Button(master,text=\"pridict\",command=self.pridict)\r\n self.button5.pack()\r\n self.button4=tk.Button(master,text=\"result\",command=self.show_digits)\r\n self.button4.pack()\r\n self.digits=0\r\n self.tcells=[]\r\n self.tcells_array=0\r\n self.targets=[]\r\n self.df=\"\"\r\n self.result=0\r\n self.targets_array=0\r\n self.X_array=0\r\n self.y_array=0\r\n self.data=pd.read_csv(\"mydf.csv\")\r\n # self.data=self.data.drop(\"Unnamed: 0\",axis=1)\r\n \r\n def paint(self, e):\r\n if self.old_x and self.old_y:\r\n # print(\"draw\")\r\n self.c.create_line(self.old_x, self.old_y, e.x, e.y, width=self.pen_width,fill=self.color_fg, capstyle=\"round\", smoot=True)\r\n self.old_x = e.x\r\n self.old_y = e.y\r\n\r\n def reset(self, e):\r\n self.old_x = None\r\n self.old_y = None\r\n\r\n def changedW(self, width): \r\n self.pen_width=width\r\n \r\n def clearcanvas(self):\r\n self.c.delete(ALL)\r\n\r\n def change_fg(self):\r\n self.color_fg = colorchooser.askcolor(color=self.color_fg)[1]\r\n\r\n def change_bg(self):\r\n self.color_bg = colorchooser.askcolor(color=self.color_bg)[1]\r\n self.c['bg']=self.color_bg\r\n \r\n\r\n def darwWidget(self):\r\n self.controls=Frame(self.master,padx=5,pady=5)\r\n # textpw=Label(self.controls,text=\"pen Width\",font=\"Georgia 16\")\r\n # textpw.grid(row=0,column=0)\r\n # self.slider=ttk.Scale(self.controls,from_=5 , to=100, command=self.changedW,orient=\"vertical\" )\r\n # self.slider.set(self.pen_width)\r\n # self.slider.grid(row=0,column=1)\r\n # self.controls.pack(side=\"left\")\r\n self.c=Canvas(self.master,width=500,height=400,bg=self.color_bg)\r\n self.c.pack(fill=BOTH,expand=True)\r\n \r\n menu=Menu(self.master)\r\n self.master.config(menu=menu)\r\n optionmenu=Menu(menu)\r\n menu.add_cascade(label=\"Menu\",menu=optionmenu)\r\n optionmenu.add_command(label='brush color',command=self.change_fg)\r\n optionmenu.add_command(label='backgrond color',command=self.change_bg)\r\n optionmenu.add_command(label='clear convas',command=self.clearcanvas) \r\n optionmenu.add_command(label='Exit',command=self.master.destroy)\r\n \r\n \r\n def savefile(self):\r\n target=self.lrn_input.get()\r\n \r\n \r\n if target==\"\":\r\n print(\"do nothing\")\r\n else:\r\n ps = self.c.postscript(colormode='gray')\r\n img = 
Image.open(io.BytesIO(ps.encode('utf-8')))\r\n img=ImageOps.invert(img)\r\n img=img.resize((40, 40), Image.ANTIALIAS)\r\n img.save('test.png')\r\n self.digits=cv2.imread(\"test.png\",cv2.IMREAD_GRAYSCALE)\r\n self.tcells.append(self.digits.flatten())\r\n print(target)\r\n self.targets.append(target)\r\n self.clearcanvas()\r\n self. lrn_input.delete(0,\"end\")\r\n self.tcells_array=np.array(self.tcells,dtype=np.float32)\r\n self.targets_array=np.array(self.targets,dtype=np.float32)\r\n self.df=pd.DataFrame(self.tcells_array)\r\n self.df['target']=self.targets_array\r\n # self.df.to_csv(\"mydf.csv\",index=False)\r\n print(self.df)\r\n result=pd.concat([self.data,self.df],axis=0)\r\n # print(result)\r\n result.to_csv(\"mydf.csv\",index=False)\r\n \r\n def show_digits(self):\r\n print(self.digits)\r\n digits=self.digits\r\n \r\n self.tcells_array=np.array(self.tcells,dtype=np.float32)\r\n self.targets_array=np.array(self.targets,dtype=np.float32)\r\n self.master.destroy()\r\n # test_digits=cv2.imread(\"test_digits2.png\",cv2.IMREAD_GRAYSCALE)\r\n \r\n \r\n def pridict(self):\r\n\r\n X=self.df.loc[:,self.df.columns!=\"target\"]\r\n y=self.df.loc[:,self.df.columns==\"target\"]\r\n self.X_array=np.array(X,dtype=np.float32)\r\n self.y_array=np.array(y,dtype=np.float32)\r\n \r\n knn=cv2.ml.KNearest_create()\r\n knn.train(self.X_array,cv2.ml.ROW_SAMPLE,self.y_array)\r\n ret,self.result,neighbours,dist=knn.findNearest(self.X_array,k=3)\r\n message =neighbours \r\n text = self.c.create_text(10, 100, text=message, font=(\"Helvetica\", 10))\r\n print(self.result)\r\n \r\n \r\n \r\nwin = Tk()\r\n\r\nwin.title(\"my app\")\r\npaint=main(win)\r\nwin.mainloop()\r\n# mydf=paint.df\r\n# data=paint.data\r\n\r\n","repo_name":"armandabir/handwrithing-AI","sub_path":"final3.py","file_name":"final3.py","file_ext":"py","file_size_in_byte":5852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"20960568108","text":"# name = User --> named parameter, default parameter, keyed argument\r\ndef say_hello(age, address, name=\"User\"):\r\n print(\"Hello \", name, age, address)\r\n\r\n\r\n# if you want to pass null value use None\r\nsay_hello(age=37, address=\"Spring Hill Pkey\" ,name=\"Rishi\")\r\nsay_hello(address=\"Spring Hill Pkey\", name=\"Ram\", age=\"45\")\r\nsay_hello(age=30,name=\"Laxman\", address=\"Dashrath Lane\")\r\nsay_hello(\"\",'something', None)\r\n# \r\nprint(type(say_hello))","repo_name":"javarishi/H2KInfosysNovPython","sub_path":"learn_day07/TestFunction.py","file_name":"TestFunction.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"41388167038","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfs = 8000 # częstotliwość próbkowania\ntc = 6.4 # czas trwania sygnału\n#ts okres próbkowania\nn = tc * fs #sygnał\nt = np.arange(0, 6.4, 1/fs)\nu = []\nfor i in t:\n if 0.5 > i and i>=0:\n u.append(0.9 * np.sin(2 * np.pi * i * 8 - (np.pi/3)) + np.log2(np.abs(np.cos(7*(i**2)) + 2.2)))\n if 1.9 > i and i >= 0.5:\n u.append((np.sin(2*np.cos(4*np.pi*i)*np.pi*i))/(2*(i**2)+1))\n if 3.7 > i and i >= 1.9:\n u.append((i-1.9)**2 - np.cos(13*i))\n if 4.9 > i and i >= 3.7:\n u.append(0.5*(i**0.7)*np.sin(8*i))\n if 6.4 > i and i >= 4.9:\n u.append((2+np.sin(18*i)/(3+np.cos(28*i))))\nplt.plot(t, u)\nplt.show()","repo_name":"Szek1/Transmisja-Danych","sub_path":"lab-1/Zad3/zad3.py","file_name":"zad3.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"32279961678","text":"from flask import (Blueprint, redirect, render_template, request, url_for)\nfrom matchFinder.models import praeferenz_model\nfrom matchFinder.models import teilnehmer_model\nfrom . import database_helper\nfrom . import limiter\nfrom . import helper\nimport hashlib\nimport json\n\nbp = Blueprint('preference', __name__, url_prefix='/preference')\n\n@bp.route('')\ndef set_preference(verteilung_id):\n\t\"\"\"\n\tloads the verteilung to and id, presents the user with a form\n\tasking him to enter some credentials\n\t\"\"\"\n\n\tverteilung = database_helper.get_verteilung_by_hashed_id(verteilung_id)\n\tif verteilung != None:\n\t\treturn render_template('validate.html', id=verteilung_id,\n\t\t\tprotected=True if verteilung.protected else False)\n\telse:\n\t\treturn render_template('validate.html', id=verteilung_id,\n\t\t\terror=\"Keine gültige Verteilung!\")\n\n\n@bp.route('/validate/', methods=['POST'])\n@limiter.limit(\"5 per minute\", error_message=\"Too many requests! Try again later.\")\ndef validate():\n\t\"\"\"\n\tValidates a user by its matrikelnummer.\n\tIf the entered number is valid, the user is redirected to the next page.\n\tIf not, an error is displayed and the form is presented again.\n\t\"\"\"\n\n\tdata = request.form.get('data', None)\n\tobj = json.loads(data)\n\thashed_verteilung_id = obj['id']\n\tprotected = obj[\"protected\"]\n\tmatr_nr = request.form.get('matr_nr', None)\n\terror, verteilung, teilnehmer = helper.check_user_credentials(matr_nr,\n\t\t\t\t\t\t\t\t\t\thashed_verteilung_id)\n\tif error:\n\t\treturn render_template('validate.html', id=hashed_verteilung_id,\n\t\t\t\tprotected=protected, error=error)\n\telse:\n\t\tthemen = database_helper.get_thema_list_by_id(verteilung.thema_list_id).themen\n\t\treturn render_template(\"preference.html\", teilnehmer=teilnehmer,\n\t\t\t\tthemen=themen, verteilung_id=verteilung.id,\n\t\t\t\tveto_allowed=verteilung.veto_allowed, min_votes = verteilung.min_votes)\n\n@bp.route('/register/', methods=['POST'])\n@limiter.limit(\"5 per minute\", error_message=\"Too many requests! 
Try again later.\")\ndef register():\n\t\"\"\"\n\tregisters a new user, redirects him to the next page\n\t\"\"\"\n\n\tdata = request.form.get('data', None)\n\tobj = json.loads(data)\n\thashed_verteilung_id = obj['id']\n\tfirst_name = request.form.get('first_name', None)\n\tlast_name = request.form.get('last_name', None)\n\tverteilung = database_helper.get_verteilung_by_hashed_id(hashed_verteilung_id)\n\tif verteilung != None:\n\t\tteilnehmer = teilnehmer_model.Teilnehmer(first_name=first_name, matr_nr=0,\n\t\t\tlast_name=last_name, list_id=verteilung.teilnehmer_list_id)\n\t\tdatabase_helper.insert_teilnehmer(teilnehmer)\n\t\tthemen = database_helper.get_thema_list_by_id(verteilung.thema_list_id).themen\n\t\treturn render_template(\"preference.html\", teilnehmer=teilnehmer,\n\t\t\t\tthemen=themen, verteilung_id=verteilung.id,\n\t\t\t\tveto_allowed=verteilung.veto_allowed, min_votes = verteilung.min_votes)\n\treturn render_template('validate.html', id = hashed_verteilung_id,\n\t\tprotected=False, error=\"Es ist ein Fehler aufgetreten!\")\n\n@bp.route('save', methods=['POST'])\ndef save():\n\t\"\"\"\n\tsave the Präferenzen of a user.\n\tIf this user updated already existing präferenzen instead\n\tof entering new ones, the old präferenzen get overwritten.\n\t\"\"\"\n\n\tinformation_object = request.form.get('information', None)\n\n\tobj = json.loads(information_object)\n\tverteilung_id = obj[\"verteilung_id\"]\n\tteilnehmer_id = obj[\"teilnehmer_id\"]\n\tverteilung = database_helper.get_verteilung_by_id(verteilung_id)\n\tnumber_of_themen_in_verteilung = len(verteilung.thema_list.themen)\n\tpreferences = []\n\tfor index in range(number_of_themen_in_verteilung):\n\t\tpreference = request.form.get(str(index + 1), None)\n\t\tpreferences.append(preference)\n\tpreference_string = helper.convert_preferences(preferences)\n\texisting_praef = database_helper.get_praeferenz(teilnehmer_id, verteilung_id)\n\tif existing_praef != None:\n\t\tif not verteilung.editable:\n\t\t\thashed_verteilung_id = hashlib.sha256(str(verteilung.id).encode()).hexdigest()\n\t\t\treturn render_template('validate.html', id = hashed_verteilung_id,\n\t\t\t\tprotected=verteilung.protected,\n\t\t\t\terror=\"Das Bearbeiten der Präferenzen bei dieser Verteilung ist nicht erlaubt!\")\n\t\tdatabase_helper.update_praef(existing_praef, preference_string)\n\t\treturn redirect(url_for('home.index_with_message',\n\t\t\tmessage=\"Deine Präferenzen wurden aktualisiert!\"))\n\telse:\n\t\tpraeferenz = praeferenz_model.Praeferenz(\n\t\t\tteilnehmer_id=teilnehmer_id,\n\t\t\tverteilung_id=verteilung_id,\n\t\t\tpraeferenzen=preference_string)\n\t\tdatabase_helper.insert_praeferenz(praeferenz)\n\t\treturn redirect(url_for('home.index_with_message',\n\t\t\tmessage=\"Deine Präferenzen wurden gespeichert!\"))","repo_name":"felix-wolf/MatchFinder","sub_path":"matchFinder/preference.py","file_name":"preference.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"33847712512","text":"from torch import optim\r\n\r\n\r\ndef build_optimizers(model, config):\r\n optimizer = config['training']['optimizer']\r\n lr = config['training']['lr']\r\n\r\n params = model.parameters()\r\n\r\n # Optimizers\r\n if optimizer == 'rmsprop':\r\n optimizer = optim.RMSprop(params, lr=lr, alpha=0.99, eps=1e-8)\r\n elif optimizer == 'adam':\r\n optimizer = optim.Adam(params, lr=lr)\r\n elif optimizer == 'sgd':\r\n optimizer = optim.SGD(params, lr=lr, momentum=0.)\r\n\r\n return optimizer\r\n","repo_name":"HaoZhangXidian/Submission-to-NC-SPFA-for-EC","sub_path":"Optimizer.py","file_name":"Optimizer.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"14779464591","text":"from dash import Dash, html\nfrom dash.dependencies import Input, Output\n\nimport dash_blueprint as dbp\n\n\napp = Dash(__name__)\n\napp.scripts.config.serve_locally = True\napp.css.config.serve_locally = True\n\napp.layout = html.Div(\n [\n dbp.Menu(\n children=[\n dbp.MenuItem(text=\"Top level\", children=[\n dbp.MenuItem(text=\"Sub Menu 1\", href=\"/sub1\"),\n dbp.MenuItem(text=\"Sub Menu 2\", href=\"/sub2\"),\n ])\n ]\n )\n ]\n)\n\nif __name__ == \"__main__\":\n app.run_server(debug=False)\n","repo_name":"bsgip/dash-blueprint","sub_path":"tests/app/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"36803156263","text":"import pygame\nimport time\nimport random\nfrom global_vars import *\nimport global_vars\nfrom player_ball1 import Ball\nfrom map13 import map1\nfrom put_coin import put_coin\nfrom enemy3 import Enemy\nfrom score_it import score_display\nfrom exit_menu import exit_menu\n#from wall import Wall\n#from move_it import move_it\n\n\npygame.init()\ngameDisplay = pygame.display.set_mode((d_width,d_height))\npygame.display.set_caption(\"Pac-Man\")\nclock = pygame.time.Clock()\n\n\ndef check_enemy_collision(ball):\n\tif ball.immune_t > 0:\n\t\tball.immune_t += -1\n\t\t\n\tif ball.immune_t == 0:\n\t\tenemies = pygame.sprite.spritecollide(ball,enemy_sprites,True)\n\t\tfor enemmy in enemies:\n\t\t\tball.x_inc = 0\n\t\t\tball.y_inc = 0\n\t\t\tcrash_sound.play()\n\t\t\ttime.sleep(2)\n\t\t\tj = random.randrange(0,9)\n\t\t\ti = random.randrange(0,10) \n\t\t\t#print \"death = i :\",(enemmy.rect.y-y00-e-2)/box_w,\"j :\",(enemmy.rect.x-x00-e-2)/box_w\n\t\t\t#print \"birth = i :\",i,\"j :\",j\n\t\t\n\t\t\t\n\t\t\tif (i == 4 and ( j == 0 or j == 1 or j == 8 or j == 9)) or (i == 6 and ( j == 0 or j == 1 or j == 8 or j == 9)) or (i == 5 and ( j == 4 or j == 5)) or (i == 1 and (j == 1 or j == 3 or j == 6 or j == 8)):\n\t\t\t\ti += 1\n\t\t\t#ghost = Enemy(x00+e+2+j*box_w,y00+e+2+i*box_w)\t\n\t\t\tghost = Enemy(x00+e+2+j*box_w,y00+e+2+i*box_w,enemmy.immage)\n\t\t\t#for enemyy in enemies:\n\t\t\t#\tghost = Enemy(x00+e+2+j*box_w,y00+e+2+i*box_w,enemyy.color)\n\t\t\n\t\tif enemies :\n\t\t\tball.lifes += -1\n\t\t\tball.immune_t = 40\n\t\t\tif(ball.lifes == 0):\n\t\t\t\ttime.sleep(2)\n\t\t\t\tpygame.quit()\n\t\t\t\tquit()\n\t\t\ndef gameLoop():\n\t#player_x\n\t#player_y\n\t\n\tmap1(gameDisplay)\n\tput_coin()\n\tball = Ball(x00+e+2,y00+e+2)\t\n\t\n\t#if enemy is imported from enemy1\n\tghost1 = Enemy(x00+e+2+9*box_w,y00+e+2,'images3.jpeg')\n\tghost2 = Enemy(x00+e+2+9*box_w,y00+e+2+10*box_w,'images4.png')\n\tghost3 = Enemy(x00+e+2,y00+e+2+10*box_w,'images6.png')\n\t\n\t#if enemy is imported from enemy \n\t#ghost1 = Enemy(x00+e+2+9*box_w,y00+e+2,l_green)\n\t#ghost2 = Enemy(x00+e+2+9*box_w,y00+e+2+10*box_w,l_red)\n\t#ghost3 = Enemy(x00+e+2,y00+e+2+10*box_w,white)\t\n\t\t\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT :\n\t\t\t\tpygame.quit()\n\t\t\t\tquit()\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\t\n\t\t\t\t\t#pygame.quit()\n\t\t\t\t\t#quit()\n\t\t\t\t\texit_menu(gameDisplay)\n\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\n\t\t\t\t#else :\n\t\t\t\t#\tball.move(event.key)\t\n\t\t\n\t\t\t#elif event.type == pygame.KEYUP:\n\t\t\t#\tball.move(event.key)\n\t\t\n\t\tball.move(event)\n\t\t\n\t\tglobal_vars.player_x = (ball.rect.x )\n\t\tglobal_vars.player_y = (ball.rect.y )\n\t\t\n\t\tall_sprites.update()\n\t\t\t\n\t\tgameDisplay.fill(bg_color)\n\t\t\n\t\tscore_display(gameDisplay,ball.score,ball.lifes)\n\t\tall_sprites.draw(gameDisplay)\n\t\tpygame.display.update()\n\t\n\t\tcheck_enemy_collision(ball)\n\t\t\t\t\t\t\t\t\t\n\t\tclock.tick(30)\n\t\ngameLoop()\npygame.quit()\nquit()\n\t\n","repo_name":"shivam-dev-singh/PacMan-Copy","sub_path":"PacMan/start_game.py","file_name":"start_game.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"13602740163","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport argparse\nimport datetime\nimport logging\nimport os\nimport pickle\nimport signal\nimport warnings\n\nimport logzero\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data.distributed\n\nfrom multiprocessing import cpu_count\nfrom PIL import Image, ImageFile\nfrom sklearn.metrics import classification_report\nfrom torchvision import transforms\nfrom tqdm import tqdm\nfrom logzero import logger\nfrom torch.nn import functional as F\n\nfrom util.dataloader import ImageFolderWithPaths, CSVDataset\nfrom util.functions import accuracy, load_checkpoint, load_model_from_checkpoint, Metric, CustomTenCrop, CustomTwentyCrop, CustomSixCrop, CustomSevenCrop\n\nwarnings.filterwarnings(\"ignore\", \"(Possibly )?corrupt EXIF data\", UserWarning)\nsignal.signal(signal.SIGINT, signal.default_int_handler)\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nparser = argparse.ArgumentParser(description='test')\nparser.add_argument('test', metavar='valid_csv', help='path to test dataset list')\nparser.add_argument('--prefix', default='auto',\n help=\"prefix of model and logs (default: auto)\")\nparser.add_argument('--log-dir', default='logs',\n help='log directory (default: logs)')\nparser.add_argument('--model', '-m', type=str,\n help='model file to test')\nparser.add_argument('-j', '--workers', type=int, default=None,\n help='number of data loading workers (default: 80%% of the number of cores)')\n\nparser.add_argument('-b', '--batch-size', type=int, default=128, help='the batch size')\nparser.add_argument('--topk', type=int, default=3,\n help='report the top-k accuracy (default: 3)')\nparser.add_argument('--print-cr', action='store_true', default=False,\n help='print classification report (default: False)')\nparser.add_argument('--onehot', action='store_true', default=False,\n help='use onehot label (default: False)')\n\n\n# Test Time Augmentation\nparser.add_argument('--tta', action='store_true', default=False,\n help='test time augmentation (use FiveCrop)')\nparser.add_argument('--tta-ten-crop', action='store_true', default=False,\n help='test time augmentation (use TenCrop)')\nparser.add_argument('--tta-custom-six-crop', action='store_true', default=False,\n help='test time augmentation (use CustomSixCrop)')\nparser.add_argument('--tta-custom-seven-crop', action='store_true', default=False,\n help='test time augmentation (use CustomSevenCrop)')\nparser.add_argument('--tta-custom-ten-crop', action='store_true', default=False,\n help='test time augmentation (use CustomTenCrop)')\nparser.add_argument('--tta-custom-twenty-crop', action='store_true', default=False,\n help='test time augmentation (use CustomTwentyCrop)')\n\n# data preprocess\nparser.add_argument('--scale-size', type=int, default=None,\n help='scale size (default: auto)')\nparser.add_argument('--input-size', type=int, default=None,\n help='input size (default: auto)')\nparser.add_argument('--rgb-mean', type=str, default=None,\n help='RGB mean (default: auto)')\nparser.add_argument('--rgb-std', type=str, default=None,\n help='RGB std (default: auto)')\nparser.add_argument('--interpolation', type=str, default=None,\n choices=[None, 'BILINEAR', 'BICUBIC', 'NEAREST'],\n help='interpolation. 
(default: auto)')\nparser.add_argument('--grayscale', action='store_true', default=False,\n help='change input channel from 3 to 1.')\n\n# misc\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\n\n\ndef main():\n global args\n args = parser.parse_args()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n\n if args.prefix == 'auto':\n args.prefix = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n\n formatter = logging.Formatter('%(message)s')\n logzero.formatter(formatter)\n\n if not os.path.exists(args.log_dir):\n os.makedirs(args.log_dir, exist_ok=True)\n\n log_filename = \"{}-test.log\".format(args.prefix)\n log_filepath = os.path.join(args.log_dir, log_filename)\n logzero.logfile(log_filepath)\n\n if args.workers is None:\n args.workers = max(1, int(0.8 * cpu_count()))\n elif args.workers == -1:\n args.workers = cpu_count()\n\n cudnn.benchmark = True\n\n logger.info('Running script with args: {}'.format(str(args)))\n\n checkpoint = load_checkpoint(args, args.model)\n logger.info(\"=> loaded the model (epoch {})\".format(checkpoint['epoch']))\n model_arch = checkpoint['arch']\n model_args = checkpoint['args']\n\n if model_arch.startswith('efficientnet-b4'):\n scale_size = 200\n input_size = 190\n else:\n scale_size = 120\n input_size = 112\n\n if args.scale_size:\n scale_size = args.scale_size\n else:\n args.scale_size = scale_size\n if args.input_size:\n input_size = args.input_size\n else:\n args.input_size = input_size\n\n if args.rgb_mean:\n rgb_mean = args.rgb_mean\n rgb_mean = [float(mean) for mean in rgb_mean.split(',')]\n else:\n rgb_mean = model_args.rgb_mean\n\n if args.rgb_std:\n rgb_std = args.rgb_std\n rgb_std = [float(std) for std in rgb_std.split(',')]\n else:\n rgb_std = model_args.rgb_std\n\n if args.interpolation:\n interpolation = args.interpolation\n else:\n try:\n interpolation = model_args.interpolation\n except AttributeError:\n interpolation = 'BICUBIC'\n\n logger.info(\"scale_size: {} input_size: {}\".format(scale_size, input_size))\n logger.info(\"rgb_mean: {}\".format(rgb_mean))\n logger.info(\"rgb_std: {}\".format(rgb_std))\n logger.info(\"interpolation: {}\".format(interpolation))\n\n interpolation = getattr(Image, interpolation, 3)\n\n try:\n args.grayscale = model_args.grayscale\n except:\n pass\n\n # Data augmentation and normalization for test\n if args.grayscale:\n if len(rgb_mean) == 1:\n gray_mean = rgb_mean\n gray_std = rgb_std\n else:\n # gray_mean = [0.5,]\n # gray_std = [0.5,]\n gray_mean = (rgb_mean[0], )\n gray_std = (rgb_std[0], )\n\n data_transforms = {\n 'test': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.CenterCrop(input_size),\n transforms.ToTensor(),\n transforms.Normalize(gray_mean, gray_std),\n ]),\n 'test_FiveCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.FiveCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ]),\n 'test_TenCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.TenCrop(input_size),\n 
transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ]),\n 'test_CustomSixCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomSixCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ]),\n 'test_CustomSevenCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomSevenCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ]),\n 'test_CustomTenCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomTenCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ]),\n 'test_CustomTwentyCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomTwentyCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ])\n }\n else:\n data_transforms = {\n 'test': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.CenterCrop(input_size),\n transforms.ToTensor(),\n transforms.Normalize(rgb_mean, rgb_std)\n ]),\n 'test_FiveCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.FiveCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))\n ]),\n 'test_TenCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.TenCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))\n ]),\n 'test_CustomSixCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomSixCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))\n ]),\n 'test_CustomSevenCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomSevenCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, 
rgb_std)(crop) for crop in crops]))\n ]),\n 'test_CustomTenCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomTenCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))\n ]),\n 'test_CustomTwentyCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomTwentyCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))\n ])\n }\n\n tfms = 'test'\n if args.tta:\n tfms = 'test_FiveCrop'\n batch_size = args.batch_size // 5\n elif args.tta_ten_crop:\n tfms = 'test_TenCrop'\n batch_size = args.batch_size // 10\n elif args.tta_custom_six_crop:\n tfms = 'test_CustomSixCrop'\n batch_size = args.batch_size // 6\n elif args.tta_custom_seven_crop:\n tfms = 'test_CustomSevenCrop'\n batch_size = args.batch_size // 7\n elif args.tta_custom_ten_crop:\n tfms = 'test_CustomTenCrop'\n batch_size = args.batch_size // 10\n elif args.tta_custom_twenty_crop:\n tfms = 'test_CustomTwentyCrop'\n batch_size = args.batch_size // 20\n else:\n batch_size = args.batch_size\n\n\n image_datasets = {\n 'test': CSVDataset(args.test, data_transforms[tfms], onehot=args.onehot)\n }\n\n test_num_classes = len(image_datasets['test'].classes)\n test_class_names = image_datasets['test'].classes\n\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}\n test_loader = torch.utils.data.DataLoader(\n image_datasets['test'], batch_size=batch_size, shuffle=False, **kwargs)\n\n logger.info(\"number of test dataset: {}\".format(len(test_loader.dataset)))\n logger.info(\"number of classes: {}\".format(len(test_class_names)))\n\n model, metric_fc, criterion_state_dict, num_classes, class_names = load_model_from_checkpoint(args, checkpoint, test_num_classes, test_class_names, grayscale=args.grayscale)\n\n if args.topk > num_classes:\n logger.warn('--topk must be less than or equal to the class number of the model')\n args.topk = num_classes\n logger.warn('--topk set to {}'.format(num_classes))\n\n # check test and train class names\n do_report = True\n if test_num_classes != num_classes:\n logger.info(\"The number of classes for train and test is different.\")\n logger.info(\"Skip accuracy report.\")\n do_report = False\n\n test(args, model_arch, model, metric_fc, test_loader, class_names, do_report, logger)\n\n logger.info(\"=> Saved test log to \\\"{}\\\"\".format(log_filepath))\n\n\ndef test(args, model_arch, model, metric_fc, test_loader, class_names, do_report, logger):\n model.module.eval()\n if metric_fc:\n metric_fc.module.eval()\n test_accuracy = Metric('test_accuracy')\n test_loss = Metric('test_loss')\n\n\n pred = []\n Y = []\n correct_num = 0\n\n filepath = '{}-test-results.log'.format(args.prefix)\n savepath = os.path.join(args.log_dir, filepath)\n f = open(savepath, 'w')\n\n softmax = torch.nn.Softmax(dim=1)\n criterion = nn.CrossEntropyLoss()\n\n with tqdm(total=len(test_loader), desc='Test') as t:\n with torch.no_grad():\n for (data, target, paths) in test_loader:\n if args.cuda:\n data = data.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n if args.tta or args.tta_ten_crop or \\\n args.tta_custom_ten_crop or 
args.tta_custom_twenty_crop or \\\n args.tta_custom_six_crop or args.tta_custom_seven_crop:\n bs, ncrops, c, h, w = data.size()\n if metric_fc:\n feature = model(data.view(-1, c, h, w))\n output = metric_fc(feature.reshape(feature.shape[:-2]))\n else:\n output = model(data.view(-1, c, h, w))\n output = output.view(bs, ncrops, -1).mean(1)\n else:\n if metric_fc:\n feature = model(data)\n output = metric_fc(feature.reshape(feature.shape[:-2]))\n else:\n output = model(data)\n\n if do_report:\n pred += [int(l.argmax()) for l in output]\n Y += [int(l) for l in target]\n\n for path, y, preds in zip(paths, target, softmax(output)):\n probabilities, labels = preds.topk(args.topk)\n preds_text = ''\n for i in range(args.topk):\n preds_text += \" {} {}\".format(labels[i], probabilities[i])\n f.write(\"{} {}{}\\n\".format(path, int(y), preds_text))\n\n if str(y.item()) == str(labels[0].item()):\n correct_num += 1\n\n if do_report:\n test_accuracy.update(accuracy(output, target))\n test_loss.update(criterion(output, target))\n t.set_postfix({'loss': test_loss.avg.item(),\n 'accuracy': 100. * test_accuracy.avg.item()})\n t.update(1)\n\n f.close()\n logger.info(\"=> Saved test results to \\\"{}\\\"\".format(savepath))\n\n if do_report:\n\n cr_filepath = '{}-test-classification_report.log'.format(args.prefix)\n cr_savepath = os.path.join(args.log_dir, cr_filepath)\n\n cr = classification_report(Y, pred, target_names=class_names)\n if args.print_cr:\n print(cr)\n with open(cr_savepath, 'w') as crf:\n crf.write(cr)\n logger.info(\"=> Saved classification report to \\\"{}\\\"\".format(cr_savepath))\n\n logger.info(\"model: {}\".format(args.model))\n logger.info(\"Test-loss: {}\".format(test_loss.avg))\n logger.info(\"Test-accuracy: {} ({}/{})\".format((correct_num / len(test_loader.dataset)), correct_num, len(test_loader.dataset)))\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"knjcode/kaggle-kuzushiji-recognition-2019","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":18920,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"76"}
+{"seq_id":"73336527285","text":"from django.db.models import Q\nfrom django.contrib import admin\nfrom django.contrib.admin.views.main import ChangeList\n\nclass InputFilter(admin.SimpleListFilter):\n template = 'admin/input_filter.html'\n\n def lookups(self, request, model_admin):\n # Dummy, required to show the filter.\n return ((),)\n\n def choices(self, changelist):\n # Grab only the \"all\" option.\n all_choice = next(super().choices(changelist))\n all_choice['query_parts'] = (\n (k, v)\n for k, v in changelist.get_filters_params().items()\n if k != self.parameter_name\n )\n yield all_choice\n\n'''\nclass YearFilter(InputFilter):\n parameter_name = 'year'\n title = 'year' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_year__year__icontains=bit)\n ) \n return queryset.filter(any_name)\n'''\nclass YearFilter(InputFilter):\n parameter_name = 'year'\n title = 'year' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(year__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass MakeModelFilter(InputFilter):\n parameter_name = 'makemodel'\n title = 'make and model' \n def queryset(self, request, queryset):\n term = self.value()\n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n #Q(vehicle_makemodel__make__icontains=bit) |\n #Q(vehicle_makemodel__vehiclemodel__icontains=bit) |\n Q(vehicle_makemodel__makemodel__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\n'''\nclass MakeFilter(InputFilter):\n parameter_name = 'make'\n title = 'make' \n def queryset(self, request, queryset):\n term = self.value()\n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_make__make__icontains=bit)\n ) \n return queryset.filter(any_name)\n\nclass VehicleModelFilter(InputFilter):\n parameter_name = 'vehiclemodel'\n title = 'model' \n def queryset(self, request, queryset):\n term = self.value()\n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_vehiclemodel__vehiclemodel__icontains=bit)\n ) \n return queryset.filter(any_name)\n'''\n\nclass TrimFilter(InputFilter):\n parameter_name = 'trim'\n title = 'trim' \n def queryset(self, request, queryset):\n term = self.value()\n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_trim__trim__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass EngineSizeFilter(InputFilter):\n parameter_name = 'enginesize'\n title = 'engine size' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_enginesize__enginesize__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass EngineCodeFilter(InputFilter):\n parameter_name = 'enginecode'\n title = 'engine code' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_enginecode__enginecode__icontains=bit)\n ) \n return 
queryset.filter(any_name).distinct()","repo_name":"reinali07/autoshop-manager","sub_path":"vehicles_db/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"10486030911","text":"from Basics import *\r\nfrom Display import *\r\nfrom competition import *\r\nfrom Minimax import *\r\nimport random\r\n\r\ndef Play(player,dam,player1,player2,gamemode):\r\n affiche(player,dam)\r\n if (win (dam,player, player1, player2,gamemode) == 0):\r\n print('')\r\n else:\r\n return\r\n if gamemode == 1:\r\n if (player == 1):\r\n coup = int(input(player1))\r\n else:\r\n coup = int(input(player2))\r\n if gamemode == 4:\r\n coup = Minimax(dam,player)\r\n if gamemode == 5:\r\n if player == 1:\r\n coup = Minimax(dam,player)\r\n else:\r\n plyy = []\r\n for i in range (10):\r\n for j in range(10):\r\n if estValide(i,j,player,dam) and dam[i][j] == 0:\r\n plyy.append(10*i+j)\r\n if plyy != []:\r\n coup = plyy[random.randint(0,len(plyy)-1)]\r\n if gamemode == 2:\r\n if player == 1:\r\n coup = int(input(player1))\r\n else:\r\n coup = Minimax(dam,player)\r\n x = int(coup//10) #Oui, la magie existe!\r\n y = int(coup%10)\r\n if coup == -1:\r\n print('aha')\r\n else:\r\n if coup not in range (0,100,1):\r\n print (\"Impossible\")\r\n return Play(player,dam,player1,player2,gamemode)\r\n if not estValide(x,y,player,dam):\r\n print (\"Impossible\")\r\n return Play(player,dam,player1,player2,gamemode)\r\n Poser(x,y,player,dam)\r\n return Play(3-player,dam,player1,player2,gamemode)\r\n \r\ndef win(damier, player, player1, player2,gamemode):\r\n noplay = np(damier,player)\r\n print (player1,\": \", noplay[1])\r\n print (player2,\": \", noplay[2])\r\n if (noplay[0] == 1):\r\n print (\"Desole, vous ne pouvez pas jouer.\")\r\n return Play(3-player,damier,player1,player2,gamemode)\r\n elif noplay[0] == 2:\r\n print('Gagnant: ' , player1)\r\n return 1\r\n elif noplay[0] == 3:\r\n print('Gagnant: ' , player2)\r\n return 1\r\n elif (noplay[0] == 4)or(noplay[1]+noplay[2] == 100):\r\n if (noplay[1]>noplay[2]):\r\n print('Gagnant: ' , player1)\r\n return 1\r\n elif (noplay[2]>noplay[1]):\r\n print('Gagnant: ' , player2)\r\n return 1\r\n else:\r\n print('Egalité.')\r\n return 1\r\n return 0\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"iarmagnat/Othello","sub_path":"gamemode1.py","file_name":"gamemode1.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"11847754637","text":"import logging\nfrom argparse import ArgumentParser\nfrom os import mkdir\nfrom os.path import exists\nfrom tempfile import TemporaryDirectory\nfrom pathlib import Path\n\nimport numpy as np\nfrom scipy.signal import savgol_filter\n\nimport sys\nsys.path.append('./DataProcessing')\nfrom DataProcessing.reconstruct_data import load_mean, denormalize\nfrom DataProcessing.process_motions import create_bvh\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef smoothing(motion):\n\n smoothed = [savgol_filter(motion[:,i], 9, 3) for i in range(motion.shape[1])]\n new_motion = np.array(smoothed).transpose()\n return new_motion\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n \"--pred\",\n \"--prediction\",\n type=str,\n required=True,\n help=\"directory with .npy files with predictions of \" \"shape (N x 45)\",\n )\n parser.add_argument(\n \"--dest\",\n type=str,\n required=True,\n help=\"directory to save results\",\n )\n parser.add_argument(\n \"--mean\",\n type=str,\n default=\"DataProcessing/mean_pose.npz\",\n help=\"File with normalization values.\",\n )\n parser.add_argument(\n \"--pipe\",\n type=str,\n default=\"pipe\",\n help=\"pipe folder with pre/post processing.\"\n )\n parser.add_argument(\n \"--smooth\",\n action=\"store_true\",\n default=False,\n help=\"Flag to apply smoothing.\"\n )\n args = parser.parse_args()\n if not exists(args.dest):\n mkdir(args.dest)\n for pred_file in Path(args.pred).glob('*.npy'):\n logging.info(str(pred_file))\n prediction = np.load(str(pred_file))\n if args.smooth:\n logger.info(\"Smoothing prediction\")\n prediction = smoothing(prediction)\n\n logging.info(\"Reconstructing data by denormalizing it.\")\n max_val, mean_pose = load_mean(args.mean)\n prediction = denormalize(prediction, max_val, mean_pose)\n\n logging.info(\"Creating .bvh. This requires pipe\")\n\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n np.save(tmpdir / pred_file.name, prediction)\n create_bvh(tmpdir / pred_file.name, args.dest, args.pipe)\n","repo_name":"FineMotion/GENEA_2020","sub_path":"create_bvh.py","file_name":"create_bvh.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"}
+{"seq_id":"24900274857","text":"from model.storage import Storage\nfrom schemas.storage import StorageCreate, StorageUpdate\nfrom typing import Union\nfrom fastapi.responses import JSONResponse\nfrom sqlalchemy import Column\nfrom sqlalchemy.orm import Session\n\n\ndef check_storage_exists(project_id: str, db: Session) -> bool:\n '''\n Returns if a storage account already exists for the given project.\n \n :param project_id: id of the corresponding project\n :param db: active database session\n '''\n\n return(db.query(Storage).filter(Storage.project==project_id, Storage.is_deleted==False).count() > 0)\n\n\ndef create_storage(project_id: str, data: StorageCreate, db:Session) -> JSONResponse:\n '''\n Stores the storage account credentials for the project.\n \n :param project_id: id of the corresponding project\n :param data: storage account details\n :param db: active database session\n '''\n \n storage = Storage()\n storage_data = data.dict(exclude_none=True, by_alias=False)\n\n for key, value in storage_data.items():\n setattr(storage, key, value)\n\n storage.project = project_id\n\n db.add(storage)\n db.commit()\n db.refresh(storage)\n\n return JSONResponse({\"status\": 201, \"message\": \"storage created\", \"data\": [{}]}, status_code=201)\n\n\ndef get_storage(project_id: str, db: Session) -> Union[Storage, None]:\n '''\n Returns the storage account associated with the specified project.\n \n :param project_id: id of the corresponding project\n :param db: active database session\n '''\n return(db.query(Storage).filter(Storage.project==project_id, Storage.is_deleted==False).first())\n\n\ndef get_storageid(project_id: str, db: Session) -> Column[str]:\n '''\n Returns the id for the storage account data of the given project.\n\n :param project_id: unique id of the project\n :param db: active database session\n '''\n\n return(db.query(Storage).filter(Storage.project==project_id, Storage.is_deleted==False).first().id)\n\n\ndef get_storage_by_id(storage_id: str, db: Session) -> Storage:\n '''\n Returns the storage account associated with the active project.\n \n :param storage_id: id of the corresponding storage account data\n :param db: active database session\n '''\n \n return(db.query(Storage).filter(Storage.id==storage_id).first())\n\n\ndef update_storage(data: StorageUpdate, db: Session) -> JSONResponse:\n '''\n Update the storage account details.\n \n :param data: storage account details for update\n :param db: active database session\n '''\n\n db_storage = get_storage_by_id(data.id, db)\n storage_data = data.dict(exclude_none=True, by_alias=False)\n\n for key, value in storage_data.items():\n setattr(db_storage, key, value)\n\n db.add(db_storage)\n db.commit()\n db.refresh(db_storage)\n\n return JSONResponse({\"status\": 204, \"message\": \"storage updated\", \"data\": [{}]}, status_code=204)","repo_name":"xmigrate/xmigrate","sub_path":"services/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"76"}
+{"seq_id":"9259649153","text":"import argparse\nfrom console_progressbar import ProgressBar\nimport io\nimport time\n\nfrom ngram import NGramModel\nfrom ngram_zhuyin import NGramPYModel\nfrom fenci_ngram import XNGramModel\n\nparser = argparse.ArgumentParser(description='Pinyin input with N-gram.')\nparser.add_argument('-f', '--fenci', dest='fenci', action='store_true',\n help='N-gram on single character or phrase')\nparser.add_argument('-z', '--no-zhuyin', dest='zhuyin', action='store_false',\n help='To disable N-gram with zhuyin')\nparser.add_argument('-i', '--input', dest='input', type=str,\n metavar='FILE', help='Path to input pinyin file')\nparser.add_argument('-o', '--output', dest='output', type=str,\n metavar='FILE', help='Path to output file')\nparser.add_argument('-s', '--source', dest='source', type=str, default='train',\n metavar='FILEPATH', help='Path to training source file')\nparser.add_argument('-m', '--model', dest='model', type=str, default='models/n-gram',\n metavar='FILEPATH', help='Path to model files')\nparser.add_argument('-n', dest='n', default=3, type=int,\n metavar='NGRAM', help='Default as 3')\nparser.add_argument('task', type=str, default='translate',\n choices=['train', 'retrain', 'translate', 'test', 'console'],\n help='Train, translate only, test accuracy, or use console mode')\n\ndef check_result(output: list, truth: list) -> float:\n correct_sentence_cnt = 0\n word_cnt = 0\n correct_word_cnt = 0\n for o, t in zip(output, truth):\n if o.strip() == t.strip():\n correct_sentence_cnt += 1\n word_cnt += len(o)\n for i in range(len(o.strip())):\n if o[i] == t[i]:\n correct_word_cnt += 1\n return (correct_sentence_cnt / len(output), correct_word_cnt / word_cnt)\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n if args.fenci:\n model = XNGramModel(\n n=args.n,\n table_path='pinyin_table',\n file_path=args.source,\n model_path=args.model,\n zhuyin=args.zhuyin)\n else:\n if args.zhuyin:\n model = NGramPYModel(\n n=args.n,\n table_path='pinyin_table',\n file_path=args.source,\n model_path=args.model)\n else:\n model = NGramModel(\n n=args.n,\n table_path='pinyin_table',\n file_path=args.source,\n model_path=args.model)\n\n if args.task == 'train':\n model.train([args.n-1])\n elif args.task == 'retrain':\n model.train(range(args.n))\n elif args.task == 'translate':\n if args.input is None:\n print('[Error] Missing input file.')\n exit(-1)\n model.load_model()\n lines = io.open(args.input, mode='r', encoding='utf-8').readlines()\n pb = ProgressBar(len(lines), length=50, prefix='Translating')\n result = []\n for i, l in enumerate(lines):\n result.append(model.translate(l))\n pb.print_progress_bar(i+1)\n print()\n print(\"[Info] Translated %d lines.\" % len(result))\n if args.output is None:\n for l in result:\n print(l)\n else:\n output = io.open(args.output, mode='w', encoding='utf-8')\n for l in result:\n output.write(l + '\\n')\n print(\"[Info] Results saved to \", args.output)\n elif args.task == 'test':\n if args.input is None:\n print('[Error] Missing input file.')\n exit(-1)\n model.load_model()\n lines = io.open(args.input, mode='r', encoding='utf-8').readlines()\n pb = ProgressBar(len(lines) / 2, length=50, prefix='Translating')\n result = []\n for i, l in enumerate(lines[0::2]):\n result.append(model.translate(l))\n pb.print_progress_bar(i+1)\n print()\n if args.output is not None:\n output = io.open(args.output, mode='w', encoding='utf-8')\n for l in result:\n output.write(l + '\\n')\n print(\"[Info] Results saved to \", args.output)\n accuracy 
= check_result(result, lines[1::2])\n print('[Info] Generated %d lines, with accuracy =' % len(result), accuracy)\n elif args.task == 'console':\n model.load_model()\n print(\"[Info] Entering console mode. Use Ctrl-C/D to exit.\")\n while True:\n try:\n in_s = input(\">> Input: \")\n except (KeyboardInterrupt, EOFError):\n break\n time_d = time.time()\n result = model.translate(in_s)\n time_d = round(time.time()-time_d, 5)\n print(result)\n print(\"Used %fs\" % time_d)\n","repo_name":"baocvcv/intro-to-ai","sub_path":"a1-pinyin/src/pinyin.py","file_name":"pinyin.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
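# A worked sketch of check_result() above (hypothetical strings): one of two
# sentences matches exactly and 9 of 10 characters agree, so it returns
# (sentence_accuracy, word_accuracy):
# out = ['abcde', 'fghij']; ref = ['abcde', 'fghiX']
# check_result(out, ref)  # -> (0.5, 0.9)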
+{"seq_id":"25973001130","text":"num = int(input())\n\n# 파라미터 : 원반수, 시작점, 보조장대, 도착장대\ndef hanoi(plate, start, bojo, to):\n if(plate==1): \n print(start, to)\n return\n \n # 플레이트가 2개 이상일 때 이동시키기\n # n-1개를 보조로 이동, 큰 원반을 to로 이동, 보조의 n-1을 to로 이동\n hanoi(plate-1, start, to, bojo)\n print(start, to)\n hanoi(plate-1, bojo, start, to)\n\nprint(2**num-1)\nif(num<=20): hanoi(num, 1,2,3)","repo_name":"sadie100/Practice_algorithm","sub_path":"백준/답봄/silver/1914_silver1_하노이 탑.py","file_name":"1914_silver1_하노이 탑.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"27968983750","text":"import numpy as np\nimport random\n\nclass N_net():\n def __init__(self,input,hidden1,hidden2,output):\n self.fitness = 0\n\n self.input_layer = input\n self.hidden_layer = hidden1 # 48\n self.hidden_layer2 = hidden2 # 48\n self.output_layer = output\n\n self.weight1 = np.random.randn(self.input_layer, self.hidden_layer) / np.sqrt(self.input_layer / 2) # He 초기화, 첫번째 가중치 영역 난수배열 생성\n self.weight2 = np.random.randn(self.hidden_layer, self.hidden_layer2) / np.sqrt(self.hidden_layer / 2) # He 초기화, 두번째 //\n self.weight3 = np.random.randn(self.hidden_layer2, self.output_layer) / np.sqrt(self.hidden_layer2 / 2) #\n\n def forward(self, inputs):\n net = np.dot(inputs, self.weight1) # 인풋배열과 제1 가중치 난수배열 행렬곱\n net = self.relu(net)\n net = np.dot(net, self.weight2)\n net = self.relu(net)\n net = np.dot(net, self.weight3)\n net = self.softmax(net)\n return net\n\n def relu(self, x):\n return np.maximum(0, x)\n\n def sigmoid(self, x):\n return 1.0/(1.0+np.exp(-x))\n\n def leaky_relu(self, x):\n return np.maximum(0.1, x)\n\n def softmax(self, x):\n c = np.max(x)\n exp_x = np.exp(x - c) # 오버플로우 대책\n sum_exp_x = np.sum(exp_x)\n y = exp_x / sum_exp_x\n return y\n","repo_name":"94jjiisu/Capstone-design","sub_path":"TEST_20200428/NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"32463430374","text":"import cx_Oracle as cx\ncx.init_oracle_client(lib_dir=r\"C:\\DEV_WorkSpace\\Rep_Python\\env_python\\cli_oracle\\instantclient_19_8\")\n\ndef connectBd():\n host_name = \"192.168.56.3\"\n port_number = \"1521\"\n service_name = \"orcl\"\n pass_1 = \"useraction\"\n user = \"useraction\"\n dbschema = \"BD_ACOES\"\n host = host_name + \":\" + port_number + \"/\" + service_name\n\n conn = cx.connect(user, pass_1, host , encoding=\"UTF-8\")\n #print(conn)\n #cursor = conn.cursor()\n return conn\n\ndef select(psql):\n #psql = \"select * from ticket\"\n conn = connectBd()\n cursor = conn.cursor()\n print(cursor)\n\n for row in cursor.execute(psql):\n print(row)\n\n conn.close()\n\n\ndef insertToBd(sigla, tipo, bolsa):\n conn = connectBd()\n cursor = conn.cursor()\n #sql = (\"insert into ticket(id_ticket, nome, tipo, pais) VALUES(SEQ_IDTICKET.nextval,'EGIE3.SA','ON','BRASIL')\")\n p1 = \"','\"\n psql = \"insert into acoes(id_acao, sigla, tipo, bolsa) VALUES(SEQ_IDACAO.nextval,'\"+sigla+p1+tipo+p1+bolsa+\"')\"\n print(psql)\n cursor.execute(psql)\n cursor.execute(\"commit\")\n\ndef getCursor(psql):\n conn = connectBd()\n cursor = conn.cursor()\n return cursor\n\n#insertToBd(\"PG\",\"ON\",\"NYSE\")\n#select()\n\ndef geraSqlInsert(nome_tabela, dados):\n sql_insert = 'insert into ' + nome_tabela + '('\n sql_str_campo = ''\n sql_str_valor = ''\n\n for p_campo, p_valor in dados.items():\n sql_str_campo = sql_str_campo + p_campo + ','\n sql_str_valor = sql_str_valor + \"'\" + str(p_valor) + \"',\"\n sql = sql_insert + sql_str_campo[:-1] + ') values(' + sql_str_valor[:-1] + ')'\n return sql\n\ndef insertCotacoes(id_sigla, pcotacao):\n conn = connectBd()\n cursor = conn.cursor()\n dados = {}\n print('print pcotacao = »»»» ',pcotacao)\n for t in pcotacao:\n dic1 = dict(list(pcotacao)[list(pcotacao).index(t)])\n print(dic1)\n for p_data, p_open, p_high, p_low, p_close, p_adj_close, p_volume, p_dividend_amount, p_split in dic1:\n #for x in lst1:\n print('print teste »»' ,p_data,p_close)\n #print(list(dic1)[list(dic1).index(x)], ' = ',dic1[x])\n \"\"\"dados = {'ID_TICKET': id_ticket,\n 'DATA_COTACAO': p_data,\n 'HIGH': p_high,\n 'LOW': p_low,\n 'OPEN': p_open,\n 'CLOSE': p_close,\n 'VOLUME': p_volume,\n 'ADJ_CLOSE': p_adj_close\n }\"\"\"\n\n print('print dados = »»»» ',dados)\n sql = geraSqlInsert('cotacao_diaria', dados)\n try:\n print(sql)\n #cursor.execute(sql)\n except Exception as err:\n print(err)\n #conn.commit()\n\n \"\"\"\n {\n 'data': '2020-10-23',\n 'open': '82.2500',\n 'high': '83.2600', \n 'low': '80.6400',\n 'close': '81.6700',\n 'adjusted close': '81.6700',\n 'volume': '8068300',\n 'dividend amount': '0.0000',\n 'split coefficient': '1.0'\n }\n \"\"\"","repo_name":"alexsfraga/Curso-Python-com-financas","sub_path":"conectaBD.py","file_name":"conectaBD.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"29696552866","text":"\n# coding: utf-8\n\n# # Define function to return an array of file names\n\n# In[240]:\n\n\nimport os\n\ndef return_list_of_files(rootdir, printname=False):\n all_files = []\n\n for subdir, dirs, files in os.walk(rootdir):\n for file in files:\n all_files.append(os.path.join(subdir, file))\n if printname: \n print(os.path.join(subdir, file))\n return np.asarray(all_files) \n\n\n# # Function to load data from file names into features + labels\n\n# In[241]:\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\n\n\ndef load_data(dataset_path):\n images_list = return_list_of_files(dataset_path)\n \n #print(images_list)\n print(len(images_list))\n \n features = np.ndarray(shape=(len(images_list), 28, 28),\n dtype=np.uint8)\n labels = []\n for i in range(len(images_list)):\n try:\n im = mpimg.imread(images_list[i])\n\n features[i] = im\n #features[i] = im.flatten()\n labels.append(images_list[i].split(\"/\")[2])\n except:\n print(images_list[i])\n \n return features, np.asarray(labels)\n\n\n# In[242]:\n\n\nfeatures, labels = load_data(\"../TrainingDataAll\")\ntest_features, test_labels = load_data(\"../TestData\")\n\n\n# In[243]:\n\n\nprint(\"\\n\", features.shape, \"\\n\", labels.shape)\nprint(\"\\n\", test_features.shape, \"\\n\", test_labels.shape)\n\n\n# In[244]:\n\n\nplt.imshow(test_features[3].reshape(28, 28))\n\n\n# # Label encoder to convert string labels into integers\n\n# In[245]:\n\n\nfrom sklearn import preprocessing\n\nle = preprocessing.LabelEncoder()\nle.fit(labels)\nlabels_encoded = le.transform(labels) \n\ntest_le = preprocessing.LabelEncoder()\ntest_le.fit(test_labels)\ntest_labels_encoded = test_le.transform(test_labels) \n\n#list(le.inverse_transform([2, 2, 1]))\n\n\n# In[246]:\n\n\nle.classes_\ntest_le.classes_\n\n\n# In[247]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nX_train=features\nX_test=test_features\nY_train=labels_encoded\nY_test=test_labels_encoded\n\nprint(\"[STATUS] splitted train and test data...\")\nprint(\"Train data : {}\".format(X_train.shape))\nprint(\"Test data : {}\".format(X_test.shape))\nprint(\"Train labels: {}\".format(Y_train.shape))\nprint(\"Test labels : {}\".format(Y_test.shape))\n\n\n# In[248]:\n\n\n#28×28 の二次元で表現されている入力となる画像の情報が、784個になるようにしたいので、1次元になるように変形させる\nX_train = X_train.reshape(len(X_train), 784)\nX_test = X_test.reshape(len(X_test), 784)\n\n\n# In[249]:\n\n\nprint(X_train.shape)\nprint(Y_train.shape)\n\n\n# In[253]:\n\n\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\n# Now we prepare train_data and test_data.\ntrain = X_train.astype(np.float32)\ntest = X_test.astype(np.float32)\n\n\n# Create labels for train and test data\ntrain_labels = Y_train[:,np.newaxis]\ntest_labels = Y_test[:,np.newaxis]\n\n# Initiate kNN, train the data, then test it with test data for k=1\nknn = cv2.ml.KNearest_create() \nknn.train(train, cv2.ml.ROW_SAMPLE, train_labels) \nret,result,neighbours,dist = knn.findNearest(test,k=3)\n#print(result)\n#print(test_labels)\n#print(result.size)\n# Now we check the accuracy of classification\n# For that, compare the result with test_labels and check which are wrong\nmatches = result==test_labels\ncorrect = np.count_nonzero(matches)\nprint(correct)\naccuracy = 
correct/result.size*100\nprint(accuracy)\n\n","repo_name":"natsuwata/Hackfest-Reading-Hand-Written-Digits","sub_path":"KNN_Eno.py","file_name":"KNN_Eno.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
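# Portability note (an assumption about OpenCV's ml module, not shown in the
# snippet above): KNearest.train expects 32-bit responses, while sklearn's
# LabelEncoder yields int64 labels, so a cast may be needed first:
# train_labels = Y_train[:, np.newaxis].astype(np.int32)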
+{"seq_id":"21034117950","text":"class Node:\n\tdef __init__(self, value, adj_list):\n\t\tself.value = value\n\t\tif adj_list is None:\n\t\t\tself.adj_list = {}\n\t\telse:\n\t\t\tself.adj_list = adj_list\n\ndef dijkstra(node_list, beg_node):\n\tmst_edges = list()\n\tvisited_nodes = set()\n\tinf = float('inf')\n\t\n\t# Set the distances of all nodes to infinity\n\tdist = dict()\n\tfor node in node_list:\n\t\tdist[node] = inf\n\tdist[beg_node] = 0\n\n\tvisited_nodes.add(beg_node)\n\n\twhile len(visited_nodes) < len(node_list):\n\n\t\tbeg_node, end_node = None, None\n\t\tmin_val = inf\n\n\t\t# Find minimal edge extending from any node in our visited set\n\n\t\tfor node in visited_nodes:\n\t\t\tfor adj_node in node.adj_list:\n\t\t\t\tif node.adj_list[adj_node] < min_val and adj_node not in visited_nodes:\n\t\t\t\t\tmin_val = node.adj_list[adj_node]\n\t\t\t\t\tbeg_node, end_node = node, adj_node\n\n\t\tmst_edges.append(\"(\" + str(beg_node.value) + \", \" + str(end_node.value) + \")\")\n\t\tvisited_nodes.add(end_node)\n\n\t\t# Update values based on this end node\n\n\t\tfor adj_node in end_node.adj_list:\n\t\t\told_val = dist[adj_node]\n\t\t\tnew_val = dist[end_node] + end_node.adj_list[adj_node]\n\t\t\tif new_val < old_val:\n\t\t\t\tdist[adj_node] = new_val\n\tprint(mst_edges)\n\n\ndef main():\n\tnode_0 = Node(0, None)\n\tnode_1 = Node(1, None)\n\tnode_2 = Node(2, None)\n\tnode_3 = Node(3, None)\n\t\n\tnode_0.adj_list = {node_1:1, node_2:10}\n\tnode_1.adj_list = {node_0:1, node_2:2}\n\tnode_2.adj_list = {node_0:10, node_1:2, node_3:5}\n\tnode_3.adj_list = {node_2:5}\n\n\tnode_list = [node_0, node_1, node_2, node_3]\n\n\tdijkstra(node_list, node_0)\n\tdijkstra_decomposed(node_list, node_0)\n\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"andrewjoliver/CodeConnectsAI","sub_path":"u4/l4/code/dijkstras.py","file_name":"dijkstras.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"74017002486","text":"import shutil\nimport tempfile\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import Client, TestCase\nfrom django.urls import reverse\n\nfrom posts.models import Comment, Follow, Group, Post\n\n\nclass PostsViewTests(TestCase):\n\n AUTH_USER_NAME = 'TestUser'\n PAGE_TEXT = 'Тестовое сообщение1'\n PAGE_GROUP = 'Тестовая группа'\n GROUP_SLUG = 'test-group'\n GROUP_DESCR = 'Описание группы'\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.user = get_user_model().objects.create(\n username=cls.AUTH_USER_NAME\n )\n Group.objects.bulk_create([\n Group(title=f'{cls.PAGE_GROUP}{i}',\n slug=f'{cls.GROUP_SLUG}{i}',\n description=f'{cls.GROUP_DESCR}{i}')\n for i in range(1, 3)]\n )\n\n cls.post = Post.objects.create(\n text=cls.PAGE_TEXT,\n author=cls.user,\n group=Group.objects.get(title=cls.PAGE_GROUP+'1')\n )\n\n cls.unfollower = get_user_model().objects.create(\n username='Unfoollowuser',\n email='testunfoll@gmail.com',\n password='unfolow',\n )\n\n cls.follower = get_user_model().objects.create(\n username='folow',\n email='testsfoll@gmail.com',\n password='follow',\n )\n\n def setUp(self):\n self.guest_user = Client()\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n self.authorized_follower = Client()\n self.authorized_follower.force_login(self.follower)\n self.authorized_unfollower = Client()\n self.authorized_unfollower.force_login(self.unfollower)\n\n def test_auth_user_can_unfollow(self):\n \"\"\"Авторизированный пользователь может отписаться от автора поста\"\"\"\n Follow.objects.create(user=self.follower,\n author=self.user)\n self.authorized_follower.get(\n reverse(\n 'profile_unfollow',\n kwargs={'username': self.user}\n )\n )\n self.assertFalse(\n Follow.objects.filter(\n user=self.follower,\n author=self.user\n ),\n )\n\n def test_unfollower_follow_index(self):\n \"\"\"Посты не появляются у неподписчика\"\"\"\n self.authorized_follower.get(reverse(\n 'profile_follow',\n kwargs={\n 'username': self.user\n }))\n\n posts = Post.objects.filter(\n author__following__user=self.follower)\n\n response_follower = self.authorized_follower.get(\n reverse('follow_index'))\n response_author = self.authorized_client.get(\n reverse('follow_index'))\n\n self.assertIn(\n posts.get(),\n response_follower.context['paginator'].object_list,\n )\n self.assertNotIn(\n posts.get(),\n response_author.context['paginator'].object_list,\n )\n\n def test_auth_user_can_comment(self):\n \"\"\"Только авторизированный пользователь может комментировать посты\"\"\"\n form_data = {\n 'post': self.post,\n 'author': self.user,\n 'text': 'TESTTESXT'\n }\n self.authorized_client.post(\n reverse('add_comment', args=(self.user, self.post.id)),\n data=form_data, follow=True\n )\n comment = Comment.objects.first()\n self.assertEqual(comment.text, form_data['text'])\n self.assertEqual(comment.author, self.user)\n self.assertEqual(self.post.comments.count(), 1)\n self.assertEqual(comment.post, self.post)\n\n def test_urls_uses_correct_template(self):\n \"\"\"URL-адрес использует соответствующий шаблон.\"\"\"\n templates_url_names = {\n 'posts/index.html': reverse('index'),\n 'posts/new_post.html': reverse('post_new'),\n 'group.html': reverse('group_posts', kwargs={\n 'slug': f'{self.GROUP_SLUG}1'}),\n }\n for template, url in templates_url_names.items():\n with 
self.subTest(url=url):\n response = self.authorized_client.get(url)\n self.assertTemplateUsed(response, template)\n\n def test_context_in_post_new_page(self):\n \"\"\"Тестирование содержания context в post_new\"\"\"\n response = self.authorized_client.get(reverse('post_new'))\n\n form_fields = {\n 'text': forms.fields.CharField,\n 'group': forms.fields.ChoiceField,\n }\n for value, expected in form_fields.items():\n form_field = response.context.get('form').fields.get(value)\n self.assertIsInstance(form_field, expected)\n\n def test_context_in_index_page(self):\n \"\"\"Тестирование содержания context в index\"\"\"\n response = self.authorized_client.get(reverse('index'))\n all_post_count = Post.objects.count()\n resp_page = response.context['page'][0]\n\n context_post = {\n all_post_count: response.context['paginator'].count,\n self.PAGE_TEXT: resp_page.text,\n self.AUTH_USER_NAME: resp_page.author.username,\n f'{self.PAGE_GROUP}1': resp_page.group.title\n }\n\n for expected, value in context_post.items():\n with self.subTest(value=value):\n self.assertEqual(value, expected)\n\n def test_context_in_group_page(self):\n \"\"\"Тестирование содержания context в group\"\"\"\n response = self.authorized_client.get(\n reverse('group_posts', kwargs={'slug': f'{self.GROUP_SLUG}1'})\n )\n\n resp_page = response.context['page'][0]\n resp_group = response.context['group']\n\n context_group = {\n self.PAGE_TEXT: resp_page.text,\n self.AUTH_USER_NAME: resp_page.author.username,\n f'{self.PAGE_GROUP}1': resp_group.title,\n f'{self.GROUP_SLUG}1': resp_group.slug,\n f'{self.GROUP_DESCR}1': resp_group.description\n }\n\n for expected, value in context_group.items():\n with self.subTest(value=value):\n self.assertEqual(value, expected)\n\n def test_context_in_edit_post_page(self):\n \"\"\"Тестирование содержания context при редактировании поста\"\"\"\n response = self.authorized_client.get(\n reverse(\n 'post_edit',\n kwargs={\n 'username': self.AUTH_USER_NAME,\n 'post_id': self.post.id\n }\n )\n )\n\n context_edit_page = {\n self.PAGE_TEXT: response.context.get('post').text,\n f'{self.PAGE_GROUP}1': response.context.get('post').group.title,\n }\n\n for expected, value in context_edit_page.items():\n with self.subTest():\n self.assertEqual(value, expected)\n\n def test_context_in_profile_page(self):\n \"\"\"Тестирование содержания context для profile\"\"\"\n response = self.guest_user.get(\n reverse(\n 'profile',\n kwargs={'username': self.AUTH_USER_NAME}\n )\n )\n resp_page = response.context['page'][0]\n\n context_edit_page = {\n self.PAGE_TEXT: resp_page.text,\n f'{self.PAGE_GROUP}1': resp_page.group.title,\n self.AUTH_USER_NAME: resp_page.author.username,\n }\n\n for expected, value in context_edit_page.items():\n with self.subTest():\n self.assertEqual(value, expected)\n\n def test_context_in_post_id_page(self):\n \"\"\"Тестирование context для страницы индивидуального поста\"\"\"\n response = self.guest_user.get(\n reverse(\n 'post',\n kwargs={\n 'username': self.AUTH_USER_NAME,\n 'post_id': self.post.id\n }\n )\n )\n\n context_edit_page = {\n self.PAGE_TEXT: response.context.get('post').text,\n f'{self.PAGE_GROUP}1': response.context.get('post').group.title,\n self.AUTH_USER_NAME: response.context.get('post').author.username,\n }\n\n for expected, value in context_edit_page.items():\n with self.subTest():\n self.assertEqual(value, expected)\n\n def test_post_added_in_index_page(self):\n \"\"\"Тестирование наличия поста на главной странице сайта\"\"\"\n response = self.authorized_client.get(\n 
reverse('index'))\n post_id = response.context.get('page')[0].pk\n self.assertEqual(post_id, self.post.pk)\n\n def test_post_added_in_group_page(self):\n \"\"\"Тестирование наличия поста присвоенного группе на странице группы\"\"\"\n post = Post.objects.first()\n response = self.authorized_client.get(\n reverse('group_posts', kwargs={'slug': f'{self.GROUP_SLUG}1'}))\n self.assertEqual(post.text, response.context.get('page')[0].text)\n\n def test_post_added_in_correct_group(self):\n \"\"\"Тестирование на правильность назначения групп для постов\"\"\"\n group = Group.objects.first()\n posts_out_of_group = Post.objects.exclude(group=group)\n response = self.authorized_client.get(\n reverse('group_posts', kwargs={'slug': f'{self.GROUP_SLUG}1'}))\n group_list_posts_set = set(posts_out_of_group)\n all_posts_of_group_page = response.context.get(\n 'paginator').object_list\n self.assertTrue(\n group_list_posts_set.isdisjoint(all_posts_of_group_page)\n )\n\n\nclass StaticViewsTests(TestCase):\n\n def test_templates_static_pages(self):\n \"\"\"Тестирование шаблонов для статических страниц\"\"\"\n templates_url_names = {\n 'about/author.html': reverse('about:author'),\n 'about/tech.html': reverse('about:tech'),\n }\n\n for template, reverse_name in templates_url_names.items():\n with self.subTest(reverse_name=reverse_name):\n response = self.client.get(reverse_name)\n self.assertTemplateUsed(response, template)\n\n\nclass PostImageViewTest(TestCase):\n AUTH_USER_NAME = 'TestUser'\n GROUP_SLUG = 'test-group'\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n settings.MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n cls.follower = get_user_model().objects.create(\n username='SecondFollow',\n email='teswes@gmail.com',\n password='Second',\n )\n\n cls.small_gif = (b'\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00'\n b'\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00'\n b'\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00'\n b'\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00'\n b'\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C'\n b'\\x0A\\x00\\x3B'\n )\n\n cls.uploaded = SimpleUploadedFile(\n name='small.gif',\n content=cls.small_gif,\n content_type='image/gif'\n )\n cls.user = get_user_model().objects.create(\n username=cls.AUTH_USER_NAME\n )\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='test-group'\n )\n cls.post = Post.objects.create(\n text='Тестовая запись',\n group=cls.group,\n author=cls.user,\n image=cls.uploaded\n )\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)\n super().tearDownClass()\n\n def setUp(self):\n self.guest_client = Client()\n self.authorized_follower = Client()\n self.authorized_follower.force_login(self.follower)\n\n def test_follower_follow_user(self):\n \"\"\"Посты появляются у подписчика\"\"\"\n self.authorized_follower.get(\n reverse('profile_follow',\n kwargs={'username': self.user})\n )\n response = self.authorized_follower.get(\n reverse('follow_index')\n )\n self.assertContains(response, '
None:\n super().__init__(*args, **kwargs)\n self.max_retries = max_retries\n self.timeout = timeout\n self.backend = super().session._backend\n signal.signal(signal.SIGALRM, timeout_handler)\n \n def run(self, *args, **kwargs):\n result = None\n for i in range(self.max_retries):\n try:\n job = super().run(*args, **kwargs)\n while job.status() in [JobStatus.INITIALIZING, JobStatus.QUEUED, JobStatus.VALIDATING]:\n time.sleep(5) # Check every 5 seconds whether job status has changed\n signal.alarm(self.timeout) # Once job starts running, set timeout to 1 hour by default\n result = job.result()\n if result is not None:\n signal.alarm(0) # Reset timer\n return job\n except Exception as e:\n signal.alarm(0) # Reset timer\n print(\"\\nSomething went wrong...\")\n print(f\"\\n\\nERROR MESSAGE:\\n{e}\\n\\n\")\n if 'job' in locals(): # Sometimes job fails to create\n print(f\"Job ID: {job.job_id}. Job status: {job.status()}.\")\n if job.status() not in [JobStatus.DONE, JobStatus.ERROR, JobStatus.CANCELLED]:\n job.cancel()\n else:\n print(\"Failed to create job.\")\n try:\n super().session.close()\n print(\"Current session was closed.\")\n except:\n print(\"Current session could not be closed. Will leave it to close automatically.\")\n print(f\"Creating new session...\\n\")\n self._session = Session(backend=self.backend)\n print(f\"Starting trial number {i+2}...\\n\")\n signal.alarm(0) # Reset timer\n if result is None:\n raise RuntimeError(f\"Program failed! Maximum number of retries ({self.max_retries}) exceeded\")\n \nclass RetrySampler(RetryPrimitiveMixin, Sampler):\n pass\n\nclass RetryEstimator(RetryPrimitiveMixin, Estimator):\n pass","repo_name":"MarcoBarroca/q4c-team","sub_path":"Mg+H2O/real_hw_cairo/retry_primitives.py","file_name":"retry_primitives.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
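# Usage sketch for the retry primitives above (assumes the Qiskit Runtime
# session/backend setup that the truncated header would provide; names are
# illustrative):
# sampler = RetrySampler(session=session, max_retries=3, timeout=3600)
# job = sampler.run(circuits)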
+{"seq_id":"10330150794","text":"import ROOT as R\nimport os,sys\nfrom math import acos,asin,sqrt\nR.gROOT.ProcessLine(\"RestFrames::RFKey load_libRestFrames(1)\")\n\n\n\ndef SetupGenerator(frames):\n # set particle masses and widths\n mH = 400 # default search value\n mW = 80.385 # GeV, PDG 2016\n wW = 2.085\n mL = 0.106 # muons\n mN = 0.\n\n frames['LAB_Gen'] = R.RestFrames.ppLabGenFrame(\"LAB_Gen\",\"LAB\")\n frames['Zp_Gen'] = R.RestFrames.DecayGenFrame(\"H_Gen\",\"H^{0}\")\n frames['Wa_Gen'] = R.RestFrames.ResonanceGenFrame(\"Wa_Gen\",\"W_{a}\")\n frames['Wb_Gen'] = R.RestFrames.ResonanceGenFrame(\"Wb_Gen\",\"W_{b}\")\n frames['La_Gen'] = R.RestFrames.VisibleGenFrame(\"La_Gen\",\"#it{l}_{a}\")\n frames['Na_Gen'] = R.RestFrames.InvisibleGenFrame(\"Na_Gen\",\"#nu_{a}\")\n frames['Lb_Gen'] = R.RestFrames.VisibleGenFrame(\"Lb_Gen\",\"#it{l}_{b}\")\n frames['Nb_Gen'] = R.RestFrames.InvisibleGenFrame(\"Nb_Gen\",\"#nu_{b}\")\n \n frames['LAB_Gen'].SetChildFrame(frames['Zp_Gen']);\n frames['Zp_Gen'].AddChildFrame(frames['Wa_Gen']);\n frames['Zp_Gen'].AddChildFrame(frames['Wb_Gen']);\n frames['Wa_Gen'].AddChildFrame(frames['La_Gen']);\n frames['Wa_Gen'].AddChildFrame(frames['Na_Gen']);\n frames['Wb_Gen'].AddChildFrame(frames['Lb_Gen']);\n frames['Wb_Gen'].AddChildFrame(frames['Nb_Gen']);\n\n #if frames['LAB_Gen'].InitializeTree():\n # print(\"...Successfully initialized generator tree\")\n #else:\n # print(\"...Failed initializing generator tree\")\n\n # set Zpiggs masses\n frames['Zp_Gen'].SetMass(mH);\n # set W masses and widths\n frames['Wa_Gen'].SetMass(mW); frames['Wa_Gen'].SetWidth(wW)\n frames['Wb_Gen'].SetMass(mW); frames['Wb_Gen'].SetWidth(wW)\n # set lepton and neutrino masses\n frames['La_Gen'].SetMass(mL); frames['Lb_Gen'].SetMass(mL)\n\n # set lepton pT and eta cuts\n frames['La_Gen'].SetPtCut(10.); frames['Lb_Gen'].SetPtCut(10.)\n frames['La_Gen'].SetEtaCut(2.5); frames['Lb_Gen'].SetEtaCut(2.5)\n \n #if frames['LAB_Gen'].InitializeAnalysis():\n # print(\"...Successfully initialized generator analysis\")\n #else:\n # print(\"...Failed initializing generator analysis\")\n\n return\n ########## End of Generator setup ##########\n\n \ndef SetupRecoFrame(frames):\n frames['LAB'] = R.RestFrames.LabRecoFrame(\"LAB\",\"LAB\");\n frames['Zp'] = R.RestFrames.DecayRecoFrame(\"Zp\",\"Z'\");\n frames['ND'] = R.RestFrames.DecayRecoFrame(\"ND\",\"ND\");\n frames['NDbar'] = R.RestFrames.DecayRecoFrame(\"NDbar\",\"ND~\");\n frames['Z'] = R.RestFrames.VisibleRecoFrame(\"Z\",\"Z_{0}\");\n frames['NS'] = R.RestFrames.InvisibleRecoFrame(\"NS\",\"NS\");\n frames['h'] = R.RestFrames.VisibleRecoFrame(\"h\",\"h\");\n frames['NSbar'] = R.RestFrames.InvisibleRecoFrame(\"NSbar\",\"NS~\");\n \n frames['LAB'].SetChildFrame(frames['Zp']);\n frames['Zp'].AddChildFrame(frames['ND']);\n frames['Zp'].AddChildFrame(frames['NDbar']);\n frames['ND'].AddChildFrame(frames['Z']);\n frames['ND'].AddChildFrame(frames['NS']);\n frames['NDbar'].AddChildFrame(frames['h']);\n frames['NDbar'].AddChildFrame(frames['NSbar']);\n \n if not frames['LAB'].InitializeTree():\n print(\"...Failed initializing reconstruction tree\")\n\n # Invisible Group\n frames['INV'] = R.RestFrames.InvisibleGroup(\"INV\",\"NS NS~ Jigsaws\")\n frames['INV'].AddFrame(frames['NS'])\n frames['INV'].AddFrame(frames['NSbar'])\n\n # Set NS NS mass equal to Z h mass\n frames['NSNSM'] = R.RestFrames.SetMassInvJigsaw(\"NSNSM\", \"M_(NSNS} = m_{zh}\")\n frames['INV'].AddJigsaw(frames['NSNSM'])\n \n #Set Rapidity Jigsaw\n frames['NSNSR'] = 
R.RestFrames.SetRapidityInvJigsaw(\"NSNSR\", \"#eta_{NSNS} = #eta_{zh}\")\n frames['INV'].AddJigsaw(frames['NSNSR'])\n \n #NuNuR.AddVisibleFrames( frames['LAB'].GetListVisibleFrames() ) \n vframes = frames['LAB'].GetListVisibleFrames()\n for i in range(vframes.GetN()):\n frames['NSNSR'].AddVisibleFrame( vframes.Get(i) )\n\n # MinMassesSqInvJigsaw MinMW(\"MinMW\",\"min M_{W}, M_{ND}= M_{Wb}\",2);\n frames['MinMND'] = R.RestFrames.ContraBoostInvJigsaw(\"MinMND\",\"min M_{ND}, M_{ND}= M_{ND}\")\n frames['INV'].AddJigsaw(frames['MinMND'])\n frames['MinMND'].AddVisibleFrame(frames['Z'], 0)\n frames['MinMND'].AddVisibleFrame(frames['h'], 1)\n frames['MinMND'].AddInvisibleFrame(frames['NS'], 0)\n frames['MinMND'].AddInvisibleFrame(frames['NSbar'], 1)\n\n #if frames['LAB'].InitializeAnalysis():\n #print(\"...Successfully initialized analysis\")\n #else:\n # print(\"...Failed initializing analysis\")\n \n return frames\n ########## End of Reco setup ##########\n\n#def PlotTree(rFrame,name=\"tree\",title=\"Tree\",flag=False):\n# treePlot = R.RestFrames.TreePlot(\"TreePlot\",\"TreePlot\")\n#\n# treePlot.SetTree(rFrame);\n# treePlot.Draw(name, title, flag)\n# R.SetOwnership( treePlot, False )\n#\n##################################################################\n## MAIN\n##################################################################\n## Number of events to generate\n#Ngen = 10000\n#\n#print(\"Initializing generator frames and tree...\")\n#genFrames={};\n#recoFrames={};\n#SetupGenerator(genFrames)\n#SetupRecoFrame(recoFrames)\n#\n#PlotTree(genFrames['LAB_Gen'],\"GenTree\",\"Generator Tree\",True)\n#PlotTree(recoFrames['LAB'],\"RecoTree\", \"Reconstruction Tree\")\n#PlotTree(recoFrames['INV'],\"InvTree\", \"Invisible Jigsaws\", True);\n#\n#h_MZp = R.TH1F(\"MZp\", \"M_{Zp^{ 0}}\", 64, 0., 3000.)\n#h_DcosZp = R.TH1F(\"DcosH\",\"#theta_{H^{ 0}} - #theta_{H^{ 0}}^{true}\", 64,\n#\t\t -acos(-1.)/2., acos(-1.)/2.)\n#\n##genFrames['Zp_Gen'].SetMass(1000.); # not working\n#for igen in range(Ngen):\n# if(igen%(max(Ngen,10)/10) == 0): print(\"Generating event \",igen,\"of\",Ngen)\n# #generate event\n# genFrames['LAB_Gen'].ClearEvent() # clear the gen tree\n# status=genFrames['LAB_Gen'].AnalyzeEvent() # generate a new event\n# #print(\"genstatus\", status)\n# \n# #analyze event\n# recoFrames['LAB'].ClearEvent(); #clear the reco tree\n# MET = genFrames['LAB_Gen'].GetInvisibleMomentum() # Get the MET from gen tree [TVector3]\n# MET.SetZ(0.);\n# recoFrames['INV'].SetLabFrameThreeVector(MET); # Set the MET in reco tree\n# recoFrames['La'].SetLabFrameFourVector(genFrames['La_Gen'].GetFourVector()) # set lepton 4-vectors\n# recoFrames['Lb'].SetLabFrameFourVector(genFrames['Lb_Gen'].GetFourVector())\n# \n# recoFrames['LAB'].AnalyzeEvent() # analyze the event\n#\n# # Generator-level observables\n# cosHgen = genFrames['H_Gen'].GetCosDecayAngle()\n# cosH = cosHgen\n# cosNDgen = genFrames['ND_Gen'].GetCosDecayAngle()\n# cosND = cosNDgen\n#\n# # Reconstruction-level observables\n# MH = recoFrames['H'].GetMass()\n# MHN = recoFrames['H'].GetMass()/genFrames['H_Gen'].GetMass();\n# MNDN = recoFrames['ND'].GetMass()/genFrames['ND_Gen'].GetMass();\n# cosH = recoFrames['H'].GetCosDecayAngle();\n# cosND = recoFrames['ND'].GetCosDecayAngle();\n# DcosH = asin(sqrt(1.-cosH*cosH)*cosHgen-sqrt(1.-cosHgen*cosHgen)*cosH);\n# DcosND = asin(sqrt(1.-cosND*cosND)*cosNDgen-sqrt(1.-cosNDgen*cosNDgen)*cosND);\n#\n# h_MH.Fill(MH)\n# h_DcosH.Fill(DcosH)\n#\n#tc=R.TCanvas()\n#tc.Divide(2,1)\n#tc.cd(1); h_MH.Draw()\n#tc.cd(2); 
h_DcosH.Draw()\n#tc.Draw()\n#print(\"Hit return to exit\")\n#sys.stdin.readline()\n#\n","repo_name":"gracecummings/ZpAnomalonAnalysis2","sub_path":"recursive_jigsaw_anomalon.py","file_name":"recursive_jigsaw_anomalon.py","file_ext":"py","file_size_in_byte":7426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"23859777336","text":"from django.conf import settings\n\nfrom celery import shared_task\nfrom os.path import join\nfrom subprocess import Popen, STDOUT, CalledProcessError, PIPE\n\nfrom .models import Submission\n\nimport traceback\n\n\n@shared_task\ndef run_code(sub_pk):\n submission = Submission.objects.get(pk=sub_pk)\n try:\n with Popen(\"python3 -u {} {}\".format(\n join(settings.GRADER_DIRECTORY,\n submission.lab.grader_filename),\n submission.code).split(), stdout=PIPE, stderr=STDOUT,\n cwd=settings.GRADER_DIRECTORY) as proc:\n for line in proc.stdout:\n if line.decode() == \"\\n\":\n continue\n submission.output += line.decode()\n submission.save()\n if proc.returncode != 0:\n raise CalledProcessError(\n proc.returncode, proc.args,\n output=(b\"\" if proc.stdout.closed else proc.stdout.read()),\n stderr=None)\n except CalledProcessError as e:\n submission.output = e.output.decode()\n except Exception:\n submission.output = traceback.format_exc()\n submission.complete = True\n submission.save()\n return True\n","repo_name":"ovkulkarni/ai-grader","sub_path":"grader/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"30906592871","text":"from unittest import TestCase\n\nfrom testfixtures import compare\n\nfrom archivist.sources.packages import Plugin\nfrom tests.helpers import ShouldFailSchemaWith, SingleCommandMixin\n\n\nclass TestPackages(SingleCommandMixin, TestCase):\n\n def test_rpm(self):\n self.Popen.set_command('rpm -qa', stdout=b'some packages')\n plugin = Plugin(**Plugin.schema(dict(type='packages', name='rpm')))\n plugin.process(self.dir.path)\n self.dir.compare(expected=['rpm'])\n compare(b'some packages', self.dir.read('rpm'))\n\n def test_dpkg(self):\n self.Popen.set_command('dpkg -l', stdout=b'some packages')\n plugin = Plugin(**Plugin.schema(dict(type='packages', name='dpkg')))\n plugin.process(self.dir.path)\n self.dir.compare(expected=['dpkg'])\n compare(b'some packages', self.dir.read('dpkg'))\n\n def test_wrong(self):\n text = \"not a valid value for dictionary value @ data['name']\"\n with ShouldFailSchemaWith(text):\n Plugin.schema(dict(type='packages', name='foo'))\n","repo_name":"simplistix/archivist","sub_path":"tests/test_source_packages.py","file_name":"test_source_packages.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"33523769191","text":"import os\nimport sys\nimport mmap\nimport imp\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport xlrd\nfrom xml.sax.saxutils import quoteattr as xml_quoteattr\n\ndef stage():\n #Initilize window\n gui = Tk()\n gui.geometry('350x200+500+300')\n gui.title('Import Library')\n\n def load_dir(): #Directory Selection Location\n global sel_dir\n sel_dir = filedialog.askdirectory()\n dir_title = Label(gui,text = \"Selected Directory : \" ).grid(row = 1, column = 0, sticky = W)\n chosen_dir = Label(gui,text = sel_dir ).grid(row = 1, column = 1, columnspan = 3, sticky = W)\n return\n\n def load_excel(): #Excel Selection Location\n global sel_excel\n sel_excel = filedialog.askopenfile()\n excel_title = Label(gui,text = \"Selected Excel File : \" ).grid(row = 3, column = 0, sticky = W)\n chosen_excel = Label(gui,text = sel_excel.name ).grid(row = 3, column = 1, columnspan = 3, sticky = W)\n return\n\n def opt_set():\n g_img = c_img.get().lower()\n g_title = c_title.get().lower()\n g_desc = c_desc.get().lower()\n g_collection = c_collection.get().lower()\n g_subcollection = c_subcollection.get().lower()\n\n global ex_opt\n ex_opt = [g_img, g_title, g_desc, g_collection, g_subcollection]\n\n if ex_opt[0]==\"\":\n messagebox.showwarning(title = \"Error\", message = \"You must select a column for at least the file names\")\n\n try:\n sel_dir\n sel_excel\n\n except NameError:\n messagebox.showwarning(title = \"Error\", message = \"You must choose a directory , excel sheet, and at least the File Names!\")\n else:\n for i in range (0,5):\n if ex_opt[i]==\"\":\n ex_opt[i] = -1\n else:\n ex_opt[i] = ord(ex_opt[i])-97\n parse()\n gui.quit()\n return\n\n #Choose image library directory\n labelDir = Label(gui,text =\"Find Image Directory\").grid(row = 0, column = 0, sticky = W)\n buttonDir = Button(gui, text =\"Browse\", command = load_dir).grid(row = 0, column = 1, sticky = W)\n\n #Choose Excel sheet\n labelExcel = Label(gui,text =\"Find Excel File\").grid(row = 2, column = 0, sticky = W)\n buttonExcel = Button(gui, text =\"Browse\", command = load_excel).grid(row = 2, column = 1, sticky = W)\n\n #Excel Column Options\n labelMatch = Label(gui,text = \"Match excel column letters to desired image attributes.\").grid(row = 4, column = 0, columnspan = 4, sticky = W)\n\n d_img = Label(gui,text = \"File names\").grid(row = 5, column = 0, sticky = W)\n d_title = Label(gui,text = \"Titles\").grid(row = 5, column = 1, sticky = W)\n d_desc = Label(gui,text = \"Descriptions\").grid(row = 7, column = 0, sticky = W)\n d_collection = Label(gui,text = \"Collections\").grid(row = 7, column = 1, sticky = W)\n d_subcollection = Label(gui,text = \"Subcollection\").grid(row = 9, column = 0, sticky = W)\n\n c_img = StringVar()\n c_title = StringVar()\n c_desc = StringVar()\n c_collection = StringVar()\n c_subcollection = StringVar()\n\n e_img = Entry(gui, textvariable = c_img).grid(row = 6, column = 0, sticky = W)\n e_title = Entry(gui, textvariable = c_title).grid(row = 6, column = 1, sticky = W)\n e_desc = Entry(gui, textvariable = c_desc).grid(row = 8, column = 0, sticky = W)\n e_collection = Entry(gui, textvariable = c_collection).grid(row = 8, column = 1, sticky = W)\n e_subcollection = Entry(gui, textvariable = c_subcollection).grid(row = 10, column = 0, sticky = W)\n\n buttonDone = Button(gui, text = \"Generate CML File\", command = opt_set).grid(row =14, column = 0)\n mainloop()\n return 1\n\ndef parse():\n #Open excel file and sets variable sh to 
the first worksheet\n wb=xlrd.open_workbook(sel_excel.name)\n sh = wb.sheet_by_index(0)\n\n\n #Stores the data from colums of the selected row\n def find_info(row):\n img=[]\n\n for i in range (1,5):\n if ex_opt[i] < 0:\n img.append(\"\")\n else:\n try:\n img.append(sh.cell(rowx=row, colx=ex_opt[i]).value)\n except UnicodeEncodeError:\n img_app = img.append(sh.cell(rowx=row, colx=ex_opt[i]).value)\n img_app.encode('ascii','xmlcharrefreplace')\n\n info_list = [img[0], img[1], img[2], img[3]]\n return info_list\n\n #Loops through excel sheet rows and generates data content for each row.\n def gencon(path):\n\t\t#Determines how many rows there are\n column = len(sh.col_values(ex_opt[0]))\n sourceId = 0\n result=\"\"\n\n for row in range(column):\n #Increments SourceID\n sourceId = sourceId + 1\n result += '
\\n%d\\n' % sourceId\n\n #Appends Picture name to Directory Path\n cell_value = sh.cell(rowx=row,colx=ex_opt[0]).value\n localPath = path+'/'+cell_value\n\n #Insurance Parantheses are correct\n parenSwitch = localPath.replace('\\\\','/')\n info_list = find_info(row)\n\n result += '\t%s\\n' % (parenSwitch[cleanPath:])\n result += '''\\\n\t\t%s\n\t\t%s\n\t\t%s\n\t\t%s\n\t\\n''' % (info_list[0],info_list[1],info_list[2],info_list[3])\n result += '\\n'\n return result\n\n\n #Number of characters to delete for path to start at local dir (location of the script).\n def refinePath(path):\n fullPath = path\n startPath = os.path.basename(path)\n changeNum = len(fullPath) - len(startPath)\n #localPath = fullPath[changeNum:]\n return changeNum\n\n\n #Default settings for the collection Viewer\n end = '''\\\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!>\n\n '''\n outfile = open('../ImageViewer_Config_template.cml','w')\n global cleanPath\n cleanPath = refinePath(os.getcwd())\n print ('Creating XML Template...')\n print ('\\n\\n \\n' + gencon(sel_dir) + end, file = outfile)\n print ('\\nDone!')\n return\n\nif __name__ == '__main__':\n stage()\n\n\n\n","repo_name":"pdbeard/cv_scripts","sub_path":"ImageViewer3.0.py","file_name":"ImageViewer3.0.py","file_ext":"py","file_size_in_byte":6576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"15449005526","text":"import datetime\nimport re\n\nimport numpy as np\nfrom aocd.models import Puzzle\n\nYEAR = datetime.datetime.today().year\nDAY = datetime.datetime.today().day\n\npuzzle = Puzzle(year=YEAR, day=DAY)\n\n\n# Part a\ndef a(data):\n print(data)\n breakpoint()\n\n\nexample_answer = a(puzzle.example_data)\nprint(example_answer)\nassert example_answer == ...\nanswer = a(puzzle.input_data)\nprint(\"a:\", answer)\npuzzle.answer_a = answer\n\n\n# Part b\ndef b(data):\n exit()\n\n\nexample_answer = b(puzzle.example_data)\nprint(example_answer)\nassert example_answer == ...\nanswer = b(puzzle.input_data)\nprint(\"b:\", answer)\npuzzle.answer_b = answer\n","repo_name":"SimonSegerblomRex/aoc","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"26099132474","text":"import tkinter\n\nfrom matplotlib.pyplot import title\n\nwindow = tkinter.Tk()\nwindow.title(\"First GUI Program, Not Technically\")\nwindow.minsize(width=500, height=300)\n\n# Label\nmy_label = tkinter.Label(text=\"Here is a Label\")\nmy_label.pack()\n\nwindow.mainloop()","repo_name":"ShaileshKV29/python-100-days","sub_path":"Day27 - GUI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"71443662326","text":"# -*- coding: utf-8 -*-\n\nfrom sklearn import neighbors\nfrom numpy import genfromtxt, savetxt\n\n# DIR = '/home/elder/projetos/kaggle/digit/'\nDIR = '/home/projects/github/kaggle/'\ndados = genfromtxt(open(DIR+'train.csv', 'r'), delimiter=',')[1:]\n\nlabels = [i[0] for i in dados]\ntreino = [i[1:] for i in dados]\n\nteste = genfromtxt(open(DIR+'test.csv', 'r'), delimiter=',')[1:]\n\nknn = neighbors.KNeighborsClassifier(n_jobs=-1)\nknn.fit(treino,labels)\n\n# rforest = RandomForestClassifier(n_estimators=300, n_jobs=-1)\n# rforest.fit(treino, labels)\n\nsavetxt(DIR+'outputknn1.csv', knn.predict(teste), delimiter=',', fmt='%d')\n","repo_name":"elderbeserra/digits","sub_path":"knn_teste.py","file_name":"knn_teste.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"7801534653","text":"import os, sys, errno, time, tempfile\n\nMAX_RETRIES = 5\nRETRY_DELAY = 0.5 # seconds\n\n##### ProcessLock (context manager) class to implement a file based process lock with retries and logging.\nclass ProcessLock:\n\n def __init__(self, lockfile_name, logger):\n self.lockfile_path = os.path.join(tempfile.gettempdir(), lockfile_name)\n self.logger = logger\n self.lock_file = None\n\n def __enter__(self):\n self.logger.debug(f'ProcessLock attempting to acquire lock file: {self.lockfile_path}')\n for i in range(MAX_RETRIES):\n attempt = i + 1\n try:\n self.lock_file = open(self.lockfile_path, 'x')\n self.logger.debug(f'ProcessLock lock file acquired: {self.lockfile_path}')\n break\n except OSError as e:\n if e.errno != errno.EEXIST:\n self.logger.debug(f'Unexpected error in attempt {attempt} of {MAX_RETRIES} to acquire lock file: {e}')\n raise\n self.logger.debug(f'ProcessLock failed to acquire lock file on attempt: {attempt} of {MAX_RETRIES}')\n if attempt == MAX_RETRIES:\n print('Unable to acquire lock, process is already running.')\n print(f'Lock file, {self.lockfile_path}, already exists, exiting.')\n self.logger.debug(f'Exiting script! Lock file already exists: {self.lockfile_path}')\n self.logger.close('Closing logger instance from ProcessLock before sys.exit().')\n sys.exit(1)\n time.sleep(RETRY_DELAY) \n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.release()\n if exc_type is not None:\n if issubclass(exc_type, SystemExit) and exc_value.code == 0:\n self.logger.debug('ProcessLock exited context successfully.')\n else:\n self.logger.debug(f'In ProcessLock __exit__ method an exception of type {exc_type} occurred with value {exc_value}')\n return False # If True, suppresses any exception that occurred\n\n def release(self):\n if self.lock_file:\n try:\n self.lock_file.close()\n os.unlink(self.lockfile_path)\n self.lock_file = None\n self.logger.debug(f'ProcessLock lock file released: {self.lockfile_path}')\n except OSError as e:\n print(f'Unexpected error releasing lock file, {self.lockfile_path}: {e}')\n","repo_name":"7alpha3/SQLite_WSS_Data","sub_path":"process_lock.py","file_name":"process_lock.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"72391407286","text":"def shopping_cart(*args):\n products_dict = {'Soup': [], 'Pizza': [], 'Dessert': []}\n result = []\n\n for el in args:\n if el == 'Stop':\n break\n\n meal_type = el[0]\n meal_product = el[1]\n\n if meal_product not in products_dict[meal_type]:\n if meal_type == 'Soup' and len(products_dict['Soup']) < 3:\n products_dict[meal_type].append(meal_product)\n elif meal_type == 'Pizza' and len(products_dict['Pizza']) < 4:\n products_dict[meal_type].append(meal_product)\n elif meal_type == 'Dessert' and len(products_dict['Dessert']) < 2:\n products_dict[meal_type].append(meal_product)\n\n for value in products_dict.values():\n if value:\n break\n else:\n return 'No products in the cart!'\n\n sorted_products = sorted(products_dict.items(), key=lambda x: (-len(x[1]), x[0]))\n\n for m_type, m_products in sorted_products:\n result.append(f'{m_type}:')\n for product in sorted(m_products):\n result.append(f' - {product}')\n\n return '\\n'.join(result)\n\n\nprint(shopping_cart(\n ('Pizza', 'ham'),\n ('Soup', 'carrots'),\n ('Pizza', 'cheese'),\n ('Pizza', 'flour'),\n ('Dessert', 'milk'),\n ('Pizza', 'mushrooms'),\n ('Pizza', 'tomatoes'),\n 'Stop',\n))\n\nprint()\n\nprint(shopping_cart(\n ('Pizza', 'ham'),\n ('Dessert', 'milk'),\n ('Pizza', 'ham'),\n 'Stop',\n))\n\nprint()\n\nprint(shopping_cart(\n 'Stop',\n ('Pizza', 'ham'),\n ('Pizza', 'mushrooms'),\n))\n","repo_name":"StivanD/Sofutni-Python","sub_path":"03_python_advanced/Advanced/10_exam_preparations/08_python_advanced_exam_25_june_2022/03_shopping_cart.py","file_name":"03_shopping_cart.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"24649281718","text":"import argparse\nimport os\nimport pprint\nimport shutil\nimport sys\nimport random\nimport logging\nimport time\nimport timeit\nfrom pathlib import Path\nimport time\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom pp_liteseg import PPLiteSeg\nimport cv2\nimport torch.nn.functional as F\nimport datasets\n\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train segmentation network')\n\n parser.add_argument('--image',\n help='test image path',\n default=\"mainz_000001_009328_leftImg8bit.png\",\n type=str)\n parser.add_argument('--weights',\n help='cityscape pretrained weights',\n default=\"ppliteset_pp2torch_cityscape_pretrained.pth\",\n type=str)\n parser.add_argument('opts',\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER)\n\n args = parser.parse_args()\n\n return args\n\n\ndef colorEncode(labelmap, colors, mode='RGB'):\n labelmap = labelmap.astype('int')\n labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3),\n dtype=np.uint8)\n for label in np.unique(labelmap):\n if label < 0:\n continue\n labelmap_rgb = labelmap_rgb + (labelmap == label)[:, :, np.newaxis] * \\\n np.tile(colors[label],\n (labelmap.shape[0], labelmap.shape[1], 1))\n\n if mode == 'BGR':\n return labelmap_rgb[:, :, ::-1]\n else:\n return labelmap_rgb\n\n\ndef main():\n base_size = 512\n wh = 2\n mean = [0.5, 0.5, 0.5],\n std = [0.5, 0.5, 0.5]\n args = parse_args()\n\n model = PPLiteSeg()\n\n model.eval()\n\n print(\"ppliteseg:\", model)\n ckpt = torch.load(args.weights)\n model = model.cuda()\n if 'state_dict' in ckpt:\n model.load_state_dict(ckpt['state_dict'])\n else:\n model.load_state_dict(ckpt)\n\n img = cv2.imread(args.image)\n imgor = img.copy()\n img = cv2.resize(img, (wh * base_size, base_size))\n image = img.astype(np.float32)[:, :, ::-1]\n image = image / 255.0\n image -= mean\n image /= std\n\n image = image.transpose((2, 0, 1))\n image = torch.from_numpy(image)\n\n # image = image.permute((2, 0, 1))\n\n image = image.unsqueeze(0)\n image = image.cuda()\n start = time.time()\n out = model(image)\n end = time.time()\n print(\"infer time:\", end - start, \" s\")\n out = out[0].squeeze(dim=0)\n outadd = F.softmax(out, dim=0)\n outadd = torch.argmax(outadd, dim=0)\n predadd = outadd.detach().cpu().numpy()\n pred = np.int32(predadd)\n colors = np.random.randint(0, 255, 19 * 3)\n colors = np.reshape(colors, (19, 3))\n # colorize prediction\n pred_color = colorEncode(pred, colors).astype(np.uint8)\n pred_color = cv2.resize(pred_color,(imgor.shape[1],imgor.shape[0]))\n\n im_vis = cv2.addWeighted(imgor, 0.7, pred_color, 0.3, 0)\n cv2.imwrite(\"results.jpg\", im_vis)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"midasklr/PPLiteSeg.pytorch","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"76"}
+{"seq_id":"24113049050","text":"import numpy as np\nimport pandas as pd\nfrom autogbt.sampler import MajorityUnderSampler\n\n\ndef _test_sample(y):\n sampler = MajorityUnderSampler()\n idx = sampler.sample(y, 40000, 3.0)\n assert len(idx) == 40000\n assert y[idx].sum() == 10000\n\n\ndef test_sample_with_series():\n y = pd.Series(np.concatenate([np.ones((10000)), np.zeros((100000))]))\n y = y.sample(frac=1.0)\n _test_sample(y)\n\n\ndef test_sample_with_ndarray():\n y = np.concatenate([np.ones((10000)), np.zeros((100000))])\n _test_sample(y)\n\n\ndef test_sample_for_regression():\n y = np.concatenate([\n 2*np.ones((10000)),\n 1*np.ones((10000)),\n 0*np.ones((10000)),\n ])\n sampler = MajorityUnderSampler()\n idx = sampler.sample(y, 0.1, 3.0)\n assert len(idx) == 3000\n","repo_name":"pfnet-research/autogbt-alt","sub_path":"test/test_sampler.py","file_name":"test_sampler.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"76"}
+{"seq_id":"4289529686","text":"import com.ihsan.foundation.pobjecthelper as phelper\r\nimport time, sys\r\n\r\ndef FillDataView(uideflist, rec, parameter) :\r\n helper = phelper.PObjectHelper(uideflist.config)\r\n Obj, Field = helper.LoadScript('GeneralModule.S_ObjectEditor').FindObj(uideflist.config, 'Donor',\r\n ((parameter.FirstRecord.ID,parameter.FirstRecord.key),))\r\n Obj = Obj.CastToLowestDescendant()\r\n uideflist.SetData('uipData'+Obj.DonorType,Obj.PObjConst)\r\n rec.ViewType = Obj.DonorType\r\n rec.Data = Obj.GetFieldByName(parameter.FirstRecord.ID)\r\n return rec\r\n \r\ndef FormSetDataEx(uideflist, parameter) :\r\n config = uideflist.config\r\n helper = phelper.PObjectHelper(uideflist.config)\r\n paramMode = {\r\n 'New':'rec',\r\n 'Edit':'helper.LoadScript(\\'GeneralModule.S_ObjectEditor\\').FillDataMode(uideflist,rec, parameter)',\r\n 'View':'FillDataView(uideflist,rec, parameter)',\r\n 'SENTINEL':''\r\n }\r\n rec = uideflist.uipFilter.Dataset.AddRecord()\r\n\r\n rec = eval(paramMode[parameter.FirstRecord.mode])\r\n ID = parameter.FirstRecord.ID\r\n rec.ID = ID\r\n rec.mode = parameter.FirstRecord.mode\r\n rec.UserPengubah = config.SecurityContext.UserId\r\n rec.TglUbah = (config.Now())\r\n rec.TerminalUbah = config.SecurityContext.InitIP\r\n rec.Input_Data = 'I'\r\n rec.TglAwal = rec.TglUbah\r\n rec.TglAkhir = rec.TglUbah\r\n\r\ndef FindData(config, parameter, returns):\r\n helper = phelper.PObjectHelper(config)\r\n FieldStruct = ('DonorId','DonorName','AddressStreet','AddressKelurahan',\r\n 'AddressSubDistrict','AddressCity','AddressProvince','AddressPostalCode',\r\n 'PhoneNumber','MobileNumber','Email','Fax','ReferenceBy')\r\n ChildFields = {\r\n 'IndividualDonor':('Gender','BirthPlace','BirthDate','IdentityType',\r\n 'IdentityNumber','Religion','NPWP','MaritalState','Language',\r\n 'LastFormalEducation','FieldOfWork','IncomePerMonth','ExpensePerMonth'),\r\n 'CorporateDonor':('Corporation','NPWP','SIUPP',\r\n 'TDP','LocationType','OwnerType','EconomicSector')\r\n }\r\n if parameter.FirstRecord.SearchType == 'I' :\r\n Idx=0\r\n else :\r\n Idx=8\r\n\r\n param = '%s = \\'%s\\' ' % \\\r\n (FieldStruct[Idx],parameter.FirstRecord.Data)\r\n\r\n #loop sql mengisi record\r\n strSQL = 'select %s from Donor where %s ' %(parameter.FirstRecord.IDName,param)\r\n \r\n resSQL = config.CreateSQL(strSQL).RawResult\r\n resSQL.First()\r\n LsVal = ()\r\n Type = ''\r\n if not resSQL.Eof :\r\n Obj = config.CreatePObjImplProxy(parameter.FirstRecord.ClassName)\r\n Obj.Key = resSQL.GetFieldValue(parameter.FirstRecord.IDName)\r\n Obj = Obj.CastToLowestDescendant()\r\n FieldStruct += ChildFields[Obj.ClassName]\r\n Type = Obj.DonorType\r\n for field in FieldStruct :\r\n LsVal += (Obj.GetFieldByName(field),)\r\n \r\n returns.CreateValues(['uip','uipData'+Type],['Struct',str(FieldStruct)],['Values',str(LsVal)])\r\n\r\n return 1\r\n\r\ndef GetHistData(config, parameter, returns) :\r\n FieldStruct = ('TransactionItemId','TransactionDate','TransactionCode','BranchCode',\r\n 'MutationType','Amount','CurrencyCode','Rate','EkuivalenAmount')\r\n \r\n helper = phelper.PObjectHelper(config)\r\n #oParameterGlobal = helper.GetObject('ParameterGlobal', 'FIN_PART')\r\n oParameterGlobal = config.CreatePObjImplProxy('ParameterGlobal')\r\n oParameterGlobal.key = 'HIST_COUNT'\r\n #FIN = int(oParameterGlobal.Nilai_Parameter)\r\n FIN = 100\r\n \r\n returns.CreateValues(['FIN',FIN],['uip','uipTrans'],['Struct',str(FieldStruct)])\r\n returnpacket.AddDataPacketStructureEx('uipResult','Values:string')\r\n 
returnpacket.BuildAllStructure()\r\n dsResult = returnpacket.AddNewDataset('uipResult')\r\n \r\n s = ' \\\r\n SELECT FROM AccountTransactionItem \\\r\n [ \\\r\n Nomor_Rekening = :Nomor_Rekening and \\\r\n Tanggal_Transaksi >= :Tanggal_Awal and \\\r\n Tanggal_Transaksi < :Tanggal_Akhir \\\r\n ] \\\r\n ( \\\r\n Id_Detil_Transaksi, \\\r\n Tanggal_Transaksi, \\\r\n Id_Parameter_Transaksi, \\\r\n Kode_Jurnal, \\\r\n Keterangan, \\\r\n Jenis_Mutasi, \\\r\n Saldo_Awal, \\\r\n Nilai_Mutasi, \\\r\n LHistTransaksi.Nomor_Referensi, \\\r\n LHistTransaksi.LBatchTransaksi.Nomor_Batch,\\\r\n LHistTransaksi.Tanggal_Input,\\\r\n Self \\\r\n ) \\\r\n THEN ORDER BY ASC , ASC Id_Detil_Transaksi;'\r\n \r\n strSQL = 'select '\r\n for Field in FieldStruc :\r\n strSQL += Field + ','\r\n strSQL = strSQL[:-1] + ' from TransactionItem where %s ' %param\r\n\r\n resSQL = config.CreateSQL(strSQL).RawResult\r\n resSQL.First()\r\n\r\n while not resSQL.Eof :\r\n rec = dsResult.AddRecord()\r\n LsVal = ()\r\n for field in FieldStruc :\r\n LsVal += (resSQL.GetFieldValue(field),)\r\n rec.Values = str(LsVal)\r\n resSQL.Next()\r\n\r\n return 1\r\n","repo_name":"mech4/PKTrx","sub_path":"dialogs/Donatur/fPeragaanDonatur_data.py","file_name":"fPeragaanDonatur_data.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
LINK_URLS,\n\t\t\t\"width\": INITIAL_WINDOW_WIDTH,\n\t\t\t\"height\": INITIAL_WINDOW_HEIGHT\n\t\t}\n\t\twith open(DISPLAY_CONFIGURATION, \"w\") as write_data:\n\t\t\tjson.dump(dc, write_data)\n\t\treturn dc\n\n# Load in display settings from file\nDC = loadDisplayConfig()\nQBIT_FONT = DC[\"font\"]\nNORMAL_FONT_SIZE = DC[\"fontsize\"]\nBIG_FONT_SIZE = DC[\"fontbig\"]\nSMALL_FONT_SIZE = DC[\"fontsmall\"]\nTEXT_BACKGROUND_COLOR = DC[\"background\"]\nTEXT_COLOR = DC[\"text\"]\nERROR_COLOR = DC[\"error\"]\nSYSTEM_COLOR = DC[\"system\"]\nSELF_COLOR = DC[\"self\"]\nUSERNAME_COLOR = DC[\"user\"]\nACTION_COLOR = DC[\"action\"]\nLINK_COLOR = DC[\"link\"]\nNOTICE_COLOR = DC[\"notice\"]\nMOTD_COLOR = DC[\"motd\"]\nLINK_URLS = DC[\"links\"]\nINITIAL_WINDOW_WIDTH = DC[\"width\"]\nINITIAL_WINDOW_HEIGHT = DC[\"height\"]\n\nCHAT_TEMPLATE = f\"\"\"\n\n \n\t\n\t | !USER! | \n\t | \n\t !MESSAGE! | \n\t
\n \n
\n\"\"\"\n\nACTION_TEMPLATE = \"\"\"\n\n \n\t\n\t | !USER! !MESSAGE! | \n\t
\n \n
\n\"\"\"\n\nSYSTEM_TEMPLATE = \"\"\"\n\n \n\t\n\t | !MESSAGE! | \n\t
\n \n
\n\"\"\"\n\ndef inject_www_links(txt):\n\tif not LINK_URLS: return txt\n\turls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', txt)\n\tfor u in urls:\n\t\tu = re.sub('<[^<]+?>', '', u)\n\t\tlink = f\"{u}\"\n\t\ttxt = txt.replace(u,link)\n\treturn txt\n\ndef pad_nick(nick,size):\n\tx = size - len(nick)\n\tif x<0 : x = 0\n\ty = ' '*x\n\treturn f\"{y}{nick}\"\n\ndef system_display(text):\n\tmsg = SYSTEM_TEMPLATE.replace('!COLOR!',SYSTEM_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\treturn msg\n\ndef error_display(text):\n\tmsg = SYSTEM_TEMPLATE.replace('!COLOR!',ERROR_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\treturn msg\n\ndef chat_display(user,text,max):\n\ttext = remove_html_markup(text)\n\tuser = pad_nick(user,max)\n\ttext = inject_www_links(text)\n\tmsg = CHAT_TEMPLATE.replace('!USER!',user)\n\tmsg = msg.replace('!COLOR!',USERNAME_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!CCHAT!',TEXT_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\t# Gradient\n\tBG = qbit.colour.Color(TEXT_BACKGROUND_COLOR)\n\tLIGHT_COLOR = qbit.colour.Color(USERNAME_COLOR,luminance=GRADIENT_LIGHTEN)\n\tUSER_BACKGROUND_GRADIENT = f\"background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 {BG}, stop:1 {LIGHT_COLOR});\"\n\tmsg = msg.replace(\"!GRADIENT!\",USER_BACKGROUND_GRADIENT)\n\n\treturn msg\n\ndef mychat_display(user,text,max):\n\ttext = remove_html_markup(text)\n\tuser = pad_nick(user,max)\n\ttext = inject_www_links(text)\n\tmsg = CHAT_TEMPLATE.replace('!USER!',user)\n\tmsg = msg.replace('!COLOR!',SELF_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!CCHAT!',TEXT_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\t# Gradient\n\tBG = qbit.colour.Color(TEXT_BACKGROUND_COLOR)\n\tLIGHT_COLOR = qbit.colour.Color(SELF_COLOR,luminance=GRADIENT_LIGHTEN)\n\tUSER_BACKGROUND_GRADIENT = f\"background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 {BG}, stop:1 {LIGHT_COLOR});\"\n\tmsg = msg.replace(\"!GRADIENT!\",USER_BACKGROUND_GRADIENT)\n\n\treturn msg\n\ndef action_display(user,text):\n\ttext = remove_html_markup(text)\n\ttext = inject_www_links(text)\n\tmsg = ACTION_TEMPLATE.replace('!USER!',user)\n\tmsg = msg.replace('!COLOR!',ACTION_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\t# Gradient\n\tBG = qbit.colour.Color(TEXT_BACKGROUND_COLOR)\n\tLIGHT_COLOR = qbit.colour.Color(ACTION_COLOR,luminance=GRADIENT_LIGHTEN)\n\tUSER_BACKGROUND_GRADIENT = f\"background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 {BG}, stop:1 {LIGHT_COLOR});\"\n\tmsg = msg.replace(\"!GRADIENT!\",USER_BACKGROUND_GRADIENT)\n\n\treturn msg\n\ndef notice_display(user,text,max):\n\ttext = remove_html_markup(text)\n\tuser = pad_nick(user,max)\n\ttext = inject_www_links(text)\n\tmsg = CHAT_TEMPLATE.replace('!USER!',user)\n\tmsg = msg.replace('!COLOR!',NOTICE_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!CCHAT!',TEXT_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\t# Gradient\n\tBG = qbit.colour.Color(TEXT_BACKGROUND_COLOR)\n\tLIGHT_COLOR = qbit.colour.Color(NOTICE_COLOR,luminance=GRADIENT_LIGHTEN)\n\tUSER_BACKGROUND_GRADIENT = f\"background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 {BG}, stop:1 {LIGHT_COLOR});\"\n\tmsg = msg.replace(\"!GRADIENT!\",USER_BACKGROUND_GRADIENT)\n\n\treturn msg\n\ndef motd_display(text,max):\n\tuser = 
pad_nick(\"MOTD\",max)\n\ttext = inject_www_links(text)\n\tmsg = CHAT_TEMPLATE.replace('!USER!',user)\n\tmsg = msg.replace('!COLOR!',MOTD_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!CCHAT!',TEXT_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\t# Gradient\n\tBG = qbit.colour.Color(TEXT_BACKGROUND_COLOR)\n\tLIGHT_COLOR = qbit.colour.Color(MOTD_COLOR,luminance=GRADIENT_LIGHTEN)\n\tUSER_BACKGROUND_GRADIENT = f\"background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 {BG}, stop:1 {LIGHT_COLOR});\"\n\tmsg = msg.replace(\"!GRADIENT!\",USER_BACKGROUND_GRADIENT)\n\n\treturn msg\n\ndef remove_html_markup(s):\n\ttag = False\n\tquote = False\n\tout = \"\"\n\n\tfor c in s:\n\t\t\tif c == '<' and not quote:\n\t\t\t\ttag = True\n\t\t\telif c == '>' and not quote:\n\t\t\t\ttag = False\n\t\t\telif (c == '\"' or c == \"'\") and tag:\n\t\t\t\tquote = not quote\n\t\t\telif not tag:\n\t\t\t\tout = out + c\n\n\treturn out\n\ndef save_last_server(host,port,password,ssl):\n\tsinfo = {\n\t\t\t\"host\": host,\n\t\t\t\"port\": port,\n\t\t\t\"password\": password,\n\t\t\t\"ssl\": ssl\n\t\t}\n\twith open(LAST_SERVER_INFORMATION_FILE, \"w\") as write_data:\n\t\tjson.dump(sinfo, write_data)\n\ndef get_last_server():\n\tif os.path.isfile(LAST_SERVER_INFORMATION_FILE):\n\t\twith open(LAST_SERVER_INFORMATION_FILE, \"r\") as read_server:\n\t\t\tdata = json.load(read_server)\n\t\t\treturn data\n\telse:\n\t\tsi = {\n\t\t\t\"host\": '',\n\t\t\t\"port\": '',\n\t\t\t\"password\": '',\n\t\t\t\"ssl\": False\n\t\t}\n\t\treturn si\n\ndef save_autojoin_channels(chans):\n\twith open(AUTOJOIN_FILE, \"w\") as write_data:\n\t\tjson.dump(chans, write_data)\n\ndef get_autojoins():\n\tif os.path.isfile(AUTOJOIN_FILE):\n\t\twith open(AUTOJOIN_FILE, \"r\") as read_server:\n\t\t\tdata = json.load(read_server)\n\t\t\treturn data\n\telse:\n\t\treturn []\n\ndef get_user():\n\tif os.path.isfile(USER_FILE):\n\t\twith open(USER_FILE, \"r\") as read_user:\n\t\t\tdata = json.load(read_user)\n\t\t\treturn data\n\telse:\n\t\tsi = {\n\t\t\t\"nick\": DEFAULT_IRC_NICKNAME,\n\t\t\t\"username\": DEFAULT_IRC_USERNAME,\n\t\t\t\"realname\": DEFAULT_IRC_IRCNAME\n\t\t}\n\t\treturn si\n\ndef save_user(user):\n\twith open(USER_FILE, \"w\") as write_data:\n\t\tjson.dump(user, write_data)\n","repo_name":"danhetrick/qbit","sub_path":"qbit/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":10311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"74735413045","text":"import labrad\nimport numpy\nimport matplotlib\n\nfrom matplotlib import pyplot\n#get access to servers\ncxn = labrad.connect()\ndv = cxn.data_vault\nfigure = pyplot.figure()\npyplot.title(\"Sideband Rabi Flopping Drift, all same parameters\")\n\nfor dataset in ['2217_20','2211_05', '2219_42','2218_33']:\n dv.cd(['', 'Experiments', '729Experiments', 'RabiFlopping', '2013Jan25', dataset])\n dv.open(1)\n data = dv.get().asarray\n x = data[:,0] * 10**6 #now in microseconds\n pyplot.plot(x, data[:,1],'o-', label = '2013Jan25_{0}'.format(dataset))\n\npyplot.ylabel('Excitation Probability')\npyplot.xlabel(r'Excitation Time $\\mu s$')\npyplot.ylim([0,1])\npyplot.legend()\npyplot.show()\n","repo_name":"HaeffnerLab/cct","sub_path":"old_scripts/dataAnalysis/729Experiments/2013Jan25/sideband_flop_drift.py","file_name":"sideband_flop_drift.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"}
+{"seq_id":"8554640355","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport random\nfrom tensorflow.keras.preprocessing.image import img_to_array, load_img\nfrom tensorflow.keras.models import Model\n\n\ndef plot_feature_maps(model, img):\n \"\"\"\n Given a convolutional neural network model and an appropriately shaped\n image, it plots the feature maps of the convolutional and pooling layers.\n\n Args:\n model: keras `Model` object, containing the model to plot feature\n maps of\n img: numpy array. Image represented in the form of a numpy array of\n shape (1, size, size, num_channels)\n \"\"\"\n # lets first create a list of outputs from successive layers of the model\n successive_outputs = [layer.output for layer in model.layers[1:]]\n\n # now lets create another model with the input of the model passed in,\n # and outputs of each layer of the passed in model\n visualization_model = Model(inputs=model.input, outputs=successive_outputs)\n\n # now run the image through the network, obtaining intermediate feature maps\n successive_feature_maps = visualization_model.predict(img)\n\n # layer names to have them as part of our plot for readability purposes\n layer_names = [layer.name for layer in model.layers]\n\n # loop through the layers\n for layer_name, feature_map in zip(layer_names, successive_feature_maps):\n # plot feature maps only for conv/pool layers\n if len(feature_map.shape) == 4:\n n_channels = feature_map.shape[-1]\n size = feature_map.shape[1]\n\n # We will tile our images in this matrix\n display_grid = np.zeros((size, size * n_channels))\n\n # looping through each filter of a layer\n for i in range(n_channels):\n # postprocessing the image to be visually palatable\n img = feature_map[0, :, :, i]\n img -= img.mean()\n img /= img.std()\n img *= 64\n img += 128\n img = np.clip(img, 0, 255).astype('uint8')\n # tile each filter into a horizontal grid\n display_grid[:, i * size: (i + 1) * size] = img\n\n # displaying the feature map grid\n scale = 20. / n_channels\n plt.figure(figsize=(scale * n_channels, scale))\n plt.title(layer_name)\n plt.grid(False)\n plt.imshow(display_grid, aspect='auto', cmap='viridis')\n\n\ndef find_image(path, model):\n \"\"\"\n Given a path to a folder containing images, it returns a numpy array\n representation of the image suitable for passing through the keras\n model (also passed in) to plot feature maps.\n\n Args:\n path: str. Path to the folder containing images\n model: keras `Model` object.\n\n Returns:\n img: numpy array. Representation of the image suitable for\n visualizing feature maps of the model\n \"\"\"\n # string formatting for reliability\n if not path.endswith('/'):\n path += '/'\n\n # creating a list of all images in directory passed in\n all_images = [f for f in os.listdir(path) if f.endswith('.jpg')]\n\n # finding a random image from the directory passed in\n img_path = path + random.choice(all_images)\n img = load_img(img_path, target_size=(model.input_shape[1],\n model.input_shape[2]))\n\n # convert the image into its numpy representation\n img = img_to_array(img) # shape: (img_size, img_size, 3)\n img = img.reshape((1,) + img.shape) # shape: (1, size, size, 3)\n return img / 255.0\n\n\ndef plot_feature_maps_from_random_img(model, folder_path):\n \"\"\"\n randomly chooses an image from a directory containing images and plots\n the feature maps of that image as gone through the model.\n\n Args:\n model: keras `Model` object, containing the model to print feature\n maps of\n folder_path: str. 
Path to the folder containing images\n \"\"\"\n img = find_image(folder_path, model)\n plot_feature_maps(model, img)\n\n\ndef plot_loss(history):\n \"\"\"\n Plots the loss of the model as a function of training epochs\n\n Args:\n history: `History` object, containing the training history of the model\n \"\"\"\n loss = history.history['loss']\n\n # plotting the validation loss if a validation set exists\n if 'val_loss' in history.history:\n val_loss = history.history['val_loss']\n plt.plot(val_loss, label='validation loss')\n\n plt.plot(loss, label='training loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.title('Model loss vs. training epochs')\n plt.show()\n\n\ndef plot_accuracy(history):\n \"\"\"\n Plots the accuracy history of the model as a function of training epochs\n\n Args:\n history: `History` object, containing the training history of the model\n \"\"\"\n acc = history.history['acc']\n\n # plotting the validation accuracy if there exists a validation set\n if 'val_acc' in history.history:\n val_acc = history.history['val_acc']\n plt.plot(val_acc, label='validation accuracy')\n\n plt.plot(acc, label='training accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.title('Model accuracy vs. training epochs')\n plt.show()\n\n\nif __name__ == '__main__':\n from C2.W1.cats_dogs_reduced import train_model\n\n path = '../../Data/cats-and-dogs_reduced/'\n model, history = train_model()\n\n plot_feature_maps_from_random_img(model, path+'train/cats')\n plot_loss(history)\n plot_accuracy(history)\n","repo_name":"connected-ftarlan/tf-specialization","sub_path":"C2/W1/visualize_feature_maps.py","file_name":"visualize_feature_maps.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"7928163712","text":"# Given a binary tree, determine if it is height-balanced.\n#\n# For this problem, a height-balanced binary tree is defined as a binary tree in which\n# the depth of the two subtrees of every node never differ by more than 1.\n#\n# Tags: Tree, Depth-first Search\n# Difficulty: Easy\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def isBalanced(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n return self.maxDepth(root)[0]\n\n def maxDepth(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool, int\n \"\"\"\n if not root:\n return True, 0\n\n left_balanced, left_depth = self.maxDepth(root.left)\n right_balanced, right_depth = self.maxDepth(root.right)\n balanced = left_balanced and right_balanced and abs(left_depth - right_depth) <= 1\n\n max_depth = max(left_depth, right_depth) + 1 if balanced else 0\n return balanced, max_depth\n","repo_name":"lostarray/LeetCode","sub_path":"110_Balanced_Binary_Tree.py","file_name":"110_Balanced_Binary_Tree.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"20003428378","text":"from django.core.paginator import Paginator\nfrom django.db.models import Count\nfrom django.shortcuts import render_to_response, redirect\nfrom django.template import RequestContext\nfrom django.db.models import Q\nfrom functions import *\nfrom models import Deck, Suit, Meaning, MinorArcana, MajorArcana\nfrom models import Spread, CardPosition\nfrom random import choice\n\ndef deck_list(request):\n \"\"\" This is a view to show a list of all available decks with a few details \n about each one. We can't use a generic view because we need to cross-reference\n the suits associated with each deck. \"\"\"\n \n decks = Deck.objects.all()\n \n # Pull the suits for each deck and put them in a tuple with each deck\n deck_list = []\n for deck in decks:\n \n suits = Suit.objects.filter(deck=deck.id)\n deck_list += [ (deck, suits) ]\n \n pages = Paginator(deck_list, 10, 3)\n \n # Check if page is an int, if not deliver page 1\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1 \n \n # Check if page is in range, if not deliver last page\n try:\n result_list = pages.page(page)\n except (EmptyPage, InvalidPage):\n result_list = pages.page( pages.num_pages ) \n \n context = {'result_list': result_list}\n\n return render_to_response('diyTarot/deck_list.html', \n context_instance=RequestContext(request, context)) \n \ndef spread_list(request):\n \"\"\" This is a view to display the list of spreads. Can't use a generic \n view because want to cross-reference with the card positions to \n give information like the number of cards in a spread. \"\"\"\n\n # Set up the structures used to add successive filters and handle the query string\n active_options = request.GET.copy()\n query_list = []\n \n # Set up the query objects to search spreads and filter by size\n apply_spread_search_filter(active_options, query_list)\n apply_spread_size_filter(active_options, query_list)\n \n # Annotate each spread with the number of positions associated with it, for sorting\n # and also for filtering by size\n spreads = Spread.objects.annotate(size=Count('cardposition')).filter(*query_list).order_by('size')\n\n # Paginate the results\n pages = Paginator(spreads, 10, 3)\n current_page = get_current_page(active_options, pages)\n\n # Temporary, limited tag list before tags are added to the model\n tags = ['daily', 'traditional', 'love', 'work', 'advice', 'choice'] \n tag_results = {}\n for tag in tags:\n tag_query = [Q(title__icontains=tag) | Q(description__icontains=tag)]\n tag_results[tag] = Spread.objects.filter(*tag_query).count()\n \n # Check if there is a default deck stored in the current session\n # This determines which deck to point you to in the links on the spread list, \n # by remembering your preference in the session.\n if 'deck' in request.session:\n deck = request.session['deck']\n else:\n # @TODO: make the default deck configurable?\n deck = 1\n \n context = {'result_list': current_page,\n 'deck': deck,\n 'active_options': active_options,\n 'tag_results': tag_results}\n \n return render_to_response('diyTarot/spread_list.html',\n context_instance=RequestContext(request, context))\n \ndef card_list(request):\n \"\"\" This is a view to display all cards in the system, across all decks. Via the \n GET headers it paginates and filters the resulting list if the various arguments\n exist and are valid in the request. 
\"\"\"\n \n # Pull all get parameters from the request into a querydict structure.\n active_options = request.GET.copy()\n \n # Set up the structures used to add successive filters and handle the query string\n filter_args = {} \n query_list = []\n order_args = []\n \n # Try to apply all possible filters one by one. \n # If an option's value is not valid it is removed from display_options.\n apply_card_search_filter(active_options, query_list)\n apply_key_filter(active_options, filter_args, 'deck', 'deck')\n apply_card_filter(active_options, filter_args)\n apply_suit_filter(active_options, filter_args)\n apply_rank_filter(active_options, filter_args)\n \n apply_sorting_order(active_options, order_args)\n \n # To search the meanings as well as the Cards, need to get the list of tarot indexes which\n # link to meanings that match the search\n if 'search' in active_options:\n \n meaning_query_list = []\n apply_keyword_search_filter(active_options, meaning_query_list)\n \n # Get all tarot_indexes which have keyword strings containing the search term.\n meanings = Meaning.objects.filter(*meaning_query_list).values('tarot_index', 'meaning_set').order_by('meaning_set')\n \n # Get the id of each card (or set of cards) which matches each tarot_index / meaning_set pair\n # This assures that only the cards that use the matching meaning set get pulled, instead of all cards\n # with a certain tarot index.\n ids = []\n for meaning in meanings:\n ids += Card.objects.filter(tarot_index=meaning['tarot_index'], \n deck__meaning_set=meaning['meaning_set']).values_list('id') \n \n # Reformat list so it can be read directly by the id__in parameter \n id_list = [card_id[0] for card_id in ids]\n\n # Pull only the cards that have the same tarot index AND whose deck has the same meaning_set\n query_list[0] |= Q(id__in=id_list)\n\n # Some of the options only apply to the MinorArcana schema.\n # The rank and suit filters will set the card option automatically.\n if ('cards' in active_options and\n active_options['cards'] =='minors'):\n cards = MinorArcana.objects.filter(*query_list).filter(**filter_args).order_by(*order_args)\n else:\n cards = Card.objects.filter(*query_list).filter(**filter_args).order_by(*order_args)\n\n # Used by the shared sidebar navigation menu\n base_url = \"/diytarot/cards/\"\n \n # Populate the deck and suit lists used in navigation\n deck_list = Deck.objects.values('name', 'id')\n suit_list = Suit.objects.filter(deck=deck_list[0]['id']).values('suit', 'name')\n \n # Paginate the queryset and fetch the current page from the URL, with validation\n pages = Paginator(cards, 10, 3)\n current_page = get_current_page(active_options, pages)\n \n context = {'result_list': current_page,\n 'base_url': base_url,\n 'active_options': active_options,\n 'deck_list': deck_list,\n 'suit_list': suit_list}\n\n return render_to_response('diyTarot/card_list.html',\n context_instance=RequestContext(request, context)) \n\ndef deck_detail(request, deck_id):\n \"\"\" This is a view to show all the cards associated with a particular \n tarot deck. \"\"\"\n \n # Get the list of cards, return the deck listing page if deck doesn't exist.\n try:\n deck = Deck.objects.get(pk=deck_id)\n except Deck.DoesNotExist:\n return deck_list(request)\n \n # Set up the structures used to add successive filters and handle the query string\n active_options = request.GET.copy()\n filter_args = {'deck': deck_id} \n order_args = []\n \n # Apply the filters one by one. 
If an option is not valid it is removed from display_options.\n apply_card_filter(active_options, filter_args)\n apply_suit_filter(active_options, filter_args)\n apply_rank_filter(active_options, filter_args)\n apply_sorting_order(active_options, order_args)\n \n # Some of the options only apply to the MinorArcana schema.\n # The rank and suit filters will set the card option automatically.\n if ('cards' in active_options and\n active_options['cards'] =='minors'):\n cards = MinorArcana.objects.filter(**filter_args).order_by(*order_args)\n else:\n cards = Card.objects.filter(**filter_args).order_by(*order_args)\n\n # The base url, since there is a different one for deck view and all cards view\n base_url = \"/diytarot/decks/%s/\" % deck_id\n \n # Populate the suit list used in navigation\n suit_list = Suit.objects.filter(deck=deck_id).values('suit', 'name')\n \n # Paginate the queryset and fetch the current page from the URL, with validation\n pages = Paginator(cards, 10, 3)\n current_page = get_current_page(active_options, pages)\n \n context = {'deck': deck,\n 'result_list': current_page,\n 'base_url': base_url,\n 'suit_list': suit_list,\n 'active_options': active_options}\n\n return render_to_response('diyTarot/deck_detail.html',\n context_instance=RequestContext(request, context))\n\ndef random_card(request):\n \"\"\" This is a view which simply chooses a card at random and gives you the \n detail view for it. \"\"\"\n \n # Get a list of all ids, choose one randomly, and get a handle to that Card object.\n card_ids = Card.objects.values('id')\n random_id = choice( card_ids )['id']\n card = Card.objects.get(pk=random_id)\n \n # Display the card detail view for the random card. Don't redirect, because this way you\n # can refresh the page and get another random card.\n return card_detail(request, card.tarot_index, card.deck.id)\n\ndef card_detail(request, tarot_index, deck_id):\n \"\"\" This is a view to show all information about a specific card in a \n specific deck. 
\"\"\" \n \n # If the card isn't in this deck, load the list of all cards in the deck\n try:\n card = Card.objects.get(tarot_index=tarot_index, deck=deck_id)\n except Deck.DoesNotExist:\n return tarot_card_detail(request, tarot_index)\n except Card.DoesNotExist:\n return deck_detail(request, deck_id)\n \n try:\n meaning = Meaning.objects.get(meaning_set=card.deck.meaning_set, tarot_index=tarot_index)\n except Meaning.DoesNotExist:\n meaning = Meaning()\n \n # For next and previous page links\n indices = get_nearest_indices(tarot_index, deck_id)\n \n # For the side navigation\n majors_list = MajorArcana.objects.filter(deck=deck_id).order_by('tarot_index')\n first_major = ''\n if majors_list.count() > 0:\n first_major = majors_list[0].tarot_index\n \n minors_list = MinorArcana.objects.filter(deck=deck_id).order_by('suit', 'tarot_index')\n first_minor = ''\n if minors_list.count() > 0:\n first_minor = minors_list[0].tarot_index\n \n # Gives us the list of suits which have no cards in them, for completion.\n empty_suit_list = []\n suits = Suit.objects.filter(deck=deck_id)\n for suit in suits:\n cards = MinorArcana.objects.filter(suit=suit.id)\n if cards.count() == 0:\n empty_suit_list += [suit]\n \n related_cards = Card.objects.filter(tarot_index=tarot_index).values('deck', 'deck__name')\n\n context = {'card': card,\n 'meaning': meaning,\n 'majors_list': majors_list,\n 'minors_list': minors_list,\n 'empty_suit_list': empty_suit_list,\n 'related_cards': related_cards,\n 'first_major': first_major,\n 'first_minor': first_minor,\n 'next_card_index': indices['next_index'],\n 'previous_card_index': indices['previous_index'], }\n \n return render_to_response('diyTarot/card_detail.html',\n context_instance=RequestContext(request, context)) \n\ndef tarot_card_detail(request, tarot_index):\n \"\"\" This is a view to show all of the tarot cards of a particular index, \n across all decks in the system. So, if you send it 1 (The Magician), it\n will display all Magician cards. 
\"\"\"\n \n # Retrieve the card with the matching tarot_index from the right deck\n cards = Card.objects.filter(tarot_index=tarot_index).order_by('deck')\n meanings = Meaning.objects.filter(tarot_index=tarot_index)\n if meanings.count() > 0:\n meaning = meanings[0]\n else:\n meaning = {'keywords': 'None provided.',\n 'reversed_keywords': 'None provided.'}\n \n if len(cards) == 0:\n # If there is no matching tarot_index in any of the decks, then load\n # the view with all the cards\n return card_list(request)\n \n else:\n active_options = request.GET.copy()\n \n # Paginate the queryset and fetch the current page from the URL, with validation\n pages = Paginator(cards, 10, 3)\n current_page = get_current_page(active_options, pages)\n \n # The base url, since there is a different one for deck view and all cards view\n base_url = \"/diytarot/cards/%s/\" % tarot_index\n \n # For the next and previous links\n indices = get_nearest_indices(tarot_index)\n \n # For the side navigation\n default_deck_id = 1\n majors_list = MajorArcana.objects.filter(deck=default_deck_id).order_by('tarot_index')\n first_major = ''\n if majors_list.count() > 0:\n first_major = majors_list[0].tarot_index\n \n minors_list = MinorArcana.objects.filter(deck=default_deck_id).order_by('suit', 'tarot_index')\n first_minor = ''\n if minors_list.count() > 0:\n first_minor = minors_list[0].tarot_index\n \n # Gives us the list of suits which have no cards in them, for completion.\n empty_suit_list = []\n suits = Suit.objects.filter(deck=default_deck_id)\n for suit in suits:\n cards = MinorArcana.objects.filter(suit=suit.id)\n if cards.count() == 0:\n empty_suit_list += [suit]\n \n related_cards = Card.objects.filter(tarot_index=tarot_index).values('deck', 'deck__name')\n \n context = {'result_list': current_page,\n 'active_options': active_options,\n 'base_url': base_url,\n 'meaning': meaning,\n 'previous_card_index': indices['previous_index'],\n 'next_card_index': indices['next_index'],\n 'majors_list': majors_list,\n 'minors_list': minors_list,\n 'empty_suit_list': empty_suit_list,\n 'related_cards': related_cards,\n 'first_major': first_major,\n 'first_minor': first_minor}\n \n return render_to_response('diyTarot/tarot_card_detail.html',\n context_instance=RequestContext(request, context))\n\ndef reading(request, spread_id, deck_id):\n \"\"\" This is a view for displaying card readings on a given spread and deck. 
\n By default the cards drawn are random, but if a string of saved cards called\n 'cards' is passed in via query string it will try to load those cards, returning\n an error if the string is invalid.\"\"\"\n \n try:\n spread = Spread.objects.get(pk=spread_id)\n except Spread.DoesNotExist:\n return spread_list(request)\n \n try:\n deck = Deck.objects.get(pk=deck_id)\n deck_name = deck.name\n except Deck.DoesNotExist:\n return deck_list(request)\n \n # Get all the positions in the spread and pull out the maximums for layout\n positions = CardPosition.objects.filter(spread=spread.id).order_by('index')\n num_positions = positions.count()\n max_x_coordinate = positions.aggregate(Max('x_coordinate'))['x_coordinate__max'] \n max_y_coordinate = positions.aggregate(Max('y_coordinate'))['y_coordinate__max'] \n \n # Get all of the layout information for the template to use later\n layout = calculate_layout(positions, max_x_coordinate, max_y_coordinate)\n \n # If we have a query string, try to display the saved reading \n if request.method == 'GET' and request.GET.get('cards') is not None:\n \n # Get the query string\n reading_string = request.GET.get('cards')\n try:\n # Try to parse out the saved reading encoding, and catch the exceptions.\n reading = load_saved_reading(reading_string, num_positions, deck.id)\n \n except (IndexError, TypeError, ValueError, Card.DoesNotExist):\n # Exceptions with custom messages are raised in the helper function,\n # then they are caught and their text is passed to the template for display\n return render_to_response('diyTarot/reading.html',\n {'error': 'Problem loading saved reading.',\n 'spread': spread,\n 'deck': deck })\n else : \n # Otherwise, create a random reading that is different every time the page is loaded.\n # Select all cards, filter by the chosen deck, put in random order and then \n # slice off the number of cards that appear in the spread\n random_cards = Card.objects.all().filter(deck=deck_id).order_by('?')[:num_positions]\n reversal_odds = [False, False, False, False, False, False, False, True, True, True]\n \n reading = []\n for card in random_cards:\n reading += [{'card': card,\n 'reversed': choice(reversal_odds)}]\n \n # Put together the card object, position object, layout coordinates for display in the template\n # and generate a save string for the thrown cards.\n card_list = []\n saved_card_list = []\n for (thrown_card, position, coordinates) in zip(reading, positions, layout['coordinates']):\n \n card_list += [(position, thrown_card, coordinates)]\n saved_card_list += [\"%d.%d\" % (thrown_card['card'].tarot_index, \n int(thrown_card['reversed']))]\n \n # Build the string to re-create this reading. 
\n save_string = (',').join(saved_card_list)\n \n # Lists for use in the navigation menu\n deck_list = Deck.objects.values('id', 'name').order_by('name')\n spread_list = Spread.objects.values('id', 'title').order_by('title')\n \n deck_options = {}\n # Check if there is a default deck stored in the current session\n if 'deck' in request.session:\n deck_options['session_deck_id'] = request.session['deck']\n else:\n deck_options['session_deck_id'] = '1'\n \n deck_options['session_deck_name'] = Deck.objects.get(id=deck_options['session_deck_id']).name\n deck_options['display_deck_id'] = deck_id\n deck_options['display_deck_name'] = deck_name\n \n context = {'spread': spread,\n 'card_list': card_list,\n 'save_string': save_string,\n 'layout': layout['sizes'],\n 'deck_options': deck_options,\n 'deck_list': deck_list,\n 'spread_list': spread_list,} \n \n return render_to_response('diyTarot/reading.html',\n context_instance=RequestContext(request, context))\n \ndef update_reading_settings(request, spread_id):\n \"\"\" This is the view that sets the persistent settings for readings: which deck to use\n and whether to enable card reversals. It is invoked when the user updates their reading\n settings. \"\"\"\n \n # Get the form data, see if it's valid, and if needed update\n # the session variables.\n try:\n deck_id = int(request.GET.get('deck', 1))\n except ValueError:\n deck_id = 1\n \n # Check if the deck is a valid deck in the system\n deck = Deck.objects.filter(id=deck_id)\n if deck.count() > 0:\n request.session['deck'] = deck_id\n\n # Then just invoke the reading view\n target = \"/diytarot/reading/%s/%s/\" % (spread_id, deck_id)\n return redirect(target)\n\ndef random_reading(request):\n \"\"\" This is a view to get you directly to a tarot reading without browsing through\n the various spreads. Then, you can change the reading settings via the sidebar. \"\"\"\n \n deck_id_list = Deck.objects.values_list('id')\n deck_id = int(choice(deck_id_list)[0])\n \n spread_id_list = Spread.objects.values_list('id')\n spread_id = int(choice(spread_id_list)[0])\n \n # Then just invoke the reading view\n target = \"/diytarot/reading/%s/%s/\" % (spread_id, deck_id)\n return redirect(target)\n\ndef two_cards_exercise(request):\n \"\"\" This is a view for the game/exercise Two Cards, a Question, and a Sentence, which\n chooses two random cards and a random question and asks the user to make a \n one-sentence interpretation. \"\"\"\n \n \n \n context = {'deck_options': None,\n 'deck_list': deck_list,\n 'card_list': card_list,} \n \n return render_to_response('diyTarot/reading.html',\n context_instance=RequestContext(request, context))\n \n\n\n ","repo_name":"blakecsutton/diyTarot","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21093,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"}
+{"seq_id":"70321462645","text":"#!/usr/bin/env python3\n# Read a TMP101 sensor\n# sudo apt install python3-smbus\n\nimport Adafruit_BBIO.GPIO as GPIO\nimport smbus\nimport time\n\n#Setup the array\nswitch = [\"GP1_3\", \"GP1_4\"]\nleds = [\"RED\", \"GREEN\"]\n\n#Button Event Handler\ndef updateLED(channel):\n state = GPIO.input(channel)\n GPIO.output(map[channel], state)\n print(\"%dF\" %(tMap[channel]))\n \n#Dictionary to hold button/led pairs\nmap = {}\ntemp = [0, 0]\ntMap = {}\n \n#For loop to set up the button/leds\nfor x in range (0, 2) :\n GPIO.setup(leds[x], GPIO.OUT)\n GPIO.setup(switch[x], GPIO.IN)\n GPIO.output(leds[x], 1)\n map[switch[x]] = leds[x]\n tMap[switch[x]] = temp[x]\n GPIO.add_event_detect(switch[x], GPIO.BOTH, callback=updateLED)\n\nbus = smbus.SMBus(1)\naddress = 0x48\naddress2 = 0x4a\nbus.write_byte_data(address, 3, 0x1c)\nbus.write_byte_data(address2, 3, 0x1a)\n\nwhile True:\n temp[0] = bus.read_byte_data(address, 0)\n temp[1] = bus.read_byte_data(address2, 0)\n \n tMap[switch[0]] = temp[0] * 9/5 + 32\n tMap[switch[1]] = temp[1] * 9/5 + 32\n time.sleep(0.25)\n ","repo_name":"hummcs7749/ece497","sub_path":"hw03/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"73594945524","text":"import random\n\nimport flask\n\napp = flask.Flask(__name__)\n\n\n# @app.route('/')\n# def index():\n# night = random.random() # Генератор случайных чисел от 0 до 1\n# return flask.render_template('index.html', night=night)\n\n\n@app.route('/')\ndef index():\n return flask.render_template('main.html')\n\n\n@app.route('/about')\ndef about_index():\n return flask.render_template('about.html')\n\n\n@app.route('/students')\ndef students_view():\n students = [\n \"Смирнов Хольгер Филиппович\",\n \"Демидович Налина Кирилловна\",\n \"Рыбакова Хитер Валерьевна\",\n \"Жуков Орион Святославович\"\n ]\n return flask.render_template('students.html', students=students)\n\n\n# Передача словаря в шалон\n@app.route('/roses')\ndef roses_view():\n # Ключ - Red\n # Списоок - [\"Freedom\", \"Forever young\", \"Explorer\"]\n roses = {\n \"Red\": [\"Freedom\", \"Forever young\", \"Explorer\"],\n \"White\": [\"Polar star\", \"Mondial\", \"Vendella\"],\n \"other\": [\"Engagement\", \"Topaz\", \"Miss Piggy\"]\n }\n return flask.render_template('roses.html', roses=roses)\n\n# Фильтры\n@app.route('/galaxies')\ndef galaxies_view():\n nearby_galaxies = {\n 1: {\"galaxy\": \"Карликовая галактика в Большом Псе\",\n \"distance_trillionkm\": 241248.627051,\n \"distance_ly\": 25500,\n \"description\": \"Галактика Местной группы, находящаяся в созвездии Большого Пса...\"},\n 2: {\"galaxy\": \"Большое Магелланово Облако\",\n \"distance_trillionkm\": 1542099.06703,\n \"distance_ly\": 163000,\n \"description\": \"Спутник Млечного Пути, расположенная на расстоянии около 163 тыс. св. лет...\"},\n 3: {\"galaxy\": \"Карликовая эллиптическая галактика в Стрельце\",\n \"distance_trillionkm\": 662251.133081,\n \"distance_ly\": 70000,\n \"description\": \"Эллиптическая галактика-спутник Млечного Пути. Проме обычного...\"}\n }\n return flask.render_template('galaxies.html', nearby_galaxies=nearby_galaxies)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"kuznetsi/flask_test","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"43048741840","text":"\"\"\"jobproject URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.urls import path\nfrom jobapp import views\nurlpatterns = [\n\n path('',views.home),\n\n path('login/',views.joblogin),\n path('jobpost/',views.post_job),\n path('register/',views.regis),\n path('success/',views.emailsuccess),\n path('verify/',views.emailverify),\n path('verify/',views.verify),\n path('error/',views.error),\n path('jobpro/',views.jobpro),\n path('edit_comp//',views.edit_comp),\n path('regcomp/',views.regcomp),\n path('userregister/',views.userregister),\n path('userlogin/',views.userlogin),\n path('jobshow/',views.jobshow),\n path('jobshow1/',views.jobshow1),\n path('applyjob/',views.apply_job),\n path('user_profile/',views.user_profile),\n path('view_profile/',views.view_profile),\n path('useredit/',views.user_edit)\n\n]\n","repo_name":"filmiya/jobproject","sub_path":"jobapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"32994452577","text":"\"\"\"Class that represents the interaction network of Stack overflow.\"\"\"\n\n# Imports\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\nimport user as agent\nimport utils\n\n\nclass network:\n \"\"\"\n Framework of the model that represents the interaction network of Stack overflow.\n\n Attributes\n ----------\n new_users : int\n number of users added every timestep\n upvote_treshold : int\n minimum reputation to gain upvoting privilige\n upvote_bias : int\n number of upvotes a user is satisfied with\n distr : list\n contains the type and parameters of the distributions from which the probabilities are sampled\n tag_cdf : numpy.ndarray\n cummulative distribution function of the tags (communities)\n tags : list\n contains the ids of the users with a certain tag for all tags\n users : list\n contains all the users in the system\n questions : list\n all the questions ever asked during the simulation\n\n Methods\n -------\n determine_tag()\n Determine the tag of a user.\n create_user(i)\n Create a new user.\n step()\n Single timestep of the model.\n run(t)\n Execute the model for a certain number of timesteps.\n \"\"\"\n\n def __init__(self, n, tags, treshold=15, bias=12, distr=[[0.5, 0.25], [0.5, 0.25], [0.5, 0.25], [0.5, 0.25]]):\n \"\"\"\n Initialize an interaction network.\n\n Parameters\n ----------\n n : int\n number of users added every timestep\n tags : str\n .txt file containing probabilities of the different communities\n treshold : int\n minimum reputation to gain upvoting privilige, default is 15\n bias : int\n number of upvotes a user is satisfied with, default is 12\n distr : list (4 x 2)\n contains the mean and std (as a list) of the distributions from which the probabilities are sampled\n the first list is for p_ask followed by p_answer, p_interact and p_active\n default values are mean 0.5 and std 0.25 (normal distribution)\n for uniform distribution, set the mean to None\n for exponential distribution set the mean equal to the rate and the std to None\n \"\"\"\n self.new_users = n\n self.upvote_treshold = treshold\n self.upvote_bias = bias\n\n # Distributions for the interaction parameters of the users\n self.distr = distr\n\n # Calculate the cummulative distribution of the tags\n tag_pdf = np.loadtxt(tags, usecols=1)\n tag_pdf = tag_pdf / np.sum(tag_pdf)\n self.tag_cdf = utils.calc_cdf(tag_pdf)\n\n self.tags = [[] for _ in range(len(tag_pdf))]\n self.users = []\n self.questions = []\n\n def determine_tag(self):\n \"\"\"\n Determine the tag of a user.\n\n Returns\n -------\n tag : int\n tag of the user\n \"\"\"\n tag = 0\n u = np.random.uniform()\n while u > self.tag_cdf[tag]:\n tag += 1\n\n return tag\n\n def create_user(self, i):\n \"\"\"\n Create a new user.\n\n Parameters\n ----------\n i : int\n id of the user\n\n Returns\n -------\n new_user : .user\n new user\n \"\"\"\n # Tag\n tag = self.determine_tag()\n self.tags[tag].append(i)\n\n # User\n new_user = agent.user(self, i, tag)\n\n # Probabilities\n attributes = ['p_ask', 'p_answer', 'p_interact', 'p_active']\n for i, param in enumerate(self.distr):\n if param[0] is None:\n # Uniform distribution\n p = utils.draw_uniform()\n elif param[1] is None:\n # Exponential distribution\n p = utils.draw_exponential(param[0])\n else:\n # Normal distribution\n p = utils.draw_normal(param[0], param[1])\n\n setattr(new_user, attributes[i], p)\n setattr(new_user, attributes[i] + '_begin', p)\n\n return new_user\n\n def step(self):\n \"\"\"Single timestep of the 
model.\"\"\"\n # Add new users to the system\n for _ in range(self.new_users):\n user = self.create_user(len(self.users))\n self.users.append(user)\n\n # Iterate over users based on activity, most active users go first\n order = list(np.copy(self.users))\n order.sort(key=lambda x: x.p_active, reverse=True)\n for user in order:\n user.step()\n\n def run(self, t):\n \"\"\"\n Execute the model for a certain number of timesteps.\n\n Parameters\n ----------\n t : int\n number of timesteps\n \"\"\"\n for _ in range(t):\n self.step()\n\n def reset(self):\n \"\"\"Reset the system (does not change the parameter settings).\"\"\"\n self.tags = [[] for _ in range(len(self.tag_cdf))]\n self.users = []\n self.questions = []\n\n def get_upvote_distr(self, binsize):\n \"\"\"\n Get the distribution of upvotes given per user.\n\n Parameters\n ----------\n binsize : float\n length of one interval\n\n Returns\n -------\n pdf : numpy.ndarray\n probability density function of the number of upvotes\n bins : numpy.ndarray\n edges of the bins\n \"\"\"\n # Get the data on the upvotes\n upvotes = []\n for user in self.users:\n upvotes.append(user.n_questions_upvoted + user.n_answers_upvoted)\n\n bins = np.arange(binsize, max(upvotes) + binsize + 1, binsize)\n pdf = np.zeros(len(bins))\n for value in upvotes:\n pdf[(value // binsize)] += 1\n\n pdf /= np.sum(pdf)\n\n return pdf, bins\n\n def get_reputation_distr(self, binsize):\n \"\"\"\n Get the distribution of reputation.\n\n Parameters\n ----------\n binsize : float\n length of one interval\n\n Returns\n -------\n pdf : numpy.ndarray\n probability density function of the reputation\n bins : numpy.ndarray\n edges of the bins\n \"\"\"\n # Get the data on reputation\n reputation = []\n for user in self.users:\n reputation.append(user.reputation)\n\n bins = np.arange(binsize, max(reputation) + binsize + 1, binsize)\n pdf = np.zeros(len(bins))\n for value in reputation:\n pdf[(value // binsize)] += 1\n\n pdf /= np.sum(pdf)\n\n return pdf, bins\n\n def get_regression_coeff(self, data='upvotes', binsize=20):\n \"\"\"\n Calculate the linear regression coefficient of the distribution of upvotes or reputation.\n\n Parameters\n ----------\n data : str ('upvotes' or 'reputation')\n specifies for which distribution the coefficient should be calculated, default is upvotes\n binsize : float\n length of the interval used in calculating the pdf\n\n Returns\n -------\n coeff : float\n coefficient of the linear regression line\n \"\"\"\n if data == 'upvotes':\n pdf, bins = self.get_upvote_distr(binsize)\n else:\n pdf, bins = self.get_reputation_distr(binsize)\n\n # Calculate the log of the data\n pdf_log = []\n bins_log = []\n\n for ind, value in enumerate(pdf):\n if value != 0:\n bins_log.append(np.log10(bins[ind]))\n pdf_log.append(np.log10(value))\n\n lin_model = LinearRegression().fit(np.array(bins_log).reshape((-1, 1)), pdf_log)\n\n return (lin_model.coef_)[0]\n","repo_name":"AaronDC60/ABM_stackoverflow_network","sub_path":"code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"19435373081","text":"from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Object\nimport ROOT\nimport os\nimport itertools\nfrom PhysicsTools.HeppyCore.utils.deltar import deltaR\n\n\nROOT.PyConfig.IgnoreCommandLineOptions = True\n\n_rootLeafType2rootBranchType = {\n 'UChar_t': 'b',\n 'Char_t': 'B',\n 'UInt_t': 'i',\n 'Int_t': 'I',\n 'Float_t': 'F',\n 'Double_t': 'D',\n 'ULong64_t': 'l',\n 'Long64_t': 'L',\n 'Bool_t': 'O'\n}\n\n\nclass TriggerAnalyzer(Module):\n def __init__(self, particlePdgId, triggerBits, branchNames, recoCollection, maxDR, maxRelDpt):\n self.particlePdgId = particlePdgId\n self.triggerBits = triggerBits\n self.recoCollectionName = recoCollection\n self.maxDR=maxDR\n self.branchNames=branchNames\n self.maxRelDpt=maxRelDpt\n pass\n\n def beginJob(self):\n pass\n\n def endJob(self):\n pass\n\n def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n self.out = wrappedOutputTree\n\n self.out.branch(\"%s_trgDR\"%(self.recoCollectionName),\n _rootLeafType2rootBranchType['Float_t'],\n lenVar=\"n%s\"%(self.recoCollectionName))\n\n self.out.branch(\"%s_trgRelDpt\"%(self.recoCollectionName),\n _rootLeafType2rootBranchType['Float_t'],\n lenVar=\"n%s\"%(self.recoCollectionName))\n\n for name in self.branchNames:\n self.out.branch(\"%s_%s\"%(self.recoCollectionName,name),\n _rootLeafType2rootBranchType['Bool_t'],\n lenVar=\"n%s\"%(self.recoCollectionName))\n pass\n \n\n def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n pass\n\n def filterBranchNames(self, branches, collection):\n out = []\n for br in branches:\n name = br.GetName()\n if not name.startswith(collection + '_'):\n continue\n out.append(name.replace(collection + '_', ''))\n self.branchType[out[-1]] = br.FindLeaf(br.GetName()).GetTypeName()\n return out\n\n\n def analyze(self, event):\n \"\"\"process event, return True (go to next module) or False (fail, go to next event)\"\"\"\n# print self.recoCollectionName\n reco_objs = Collection(event, self.recoCollectionName)\n trg_objs = Collection(event, \"TrigObj\")\n \n fired_trg_objs=[]\n fired_bits=[]\n for trg in trg_objs:\n if abs(trg.id) != self.particlePdgId:\n continue;\n bits=[ False for i in range(len(self.triggerBits)) ]\n for ibit,bit in enumerate(self.triggerBits):\n if trg.filterBits & (1< batch_size:\n sentlist = [sentences[i*batch_size:(i+1)*batch_size] for i in range(np.ceil(total/batch_size).astype(int))]\n result = []\n torch.cuda.empty_cache()\n \n for txts in sentlist:\n batch_dat = get_sample(txts)\n batch_input = torch.LongTensor(batch_dat).to(config.device)\n ret = translate(batch_input, model, use_beam=beam_search)\n #print('translate:\\n', '\\n'.join(ret))\n result.extend(ret)\n torch.cuda.empty_cache()\n return result\n\ndef readtxt(fname, encoding='utf-8'):\n try:\n with open(fname, 'r', encoding=encoding) as f: \n data = f.read()\n return data\n except Exception as e:\n return ''\n\ndef savetofile(txt, filename, encoding='utf-8', method='a+'):\n try:\n with open(filename, method, encoding=encoding) as f: \n f.write(str(txt)+ '\\n')\n return 1\n except :\n return 0\n\n\ndef GPU_memory(gpuid=0):\n NUM_EXPAND = 1024 * 1024\n handle = nvmlDeviceGetHandleByIndex(gpuid)\n info = nvmlDeviceGetMemoryInfo(handle)\n\n gpu_memory_used = info.used / NUM_EXPAND\n #print('Total Memory:%d MB, Used Memory:%d MB'% 
(gpu_memory_total, gpu_memory_used))\n return gpu_memory_used\n\nclass GPU_MEM():\n def __init__(self, gupid=0, interval=1):\n self.gupid = gupid\n self.interval = interval\n self.status = 0\n self.data = []\n self.queue = mp.Queue()\n self.process = None\n\n def get_gpu_memory(self):\n while True:\n mem = GPU_memory(self.gupid)\n #print('memory:', mem)\n self.queue.put(mem)\n if self.interval>0:\n time.sleep(self.interval)\n else:\n break;\n \n def build(self):\n pass\n self.data = []\n self.process = mp.Process(target=self.get_gpu_memory)\n\n def start(self, interval=1):\n self.interval = interval\n if not self.process is None:\n self.process.start()\n #self.process.join()\n \n def stop(self):\n self.interval = 0\n #self.process.stop()\n self.process.terminate()\n self.data = self.get_queue()\n\n def mem_ave(self):\n if self.data == []:\n ret = 0\n else:\n ret = np.average(self.data)\n \n return ret\n \n def mem_max(self):\n if self.data == []:\n ret = 0\n else:\n ret = np.max(self.data)\n \n return ret\n\n def get_queue(self):\n ret = []\n while not self.queue.empty():\n dat = self.queue.get()\n ret.append(dat)\n return ret\n\nif __name__ == \"__main__\":\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n import warnings\n warnings.filterwarnings('ignore')\n #translate_example()\n online_translate()\n\n\n","repo_name":"cfl2005/ECTransformer","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":6296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"23840299051","text":"# Submission Result: Accepted 100% :D\nclass Solution(object):\n def hammingDistance(self, x, y):\n \"\"\"\n :type x: int\n :type y: int\n :rtype: int\n \"\"\"\n n = x ^ y\n s = bin(n)\n count = 0\n for s1 in s:\n if s1 == '1':\n count += 1\n return count\n\n","repo_name":"wzbbbb/LeetCode-OJ","sub_path":"Hamming_Distance.py","file_name":"Hamming_Distance.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"18685850245","text":"import numpy as np\nimport scipy.spatial.distance as scidist\nfrom keras.models import Model\nfrom common.Enums import DistanceMetrics\n\n\ndef computeFeatureWiseMetric(consumer_batch, shop_features, metric):\n assert consumer_batch.shape[1] == shop_features.shape[1], \"Consumer batch and shop features must have same feature dimensin\"\n consumer_count = consumer_batch.shape[0]\n shop_count = shop_features.shape[0]\n consumer_batch = np.expand_dims(consumer_batch, axis=1)\n consumer_batch = np.tile(consumer_batch, (1, shop_count, 1))\n shop_features = np.expand_dims(shop_features, axis = 0)\n shop_features = np.tile(shop_features, (consumer_count, 1, 1))\n\n diff = consumer_batch - shop_features\n\n if metric == DistanceMetrics.L1:\n return np.abs(diff)\n elif metric == DistanceMetrics.L2:\n return np.square(diff)\n else:\n raise Exception(\"Invalid metric\")\n\ndef computeDistances(consumer_features, shop_features, metric=DistanceMetrics.L1, model = None, batchSize = 100):\n assert isinstance(consumer_features, np.ndarray), 'Consumer features must be an numpy array of size n * d'\n assert isinstance(shop_features, np.ndarray), 'Shop features must be a numpy array of size m * d'\n assert consumer_features.shape[1] == shop_features.shape[1], 'Consumer and shop features must have same dimension'\n\n if model is not None:\n print(\"Computing Trained Model based distance metric\")\n assert isinstance(model, Model), \"model must be a keras model\"\n result = np.array([]).reshape((-1, shop_features.shape[0]))\n num_batches = consumer_features.shape[0] // batchSize + 1\n batch_iter = 1\n for start in range(0, consumer_features.shape[0], batchSize):\n last_index = min(consumer_features.shape[0], start + batchSize)\n\n consumer_batch = consumer_features[start: last_index]\n feature_wise_metric = computeFeatureWiseMetric(consumer_batch, shop_features, metric)\n feature_wise_metric = feature_wise_metric.reshape((-1, feature_wise_metric.shape[2]))\n\n similarity = model.predict(feature_wise_metric)\n # We multiply by negative 1 since higher scores means they are more similar, aka negative of distance.\n similarity = -1 * similarity.reshape((consumer_batch.shape[0], -1))\n\n print(\"Finished batch {} of {}\".format(batch_iter, num_batches))\n batch_iter +=1\n result = np.concatenate((result, similarity))\n\n return result\n\n else:\n print(\"Computing Vanilla Distance Metric\")\n if(metric == DistanceMetrics.L1):\n metric_string = 'cityblock'\n elif(metric == DistanceMetrics.L2):\n metric_string = 'euclidean'\n elif (metric == DistanceMetrics.Cosine):\n metric_string = 'cosine'\n else:\n raise Exception(\"Invalid Distance Metric\")\n return scidist.cdist(consumer_features, shop_features, metric=metric_string)","repo_name":"wutenghu/CS231nFinalProject","sub_path":"common/helpers/computeDistances.py","file_name":"computeDistances.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"}
+{"seq_id":"25474223956","text":"import io,os\nimport avro.schema\nimport avro.io\n\nschema = avro.schema.Parse(open(\"SimpleClass.avsc\", \"rb\").read())\n\nfd = os.open('SimpleClass.avro', os.O_RDONLY)\nBUFSIZE = 2**32-1\nbyte_data = os.read(fd, BUFSIZE)\nos.close(fd)\n\nbytes_reader = io.BytesIO(byte_data)\ndecoder = avro.io.BinaryDecoder(bytes_reader)\nreader = avro.io.DatumReader(schema)\n\n# How do I know how many records are encoded?\ndata = reader.read(decoder)\nprint(data)\ndata = reader.read(decoder)\nprint(data)\n\n","repo_name":"IRISMeister/IRIS-PEX-MQTT-dotnet","sub_path":"datavol/share/SimpleClass-decoder.py","file_name":"SimpleClass-decoder.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"22950315380","text":"#!/usr/bin/python3\n\nclass Node:\n \"\"\"class that define a node of a singly linked list\"\"\"\n\n def __init__(self, data, next_node=None):\n \"\"\"initialize a node with data and nex_node\"\"\"\n self.data = data\n self.next_node = next_node\n \n \n @property\n def data(self):\n \"\"\"\"return the data of the node\"\"\"\n return self.__data\n\n @data.setter\n def data(self, value):\n \"\"\"sets the data of th node\"\"\"\n if type(value) is not int:\n raise TypeError(\"data must be an integer\")\n else:\n self.__data = value\n \n @property\n def next_node(self):\n \"\"\"return the next_node of the node\"\"\"\n return self.__next_node\n\n @next_node.setter\n def next_node(self, value):\n \"\"\"sets the nex_nod of the nod\"\"\"\n if value is not None and type(value) is not Node:\n raise TypeError(\"next_node must be a Node object\")\n else:\n self.__next_node = value\n\nclass SinglyLinkedList:\n \"\"\"class that define a singly linked list\"\"\"\n\n def __init__(self):\n \"\"\"initiate a singly linked list with a header attribute\"\"\"\n self.head = None\n\n def __str__(self):\n \"\"\"return a string representing a single linked list\"\"\"\n s = \"\"\n node = self.head\n while node:\n s += str(node.data)\n node = node.next_node\n if node:\n s += \"\\n\"\n return s\n\n def sorted_insert(self, value):\n \"\"\"insert a new node into correct sorted of the list\"\"\"\n new_node = Node(value)\n if self.head is None:\n self.head = new_node\n else:\n node = self.head\n prev = None\n while node and value > node.data:\n prev = node\n node = node.next_node\n if prev is None:\n new_node.next_node = self.head\n self.head = new_node\n else:\n new_node.next_node = node\n prev.next_node = new_node\n","repo_name":"GodwinCyber/alx-higher_level_programming","sub_path":"0x06-python-classes/100-singly_linked_list.py","file_name":"100-singly_linked_list.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"25982257416","text":"\"\"\"\nProgram: recursiveRectangles.py\nAuthor: Chad Lister\nDate: 01/03/2021\n\nThis program draws 3 rectangles using a 1/3 and 2/3 formula given in the assignment.\n\n1) Computation is:\n\n fill background with random color\n using recursion\n fill first 1/3 with random color\n fill bottom 2/3 with random color\n\n2) Output is:\n\n a canvas with 3 rectangles of random colors\n\n\"\"\"\n\nfrom turtle import Turtle\nimport random\n\n# Random color list since rgb doesn't work.\ncolor = [\"black\", \"red\", \"green\", \"blue\", \"yellow\", \"gray\", \"white\"]\n\ndef fillRectangle(newTurtle, height, width, x, y):\n \"\"\" Draws and fills a rectangle. \"\"\"\n\n newTurtle.up()\n newTurtle.goto(x, y)\n newTurtle.down()\n newTurtle.begin_fill()\n newTurtle.forward(width)\n newTurtle.right(90)\n newTurtle.forward(height)\n newTurtle.right(90)\n newTurtle.forward(width)\n newTurtle.right(90)\n newTurtle.forward(height)\n newTurtle.right(90)\n newTurtle.end_fill()\n\n return\n\ndef main():\n \"\"\" The main function. \"\"\"\n\n # define parameters.\n newTurtle = Turtle()\n newTurtle.speed(30)\n newTurtle.hideturtle()\n\n #newTurtle.fillcolor(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n height = newTurtle.screen.window_height()\n width = newTurtle.screen.window_width()\n x = -360\n y = 360\n\n # initial background..\n newTurtle.fillcolor(color[random.randint(0, 6)])\n fillRectangle(newTurtle, height, width, x, y)\n \n x = -136\n y = 360\n width = (width / 3 + 2) * 2\n c = 1\n\n for c in range(2):\n \n newTurtle.fillcolor(color[random.randint(0, 6)])\n fillRectangle(newTurtle, height, width, x, y)\n x = -360\n y = 136\n height = (height / 3 + 2) * 2\n width = newTurtle.screen.window_width()\n\n return\n\nmain()\n","repo_name":"Chad-Lister/ITEC_3001_Python_Programming","sub_path":"Chapter # 7 Projects # 3, 4, 9, 10/Chapter # 7 Project # 4/Chapter # 7 Project # 4.py","file_name":"Chapter # 7 Project # 4.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"3628553790","text":"\"\"\"\nIrondomo Protocol client example. Uses the IDPClient API to hide all IDP aspects\nAuthor: Matteo Ferrabone \n\"\"\"\nimport os\nimport sys\nimport zmq.auth\nfrom IronDomo import IDPClient\n\ndef main():\n verbose = '-v' in sys.argv\n base_dir = os.path.dirname(__file__)\n keys_dir = os.path.join(base_dir, 'certificates')\n public_keys_dir = os.path.join(base_dir, 'public_keys')\n secret_keys_dir = os.path.join(base_dir, 'private_keys')\n client_secret_file = os.path.join(secret_keys_dir, \"client.key_secret\")\n client_public, client_secret = zmq.auth.load_certificate(client_secret_file)\n server_public_file = os.path.join(public_keys_dir, \"server.key\")\n print('Server Secret File: {0}'.format(server_public_file))\n server_public, dummy = zmq.auth.load_certificate(server_public_file)\n\n print('Server Key: {0}'.format(server_public))\n\n client = IDPClient.IronDomoClient(\"tcp://127.0.0.1:6556\", verbose, ('P+S690P{iVPfx {0}\".format(count)\n try:\n reply = client.send(b\"echo\", request)#.encode())\n print('Message: {0}'.format(count))\n except KeyboardInterrupt:\n break\n else:\n # also break on failure to reply:\n if reply is None:\n break\n count += 1\n client.close()\n print(\"%i requests/replies processed\" % count)\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"waterviewsrl/IronDomo","sub_path":"examples/python/client/client_curve.py","file_name":"client_curve.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"}
+{"seq_id":"35090844759","text":"#https://school.programmers.co.kr/learn/courses/30/lessons/42579\n#list를 돌면서 곡의 종류와 그 종류에 해당하는 인덱스 그리고 총합까지 알아야했다.\n#그래서 dict 두개로 분리하여 하나는 총합 하나는 인덱스와 곡수를 기록시킨후에\n#sort 조건들을 맞춰서 answer에 넣어주었다. 이 때 길이가 1일 수도 있어서 테케 예외처리로 1일때를 추가해줘야 만점이 나옵니다.\ndef solution(genres, plays):\n dic = {}\n dic1 = {}\n answer = []\n for i in range(len(genres)):\n dic[genres[i]] = dic.get(genres[i], []) + [[plays[i],i]]\n for j in range(len(genres)):\n dic1[genres[j]] = dic1.get(genres[j],0) + plays[j]\n dic1 = sorted(dic1.items(), key = lambda item: item[1], reverse = True)\n for i in dic1:\n temp = dic[i[0]]\n temp.sort(key=lambda x:x[0], reverse=True)\n if len(temp) ==1:\n for j in range(1):\n answer.append(temp[j][1])\n else:\n for j in range(2):\n answer.append(temp[j][1])\n return answer","repo_name":"EagerProgrammer/CodingTest-Python-","sub_path":"0519.py","file_name":"0519.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"2575935207","text":"import unittest\nfrom stack import Stack\n\nclass TestStack(unittest.TestCase):\n \"\"\"\n Tests for stack.py\n \"\"\"\n def setUp(self):\n self.stack = Stack()\n\n def test_stack_with_one_item(self):\n self.stack.push(1)\n out = self.stack.pop()\n self.assertTrue(out == 1)\n\n def test_stack_with_multiple_items(self):\n self.stack.push(1)\n self.stack.push(2)\n self.stack.push(3)\n out = self.stack.pop()\n self.assertTrue(out == 3)\n out = self.stack.pop()\n self.assertTrue(out == 2)\n out = self.stack.pop()\n self.assertTrue(out == 1)\n\n def test_stack_with_empty_stack(self):\n out = self.stack.pop()\n\n def test_stack_peek(self):\n self.stack.push(1)\n self.assertTrue(self.stack.peek() == 1)\n # Then peek again to check the item is still on the stack\n self.assertTrue(self.stack.peek() == 1)\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestStack)\nunittest.TextTestRunner(verbosity=2).run(suite)","repo_name":"jamesharrop/learning-python","sub_path":"Stack/test_stack.py","file_name":"test_stack.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"9353801842","text":"import math\nimport pandas as pd\nimport numpy as np\nimport csv\nimport os\nimport peakutils\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport sys\nfrom scipy.signal import find_peaks, peak_prominences, filtfilt, butter\nfrom matplotlib.backend_bases import MouseButton\n\n\n\"\"\"Reads in all csv files in folder and creates an array of pandas dataframes\n returns array of dfs\"\"\"\ndef read_csvs():\n cellData = []\n # when we have a folder of, files, read in from directory path\n filename = \"realResults.csv\" #file outputted by ROI manager\n path = \"plugins/CalciumSignal/pythonscript/cell_data/\"\n df = pd.read_csv((path + filename))\n df = df.filter(regex=\"Mean\")\n df = df.dropna(axis=\"columns\") ##eliminate columns with NaN values\n return df\n\n\n\n\"\"\"Stores all relevant graph data to a csv for the ImageJ plugin to use\"\"\"\ndef write_csv(df):\n # when we have a folder of, files, read in from directory path\n path = \"plugins/CalciumSignal/pythonscript/cell_data/\"\n df.to_csv(os.path.join(path, \"graph_data.csv\"))\n return\n\n\n\n\"\"\" Stores peak locations at the correct frame # in dataframe.\nIf there is a peak, value will be 1, if no peak detected, value is -1.\"\"\"\ndef writePeaksToDf(peakIndx,df, cellnum):\n peaks = [-1] * len(df)\n colName = \"Cell\" + str(cellnum+1) + \"_Peaks\"\n for peakFrame in peakIndx:\n peaks[peakFrame] = 1\n newDf = df.copy()\n newDf[colName] = peaks\n return newDf\n\n\n\n\"\"\"Finds first rough baseline from data\n Looks at all elements below the average and averages them\n returns the base\"\"\"\ndef findBaseline(avg, intensities, cellDf):\n for elem in intensities:\n if elem > avg:\n intensities.remove(elem)\n base = sum(intensities) / len(intensities)\n cellDf['baseline'] = base\n return (base)\n\n\n\n\"\"\"Creates a new df column with normalized data\"\"\"\n\"\"\"TODO: maybe later changed so can use findBaseline function instead\"\"\"\n\"\"\"For now, only use when passing in normalized data\"\"\"\ndef findNormalizedBase(ndata, df):\n #ndata -> normalized data\n average = ndata.mean()\n df[\"ndata\"] = ndata\n baselineArray = df[\"ndata\"].values.tolist()\n for elem in baselineArray:\n if elem > average:\n baselineArray.remove(elem)\n newBase = sum(baselineArray) / len(baselineArray)\n df['normalbaseline'] = newBase\n\n\n\n\"\"\"ONLY USE WHEN NOT SMOOTHING THE DATA\"\"\"\n\"\"\"Normalizes the baseline for the original data\"\"\"\ndef normalizeData(base1, df, cellMean):\n y = df[cellMean] #list of intensities\n base2 = peakutils.baseline(y, math.floor(base1))\n normalizedData = y - base2\n findNormalizedBase(normalizedData, df) #new normbaseline column created\n return base2\n\n\n\"\"\"Smooths data points so signal is more clean\nCalls findNormalizedBase to find baseline of smoothed data\n\"\"\"\ndef smoothDataPoints(normalBase, df, cellMean):\n data = df[cellMean].values.tolist()\n c, d = butter(3, 0.1, 'lowpass') #.3 for less smoothed data\n filteredLowPass = filtfilt(c, d, data)\n newbase = peakutils.baseline(filteredLowPass, math.floor(normalBase))\n findNormalizedBase(filteredLowPass-newbase,df)\n return filteredLowPass, newbase\n\n\n\n\"\"\"\nThis function is for testing only\n\"\"\"\ndef plotPeakCellData(x,y,df):\n plt.figure()\n plt.xlabel(\"Video Frame (#)\")\n plt.ylabel(\"Normalized Calcium Intensity\")\n plt.title(\"Calcium Intensity Over Time; Normalized and Smoothed Data with Peaks\")\n plt.plot(y)\n plt.plot(x,y[x],\"x\")\n 
plt.plot(df[\"normalbaseline\"],color='red',label=\"baseline\")\n\n\n\"\"\"\nPlots the calcium signaling data on a graph\n\"\"\"\ndef plotOriginalCellData(y, figure):\n plt.title(\"Original Calcium Intensity Over Time\")\n plt.xlabel(\"Video Frame (#)\")\n plt.ylabel(\"Calcium Intensity\")\n figure.gca().plot(y)\n\n\n\n\"\"\"\nFunction matches the peak detected on the smoothed graph to the correct frame in original data\n\"\"\"\ndef matchRefinedPeakToActualPeak(peaks, originalData):\n # since data was smoothed when peaks were detected, look for highest point around frame\n # where peak was detected in the original data based on an error deviation\n peakIndices = []\n for peak in peaks:\n highPointIndex = peak\n for value in range(peak - 30, peak + 30):\n if originalData[value] > originalData[highPointIndex]:\n highPointIndex = value\n peakIndices.append(highPointIndex)\n return peakIndices\n\n##GLOBAL VARIABLES###\ncellData = read_csvs()\ncellID = 0\nfig = plt.figure()\nmax = len(cellData.columns)\n##GLOBAL VARIABLES###\n\n\n\n\"\"\"\nPlots peaks on a graph with the original data (peaks are marked with \"x\")\n\"\"\"\ndef plotPeaksOnOriginalData(peaks,data,cellnum,figure):\n plt.title(\"Original Calcium Intensity Over Time with Peaks\")\n plt.xlabel(\"Video Frame (#)\")\n plt.ylabel(\"Calcium Intensity\")\n\n for idx in peaks:\n figure.gca().plot(idx, data[idx],\"x\")\n\n\n\n\"\"\"\nFunction rechecks peak columns to figure out where the peaks are and replots them on the current figure.\nMainly used after user_addPeak or user_removePeak to replot and properly display figure with the new additions or new\nremovals\n\"\"\"\ndef replot_cell(figure):\n # print(\"HERE\")\n figure.canvas.manager.set_window_title(\"Cell %d\" %(cellID + 1))\n peakCol = \"Cell\" + str(cellID + 1) + \"_Peaks\"\n dataCol = \"Mean\" + str(cellID + 1)\n\n plotOriginalCellData(cellData[dataCol].values.tolist(), figure)\n if peakCol in cellData.columns:\n for i in range(0,len(cellData[peakCol])):\n if cellData[peakCol][i] == 1: #1 signifies there is a peak, -1 means there is no peak\n # print(\"peak\")\n figure.gca().plot(i,cellData[dataCol][i],marker=\"x\",color=\"red\")\n\n\n\n\"\"\"\nFunction does main calculations for cell data. Calls other functions to determine baseline,\nget smoothed/refined data values, and determine peaks. Once it's finished, it saves the\npeaks to the dataframe.\n\"\"\"\ndef cell_calculations():\n global cellID\n global cellData\n\n # we're really starting from Cell 0 because of indices. 
but it's easier for the client to start from 1\n #figure.canvas.manager.set_window_title(\"Cell %d\" %(cellID + 1))\n # figure.canvas.toolbar.pack_forget()\n cell = cellData.columns[cellID]\n videoFrames = len(cellData)\n average = cellData[cell].mean()\n originalIntensities = cellData[cell].values.tolist()\n # find baseline\n firstBaseline = findBaseline(average, list(originalIntensities), cellData)\n # normalize Data - don't need to use for now\n # normalBase = normalizeData(firstBaseline, cell, cellMean)\n smoothedData, smoothedBase = smoothDataPoints(firstBaseline,cellData,cell)\n # plot graph\n refinedData = smoothedData - smoothedBase\n\n peaks, properties = find_peaks(refinedData, prominence=(5))\n #plotOriginalCellData(originalIntensities, figure)\n #plotPeakCellData(peaks,refinedData,cell)\n peakIndices = matchRefinedPeakToActualPeak(peaks,originalIntensities)\n #plotPeaksOnOriginalData(peakIndices,originalIntensities,cellID,figure)\n cellData = writePeaksToDf(peakIndices,cellData,cellID)\n #return cellData\n\n\n\n\n# key event listener for switching between cell graphs\ndef on_press(event):\n global cellData\n global cellID\n global fig\n global max\n\n # right arrow key to advance, left to go back (WASD scheme used as a backup)\n # graphs should wrap if you go past the last cell or before the first one -- hence, \"carousel view\"\n if event.key in ['right', 'left', 'd', 'a']:\n if event.key == 'right' or event.key == 'd':\n cellID += 1\n if cellID >= max:\n cellID = 0\n if event.key == 'left' or event.key == 'a':\n if cellID > 0:\n cellID -= 1\n elif cellID <= 0:\n cellID = max - 1\n\n fig.clear()\n event.canvas.figure.clear()\n replot_cell(event.canvas.figure)\n event.canvas.draw()\n\n\n\n\"\"\"\nFunction calls the proper add/remove peak function depending on type of mouse click\n\"\"\"\ndef on_click(event):\n # print(\"on_click\")\n if event.button is MouseButton.LEFT:\n # print(\"LEFT\") # normal click\n # call add peak function\n user_addPeak(event)\n elif event.button is MouseButton.RIGHT:\n # print(\"Right\") # right click - remove\n # call remove peak function\n user_removePeak(event)\n\n\n\n\"\"\"\nFunction removes peak from the graph\n -registers x,y coordinate of mouse click\n -determines closest peak in df (based on frame range) to mouse click\n -removes this point from df (make it -1)\n -replot any peaks\n\"\"\"\ndef user_removePeak(event):\n global cellData\n global fig\n\n # print(\"remove peak from graph function\")\n peakCol = \"Cell\" + str(cellID + 1) + \"_Peaks\"\n dataCol = \"Mean\" + str(cellID + 1)\n\n if event.inaxes: # checks to see if user clicked on the plotted graph\n ax = event.inaxes # the axes instance\n x = int(event.xdata)\n y = int(event.ydata)\n # print('data coords %f %f' % (x, y))\n\n #finding the closest already defined peak (if there is any) to the mouseclick so we can remove it\n removeIdx = x\n diff = x\n for data in range(x - 10, x + 10): # original was 30\n try:\n if cellData[peakCol][data] == 1:\n if abs(cellData[dataCol][data] - cellData[dataCol][removeIdx]) < diff:\n removeIdx = data\n diff = abs(cellData[dataCol][data] - cellData[dataCol][removeIdx])\n except:\n continue # ignore indexes that are out of range\n\n #cellData[peakCol][removeIdx] = -1\n cellData.loc[removeIdx,peakCol] = -1\n\n fig.clear()\n event.canvas.figure.clear()\n replot_cell(event.canvas.figure)\n\n event.canvas.draw()\n\n # print(\"DONE\")\n plt.show()\n\n\n\n\"\"\"\nFunction adds peak to the graph\n -registers x,y coordinate of mouse click\n -determines 
relatively highest y value in df (based on frame range) to mouse click\n -adds this point as peak (make it 1)\n -replot any peaks\n\"\"\"\ndef user_addPeak(event):\n global cellData\n global fig\n\n # print(\"add peak to graph function\")\n peakCol = \"Cell\" + str(cellID + 1) + \"_Peaks\"\n dataCol = \"Mean\" + str(cellID + 1)\n\n if event.inaxes: # checks to see if user clicked on the plotted graph\n ax = event.inaxes # the axes instance\n x = int(event.xdata)\n y = int(event.ydata)\n # print('data coords %f %f' % (x, y))\n\n maxValIdx = x\n for data in range(x - 10, x + 10): # original was 30\n try:\n if cellData[dataCol][data] > cellData[dataCol][maxValIdx]:\n maxValIdx = data\n except:\n continue # ignore indexes that are out of range\n\n cellData.loc[maxValIdx,peakCol] = 1\n # print(cellData.loc[maxValIdx, peakCol])\n # print(\"x: \" + str(maxValIdx))\n # print(\"y: \" + str(cellData[dataCol][maxValIdx]))\n\n fig.clear()\n event.canvas.figure.clear()\n replot_cell(event.canvas.figure)\n event.canvas.draw()\n\n # print(\"DONE\")\n plt.show()\n\n\n\ndef main():\n # uncomment below line for debugging only (and be sure to close stdout at the end)\n # this redirects print() output to output.txt, which you will find in the Fiji.app directory after program finishes\n # sys.stdout = open('output.txt', 'w')\n\n # sorry about the globals. it's for a good cause, I promise.\n global cellData\n global cellID\n global fig\n\n fig.canvas.mpl_connect('key_press_event', on_press)\n fig.canvas.mpl_connect('button_press_event', on_click)\n\n numColumns = len(cellData.columns)\n for col in range(0,numColumns):\n cellID = col\n cell_calculations()\n\n cellID = 0\n write_csv(cellData)\n path = \"plugins/CalciumSignal/pythonscript/cell_data/\"\n cellData = pd.read_csv((path + \"graph_data.csv\"))\n\n #plot cells and open carousel view for user to click through\n replot_cell(fig)\n plt.show()\n\n # write to csv at the end (after window is closed)!\n write_csv(cellData)\n\n # uncomment below for debugging only (also see output.txt at the start of main)\n # sys.stdout.close()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Eoldham/CISC498Project-Group17","sub_path":"release/peakscript.py","file_name":"peakscript.py","file_ext":"py","file_size_in_byte":12271,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"}
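The smoothing-plus-detection pipeline in that record (butter, filtfilt, find_peaks) can be exercised in isolation. A self-contained sketch on synthetic data, reusing the record's filter order and cutoff:

    import numpy as np
    from scipy.signal import butter, filtfilt, find_peaks

    rng = np.random.default_rng(0)
    noisy = np.sin(np.linspace(0, 6 * np.pi, 400)) + rng.normal(0, 0.2, 400)
    b, a = butter(3, 0.1, 'lowpass')   # same 3rd-order low-pass, normalized cutoff 0.1
    smooth = filtfilt(b, a, noisy)     # zero-phase filtering, so peaks are not shifted in time
    peaks, _ = find_peaks(smooth, prominence=0.5)
    print(len(peaks))                  # the three sine crests survive the filter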
+{"seq_id":"39563526664","text":"import random\nimport cv2\nimport ctypes\nfrom PIL import Image\nimport numpy as np\nimport re\nimport math\n\nSET_WIDTH = 2560\nSET_HEIGHT = 1440\n\n\ndef preprocess(img):\n img = img.convert(\"RGB\")\n\n # Convert the image to a NumPy array\n img_np = np.array(img)\n\n # Define the color codes to filter for\n color_codes = [\n (255, 255, 255),\n ]\n\n # Define the color similarity threshold\n threshold = 60 # Adjust this value to control the leeway in color matching\n\n # Filter for colors in the screenshot using NumPy operations\n filtered_mask = np.zeros_like(img_np[:, :, 0], dtype=bool)\n for color_code in color_codes:\n color_code_np = np.array(color_code)\n color_difference = np.sum(np.abs(img_np[:, :, :3] - color_code_np), axis=2)\n matches = color_difference <= threshold\n filtered_mask |= matches\n\n # Expand the filtered mask to have the same shape as the screenshot image array\n filtered_mask_expanded = np.expand_dims(filtered_mask, axis=2)\n filtered_mask_expanded = np.repeat(filtered_mask_expanded, 3, axis=2)\n\n # Create a filtered image using the filtered mask\n filtered_image_np = np.where(filtered_mask_expanded, img_np, 0)\n thres_img = cv2.threshold(filtered_image_np, 127, 255, cv2.THRESH_BINARY_INV)[1]\n\n return Image.fromarray(thres_img)\n\n\ndef get_left_top_width_height(pos):\n \"\"\"\n Returns a tuple of four values representing the left, top, width, and height of a rectangle\n defined by the two corners of a rectangle given as the argument 'pos'.\n\n Args:\n pos (tuple): A tuple of two tuples, representing the top-left and bottom-right corners of a rectangle.\n\n Returns:\n tuple: A tuple of four integers representing the left, top, width, and height of the rectangle.\n\n Example:\n If pos = ((10, 20), (50, 80)), the function returns (10, 20, 40, 60), which represents a rectangle\n with top-left corner at (10, 20), width 40, and height 60.\n \"\"\"\n l_x = pos[0][0]\n r_x = pos[1][0]\n\n l_y = pos[0][1]\n r_y = pos[1][1]\n\n return (l_x, l_y, r_x - l_x, r_y - l_y)\n\n\ndef get_monitor_resolution():\n \"\"\"\n Returns the resolution of the primary monitor in pixels as a tuple (width, height).\n This function uses the `GetSystemMetrics()` function from the Windows user32.dll library,\n so it is only compatible with Windows operating systems.\n\n Returns:\n - tuple: A tuple of integers representing the width and height of the primary monitor's resolution.\n \"\"\"\n user32 = ctypes.windll.user32\n return user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)\n\n\ndef get_scalars():\n \"\"\"\n Returns the scaling factors to fit the desired width and height\n (SET_WIDTH and SET_HEIGHT) into the user's monitor resolution.\n\n Returns:\n - w_scalar (float): The scaling factor for the width dimension.\n - h_scalar (float): The scaling factor for the height dimension.\n \"\"\"\n u_width, u_height = get_monitor_resolution()\n\n w_scalar = SET_WIDTH / u_width\n h_scalar = SET_HEIGHT / u_height\n\n if u_width < SET_WIDTH:\n w_scalar = u_width / SET_WIDTH\n\n if u_height < SET_HEIGHT:\n h_scalar = u_height / SET_HEIGHT\n\n return w_scalar, h_scalar\n\n\ndef get_scaled_position(x, y):\n \"\"\"\n Takes in the x and y coordinates and returns their scaled position on the user's screen, based on the user's monitor resolution and the desired width and height set in SET_WIDTH and SET_HEIGHT constants.\n\n Args:\n x (int): The x-coordinate of the position to be scaled.\n y (int): The y-coordinate of the position to be scaled.\n\n Returns:\n A tuple (x, 
y) containing the scaled position of the input coordinates on the user's screen.\n    \"\"\"\n    w_scalar, h_scalar = get_scalars()\n\n    x *= w_scalar\n    y *= h_scalar\n\n    return (x, y)\n\n\ndef get_scaled_pos(pos):\n    \"\"\"\n    Scales the position of a rectangular region by the user's monitor resolution to ensure\n    that the screenshot is taken with the same aspect ratio regardless of the user's screen resolution.\n\n    Args:\n        pos (tuple): A tuple of four coordinates (x1, y1, x2, y2) defining a rectangular region.\n\n    Returns:\n        list: A list of two tuples containing the scaled coordinates of the top-left and bottom-right corners\n        of the rectangular region.\n    \"\"\"\n    scaled_pos = []\n\n    for corner in pos:\n        corner_pos = get_scaled_position(*corner)\n        scaled_pos.append(corner_pos)\n\n    return scaled_pos\n\n\ndef clean_string(str):\n    \"\"\"\n    Returns the leading run of alphanumeric characters, whitespace and periods in the\n    given string, i.e. everything up to the first other symbol.\n\n    Args:\n        str (str): The string to be cleaned.\n\n    Returns:\n        str: The cleaned string, or an empty string if nothing matches.\n\n    Example:\n        >>> clean_string(\"Hi! This is a string with (a lot) of [punctuation] and $ymbols.\")\n        'Hi'\n    \"\"\"\n    res = re.findall(r\"([\\d\\w\\s\\.]+)\\W\", str)\n    return res[0].strip(\"\\n\") if len(res) else \"\"\n\n\ndef get_pos_in_area(area):\n    \"\"\"\n    Generate a random position within the given area.\n\n    Args:\n        area: A tuple containing two tuples representing the top-left and bottom-right corners\n            of the area, respectively. Each corner is represented by a tuple of (x, y) coordinates.\n\n    Returns:\n        A tuple containing the x and y coordinates of the randomly generated position within the area.\n    \"\"\"\n    x = random.randint(area[0][0], area[1][0])\n    y = random.randint(area[0][1], area[1][1])\n    return x, y\n\n\ndef get_centre_pos_from_box(box):\n    \"\"\"Calculate the center position of a bounding box object.\n\n    Args:\n        box (pyautogui.Box): A bounding box object.\n\n    Returns:\n        tuple: A tuple of the x and y coordinates of the center of the box.\n    \"\"\"\n    return (box.left + box.width // 2, box.top + box.height // 2)\n\n\ndef get_centre_pos(left, top, width, height):\n    \"\"\"Calculate the center position of a rectangle given its coordinates.\n\n    Args:\n        left (int): The x-coordinate of the top-left corner of the rectangle.\n        top (int): The y-coordinate of the top-left corner of the rectangle.\n        width (int): The width of the rectangle.\n        height (int): The height of the rectangle.\n\n    Returns:\n        tuple: A tuple of the x and y coordinates of the center of the rectangle.\n    \"\"\"\n    return (left + width // 2, top + height // 2)\n\n\ndef get_distance(pos1, pos2):\n    x1, y1 = pos1\n    x2, y2 = pos2\n    return math.sqrt(((x1 - x2) ** 2) + ((y1 - y2) ** 2))\n\n\n","repo_name":"lindenhutchinson/overwatch-settings-sync","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
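A worked example of the scaling math in get_scalars and get_scaled_position, assuming a hypothetical 1920x1080 monitor against the 2560x1440 reference: both branches end up taking the smaller-over-larger ratio, so recorded coordinates map proportionally.

    w_scalar = 1920 / 2560   # 0.75, since the monitor is narrower than SET_WIDTH
    h_scalar = 1080 / 1440   # 0.75
    x, y = 1000 * w_scalar, 500 * h_scalar
    print(x, y)              # 750.0 375.0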
+{"seq_id":"27548305337","text":"# Librairies\n\nimport json\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n\n\n\n\n# Constantes\n\nN = 3 # nombre de commentaires souhaités\n\n\n\n\n\n\n# Fonctions\n\ndef videosId() :\n\n with open(\"input.json\", 'r') as input_file :\n input_data = json.load(input_file)\n\n return input_data[\"videos_id\"]\n\n\n\ndef videoUrl(video_id) :\n\n url = \"https://www.youtube.com/watch?v=\"\n\n return url + video_id\n\n\n\ndef videoData(video_url) :\n\n video_requete =requests.get(video_url).text\n\n return BeautifulSoup(video_requete, \"html.parser\")\n\n\n\ndef videoTitre(video_data) :\n\n titre = video_data.find(\"meta\", attrs = {\"name\":\"title\"})\n\n return titre.get(\"content\")\n\n\n\ndef videoAuteur(video_data) :\n\n auteur = video_data.find(\"link\", attrs = {\"itemprop\":\"name\"})\n\n return auteur.get(\"content\")\n\n\n\ndef videoPoucesBleus(video_data) :\n\n scripts = video_data.find_all(\"script\") \n for script in scripts :\n if \"clics\" in script.get_text() :\n position_debut = script.string.index(\"LIKE\") + 69\n position_fin = script.string.index(\"clics\") - 1\n pouces_bleus = script.string[position_debut:position_fin].replace(\"\\u202f\", \"\")\n break\n\n return int(pouces_bleus)\n\n\n\ndef videoDescription(video_data) :\n\n scripts = video_data.find_all(\"script\")\n for script in scripts :\n if \"shortDescription\" in script.get_text() :\n position_debut = script.string.index(\"shortDescription\") + 19\n position_fin = script.string.index(\"isCrawlable\") - 3\n description = script.string[position_debut:position_fin]\n break\n\n return description\n\n\n\ndef videoLiensTimestamp(video_data) :\n\n scripts = video_data.find_all(\"script\")\n nombre = 0\n liens = []\n for script in scripts :\n if \"continuePlayback\" in script.get_text() :\n nombre = script.string.count(\"continuePlayback\")//4\n marqueur = 0\n for i in range(nombre) :\n position = script.string[marqueur:].index(\"continuePlayback\") + marqueur\n position_debut = script.string[:position].rindex(\"url\") + 6\n position_fin = script.string[:position].rindex(\"webPageType\") - 3\n liens.append(\"https://www.youtube.com\" + script.string[position_debut:position_fin].replace(\"\\\\u0026\", \"&\"))\n marqueur = position + 1\n break\n\n return liens\n\n\n\ndef videoLiensAutres(video_data) :\n\n scripts = video_data.find_all(\"script\")\n liens_https = []\n liens_http = []\n for script in scripts :\n if \"shortDescription\" in script.get_text() :\n position = script.string.index(\"shortDescription\")\n marqueur = script.string.index(\"isCrawlable\")\n nombre_https = script.string[position:marqueur].count(\"https://\")\n while len(liens_https) < nombre_https :\n position_debut = script.string[position:].index(\"https://\") + position\n position_fin = script.string[position_debut:].index(\"\\\\n\") + position_debut\n liens_https.append(script.string[position_debut:position_fin])\n position = position_fin + 1\n position = script.string.index(\"shortDescription\")\n marqueur = script.string.index(\"isCrawlable\")\n nombre_http = script.string[position:marqueur].count(\"http://\")\n while len(liens_http) < nombre_http :\n position_debut = script.string[position:].index(\"http://\") + position\n position_fin = script.string[position_debut:].index(\"\\\\n\") + position_debut\n liens_http.append(script.string[position_debut:position_fin])\n position = position_fin + 1\n break\n\n return liens_https + liens_http\n\n\n\ndef videoCommentaires(video_data, n = N) :\n\n return 
0\n\n\n\n\n\n\n# Script\n\nids = videosId()\nurls = list(map(videoUrl, ids))\nvideos = list(map(videoData, urls))\ntitres = list(map(videoTitre, videos))\nauteurs = list(map(videoAuteur, videos))\nlikes = list(map(videoPoucesBleus, videos))\ndescriptions = list(map(videoDescription, videos))\nliens_timestamp = list(map(videoLiensTimestamp, videos))\nliens_autres = list(map(videoLiensAutres, videos))\ncommentaires = list(map(videoCommentaires, videos))\n\nclass VideoYoutube :\n\n def __init__(self, i) :\n self.titre = titres[i]\n self.auteur = auteurs[i]\n self.likes = likes[i]\n self.description = descriptions[i]\n self.liens_timestamp = liens_timestamp[i]\n self.liens_autres = liens_autres[i]\n self.id = ids[i]\n self.commentaires = commentaires[i]\n\n def dictionnaire(self) :\n return {\"titre\":self.titre,\n \"auteur\":self.auteur,\n \"likes\":self.likes,\n \"description\":self.description,\n \"liens\":(self.liens_timestamp + self.liens_autres),\n \"id\":self.id,\n \"commentaires\":self.commentaires}\n\nvideos_youtube = dict()\nfor i in range(len(ids)) :\n video_youtube = VideoYoutube(i)\n videos_youtube[\"video_{}\".format(i)] = video_youtube.dictionnaire()\n\nwith open(\"output.json\", 'w') as output_file :\n json.dump(videos_youtube, output_file)","repo_name":"Wael382/TP_Youtube_Scraper","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":5195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
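The index()-based slicing in that script raises ValueError the moment a marker string is missing from the page source. A hedged regex sketch that collects a description's links in one pass; the sample input is hypothetical:

    import re

    description = "voir https://example.com\net http://example.org"  # hypothetical input
    links = re.findall(r"https?://[^\s\\]+", description)
    print(links)  # ['https://example.com', 'http://example.org']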
+{"seq_id":"1221442119","text":"file = open(\"input.txt\")\nlines = file.readlines()\n\nreactor = set()\nfor line in lines:\n state, coords = line.replace(\"\\n\", \"\").split(\" \")\n x, y, z = coords.split(\",\")\n x = tuple(map(int, x.split(\"=\")[1].split(\"..\")))\n y = tuple(map(int, y.split(\"=\")[1].split(\"..\")))\n z = tuple(map(int, z.split(\"=\")[1].split(\"..\")))\n if x[0] < -50 or x[1] > 50: break\n\n state = state == \"on\"\n\n for _x in range(x[0], x[1]+1):\n for _y in range(y[0], y[1]+1):\n for _z in range(z[0], z[1]+1):\n if state: reactor.add((_x, _y, _z))\n else: \n if (_x, _y, _z) in reactor: reactor.remove((_x, _y, _z))\n\nprint(len(reactor))","repo_name":"nalo26/Advent-Of-Code","sub_path":"2021/day22/day22 - part1.py","file_name":"day22 - part1.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"}
+{"seq_id":"43281913590","text":"'''\nName: PrintLoss\nDesriptption: Experiment One \nEmail: yesunhuang@mail.ustc.edu.cn\nOpenSource: https://github.com/yesunhuang\nMsg: Experiment One\nAuthor: YesunHuang\nDate: 2022-04-17 20:40:50\n'''\n#import all the things we need\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nimport torch\nimport matplotlib.pyplot as plt\n\nDRAW_LOSS=True\n #Save path:\nif __name__=='__main__':\n currentPath=os.getcwd()\n netSavepath=os.path.join(currentPath,'TrainedNet','Exp')\n figSavepath=os.path.join(currentPath,'data','figures')\n\nif __name__=='__main__':\n QFPM_filename='QExpF1.pt'\n QFPM_FT_filename='QExpFT1.pt'\n CSM_filename='CExpFN.pt'\n CQCM_filename='CExpFQC.pt'\n QISMM_filename='QExpFM.pt'\n QFPMD_filename='QExpFD.pt'\n\n# Load loss\nif __name__=='__main__':\n loss_QFPM=torch.load(os.path.join(netSavepath,QFPM_filename))['Loss']\n trainLoss_QFPM=[l[0] for l in loss_QFPM];testLoss_QFPM=[l[1] for l in loss_QFPM]\n loss_QFPM_FT=torch.load(os.path.join(netSavepath,QFPM_FT_filename))['Loss']\n trainLoss_QFPM_FT=[l[0] for l in loss_QFPM_FT];testLoss_QFPM_FT=[l[1] for l in loss_QFPM_FT]\n loss_CSM=torch.load(os.path.join(netSavepath,CSM_filename))['Loss']\n trainLoss_CSM=[l[0] for l in loss_CSM];testLoss_CSM=[l[1] for l in loss_CSM]\n loss_CQCM=torch.load(os.path.join(netSavepath,CQCM_filename))['Loss']\n trainLoss_CQCM=[l[0] for l in loss_CQCM];testLoss_CQCM=[l[1] for l in loss_CQCM]\n loss_QISMM=torch.load(os.path.join(netSavepath,QISMM_filename))['Loss']\n trainLoss_QISMM=[l[0] for l in loss_QISMM];testLoss_QISMM=[l[1] for l in loss_QISMM]\n loss_QFPMD=torch.load(os.path.join(netSavepath,QFPMD_filename))['Loss']\n trainLoss_QFPMD=[l[0] for l in loss_QFPMD];testLoss_QFPMD=[l[1] for l in loss_QFPMD]\n \n# Draw loss\nif DRAW_LOSS and __name__=='__main__':\n figName='TrainLoss'\n fig,axes=plt.subplots(1,1,figsize=(8,6))\n axes.set_xlabel('Epoch')\n axes.set_ylabel('Train Loss')\n #axes.set_xlim(0,300)\n cor={'QDMM':'lightskyblue','QDMM_FT':'limegreen','CSM':'lightcoral',\\\n 'CQCM':'khaki','QDMMM':'orange','QDMMD':'violet'}\n axes.plot(range(0,len(trainLoss_QFPM)),trainLoss_QFPM,color=cor['QDMM'],linestyle='-',label='QDMM')\n axes.plot(range(0,len(trainLoss_QFPMD)),trainLoss_QFPMD,color=cor['QDMMD'],linestyle='-',label='QDMMD')\n axes.plot(range(0,len(trainLoss_QISMM)),trainLoss_QISMM,color=cor['QDMMM'],linestyle='-',label='QDMMM')\n axes.plot(range(0,len(trainLoss_QFPM_FT[302:])),trainLoss_QFPM_FT[302:],color=cor['QDMM_FT'],linestyle='-',label='QDMM(FT)')\n axes.plot(range(0,len(trainLoss_CSM)),trainLoss_CSM,color=cor['CSM'],linestyle='-',label='CSM')\n axes.plot(range(0,len(trainLoss_CQCM)),trainLoss_CQCM,color=cor['CQCM'],linestyle='-',label='CQCM')\n plt.legend()\n plt.show()\n fig.savefig(os.path.join(figSavepath,figName+'.svg'),dpi=600,format='svg',bbox_inches='tight')\n fig.savefig(os.path.join(figSavepath,figName+'.pdf'),dpi=600,format='pdf',bbox_inches='tight')","repo_name":"yesunhuang/QRNNs_Memory","sub_path":"src/PrintLoss.py","file_name":"PrintLoss.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"1405381767","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n # dfs and hashtable, save the path: O(N) time & space\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n def dfs(root, target, path):\n path.append(root)\n if root == target:\n return\n elif root.val > target.val:\n dfs(root.left, target, path)\n else:\n dfs(root.right, target, path)\n \n path_p, path_q = [], []\n dfs(root, p, path_p)\n dfs(root, q, path_q)\n path_p = {node:1 for node in path_p}\n for node in path_q[::-1]:\n if node in path_p:\n return node\n return None\n \n # dfs, if subtree t has both p and q, t is LCA: O(N) time, O(N) space\n def lowestCommonAncestor(self, root, p, q):\n if root in [p, q, None]:\n return root\n l = self.lowestCommonAncestor(root.left, p, q)\n r = self.lowestCommonAncestor(root.right, p, q)\n if l and r:\n return root\n return l if l else r\n \n # bst dfs: O(N) time, O(1) space\n def lowestCommonAncestor(self, root, p, q):\n if (root.val - p.val) * (root.val - q.val) <= 0:\n return root\n if p.val < root.val:\n return self.lowestCommonAncestor(root.left, p, q)\n return self.lowestCommonAncestor(root.right, p, q)\n ","repo_name":"haomingchan0811/Leetcode","sub_path":"235. Lowest Common Ancestor of a Binary Search Tree.py","file_name":"235. Lowest Common Ancestor of a Binary Search Tree.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"6331632571","text":"\"\"\"Motif object definition.\n\nA single Motif object stores important values, used during the following steps of \nGRAFIMO's analysis, such as motif PSSM, p-value matrix, scaling factor, offset,\nmotif information, etc.\n\"\"\"\n\n\nfrom grafimo.grafimo_errors import NotValidMotifMatrixError \nfrom grafimo.utils import isListEqual, DNA_ALPHABET \n\nfrom typing import List, Optional, Dict \n\nimport pandas as pd\nimport numpy as np\n\n\nclass Motif(object):\n \"\"\"\n This class defines a DNA motif object.\n\n In a single object we carry: \n * the original count matrix or probability matrix \n * the motif scaled scoring matrix \n * the P-value matrix used to assign a P-value to each motif \n occurrence candidate score \n * the parameters used to scale the matrix (to revert the scaled \n score to the log-odds score) \n * the background probability distribution used, while processing the\n PWM values \n * the motif width\n * the minimum value in the scoring matrix\n * the maximum value in the scoring matrix\n * the motif name (both ID and extended name)\n * the motif alphabet \n \n ...\n\n Attributes\n ----------\n _count_matrix : numpy.ndarray\n motif probability matrix\n _score_matrix : numpy.ndarray\n scaled motif scoring matrix\n _min_val : int\n minimum value of the scaled scoring matrix\n _max_value : int\n maximum value of the scaled scoring matrix\n _scale : int\n scaling value\n _offset : numpy.double\n offset used during motif matrix scaling\n _bg : dict\n background probability distribution\n _width : int\n motif width\n _motif_id : str\n motif ID\n _motif_name : str\n motif extended name\n _alphabet : list()\n DNA motif alphabet\n _isScaled : bool\n flag value to state if the scoring matrix has been scaled\n\n Methods\n -------\n setMotif_matrix(motif_matrix : pandas.DataFrame)\n set the count matrix\n setMotif_scoreMatrix(score_matrix : numpy.ndarray)\n set the scoring matrix\n setMotif_pval_matrix(pval_mat : numpy.array)\n set the P-value matrix\n setMin_val(min_val : int)\n set the scoring matrix minimum value\n setMax_val(max_val : int)\n set the scoring matrix maximum value\n setScale(scale : int)\n set the scoring matrix scaling factor\n setOffset(offset : numpy.double)\n set the scaling offset\n setBg(bgs : dict)\n set the background probability distribution\n setWidth(width : int)\n set motif width\n setMotifID(motif_id : str)\n set motif ID\n setMotifName(motif_name : str)\n set motif extended name\n setAlphabet(alphabet : list)\n set DNA motif alphabet\n setIsScaled(isScaled : bool)\n set the isScaled flag value\n getMotif_matrix()\n return the motif count matrix\n getMotif_scoreMatrix()\n return the motif scaled scoring matrix\n getMotif_pval_mat()\n return the P-value matrix\n getMin_val()\n return the scoring matrix minimum value\n getMax_val()\n return the scoring matrix maximum value\n getScale()\n return the matrix scaling factor\n getOffset()\n return the offset used while scaling the motif scoring matrix\n getBg()\n return the background probability distribution\n getWidth():\n return motif width\n getMotifID()\n return the motif ID\n getMotifName()\n return the motif extended name\n getAlphabet()\n return the DNA motif alphabet\n getIsScaled()\n return the isScaled flag value\n compute_minValue()\n compute the minimum value of the scaled scoring motif matrix\n print()\n print one matrix among the counts one, the scoring one or the \n P-value one \n \"\"\"\n\n # class attributes value initialization\n _min_val = -np.inf\n 
_max_val = np.inf\n _scale = -1\n _offset = 0\n _width = -1\n _is_scaled = False\n #-------------------------------------------------------------------\n # Motif methods\n # \n # these errors should never appear --> no need for error formatting\n # can assume that debug mode == True\n def __init__(\n self,\n count_matrix: np.ndarray,\n width: int,\n alphabet: List[str],\n motif_id: str,\n motif_name: str,\n nucsmap: dict\n ):\n if not isinstance(count_matrix, np.ndarray):\n errmsg = f\"\\n\\nERROR: Expected {type(np.ndarray).__name__}, got {type(count_matrix).__name__}.\\n\"\n raise TypeError(errmsg)\n if count_matrix.size == 0 or sum(sum(count_matrix)) == 0:\n errmsg = \"\\n\\nERROR: Empty motif count matrix.\\n\"\n raise NotValidMotifMatrixError(errmsg)\n if not isinstance(width, int):\n errmsg = f\"\\n\\nERROR: Expected {int.__name__}, got {type(width).__name__}.\\n\"\n raise TypeError(errmsg)\n if width <= 0:\n errmsg = f\"\\n\\nERROR: Forbidden motif width ({width}).\\n\"\n raise ValueError(errmsg)\n if not isinstance(motif_id, str):\n errmsg = f\"\\n\\nERROR: Expected {str.__name__}, got {type(motif_id).__name__}.\\n\"\n raise TypeError(errmsg)\n if not motif_id:\n errmsg = \"\\n\\nERROR: Not valid motif ID.\\n\"\n raise ValueError(errmsg)\n if not isinstance(motif_name, str):\n errmsg = f\"\\n\\nERROR: Expected {str.__name__}, got {type(motif_name).__name__}.\\n\"\n raise TypeError(errmsg)\n if not motif_name:\n errmsg = \"\\n\\nERROR: Not valid motif name.\\n\"\n raise ValueError(errmsg)\n if not isinstance(alphabet, list):\n errmsg = f\"\\n\\nERROR: Expected {list.__name__}, got {type(alphabet).__name__}.\\n\"\n raise TypeError(errmsg)\n if not isListEqual(alphabet, DNA_ALPHABET):\n errmsg = \"\\n\\nERROR: The motif is not built on DNA alphabet.\\n\"\n raise ValueError(errmsg)\n if not isinstance(nucsmap, dict):\n errmsg = f\"\\n\\nERROR: Expected {dict.__name__}, got {type(nucsmap).__name__}.\\n\"\n raise TypeError(errmsg)\n self._count_matrix = count_matrix\n self._width = width\n self._motif_id = motif_id\n self._motif_name = motif_name\n self._alphabet = alphabet\n self._nucsmap = nucsmap\n\n\n def set_motif_matrix(self, motif_matrix: pd.DataFrame) -> None:\n if not isinstance(motif_matrix, pd.DataFrame):\n errmsg = f\"\\n\\nERROR: Expected {type(pd.DataFrame).__name__}, got {type(motif_matrix).__name__}.\\n\"\n raise TypeError(errmsg)\n if motif_matrix.empty:\n errmsg = \"\\n\\nERROR: Empty motif matrix.\\n\"\n raise ValueError(errmsg)\n self._count_matrix = motif_matrix\n\n\n def set_motif_score_matrix(self, score_matrix: np.ndarray) -> None:\n if not isinstance(score_matrix, np.ndarray):\n errmsg = f\"\\n\\nERROR: Expected {type(np.ndarray).__name__}, got {type(score_matrix).__name__}.\\n\"\n raise TypeError(errmsg)\n if score_matrix.size == 0 or sum(sum(score_matrix)) == 0:\n errmsg = \"\\n\\nERROR: Empty motif score matrix.\\n\"\n raise ValueError(errmsg)\n self._score_matrix = score_matrix\n\n\n def set_motif_pval_matrix(self, pval_mat: np.array) -> None:\n if not isinstance(pval_mat, np.ndarray):\n errmsg = f\"\\n\\nERROR: Expected {type(np.array).__name__}, got {type(pval_mat).__name__}.\\n\"\n raise TypeError(errmsg)\n if len(pval_mat) == 0:\n errmsg = \"\\n\\nERROR: Empty motif p-value matrix.\\n\"\n raise ValueError(errmsg)\n if sum(pval_mat) == 0:\n errmsg = \"\\n\\nERROR: Not valid motif p-value matrix.\\n\"\n raise ValueError(errmsg)\n self._pval_matrix = pval_mat\n\n\n def set_min_val(self, min_val: int) -> None:\n if not isinstance(min_val, int):\n errmsg = 
f\"\\n\\nERROR: Expected {int.__name__}, got {type(min_val).__name__}.\\n\"\n raise TypeError(errmsg)\n if min_val <= -np.inf:\n errmsg = f\"\\n\\nERROR: Forbidden value {min_val}.\\n\"\n raise ValueError(errmsg)\n self._min_val = min_val\n\n\n def set_max_val(self, max_val: int) -> None:\n if not isinstance(max_val, int):\n errmsg = f\"\\n\\nERROR: Expected {int.__name__}, got {type(max_val).__name__}.\\n\"\n raise TypeError(errmsg)\n if max_val >= np.inf:\n errmsg = f\"\\n\\nERROR: Forbidden value ({max_val}).\\n\"\n raise ValueError(errmsg)\n self._max_val = max_val\n\n\n def set_scale(self, scale: int) -> None:\n if not isinstance(scale, int):\n errmsg = f\"\\n\\nERROR: Expected {int.__name__}, got {type(scale).__name__}.\\n\"\n raise TypeError(errmsg)\n if scale <= 0:\n errmsg = \"\\n\\nERROR: Scaling factor must be positive integer number.\\n\"\n raise ValueError(errmsg)\n self._scale = scale\n\n\n def set_offset(self, offset: np.double) -> None: \n if not isinstance(offset, np.double):\n errmsg = f\"\\n\\nERROR: Expected {type(np.double).__name__}, got {type(offset).__name__}.\\n\"\n raise TypeError(errmsg) \n self._offset = offset\n\n\n def set_bg(self, bgs: Dict[str, float]) -> None:\n if not isinstance(bgs, dict):\n errmsg = f\"\\n\\nERROR: Expected {dict.__name__}, got {type(bgs).__name__}.\\n\"\n raise TypeError(errmsg)\n self._bg = bgs\n\n\n def set_width(self, width: int) -> None:\n if not isinstance(width, int):\n errmsg = f\"\\n\\nERROR: Expected {int.__name__}, got {type(width).__name__}.\\n\"\n raise TypeError(errmsg)\n if width <= 0:\n errmsg = \"\\n\\nERROR: Not valid motif width.\\n\"\n raise ValueError(errmsg)\n self._width = width\n\n\n def set_motif_id(self, motif_id: str) -> None:\n if not isinstance(motif_id, str):\n errmsg = f\"\\n\\nERROR: Expected {str.__name__}, got {type(motif_id).__name__}.\\n\"\n raise TypeError(errmsg)\n if not motif_id:\n errmsg = \"\\n\\nERROR: Not valid motif ID.\\n\"\n raise ValueError(errmsg)\n self._motif_id = motif_id\n\n\n def set_motif_name(self, motif_name: str) -> None:\n if not isinstance(motif_name, str):\n errmsg = f\"\\n\\nERROR: Expected {str.__name__}, got {type(motif_name).__name__}.\\n\"\n raise TypeError(errmsg)\n if not motif_name:\n errmsg = \"\\n\\nERROR: Not valid motif name.\\n\"\n raise ValueError(errmsg)\n self._motif_name = motif_name\n\n\n def set_alphabet(self, alphabet: List[str]) -> None:\n if not isinstance(alphabet, list):\n errmsg = f\"\\n\\nERROR: Expected {list.__name__}, got {type(alphabet).__name__}.\\n\"\n raise TypeError(errmsg)\n if len(alphabet) == 0:\n errmsg = \"\\n\\nERROR: Empty motif alphabet.\\n\"\n raise ValueError(errmsg)\n if not isListEqual(alphabet, DNA_ALPHABET):\n errmsg = \"\\n\\nERROR: The motif is not built on DNA alphabet.\\n\"\n raise ValueError(errmsg)\n self.alphabet = alphabet\n\n\n def set_is_scaled(self) -> None:\n if self._is_scaled:\n errmsg = \"\\n\\nERROR: The motif matrix has already been scaled.\\n\"\n raise AssertionError(errmsg)\n self._is_scaled = True\n\n\n def _get_motif_matrix(self) -> np.ndarray:\n if self._count_matrix.size == 0 or sum(sum(self._count_matrix)) == 0:\n errmsg = \"\\n\\nERROR: \\\"self._count_matrix\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._count_matrix\n \n @property\n def count_matrix(self):\n return self._get_motif_matrix()\n \n\n def _get_motif_score_matrix(self) -> np.ndarray:\n if self._score_matrix.size == 0 or sum(sum(self._score_matrix)) == 0:\n errmsg = \"\\n\\nERROR: \\\"self._score_matrix\\\" is 
empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._score_matrix\n\n @property\n def score_matrix(self):\n return self._get_motif_score_matrix()\n\n\n def _get_motif_pval_mat(self) -> np.ndarray:\n if self._pval_matrix.size == 0 or sum(self._pval_matrix) == 0:\n errmsg = \"\\n\\nERROR: \\\"self._pval_matrix\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._pval_matrix\n \n @property\n def pval_matrix(self):\n return self._get_motif_pval_mat()\n\n\n def _get_min_val(self) -> int:\n return self._min_val\n\n @property\n def min_val(self):\n return self._get_min_val()\n\n\n def _get_max_val(self) -> int:\n return self._max_val\n\n @property\n def max_val(self):\n return self._get_max_val()\n\n\n def _get_scale(self) -> int:\n return self._scale\n\n @property\n def scale(self):\n return self._get_scale()\n\n\n def _get_nucsmap(self):\n if not bool(self._nucsmap):\n errmsg = \"\\n\\nERROR: \\\"self._nucsmap\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._nucsmap\n \n @property\n def nucsmap(self):\n return self._get_nucsmap()\n\n\n def _get_offset(self) -> np.double:\n return self._offset\n\n @property\n def offset(self):\n return self._get_offset()\n\n\n def _get_bg(self) -> dict:\n if not bool(self._bg):\n errmsg = \"\\n\\nERROR: \\\"self._bg\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._bg\n\n @property\n def bg(self):\n return self._get_bg()\n\n\n def _get_width(self) -> int:\n return self._width\n\n @property\n def width(self):\n return self._get_width()\n\n\n def _get_motif_id(self) -> str:\n if not self._motif_id:\n errmsg = \"\\n\\nERROR: \\\"self._motif_id\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._motif_id\n\n @property\n def motif_id(self):\n return self._get_motif_id()\n\n\n def _get_motif_name(self) -> str:\n if not self._motif_name:\n errmsg = \"\\n\\nERROR: \\\"self._motif_name\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._motif_name\n\n @property\n def motif_name(self):\n return self._get_motif_name()\n\n\n def _get_alphabet(self) -> List[str]:\n if not self._alphabet:\n errmsg = \"\\n\\nERROR: \\\"self._alphabet\\\" si empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._alphabet\n \n @property\n def alphabet(self):\n return self._get_alphabet()\n\n\n def _get_is_scaled(self) -> bool:\n return self._is_scaled\n\n @property\n def is_scaled(self):\n return self._get_is_scaled()\n\n\n def compute_min_value(self) -> None:\n min_value = self._score_matrix.min()\n self._min_val = min_value\n\n\n def print(self, matrix: str) -> None:\n if not isinstance(matrix, str):\n errmsg = \"\\n\\nERROR: Expected str, got {}.\\n\"\n raise TypeError(errmsg.format(type(matrix).__name__))\n if not matrix:\n errmsg = \"\\n\\nERROR: Unable to guess what should be printed.\\n\"\n raise ValueError(errmsg)\n available_matrices = [\"raw_counts\", \"score_matrix\", \"pval_matrix\"]\n if matrix not in available_matrices:\n errmsg = \"\\n\\nERROR: Unknown motif matrix.\\n\"\n raise ValueError(errmsg)\n if matrix == \"raw_counts\": print(self._count_matrix)\n elif matrix == \"score_matrix\": print(self._score_matrix)\n elif matrix == \"pval_matrix\": print(self._pval_matrix)\n else: # we should never reach this point\n errmsg = \"\\n\\nERROR: Unknown motif matrix.\\n\"\n raise ValueError(errmsg)\n \n# end of 
Motif\n\n","repo_name":"pinellolab/GRAFIMO","sub_path":"src/grafimo/motif.py","file_name":"motif.py","file_ext":"py","file_size_in_byte":15872,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"76"}
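A minimal construction sketch for the Motif class above. The counts, ID and name are hypothetical, and DNA_ALPHABET is assumed to be ['A', 'C', 'G', 'T'], which is what the validation implies:

    import numpy as np

    counts = np.array([[8, 1, 0, 12],
                       [1, 9, 2, 0],
                       [0, 1, 10, 0],
                       [3, 1, 0, 0]])  # hypothetical counts for a width-4 motif
    nucsmap = {"A": 0, "C": 1, "G": 2, "T": 3}
    motif = Motif(counts, 4, ["A", "C", "G", "T"], "MOTIF0001", "example_motif", nucsmap)
    print(motif.width, motif.motif_id)  # 4 MOTIF0001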
+{"seq_id":"3325258978","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# =================================================================\n# =================================================================\n\nfrom oslo.config import cfg\n\nibmpowervm_opts = [\n cfg.StrOpt('powervm_vif_driver',\n default='paxes_nova.'\n 'virt.ibmpowervm.vif.ivm.local_driver_ivm.IBMPowerVMVlanVIFDriverLocal',\n help='Driver to plug/Unplug VLANs into adapters'),\n cfg.StrOpt('powervm_vif_topo_driver',\n default='paxes_nova.'\n 'virt.ibmpowervm.vif.ivm.local_topo_ivm.IBMPowerVMNetworkTopoIVMLocal',\n help='Driver to gather the topology of the network.'),\n cfg.StrOpt('powervm_base_operator_factory',\n default='paxes_nova.virt.ibmpowervm.ivm.local_operator.BaseOperatorFactory',\n help='Run VIOS command with SSH or local')\n]\n\nCONF = cfg.CONF\nCONF.register_opts(ibmpowervm_opts)\n","repo_name":"windskyer/k_nova","sub_path":"paxes_nova/virt/ibmpowervm/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"5479663774","text":"import os\nimport time\nimport pandas as pd\nfrom datetime import datetime\n\nfrom dash import Dash, no_update, ctx, Output, Input, State\nimport dash_bootstrap_components as dbc\nfrom dash.exceptions import PreventUpdate\n\nfrom src.market_data import update_market_data\nfrom src.components.table_cards import get_row_highlight_condition\nfrom src.components.figures import get_candlestick_figure, get_bar_figure\nfrom src.utils import filter_df, add_emas\n\n\ndef register_callbacks(app: Dash):\n\n @app.long_callback(\n Output(\"timestamp\", \"data\"),\n Input(\"update_button\", \"n_clicks\"),\n running=[\n (Output(\"update_button\", \"disabled\"), True, False),\n (Output(\"update_button\", \"children\"), [dbc.Spinner(size=\"sm\"), \" Updating...\"], \"Update Data\"),\n ]\n )\n def update_data(n_clicks):\n \"\"\" \n Update all market data on startup or when the update button was clicked. \n Once the data is ready, the timestamp is updated, which triggers other callbacks.\n \"\"\"\n timestamp = int(time.time())\n update_market_data()\n return timestamp\n\n\n @app.callback(\n Output(\"last_update_text\", \"children\"),\n Input(\"timestamp\", \"data\"),\n prevent_initial_call=True,\n )\n def set_last_update_text(timestamp):\n \"\"\" Display the time of the last update once the new data is available. \"\"\"\n return f\"Last update: {datetime.fromtimestamp(timestamp).strftime('%d.%m.%Y, %H:%M')}\"\n\n \n @app.callback(\n Output(\"trend_table\", \"data\"),\n Input(\"timestamp\", \"data\"),\n Input(\"radio_trend\", \"value\"),\n prevent_initial_call=True,\n )\n def update_trend_table(timestamp, filter):\n \"\"\" Update the data table of the uptrend screener whenever the data was updated or another filter was selected. \"\"\"\n df = pd.read_csv(os.path.join(\"data\", \"market_data.csv\"), index_col=\"name\")\n df = df.drop([\"BTC\"]) # only keep altcoins\n df[\"id\"] = df.index\n df = filter_df(df, filter)\n df = df[[\"id\", \"trend_strength\", \"gain_1d\", \"gain_1w\", \"gain_1m\"]]\n\n return df.to_dict(\"records\")\n\n\n @app.callback(\n Output(\"pump_table\", \"data\"),\n Input(\"timestamp\", \"data\"),\n Input(\"radio_pump\", \"value\"),\n prevent_initial_call=True,\n )\n def update_pump_table(timestamp, filter):\n \"\"\" Update the data table of the pump screener whenever the data was updated or another filter was selected. \"\"\"\n df = pd.read_csv(os.path.join(\"data\", \"market_data.csv\"), index_col=\"name\")\n df = df.drop([\"BTC\"]) # only keep altcoins\n df[\"id\"] = df.index\n df = filter_df(df, filter)\n df = df.loc[df[\"pump_strength\"] > 2]\n df = df[[\"id\", \"pump_strength\", \"gain_1d\", \"gain_1w\", \"gain_1m\"]] \n df = df.sort_values(by=[\"pump_strength\"], ascending=False)\n\n return df.to_dict(\"records\")\n\n\n @app.callback(\n Output(\"trend_table\", \"page_current\"),\n Output(\"pump_table\", \"page_current\"),\n Input(\"timestamp\", \"data\"),\n Input(\"trend_table\", \"sort_by\"),\n )\n def reset_to_first_page(timestamp, sort_by):\n \"\"\" \n Go to the first page of both data tables whenever the data was updated. 
\n Go to the first page of the uptrend data table whenever the user changes the sorting.\n \"\"\"\n if ctx.triggered_id == \"timestamp\":\n return 0, 0\n return 0, no_update\n\n\n @app.callback(\n Output(\"altcoin\", \"data\"),\n Output(\"trend_table\", \"active_cell\"), Output(\"trend_table\", \"selected_cells\"), Output(\"trend_table\", \"style_data_conditional\"),\n Output(\"pump_table\", \"active_cell\"), Output(\"pump_table\", \"selected_cells\"), Output(\"pump_table\", \"style_data_conditional\"),\n Input(\"trend_table\", \"active_cell\"), Input(\"pump_table\", \"active_cell\"), Input(\"timestamp\", \"data\"),\n Input(\"radio_trend\", \"value\"), Input(\"radio_pump\", \"value\"),\n State(\"trend_table\", \"style_data_conditional\"), State(\"pump_table\", \"style_data_conditional\"),\n prevent_initial_call=True,\n )\n def select_altcoin(active_cell_trend, active_cell_pump, timestamp, filter_trend, filter_pump, style_trend, style_pump):\n \"\"\" Highlight the table row of the currently selected altcoin. \"\"\"\n # remove highlighting when reloading or applying filters\n if ctx.triggered_id in [\"timestamp\", \"radio_trend\", \"radio_pump\"]:\n style_trend[1] = {}\n style_pump[1] = {}\n return no_update, None, [], style_trend, None, [], style_pump\n\n altcoin = no_update\n if ctx.triggered_id == \"trend_table\":\n if active_cell_trend:\n condition = get_row_highlight_condition(active_cell_trend[\"row\"])\n style_trend[1] = condition\n style_pump[1] = {}\n altcoin = active_cell_trend[\"row_id\"]\n else:\n style_trend[1] = {}\n style_pump = no_update\n else:\n if active_cell_pump:\n condition = get_row_highlight_condition(active_cell_pump[\"row\"])\n style_pump[1] = condition\n style_trend[1] = {}\n altcoin = active_cell_pump[\"row_id\"]\n else:\n style_pump[1] = {}\n style_trend = no_update\n \n return altcoin, None, [], style_trend, None, [], style_pump\n\n \n @app.callback(\n Output(\"bar_chart\", \"children\"),\n Input(\"timestamp\", \"data\"),\n Input(\"radio_overview_filter\", \"value\"),\n Input(\"radio_overview_timeframe\", \"value\"),\n prevent_initial_call=True,\n )\n def update_overview_card(timestamp, filter, timeframe):\n \"\"\" \n Update the bar figure containing the top gainers whenever the data was updated \n or another filter or timeframe was selected. \n \"\"\"\n df = pd.read_csv(os.path.join(\"data\", \"market_data.csv\"), index_col=\"name\")\n col = f\"gain_{timeframe.lower()}\"\n btc_gain = df.loc[\"BTC\", col]\n df = df.drop([\"BTC\"]) # only keep altcoins\n df = filter_df(df, filter)\n df = df.sort_values(by=[col], ascending=False).iloc[:30]\n\n return get_bar_figure(names=df.index, gains=df[col], btc_gain=btc_gain, timeframe=timeframe)\n \n\n @app.callback(\n Output(\"bitcoin_chart\", \"children\"),\n Input(\"timestamp\", \"data\"),\n Input(\"radio_btc_chart\", \"value\"),\n prevent_initial_call=True,\n )\n def update_bitcoin_chart(timestamp, timeframe):\n \"\"\" Update the Bitcoin chart whenever the data was updated or another timeframe was selected. 
\"\"\"\n klines = pd.read_csv(os.path.join(\"data\", \"klines\", \"BTC.csv\"), index_col=\"timestamp\")\n klines = add_emas(klines=klines, ema_lengths=[12, 21, 50])\n\n if timeframe == \"1W\":\n klines = klines.iloc[-42:]\n else:\n klines = klines.iloc[-186:]\n\n return get_candlestick_figure(title=\"BTC / USD\", klines=klines)\n \n\n @app.callback(\n Output(\"altcoin_usd_chart\", \"children\"), \n Output(\"altcoin_btc_chart\", \"children\"),\n Input(\"timestamp\", \"data\"),\n Input(\"altcoin\", \"data\"),\n Input(\"radio_altcoin_chart\", \"value\"), \n prevent_initial_call=True,\n )\n def update_altcoin_charts(timestamp, altcoin, timeframe):\n \"\"\" Update both altcoin charts whenever the data was updated or another timeframe was selected. \"\"\"\n if altcoin in [None, \"\"]:\n raise PreventUpdate\n \n btc_klines = pd.read_csv(os.path.join(\"data\", \"klines\", \"BTC.csv\"), index_col=\"timestamp\")\n usd_denom_klines = pd.read_csv(os.path.join(\"data\", \"klines\", f\"{altcoin}.csv\"), index_col=\"timestamp\")\n btc_denom_klines = pd.DataFrame(\n index=usd_denom_klines.index,\n data={\n \"open\": usd_denom_klines[\"open\"] / btc_klines[\"open\"], \n \"high\": usd_denom_klines[\"high\"] / btc_klines[\"close\"],\n \"low\": usd_denom_klines[\"low\"] / btc_klines[\"close\"], \n \"close\": usd_denom_klines[\"close\"] / btc_klines[\"close\"],\n },\n ).dropna()\n\n usd_denom_klines = add_emas(klines=usd_denom_klines, ema_lengths=[12, 21, 50])\n btc_denom_klines = add_emas(klines=btc_denom_klines, ema_lengths=[12, 21, 50])\n\n if timeframe == \"1W\":\n usd_denom_klines = usd_denom_klines.iloc[-42:]\n btc_denom_klines = btc_denom_klines.iloc[-42:]\n else:\n usd_denom_klines = usd_denom_klines.iloc[-186:]\n btc_denom_klines = btc_denom_klines.iloc[-186:]\n\n usd_chart = get_candlestick_figure(title=f\"{altcoin} / USD\", klines=usd_denom_klines)\n btc_chart = get_candlestick_figure(title=f\"{altcoin} / BTC\", klines=btc_denom_klines)\n\n return usd_chart, btc_chart\n\n\n @app.callback(\n Output(\"bitcoin_tradingview\", \"children\"),\n Output(\"bitcoin_exchanges\", \"children\"),\n Input(\"timestamp\", \"data\"),\n prevent_initial_call=True,\n )\n def update_bitcoin_links(timestamp):\n \"\"\" Update the TradingView and exchange links for Bitcoin whenever the data was updated. \"\"\"\n df = pd.read_csv(os.path.join(\"data\", \"config.csv\"), index_col=\"name\")\n tradingview_link = dbc.CardLink(\"TradingView\", target=\"_blank\", href=df.loc[\"BTC\", \"chart_usd\"])\n \n exchange_links = []\n if type(df.loc[\"BTC\", \"spot_usd\"]) == str:\n exchange_links.append(dbc.CardLink(\"Spot (USD)\", target=\"_blank\", href=df.loc[\"BTC\", \"spot_usd\"]))\n if type(df.loc[\"BTC\", \"perps\"]) == str:\n exchange_links.append(dbc.CardLink(\"Perpetuals\", target=\"_blank\", href=df.loc[\"BTC\", \"perps\"]))\n\n return tradingview_link, exchange_links\n\n\n @app.callback(\n Output(\"altcoin_tradingview\", \"children\"),\n Output(\"altcoin_exchanges\", \"children\"),\n Input(\"altcoin\", \"data\"),\n prevent_initial_call=True,\n )\n def update_altcoin_links(altcoin):\n \"\"\" Update the TradingView and exchange links for the current altcoin whenever a new altcoin was selected. 
\"\"\"\n df = pd.read_csv(os.path.join(\"data\", \"config.csv\"), index_col=\"name\")\n\n tradingview_links = []\n if type(df.loc[altcoin, \"chart_usd\"]) == str:\n tradingview_links.append(dbc.CardLink(\"TradingView (USD)\", target=\"_blank\", href=df.loc[altcoin, \"chart_usd\"]))\n if type(df.loc[altcoin, \"chart_btc\"]) == str:\n tradingview_links.append(dbc.CardLink(\"TradingView (BTC)\", target=\"_blank\", href=df.loc[altcoin, \"chart_btc\"]))\n\n exchange_links = []\n if type(df.loc[altcoin, \"spot_usd\"]) == str:\n exchange_links.append(dbc.CardLink(\"Spot (USD)\", target=\"_blank\", href=df.loc[altcoin, \"spot_usd\"]))\n if type(df.loc[altcoin, \"spot_btc\"]) == str:\n exchange_links.append(dbc.CardLink(\"Spot (BTC)\", target=\"_blank\", href=df.loc[altcoin, \"spot_btc\"]))\n if type(df.loc[altcoin, \"perps\"]) == str:\n exchange_links.append(dbc.CardLink(\"Perpetuals\", target=\"_blank\", href=df.loc[altcoin, \"perps\"]))\n\n return tradingview_links, exchange_links\n","repo_name":"fyangch/crypto-dashboard","sub_path":"src/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":11195,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"76"}
+{"seq_id":"1549436510","text":"import json\nimport time\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef yahoo_scraper():\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582\"}\n\n tickers = []\n exchange = 'amex'\n with open(f'../02_data_ticker/{exchange}.json', encoding='utf-8') as file:\n tickers += json.load(file)\n\n url = 'https://finance.yahoo.com/quote/{}/profile?p={}'\n data = []\n count = 1\n\n for row in tickers:\n try:\n symbol = row[1]\n response = requests.get(url.format(symbol, symbol), headers=headers) # url profile website is scraped\n\n soup = BeautifulSoup(response.text, 'html.parser')\n pattern = re.compile(r'\\s--\\sData\\s--\\s')\n script_data = soup.find('script', text=pattern).contents[0]\n start = script_data.find(\"context\")-2\n json_data = json.loads(script_data[start:-12])\n\n sector = json_data['context']['dispatcher']['stores']['QuoteSummaryStore']['assetProfile']['sector']\n description = (json_data['context']['dispatcher']['stores']['QuoteSummaryStore']\n ['assetProfile']['longBusinessSummary'])\n\n data.append({\"name\": row[0], 'sector': sector, \"description\": description})\n print(\"added: \", row[0])\n count += 1\n\n except KeyError as e:\n print(\"not found: \", row[0], str(e))\n\n print(\"Results:\", len(tickers), \"tickers\", count, \"added and\", len(tickers)-count, \"not found\")\n\n # save the data list into a JSON file\n with open('sync_data.json', 'w') as f:\n json.dump(data, f)\n\n\ndef main():\n start = time.time()\n yahoo_scraper()\n end = time.time()\n print(\"Took {} seconds.\".format(end - start))\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"commutativity/project_NLP","sub_path":"06_sync_async_comparison/1_sync_scraping.py","file_name":"1_sync_scraping.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"5008701188","text":"\"\"\"poll models #2\n\nRevision ID: c4a90fed59fb\nRevises: 7505195b2951\nCreate Date: 2023-05-28 17:11:55.304035\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlmodel\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c4a90fed59fb'\ndown_revision = '7505195b2951'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('question', sa.Column('text', sqlmodel.sql.sqltypes.AutoString(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('question', 'text')\n # ### end Alembic commands ###\n","repo_name":"fearsd/leaders_hack_2023","sub_path":"backend/migrations/versions/c4a90fed59fb_poll_models_2.py","file_name":"c4a90fed59fb_poll_models_2.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"19271742927","text":"import datetime\nfrom pyexpat import model\nfrom fastapi import FastAPI, HTTPException, Depends\nfrom pydantic import BaseModel, Field\nimport models\nfrom database import engine, SessionLocal\nfrom sqlalchemy.orm import Session\nfrom typing import Union\nfrom functools import wraps\nimport requests\n\n\napp = FastAPI()\nemployee_api_endpoint = \"http://127.0.0.1:8090/employees/\"\n\nmodels.Base.metadata.create_all(bind=engine)\n\n\ndef get_db():\n try:\n db = SessionLocal()\n yield db\n finally:\n db.close()\n\n\ndef check_employee(func):\n @wraps(func)\n async def wrapper(*args, **kwargs):\n employee_id = kwargs[\"employee_id\"]\n r = requests.get(employee_api_endpoint + str(employee_id), timeout=5)\n if r.status_code == 404:\n raise HTTPException(\n status_code=404, detail=f\"Employee ID {employee_id} : Does not exist\"\n )\n return await func(*args, **kwargs)\n\n return wrapper\n\n\nclass TimeSheet(BaseModel):\n employe_id: int = Field()\n date: Union[datetime.date] = Field()\n hours_worked: int = Field()\n\n\n@app.get(\"/timesheet/{employee_id}\")\n@check_employee\nasync def list_timesheet_records(employee_id: int, db: Session = Depends(get_db)):\n return (\n db.query(models.TimeSheet)\n .filter(models.TimeSheet.employee_id == employee_id)\n .values(\n models.TimeSheet.employee_id,\n models.TimeSheet.date,\n models.TimeSheet.hours_worked,\n )\n )\n\n\n@app.post(\"/timesheet\")\ndef create_timesheet(timesheet: TimeSheet, db: Session = Depends(get_db)):\n\n tm_model = models.TimeSheet()\n tm_model.employee_id = timesheet.employe_id\n tm_model.date = timesheet.date\n tm_model.hours_worked = timesheet.hours_worked\n\n db.add(tm_model)\n db.commit()\n\n return timesheet\n\n\n@app.get(\"/timesheet/{employee_id}/{date}\")\n@check_employee\nasync def get_timesheet_record(\n employee_id: int, date: datetime.date, db: Session = Depends(get_db)\n):\n print(date)\n return (\n db.query(models.TimeSheet)\n .filter(\n models.TimeSheet.employee_id == employee_id, models.TimeSheet.date == date\n )\n .values(\n models.TimeSheet.employee_id,\n models.TimeSheet.date,\n models.TimeSheet.hours_worked,\n )\n )\n","repo_name":"Ansh111222/timesheet_api","sub_path":"timesheet.py","file_name":"timesheet.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"13332761005","text":"# Read agent\nimport pickle\n\nimport gym\n\nwith open(\"models/unit2/taxi_model.pkl\", \"rb\") as f:\n model = pickle.load(f)\n\n# Evaluate our Agent\nfrom unit2.general.q_learn import evaluate_agent\n\n# Get env from gym\nenv = gym.make(model[\"env_id\"])\n\n# Get parameters from model\nmax_steps = model[\"max_steps\"]\nn_eval_episodes = model[\"n_eval_episodes\"]\nQtable_taxi = model[\"qtable\"]\neval_seed = model[\"eval_seed\"]\n\nmean_reward, std_reward = evaluate_agent(env, max_steps, n_eval_episodes, Qtable_taxi, eval_seed)\nprint(f\"Mean_reward={mean_reward:.2f} +/- {std_reward:.2f}\")","repo_name":"Akrielz/Reinforcement-Learning-HF","sub_path":"unit2/taxi_v3/eval_taxi.py","file_name":"eval_taxi.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"19899139839","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\ndef first(request):\n html = \"\"\"\n First page
\n Home
\n Second page\n \"\"\"\n return HttpResponse(html)\n\ndef second(request):\n html = \"\"\"\n Second page
\n First page\n \"\"\"\n return HttpResponse(html)\n","repo_name":"azizdevfull/django-lessons","sub_path":"lesson1/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
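The link targets inside the HTML strings are not visible here, so for the two views to be reachable they still need URL routes. A plausible urls.py for this lesson, with the path strings and the import path both assumptions inferred from the file location (lesson1/home/views.py):

from django.urls import path
from home import views

urlpatterns = [
    path("", views.first),          # assumed home route
    path("second/", views.second),  # assumed route for the second page
]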
+{"seq_id":"24353470426","text":"# coding: utf-8\nimport numpy as np\n\nfrom ..utils import handleKeyError\nfrom ..utils import flatten_dual\nfrom ..utils import ItemsetTreeDOTexporter\nfrom ..utils import DOTexporterHandler\n\nITEM_MINING_METHODS = [\"all\", \"closed\"]\n\ndef create_one_hot(data):\n \"\"\"\n Create the one-hot binary matrix.\n @params data : (list) Each element of data (data[i]) have variable length items.\n @return one_hot : shape=(num_data,num_unique)\n one_hot[n][i]=1 means data[n] contains idx2data[i]\n @return idx2data : Dictionary from index to original data.\n \"\"\"\n unique_data = sorted(list(set(flatten_dual(data))))\n num_unique = len(unique_data)\n num_data = len(data)\n data2idx = dict(zip(unique_data, range(num_unique)))\n one_hot = np.zeros(shape=(num_data,num_unique), dtype=np.int32)\n for i,row in enumerate(data):\n one_hot[i, np.asarray([data2idx[e] for e in row], dtype=int)]=1\n idx2data = dict(zip(range(num_unique), unique_data))\n return one_hot, idx2data\n\nclass Node():\n \"\"\" Node for Tree structure.\n @params database :\n @params itemset :\n @params num_items :\n @params freq :\n @params tail :\n @params threshold :\n \"\"\"\n def __init__(self, itemset, freq, tail):\n self.itemset = itemset\n self.freq = freq # (=len(database))\n self.tail = tail\n self.children = []\n\n def _recurse_all(self, database, threshold, num_items):\n \"\"\" Find ALL closed Itemsets. \"\"\"\n for i in range(self.tail+1, num_items):\n next_itemset = self.itemset + [i]\n next_data = database[database[:,i]==1,:]\n freq = len(next_data)\n if freq >= threshold:\n child = Node(itemset=next_itemset, freq=freq, tail=i)\n child._recurse_all(next_data, threshold, num_items)\n self.children.append(child)\n\n def _recurse_closed(self, database, threshold, num_items):\n \"\"\" Find ONLY closed Itemsets. \"\"\"\n for i in range(self.tail+1, num_items):\n next_data = database[database[:,i]==1,:]\n freq = len(next_data)\n if freq >= threshold:\n add_itemset = i+np.where(np.all(next_data[:,i:], axis=0))[0]\n next_itemset = self.itemset + add_itemset.tolist()\n child = Node(itemset=next_itemset, freq=freq, tail=max(add_itemset))\n child._recurse_closed(next_data, threshold, num_items)\n self.children.append(child)\n\nclass FrequentSet():\n def __init__(self, threshold):\n self.root = None\n self.threshold = threshold\n self.freq_sets = []\n\n def fit(self, database, method=\"closed\"):\n \"\"\"\n @param database: Binary Matrix. 
shape=(num_transactions, num_items)\n        \"\"\"\n        method = method.lower()\n        handleKeyError(lst=ITEM_MINING_METHODS, method=method)\n        num_transactions, num_items = database.shape\n        self.root = Node(itemset=[], freq=num_transactions, tail=-1)\n        recurse = getattr(self.root, {\n            \"all\"    : \"_recurse_all\",\n            \"closed\" : \"_recurse_closed\",\n        }[method])\n        recurse(database, self.threshold, num_items)\n        self.num_items = num_items\n        self.all = self.get_itemsets(self.root)\n\n    def get_itemsets(self, node):\n        freq_sets = [node.itemset]\n        for child in node.children:\n            freq_sets.extend(self.get_itemsets(node=child))\n        return freq_sets\n\n    def export_graphviz(self, out_file=None, feature_names=None,\n                        class_names=None, cmap=\"jet\", filled=True,\n                        rounded=True, precision=3):\n        if class_names is None:\n            class_names = np.arange(self.num_items)\n        exporter = ItemsetTreeDOTexporter(\n            cmap=cmap, class_names=class_names,\n            filled=filled, rounded=rounded, precision=precision\n        )\n        return DOTexporterHandler(exporter, root=self.root, out_file=out_file)\n","repo_name":"iwasakishuto/Kerasy","sub_path":"kerasy/search/itemset.py","file_name":"itemset.py","file_ext":"py","file_size_in_byte":3973,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
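A small end-to-end run of the pieces above on toy transactions; threshold=2 means an itemset must occur in at least two rows, and the first itemset printed is the empty root:

transactions = [
    ["bread", "milk"],
    ["bread", "butter", "milk"],
    ["butter", "milk"],
]
one_hot, idx2data = create_one_hot(transactions)

miner = FrequentSet(threshold=2)
miner.fit(one_hot, method="closed")
for itemset in miner.all:
    print([idx2data[i] for i in itemset])  # e.g. [], ['bread', 'milk'], ['butter', 'milk'], ['milk']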
+{"seq_id":"11360404512","text":"#!/usr/bin/env python\n\n\nimport sys\nimport os\nimport random\nimport struct\n\n\nMEMSIZE = 1024*1024\nNREGS = 10\nMAXTEXT = 2000\nMAXINST = 55 #mine is 52 # I am almost sure it is possible to make it 1 or 2 instructions smaller\n\n\nclass Instruction:\n op = None\n imm = 0\n o1 = None\n o2 = None\n dsp = None\n target = None\n memory_address = 0\n clock = 0\n\n\n def __init__(self, tstr):\n def parse_immediate(tt):\n if tt.startswith(\"0x\"):\n try:\n v = int(tt, 16)\n except ValueError:\n assert False\n else:\n try:\n v = int(tt)\n except ValueError:\n assert False\n assert v>=0\n assert v-1000\n assert v<1000\n return v\n\n def parse_register(tt):\n assert len(tt) == 2\n assert tt[0] == \"r\"\n try:\n v = int(tt[1])\n except ValueError:\n assert False\n assert v>=0\n assert v=0\n assert v=1\n assert len(sstr)<=4\n\n if len(sstr) == 1:\n t_op = sstr[0]\n assert t_op in [\"halt\", \"time\", \"magic\", \"reset\"]\n self.op = t_op\n\n elif len(sstr) == 2:\n t_op, t_1 = sstr\n assert t_op in [\"jmp\", \"jmpz\"]\n self.op = t_op\n\n if self.op == \"jmp\":\n self.target = parse_target(t_1)\n elif self.op == \"jmpz\":\n self.target = parse_target(t_1)\n else:\n assert False\n\n elif len(sstr) == 3:\n t_op, t_1, t_2 = sstr\n assert t_op in [\"mov\", \"movc\", \"jmpg\", \"add\", \"sub\", \"mul\", \"and\", \"or\", \"xor\"]\n self.op = t_op\n\n if self.op == \"mov\":\n self.o1 = parse_register(t_1)\n self.o2 = parse_register(t_2)\n\n elif self.op in [\"add\", \"sub\", \"mul\", \"and\", \"or\", \"xor\"]:\n self.o1 = parse_register(t_1)\n self.o2 = parse_register(t_2)\n\n elif self.op == \"movc\":\n self.o1 = parse_register(t_1)\n self.imm = parse_immediate(t_2)\n\n elif self.op == \"jmpg\":\n self.target = parse_target(t_2)\n self.o1 = parse_register(t_1)\n\n else:\n assert False\n\n elif len(sstr) == 4:\n t_op, t_1, t_2, t_3 = sstr\n assert t_op in [\"movfrom\", \"movto\"]\n self.op = t_op\n\n if self.op == \"movfrom\":\n self.o1 = parse_register(t_1)\n self.memory_address = parse_memory(t_2)\n self.dsp = parse_register(t_3)\n\n elif self.op == \"movto\":\n self.o1 = parse_register(t_1)\n self.memory_address = parse_memory(t_2)\n self.dsp = parse_register(t_3)\n\n else:\n assert False\n\n else:\n assert False\n\n\n def pprint(self):\n tstr = \"%s %s %s %s %s %s\" % \\\n (self.op, \n \"None\" if self.o1==None else \"r%d\"%self.o1,\n \"None\" if self.o2==None else \"r%d\"%self.o2,\n hex(self.imm), \"None\" if self.target==None else self.target, self.memory_address)\n return tstr\n\n\nclass Cpu:\n ipointer = 0\n instructions = None\n registers = None\n memory = None\n clock = 0\n cache = None\n execution_level = 0\n secret_values = None\n\n\n def __init__(self):\n self.instructions = []\n self.cache = {}\n self.secret_values = (random.randint(1,4200000000), random.randint(1,4200000000) , random.randint(1,4200000000), random.randint(1,4200000000))\n\n self.reset()\n\n\n def reset(self):\n self.ipointer = 0\n #self.instructions = []\n self.registers = [0 for r in range(NREGS)]\n self.memory = [0 for _ in range(MEMSIZE)]\n self.clock = 0\n for k in self.cache.keys():\n self.cache[k] = 0\n self.execution_level += 1\n #secret_values\n\n\n def load_instructions(self, tt):\n for line in tt.split(\"\\n\"):\n if \"#\" in line:\n line = line.split(\"#\")[0]\n line = line.strip()\n if not line:\n continue\n self.instructions.append(Instruction(line))\n assert len(self.instructions) <= MAXINST\n\n def run(self, debug=0):\n if debug>0: print(self.pprint())\n ins = 
self.instructions[0]\n for i,v in enumerate(self.secret_values):\n self.memory[i] = v\n\n while (self.ipointer>=0 and self.ipointer0: print(\"==>\", ins.pprint())\n self.execute(ins)\n if debug>0: print(self.pprint(debug=debug))\n #print(\"===\", self.ipointer, len(self.instructions), self.ipointer>=0 and self.ipointer0: print(\"Terminated! (at %d)\" % self.ipointer)\n if debug>0: print(\"State:\")\n if debug>0: print(self.pprint())\n\n def execute(self, ins):\n self.clock += 1\n\n if ins.op == \"movc\":\n self.registers[ins.o1] = ins.imm\n self.ipointer += 1\n\n elif ins.op == \"magic\":\n if self.execution_level == 2:\n #print(self.registers, self.secret_values, self.registers[0:2], self.registers[0:2] == self.secret_values)\n if tuple(self.registers[0:4]) == self.secret_values:\n #print(\"===\")\n with open(\"flag.txt\", \"rb\") as fp:\n cc = fp.read()\n cc = cc.strip()\n cc = cc.ljust(len(self.registers)*4, b\"\\x00\")\n for i in range(len(self.registers)):\n self.registers[i] = struct.unpack(\"=0 \n assert nt < len(self.instructions)\n self.ipointer = nt\n\n elif ins.op == \"jmpz\":\n if self.registers[0] == 0:\n nt = self.ipointer + ins.target\n assert nt >=0 \n assert nt < len(self.instructions)\n self.ipointer = nt\n else:\n self.ipointer += 1\n\n elif ins.op == \"jmpg\":\n if self.registers[0] > self.registers[ins.o1]:\n nt = self.ipointer + ins.target\n assert nt >=0 \n assert nt < len(self.instructions)\n self.ipointer = nt\n else:\n self.ipointer += 1\n\n elif ins.op == \"mov\":\n self.registers[ins.o1] = self.registers[ins.o2]\n self.ipointer += 1\n\n # ALU\n elif ins.op == \"sub\":\n v = self.registers[ins.o1] - self.registers[ins.o2]\n self.registers[ins.o1] = (v & 0xffffffff)\n self.ipointer += 1\n\n elif ins.op == \"add\":\n v = self.registers[ins.o1] + self.registers[ins.o2]\n self.registers[ins.o1] = (v & 0xffffffff)\n self.ipointer += 1\n\n elif ins.op == \"mul\":\n v = self.registers[ins.o1] * self.registers[ins.o2]\n self.registers[ins.o1] = (v & 0xffffffff)\n self.ipointer += 1\n\n elif ins.op == \"and\":\n v = self.registers[ins.o1] & self.registers[ins.o2]\n self.registers[ins.o1] = (v & 0xffffffff)\n self.ipointer += 1\n\n elif ins.op == \"or\":\n v = self.registers[ins.o1] | self.registers[ins.o2]\n self.registers[ins.o1] = (v & 0xffffffff)\n self.ipointer += 1\n\n elif ins.op == \"xor\":\n v = self.registers[ins.o1] ^ self.registers[ins.o2]\n self.registers[ins.o1] = (v & 0xffffffff)\n self.ipointer += 1\n\n # MEMORY\n elif ins.op == \"movfrom\":\n mem_location = ins.memory_address + self.registers[ins.dsp]\n mem_location = mem_location % len(self.memory)\n\n #print(mem_location, self.cache)\n if mem_location in self.cache:\n #print(\"-cache\")\n v = self.cache[mem_location]\n v = (v & 0xffffffff)\n self.registers[ins.o1] = v\n self.ipointer += 1\n else:\n #print(\"-nocache\")\n v = self.memory[mem_location]\n self.cache[mem_location] = v\n self.execute(ins)\n\n elif ins.op == \"movto\":\n mem_location = ins.memory_address + self.registers[ins.dsp]\n mem_location = mem_location % len(self.memory)\n\n if mem_location in self.cache:\n del self.cache[mem_location]\n v = (self.registers[ins.o1] & 0xffffffff)\n self.memory[mem_location] = v\n self.ipointer += 1\n\n else:\n assert False\n\n return \n\n def pprint(self, debug=0):\n tstr = \"\"\n tstr += \"%d> \"%self.ipointer\n tstr += \"[%d] \"%self.clock\n tstrl = []\n for i,r in enumerate(self.registers):\n tstrl.append(\"r%d=%d\"%(i,r))\n tstr += \",\".join(tstrl)\n if debug>1:\n tstr += \"\\nM->\"\n vv = []\n for 
i,v in enumerate(self.memory):\n if v!=0:\n vv.append(\"%d:%d\"%(i,v))\n tstr += \",\".join(vv)\n tstr += \"\\nC->\"\n tstr += repr(self.cache)\n return tstr\n\n\n\n\n\n\ndef main():\n print(\"Welcome to a very fast VM!\")\n print(\"Give me your instructions.\")\n print(\"To terminate, send 3 consecutive empty lines.\")\n\n\n instructions = \"\"\n wlc = 0\n while True:\n line = input()\n #print(repr(line))\n if not line.strip():\n wlc += 1\n else:\n wlc = 0\n instructions += line + \"\\n\"\n if wlc >= 3 or len(instructions) > MAXTEXT:\n break\n\n c = Cpu()\n print(\"Parsing...\")\n c.load_instructions(instructions)\n print(\"Running...\")\n c.run()\n\n print(\"Done!\")\n print(\"Registers: \" + repr(c.registers))\n print(\"Goodbye.\")\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n\n\n#~/pypy3.6/bin/pypy3 ./cpu.py test\n#~/pypy3.6/bin/pypy3 ./cpu.py exploit\n\n\n","repo_name":"b01lers/b01lers-ctf-2022","sub_path":"pwn/veryfastvm/src/cpu_original.py","file_name":"cpu_original.py","file_ext":"py","file_size_in_byte":11148,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"76"}
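Given the parser above, a tiny program for this VM can be assembled from the opcodes it accepts; this toy run loads two constants, adds them, and halts (movc takes a register and an immediate, add takes two registers, and '#' starts a comment that load_instructions strips):

c = Cpu()
c.load_instructions("""
movc r1 10   # r1 = 10
movc r2 32   # r2 = 32
add r1 r2    # r1 = r1 + r2 = 42
halt
""")
c.run(debug=1)  # prints the register file after each executed instruction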
+{"seq_id":"15875733674","text":"from typing import Literal\r\n\r\nimport rev\r\nimport wpilib\r\nimport wpilib.drive\r\nimport wpiutil\r\nfrom photonvision import PhotonCamera, SimVisionSystem, SimVisionTarget\r\nfrom robotpy_apriltag import AprilTagField, loadAprilTagLayoutField\r\nfrom wpilib import DriverStation\r\nfrom wpilib import RobotBase, RobotController\r\nfrom wpilib.simulation import DifferentialDrivetrainSim\r\nfrom wpimath.estimator import DifferentialDrivePoseEstimator\r\nfrom wpimath.geometry import Pose2d, Rotation3d, Translation3d, Transform3d\r\nfrom wpimath.kinematics import DifferentialDriveKinematics\r\nfrom wpimath.system import LinearSystemId\r\nfrom wpimath.system.plant import DCMotor\r\n\r\nimport ports\r\nfrom gyro import NavX, ADIS16448, ADIS16470, ADXRS, Empty\r\nfrom utils.property import autoproperty, defaultSetter\r\nfrom utils.safesubsystem import SafeSubsystem\r\nfrom utils.sparkmaxsim import SparkMaxSim\r\nfrom utils.sparkmaxutils import configureFollower, configureLeader\r\n\r\n\r\nselect_gyro: Literal[\"navx\", \"adis16448\", \"adis16470\", \"adxrs\", \"empty\"] = \"adis16470\"\r\napril_tag_field = loadAprilTagLayoutField(AprilTagField.k2023ChargedUp)\r\ncam_to_robot = Transform3d(Translation3d(-0.375, 0.0, -0.165), Rotation3d(0, 0, 0))\r\n\r\n\r\nclass Drivetrain(SafeSubsystem):\r\n encoder_conversion_factor = autoproperty(0.056)\r\n\r\n def __init__(self) -> None:\r\n super().__init__()\r\n\r\n # Motors\r\n self._motor_left = rev.CANSparkMax(ports.drivetrain_motor_front_left, rev.CANSparkMax.MotorType.kBrushless)\r\n configureLeader(self._motor_left, \"brake\")\r\n\r\n self._motor_left_follower = rev.CANSparkMax(ports.drivetrain_motor_rear_left,\r\n rev.CANSparkMax.MotorType.kBrushless)\r\n configureFollower(self._motor_left_follower, self._motor_left, \"brake\")\r\n\r\n self._motor_right = rev.CANSparkMax(ports.drivetrain_motor_front_right,\r\n rev.CANSparkMax.MotorType.kBrushless)\r\n configureLeader(self._motor_right, \"brake\", True)\r\n\r\n self._motor_right_follower = rev.CANSparkMax(ports.drivetrain_motor_rear_right,\r\n rev.CANSparkMax.MotorType.kBrushless)\r\n configureFollower(self._motor_right_follower, self._motor_right, \"brake\")\r\n\r\n self._drive = wpilib.drive.DifferentialDrive(self._motor_left, self._motor_right)\r\n self.addChild(\"DifferentialDrive\", self._drive)\r\n\r\n # Photon Vision\r\n self.latest = None\r\n\r\n # Encoders\r\n self._encoder_left = self._motor_left.getEncoder()\r\n self._encoder_right = self._motor_right.getEncoder()\r\n self._left_encoder_offset = self._encoder_left.getPosition()\r\n self._right_encoder_offset = self._encoder_right.getPosition()\r\n\r\n # Gyro\r\n self._gyro = {\r\n \"navx\": NavX,\r\n \"adis16448\": ADIS16448,\r\n \"adis16470\": ADIS16470,\r\n \"adxrs\": ADXRS,\r\n \"empty\": Empty,\r\n }[select_gyro]()\r\n\r\n # Odometry\r\n self._kinematics = DifferentialDriveKinematics(trackWidth=0.56)\r\n self._estimator = DifferentialDrivePoseEstimator(self._kinematics, self._gyro.getRotation2d(), 0, 0,\r\n initialPose=Pose2d(0, 0, 0))\r\n\r\n self._field = wpilib.Field2d()\r\n wpilib.SmartDashboard.putData(\"Field\", self._field)\r\n\r\n self.alliance = DriverStation.getAlliance()\r\n\r\n self.addChild(\"Gyro\", self._gyro)\r\n\r\n if RobotBase.isReal():\r\n self.cam = PhotonCamera(\"mainCamera\")\r\n PhotonCamera.setVersionCheckEnabled(False)\r\n else: # sim\r\n self._motor_left_sim = SparkMaxSim(self._motor_left)\r\n self._motor_right_sim = SparkMaxSim(self._motor_right)\r\n self._system = 
LinearSystemId.identifyDrivetrainSystem(1.98, 0.2, 5, 0.3)\r\n self._drive_sim = DifferentialDrivetrainSim(self._system, 0.64, DCMotor.NEO(4), 1.5, 0.08, [\r\n 0.001, 0.001, 0.001, 0.1, 0.1, 0.005, 0.005])\r\n\r\n # Cam sim\r\n cam_diag_fov = 75.0\r\n max_led_range = 20\r\n cam_resolution_width = 320\r\n cam_resolution_height = 240\r\n min_target_area = 10\r\n self.sim_vision = SimVisionSystem(\"cam\", cam_diag_fov, cam_to_robot, max_led_range,\r\n cam_resolution_width, cam_resolution_height, min_target_area)\r\n for i in range(1, 9):\r\n self.sim_vision.addSimVisionTarget(SimVisionTarget(april_tag_field.getTagPose(i), 8, 8, i))\r\n self.cam = self.sim_vision.cam\r\n\r\n self.use_vision = True\r\n\r\n\r\n def arcadeDrive(self, forward: float, rotation: float) -> None:\r\n self._drive.arcadeDrive(forward, rotation, False)\r\n\r\n def tankDrive(self, left: float, right: float) -> None:\r\n self._drive.tankDrive(left, right, False)\r\n\r\n def simulationPeriodic(self):\r\n self._drive_sim.setInputs(\r\n self._motor_left.get() * RobotController.getInputVoltage(),\r\n self._motor_right.get() * RobotController.getInputVoltage())\r\n self._drive_sim.update(0.02)\r\n self._motor_left_sim.setPosition(self._drive_sim.getLeftPosition() / self.encoder_conversion_factor + self._left_encoder_offset)\r\n self._motor_left_sim.setVelocity(self._drive_sim.getLeftVelocity())\r\n self._motor_right_sim.setPosition(self._drive_sim.getRightPosition() / self.encoder_conversion_factor + self._right_encoder_offset)\r\n self._motor_right_sim.setVelocity(self._drive_sim.getRightVelocity())\r\n self._gyro.setSimAngle(self._drive_sim.getHeading().degrees())\r\n self.sim_vision.processFrame(self._drive_sim.getPose())\r\n\r\n def getRotation(self):\r\n return self._gyro.getRotation2d()\r\n\r\n def getPitch(self):\r\n return self._gyro.getPitch()\r\n\r\n def getLeftEncoderPosition(self):\r\n return (self._encoder_left.getPosition() - self._left_encoder_offset) * self.encoder_conversion_factor\r\n\r\n def getRightEncoderPosition(self):\r\n return (self._encoder_right.getPosition() - self._right_encoder_offset) * self.encoder_conversion_factor\r\n\r\n def getAverageEncoderPosition(self):\r\n return (self.getLeftEncoderPosition() + self.getRightEncoderPosition()) / 2\r\n\r\n def getPose(self):\r\n return self._estimator.getEstimatedPosition()\r\n\r\n def getField(self):\r\n return self._field\r\n\r\n def periodic(self):\r\n self._estimator.update(self._gyro.getRotation2d(), self.getLeftEncoderPosition(),\r\n self.getRightEncoderPosition())\r\n\r\n self.latest = self.cam.getLatestResult()\r\n if self.use_vision and self.latest.hasTargets():\r\n img_capture_time = self.latest.getTimestamp()\r\n cam_to_target = self.latest.getBestTarget().getBestCameraToTarget()\r\n target_to_cam = cam_to_target.inverse()\r\n target_on_field = april_tag_field.getTagPose(self.latest.getBestTarget().getFiducialId())\r\n if target_on_field is not None:\r\n camera_on_field = target_on_field.transformBy(target_to_cam)\r\n robot_on_field = camera_on_field.transformBy(cam_to_robot).toPose2d()\r\n self._estimator.addVisionMeasurement(robot_on_field, img_capture_time)\r\n\r\n self._field.setRobotPose(self._estimator.getEstimatedPosition())\r\n\r\n def initSendable(self, builder: wpiutil.SendableBuilder) -> None:\r\n super().initSendable(builder)\r\n builder.addDoubleProperty(\"Left motor\", lambda: self._motor_left.get() or -999.0, defaultSetter)\r\n builder.addDoubleProperty(\"Right Motor\", lambda: self._motor_right.get() or -999.0, defaultSetter)\r\n 
builder.addDoubleProperty(\"Left Encoder Position\", self.getLeftEncoderPosition, defaultSetter)\r\n builder.addDoubleProperty(\"Right Encoder Position\", self.getRightEncoderPosition, defaultSetter)\r\n\r\n","repo_name":"Ultime5528/FRC2023","sub_path":"subsystems/drivetrain.py","file_name":"drivetrain.py","file_ext":"py","file_size_in_byte":7979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"3917965443","text":"import math\r\n\r\n#Punto 1\r\ndef calificativo(nombre, calif): #define la funcion del punto numero 1\r\n return nombre+' '+calif\r\n \r\n#Punto 2\r\ndef cuadrado(numero): #define la funcion del punto numero 2\r\n return numero**2\r\n\r\n#Punto 3\r\ndef suma(num1,num2): #define la funcion del punto numero 3 y parte del 4\r\n return num1+num2\r\n\r\n#Punto 4\r\ndef rest(num1,num2): #define las funciones del punto numero 4\r\n return num1-num2 \r\ndef multi (num1,num2):\r\n return num1*num2\r\ndef div(num1,num2):\r\n return num1/num2\r\n\r\n#Punto 5\r\ndef parte_entera(numero:float)->int: #define la funcion del punto numero 5\r\n return math.trunc(numero)\r\n\r\n#Punto 6\r\ndef valorIVA(valor_prod): #define la funcion del punto numero 6\r\n return valor_prod*0.19\r\n\r\n#Punto 7\r\ndef area_perim_circ(radio): #define la funcion del punto numero 7\r\n area=math.pi*radio**2\r\n perim=math.pi*2*radio\r\n return area, perim\r\n \r\n#Punto 8\r\ndef area_hexag(lado,apot): #define la funcion del punto numero 8\r\n area=lado*apot*3\r\n return area\r\n\r\n#Punto 9 \r\ndef promedio(num1,num2,num3): #define la funcion del punto numero 9\r\n suma=num1+num2+num3\r\n return suma/3\r\n\r\n#Punto 10\r\ndef intercambiar_var(var1,var2): #define la funcion del punto numero 10\r\n aux=var1\r\n var1=var2\r\n var2=aux\r\n return var1,var2\r\n\r\n#Punto 11\r\ndef tiempo_caida(altura): #define la funcion del punto numero 11\r\n tiempo=math.sqrt(2*altura/9.8)\r\n return round(tiempo,3) #redondea a 3 decimales el retorno\r\n\r\n#Punto 12\r\ndef distancia(acel,tiempo,velIn): #define la funcion del punto numero 12\r\n distancia=velIn*tiempo+(acel/2)*(tiempo**2)\r\n return round(distancia,3) #redondea a 3 decimales el retorno\r\n\r\n#Punto 13\r\ndef velocidad_final(acel,velIn,tiempo): #define la funcion del punto numero 13\r\n vel_fin=velIn+acel*tiempo\r\n return round(vel_fin,3) #redondea a 3 decimales el retorno\r\n\r\n#Punto 14\r\ndef energia(masa,vel): #define la funcion del punto numero 14\r\n energia=(masa*vel**2)/2\r\n return round(energia,3) #redondea a 3 decimales el retorno\r\n\r\n#Punto 15\r\ndef distancia_coor(x1,y1,x2,y2): #define la funcion del punto numero 15\r\n distancia=math.sqrt((x2-x1)**2+(y2-y1)**2)\r\n return round(distancia,3) #redondea a 3 decimales el retorno\r\n\r\n#Punto 16\r\ndef segundos_a_horas(segundos): #define la funcion del punto numero 16\r\n horas=(segundos/60)/60\r\n return round(horas,2) #redondea a 2 decimales el retorno\r\n\r\n#Punto 17\r\ndef segundos_a_minutos(segundos): #define la funcion del punto numero 17\r\n minutos=segundos/60\r\n return round(minutos,3) #redondea a 3 decimales el retorno\r\n\r\n#Punto 18\r\ndef segundos_a_formato(segundos): #define la funcion del punto numero 18\r\n hora=segundos//3600\r\n segundos=segundos%3600\r\n minuto=segundos//60\r\n segundos=segundos%60\r\n return hora,minuto,segundos\r\n\r\n#Punto 19\r\ndef billetes(dinero_cant): #define la funcion del punto numero 19\r\n dicc={'100k':0,'50k':0,'20k':0,'10k':0,'5k':0,'2k':0,'1k':0} #crea un dicc con las posibilidades\r\n dinero_cant=dinero_cant//1000\r\n dicc_fin={} #crea un dicc vacio donde se almacenara la respuesta\r\n for x in dicc: #evalua cada elmento de dicc\r\n dicc[x]=dinero_cant//int(x[0:-1])\r\n \r\n dinero_cant=dinero_cant%int(x[0:-1])\r\n for x in dicc: #evalua cada elemento de dicc modificado\r\n if dicc[x]!=0: #si el elemento se modifico es agregado al dicc vacio\r\n dicc_fin[x]=dicc[x]\r\n \r\n return dicc_fin\r\n\r\n#Punto 20\r\ndef inverso(numero): 
#define la funcion del punto numero 20\r\n prim=numero//1000\r\n numero=numero%1000\r\n seg=numero//100\r\n numero=numero%100\r\n terc=numero//10\r\n cuart=numero%10\r\n numero=cuart*1000+terc*100+seg*10+prim \r\n if len(str(numero))==3: #si el invertido tiene 3 digitos porque no toma en cuenta el 0 a la izq, se agrega un 0\r\n numero='0'+str(numero)\r\n elif len(str(numero))==2: #si el invertido tiene 2 digitos por la misma razon se agregan dos 0: '00'\r\n numero='00'+str(numero)\r\n elif len(str(numero))==1: #si el invertido tiene 1 digito por la misma razon se agregan tres 0: '000'\r\n numero='000'+str(numero)\r\n return numero\r\n\r\n#Punto 21\r\ndef par_impar(numero): #define la funcion del punto numero 21\r\n respuesta=0\r\n if numero%2==0:\r\n respuesta='Par'\r\n else:\r\n respuesta='Impar'\r\n return respuesta\r\n\r\n#Punto 22\r\ndef positivo_negativo(numero): #define la funcion del punto numero 22\r\n respuesta=0\r\n if numero>=0:\r\n respuesta='Positivo'\r\n else:\r\n respuesta='Negativo'\r\n return respuesta\r\n\r\n#Punto 23\r\ndef pos_neg_par_impar(numero): #define la funcion del punto numero 23\r\n par_o_impar=par_impar(numero) #invoca la funcion del punto 21\r\n signo=positivo_negativo(numero) #invoca la funcion del punto 22\r\n return par_o_impar+'-'+signo\r\n \r\n#Punto 24 \r\ndef descuento_5porc(valor_prod): #define la funcion del punto numero 24\r\n iva=valorIVA(valor_prod)\r\n valor_prod+=iva\r\n if iva>=150000:\r\n valor_prod*=0.95\r\n return valor_prod\r\n\r\n#Punto 25\r\ndef punto25(numero): #define la funcion del punto numero 25\r\n if numero>=10:\r\n numero*=3\r\n else:\r\n numero/=4\r\n return numero\r\n\r\n#Punto 26\r\ndef nota_final(n1,n2,n3,n4,n5): #define las funciones del punto numero 26\r\n nota_final=(n1*0.15)+(n2*0.2)+(n3*0.15)+(n4*0.3)+(n5*0.2) \r\n return nota_final\r\ndef aprueba_reprueba(nota_final):\r\n respuesta='Felicitaciones :D'\r\n if nota_final<20:\r\n respuesta='No puede habilitar :('\r\n elif nota_final<30:\r\n respuesta='Reprobó, tendra que habilitar >:)'\r\n elif nota_final<=45:\r\n respuesta='Aprobó :3'\r\n return respuesta\r\n\r\n#Punto 27\r\ndef mayor(num1,num2): #define la funcion del punto numero 27\r\n mayor=num1\r\n if num2>num1:\r\n mayor=num2\r\n return mayor\r\n\r\n#Punto 28\r\ndef decimal(numero): #define la funcion del punto numero 28\r\n return float(numero)\r\n\r\n#Punto 29\r\ndef mayor_menor(num1,num2,num3): #define la funcion del punto numero 29\r\n mayor=max(num1,num2,num3) #selecciona el maximo numero\r\n menor=min(num1,num2,num3) #selecciona el minimo numero\r\n return mayor,menor\r\n\r\n#Punto 30\r\ndef suma_mayor_menor(num1,num2,num3): #define la funcion del punto numero 30\r\n suma=num1+num2\r\n respuesta='Mayor'\r\n if suma1000 and dias>7:\r\n valor=valor*0.85\r\n if valor<100000:\r\n valor=100000\r\n return valor\r\n\r\n#Punto 32 \r\ndef anio_bisiesto(anio): #define la funcion del punto numero 32\r\n if anio % 4 == 0 and (anio % 100 != 0 or anio % 400 == 0):\r\n respuesta=\"Es bisiesto\"\r\n else:\r\n respuesta=\"No es bisiesto\"\r\n return respuesta\r\n\r\n#Punto 33\r\ndef ecuacion_cuadratica(a,b,c): #define la funcion del punto numero 33\r\n x1=(-b+math.sqrt(b**2-(4*a*c)))/2*a\r\n x2=(-b-math.sqrt(b**2-(4*a*c)))/2*a\r\n return x1,x2\r\n\r\n#Punto 34\r\ndef usuario_contrasenia(usuario,contrasenia): #define la funcion del punto numero 34\r\n user=input('Ingrese su usuario: ')\r\n password=int(input('Digite su contraseña: '))\r\n mensaje=''\r\n contador=0\r\n while True: #crea un ciclo infinito\r\n contador+=1\r\n if 
user==usuario and contrasenia==password:\r\n mensaje='Inicio de sesión exitoso'\r\n break #cuando ambas condiciones se cumple rompe el ciclo\r\n else:\r\n if contador==3:\r\n mensaje='Excedio el numero de intentos, su computador explotara en 10 segundos'\r\n break #cuando se llega a 3 intentos fallidos de inicio de sesion rompe el ciclo\r\n print('Usuario o contraseña incorrectos')\r\n user=input('Ingrese su usuario nuevamente: ')\r\n password=int(input('Digite su contraseña nuevamente: ')) \r\n return mensaje\r\n\r\n#Punto 35\r\ndef nombre_numero(numero): #define la funcion del punto numero 35\r\n nombre=''\r\n if numero==0:\r\n nombre='CERO'\r\n elif numero==1:\r\n nombre='UNO'\r\n elif numero==2:\r\n nombre='DOS'\r\n elif numero==3:\r\n nombre='TRES'\r\n elif numero==4:\r\n nombre='CUATRO'\r\n elif numero==5:\r\n nombre='CINCO'\r\n elif numero==6:\r\n nombre='SEIS'\r\n elif numero==7:\r\n nombre='SIETE'\r\n elif numero==8:\r\n nombre='OCHO'\r\n elif numero==9:\r\n nombre='NUEVE'\r\n elif numero==10:\r\n nombre='DIEZ'\r\n return nombre\r\n\r\n#Punto 36\r\ndef cantidad_digitos(numero): #define la funcion del punto numero 36\r\n cantidad=len(str(numero))\r\n return cantidad\r\n\r\n#Punto 37\r\ndef incremen_disminuyen(num1,num2,num3): #define la funcion del punto numero 37\r\n mensaje=''\r\n if num1num2 and num2>num3:\r\n mensaje='Estan disminuyendo'\r\n else:\r\n mensaje='No incrementan ni disminuyen'\r\n return mensaje\r\n\r\n#Punto 38\r\ndef entre_0y5(num1,num2): #define la funcion del punto numero 38\r\n resp=False\r\n if (num1>0 and num1<5) and (num2>0 and num2<5):\r\n resp=True\r\n return resp\r\n\r\n#Punto 39\r\ndef dias_semana(numero): #define la funcion del punto numero 39\r\n resp=''\r\n if numero>0 and numero<8:\r\n if numero==1:\r\n resp='Lunes'\r\n elif numero==2:\r\n resp='Martes'\r\n elif numero==3:\r\n resp='Miercoles'\r\n elif numero==4:\r\n resp='Jueves'\r\n elif numero==5:\r\n resp='Viernes'\r\n elif numero==6:\r\n resp='Sabado'\r\n elif numero==7:\r\n resp='Domingo'\r\n else:\r\n resp='El numero no es valido'\r\n return resp\r\n\r\n#Punto 40\r\ndef iguales(num1,num2,num3): #define la funcion del punto numero 40\r\n resp=False\r\n if num1==num2 or num1==num3 or num2==num3:\r\n resp=True\r\n return resp\r\n\r\n#Punto 41\r\ndef numero_naturales(): #define la funcion del punto numero 41\r\n for x in range (1,11): #recorre cada numero de 1 a 10\r\n print('->',x)\r\n pass\r\n\r\n#Punto 42\r\ndef numero_naturales_impar(): #define la funcion del punto numero 42\r\n for x in range(1,21): #recorre cada numero de 1 a 20\r\n if x%2!=0:\r\n print('->',x)\r\n pass\r\n\r\n#Punto 43\r\ndef numero_naturales_par(): #define la funcion del punto numero 43\r\n for x in range(1,21): #recorre cada numero de 1 a 20\r\n if x%2==0:\r\n print('->',x)\r\n pass\r\n\r\n#Punto 44\r\ndef numero_naturales_n(num): #define la funcion del punto numero 44\r\n for x in range (1,num+1): #recorre cada numero de 1 al numero ingresado\r\n print('->',x)\r\n pass\r\n\r\n#Punto 45\r\ndef numero_naturales_secuencia(num): #define la funcion del punto numero 45\r\n for x in range (1,num+1): #recorre cada numero de 1 al numero ingresado\r\n if x%2==0: \r\n print('->',-x)\r\n else:\r\n print('->',x)\r\n pass\r\n\r\n#Punto 46\r\ndef numero_naturales_entre(num1,num2): #define la funcion del punto numero 46\r\n if num1',x)\r\n else: \r\n print('~ERROR~ El segundo numero no es mayor')\r\n pass\r\n\r\n#Punto 47\r\ndef suma_numeros_entre(num1,num2): #define la funcion del punto numero 47\r\n suma=0\r\n if num1'))\r\n 
contador+=1\r\n suma+=num\r\n promedio=suma/contador\r\n return suma,promedio\r\n\r\n#Punto 50\r\ndef promedio_par_impar(numero): #define la funcion del punto numero 50\r\n suma_par=0\r\n suma_impar=0\r\n contador_par=0\r\n contador_impar=0\r\n for x in range(1,numero+1): #recorre cada numero de 1 al numero ingresado\r\n num=int(input('Ingrese el numero '+str(x)+':\\n->'))\r\n if num%2==0:\r\n contador_par+=1\r\n suma_par+=num\r\n else:\r\n contador_impar+=1\r\n suma_impar+=num\r\n #antes de realizar el promedio verifica que el divisor no sea 0\r\n if contador_par!=0: \r\n promedio_par=suma_par/contador_par\r\n else:\r\n promedio_par=0\r\n if contador_impar!=0:\r\n promedio_impar=suma_impar/contador_impar\r\n else:\r\n promedio_impar=0\r\n return promedio_par,promedio_impar\r\n\r\n#Punto 51\r\ndef es_positivo (valor): #define la funcion del punto numero 51\r\n while True: #crea un ciclo infinito \r\n if valor<0:\r\n valor=int(input('~ERROR~Ingrese un numero entero positivo: '))\r\n else: \r\n break #si el valor es mayor o igual a cero rompe el ciclo\r\n return valor\r\n\r\n#Punto 52\r\ndef cantidad_mayor_menor_100(numero): #define la funcion del punto numero 52\r\n mayor=0\r\n menor=0\r\n for x in range(1,numero+1): #recorre cada numero de 1 al numero ingresado\r\n num=int(input('Ingrese el numero '+str(x)+':\\n->'))\r\n if num>100:\r\n mayor+=1\r\n elif num<100:\r\n menor+=1\r\n return mayor,menor\r\n\r\n#Punto 53\r\ndef clasificar_numeros(numero): #define la funcion del punto numero 53\r\n positivo=0\r\n negativo=0\r\n par=0\r\n impar=0\r\n mult_ocho=0\r\n for x in range(1,numero+1): #recorre cada numero de 1 al numero ingresado\r\n num=int(input('Ingrese el numero '+str(x)+':\\n->'))\r\n #suma 1 a la variable de la condicion que se cumpla\r\n if num>=0:\r\n positivo+=1\r\n else:\r\n negativo+=1\r\n if num%2==0:\r\n par+=1\r\n else:\r\n impar+=1\r\n if num%8==0:\r\n mult_ocho+=1\r\n return positivo,negativo,par,impar,mult_ocho\r\n\r\n#Punto54\r\ndef punto54(): #define la funcion del punto numero 54\r\n contador_par=0\r\n contador_5=0\r\n contador=0\r\n contador_impar=0\r\n while True: #crea un ciclo infinito\r\n contador+=1\r\n num=int(input('Ingrese el numero '+str(contador)+':\\n->'))\r\n #suma 1 a la variable de la condicion que se cumpla\r\n if num==5:\r\n contador_5+=1\r\n if num%2==0:\r\n contador_par+=1\r\n else:\r\n contador_impar+=1\r\n if contador_par==10:\r\n break #rompe el ciclo cuando se ingresan 10 pares\r\n if contador_5==20:\r\n break #rompe el ciclo cuando se ingresan 20 numero '5'\r\n return contador, contador_par,contador_impar,contador_5\r\n\r\n#Punto 55\r\ndef cantidad_factores(numero): #define la funcion del punto numero 55\r\n contador=0\r\n for num in range(2,numero): #recorre cada numero de 2 al numero ingresado\r\n if numero%num==0:\r\n contador+=1\r\n return contador\r\n\r\n#Punto 56\r\ndef invertir_cadena(cadena): #define la funcion del punto numero 56\r\n return cadena[::-1] #retorna la cadena desde el final saltando -1 espacio hasta llegar al principio\r\n\r\n#Punto 57\r\ndef patron(): #define la funcion del punto numero 57\r\n patron=''\r\n for x in range (1,11):#recorre cada numero de 1 a 11\r\n patron=patron+str(x)\r\n print(patron) \r\n\r\n\r\n\r\n#Inicia con 1, para comenzar el ciclo while\r\nprint('--------------------------------------------------------------\\nINICIO DEL PROGRAMA\\n--------------------------------------------------------------')\r\npunto=1 \r\nwhile punto!=0: #Condicion del ciclo: Punto ingresado diferente a 0\r\n 
punto=int(input('Ingresa el punto a ejecutar\\nPara finalizar ingresa 0: ')) #Pide el punto a ejecutar\r\n \r\n #Elige el punto que se ingreso y lo ejecuta:\r\n if punto==1: #Interfaz del punto 1\r\n nombre=input('Ingresa el nombre: ') #Pide el nombre\r\n calif=input('Ingresa el cualificativo: ') #Pide el calificativo\r\n print(calificativo(nombre, calif)) #Imprime el resultado del punto 1\r\n \r\n elif punto==2: #Interfaz del punto 2\r\n numero=int(input('Ingresa el numero: ')) #Pide el numero\r\n print('El cuadrado del numero es',cuadrado(numero)) #Imprime el cuadrado del numero\r\n \r\n elif punto==3: #Interfaz del punto 3\r\n num1=int(input('Ingresa el primer numero a sumar: ')) #Pide el primer numero\r\n num2=int(input('Ingresa el segundo numero a sumar: ')) #Pide el segundo numero\r\n print('La suma de los numeros es',suma(num1, num2)) #Imprime la suma de los numeros\r\n \r\n elif punto==4: #Interfaz del punto 4\r\n num1=int(input('Ingresa el primer numero: ')) #Pide el primer numero\r\n num2=int(input('Ingresa el segundo numero: ')) #Pide el segundo numero\r\n print('La suma es:',suma(num1, num2)) #Imprime la suma\r\n print('La resta es:',rest(num1, num2)) #Imprime la resta\r\n print('La multiplicacion es:',multi(num1, num2)) #Imprime la multiplicacion\r\n print('La division es:',div(num1, num2)) #Imprime la division \r\n \r\n elif punto==5: #Interfaz del punto 5\r\n numero=float(input('Ingresa el numero con su decimal: '))\r\n print('La parte entera es',parte_entera(numero))\r\n print('La parte decimal es', round(numero-parte_entera(numero),3)) \r\n \r\n elif punto==6: #Interfaz del punto 6\r\n valor_prod=int(input('Ingrese el valor del producto: '))\r\n print('El valor del producto es: $', valor_prod)\r\n print('El valor del IVA es: $',valorIVA(valor_prod))\r\n print('El valor con IVA incluido es: $',valor_prod+valorIVA(valor_prod))\r\n \r\n elif punto==7: #Interfaz del punto 7\r\n radio=int(input('Ingrese el radio del circulo: '))\r\n area,perim= area_perim_circ(radio)\r\n print('El area del circulo es:',round(area,3))\r\n print('El perimetro del circulo es:',round(perim,3))\r\n \r\n elif punto==8: #Interfaz del punto 8\r\n lado=int(input('Ingrese el lado del hexagono: '))\r\n apot=int(input('Ingrese el apotema del hexagono: '))\r\n print('El area del hexagono es',area_hexag(lado, apot)) \r\n \r\n elif punto==9: #Interfaz del punto 9\r\n num1=int(input('Ingrese el primer numero: '))\r\n num2=int(input('Ingrese el segundo numero: '))\r\n num3=int(input('Ingrese el tercer numero: '))\r\n print('El promedio de los 3 numeros es:',promedio(num1, num2, num3))\r\n \r\n elif punto==10: #Interfaz del punto 10\r\n var1=int(input('Ingrese el primer valor: '))\r\n var2=int(input('Ingrese el segundo valor: '))\r\n var1,var2=intercambiar_var(var1, var2)\r\n print('Ahora el primer valor es',var1,'y el segundo es',var2) \r\n \r\n elif punto==11: #Interfaz del punto 11\r\n altura=int(input('Ingresa la altura de la que cayo el objeto: '))\r\n print('El tiempo que tardo en caer fue',tiempo_caida(altura))\r\n \r\n elif punto==12: #Interfaz del punto 12\r\n acel=int(input('Ingresa la aceleracion: '))\r\n velIn=int(input('Ingresa la velocidad inicial: '))\r\n tiempo=int(input('Ingresa el tiempo donde se encontrara el objeto: '))\r\n print('La distancia recorrida en',tiempo,'segundos es:',distancia(acel, tiempo, velIn),'metros')\r\n \r\n elif punto==13: #Interfaz del punto 13\r\n acel=int(input('Ingresa la aceleracion: '))\r\n velIn=int(input('Ingresa la velocidad inicial: '))\r\n 
tiempo=int(input('Ingresa el tiempo donde se encontrara el objeto: '))\r\n print('La velocidad final del objeto es:',velocidad_final(acel, velIn, tiempo),'m/s')\r\n \r\n elif punto==14: #Interfaz del punto 14\r\n masa=int(input('Ingresa la masa del objeto: '))\r\n vel=int(input('Ingresa la velocidad del objeto: '))\r\n print('La energia en Julios es', energia(masa, vel))\r\n \r\n elif punto==15: #Interfaz del punto 15 \r\n x1=int(input('Ingrese el valor de la primer coordenada x1: '))\r\n y1=int(input('Ingrese el valor de la primer coordenada y1: '))\r\n x2=int(input('Ingrese el valor de la segunda coordenada x2: '))\r\n y2=int(input('Ingrese el valor de la segunda coordenada y2: '))\r\n print('La distancia entre las coordenadas es:',distancia_coor(x1, y1, x2, y2))\r\n \r\n elif punto==16: #Interfaz del punto 16\r\n segundos=int(input('Ingrese la cantidad de segundos: '))\r\n print('La cantidad de segundos en horas es',segundos_a_horas(segundos) ,'hora(s)')\r\n \r\n elif punto==17: #Interfaz del punto 17\r\n segundos=int(input('Ingrese la cantidad de segundos: '))\r\n print('La cantidad de segundos en minutos es',segundos_a_minutos(segundos) ,'minuto(s)')\r\n \r\n elif punto==18: #Interfaz del punto 18\r\n segundos=int(input('Ingrese la cantidad de segundos: '))\r\n hora,minuto,segundos=segundos_a_formato(segundos)\r\n str(hora)+':'+str(minuto)+':'+str(segundos)\r\n print('La hora es:',hora,'hora(s),',minuto,'minuto(s),',segundos,'segundo(s)')\r\n print(str(hora)+':'+str(minuto)+':'+str(segundos))\r\n \r\n elif punto==19: #Interfaz del punto 19\r\n dinero_cant=int(input('Ingresa la cantidad de dinero: '))\r\n print('La cantidad de billetes en miles es:\\n'+str(billetes(dinero_cant)))\r\n \r\n elif punto==20: #Interfaz del punto 20\r\n numero=int(input('Ingresa el numero de 4 cifras a invertir: '))\r\n print('El numero invertido es:',inverso(numero))\r\n \r\n elif punto==21: #Interfaz del punto 21\r\n numero=int(input('Ingresa el numero: '))\r\n print('El numero ingresado es',par_impar(numero))\r\n\r\n elif punto==22: #Interfaz del punto 22\r\n numero=int(input('Ingresa el numero: '))\r\n print('El numero ingresado es',positivo_negativo(numero))\r\n\r\n elif punto==23: #Interfaz del punto 23\r\n numero=int(input('Ingresa el numero: '))\r\n print('El numero ingresado es',pos_neg_par_impar(numero))\r\n\r\n elif punto==24: #Interfaz del punto 24\r\n valor_prod=int(input('Ingrese el valor de la venta: '))\r\n print('El valor de la venta con IVA es:',descuento_5porc(valor_prod))\r\n\r\n elif punto==25: #Interfaz del punto 25\r\n numero=int(input('Ingresa el numero: '))\r\n if numero>=10:\r\n print('El triple del numero es:',punto25(numero))\r\n else:\r\n print('La cuarta parte del numero es:',punto25(numero))\r\n\r\n elif punto==26: #Interfaz del punto 26\r\n print('\\nIngrese la notas a continuacion de 0 a 50')\r\n n1=int(input('Nota 1: '))\r\n n2=int(input('Nota 2: '))\r\n n3=int(input('Nota 3: '))\r\n n4=int(input('Nota 4: '))\r\n n5=int(input('Nota 5: '))\r\n nota_def=nota_final(n1, n2, n3, n4, n5)\r\n print('\\nLa nota final es:',nota_def)\r\n print(aprueba_reprueba(nota_def))\r\n\r\n elif punto==27: #Interfaz del punto 27\r\n print('\\nQué número es mayor?')\r\n num1=int(input('Ingrese el primer número: '))\r\n num2=int(input('Ingrese el segundo número: '))\r\n print('El númeró mayor es:',mayor(num1, num2))\r\n\r\n elif punto==28: #Interfaz del punto 28\r\n print('\\nConvierte el numero a decimal')\r\n numero=int(input('Ingresa el numero: '))\r\n print('El decimal es',decimal(numero)) \r\n 
\r\n elif punto==29: #Interfaz del punto 29\r\n num1=int(input('Ingrese el primer numero: '))\r\n num2=int(input('Ingrese el segundo numero: '))\r\n num3=int(input('Ingrese el tercer numero: '))\r\n mayor,menor=mayor_menor(num1, num2, num3)\r\n print('El numero mayor es',mayor)\r\n print('El numero menor es',menor)\r\n \r\n elif punto==30: #Interfaz del punto 30\r\n num1=int(input('Ingrese el primer numero: '))\r\n num2=int(input('Ingrese el segundo numero: '))\r\n num3=int(input('Ingrese el tercer numero: '))\r\n print('La suma del primer y el segundo numero es',\r\n suma_mayor_menor(num1, num2, num3),'que el tercer numero')\r\n \r\n elif punto==31: #Interfaz del punto 31\r\n print('Valor del pasaje de avión')\r\n distancia=int(input('Ingrese la distancia en km a recorrer: '))\r\n dias=int(input('Ingrese los dias de estancia: '))\r\n print('El valor del pasaje de avión es $',valor_pasaje(distancia, dias))\r\n\r\n elif punto==32: #Interfaz del punto 32\r\n anio=int(input('Ingresa el año: '))\r\n print('El año', anio_bisiesto(anio))\r\n \r\n elif punto==33: #Interfaz del punto 33\r\n print('Resolución de ecuacion cuadratica ax^2+bx+c')\r\n a=int(input('Ingresa el valor de a: '))\r\n b=int(input('Ingresa el valor de b: '))\r\n c=int(input('Ingresa el valor de c: '))\r\n x1,x2=ecuacion_cuadratica(a, b, c)\r\n print(\"\"\"La solucion de las ecuaciones son:\\n\r\n x1 = {}\r\n x2 = {}\"\"\".format(x1,x2))\r\n\r\n elif punto==34: #Interfaz del punto 34\r\n usuario=input('Usuario predeterminado: ')\r\n contrasenia=int(input('Contraseña predeterminada: '))\r\n print(usuario_contrasenia(usuario, contrasenia))\r\n\r\n elif punto==35: #Interfaz del punto 35\r\n numero=int(input('Ingresa el numero de 0 a 10: '))\r\n print('El',numero,'se escribe asi:',nombre_numero(numero))\r\n\r\n elif punto==36: #Interfaz del punto 36\r\n numero=int(input('Ingresa el numero: '))\r\n print('El numero de digitos de',numero,'es',cantidad_digitos(numero))\r\n\r\n elif punto==37: #Interfaz del punto 37\r\n num1=int(input('Ingrese el primer numero: '))\r\n num2=int(input('Ingrese el segundo numero: '))\r\n num3=int(input('Ingrese el tercer numero: '))\r\n print('Los numeros',incremen_disminuyen(num1, num2, num3))\r\n\r\n elif punto==38: #Interfaz del punto 38\r\n num1=int(input('Ingrese el primer numero: '))\r\n num2=int(input('Ingrese el segundo numero: '))\r\n print('Los dos numeros se encuentras entre 0 y 5:',entre_0y5(num1, num2))\r\n\r\n elif punto==39: #Interfaz del punto 39\r\n numero=int(input('Ingresa el numero de la semana: '))\r\n dia=dias_semana(numero)\r\n if len(dia)<8:\r\n print('El dia de la semana es',dias_semana(numero)) \r\n else:\r\n print(dia)\r\n\r\n elif punto==40: #Interfaz del punto 40\r\n num1=int(input('Ingrese el primer numero: '))\r\n num2=int(input('Ingrese el segundo numero: '))\r\n num3=int(input('Ingrese el tercer numero: '))\r\n print('Entre los tres hay dos numeros por lo menos iguales:',iguales(num1, num2, num3))\r\n\r\n elif punto==41: #Interfaz del punto 41\r\n print('10 primeros numeros naturales:')\r\n numero_naturales()\r\n \r\n elif punto==42: #Interfaz del punto 42\r\n print('10 primeros numeros naturales impares:')\r\n numero_naturales_impar()\r\n \r\n elif punto==43: #Interfaz del punto 43\r\n print('10 primeros numeros naturales pares:')\r\n numero_naturales_par()\r\n\r\n elif punto==44: #Interfaz del punto 44\r\n num=int(input('Ingrese el numero: '))\r\n print('Los',num,'primeros numeros naturales:')\r\n numero_naturales_n(num)\r\n\r\n elif punto==45: #Interfaz del punto 
45\r\n num=int(input('Ingresa el numero: '))\r\n print('La secuencia es:')\r\n numero_naturales_secuencia(num) \r\n\r\n elif punto==46: #Interfaz del punto 46\r\n num1=int(input('Ingrese el primer numero: '))\r\n num2=int(input('Ingrese el segundo numero: '))\r\n print('Los numeros entre',num1,'y',num2,'son:')\r\n numero_naturales_entre(num1, num2)\r\n \r\n elif punto==47: #Interfaz del punto 47\r\n num1=int(input('Ingrese el primer numero: '))\r\n num2=int(input('Ingrese el segundo numero: '))\r\n print('La suma de los numeros entre',num1,'y',num2,'es:')\r\n print('->',suma_numeros_entre(num1, num2))\r\n\r\n elif punto==48: #Interfaz del punto 48\r\n print('Suma y promedio')\r\n suma,promedio=suma_promedio_numeros(10)\r\n print('La suma es:',suma,'\\nEl promedio es:',promedio)\r\n \r\n elif punto==49: #Interfaz del punto 49\r\n print('Suma y promedio')\r\n numero=int(input('Ingrese la cantidad de numeros a digitar: '))\r\n suma,promedio=suma_promedio_numeros(numero)\r\n print('La suma es:',suma,'\\nEl promedio es:',promedio)\r\n\r\n elif punto==50: #Interfaz del punto 50\r\n print('Promedio pares e impares')\r\n numero=int(input('Ingrese la cantidad de numeros a digitar: '))\r\n prom_par,prom_impar=promedio_par_impar(numero)\r\n print('El promedio de los numeros pares es:',prom_par,\r\n '\\nEl promedio de los numeros impares es:',prom_impar)\r\n \r\n elif punto==51: #Interfaz del punto 51\r\n valor=int(input('Ingrese un numero entero positivo: '))\r\n print('Muy bien, su numero es:',es_positivo(valor))\r\n \r\n elif punto==52: #Interfaz del punto 52\r\n numero=int(input('Ingrese la cantidad de numeros a digitar: '))\r\n mayor,menor= cantidad_mayor_menor_100(numero)\r\n print('La cantidad de numeros mayores a 100 es:',mayor,\r\n '\\nLa cantidad de numeros menores a 100 es:',menor)\r\n \r\n elif punto==53: #Interfaz del punto 53\r\n numero=int(input('Ingrese la cantidad de numeros a digitar: '))\r\n positivo,negativo,par,impar,mult_ocho=clasificar_numeros(numero)\r\n print('La cantidad de numeros positivos es:',positivo,\r\n '\\nLa cantidad de numeros negativos es:',negativo,\r\n '\\nLa cantidad de numeros pares es:',par,\r\n '\\nLa cantidad de numeros impares es:',impar,\r\n '\\nLa cantidad de numeros multiplos de 8:',mult_ocho) \r\n \r\n elif punto==54: #Interfaz del punto 54\r\n total,par,impar,num5=punto54()\r\n print('La cantidad total de numeros es:',total,\r\n '\\nLa cantidad de numeros pares es:',par,\r\n '\\nLa cantidad de numeros impares es:',impar,\r\n '\\nLa cantidad de numero 5 digitados es:',num5)\r\n \r\n elif punto==55: #Interfaz del punto 55\r\n numero=int(input('Ingrese el numero: '))\r\n print('La cantidad de factores de',numero,'ademas del mismo y 1 es:',cantidad_factores(numero))\r\n \r\n elif punto==56: #Interfaz del punto 56\r\n cadena=input('Ingrese la cadena a invertir: ')\r\n print('La cadena invertida es:\\n'+invertir_cadena(cadena))\r\n \r\n elif punto==57: #Interfaz del punto 57\r\n patron()\r\n \r\n elif punto==0:\r\n print('--------------------------------------------------------------\\nFIN DEL PROGRAMA\\n--------------------------------------------------------------')\r\n break #si se ingresa 0 rompe el ciclo y el programa finaliza\r\n \r\n else:\r\n print('El punto ingresado no existe') #imprime si se ingresa un punto que no existe \r\n 
\r\n","repo_name":"JersonPorras7D2/50ejercicios","sub_path":"30ejercicios.py","file_name":"30ejercicios.py","file_ext":"py","file_size_in_byte":31067,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"32912668445","text":"import pandas as pd\n\n\nfiles = {\"idle\": 13, \"onst\": 16}\nfilename = \"data/idle_unpca.xlsx\"\t\t\t\t\t# 改这里\nfile = filename.strip(\"data/\").strip(\"_unpca.xlsx\")\n\n\ndf = pd.read_excel(filename,\t\t\t\t\t\t# 读取excel文件\n\t\t\t\t\t\tsheet_name=\"Sheet1\",\t\t# 读取对应的sheet\n\t\t\t\t\t\theader=0, \t\t\t\t\t# 第0行为列索引\n\t\t\t\t\t\tindex_col=0,\t\t\t\t# 第0列为行索引\n\t\t\t\t\t\tnames=None,\t\t\t\t\t# 不定义DataFrame的名字\n\t\t\t\t\t\tencoding='utf8')\n\ndf2 = df[0:files[file]]\nprint(\"Spearman:\\n\", df.corr('spearman').loc[:, [\"Score\"]])\n","repo_name":"HitMasq/Sound-Quality","sub_path":"correlation_obj_subj.py","file_name":"correlation_obj_subj.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"35629294061","text":"from mrjob.job import MRJob \n\nclass MovingAverage(MRJob):\n window = 3\n\n def mapper(self, _, line):\n company, timestamp, value = line.split(',')\n\n yield company, (timestamp, float(value))\n\n def reducer(self, key, values):\n items = sorted(list(values))\n\n sum = 0.0\n for i in range(len(items)):\n item = items[i]\n timestamp = item[0]\n value = item[1]\n\n sum += value\n\n if(i + 1) > self.window:\n sum -= items[i-self.window][1]\n \n q = min(i + 1, self.window)\n \n moving = sum / q\n\n yield key, (item, moving)\n\nif __name__ == '__main__':\n MovingAverage.run()","repo_name":"synara/map-reduce","sub_path":"202104-09-10/movingavg.py","file_name":"movingavg.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"33784621251","text":"import streamlit as st\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport backend as bd\n\n# LAYOUT SETTINGS\npage_title = \"Air composition\"\npage_title_2 = \"Air Composition Forecast\"\npage_icon = \":mask:\"\nlayout = \"wide\"\n\n# APP LAYOUT\nst.set_page_config(page_title=page_title,\n page_icon=page_icon,\n layout=layout)\nst.title(page_title_2)\nst.info(\"\"\"\n**_Note:_** You can turn off and turn on lines in each graph by clicking \non the plot legend.\n\"\"\")\n\ntry:\n # CURRENT TIME\n current_time = st.session_state['current_time']\n\n # DATA\n location = st.session_state['location']\n latitude = location[0]\n longitude = location[1]\n\n # Data for graph of meteo forecast\n air_data = bd.get_air_composition_data(latitude, longitude)\n air_dates = air_data['hourly']['time']\n pm10 = air_data['hourly']['pm10']\n pm2_5 = air_data['hourly']['pm2_5']\n carbon_monoxide = air_data['hourly']['carbon_monoxide']\n nitrogen_dioxide = air_data['hourly']['nitrogen_dioxide']\n sulphur_dioxide = air_data['hourly']['sulphur_dioxide']\n ozone = air_data['hourly']['ozone']\n pollen_dates = air_data['hourly']['time'][:96]\n alder_pollen = air_data['hourly']['alder_pollen']\n birch_pollen = air_data['hourly']['birch_pollen']\n grass_pollen = air_data['hourly']['grass_pollen']\n mugwort_pollen = air_data['hourly']['mugwort_pollen']\n olive_pollen = air_data['hourly']['olive_pollen']\n ragweed_pollen = air_data['hourly']['ragweed_pollen']\n european_aqi = air_data['hourly']['european_aqi']\n\n # Create air composition plot\n air_fig = go.Figure()\n # Add pm10\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=pm10,\n name=\"pm10\",\n marker_color=px.colors.qualitative.G10[3]\n )\n )\n # Add pm2.5\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=pm2_5,\n name=\"pm2.5\",\n marker_color=px.colors.qualitative.G10[7]\n )\n )\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=carbon_monoxide,\n name=\"carbon monoxide\",\n marker_color=px.colors.qualitative.G10[5]\n )\n )\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=nitrogen_dioxide,\n name=\"nitrogen dioxide\",\n marker_color=px.colors.qualitative.G10[6]\n )\n )\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=sulphur_dioxide,\n name=\"sulphur dioxide\",\n marker_color=px.colors.qualitative.G10[2]\n )\n )\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=ozone,\n name=\"ozone\",\n marker_color=px.colors.qualitative.G10[9]\n )\n )\n # Add current time\n air_fig.add_vline(x=current_time,\n line_width=1,\n line_dash=\"dot\",\n line_color=\"red\"\n )\n # Update axis names, hover, legend\n air_fig.update_layout(\n title=dict(text=\"Air composition (μg/m³)\"),\n xaxis_title=\"Date\",\n yaxis_title=\"μg/m³\",\n hovermode='x',\n legend=dict(orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n )\n )\n\n # Create pollen situation plot\n pollen_fig = go.Figure()\n pollen_fig.add_trace(\n go.Scatter(x=pollen_dates, y=alder_pollen,\n name=\"alder pollen\",\n marker_color=px.colors.qualitative.Dark2[7]\n )\n )\n pollen_fig.add_trace(\n go.Scatter(x=pollen_dates, y=birch_pollen,\n name=\"birch pollen\",\n marker_color=px.colors.qualitative.Dark2[6]\n )\n )\n pollen_fig.add_trace(\n go.Scatter(x=pollen_dates, y=grass_pollen,\n name=\"grass pollen\",\n marker_color=px.colors.qualitative.Dark2[0]\n )\n )\n pollen_fig.add_trace(\n go.Scatter(x=pollen_dates, y=olive_pollen,\n name=\"olive pollen\",\n marker_color=px.colors.qualitative.Dark2[4]\n )\n )\n pollen_fig.add_trace(\n 
go.Scatter(x=pollen_dates, y=mugwort_pollen,\n name=\"mugwort pollen\",\n marker_color=px.colors.qualitative.Dark2[1]\n )\n )\n pollen_fig.add_trace(\n go.Scatter(x=pollen_dates, y=ragweed_pollen,\n name=\"ragweed pollen\",\n marker_color=px.colors.qualitative.Dark2[3]\n )\n )\n # Add current time\n pollen_fig.add_vline(x=current_time,\n line_width=1,\n line_dash=\"dot\",\n line_color=\"red\"\n )\n # Update axis names, hover, legend\n pollen_fig.update_layout(\n title=dict(text=\"Pollen in the Air (grains/m³)\"),\n xaxis_title=\"Date\",\n yaxis_title=\"grains/m³\",\n hovermode='x',\n legend=dict(orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n )\n )\n\n # Show plot in app and set auto resize with browser window\n st.plotly_chart(air_fig, use_container_width=True)\n st.info(\"\"\"\n Pollen forecast is only available for Europe during pollen season.\"\"\")\n st.plotly_chart(pollen_fig, use_container_width=True)\nexcept (KeyError) as error:\n st.markdown(\"Go to the main page and **select location**!\")\n\n","repo_name":"ivan4an/web-forecast-app","sub_path":"pages/3_Air_composition_forecast.py","file_name":"3_Air_composition_forecast.py","file_ext":"py","file_size_in_byte":6024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"14882768516","text":"import yaml\nimport random\nwords = {}\n\nclass WordPair(object):\n def __init__(self, ger, eng):\n self.ger = ger\n self.eng = eng\n\ndef load_words():\n words = list()\n with open('words.yaml') as f:\n d = yaml.load(f)\n for k, v in d.items():\n words.append(WordPair(k,v))\n return words\n\n\ndef main():\n d = load_words()\n limit = len (d)\n for i in range(10):\n print(\"\\n - - - - - - - - - - -\")\n print(i+1)\n x = random.randrange(0, limit)\n print(\"->Eng: \" + d[x].eng)\n input(\"Guess: \")\n print(\"-=n:\n if arr[i]<=arr[j]:\n arr[i],arr[j]=arr[j],arr[i]\n else:\n largest=i\n if arr[i]<=arr[j]:\n largest=j\n if arr[largest] a[indeks + 1]:\n a[indeks], a[indeks + 1] = a[indeks + 1], a[indeks]\n swapped = True\n\n if swapped is False:\n break\n\nprint(a)\nprint(\"Jumlah perulangannya:\", loop_count)\nproses = len(a) - 1\nprint(\"Jumlah proses yang terjadi:\", proses)\n# while proses > 0:\n","repo_name":"alvxyz/PythonClass","sub_path":"Struktur Data (Pak Bowo)/AlvianTeddyCahyaPutra_Jobsheet3_BubbleSort/AlvianTeddyCahyaPutra_Jobsheet3_ShortBubbleSort.py","file_name":"AlvianTeddyCahyaPutra_Jobsheet3_ShortBubbleSort.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"36807492545","text":"from torch.utils.data import Dataset\nimport numpy as np\nimport cv2\n\nfrom PIL import Image\n\nfrom utils.proc import sample_cnt, sample_cnt_with_idx \n\n\nclass ValDataset(Dataset):\n def __init__(self, dataset, opt):\n self.dataset = dataset\n self.opt = opt\n\n if self.dataset.anno_type == 'pointset':\n self.read_anno = self._read_pointset\n elif self.dataset.anno_type == 'mask':\n self.read_anno = self._read_mask\n \n def __getitem__(self, idx):\n seq = self.dataset[idx]\n\n img_files, anno_files, others = seq\n if 'init_path' in others:\n init_path = others['init_path']\n\n if 'idx_path' in others:\n idx_path = others['idx_path']\n idx = np.loadtxt(idx_path)\n else:\n idx = None\n \n init = self.read_anno(init_path)\n\n if not idx is None:\n init, idx = sample_cnt_with_idx(init, idx, self.opt.num_cp)\n else:\n init = sample_cnt(init, self.opt.num_cp)\n\n imgs = []\n annos = []\n\n for img_path in img_files:\n img = Image.open(img_path)\n img = np.array(img)\n imgs.append(img)\n\n for anno_path in anno_files:\n if anno_path:\n anno = self.read_anno(anno_path)\n else:\n anno = []\n annos.append(anno)\n\n others['img_files'] = img_files\n others['anno_files'] = anno_files \n\n return imgs, annos, init, idx, others\n\n\n @staticmethod\n def _read_pointset(txt_path):\n return np.loadtxt(txt_path)\n\n @staticmethod\n def _read_mask(self, png_path):\n mask = Image.open(png_path)\n cnt, _ = cv2.findContours(mask, \n cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_NONE)\n if not cnt:\n return []\n\n if len(cnt) > 1 or len(cnt[0]) > 1:\n raise NotImplementedError('more than 1 contour in a mask is not supported')\n \n return cnt[0][0]\n\n def __len__(self):\n return len(self.dataset)\n\n \n\n\n\n","repo_name":"ghnam-ken/PoST","sub_path":"dataset/val_dataset.py","file_name":"val_dataset.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"}
+{"seq_id":"25365050229","text":"import json\n\nfrom classes.room import Room\nfrom classes.teacher import Teacher\n\n\nwith open(\"data/teachers.json\", \"r\") as read_file:\n teacher_data = json.load(read_file)\n\nwith open(\"data/rooms.json\", \"r\") as read_file:\n room_data = json.load(read_file)\n\nwith open(\"data/class_sets.json\", \"r\") as read_file:\n class_set_data = json.load(read_file)\n\nTEACHERS = [\n Teacher(v[\"Name\"], v[\"Subjects\"], v[\"PreferredRoom\"]) for v in teacher_data.values()\n]\n\nROOMS = [\n Room(v[\"RoomNumber\"], v[\"Available\"], v[\"Subjects\"], v[\"Capacity\"])\n for v in room_data.values()\n]\n\nCLASS_SETS = [v for v in class_set_data.values()]\n","repo_name":"braddotcoffee/timetablegenerator","sub_path":"controllers/data_controller.py","file_name":"data_controller.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"1289380763","text":"import numpy as np\nimport os\nimport grn_sim as sim\n\nfrom matplotlib import rc, gridspec\nimport matplotlib.pyplot as plt\n\nrc('font', **{'family':'serif','serif':['Palatino']})\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n\nrc('text',usetex=True)\nrc('text.latex', preamble=r'\\usepackage{amssymb}') \n\n# dynamics\ng1a=lambda g2,m1,m2,tau: m1*tau/(1+g2*g2)\ng1b=lambda g2,m1,m2,tau: np.sqrt(m2*tau/g2-1)\n\ng1dot = lambda g1,g2, m1, m2, tau: m1/(1+g2*g2)-g1/tau\ng2dot = lambda g1,g2, m1, m2, tau: m2/(1+g1*g1)-g2/tau\n\n#solve for stuff\nsn_m1 = np.arange(2,4.1,0.01)\npf_tau = np.arange(0.1,4.2,0.02)\nsn_g1 = [sim.get_yss(3, m1, 1, False) for m1 in sn_m1]\npf_g1 = [sim.get_yss(1,1,tau,False) for tau in pf_tau]\n\nsn_zs = [np.sort(z) for z in sn_g1]\nsn_n0 = np.array([z[0] for z in sn_zs])\nsn_n1 = np.array([z[2] for z in sn_zs if len(z)>1])\nsn_s = np.array([z[1] for z in sn_zs if len(z)>1])\nsn_s_m1 = np.array([sn_m1[i] for i in range(sn_m1.shape[0]) if sn_g1[i].shape[0]>1])\n\n\npf_zs = [np.sort(z) for z in pf_g1]\npf_n0 = np.array([z[0] for z in pf_zs])\npf_n1 = np.array([z[2] for z in pf_zs if len(z)>1])\npf_s = np.array([z[1] for z in pf_zs if len(z)>1])\npf_s_tau = np.array([pf_tau[i] for i in range(pf_tau.shape[0]) if pf_g1[i].shape[0]>1])\n\n#### the figure ####\n### plt.style.reload_library()\nplt.style.use('sty/one_col_fig.mplstyle')\n# nr = 90\n# nc = 45\n\nspc_ht = 3\ntau_ht = 3\nm1_ht = 7\n\nmarg_wd = 3\nm1_wd = 9\nspc_wd = 2\ntau_wd = 9\n\n# row heights\nhts = np.array([\n\n tau_ht,\n tau_ht,\n tau_ht,\n spc_ht,\n m1_ht\n])\n\nwds = np.array([\n marg_wd,\n m1_wd,\n spc_wd,\n tau_wd\n])\n\nrs = np.cumsum(hts) # starting rows\ncs = np.cumsum(wds) # starting cols\n\nnr = np.sum(hts)\nnc = np.sum(wds)\n\nwid = 8.7/2.54\nht = wid*nr/nc\n\nfig = plt.figure(figsize=(wid, ht), dpi=200)\n\ngs = gridspec.GridSpec(nr, nc, hspace=0)\n\n# g1 vs g2\n#axA = plt.subplot( gs[0 :rs[2], cs[0]:cs[1]]) # m1\naxA1 = plt.subplot( gs[0 :rs[0], cs[0]:cs[1]]) # m1\naxA2 = plt.subplot( gs[rs[0]:rs[1], cs[0]:cs[1]]) # m1\naxA3 = plt.subplot( gs[rs[1]:rs[2], cs[0]:cs[1]]) # m1\n\naxB1 = plt.subplot( gs[0 :rs[0], cs[2]:cs[3]]) # tau\naxB2 = plt.subplot( gs[rs[0]:rs[1], cs[2]:cs[3]]) # tau\naxB3 = plt.subplot( gs[rs[1]:rs[2], cs[2]:cs[3]]) # tau\n\naxC = plt.subplot( gs[rs[3]:rs[4], cs[0]:cs[1]]) # g1 vs m1\naxD = plt.subplot( gs[rs[3]:rs[4], cs[2]:cs[3]]) # g1 vs m1\n\ncaps = ['A','B','C','D']\nri = [0,0,rs[3],rs[3]]\nci = [0,cs[1],0,cs[1]]\nys = [1,1,2.5,2.5]\nfor i in range(len(caps)):\n\n cap_ax=plt.subplot(gs[ri[i]:ri[i]+1,ci[i]:ci[i]+1])\n cap_ax.text(s=caps[i],\n x=0,y=ys[i],fontsize=14, verticalalignment='top',horizontalalignment='left')\n cap_ax.axis('off')\n\n\n#########################################\n###### A-B: phase diagrams ##############\n#########################################\ntaus = np.array([[1,1,1],[0.5,2,4]])\nm1s = np.array([[2,3,6],[1,1,1]])\nm2s = np.array([[3,3,3],[1,1,1]])\n\nlss = ['-','--']\n\naxs = [[axA1,axA2,axA3],[axB1, axB2, axB3]]\n\nming1 = 0.1\nmaxg1 = 10\nming2 = 0.03\nmaxg2 = 5\nnv1d = 20\nnpts = 100\n\n# g1s = np.linspace(0.1,4,100)\n# g2s = np.linspace(0.1,4,100)\n\n#g1s = np.linspace(ming1,maxg1,npts)\ng2s = np.linspace(ming2,maxg2,npts)\n\nxs = np.linspace(ming2, maxg2, nv1d)\nys = np.linspace(ming1, maxg1, nv1d)\n\ng2s = np.logspace(np.log10(ming2),np.log10(maxg2),npts)\nxs = np.logspace(np.log10(ming2)-1, np.log10(maxg2)+1, nv1d)\nys = np.logspace(np.log10(ming1)-1, np.log10(maxg1)+1, nv1d)\n\nxx, yy = 
np.meshgrid(xs,ys)\nxxf = xx.reshape(-1)\nyyf = yy.reshape(-1)\nrs = np.vstack([xxf,yyf]).T\nzfills = ['none','none','none']\nzmarks = ['o','s','o']\nfc = ['gray','none','gray']\nfor i in range(len(axs)):\n for j in range(len(axs[i])):\n ax = axs[i][j]\n tau = taus[i,j]\n m1 = m1s[i,j]\n m2 = m2s[i,j]\n g2i = np.where(m2*tau/g2s>=1)[0]\n\n ncg2 = g2s[g2i]\n ncg1a = g1a(ncg2, m1, m2, tau)\n ncg1b = g1b(ncg2, m1, m2, tau)\n\n ax.plot(ncg2, ncg1a, color = 'r', ls = lss[0],lw=1,zorder=1)\n ax.plot(ncg2, ncg1b, color = 'b', ls = lss[1],lw=1,zorder=1)\n\n g2zs = np.sort(sim.get_yss(m1,m2,tau,False))\n g1zs = g1a(g2zs, m1, m2, tau)\n\n # plot the zeros\n for k in range(g2zs.shape[0]):\n ax.plot(g2zs[k],g1zs[k],color='k',marker=zmarks[k],\n markeredgewidth=1,markersize=4,alpha=1,zorder=2,markerfacecolor=fc[k])#fillstyle=zfills[k],\n\n # plot the vector field\n uu = g2dot(yy,xx,m1,m2,tau)\n vv = g1dot(yy,xx,m1,m2,tau)\n uvnorm = np.sqrt(uu*uu + vv*vv)\n uuh = uu/uvnorm\n vvh = vv/uvnorm\n ax.quiver(xxf, yyf, uuh, vvh, color = 'k', width=0.004,\n headwidth=5, headlength=4,alpha=0.55,pivot='tip',scale=20,zorder=0)\n\n skip = 10\n ncg1vs = np.array([ncg1a[::skip], ncg1b[::skip]])\n ncg2v = ncg2[::skip]\n for k in range(ncg1vs.shape[0]):\n ncg1v = ncg1vs[k]\n uu = g2dot(ncg1v,ncg2v,m1,m2,tau)\n vv = g1dot(ncg1v,ncg2v,m1,m2,tau)\n uvnorm = np.sqrt(uu*uu + vv*vv)\n uuh = uu/uvnorm\n vvh = vv/uvnorm\n ax.quiver(ncg2v, ncg1v, uuh, vvh, color = 'k', width=0.004,\n headwidth=5, headlength=4,alpha=1,pivot='tip',scale=20,zorder=3)\n\n # format\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim(ming2,maxg2)\n ax.set_ylim(ming1,maxg1)\n\n\n if i==1:\n ax.set_yticklabels([])\n label=r'$k_D=${0:.2f}'.format(1/tau)\n else:\n label=r'$m_1=${0}'.format(m1)\n\n if j>0:\n ax.set_yticks([0.1,1])\n ax.set_yticklabels([\"0.1\",\"1\"])\n else:\n ax.set_yticks([0.1,1,10])\n ax.set_yticklabels([\"0.1\",\"1\",\"10\"])\n\n props = dict(boxstyle='round,pad=0.01', facecolor='wheat', alpha=0.5,ec='none')\n# ax.text(x=0.98,y=0.95,s=label,transform=ax.transAxes,\n# verticalalignment='top', horizontalalignment='right',bbox=props,fontsize=6)\n ax.text(x=0.02,y=0.02,s=label,transform=ax.transAxes,\n verticalalignment='bottom', horizontalalignment='left',bbox=props,fontsize=6)\n\n if j==2:\n ax.set_xticks([0.1,1])\n ax.set_xticklabels([\"0.1\",\"1\"])\n\n\n\naxA3.set_xlabel(r'$g_2$',labelpad=-4)\naxB3.set_xlabel(r'$g_2$',labelpad=-4)\naxA2.set_ylabel(r'$g_1$')\n\n########################################\n#######C: saddle node g1################\n########################################\naxC.plot(sn_m1, sn_n0,'ko',markersize=0.5)\naxC.plot(sn_s_m1, sn_n1,'ko', markersize=0.5)\naxC.plot(sn_s_m1, sn_s,'k--',fillstyle='none')\n\naxC.set_xlabel(r'$m_1$')\naxC.set_ylabel(r'$g_1$')\n########################################\n#######D: pitchfork g1################\n########################################\naxD.plot(pf_tau, pf_n0,'ko',markersize=0.5)\naxD.plot(pf_s_tau, pf_n1,'ko',markersize=0.5)\naxD.plot(pf_s_tau, pf_s,'k--',fillstyle='none')\n\naxD.set_xlabel(r'$1/k_D$')\n\nfigdir = 'figs'\nos.makedirs(figdir, exist_ok=True)\nplt.savefig('{0}/figS2_dyn_syss.pdf'.format(figdir), bbox_inches='tight')\n","repo_name":"Simfreed/sc_bifurc_figs","sub_path":"python/phase_planes.py","file_name":"phase_planes.py","file_ext":"py","file_size_in_byte":7176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"8031809410","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 2 19:28:53 2015\n\n@author: florian\n\"\"\"\n\n\"\"\"\nL'objectif est de générer un fichier de données sur le prix des Renault Zoé \nsur le marché de l'occasion en Ile de France, PACA et Aquitaine. \nVous utiliserezleboncoin.fr comme source. Le fichier doit être propre et contenir \nles infos suivantes : version ( il y en a 3), année, kilométrage, prix, \ntéléphone du propriétaire, est ce que la voiture est vendue par un professionnel ou un particulier.\nVous ajouterez une colonne sur le prix de l'Argus du modèle \nque vous récupérez sur ce site http://www.lacentrale.fr/cote-voitures-renault-zoe--2013-.html.\n\nLes données quanti (prix, km notamment) devront être manipulables (pas de string, pas d'unité).\nVous ajouterez une colonne si la voiture est plus chere ou moins chere que sa cote moyenne.\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\ndef getSoupFromUrl(url):\n #Execute q request toward Youtube\n request = requests.get(url)\n #parse the restult of the request\n soup = BeautifulSoup(request.text, 'html.parser')\n return soup\n \ndef extractIntFromText(text):\n return int(text.replace(\" \",\"\"))\n\nZoeLeBoncoin = pd.DataFrame(columns = ['titre', 'reg', 'version', 'année','km','prix','tél','vendeur','argus'])\n\ntypeAcheteur = {\"professionnel\":\"c\",\"particulier\":\"p\"}\nregion = [\"ile_de_france\",\"provence_alpes_cote_d_azur\",\"aquitaine\"]\n \nurl_liste = 'http://www.leboncoin.fr/voitures/offres/ile_de_france/?q=zoe&f=c'\nsoup = getSoupFromUrl(url_liste)\nuneAnnonce={}\nuneAnnonceList=[]\n\nentries = soup.find(\"div\", { \"class\" : \"list-lbc\" }).find_all(\"a\") \nfor entry in entries:\n if \"zoe\" in entry['title'].lower():\n uneAnnonce[\"titre\"]=entry['title']\n uneAnnonce[\"region\"]=\"ile de France\"\n uneAnnonce[\"type vendeur\"]=\"Pro\"\n url_annonce = entry['href']\n soupAnnonce = getSoupFromUrl(url_annonce)\n uneAnnonce[\"prix en euros\"] = int(soupAnnonce.find(\"div\", { \"class\" : \"lbcParams withborder\" }).find(\"span\", { \"class\" : \"price\" })['content']) \n uneAnnonce[\"année\"] = int(soupAnnonce.find(\"div\", { \"class\" : \"lbcParams criterias\" }).find(\"td\", { \"itemprop\" : \"releaseDate\" }).get_text())\n uneAnnonce[\"kimometrage\"] = extractIntFromText(soupAnnonce.find(\"div\", { \"class\" : \"lbcParams criterias\" }).select(\"tr:nth-of-type(3) > td:nth-of-type(1)\")[0].text[:-3])\n \n uneAnnonce[\"description\"]=soupAnnonce.find(\"div\", { \"itemprop\" : \"description\"}).get_text() \n if \"life\" in uneAnnonce[\"description\"].lower():\n uneAnnonce[\"version\"]=\"LIFE CHARGE RAPIDE\"\n elif \"intens\" in uneAnnonce[\"description\"].lower():\n uneAnnonce[\"version\"]=\"INTENS CHARGE RAPIDE\" \n elif \"zen\" in uneAnnonce[\"description\"].lower():\n uneAnnonce[\"version\"]=\"ZEN CHARGE RAPIDE\"\n else:\n uneAnnonce[\"version\"] = \"VERSION INCONNUE\"\n \n uneAnnonceList = pd.DataFrame({'titre':uneAnnonce[\"titre\"],'reg':uneAnnonce[\"region\"],'version':uneAnnonce[\"version\"],'année':uneAnnonce[\"année\"],'km':uneAnnonce[\"kimometrage\"],'prix':uneAnnonce[\"prix en euros\"],'tél':\"\",'vendeur':uneAnnonce[\"type vendeur\"],'argus':\"\"},index=[0]) \n ZoeLeBoncoin=pd.concat([ZoeLeBoncoin,uneAnnonceList])\n ZoeLeBoncoin.reset_index()\n\n\n# GESTION DE L4ARGUS\nurlargus=\"http://www.lacentrale.fr/cote-voitures-renault-zoe--2013-.html\"\nsoupargus = getSoupFromUrl(urlargus)\ndicoargus={}\nentries=soupargus.find(\"div\",{\"id\" : 
\"listing_quot\"}).find_all(\"a\",{\"style\" : \"color:#007EFF; text-decoration:underline\"})\nfor entry in entries:\n soupargusmodele = getSoupFromUrl(\"http://www.lacentrale.fr/\"+entry[\"href\"])\n coteargus=extractIntFromText(soupargusmodele.find(\"span\",{\"class\":\"Result_Cote arial tx20\"}).text[:-2])\n dicoargus[entry.text]=coteargus\ncoteArgusData=pd.DataFrame(dicoargus,index=[\"cote argus\"]).T\n\netudefinale = pd.merge(ZoeLeBoncoin,coteArgusData,left_on=\"version\",right_index=True,how=\"left\" )\netudefinale['position_argus']=etudefinale.apply(lambda x:\"supérieur à l'argus\" if x[\"prix\"]>x[\"cote argus\"] else \"inferieur à l'argus\",axis=1)\nprint (etudefinale)\n \n \n\n \n \n \n \n ","repo_name":"rachidalili/MS-BGD2015","sub_path":"florian-firmin/Lesson4/exo_dom_Lesson04.py","file_name":"exo_dom_Lesson04.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"fr","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"}
+{"seq_id":"29290233806","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom math import *\nimport csv\nfrom plot_fig import Plot_fig as PF\n\n\nclass Path:\n\tdef __init__(self, point_gap):\n\t\tself.POINT_GAP = point_gap # 経路点列の幅[m]\n\n\t\tself.path_x = []\n\t\tself.path_y = []\n\t\t# self.path_yaw = []\n\t\n\t# 経路点列に点を追加\n\tdef add_point(self, x, y, col='r'):\n\t\t# 点描画\n\t\tpf.make_point(x, y, col)\n\t\t# 点追加\n\t\tself.path_x.append(x)\n\t\tself.path_y.append(y)\n\n\t# 経路点列に直線上の点列を追加(開始点はプロットされるが,終了点はされない)\n\tdef add_line(self, x1, y1, x2, y2, col='r'):\n\t\t# 2点間の線分表示\n\t\tpf.make_line(x1, y1, x2, y2, col)\n\n\t\t# 距離,傾きの計算\n\t\tdist = sqrt((x2-x1)**2 + (y2-y1)**2)\n\t\tslope = atan2(y2-y1, x2-x1)\n\t\t\n\t\t# 等間隔点列生成&表示\n\t\tx = []\n\t\ty = []\n\t\tfor i in range(int(dist/self.POINT_GAP) + 1):\n\t\t\t# 点列生成\n\t\t\tx.append(x1 + self.POINT_GAP*cos(slope)*i)\n\t\t\ty.append(y1 + self.POINT_GAP*sin(slope)*i)\n\t\t\t# 点列を経路に追加\n\t\t\tself.path_x.append(x[i])\n\t\t\tself.path_y.append(y[i])\n\t\t\t# 点列表示\n\t\t\tpf.make_point(x[i],y[i],col) \n\n\t# CSVファイル出力\n\tdef make_csv(self, filename):\n\t\twith open(filename,'w') as csvfile:\n\t\t\twriter = csv.writer(csvfile, lineterminator='\\n')\n\n\t\t\tfor i in range(len(self.path_x)):\n\t\t\t\twriter.writerow([self.path_x[i], self.path_y[i]])\n\n\n# 直線上の経路点列の幅[m]\nPOINT_GAP = 0.01 # 経路点列の点の幅[m]\n\n# 座標\nP_TR_START = (11.4, 0.5)\nP_DR_START = (0.5, 5.425)\nP_DR_RETRY = (5.425, 2.45)\n\npf = PF()\n\ndef main():\n\tpf.make_point(*P_TR_START,'r')\n\n\t# 直線\n\tpath1 = Path(POINT_GAP)\n\t# path1.add_line(*P_DR_START, *(2, 2), 'b')\n\t# path1.add_line(*(2, 2), *(4, 0.75), 'b')\n\t# path1.add_line(*(4, 0.75), *(5.425, 0.75), 'b')\n\t# path1.add_line(*(5.425, 0.75), *P_DR_RETRY, 'b')\n\t# path1.add_point(*P_DR_RETRY, 'b')\n\t# path1.make_csv('../pathes/dr_st_rt.csv')\n\n\t# path1.add_line(*P_DR_RETRY,*(5.425, 5.4), 'b')\n\t# path1.add_point(*(5.425, 5.4), 'b')\n\t# path1.add_line(*(0,0),*(0,3), 'b')\n\t# path1.add_line(*(3,0),*(3,3), 'b')\n\t# path1.add_line(*(3,3),*(0,3), 'b')\n\t# path1.add_line(*(0,3),*(0,0), 'b')\n\t# path1.add_point(*(0, 3), 'b')\n\n\t# path1.add_line(*P_DR_RETRY, *(4.75, 3.45), 'b')\n\t# path1.add_line(*(4.75, 3.45), *(4.75, 5.95), 'b')\n\t# path1.make_csv('../pathes/dr_rt_type3.csv')\n\n\t# path1.add_line(*P_DR_RETRY, *(4.75, 3.45), 'b')\n\t# path1.add_line(*(4.75, 3.45), *(4.75, 8.45), 'b')\n\t# path1.make_csv('../pathes/dr_rt_type2_oku.csv')\n\n\t# path1.add_line(*P_DR_RETRY, *(4.75, 3.45), 'b')\n\t# path1.add_line(*(4.75, 3.45), *(5.25, 3.45), 'b')\n\t# path1.make_csv('../pathes/dr_rt_type2_temae.csv')\n\n\t# path1.add_line(*(4.75, 8.45), *(4.75, 3.45), 'b')\n\t# path1.make_csv('../pathes/dr_type2_oku_type2_temae.csv')\n\n\tpf.show()\n\n\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"tsukurobo/abu2021","sub_path":"old_packages/auto_drive/make_path/make_path.py","file_name":"make_path.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"17987397045","text":"#! /usr/bin/env python3\nimport rospy\nimport tf2_ros\nfrom std_msgs.msg import String\nfrom std_srvs.srv import Empty, EmptyResponse, EmptyRequest, Trigger, TriggerRequest\nfrom geometry_msgs.msg import PoseStamped, TwistStamped, Vector3, TransformStamped, Quaternion\nfrom trajectory_msgs.msg import MultiDOFJointTrajectory, MultiDOFJointTrajectoryPoint\nfrom nav_msgs.msg import Path\nfrom mavros_msgs.msg import State, ExtendedState\nfrom vertical_aam.srv import *\nfrom transitions import Machine\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nfrom trajectory import trajectory\n\n#todo: fix velocity ffs\n\nclass printStateMachine(object):\n states = ['Ground', 'Takeoff', 'Scan', 'Print', 'Land', 'Manual', 'Loiter']\n\n transitions = [\n {'trigger': 'startTakeoff', 'source': ['Ground', 'Manual'], 'dest': 'Takeoff', 'before': 'on_startTakeoff'},\n {'trigger': 'startScan', 'source': ['Takeoff', 'Manual'], 'dest': 'Scan', 'before': 'on_startScan'},\n {'trigger': 'startPrint', 'source': 'Loiter', 'dest': 'Print', 'before': 'on_startPrint'},\n {'trigger': 'endPrint', 'source': 'Print', 'dest': 'Scan', 'before': 'on_endPrint'},\n {'trigger': 'startLoiter', 'source': 'Scan', 'dest': 'Loiter', 'before': 'on_startLoiter'},\n {'trigger': 'startLanding', 'source': '*', 'dest': 'Land'},\n {'trigger': 'finishLanding', 'source': ['Land', 'Manual'], 'dest': 'Ground'},\n {'trigger': 'manualTakeover', 'source': '*', 'dest': 'Manual', 'before': 'on_manualTakeover'},\n {'trigger': 'switchToGround', 'source': ['Manual', 'Landing'], 'dest': 'Ground' },\n ]\n \n def __init__(self):\n # get config parameters from parameter server\n self.rate = rospy.get_param('/print_planner/setpoint_rate')\n self.tol_speed = rospy.get_param('/print_planner/tol_speed')\n self.takeoff_hgt = rospy.get_param('/print_planner/tol_height') \n self.scan_hgt = rospy.get_param('/print_planner/scan_height') \n self.pause_before_print = 1.0 \n self.scan_time = 5 \n\n self.yaw = 0.0\n self.tooltip_state = \"RETRACTED\"\n self.tooltip_pose = PoseStamped()\n self.tooltip_pose.header.frame_id = \"map\"\n self.tooltip_twist = TwistStamped()\n self.tooltip_twist.header.frame_id = \"map\"\n \n self.pose = PoseStamped()\n self.pose.header.frame_id = \"map\"\n self.velocity = TwistStamped()\n self.velocity.header.frame_id = \"map\"\n self.acceleration = TwistStamped()\n self.acceleration.header.frame_id = \"map\"\n\n self.trajectory = trajectory()\n self.tooltip_trajectory = trajectory()\n self.operator_confirmed = False\n\n\n # initiate state machine model with states and transitions listed above\n self.machine = Machine(model=self, states=self.states, transitions=self.transitions, initial = 'Ground')#, on_exception='on_exception')\n\n # publisher for on-board position controller\n self.sp_position_pub = rospy.Publisher(\n '/setpoint/pose', PoseStamped, queue_size=1, tcp_nodelay=True)\n self.sp_vel_pub = rospy.Publisher(\n '/setpoint/vel', TwistStamped, queue_size=1, tcp_nodelay=True)\n\n # publish current state for debugging\n self.pub_drone_state = rospy.Publisher(\n '/printer/state', String, queue_size=1, tcp_nodelay=True)\n\n # publishers to manipulator\n self.pub_tooltip_state = rospy.Publisher(\n '/manipulator/state', String, queue_size=1, tcp_nodelay=True)\n self.pub_tooltip_pose = rospy.Publisher(\n '/tooltip_setpoint/pose', PoseStamped, queue_size=1, tcp_nodelay=True)\n self.pub_tooltip_twist = rospy.Publisher(\n '/tooltip_setpoint/velocity', TwistStamped, queue_size=1, 
tcp_nodelay=True) \n\n # vizualisation publishers\n self.traj_viz_pub = rospy.Publisher(\n '/printer/drone_trajectory', Path, queue_size=1) \n self.tip_traj_viz_pub = rospy.Publisher(\n '/printer/tooltip_trajectory', Path, queue_size=1) \n\n # drone state subscriber\n state_sub = rospy.Subscriber(\n '/mavros/state', State, self._state_cb, queue_size=5, tcp_nodelay=True)\n ext_state_sub = rospy.Subscriber(\n '/mavros/extended_state', ExtendedState, self._ext_state_cb, queue_size=5, tcp_nodelay=True)\n local_position_sub = rospy.Subscriber(\n '/mavros/local_position/pose', PoseStamped, self._local_pos_cb, queue_size=1, tcp_nodelay=True)\n local_velocity_sub = rospy.Subscriber(\n '/mavros/local_position/velocity_body', TwistStamped, self._local_vel_cb, queue_size=1, tcp_nodelay=True)\n \n authorisation_service = rospy.Service('start_layer', Empty, self.authorisation_srv)\n\n # wait for drone to come online\n rospy.wait_for_message('/mavros/state', State)\n rospy.wait_for_message('/mavros/extended_state', ExtendedState)\n rospy.wait_for_message('/mavros/local_position/pose', PoseStamped)\n rospy.wait_for_service('generate_layer')\n \n # timer callback to send setpoints at a reasonable rate \n sp_timer = rospy.Timer(rospy.Duration(1.0/self.rate), self._timer_cb, reset=True)\n\n # initiate landing position at location where node is started\n self.pad_pose = PoseStamped()\n self.pad_pose = self.local_pose\n self.pad_pose.pose.position.z = self.takeoff_hgt\n rospy.loginfo(\"Landing site initiated at x=\" + str(self.pad_pose.pose.position.x) +\n \", y=\" + str(self.pad_pose.pose.position.y) + \".\")\n \n self.scan_start = self.pad_pose\n self.scan_start.pose.position.z = self.scan_hgt\n \n self.tfBuffer = tf2_ros.Buffer(rospy.Duration(20.0))\n listener = tf2_ros.TransformListener(self.tfBuffer)\n\n def authorisation_srv(self, req):\n self.operator_confirmed = True\n resp = EmptyResponse()\n return resp\n\n #--------------------------------------------------------------------------------------------------------------\n #callbacks on state transitions\n\n def on_startTakeoff(self):\n rospy.loginfo(\"Takeoff initiated\")\n self.pad_pose = PoseStamped()\n self.pad_pose = self.local_pose\n self.pad_pose.pose.position.z = self.takeoff_hgt\n\n self.scan_start = self.pad_pose\n self.scan_start.pose.position.z = self.scan_hgt\n \n rospy.loginfo(\"Landing site updated at x=\" + str(self.pad_pose.pose.position.x) +\n \", y=\" + str(self.pad_pose.pose.position.y) + \".\")\n\n def on_startScan(self):\n # reset aft_pgo_map here\n self.trajectory.reset()\n self.trajectory.transition(self.pose, self.scan_start)\n self.tooltip_trajectory.pause(self.scan_start, self.scan_time)\n self.trajectory.publish_viz_trajectory(self.traj_viz_pub)\n call_scan_reset_service()\n\n def on_startPrint(self):\n pause_time = self.pause_before_print\n\n #load tooltip trajectory\n self.tooltip_trajectory.reset()\n self.tooltip_trajectory.pause(self.tooltip_trajectory.trajectoryPoint2Pose(self.tooltip_layer.points[0]), pause_time)\n self.tooltip_trajectory.append_traj(self.tooltip_layer)\n self.tooltip_trajectory.publish_viz_trajectory(self.tip_traj_viz_pub)\n\n #load drone trajectory\n self.trajectory.reset()\n self.trajectory.pause(self.trajectory.trajectoryPoint2Pose(self.drone_layer.points[0]), pause_time)\n self.trajectory.append_traj(self.drone_layer)\n self.trajectory.publish_viz_trajectory(self.traj_viz_pub)\n\n #open nozzle\n call_nozzle_open_service()\n\n def on_endPrint(self):\n #close nozzle\n call_nozzle_close_service()\n 
self.on_startScan()\n \n def on_startLoiter(self):\n layer = call_slicing_service()\n\n try:\n tf_map2print = self.tfBuffer.lookup_transform('map', 'print_origin', rospy.Time.now(), timeout=rospy.Duration(5))\n tf_tip2drone = self.tfBuffer.lookup_transform('tooltip_init_r', 'base_link', rospy.Time.now(), timeout=rospy.Duration(5))\n tf_tip2tip = self.tfBuffer.lookup_transform('tooltip_init_r', 'tooltip_init', rospy.Time.now(), timeout=rospy.Duration(5))\n except:\n rospy.logerr(\"Unable to fetch TFs!\")\n\n layer = self.trajectory.transform_trajectory(layer, tf_map2print)\n\n self.drone_layer = self.trajectory.offset_trajectory(layer, tf_tip2drone)\n \n self.tooltip_layer = self.trajectory.rotate_trajectory(layer, tf_tip2tip)\n\n self.trajectory.reset()\n self.trajectory.transition(self.pose, self.trajectory.trajectoryPoint2Pose(self.drone_layer.points[0]))\n self.trajectory.publish_viz_trajectory(self.traj_viz_pub)\n\n def on_manualTakeover(self):\n rospy.loginfo(\"Manual takeover\")\n call_nozzle_close_service()\n\n def on_exception(self):\n rospy.logerr('state machine exception!')\n self.startLanding()\n call_nozzle_close_service()\n\n #---------------------------------------------------------------------------------------------------------------\n # callbacks to occur on timer event - need to be defined for every state that is called\n\n def during_Loiter(self):\n self.tooltip_state = \"STAB_3DOF\"\n if self.operator_confirmed:\n complete, pose, velocity = self.trajectory.follow()\n if not complete:\n self.pose = pose\n self.velocity = velocity\n else:\n self.operator_confirmed = False\n self.startPrint()\n\n def during_Scan(self):\n self.tooltip_state = \"STAB_3DOF\"\n scan_complete, pose, velocity = self.trajectory.follow()\n if not scan_complete:\n self.pose = pose\n self.velocity = velocity\n else:\n self.startLoiter()\n \n def during_Print(self):\n self.tooltip_state = \"STAB_6DOF\"\n print_complete, pose, velocity = self.trajectory.follow()\n tooltip_print_complete, tip_pose, tip_velocity = self.tooltip_trajectory.follow()\n if not print_complete:\n self.pose = pose\n self.velocity = velocity\n self.tooltip_pose = tip_pose\n self.tooltip_twist = tip_velocity\n else:\n self.endPrint()\n \n def during_Takeoff(self):\n self.tooltip_state = \"HOME\"\n self.velocity.twist.angular = Vector3(0,0,0)\n #increase target z to deined loiter height\n if self.pose.pose.position.z < self.takeoff_hgt:\n self.pose.pose.position.z += self.tol_speed / self.rate\n self.velocity.twist.linear = Vector3(0,0,self.tol_speed)\n else: #when target has reached loiter height and drone knows its flying, move to next state \n self.pose.pose.position.z = self.takeoff_hgt\n self.velocity.twist.linear = Vector3(0,0,0)\n self.startScan()\n\n def during_Land(self):\n self.tooltip_state = \"HOME\"\n self.velocity.twist.angular = Vector3(0,0,0)\n #reduce height of z setpoint until altitude is zero\n if self.pose.pose.position.z > 0 and not (self.mavros_ext_state.landed_state == 1):\n self.pose.pose.position.z += -self.tol_speed / self.rate\n self.velocity.twist.linear = Vector3(0,0,-self.tol_speed)\n else:\n self.switchToGround()\n\n def during_Manual(self):\n # If flying -> goto home position\n self.pose = self.local_pose\n self.velocity = self.local_velocity\n self.tooltip_state = \"STAB_3DOF\"\n if self.mavros_ext_state.landed_state == 1:\n self.switchToGround()\n if self.mavros_state.mode == \"OFFBOARD\":\n self.startScan()\n \n def during_Ground(self):\n # if landed -> takeoff. 
\n self.pose = self.local_pose\n self.velocity = self.local_velocity\n self.tooltip_state = \"HOME\"\n if self.mavros_state.armed:\n self.tooltip_state = \"HOME\"\n if self.mavros_ext_state.landed_state == 2:\n self.manualTakeover()\n if self.mavros_state.mode == \"OFFBOARD\":\n self.tooltip_state = \"HOME\"\n self.startTakeoff()\n \n def during_always(self): #this callback always runs to check if not in offboard mode\n if self.mavros_state.mode != \"OFFBOARD\" and not (self.state == 'Manual' or self.state == 'Ground'):\n self.manualTakeover()\n\n #----------------------------------------------------------------------------------------------\n #ros callbacks\n\n def _timer_cb(self, event): #timer callback runs at specified rate to output setpoints\n self.during_always()\n exec(\"self.during_\" + str(self.state) + \"()\") #execute the function name corresponding to the current state\n # update time stamps and publish current values of drone and manipulator commands\n \n self.pose.header.stamp = rospy.Time.now()\n self.velocity.header.stamp = rospy.Time.now()\n\n self.tooltip_pose.header.stamp = rospy.Time.now()\n self.tooltip_twist.header.stamp = rospy.Time.now()\n # self.tooltip_twist =\n # self.tooltip_pose =\n\n self.sp_position_pub.publish(self.pose)\n self.sp_vel_pub.publish(self.velocity)\n self.pub_drone_state.publish(String(str(self.state)))\n self.pub_tooltip_state.publish(String(self.tooltip_state))\n self.pub_tooltip_pose.publish(self.tooltip_pose)\n self.pub_tooltip_twist.publish(self.tooltip_twist)\n\n def _state_cb(self, state_msg):\n self.mavros_state = state_msg\n\n def _ext_state_cb(self, ext_state_msg):\n #reference for landed_state:\n # uint8 LANDED_STATE_UNDEFINED = 0\n # uint8 LANDED_STATE_ON_GROUND = 1\n # uint8 LANDED_STATE_IN_AIR = 2\n # uint8 LANDED_STATE_TAKEOFF = 3\n # uint8 LANDED_STATE_LANDING = 4\n self.mavros_ext_state = ext_state_msg\n\n def _local_pos_cb(self, local_pose_msg):\n self.local_pose = local_pose_msg\n\n def _local_vel_cb(self, local_vel_msg):\n self.local_velocity = local_vel_msg\n\n #---------------------\n\ndef call_slicing_service():\n slice_print = rospy.ServiceProxy('generate_layer', generateLayer)\n req = generateLayerRequest()\n resp = slice_print(req)\n return resp.trajectory\n\ndef call_nozzle_open_service():\n try:\n open_nozzle = rospy.ServiceProxy('open_nozzle', Trigger)\n req = TriggerRequest()\n resp = open_nozzle(req)\n except:\n rospy.logwarn(\"printing hardware not connected\")\n\ndef call_nozzle_close_service():\n try:\n close_nozzle = rospy.ServiceProxy('close_nozzle', Trigger)\n req = TriggerRequest()\n resp = close_nozzle(req)\n except:\n rospy.logwarn(\"printing hardware not connected\")\n\ndef call_scan_reset_service():\n try:\n restart_mapping = rospy.ServiceProxy('restart_mapping', Empty)\n req = EmptyRequest()\n resp = restart_mapping(req)\n except:\n rospy.logwarn(\"mapping restart unavailable\")\n\nif __name__ == '__main__':\n # initialize node\n rospy.init_node('print_state_machine', anonymous=True)\n pSM = printStateMachine()\n rospy.spin()","repo_name":"lachie-aerialrobotics/vertical_AAM","sub_path":"scripts/printing/print_state_machine.py","file_name":"print_state_machine.py","file_ext":"py","file_size_in_byte":15628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
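The state machine record above builds on the `transitions` package: declaring states plus trigger/source/dest entries attaches trigger methods to the model object. A minimal sketch using two of the states from the record; the bare Printer class here is a stand-in for illustration, not the author's code.

# Minimal `transitions` usage (sketch).
from transitions import Machine

class Printer(object):
    pass

states = ['Ground', 'Takeoff']
trans = [{'trigger': 'startTakeoff', 'source': 'Ground', 'dest': 'Takeoff'}]

p = Printer()
Machine(model=p, states=states, transitions=trans, initial='Ground')
print(p.state)       # Ground
p.startTakeoff()     # trigger method added by Machine
print(p.state)       # Takeoff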
+{"seq_id":"27517327370","text":"import os\nimport re\nimport requests\n\nfrom .._cache import _test_cache, _insert_cache\nfrom ._string_aware_strip import (\n _string_aware_comment_strip,\n _string_aware_generic_strip\n)\n\n\n_CACHE_DIR = os.path.abspath(os.path.join(__file__, \"..\", \".cache\", \"js\"))\n_TOPTAL_CACHE_DIR = os.path.join(_CACHE_DIR, \"toptal\")\n\n_SINGLE_LINE_COMMENT_RE = re.compile(r\"//.*\")\n_MULTI_LINE_COMMENT_RE = re.compile(r\"/\\*[\\s\\S]*?\\*/\")\n_EXTRA_NEWLINES_RE = re.compile(r\"\\n{2,}\")\n_EXTRA_SPACES_RE = re.compile(r\" {2,}\")\n_EXTRA_TABS_RE = re.compile(r\"\\t{2,}\")\n\n\ndef _strip_comments(source):\n \"\"\"Strip comments from Javascript source.\n\n Args:\n source (str): Input Javascript.\n\n Returns:\n str: Stripped source.\n \"\"\"\n source = _string_aware_comment_strip(_SINGLE_LINE_COMMENT_RE, source)\n source = _string_aware_comment_strip(_MULTI_LINE_COMMENT_RE, source)\n return source\n\n\ndef _strip_whitespace(source, has_consistent_semicolons=True):\n \"\"\"Strip excessive whitespace.\n\n Args:\n source (str): Input Javascript.\n has_consistent_semicolons (bool): If the source code\n has consistent semicolons. (Default: True)\n\n Returns:\n str: Stripped source.\n \"\"\"\n base_op_characters = \"[+-*^|&<>{;:?=,(!~/\"\n semicolon_fragile_characters = \")]\"\n if has_consistent_semicolons:\n base_op_characters += \"}\"\n else:\n semicolon_fragile_characters += \"}\"\n all_op_characters = semicolon_fragile_characters + base_op_characters\n\n # 1 + 2 + 3 => 1+2+3\n base_strip = re.compile(\n r\"\\s*([\" + re.escape(base_op_characters) + r\"])\\s*\"\n )\n\n # hello ] => hello]\n prefix_fragile_strip = re.compile(\n r\"\\s+([\" + re.escape(semicolon_fragile_characters) + r\"])\"\n )\n\n # [ [ ] ] => [[]]\n sequential_operator_strip = re.compile(\n r\"([\"+ re.escape(all_op_characters) + r\"])\"\n r\"\\s+(?=[\" + re.escape(all_op_characters) + r\"])\"\n )\n # )\\n\\n\\nthing => )\\nthing\n fragile_semicolon_strip = re.compile(\n r\"\\s*([\"+ re.escape(semicolon_fragile_characters) + r\"])\\s+\"\n )\n\n source = _string_aware_generic_strip(base_strip, lambda x: x.group(1), source)\n source = _string_aware_generic_strip(sequential_operator_strip, lambda x: x.group(1), source)\n source = _string_aware_generic_strip(prefix_fragile_strip, lambda x: \"{0}\\n\".format(x.group(1)), source)\n source = _string_aware_generic_strip(fragile_semicolon_strip, lambda x: \"{0}\\n\".format(x.group(1)), source)\n source = _string_aware_generic_strip(_EXTRA_NEWLINES_RE, lambda x: \"\\n\", source)\n source = _string_aware_generic_strip(_EXTRA_SPACES_RE, lambda x: \" \", source)\n source = _string_aware_generic_strip(_EXTRA_TABS_RE, lambda x: \"\\t\", source)\n source = source.strip()\n return source\n\n\ndef _request_toptal_minify_js(source):\n \"\"\"Request minifaction from Toptal.\n\n Args:\n source (str): Javascript to minify.\n\n Returns:\n tuple(bool, str): Success flag, minified text.\n \"\"\"\n try:\n found, cached = _test_cache(_TOPTAL_CACHE_DIR, source)\n if found:\n return True, cached\n response = requests.post(\n \"https://www.toptal.com/developers/javascript-minifier/api/raw\",\n data={\"input\": source}\n )\n if response.ok:\n _insert_cache(_TOPTAL_CACHE_DIR, source, response.text)\n return True, response.text\n except Exception:\n pass\n return False, \"\"\n\n\ndef _minify_js(source, has_consistent_semicolons=True):\n \"\"\"Minify Javascript source with custom minifier.\n\n Args:\n source (str): Javascript to minify.\n has_consistent_semicolons (bool): If 
the source code\n has consistent semicolons. (Default: True)\n\n Returns:\n str: Minified Javascript.\n \"\"\"\n found, cached = _test_cache(_CACHE_DIR, source)\n if found:\n return cached\n minified = source\n minified = _strip_comments(minified)\n minified = _strip_whitespace(minified, has_consistent_semicolons)\n _insert_cache(_CACHE_DIR, source, minified)\n return minified\n\n\ndef minify_js(source, allow_toptol=True, has_consistent_semicolons=True):\n \"\"\"Minify Javascript source.\n\n Args:\n source (str): Javascript to minify.\n allow_toptol (bool): Allow the use of www.toptol.com to\n minify (Default: True)\n has_consistent_semicolons (bool): If the source code\n has consistent semicolons. (Default: True)\n\n Returns:\n str: Minified Javascript.\n \"\"\"\n source = source.strip()\n if not source:\n return source\n if allow_toptol:\n ok, minified = _request_toptal_minify_js(source)\n if ok:\n return minified\n return _minify_js(source, has_consistent_semicolons)\n","repo_name":"alister-chowdhury/alister-chowdhury.github.io","sub_path":"builder/minify/minify_js.py","file_name":"minify_js.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"9120315149","text":"import configparser\nfrom colorama import Fore, Back, Style\nimport os\n\ndef get_key():\n script_dir = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(script_dir, 'config.ini')\n # 读取INI文件\n config = configparser.ConfigParser()\n config.read(config_path)\n # config.read('config.ini')\n\n # 获取key变量\n key = config.get('Section1', 'key', fallback='')\n\n # 如果key为空,则要求用户输入并保存到INI文件\n if not key:\n key = input('Please enter your api-key:')\n config.set('Section1', 'key', key)\n with open('config.ini', 'w') as configfile:\n config.write(configfile)\n\n print('api-key set successfully')\n\n return key\n\ndef change_key(newKey):\n config = configparser.ConfigParser()\n config.read('config.ini')\n config.set('Section1', 'key', newKey)\n with open('config.ini', 'w') as configfile:\n config.write(configfile)\n print( Fore.YELLOW +'api-key changed successfully' + Style.RESET_ALL)","repo_name":"ForestTrees/TerminalGPT","sub_path":"keyConfig.py","file_name":"keyConfig.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"4903183322","text":"import numpy as np\nimport time as t\n\n# my own modules\nimport molecule\n\n# create class objects\nm = molecule.Molecule()\nnm = molecule.Normal_modes()\nsp = molecule.Structure_pool_method()\n# define stuff\n#natom = 3\nnatom = 14\n#xyzheader, comment, atomlist, xyz = m.read_xyz(\"xyz/test.xyz\")\nxyzheader, comment, atomlist, xyz = m.read_xyz(\"xyz/chd.xyz\")\natomic_numbers = [m.periodic_table(symbol) for symbol in atomlist]\n#dim = 3\n#tcm, fcm = m.triangle_cm(atomic_numbers, xyz, dim)\n\n# normal mode definitions\n#nmfile = \"nm/test_normalmodes.txt\"\nnmfile = \"nm/chd_normalmodes.txt\"\ndisplacements = nm.read_nm_displacements(nmfile, natom)\ndisplacement = displacements[0, :, :] # 1st mode displacements\nfactor = 1\n\n# xray testing\nx = molecule.Xray()\nqlen = 101\nqvector = np.linspace(0, 10, qlen, endpoint=True) # q probably in a.u.\n\n\ndef test_read_xyz():\n assert xyzheader == 3, \"xyzheader should be 3\"\n assert comment.__contains__(\"test\"), \"comment should be 'test'\"\n assert atomlist[0] == \"O\", \"1st atom should be O\"\n assert atomic_numbers[0] == 8, \"1st atomic charge should be 8\"\n assert xyz[0, 0] == 0.0, \"Upper left coordinate should be 0.0\"\n\n\ndef test_write_xyz():\n fname = \"xyz/out.xyz\"\n comment = \"test\"\n m.write_xyz(fname, comment, atomlist, xyz)\n with open(fname) as out:\n assert out.readline() == \"3\\n\", \"1st line of out.xyz != 3\"\n assert out.readline() == \"test\\n\", \"2nd line of out.xyz != 'test'\"\n\ndef test_read_xyz_traj():\n natoms, comment, atomlist, xyz_traj = m.read_xyz_traj('xyz/chd_target_traj.xyz', 12)\n fname = 'out.xyz'\n m.write_xyz_traj(fname, atomlist, xyz_traj)\n\ndef test_sort_array():\n print(atomic_numbers)\n print(xyz)\n xyz_sorted = m.sort_array(xyz, atomic_numbers)\n print(xyz_sorted)\n print(atomlist)\n atoms = m.sort_array(atomlist, atomic_numbers)\n print(atoms)\n # add assertion ...\n\n\ndef test_periodic_table():\n h = m.periodic_table(\"H\")\n he = m.periodic_table(\"He\")\n c = m.periodic_table(\"C\")\n assert h == 1, \"H should have atom number 1\"\n assert he == 2, \"He should have atom number 2\"\n assert c == 6, \"C should have atom number 2\"\n\n\ndef test_triangle_cm():\n print(\"tcm\")\n print(tcm)\n assert round(tcm[0, 0]) == 74, \"rounded [0, 0] element != 74\"\n assert tcm[0, 1] == 8, \"[0, 1] element not != 8\"\n assert tcm[-1, -1] == 0.5, \"bottom right element != 0.5\"\n assert tcm[1, 0] == 0, \"bottom left diagonal != 0\"\n\n\ndef test_full_cm():\n print(\"fcm\")\n print(fcm)\n assert fcm[1, 0] == fcm[0, 1], \"upper diagonal != lower diagonal\"\n assert fcm[2, 0] == fcm[0, 2], \"upper diagonal != lower diagonal\"\n assert fcm[2, 1] == fcm[1, 2], \"upper diagonal != lower diagonal\"\n\n\ndef test_read_nm_displacements():\n assert displacements[0, 0, 1] == 0.07049, \"displacements[0, 0, 1] != 0.07049\"\n assert displacements[1, 1, 0] == 0.58365, \"displacements[1, 1, 0] != 0.58365\"\n\n\ndef test_displace_xyz():\n displaced_xyz = nm.displace_xyz(xyz, displacement, factor)\n assert displaced_xyz[1, 0] == 0.57028, (\n \"displaced_xyz[1, 0] !== 0.57028, for factor %d\" % factor\n )\n\n\ndef test_displace_write_xyz():\n displacement = displacements[0, :, :] # 1st mode displacements\n factor = 1\n displaced_xyz = nm.displace_xyz(xyz, displacement, factor)\n fname = \"xyz/displaced.xyz\"\n comment = \"displaced\"\n m.write_xyz(fname, comment, atomlist, displaced_xyz)\n with open(fname) as out:\n assert out.readline() == \"3\\n\", \"1st line of %s != 3\" % fname\n 
assert out.readline() == \"displaced\\n\", \"2nd line of %s != %s\" % (\n fname,\n comment,\n )\n\n\ndef test_nm_displacer():\n factors = [1, 1, 1]\n modes = [0, 1, 2]\n displaced_xyz = nm.nm_displacer(xyz, displacements, modes, factors)\n assert round(displaced_xyz[0, 1], 5) == round(\n xyz[0, 1] + 0.07049 + 0.05016 + 0.00003, 5\n ), \"displaced xyz error\"\n assert round(displaced_xyz[1, 0], 5) == round(\n xyz[1, 0] - 0.42972 + 0.58365 - 0.55484, 5\n ), \"displaced xyz error\"\n\n\ndef test_atomic_factor():\n atom_number = 1 # atom_number = 1 is hydrogen, etc.\n atom_factor = x.atomic_factor(atom_number, qvector)\n assert round(atom_factor[0], 3) == 1.0, \"H atomic factor (q = 0) != 1\"\n assert (\n round(x.atomic_factor(2, qvector)[0], 3) == 2.0\n ), \"He atomic factor (q = 0) != 2\"\n\n\ndef test_iam_calc():\n #compton_array = x.compton_spline(\n # atomic_numbers, qvector\n #) # atomic compton factors\n iam = x.iam_calc(atomic_numbers, xyz, qvector)\n np.savetxt('iam.dat', iam)\n #assert round(iam[0], 1) == 100.0, \"H2O molecular factor (q = 0) != 100\"\n\n#test_iam_calc()\n\ndef test_iam_calc_2d():\n #compton_array = x.compton_spline(\n # atomic_numbers, qvector\n #) # atomic compton factors\n atomic, molecular, rotavg = x.iam_calc_2d(atomic_numbers, xyz, qvector)\n iam = atomic + molecular\n np.savetxt('atomic2d.dat', atomic)\n np.savetxt('molecular2d.dat', molecular)\n np.savetxt('rotavg.dat', rotavg)\n np.savetxt('iam2d.dat', iam)\n\ntest_iam_calc_2d()\n\ndef test_iam_calc_3d():\n atomic, molecular = x.iam_calc_3d(atomic_numbers, xyz, qvector)\n iam = atomic + molecular\n np.savetxt('atomic3d.dat', atomic)\n np.savetxt('molecular3d.dat', molecular)\n #np.savetxt('rotavg.dat', rotavg)\n np.savetxt('iam3d.dat', iam)\n\n#test_iam_calc_3d()\n\ndef test_distances_array():\n dist_array = m.distances_array(xyz)\n assert dist_array[1, 2] == 2, \"distance between hydrogens != 2\"\n\ndef test_simulate_trajectory():\n xyzheader, comment, atomlist, xyz = m.read_xyz(\"xyz/nmm.xyz\")\n starting_xyz = xyz\n natom = xyz.shape[0]\n nsteps = 100\n step_size = 0.5\n wavenumbers = np.loadtxt('quantum/nmm_wavenumbers.dat')[:, 1]\n nmfile = \"nm/nmm_normalmodes.txt\"\n displacements = nm.read_nm_displacements(nmfile, natom)\n xyz_traj = sp.simulate_trajectory(starting_xyz, displacements, wavenumbers, nsteps, step_size)\n sp.xyz_traj_to_file(atomlist, xyz_traj)\n\n#test_simulate_trajectory()\n\ndef test_simulated_annealing():\n _, _, atomlist, xyz = m.read_xyz(\"xyz/nmm.xyz\")\n atomic_numbers = [m.periodic_table(symbol) for symbol in atomlist]\n starting_iam = x.iam_calc(atomic_numbers, xyz, qvector)\n starting_xyz = xyz\n wavenumbers = np.loadtxt('quantum/nmm_wavenumbers.dat')[:, 1]\n nmfile = \"nm/nmm_normalmodes.txt\"\n natom = 18\n displacements = nm.read_nm_displacements(nmfile, natom)\n # experiment percent diff\n _, _, _, xyz_displaced = m.read_xyz(\"xyz/nmm_displaced.xyz\")\n displaced_iam = x.iam_calc(atomic_numbers, xyz_displaced, qvector)\n experiment_pcd = 100 * (displaced_iam/starting_iam - 1)\n # run sim annealing\n nsteps = 10\n convergence_value = 0.001\n cooling_rate=4.0\n step_size=0.1\n save_xyz_path=True\n xyz_min_traj, chi2_path = sp.simulated_annealing(\n starting_xyz,\n displacements,\n wavenumbers,\n experiment_pcd,\n qvector,\n nsteps,\n convergence_value,\n cooling_rate,\n step_size,\n save_xyz_path,\n )\n save_xyz_traj_file = True\n if save_xyz_traj_file:\n fname = 'data/min_traj.xyz'\n sp.xyz_traj_to_file(atomlist, xyz_min_traj, fname)\n\ndef test_simulated_annealing_v4():\n 
_, _, atomlist, starting_xyz = m.read_xyz(\"xyz/chd.xyz\")\n atomic_numbers = [m.periodic_table(symbol) for symbol in atomlist]\n nmfile = \"nm/chd_normalmodes.txt\"\n natoms = 14\n displacements = nm.read_nm_displacements(nmfile, natoms)\n qlen = 99\n qvector = np.linspace(0, 12, qlen, endpoint=True)\n starting_iam = x.iam_calc(atomic_numbers, starting_xyz, qvector)\n # \"experiment\" target percent diff\n tlen = 18\n target_pcd_array = np.zeros((qlen, tlen))\n _, _, _, target_xyz_array = m.read_xyz_traj(\"xyz/chd_target_traj.xyz\", tlen)\n for t in range(tlen):\n target_iam = x.iam_calc(atomic_numbers, target_xyz_array[:, : , t], qvector)\n target_pcd_array[:, t] = 100 * (target_iam / starting_iam - 1)\n target_pcd_array[:, t] /= np.max(np.abs(target_pcd_array[:, t])) # normalise abs. max value to 1\n target_pcd = target_pcd_array[:, 0]\n\n starting_temp = 0.2\n nsteps = 10000\n step_size = 0.1\n chi2_best, pcd_best, xyz_best = sp.simulated_annealing_v4(\n displacements,\n target_pcd,\n qvector,\n starting_temp,\n nsteps,\n step_size,\n )\n print(chi2_best)\n print(pcd_best)\n print(xyz_best)\n\n#start = t.time()\n#test_simulated_annealing_v4()\n#end = t.time()\n#total = float(end - start)\n#print('time taken: %f' % total)\n\ndef test_gradient_d():\n xyzheader, comment, atomlist, xyz = m.read_xyz(\"xyz/target.xyz\")\n qlen = 81\n qvector = np.linspace(0.1, 8, qlen, endpoint=True)\n target_iam = x.iam_calc(atomic_numbers, xyz, qvector)\n nsteps=1000\n step_size=5e-9\n chi2_best, iam_best, rk_best = sp.gradient_d( target_iam, qvector, nsteps, step_size )\n np.savetxt('iam_target.dat', target_iam)\n np.savetxt('iam_best_%10.8f.dat' % chi2_best, iam_best)\n\ntest_gradient_d()\n\n","repo_name":"tnorthey/molecule","sub_path":"test_functions.py","file_name":"test_functions.py","file_ext":"py","file_size_in_byte":9071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"10947809762","text":"import cv2\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom torch.utils.data import Dataset\nfrom data.preparation import get_df_series\nfrom params import PATIENT_TARGETS, IMG_TARGETS_EXTENDED\n\n\ndef to_one_hot_patient(y):\n \"\"\"\n Convert a patient target tensor to a one-hot encoded representation.\n Each column with index less than or equal to 1 (bowel, extrav) are unchanged.\n Columns with index greater than 1 are one-hot encoded based on their original class values.\n\n Args:\n y (torch.Tensor): The input multi-class tensor of shape (N, C), where N is the number\n of samples and C is the number of classes.\n\n Returns:\n torch.Tensor: A one-hot encoded tensor of shape (N, K), where K is the sum of the number\n of classes in each column of the input tensor.\n \"\"\"\n new_y = []\n for i in range(y.size(1)):\n if i <= 1:\n new_y.append(y[:, i].unsqueeze(-1))\n else:\n y_ = (\n torch.zeros(y.size(0), 3)\n .to(y.device)\n .scatter(1, y[:, i].view(-1, 1).long(), 1)\n )\n new_y.append(y_)\n return torch.cat(new_y, -1)\n\n\ndef get_frames(frame, n_frames, frames_c, stride=1, max_frame=100):\n \"\"\"\n Calculate a sequence of frame indices based on the specified parameters.\n If stride is -1, sample n_frames from 0 to max_frame using linear spacing.\n\n Args:\n frame (int): The central frame index around which the sequence is generated.\n n_frames (int): The number of frames in the sequence.\n frames_c (int): The number of frames to be repeated and offset around each frame.\n stride (int, optional): The step size between frames. Defaults to 1.\n max_frame (int, optional): The maximum frame index allowed. Defaults to 100.\n\n Returns:\n numpy.ndarray: An array of frame indices representing the calculated sequence.\n \"\"\"\n if stride == -1:\n frames = np.linspace(0, max_frame, n_frames + 4, endpoint=True, dtype=int)[\n 2:-2\n ]\n\n else:\n frames = np.arange(n_frames) * stride\n frames = frames - frames[n_frames // 2] + frame\n\n if frames_c:\n offset = np.tile(np.arange(-1, 2) * frames_c, len(frames))\n frames = np.repeat(frames, 3) + offset\n\n if frames.min() < 0:\n frames -= frames.min()\n elif frames.max() > max_frame:\n frames += max_frame - frames.max()\n\n frames = np.clip(frames, 0, max_frame)\n return frames\n\n\nclass AbdominalDataset(Dataset):\n \"\"\"\n Dataset for training 2D classification models.\n\n Attributes:\n df_img (pandas DataFrame): Metadata containing image information.\n df_patient (pandas DataFrame): Metadata containing patient information.\n transforms (albu transforms): Transforms to apply to the images.\n frames_chanel (int): The number of frames to consider for channel stacking.\n n_frames (int): The number of frames to use.\n stride (int): The step size between frames.\n train (bool): Flag indicating whether the dataset is for training.\n classes (list): List of target classes.\n targets (numpy.ndarray): Array of patient targets.\n max_frames (dict): Dictionary of maximum frames per series.\n \"\"\"\n def __init__(\n self,\n df_patient,\n df_img,\n transforms=None,\n frames_chanel=0,\n n_frames=0,\n stride=1,\n train=False,\n ):\n \"\"\"\n Constructor.\n\n Args:\n df_patient (pandas DataFrame): Metadata containing patient information.\n df_img (pandas DataFrame): Metadata containing image information.\n transforms (albu transforms, optional): Transforms to apply to images and masks. Defaults to None.\n frames_chanel (int, optional): Number of frames to consider for channel stacking. 
Defaults to 0.\n n_frames (int, optional): The number of frames to use. Defaults to 0.\n stride (int, optional): The step size between frames. Defaults to 1.\n train (bool, optional): Flag indicating whether the dataset is for training. Defaults to False.\n \"\"\"\n self.df_img = df_img\n self.df_patient = df_patient\n self.transforms = transforms\n self.frames_chanel = frames_chanel\n self.n_frames = n_frames\n self.stride = stride\n self.train = train\n\n self.classes = IMG_TARGETS_EXTENDED\n\n self.targets = df_patient[PATIENT_TARGETS].values\n self.max_frames = dict(\n df_img[[\"series\", \"frame\"]].groupby(\"series\").max()[\"frame\"]\n )\n\n def __len__(self):\n \"\"\"\n Get the length of the dataset.\n\n Returns:\n int: Length of the dataset.\n \"\"\"\n return len(self.df_patient) * len(self.classes)\n\n def __getitem__(self, idx):\n \"\"\"\n Item accessor.\n Frames are sampled the following way:\n - kidney / liver / spleen / negative bowel : Inside the organ.\n - positive bowel / positive extravasation : Using the frame-level labels.\n - Negative extravasation : Anywhere\n\n Args:\n idx (int): Index.\n\n Returns:\n torch.Tensor: Image as a tensor of shape [(N) x C, H, W].\n torch.Tensor: Label as a tensor of shape [9].\n torch.Tensor: Aux label as a tensor of shape [9]. Not used.\n \"\"\"\n tgt_idx = idx % len(self.classes)\n tgt = self.classes[tgt_idx]\n\n idx = idx // len(self.classes)\n patient = self.df_patient[\"patient_id\"].values[idx]\n y_patient = self.targets[idx]\n\n df_img = self.df_img[self.df_img[\"patient_id\"] == patient]\n\n # Restrict to considered class\n if (df_img[self.classes[tgt_idx]] == y_patient[tgt_idx]).max():\n df_img = df_img[df_img[self.classes[tgt_idx]] == y_patient[tgt_idx]]\n else: # Class has no match, use argmax - should not happen\n raise NotImplementedError\n\n # Restrict to segmentation > 0.9 for negatives\n if not y_patient[tgt_idx]:\n seg = df_img[f'pred_{tgt.split(\"_\")[0]}'].values\n seg = seg / (seg.max() + 1e-6)\n df_img = df_img[seg > 0.9]\n\n # Restrict to one series\n series = (\n np.random.choice(df_img[\"series\"].unique())\n if self.train\n else df_img[\"series\"].values[0]\n )\n df_img = df_img[df_img[\"series\"] == series]\n\n # Sort by frame\n df_img = df_img.sort_values(\"frame\").reset_index(drop=True)\n\n # Pick a row\n if self.train:\n ps = np.exp(\n -(\n (\n (np.arange(len(df_img)) - len(df_img) // 2)\n / (0.4 * len(df_img))\n )\n ** 2\n )\n ) # gaussian\n row_idx = np.random.choice(len(df_img), p=ps / ps.sum())\n row = df_img.iloc[row_idx]\n else:\n row = df_img.iloc[len(df_img) // 2] # center\n\n if self.frames_chanel > 0 or self.n_frames > 1:\n frame = row.frame\n\n if self.n_frames <= 1:\n frame = np.clip(\n frame,\n self.frames_chanel,\n self.max_frames[series] - self.frames_chanel,\n )\n frames = [frame - self.frames_chanel, frame, frame + self.frames_chanel]\n else:\n frames = get_frames(\n frame,\n self.n_frames,\n self.frames_chanel,\n stride=self.stride,\n max_frame=self.max_frames[series],\n )\n\n prefix = row.path.rsplit(\"_\", 1)[0]\n paths = [prefix + f\"_{f:04d}.png\" for f in frames]\n image = np.array([cv2.imread(path, 0) for path in paths]).transpose(1, 2, 0)\n\n else:\n frame = row.frame\n image = cv2.imread(row.path)\n\n image = image.astype(np.float32) / 255.0\n\n if self.transforms:\n transformed = self.transforms(image=image)\n image = transformed[\"image\"]\n\n y_patient = torch.tensor(y_patient, dtype=torch.float)\n y_img = torch.tensor(row[self.classes], dtype=torch.float)\n\n if y_img.size(-1) == 
5: # Patient level - TODO : y_patient ?\n y_img = to_one_hot_patient(y_img.unsqueeze(0))[0]\n\n if self.n_frames > 1:\n if self.frames_chanel:\n image = image.view(\n self.n_frames, 3, image.size(1), image.size(2)\n )\n else:\n image = (\n image.view(1, self.n_frames, image.size(1), image.size(2))\n .repeat(3, 1, 1, 1)\n .transpose(0, 1)\n )\n else:\n if not self.frames_chanel:\n image = image.repeat(3, 1, 1)\n\n return image, y_img, y_patient\n\n\nclass AbdominalCropDataset(Dataset):\n \"\"\"\n Dataset for training 2.5D crop classification models.\n\n Attributes:\n df_img (pandas DataFrame): Metadata containing image information.\n df_patient (pandas DataFrame): Metadata containing patient information.\n df_series (pandas DataFrame): Metadata containing information about image series.\n transforms (albu transforms): Transforms to apply to the images.\n frames_chanel (int): The number of frames to consider for channel stacking.\n n_frames (int): The number of frames to use.\n stride (int): The step size between frames.\n train (bool): Flag indicating whether the dataset is for training.\n sigmas (dict): Dictionary containing Gaussian sigmas for various organs.\n \"\"\"\n def __init__(\n self,\n df_patient,\n df_img,\n transforms=None,\n frames_chanel=0,\n n_frames=0,\n stride=1,\n train=False,\n use_soft_target=False,\n df_series=None,\n ):\n \"\"\"\n Constructor for the AbdominalCropDataset class.\n\n Args:\n df_patient (pandas DataFrame): Metadata containing patient information.\n df_img (pandas DataFrame): Metadata containing image information.\n transforms (albu transforms, optional): Transforms to apply to images and masks. Defaults to None.\n frames_chanel (int, optional): Number of frames to consider for channel stacking. Defaults to 0.\n n_frames (int, optional): The number of frames to use. Defaults to 0.\n stride (int, optional): The step size between frames. Defaults to 1.\n train (bool, optional): Flag indicating whether the dataset is for training. Defaults to False.\n use_soft_target (bool, optional): Flag indicating the use of soft targets. Defaults to False.\n df_series (pandas DataFrame, optional): Metadata containing info about series. Defaults to None.\n \"\"\"\n self.df_img = df_img\n self.df_patient = df_patient\n self.df_series = (\n get_df_series(df_patient, df_img) if df_series is None else df_series\n )\n self.targets = self.df_series[\"target\"].values\n\n self.transforms = transforms\n self.frames_chanel = frames_chanel\n self.n_frames = n_frames\n self.stride = stride\n\n self.train = train\n\n self.sigmas = {\"kidney\": 0.15, \"spleen\": 0.2, \"liver\": 0.3}\n\n def __len__(self):\n \"\"\"\n Get the length of the dataset.\n\n Returns:\n int: Length of the dataset.\n \"\"\"\n return len(self.df_series)\n\n def __getitem__(self, idx):\n \"\"\"\n Item accessor. 
Samples a random frame inside the organ.\n\n Args:\n idx (int): Index.\n\n Returns:\n torch.Tensor: Image as a tensor of shape [(N,) C, H, W].\n torch.Tensor: Label as a tensor of shape [3].\n int: Dummy value.\n \"\"\"\n img = np.load(self.df_series[\"img_path\"].values[idx])\n\n organ = self.df_series[\"organ\"].values[idx]\n if organ == \"kidney\":\n d = int(img.shape[1] * 3 / 4)\n img = np.concatenate([img[:, :, :d], img[:, :, -d:]], -1)\n\n # Pick frame(s)\n if self.train:\n ps = np.exp(\n -(\n (\n (np.arange(len(img)) - len(img) // 2)\n / (self.sigmas[organ] * len(img))\n )\n ** 2\n )\n ) # gaussian\n m = 5 + self.stride * (self.n_frames - 1) + self.frames_chanel\n ps[:m] = 0 # Stay in bounds\n ps[-m:] = 0 # Stay in bounds\n if ps.max():\n frame = np.random.choice(len(img), p=ps / ps.sum())\n else:\n frame = len(img) // 2 + np.random.choice([-2, -1, 0, 1, 2])\n else:\n frame = len(img) // 2 # center\n\n frames = get_frames(\n frame,\n self.n_frames,\n self.frames_chanel,\n stride=self.stride,\n max_frame=len(img) - 1,\n )\n\n # Load\n image = img[np.array(frames)].transpose(1, 2, 0)\n image = image.astype(np.float32) / 255.0\n\n # Augment\n if self.transforms:\n transformed = self.transforms(image=image)\n image = transformed[\"image\"]\n\n y_img = torch.zeros(3, dtype=torch.float)\n y_img[self.targets[idx]] = 1\n\n # Reshape\n if self.n_frames > 1:\n if self.frames_chanel:\n image = image.view(\n self.n_frames, 3, image.size(1), image.size(2)\n ) # .transpose(0, 1)\n else:\n image = (\n image.view(1, self.n_frames, image.size(1), image.size(2))\n .repeat(2, 1, 1, 1)\n .transpose(0, 1)\n )\n\n return image, y_img, 0\n\n\nclass AbdominalInfDataset(Dataset):\n \"\"\"\n Dataset for infering 2D classification models.\n It is optimized to compute the CNN forward only once when models are 2.5D :\n Trick is to extract CNN features for all images,\n and then compute the sequential head by retrieving the indexed features.\n\n Attributes:\n df (pandas DataFrame): Metadata containing image information.\n transforms (albu transforms): Transforms to apply to the images.\n frames_chanel (int): The number of frames to consider for channel stacking.\n n_frames (int): The number of frames to use.\n stride (int): The step size between frames.\n imgs (dict): Dictionary for storing loaded images.\n features (list): List of precompted features.\n single_frame (bool): Flag indicating if only a single frame is used for each item.\n \"\"\"\n def __init__(\n self,\n df,\n transforms=None,\n frames_chanel=0,\n n_frames=1,\n stride=1,\n imgs={},\n features=[],\n single_frame=False,\n ):\n \"\"\"\n Constructor.\n The single frame flag is used for features precomputation.\n\n Args:\n df (pandas DataFrame): Metadata containing image information.\n transforms (albu transforms, optional): Transforms to apply to images and masks. Defaults to None.\n frames_chanel (int, optional): Number of frames to consider for channel stacking. Defaults to 0.\n n_frames (int, optional): The number of frames to use. Defaults to 1.\n stride (int, optional): The step size between frames. Defaults to 1.\n imgs (dict, optional): Dictionary for storing loaded images. Defaults to an empty dictionary.\n features (list, optional): List of precomputed features. Defaults to an empty list.\n single_frame (bool, optional): Whether a single frame is used for each item. 
Defaults to False.\n \"\"\"\n self.df = df\n self.info = self.df[[\"path\", \"patient_id\", \"series\", \"frame\"]].values\n self.transforms = transforms\n\n self.frames_chanel = frames_chanel\n self.n_frames = n_frames\n self.stride = stride\n self.single_frame = single_frame\n\n self.max_frames = dict(df[[\"series\", \"frame\"]].groupby(\"series\").max()[\"frame\"])\n\n self.imgs = imgs\n self.features = features\n\n if len(features):\n self.features = dict(zip(self.get_keys(), features))\n\n def __len__(self):\n \"\"\"\n Get the length of the dataset.\n\n Returns:\n int: Length of the dataset.\n \"\"\"\n return len(self.df)\n\n def get_keys(self):\n \"\"\"\n Get keys for indexing features.\n\n Returns:\n list: List of keys.\n \"\"\"\n keys = []\n for idx in range(len(self.df)):\n path, patient, series, frame = self.info[idx]\n frames = get_frames(\n frame,\n 1,\n self.frames_chanel,\n stride=1,\n max_frame=self.max_frames[series],\n )\n key = f'{patient}_{series}_{\"-\".join(list(frames.astype(str)))}'\n keys.append(key)\n return keys\n\n def _getitem_feature(self, idx):\n \"\"\"\n Item accessor for features.\n\n Args:\n idx (int): Index.\n\n Returns:\n np.ndarray: Features.\n int: Dummy value.\n int: Dummy value.\n \"\"\"\n path, patient, series, frame = self.info[idx]\n\n all_frames = get_frames(\n frame,\n self.n_frames,\n self.frames_chanel,\n stride=self.stride,\n max_frame=self.max_frames[series],\n )\n all_frames = all_frames.reshape(-1, 3)\n\n fts = []\n for frames in all_frames:\n key = f'{patient}_{series}_{\"-\".join(list(frames.astype(str)))}'\n fts.append(self.features[key])\n fts = np.array(fts)\n return fts, 0, 0\n\n def __getitem__(self, idx):\n \"\"\"\n Item accessor.\n Refer to _getitem_feature if features are precomputed.\n\n Args:\n idx (int): Index.\n\n Returns:\n torch.Tensor: Image as a tensor.\n int: Dummy value.\n int: Dummy value.\n \"\"\"\n if len(self.features):\n return self._getitem_feature(idx)\n\n path, patient, series, frame = self.info[idx]\n\n if self.single_frame:\n frames = get_frames(\n frame,\n 1,\n self.frames_chanel,\n stride=1,\n max_frame=self.max_frames[series],\n )\n else:\n frames = get_frames(\n frame,\n self.n_frames,\n self.frames_chanel,\n stride=self.stride,\n max_frame=self.max_frames[series],\n )\n\n paths = [path.rsplit(\"_\", 1)[0] + f\"_{f:04d}.png\" for f in frames]\n\n image = []\n for path, frame in zip(paths, frames):\n try:\n img = self.imgs[path]\n except Exception:\n img = cv2.imread(path, 0)\n if not (idx + 1 % 10000): # clear buffer\n self.imgs = {}\n self.imgs[path] = img\n\n image.append(img)\n\n image = np.array(image).transpose(1, 2, 0)\n image = image.astype(np.float32) / 255.0\n\n if self.transforms:\n transformed = self.transforms(image=image)\n image = transformed[\"image\"]\n\n if not self.single_frame:\n if self.n_frames > 1:\n if self.frames_chanel:\n image = image.view(\n self.n_frames, 3, image.size(1), image.size(2)\n )\n else:\n image = (\n image.view(1, self.n_frames, image.size(1), image.size(2))\n .repeat(3, 1, 1, 1)\n .transpose(0, 1)\n )\n # else:\n if image.size(0) == 1:\n image = image.repeat(3, 1, 1)\n\n return image, 0, 0\n\n\nclass SegDataset(Dataset):\n \"\"\"\n Dataset for training segmentation models.\n Masks are not used in the pipeline here, we only use the classification part.\n\n Attributes:\n df (pandas DataFrame): Metadata containing image and mask information.\n for_classification (bool): Flag indicating whether the dataset is used for classification.\n use_soft_target (bool): Flag 
indicating whether soft targets are used.\n transforms (albu transforms): Transforms to apply to images and masks.\n\n \"\"\"\n def __init__(\n self,\n df,\n for_classification=True,\n use_soft_target=False,\n transforms=None,\n ):\n \"\"\"\n Constructor for the SegDataset class.\n\n Args:\n df (pandas DataFrame): Metadata containing image and mask information.\n for_classification (bool, optional): Whether the dataset is used for classif. Defaults to True.\n use_soft_target (bool, optional): Whether soft targets are used. Defaults to False.\n transforms (albu transforms, optional): Transforms to apply to images and masks. Defaults to None.\n \"\"\"\n self.df = df\n self.transforms = transforms\n self.for_classification = for_classification\n\n self.img_paths = df[\"img_path\"].values\n self.mask_paths = df[\"mask_path\"].values\n\n targets = [\n \"pixel_count_liver\",\n \"pixel_count_spleen\",\n \"pixel_count_left-kidney\",\n \"pixel_count_right-kidney\",\n \"pixel_count_bowel\",\n ]\n if use_soft_target:\n self.img_targets = df[[c + \"_norm\" for c in targets]].values\n else:\n self.img_targets = df[targets].values > 100\n\n def __len__(self):\n \"\"\"\n Get the length of the dataset.\n\n Returns:\n int: Length of the dataset.\n \"\"\"\n return len(self.img_paths)\n\n def __getitem__(self, idx):\n \"\"\"\n Item accessor.\n\n Args:\n idx (int): Index.\n\n Returns:\n torch.Tensor: Image as a tensor.\n torch.Tensor: Mask as a tensor (if not for classification).\n torch.Tensor: Label as a tensor.\n \"\"\"\n image = cv2.imread(self.img_paths[idx]).astype(np.float32) / 255.0\n\n y = torch.tensor(self.img_targets[idx], dtype=torch.float)\n\n if not self.for_classification:\n mask = cv2.imread(self.mask_paths[idx], 0)\n\n mask = np.where(mask == 4, 3, mask)\n mask = np.where(mask == 5, 4, mask)\n\n transformed = self.transforms(image=image, mask=mask)\n image = transformed[\"image\"]\n mask = transformed[\"mask\"]\n mask = mask.unsqueeze(0).float()\n\n return image, mask, y\n\n image = self.transforms(image=image)[\"image\"]\n return image, y, 0\n\n\nclass Seg3dDataset(Dataset):\n \"\"\"\n Dataset for training 3D segmentation models.\n\n Attributes:\n df (pandas DataFrame): Metadata containing image and mask information.\n train (bool): Flag indicating whether the dataset is used for training.\n test (bool): Flag indicating whether the dataset is used for testing.\n \"\"\"\n def __init__(\n self,\n df,\n train=False,\n test=False,\n ):\n \"\"\"\n Constructor.\n\n Args:\n df (pandas DataFrame): Metadata containing image and mask information.\n train (bool, optional): Whether the dataset is used for training. Defaults to False.\n test (bool, optional): Whether the dataset is used for testing. 
Defaults to False.\n \"\"\"\n self.df = df\n self.train = train\n self.test = test\n\n self.img_paths = df[\"img_path\"].values\n self.mask_paths = df[\"mask_path\"].values\n\n if train:\n import monai.transforms as transforms\n\n # https://docs.monai.io/en/0.3.0/transforms.html\n self.transforms = transforms.Compose(\n [\n transforms.RandAffined(\n translate_range=[256 * 0.1] * 3,\n padding_mode=\"zeros\",\n keys=[\"image\", \"mask\"],\n prob=0.5,\n ),\n transforms.RandRotated(\n range_x=(-0.3, 0.3),\n range_y=(-0.3, 0.3),\n range_z=(-0.3, 0.3),\n mode=\"nearest\",\n keys=[\"image\", \"mask\"],\n prob=0.5,\n ),\n transforms.RandZoomd(\n min_zoom=0.666,\n max_zoom=1.5,\n mode=\"nearest\",\n keys=[\"image\", \"mask\"],\n prob=0.5,\n ),\n ]\n )\n else:\n self.transforms = None\n\n self.imgs = {}\n self.masks = {}\n if not test:\n for idx in range(len(self.img_paths)):\n self.imgs[self.img_paths[idx]] = np.load(self.img_paths[idx])[None]\n self.masks[self.mask_paths[idx]] = np.load(self.mask_paths[idx])[None]\n\n def __len__(self):\n \"\"\"\n Get the length of the dataset.\n\n Returns:\n int: Length of the dataset.\n \"\"\"\n return len(self.img_paths)\n\n def __getitem__(self, idx):\n \"\"\"\n Item accessor.\n\n Args:\n idx (int): Index.\n\n Returns:\n torch.Tensor: Image as a tensor.\n torch.Tensor: Mask as a tensor (if not for testing).\n int: Dummy value.\n \"\"\"\n image = self.imgs.get(\n self.img_paths[idx],\n np.load(self.img_paths[idx])[None],\n )\n\n if not self.test:\n mask = self.masks.get(\n self.mask_paths[idx], np.load(self.mask_paths[idx])[None]\n )\n # Merge both kidneys !\n mask = np.where(mask == 4, 3, mask)\n mask = np.where(mask == 5, 4, mask)\n else:\n mask = 0\n\n if self.transforms is not None:\n res = self.transforms({\"image\": image, \"mask\": mask})\n image = res[\"image\"].as_tensor().float() / 255.0\n mask = res[\"mask\"].as_tensor()\n else:\n image = torch.from_numpy(image).float() / 255.0\n if not self.test:\n mask = torch.from_numpy(mask)\n\n return image, mask, 0\n\n\nclass PatientFeatureDataset(Dataset):\n \"\"\"\n Dataset for training RNN models.\n\n Attributes:\n df_patient (pandas DataFrame): Metadata containing patient information.\n df_img (pandas DataFrame): Metadata containing image information.\n exp_folders (list of tuples): Experiment folders and modes.\n max_len (int, optional): Maximum length for feature sequences. Defaults to None.\n restrict (bool, optional): Flag to restrict feature length. Defaults to False.\n resize (tuple, optional): Tuple specifying the size for resizing features. Defaults to None.\n \"\"\"\n def __init__(\n self,\n df_patient,\n df_img,\n exp_folders,\n max_len=None,\n restrict=False,\n resize=None,\n ):\n \"\"\"\n Constructor.\n\n Args:\n df_patient (pandas DataFrame): Metadata containing patient information.\n df_img (pandas DataFrame): Metadata containing image information.\n exp_folders (list of tuples): Experiment folders and modes.\n max_len (int, optional): Maximum length for feature sequences. Defaults to None.\n restrict (bool, optional): Flag to restrict feature length. Defaults to False.\n resize (tuple, optional): Tuple specifying the size for resizing features. 
Defaults to None.\n \"\"\"\n self.df_patient = df_patient\n self.fts, self.crop_fts = self.retrieve_features(df_img, exp_folders)\n self.ids = list(self.fts.keys())\n self.max_len = max_len\n self.restrict = restrict\n self.resize = resize\n\n def retrieve_features(self, df, exp_folders):\n \"\"\"\n Retrieve and organize features from experiment folders.\n\n Args:\n df (pandas DataFrame): Metadata containing image information.\n exp_folders (list of tuples): Experiment folders and modes.\n\n Returns:\n dict: Features dictionary.\n dict: Crop features dictionary.\n \"\"\"\n features_dict = {}\n crop_features_dict = {}\n for fold in sorted(df[\"fold\"].unique()):\n df_val = df[df[\"fold\"] == fold].reset_index(drop=True)\n\n fts = []\n for exp_folder, mode in exp_folders:\n if mode == \"seg\":\n seg = np.load(exp_folder + f\"pred_val_{fold}.npy\")\n fts.append(seg)\n elif mode == \"crop\":\n continue\n else: # proba\n ft = np.load(exp_folder + f\"pred_val_{fold}.npy\")\n fts.append(ft)\n\n kidney = (\n seg[:, 2:4].max(-1, keepdims=True)\n if seg.shape[-1] == 5\n else seg[:, 2:3]\n )\n fts.append(\n np.concatenate(\n [\n ft[:, :1] * seg[:, -1:], # bowel\n ft[:, 1:2] * seg.max(-1, keepdims=True), # extravasation\n ft[:, 2:5] * kidney, # kidney\n ft[:, 5:8] * seg[:, :1], # liver\n ft[:, 8:] * seg[:, 1:2], # spleen\n ],\n -1,\n )\n )\n try:\n fts = np.concatenate(fts, axis=1)\n except Exception:\n fts = np.zeros(len(df_val))\n\n df_val[\"index\"] = np.arange(len(df_val))\n slice_starts = (\n df_val.groupby([\"patient_id\", \"series\"])[\"index\"].min().to_dict()\n )\n slice_ends = (\n df_val.groupby([\"patient_id\", \"series\"])[\"index\"].max() + 1\n ).to_dict()\n\n for k in slice_starts.keys():\n start = slice_starts[k]\n end = slice_ends[k]\n\n if df_val[\"frame\"][start] < df_val[\"frame\"][end - 1]:\n features_dict[k] = fts[start:end]\n else:\n features_dict[k] = fts[start:end][::-1]\n\n crop_fts = []\n for exp_folder, mode in exp_folders:\n if mode == \"crop\":\n if not len(df_val):\n continue\n\n preds = np.load(exp_folder + f\"pred_val_{fold}.npy\")\n df_series = get_df_series(\n self.df_patient[self.df_patient[\"fold\"] == fold],\n df_val,\n )\n\n for i, c in enumerate([\"pred_healthy\", \"pred_low\", \"pred_high\"]):\n df_series[c] = preds[:, i]\n df_series = (\n df_series.groupby([\"patient_id\", \"series\"])\n .agg(list)\n .reset_index()\n )\n\n i = 2\n crop_scores = np.array(\n [\n np.array(df_series[p].values.tolist())\n for p in [\"pred_healthy\", \"pred_low\", \"pred_high\"]\n ]\n ).transpose(1, 2, 0)\n crop_fts.append(crop_scores)\n\n if len(crop_fts):\n crop_scores = np.concatenate(crop_fts, -1)\n for i, (p, s) in enumerate(df_series[[\"patient_id\", \"series\"]].values):\n try:\n _ = features_dict[(p, s)]\n crop_features_dict[(p, s)] = crop_scores[i] # cls x score\n except KeyError:\n print(p, s)\n\n return features_dict, crop_features_dict\n\n def __len__(self):\n return len(self.fts)\n\n @staticmethod\n def restrict_fts(fts):\n \"\"\"\n Restrict the length of features.\n\n Args:\n fts (numpy.ndarray): Features array.\n\n Returns:\n numpy.ndarray: Restricted features array.\n \"\"\"\n if len(fts) > 400:\n fts = fts[len(fts) // 6:]\n else:\n fts = fts[len(fts) // 8:]\n return fts\n\n @staticmethod\n def resize_fts(fts, size, max_len=None):\n \"\"\"\n Resize features.\n\n Args:\n fts (numpy.ndarray): Features array.\n size (tuple): Size for resizing.\n max_len (int, optional): Maximum length for features. 
Defaults to None.\n\n Returns:\n numpy.ndarray: Resized features array.\n \"\"\"\n if max_len is not None: # crop too long\n fts = fts[-max_len:]\n\n fts = fts[::2].copy()\n\n fts = F.interpolate(\n torch.from_numpy(fts.T).float().unsqueeze(0), size=size, mode=\"linear\"\n )[0].transpose(0, 1)\n return fts\n\n def __getitem__(self, idx):\n \"\"\"\n Item accessor.\n\n Args:\n idx (int): Index.\n\n Returns:\n dict: Features and crop features (if available).\n torch.Tensor: Label as a tensor.\n int: Dummy value.\n \"\"\"\n patient_series = self.ids[idx]\n\n fts = self.fts[patient_series]\n crop_fts = self.crop_fts.get(patient_series, None)\n\n if self.restrict:\n fts = self.restrict_fts(fts)\n\n if self.resize:\n fts = self.resize_fts(fts, self.resize, self.max_len)\n else:\n if self.max_len is not None:\n fts = self.pad(fts)\n fts = torch.from_numpy(fts).float()\n\n if crop_fts is not None:\n crop_fts = torch.from_numpy(crop_fts).float()\n else:\n crop_fts = 0\n\n y = self.df_patient[self.df_patient[\"patient_id\"] == patient_series[0]][\n PATIENT_TARGETS\n ].values[0]\n\n y = torch.from_numpy(y).float() # bowel, extravasion, kidney, liver, spleen\n\n return {\"x\": fts, \"ft\": crop_fts}, y, 0\n","repo_name":"TheoViel/kaggle_rsna_abdominal_trauma","sub_path":"src/data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":34648,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"76"}
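The get_frames helper in the record above is self-contained, so its behaviour can be checked directly; both outputs below were worked out by hand from the function body (the import path assumes the file sits at src/data/dataset.py as the record metadata states):

from data.dataset import get_frames

# centred window: 4 frames around frame 50 with stride 2, plus -1/0/+1 channel offsets
print(get_frames(50, 4, 1, stride=2, max_frame=100))
# -> [45 46 47 47 48 49 49 50 51 51 52 53]

# stride=-1 ignores the centre frame and spaces frames linearly over [0, max_frame]
print(get_frames(0, 4, 0, stride=-1, max_frame=100))
# -> [28 42 57 71]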
+{"seq_id":"28405269475","text":"\"\"\"ManhuaKO site downloader\"\"\"\n\nimport time\nfrom typing import Iterable, Set\nfrom urllib.parse import quote\n\nfrom bs4 import BeautifulSoup\n\nfrom .base import Chapter, ChapterImage, Language, Manga, Site\n\n\nclass ManhuaKO(Site):\n @property\n def name(self) -> str:\n return \"ManhuaKO\"\n\n @property\n def url(self) -> str:\n return \"https://manhuako.com\"\n\n @property\n def supported_languages(self) -> Set[Language]:\n return {Language.es}\n\n def search(self, query: str, lang: Language = None) -> Iterable[Manga]:\n with self.session.get(f\"{self.url}/home/search\", params={\"mq\": query}) as resp:\n resp.raise_for_status()\n soup = BeautifulSoup(resp.text, \"html.parser\")\n pages = [soup]\n pagelist = soup.find(\"ul\", class_=\"pagination\")\n if pagelist:\n # get only the second page\n for page in pagelist(\"a\")[1:2]:\n with self.session.get(page[\"href\"]) as resp:\n resp.raise_for_status()\n soup = BeautifulSoup(resp.text, \"html.parser\")\n pages.append(soup)\n\n for page in pages:\n for card in page(\"div\", {\"class\": \"card\"}):\n if card.findNext(\"p\", {\"class\": \"type\"}).text == \"Novela\":\n continue\n anchor = card.findNext(\"a\", {\"class\": \"white-text\"})\n yield Manga(\n url=anchor[\"href\"],\n name=anchor.text.strip(),\n cover=card.findNext(\"img\")[\"src\"],\n )\n\n def get_chapters(self, manga: Manga) -> Iterable[Chapter]:\n with self.session.get(manga.url) as resp:\n resp.raise_for_status()\n soup = BeautifulSoup(resp.text, \"html.parser\")\n pages = [soup]\n pagelist = soup.find(\"ul\", class_=\"pagination\")\n if pagelist:\n last_page = int(\n pagelist(\"a\")[-1][\"href\"].strip(\"/\").rsplit(\"/\", maxsplit=1)[-1]\n )\n for page_number in range(2, last_page + 1):\n with self.session.get(f\"{manga.url}/page/{page_number}\") as resp:\n resp.raise_for_status()\n soup = BeautifulSoup(resp.text, \"html.parser\")\n pages.append(soup)\n time.sleep(0.1)\n for page in pages:\n page = page.find(\"table\", {\"class\": \"table-chapters\"})\n for item in page(\"tr\"):\n item = item.findNext(\"a\")\n yield Chapter(name=item.text.strip(), url=item[\"href\"])\n\n def get_images(self, chapter: Chapter) -> Iterable[ChapterImage]:\n with self.session.get(chapter.url) as resp:\n resp.raise_for_status()\n soup = BeautifulSoup(resp.text, \"html.parser\")\n soup = soup.find(\"div\", {\"id\": \"pantallaCompleta\"})\n for img in soup(\"img\"):\n yield ChapterImage(url=quote(img[\"src\"], safe=\":/%\"))\n","repo_name":"adbenitez/simplebot_manga","sub_path":"simplebot_manga/manga_api/manhuako.py","file_name":"manhuako.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"72922136564","text":"# 문제 링크: https://leetcode.com/problems/minimum-number-of-operations-to-move-all-balls-to-each-box/\n\nclass Solution:\n def minOperations(self, boxes: str) -> List[int]:\n answer = [0] * len(boxes)\n\n count = ops = 0\n for i in range(len(boxes)):\n answer[i] += ops\n if boxes[i] == '1':\n count += 1\n ops += count\n\n count = ops = 0\n for i in range(len(boxes) - 1, -1, -1):\n answer[i] += ops\n if boxes[i] == '1':\n count += 1\n ops += count\n\n return answer\n","repo_name":"jamesujeon/coding-problem-solutions","sub_path":"leetcode/python 3/1769-2.py","file_name":"1769-2.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"42397802189","text":"import sys\n\n\n\ntry:\n i =1 # gli argomenti li devo aggiungere nel json di visualcode\n while i != 3:\n \n \n file = open(sys.argv[i] , \"r\")\n \n for x in file:\n print(x)\n \n file.close()\n i += 1\n \nexcept:\n print(\"file non trovato\")\n quit()\n \n \n \n \n\n \n \n\n\n ","repo_name":"GiuseppeGambac/Python","sub_path":"08_File_eccezioni/151_concatena.py","file_name":"151_concatena.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"9108801951","text":"import pysam\nimport argparse\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser('Bed to vcf')\n\tparser.add_argument('-b','--bed', help=\"in.bed\")\n\tparser.add_argument('-r','--ref', help=\"reference.fasta\")\n\tparser.add_argument('-o','--out', help=\"out.vcf\")\n\n\tglobal opts\n\topts = parser.parse_args()\n\tfa = pysam.Fastafile(opts.ref)\n\tvcf=open(opts.out,'w')\n\tvcf.write('##fileformat=VCFv4.1'+'\\n')\n\tvcf.write('##INFO='+'\\n')\n\tvcf.write('#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\tSAMPLE\"'+'\\n')\n\twith open(opts.bed,'r') as bed:\n\t\tfor line in bed:\n\t\t\tif line.startswith('#') or line.startswith('@'):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tchrom,start,end,ref,alt=line.rstrip().split('\\t')[:5]\n\t\t\t\tid=line.rstrip().split('\\t')[-1]\n\n\t\t\t\ttry:\n\t\t\t\t\tfa.fetch(chrom,start)\n\t\t\t\texcept:\n\t\t\t\t\tchrom='chr'+chrom\n\n\t\t\t\tif ref == '-':\n\t\t\t\t\tref = fa.fetch(chrom, int(start)-1, int(start)).upper()\n\n\t\t\t\t\talt = ref.upper() + alt.upper()\n\t\t\t\telif alt== '-':\n\t\t\t\t\tref = fa.fetch(chrom, int(start)-1, int(end)).upper()\n\t\t\t\t\talt = ref[0].upper()\n\n\t\t\t\tvcf.write('\\t'.join([chrom, start, '.', ref, alt,'.','.','ID='+id,'.','.'])+'\\n')","repo_name":"urtism/CMG","sub_path":"SCRIPT_CMG/SCRIPT_PYTHON/FILE_MANIPULATION/bed_to_vcf.py","file_name":"bed_to_vcf.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"13283208444","text":"\"\"\"\nServer that accepts encrypted messages to set a GPIO pin to high upon valid\nrequest from a client.\n\"\"\"\n\n__author__ = 'Tiziano Bettio'\n__license__ = 'MIT'\n__version__ = '0.1'\n__copyright__ = \"\"\"Copyright (c) 2019 Tiziano Bettio\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\"\"\"\n\nimport hashlib\nimport threading\nimport sys\nimport os\n\npath = os.path.split(__file__)[0]\nif path not in sys.path:\n sys.path.insert(0, path)\nif os.getcwd() != path:\n os.chdir(path)\n\nimport opener\nfrom settings import PASSPHRASE\nfrom settings import TIMEOUT\nfrom settings import SALT_LEN\n\nfrom sessionhandler import SessionHandler\n\nsession_handler = SessionHandler(TIMEOUT, SALT_LEN)\n\n\ndef application(environ, start_response):\n session_handler.cleanup()\n if environ['PATH_INFO'] == '/salt':\n response_body = session_handler.new_session().encode()\n status = '200 OK'\n response_headers = [\n ('Content-Type', 'text/plain'),\n ('Content-Length', str(len(response_body)))\n ]\n elif environ['PATH_INFO'] == '/':\n response_body = open('snippets/form.html', 'r').read()\n response_body = response_body.replace('@@salt@@',\n session_handler.new_session())\n response_body = response_body.encode()\n status = '200 OK'\n response_headers = [\n ('Content-Type', 'text/html'),\n ('Content-Length', str(len(response_body)))\n ]\n elif environ['PATH_INFO'] == '/open':\n q_split = environ['QUERY_STRING'].split('=')\n response_body = open('snippets/invalid.html', 'r').read().encode()\n if len(q_split) == 2:\n salt, hashstring = q_split\n if session_handler.valid(salt):\n session_handler.invalidate(salt)\n hm = hashlib.sha3_512()\n raw = PASSPHRASE + salt\n hm.update(raw.encode())\n chk_hashstring = hm.hexdigest()\n if hashstring == chk_hashstring:\n t = threading.Thread(target=opener.open_door)\n t.start()\n response_body = open('snippets/success.html', 'r').read()\n response_body = response_body.encode()\n\n status = '200 OK'\n response_headers = [\n ('Content-Type', 'text/html'),\n ('Content-Length', str(len(response_body)))\n ]\n else:\n # instead of 404, just send random amount of random hex data...\n num_bytes = int().from_bytes(os.urandom(2), 'little') // 2 + 1\n response_body = hex(int().from_bytes(os.urandom(num_bytes), 'little'))\n response_body = response_body[2:].encode()\n # response_body = session_handler._active() !!!DEBUGGING ONLY!!!\n status = '200 OK'\n response_headers = [\n ('Content-Type', 'text/plain'),\n ('Content-Length', str(len(response_body)))\n ]\n\n 
start_response(status, response_headers)\n return [response_body]\n\n","repo_name":"tizilogic/gdo-server","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
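For completeness, the matching client-side handshake: fetch a salt, hash PASSPHRASE + salt with SHA3-512, and send it back. Note the server splits QUERY_STRING on '=', so the query string is literally "<salt>=<digest>". A sketch assuming the requests library and a hypothetical deployment URL (PASSPHRASE must equal the server's settings.PASSPHRASE):

import hashlib
import requests

BASE = "https://door.example.org"  # hypothetical host
PASSPHRASE = "change-me"           # shared secret from the server's settings.py

salt = requests.get(BASE + "/salt").text
digest = hashlib.sha3_512((PASSPHRASE + salt).encode()).hexdigest()
# query is "<salt>=<digest>", matching the server's QUERY_STRING.split('=')
print(requests.get(BASE + "/open?" + salt + "=" + digest).text)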
+{"seq_id":"5771986001","text":"# Today was again just more coding problems discussed in class.\nimport math\n\ndef test(g):\n \"\"\"\n >>> test(1)\n 1\n \"\"\"\n return g\n\n#min path using dynamic programming using\n#bottom up approach\ndef minpath(dag, u, v):\n \"\"\"\n >>> dag = [[(1,3),(2,1)],[(3,2)],[(3,2)],[]]\n >>> minpath(dag, 0, 3)\n \"\"\"\n d = [math.inf] * len(dag)\n d[u] = 0\n for s in topsort(dag):\n for (t,w) in dag[s]:\n d[t] = min(d[s] + w, d[t])\n \n #Returning the final item in the memo\n #Which should also be the final item in the dag\n return d[v]\n\n#######End example\n\n\n#####Warshall Algo\ndef warshall(adj):\n T = adj\n for w in range(len(adj)):\n for u in range(len(adj)):\n for v in range(len(adj)):\n if T[u][w] == 1 and T[w][v] == 1:\n T[u][v] = 1\n\n return T\n\n\n###Shortest possible path changem-up version:\ndef warshall2(g):\n n = len(g)\n for w in range(n):\n for u in range(n):\n for v in range(n):\n g[u][v] = min(g[u][v], g[u][w] + g[w][v])\n \n return g\n#######End example\n\n\n\n\n####Floyd-Warshall algo example\n#changing possible paths in a adj. matrix IF there is actually a path\n#from u to v\ndef floyd_warshall(g):\n '''i have no idea what to put for a test case here...'''\n n = len(g)\n for w in range(n):\n for u in range(n):\n for v in range(n):\n #left hand = itself OR bitwise and operation\n g[u][v] = g[u][v] | (g[u][w] & g[w][v])\n return g \n\n########End example\n\n\n#####Longest Common Substring\n###Looking at two strings, the longest possible string compared against the two\n#O(s * t)\ndef LCS(s,t):\n '''What even is testing?'''\n T = [[0]* (range(len(t))) for i in s]\n largest = 0\n for i in range(len(s)):\n for j in range(len(t)):\n if s[i] == t[j]:\n if i == 0 or j == 0:\n T[i][j] = 1\n else:\n T[i][j] = T[i-1][j-1] + 1\n largest = max(largest, T[i][j])\n return largest\n\n \n####Subset Sum\n##Find sum subset of ints that add up to target T\ndef subsetSum(xs, i):\n '''Didn't have time for this one'''\n pass\n###End example\n\n\n\n\n\n\n######Code pulled from assignment 4\n#Works mostly I think?\ndef topsort(d):\n seen = [-1] * len(d)\n sT = []\n for i in range(len(seen)):\n if seen[i] != True:\n sT = topMaker(d, 0, [0], seen)\n return sT\n\ndef topMaker(d, u, sT, seen):\n seen[u] = True\n for w in d[u]:\n if seen[w] != True:\n sT.append(w)\n topMaker(d, w, sT, seen)\n \n return sT\n\n\n\nif __name__ == \"__main__\": \n import doctest\n doctest.testmod()","repo_name":"NAlexH2/cs350","sub_path":"0-notes/w7d2_DynP_P2.py","file_name":"w7d2_DynP_P2.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"24293545362","text":"import os\nimport re\nimport unicodedata\nimport string\nimport random\nimport base64\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nfrom mongoengine import DoesNotExist\nfrom werkzeug._compat import text_type\nfrom werkzeug._compat import PY2\nfrom pypinyin import pinyin, Style\n\nuid_chars = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0')\n\n_ascii_strip_re = {\n 'user': re.compile(r'[^A-Za-z0-9]'),\n 'app': re.compile(r'[^A-Za-z0-9-]'),\n 'module': re.compile(r'[^A-Za-z0-9]'),\n 'dataset': re.compile(r'[^A-Za-z0-9-]'),\n}\n\nsplit_re = {\n 'user': '[^A-Za-z0-9]',\n 'app': '[^A-Za-z0-9-]',\n 'module': '[^A-Za-z0-9]',\n 'dataset': '[^A-Za-z0-9-]',\n}\nconnector_re = {\n 'user': '',\n 'app': '-',\n 'module': '',\n 'dataset': '-',\n}\nAKEY = '27cfbc4d262403839797636105d0a476' # AES key must be either 16, 24, or 32 bytes long\n\n# iv = Random.new().read(AES.block_size)\niv = 'This is an IV456'\n\n\ndef encode(message):\n obj = AES.new(AKEY.encode(\"utf8\"), AES.MODE_CFB, iv.encode(\"utf8\"))\n message = bytes(message, encoding=\"utf8\")\n return base64.urlsafe_b64encode(obj.encrypt(message)).decode(\"utf-8\")\n\n\ndef decode(cipher):\n obj2 = AES.new(AKEY, AES.MODE_CFB, iv)\n if not isinstance(cipher, str):\n cipher = cipher.encode(\"uft-8\")\n return obj2.decrypt(base64.urlsafe_b64decode(cipher))\n\n\ndef generate_args_str(args):\n array = [\"%s=%s\" % (k, (v if not isinstance(v, str) else \"'%s'\" % v))\n for k, v in args.items()]\n return ', '.join(array)\n\n\n# def remove_dot(string):\n# string.replace('.', '')\n# return string\n\n\ndef slugify(value, allow_unicode=False):\n \"\"\"\n Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.\n Remove characters that aren't alphanumerics, underscores, or hyphens.\n Convert to lowercase. Also strip leading and trailing whitespace.\n \"\"\"\n if value == '':\n value = 'field' + rand_str(3)\n\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode(\n 'ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value).strip()\n return re.sub(r'[-\\s]+', '-', value)\n\n\ndef rand_str(length):\n return ''.join(\n random.choices(string.ascii_uppercase + string.digits, k=length))\n\n\ndef split_without_empty(string):\n return [x.strip() for x in string.split(',') if x]\n\n\n# werkzeug.utils\ndef secure_name(filename, type='user'):\n r\"\"\"Pass it a filename and it will return a secure version of it. This\n filename can then safely be stored on a regular file system and passed\n to :func:`os.path.join`. The filename returned is an ASCII only string\n for maximum portability.\n On windows systems the function also makes sure that the file is not\n named after one of the special device files.\n >> secure_filename(\"My cool movie.mov\")\n 'My_cool_movie.mov'\n >> secure_filename(\"../../../etc/passwd\")\n 'etc_passwd'\n >> secure_filename(u'i contain cool \\xfcml\\xe4uts.txt')\n 'i_contain_cool_umlauts.txt'\n The function might return an empty filename. It's your responsibility\n to ensure that the filename is unique and that you abort or\n generate a random filename if the function returned an empty one.\n .. 
versionadded:: 0.5\n :param filename: the filename to secure\n :param type: ['user', 'app', 'module', 'dataset']\n \"\"\"\n if isinstance(filename, text_type):\n filename = ''.join([p[0] for p in pinyin(filename, style=Style.TONE2)])\n from unicodedata import normalize\n filename = normalize('NFKD', filename).encode('ascii', 'ignore')\n if not PY2:\n filename = filename.decode('ascii')\n for sep in os.path.sep, os.path.altsep:\n if sep:\n filename = filename.replace(sep, ' ')\n filename = str(\n re.compile(r'[^A-Za-z0-9-_]').sub('', connector_re[type].join(\n re.split('[\\-_ ]+', filename)))).strip(\n connector_re[type]).lower()\n\n return filename\n\n\ndef short_uid(uid_length):\n count = len(uid_chars) - 1\n c = ''\n for i in range(0, uid_length):\n c += uid_chars[random.randint(0, count)]\n return c\n\n\ndef gen_rand_name(name, get_func, times=1, **kwargs):\n from server3.constants import RCUserDoesNotExists\n for i in range(times):\n try:\n get_func(name, **kwargs)\n except (DoesNotExist, RCUserDoesNotExists):\n break\n else:\n name += short_uid(2)\n return name\n\n\ndef gen_rand_str(N=8, low=False):\n if low:\n return ''.join(\n random.choices(string.ascii_lowercase + string.digits, k=N))\n return ''.join(random.choices(string.ascii_uppercase + string.digits, k=N))\n","repo_name":"yssAI/tp_project","sub_path":"server3/utility/str_utility.py","file_name":"str_utility.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
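Two behaviours of the slugify helper above that are easy to miss, traced from the function body (the suffix for empty input comes from rand_str, so the exact characters vary per call):

print(slugify("Crème brûlée!"))  # -> 'Creme-brulee' (NFKD fold, punctuation stripped)
print(slugify(""))               # -> 'field' plus 3 random chars, e.g. 'fieldA7Q'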
+{"seq_id":"73407559926","text":"from NSF_AM_Pathway import ReadIni\nfrom NSF_AM_Pathway import SyllabiTextProcessing\nfrom NSF_AM_Pathway import SentenceEncoder\nfrom NSF_AM_Pathway import Reports\n\nRESULTS_LOC, RESULT_CUR_DIR, COLLEGE_NAME, SYLLABI_ROOT_LOC, SYLLABI_LOC, REPORT_TYPE, COURSE_TYPE = ReadIni.getINIVariables()\nprint(ReadIni.getINIVariables())\n#Text Processing\nsyllabusT = SyllabiTextProcessing.getTextFromSyllabi(SYLLABI_LOC)\nsyllabiClean = SyllabiTextProcessing.textCleanUp(syllabusT)\nverb, noun = SyllabiTextProcessing.getVerbNounLemma(syllabiClean)\nverbNoun = SyllabiTextProcessing.getVerbNounPair(syllabiClean)\nclassifiedVerbs, notInBlooms = SyllabiTextProcessing.classifyToBloomsLevel(set(verb))\n\n#Get BOK Verbs\nsyllabusT_BOK = SyllabiTextProcessing.getTextFromSyllabi([SYLLABI_ROOT_LOC +'\\\\BOK'])\nsyllabiClean_BOK = SyllabiTextProcessing.textCleanUp(syllabusT_BOK)\nverb_BOK, noun_BOK = SyllabiTextProcessing.getVerbNounLemma(syllabiClean_BOK)\n# Plot reports\nReports.getReport(verb, verb_BOK, REPORT_TYPE)\n\n","repo_name":"PalCat/AM_Pathways","sub_path":"FrameworkDriver.py","file_name":"FrameworkDriver.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"15463477649","text":"from scopus_harvester.response_to_json import response_to_json\nfrom scopus_harvester.file_to_data import file_to_data\n\ndef scopus_open_access(response):\n \"\"\"Returns an indicator\n 0 (is not open access),\n 1 (is open access) or\n None (no open acces info).\n \"\"\"\n output=[]\n f=response_to_json(response)\n data=file_to_data(f)\n if len(data)>0:\n for _ in range(len(data)):\n if data[_][\"openaccess\"] is None:\n output.append(None)\n else:\n output.append(data[_][\"openaccess\"])\n else:\n output.append(data[0][\"openaccess\"])\n return output\n\n\n","repo_name":"rbrtjwrk/scopus_harvester","sub_path":"scopus_harvester/scopus_open_access.py","file_name":"scopus_open_access.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"24714648651","text":"from draw import draw_pixbuf, propagate_expose, draw_vlinear, cairo_state\nfrom skin_config import skin_config\nfrom utils import get_window_shadow_size\nimport gobject\nimport gtk\n\nclass EventBox(gtk.EventBox):\n '''Event box.'''\n\t\n def __init__(self):\n '''Init event box.'''\n gtk.EventBox.__init__(self)\n self.set_visible_window(False)\n \nclass ImageBox(gtk.EventBox):\n '''Box just contain image.'''\n\t\n def __init__(self, image_dpixbuf):\n '''Init image box.'''\n # Init.\n gtk.EventBox.__init__(self)\n self.set_visible_window(False)\n self.image_dpixbuf = image_dpixbuf\n \n # Set size.\n pixbuf = self.image_dpixbuf.get_pixbuf()\n self.set_size_request(pixbuf.get_width(), pixbuf.get_height())\n \n # Connect expose signal.\n self.connect(\"expose-event\", self.expose_image_box)\n \n def expose_image_box(self, widget, event):\n '''Expose image box.'''\n # Init.\n cr = widget.window.cairo_create()\n rect = widget.allocation\n pixbuf = self.image_dpixbuf.get_pixbuf()\n \n # Draw.\n draw_pixbuf(cr, pixbuf, rect.x, rect.y)\n \n # Propagate expose.\n propagate_expose(widget, event)\n \n return True\n \ngobject.type_register(ImageBox)\n\nclass BackgroundBox(gtk.VBox):\n '''Box to expande background.'''\n\t\n def __init__(self):\n '''Init background box.'''\n # Init.\n gtk.VBox.__init__(self)\n self.set_can_focus(True)\n \n self.connect(\"expose-event\", self.expose_background_box)\n \n def draw_mask(self, cr, x, y, w, h):\n '''Draw mask.'''\n draw_vlinear(cr, x, y, w, h,\n [(0, (\"#FF0000\", 1)),\n (1, (\"#FF0000\", 1))]\n )\n \n def expose_background_box(self, widget, event):\n '''Expose background box.'''\n cr = widget.window.cairo_create()\n rect = widget.allocation\n toplevel = widget.get_toplevel()\n coordinate = widget.translate_coordinates(toplevel, rect.x, rect.y)\n (offset_x, offset_y) = coordinate\n \n with cairo_state(cr):\n cr.rectangle(rect.x, rect.y, rect.width, rect.height)\n cr.clip()\n \n (shadow_x, shadow_y) = get_window_shadow_size(toplevel)\n skin_config.render_background(cr, widget, shadow_x, shadow_y)\n \n self.draw_mask(cr, rect.x, rect.y, rect.width, rect.height) \n\n return False\n \ngobject.type_register(BackgroundBox)\n\n","repo_name":"netphi/deepin-ui","sub_path":"dtk/ui/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
+{"seq_id":"24111978363","text":"n=int(input())\n\ndef pm(n):\n f=True\n if n==1:\n return False\n for i in range(2,n//2+1):\n if(n%i==0):\n f=False\n break \n return f\n \ndef spl(n):\n f=False\n for i in range(1,n//2+1):\n if(pm(i) and pm(n-i)):\n f=True\n break\n return f\nprint(spl(n))\n\n \n \n","repo_name":"azarcoder/Python","sub_path":"T4TEQ/functions/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"43754686613","text":"# users/models.py\nfrom django.contrib.auth.models import AbstractUser\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db import models\n\n\nclass CustomUser(AbstractUser):\n email = models.EmailField(_(\"email\"), max_length=255, unique=True)\n first_name = models.CharField(_(\"first_name\"), max_length=30)\n last_name = models.CharField(_(\"last_name\"), max_length=30)\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['first_name', 'last_name', 'username']\n\n def __str__(self):\n return self.email\n\n def get_full_name(self):\n return self.first_name + ' ' + self.last_name\n\n\nSTATES = (\n ('AL', 'Alabama'),\n ('AK', 'Alaska'),\n ('AZ', 'Arizona'),\n ('AR', 'Arkansas'),\n ('CA', 'California'),\n ('CO', 'Colorado'),\n ('CT', 'Connecticut'),\n ('DE', 'Delaware'),\n ('FL', 'Florida'),\n ('GA', 'Georgia'),\n ('HI', 'Hawaii'),\n ('ID', 'Idaho'),\n ('IL', 'Illinois'),\n ('IN', 'Indiana'),\n ('IA', 'Iowa'),\n ('KS', 'Kansas'),\n ('KY', 'Kentucky'),\n ('LA', 'Louisiana'),\n ('ME', 'Maine'),\n ('MD', 'Maryland'),\n ('MA', 'Massachusetts'),\n ('MI', 'Michigan'),\n ('MN', 'Minnesota'),\n ('MS', 'Mississippi'),\n ('MO', 'Missouri'),\n ('MT', 'Montana'),\n ('NE', 'Nebraska'),\n ('NV', 'Nevada'),\n ('NH', 'New Hampshire'),\n ('NJ', 'New Jersey'),\n ('NM', 'New Mexico'),\n ('NY', 'New York'),\n ('NC', 'North Carolina'),\n ('ND', 'North Dakota'),\n ('OH', 'Ohio'),\n ('OK', 'Oklahoma'),\n ('OR', 'Oregon'),\n ('PA', 'Pennsylvania'),\n ('RI', 'Rhode Island'),\n ('SC', 'South Carolina'),\n ('SD', 'South Dakota'),\n ('TN', 'Tennessee'),\n ('TX', 'Texas'),\n ('UT', 'Utah'),\n ('VT', 'Vermont'),\n ('VA', 'Virginia'),\n ('WA', 'Washington'),\n ('WV', 'West Virginia'),\n ('WI', 'Wisconsin'),\n ('WY', 'Wyoming'),\n)\n\n\nclass Address(models.Model):\n street = models.CharField(max_length=45)\n city = models.CharField(max_length=45)\n state = models.CharField(max_length=2, choices=STATES)\n zip = models.IntegerField()\n\n def __str__(self):\n return \"%s, %s, %s, %s\" % (self.street, self.city, self.state,\n self.zipcode)\n","repo_name":"jjacobson/Sahara","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"14176923284","text":"#!/usr/bin/env python\n\n# Cutter Coryell\n# Ay 190\n# WS3 Problem 2\n\nimport numpy as np\nimport scipy.special as sp\nimport matplotlib.pyplot as pl\nimport plot_defaults\n\n# Part A\n\n# parameters\nmax_exponent = 8\n\nnumber_density_coeff = 1.05495e35 # cm^(-3)\n\nns = [2**p for p in range(1,max_exponent)]\n\ndef f(x):\n return x * x * np.exp(x) / (np.exp(x) + 1)\n\n[xs, ws] = sp.l_roots(ns[-1], 0)\n\nQs = np.array([np.sum(ws[:n] * f(xs)[:n]) for n in ns])\nnumber_densities = number_density_coeff * Qs\n\nprint(\"\\nPart A\\n\")\nprint(\"Number of nodes:\\n{}\".format(ns))\nprint(\"Number density [cm^(-3)]:\\n{}\".format(number_densities))\nprint(\"Change in number density [cm^(-3)]:\\n{}\".format(number_densities[1:]\n - number_densities[:-1]))\n\n# Answer: 1.902*10^35 cm^(-3)\n\n# Part B\n\n# parameters\nn = 100 # number of nodes in Legendre Quadrature\ndE = 5 # energy bin size (MeV)\nmax_E = 155 # energy cutoff (MeV)\n\nEs = np.arange(0, 155, dE) # energies\nxs = Es / 20.0 # x parameter, energy / temperature (20 MeV)\n\ndef x(y, a, b):\n return 0.5 * (y + 1) * (b - a) + a\n\ndef f(y, a, b):\n x_ = x(y, a, b)\n return 0.5 * (b - a) * x_ * x_ / (np.exp(x_) + 1)\n\n[ys, ws] = sp.p_roots(n, 0)\n\nQs = np.array([np.sum(ws * f(ys, xs[i], xs[i+1])) for i in range(len(xs) - 1)])\n\nprint(\"\\nPart B\\n\")\nprint(\"Total number density: {}\".format(number_density_coeff * np.sum(Qs)))\n\nmyfig = pl.figure(figsize=(10,8))\nmyfig.subplots_adjust(left=0.13)\nmyfig.subplots_adjust(bottom=0.14)\nmyfig.subplots_adjust(top=0.90)\nmyfig.subplots_adjust(right=0.95)\npl.bar(Es[:-1], number_density_coeff * Qs / 10**34, color='c', width=5)\npl.xlim(0, max_E - dE)\npl.xlabel(\"Energy bin [MeV]\")\npl.ylabel(r\"Number density [$\\times 10^{34}$ cm$^{-3}$]\")\npl.title(\"Number Density versus Energy\", fontsize=30)\npl.savefig(\"problem2b.pdf\")\npl.show()\n","repo_name":"savione/cutter-ay190","sub_path":"ws3/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"42705543500","text":"N = 4 # 5\r\nstages = [4,4,4,4,4] # [2,1,2,6,2,4,3,3]\r\n\r\ndef solution(N, stages):\r\n answer = []\r\n temp = []\r\n survive = len(stages)\r\n for i in range(1, N+1):\r\n fail = 0\r\n for j in range(len(stages)):\r\n if i == stages[j]:\r\n fail += 1 \r\n temp.append((i, fail/survive))\r\n survive -= fail\r\n temp = sorted(temp, key=lambda x:-x[1])\r\n for i in temp:\r\n answer.append(i[0])\r\n return answer\r\n\r\nprint(solution(N, stages))","repo_name":"SEJUNHONG/CodingTest","sub_path":"Sorting/실패율.py","file_name":"실패율.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"69824383286","text":"import argparse\nimport glob\nimport io\nimport json\nimport multiprocessing as mp\nimport os\nfrom os.path import basename, exists\n\nfrom cytoolz import curry\nimport numpy as np\nfrom tqdm import tqdm\nimport lmdb\n\nimport msgpack\nimport msgpack_numpy\nmsgpack_numpy.patch()\n\n\ndef _compute_nbb(img_dump, conf_th, max_bb, min_bb, num_bb):\n num_bb = max(min_bb, (img_dump['conf'] > conf_th).sum())\n num_bb = min(max_bb, num_bb)\n return int(num_bb)\n\n\n@curry\ndef load_npz(conf_th, max_bb, min_bb, num_bb, fname, keep_all=False):\n try:\n img_dump = np.load(fname, allow_pickle=True)\n if keep_all:\n nbb = None\n else:\n nbb = _compute_nbb(img_dump, conf_th, max_bb, min_bb, num_bb)\n dump = {}\n for key, arr in img_dump.items():\n if arr.dtype == np.float32:\n arr = arr.astype(np.float16)\n if arr.ndim == 2:\n dump[key] = arr[:nbb, :]\n elif arr.ndim == 1:\n dump[key] = arr[:nbb]\n else:\n raise ValueError('wrong ndim')\n except Exception as e:\n # corrupted file\n print(f'corrupted file {fname}', e)\n dump = {}\n nbb = 0\n\n name = basename(fname)\n return name, dump, nbb\n\n\ndef dumps_npz(dump, compress=False):\n with io.BytesIO() as writer:\n if compress:\n np.savez_compressed(writer, **dump, allow_pickle=True)\n else:\n np.savez(writer, **dump, allow_pickle=True)\n return writer.getvalue()\n\n\ndef dumps_msgpack(dump):\n return msgpack.dumps(dump, use_bin_type=True)\n\n\ndef main(opts):\n if opts.img_dir[-1] == '/':\n opts.img_dir = opts.img_dir[:-1]\n split = basename(opts.img_dir)\n if opts.keep_all:\n db_name = 'all'\n else:\n if opts.conf_th == -1:\n db_name = f'feat_numbb{opts.num_bb}'\n else:\n db_name = (f'feat_th{opts.conf_th}_max{opts.max_bb}'\n f'_min{opts.min_bb}')\n if opts.compress:\n db_name += '_compressed'\n if not exists(f'{opts.output}/{split}'):\n os.makedirs(f'{opts.output}/{split}')\n env = lmdb.open(f'{opts.output}/{split}/{db_name}', map_size=1024**4)\n txn = env.begin(write=True)\n files = glob.glob(f'{opts.img_dir}/*.npz')\n load = load_npz(opts.conf_th, opts.max_bb, opts.min_bb, opts.num_bb,\n keep_all=opts.keep_all)\n name2nbb = {}\n with mp.Pool(opts.nproc) as pool, tqdm(total=len(files)) as pbar:\n for i, (fname, features, nbb) in enumerate(\n pool.imap_unordered(load, files, chunksize=128)):\n if not features:\n continue # corrupted feature\n if opts.compress:\n dump = dumps_npz(features, compress=True)\n else:\n dump = dumps_msgpack(features)\n txn.put(key=fname.encode('utf-8'), value=dump)\n if i % 1000 == 0:\n txn.commit()\n txn = env.begin(write=True)\n name2nbb[fname] = nbb\n pbar.update(1)\n txn.put(key=b'__keys__',\n value=json.dumps(list(name2nbb.keys())).encode('utf-8'))\n txn.commit()\n env.close()\n if opts.conf_th != -1 and not opts.keep_all:\n with open(f'{opts.output}/{split}/'\n f'nbb_th{opts.conf_th}_'\n f'max{opts.max_bb}_min{opts.min_bb}.json', 'w') as f:\n json.dump(name2nbb, f)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--img_dir\", default=None, type=str,\n help=\"The input images.\")\n parser.add_argument(\"--output\", default=None, type=str,\n help=\"output lmdb\")\n parser.add_argument('--nproc', type=int, default=8,\n help='number of cores used')\n parser.add_argument('--compress', action='store_true',\n help='compress the tensors')\n parser.add_argument('--keep_all', action='store_true',\n help='keep all features, overrides all following args')\n parser.add_argument('--conf_th', type=float, default=0.2,\n help='threshold for dynamic bounding 
boxes '\n '(-1 for fixed)')\n parser.add_argument('--max_bb', type=int, default=100,\n help='max number of bounding boxes')\n parser.add_argument('--min_bb', type=int, default=10,\n help='min number of bounding boxes')\n parser.add_argument('--num_bb', type=int, default=100,\n help='number of bounding boxes (fixed)')\n args = parser.parse_args()\n main(args)\n","repo_name":"ChenRocks/UNITER","sub_path":"scripts/convert_imgdir.py","file_name":"convert_imgdir.py","file_ext":"py","file_size_in_byte":4673,"program_lang":"python","lang":"en","doc_type":"code","stars":752,"dataset":"github-code","pt":"76"}
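Reading the resulting LMDB back follows directly from how the writer above stores things; a sketch with a hypothetical db path (msgpack for plain dbs, the commented np.load line for dbs built with --compress):

import io
import json
import lmdb
import msgpack
import msgpack_numpy
import numpy as np
msgpack_numpy.patch()

env = lmdb.open("output/train/feat_th0.2_max100_min10", readonly=True)  # hypothetical path
with env.begin() as txn:
    names = json.loads(txn.get(b"__keys__").decode("utf-8"))
    buf = txn.get(names[0].encode("utf-8"))
    dump = msgpack.loads(buf, raw=False)
    # compressed variant: dump = dict(np.load(io.BytesIO(buf), allow_pickle=True))
print(list(dump.keys()))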
+{"seq_id":"23872175618","text":"\"\"\"Определяет схемы URL для pixlands.\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . import views\n\napp_name = 'pixlands'\n\nurlpatterns = [\n # Домашняя страница\n url(r'^$', views.index, name='index'),\n # Вывод публичных тем\n url(r'^topics/$', views.topics, name='topics'),\n # Вывод личных тем\n url(r'^profile/$', views.profile, name='profile'),\n # Страница с подробной информацией по отдельной теме\n url(r'^topics/(?P\\d+)/$', views.topic, name='topic'),\n # Страница для добавления новой темы\n url(r'^new_topic/$', views.new_topic, name='new_topic'),\n # Страница для добавления нового изображения\n url(r'^add_image/(?P\\d+)/$', views.add_image, name='add_image'),\n # Страница для добавления изображения пользователя\n url(r'^add_profile_pic/$', views.add_profile_pic, name='add_profile_pic'),\n # Страница для редактирования темы\n url(r'^edit_topic/(?P\\d+)/$', views.edit_topic, name='edit_topic'),\n # Страница редактирования текста изображения и удаления\n url(r'^edit_image/(?P\\d+)/$', views.edit_image, name='edit_image'),\n # Страница для удаления темы\n url(r'^delete_topic/(?P\\d+)/$', views.delete_topic, name='delete_topic'),\n # Страница для удаления фото\n url(r'^delete_image/(?P\\d+)/$', views.delete_image, name='delete_image'),\n # Страница изображения\n url(r'^image/(?P\\d+)/$', views.image, name='image'),\n # Страница добавления комментария\n url(r'^add_comment/(?P\\d+)/$', views.add_comment, name='add_comment'),\n # Страница лайка на странице изображеия\n url(r'^like_on_image/(?P\\d+)/$', views.like_on_image, name='like_on_image'),\n # Страница лайка на странице топика\n url(r'^like_on_topic/(?P\\d+)/$', views.like_on_topic, name='like_on_topic'),\n # Страница лайка на странице поиска\n url(r'^like_on_search/(?P\\d+)/$', views.like_on_search, name='like_on_search'),\n # Страница для удаления комментария\n url(r'^delete_comment/(?P\\d+)/$', views.delete_comment, name='delete_comment'),\n # Страница поиска\n url(r'^search/$', views.search, name='search'),\n]","repo_name":"renenoir/pixland","sub_path":"pixlands/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"8584087933","text":"import os\n\n\nclass RC4():\n def __init__(self):\n self.key = \"YXUCYRDV\"\n self.keyBytes = []\n\n for byte in self.key:\n self.keyBytes.append(ord(byte))\n\n def crypt(self, fileBytes, keyBytes):\n\n cipherList = []\n\n keyLen = len(keyBytes)\n fileLen = len(fileBytes)\n S = [i for i in range(256)]\n\n j = 0\n for i in range(256):\n j = (j + S[i] + keyBytes[i % keyLen]) % 256\n S[i], S[j] = S[j], S[i]\n\n i = 0\n j = 0\n for m in range(fileLen):\n i = (i + 1) % 256\n j = (j + S[i]) % 256\n S[i], S[j] = S[j], S[i]\n k = S[(S[i] + S[j]) % 256]\n cipherList.append(k ^ fileBytes[m])\n\n return cipherList\n\n def encrypt(self, file):\n file_name = os.path.basename(file)\n enc_file_name = file_name + \".enc\"\n enc_file_path = os.path.join(os.getcwd(), enc_file_name)\n\n with open(file, \"rb\") as in_file:\n stream = in_file.read()\n plainBytes = list(stream)\n\n cipherList = self.crypt(plainBytes, self.keyBytes)\n\n with open(enc_file_path, \"wb\") as out_file:\n out_file.write(bytes(cipherList))\n\n return enc_file_path\n\n def decrypt(self, file):\n with open(file, 'rb') as in_file:\n stream = in_file.read()\n cipherBytes = list(stream)\n\n file_name = os.path.basename(file)\n dec_file_name = \".\".join(file_name.split('.')[:-1])\n dec_file_path = os.path.join(os.getcwd(), dec_file_name)\n\n plainList = self.crypt(cipherBytes, self.keyBytes)\n\n with open(dec_file_path, 'wb') as out_file:\n out_file.write(bytes(plainList))\n\n return dec_file_path\n","repo_name":"Zoniacer/kij-symmetric-cipher","sub_path":"Client/encryption/RC4.py","file_name":"RC4.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
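Since RC4 is a symmetric stream cipher, `crypt` above is its own inverse: XORing with the same keystream twice restores the plaintext, which is why `encrypt` and `decrypt` can share one routine. A minimal usage sketch (the file name is hypothetical; note that `decrypt` writes its output into the current working directory):

```python
cipher = RC4()
enc_path = cipher.encrypt('secret.txt')   # -> ./secret.txt.enc
dec_path = cipher.decrypt(enc_path)       # -> ./secret.txt (last extension stripped)
with open(dec_path, 'rb') as f:
    plain = f.read()                      # identical to the original file contents
```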
+{"seq_id":"42239678543","text":"import random\nfrom typing import Dict, Tuple\n\nimport gymnasium as gym\nimport numpy as np\nimport pytest\nfrom gymnasium import spaces\nfrom stable_baselines3.common.callbacks import EventCallback, StopTrainingOnNoModelImprovement\nfrom stable_baselines3.common.env_util import make_vec_env\nfrom stable_baselines3.common.envs import FakeImageEnv, IdentityEnv, IdentityEnvBox\nfrom stable_baselines3.common.monitor import Monitor\nfrom stable_baselines3.common.policies import ActorCriticPolicy\n\nfrom sb3_contrib import MaskablePPO\nfrom sb3_contrib.common.envs import InvalidActionEnvDiscrete, InvalidActionEnvMultiBinary, InvalidActionEnvMultiDiscrete\nfrom sb3_contrib.common.maskable.callbacks import MaskableEvalCallback\nfrom sb3_contrib.common.maskable.evaluation import evaluate_policy\nfrom sb3_contrib.common.maskable.utils import is_masking_supported\nfrom sb3_contrib.common.wrappers import ActionMasker\n\n\ndef make_env():\n return InvalidActionEnvDiscrete(dim=20, n_invalid_actions=10)\n\n\nclass ToDictWrapper(gym.Wrapper):\n \"\"\"\n Simple wrapper to test MultInputPolicy on Dict obs.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n self.observation_space = spaces.Dict({\"obs\": self.env.observation_space})\n\n def reset(self, **kwargs) -> Tuple[Dict[str, np.ndarray], Dict]:\n return {\"obs\": self.env.reset(seed=kwargs.get(\"seed\", 0))[0]}, {} # type: ignore[dict-item]\n\n def step(self, action):\n obs, reward, terminated, truncated, infos = self.env.step(action)\n return {\"obs\": obs}, reward, terminated, truncated, infos\n\n\ndef test_identity():\n \"\"\"\n Performance test.\n A randomly initialized model cannot solve that task (score ~=6),\n nor a model without invalid action masking (score ~=30 after training)\n which such a low training budget.\n \"\"\"\n env = InvalidActionEnvDiscrete(dim=70, n_invalid_actions=55)\n model = MaskablePPO(\n \"MlpPolicy\",\n env,\n gamma=0.4,\n seed=32,\n verbose=0,\n )\n model.learn(3000)\n evaluate_policy(model, env, n_eval_episodes=20, reward_threshold=90, warn=False)\n\n\ndef test_bootstraping():\n # Max ep length = 100 by default\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=10)\n env = gym.wrappers.TimeLimit(env, 30)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, seed=8)\n model.learn(128)\n\n\ndef test_supports_discrete_action_space():\n \"\"\"\n No errors using algorithm with an env that has a discrete action space\n \"\"\"\n\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=10)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, seed=8)\n model.learn(100)\n evaluate_policy(model, env, warn=False)\n\n # Mask all actions except the good one, a random model should succeed\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=19)\n model = MaskablePPO(\"MlpPolicy\", env, seed=8)\n evaluate_policy(model, env, reward_threshold=99, warn=False)\n\n\ndef test_supports_multi_discrete_action_space():\n \"\"\"\n No errors using algorithm with an env that has a multidiscrete action space\n \"\"\"\n\n env = InvalidActionEnvMultiDiscrete(dims=[2, 3], n_invalid_actions=1)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, seed=8)\n model.learn(100)\n evaluate_policy(model, env, warn=False)\n\n # Mask all actions except the good ones, a random model should succeed\n env = InvalidActionEnvMultiDiscrete(dims=[2, 3], n_invalid_actions=3)\n model = MaskablePPO(\"MlpPolicy\", env, seed=8)\n evaluate_policy(model, env, reward_threshold=99, warn=False)\n\n\ndef 
test_supports_multi_binary_action_space():\n \"\"\"\n No errors using algorithm with an env that has a multidiscrete action space\n \"\"\"\n\n env = InvalidActionEnvMultiBinary(dims=3, n_invalid_actions=1)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, seed=8)\n model.learn(100)\n evaluate_policy(model, env, warn=False)\n\n # Mask all actions except the good ones, a random model should succeed\n env = InvalidActionEnvMultiBinary(dims=3, n_invalid_actions=3)\n model = MaskablePPO(\"MlpPolicy\", env, seed=8)\n evaluate_policy(model, env, reward_threshold=99, warn=False)\n\n\ndef test_disabling_masking():\n \"\"\"\n Behave like normal PPO if masking is disabled, which allows for envs that don't provide masks\n \"\"\"\n\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=19)\n model = MaskablePPO(\"MlpPolicy\", env, seed=8)\n evaluate_policy(model, env, reward_threshold=99, warn=False)\n\n # With masking disabled, perfect performance disappears\n with pytest.raises(AssertionError):\n evaluate_policy(model, env, reward_threshold=99, warn=False, use_masking=False)\n\n # Without masking disabled, learning/evaluation will fail if the env doesn't provide masks\n env = IdentityEnv(dim=2)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, seed=8)\n with pytest.raises(ValueError):\n model.learn(100)\n with pytest.raises(ValueError):\n evaluate_policy(model, env, warn=False)\n\n model.learn(100, use_masking=False)\n evaluate_policy(model, env, warn=False, use_masking=False)\n\n\ndef test_masked_evaluation():\n \"\"\"\n Masking can be enabled or disabled for evaluation, but masking should perform better.\n \"\"\"\n\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=19)\n model = MaskablePPO(\"MlpPolicy\", env, seed=8)\n masked_avg_rew, _ = evaluate_policy(model, env, warn=False)\n unmasked_avg_rew, _ = evaluate_policy(model, env, warn=False, use_masking=False)\n assert masked_avg_rew > unmasked_avg_rew\n\n\ndef test_supports_multi_envs():\n \"\"\"\n Learning and evaluation works with VecEnvs\n \"\"\"\n\n env = make_vec_env(make_env, n_envs=2)\n assert is_masking_supported(env)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=256, gamma=0.4, seed=32, verbose=1)\n model.learn(100)\n evaluate_policy(model, env, warn=False)\n\n env = make_vec_env(IdentityEnv, n_envs=2, env_kwargs={\"dim\": 2})\n assert not is_masking_supported(env)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=256, gamma=0.4, seed=32, verbose=1)\n with pytest.raises(ValueError):\n model.learn(100)\n with pytest.raises(ValueError):\n evaluate_policy(model, env, warn=False)\n model.learn(100, use_masking=False)\n evaluate_policy(model, env, warn=False, use_masking=False)\n\n\ndef test_callback(tmp_path):\n \"\"\"\n No errors using MaskableEvalCallback during learning\n \"\"\"\n\n env = make_env()\n eval_env = make_env()\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, gamma=0.4, seed=32, verbose=1)\n model.learn(100, callback=MaskableEvalCallback(eval_env, eval_freq=100, warn=False, log_path=tmp_path))\n\n model.learn(100, callback=MaskableEvalCallback(Monitor(eval_env), eval_freq=100, warn=False), progress_bar=True)\n\n\ndef test_child_callback():\n \"\"\"\n Stop callback and callback on new best rewards\n \"\"\"\n\n env = make_env()\n eval_env = make_env()\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, n_epochs=1)\n stop_callback = StopTrainingOnNoModelImprovement(1, 2)\n new_best_mean_callback = EventCallback()\n eval_callback = MaskableEvalCallback(\n Monitor(eval_env),\n eval_freq=64,\n 
callback_after_eval=stop_callback,\n callback_on_new_best=new_best_mean_callback,\n )\n model.learn(128, callback=eval_callback)\n assert new_best_mean_callback.n_calls > 0\n assert stop_callback.n_calls > 0\n assert stop_callback.n_calls >= new_best_mean_callback.n_calls\n\n\ndef test_maskable_policy_required():\n \"\"\"\n MaskablePPO requires a policy that subclasses MaskableActorCriticPolicy\n \"\"\"\n\n env = make_env()\n with pytest.raises(ValueError):\n MaskablePPO(ActorCriticPolicy, env)\n\n\ndef test_discrete_action_space_required():\n \"\"\"\n MaskablePPO requires an env with a discrete (ie non-continuous) action space\n \"\"\"\n\n env = IdentityEnvBox()\n with pytest.raises(AssertionError):\n MaskablePPO(\"MlpPolicy\", env)\n\n\n@pytest.mark.parametrize(\"share_features_extractor\", [True, False])\ndef test_cnn(share_features_extractor):\n def action_mask_fn(env):\n random_invalid_action = random.randrange(env.action_space.n)\n return [i != random_invalid_action for i in range(env.action_space.n)]\n\n env = FakeImageEnv()\n env = ActionMasker(env, action_mask_fn)\n\n model = MaskablePPO(\n \"CnnPolicy\",\n env,\n n_steps=64,\n seed=32,\n verbose=1,\n policy_kwargs=dict(\n features_extractor_kwargs=dict(features_dim=32),\n share_features_extractor=share_features_extractor,\n ),\n )\n model.learn(100)\n evaluate_policy(model, env, warn=False)\n\n\ndef test_dict_obs():\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=10)\n env = ToDictWrapper(env)\n model = MaskablePPO(\"MultiInputPolicy\", env, n_steps=64, seed=8)\n model.learn(64)\n evaluate_policy(model, env, warn=False)\n\n # Mask all actions except the good one, a random model should succeed\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=19)\n env = ToDictWrapper(env)\n model = MaskablePPO(\"MultiInputPolicy\", env, seed=8)\n evaluate_policy(model, env, reward_threshold=99, warn=False)\n # MultiDiscrete\n env = InvalidActionEnvMultiDiscrete(dims=[2, 3], n_invalid_actions=1)\n env = ToDictWrapper(env)\n model = MaskablePPO(\"MultiInputPolicy\", env, n_steps=32, seed=8)\n model.learn(32)\n # MultiBinary\n env = InvalidActionEnvMultiBinary(dims=3, n_invalid_actions=1)\n env = ToDictWrapper(env)\n model = MaskablePPO(\"MultiInputPolicy\", env, n_steps=32, seed=8)\n model.learn(32)\n","repo_name":"Stable-Baselines-Team/stable-baselines3-contrib","sub_path":"tests/test_invalid_actions.py","file_name":"test_invalid_actions.py","file_ext":"py","file_size_in_byte":9645,"program_lang":"python","lang":"en","doc_type":"code","stars":353,"dataset":"github-code","pt":"76"}
+{"seq_id":"37765885746","text":"from nltk import CFG, Tree\nimport copy\nfrom pprint import pprint\n\n# question1.py (edited by Pavlos Musenidis)\n# Jonas Kuhn, University of Stuttgart, 2020\n# course \"Parsing\"\n\n# Boolean variable for switching tracing info on and off\ntrace = True # set this to False if you don't want to see intermediate steps\n\n# Boolean variable for running parser interactively on user input or on pre-specified input\ninteractive = False # True\n\n# internal format of cfg production rules with reversed right-hand sides (!)\n\ngrammar = \"\"\"\nS -> NP VP \nNP -> DET N | DET N PP | 'I'\nVP -> V | V NP | V NP PP\nPP -> P NP \nDET -> 'the' | 'an' | 'my' | 'most'\nP -> 'in'\nN -> 'elephant' | 'elephants' | 'mouse' | 'mice' | 'pajamas'\nV -> 'sneezed' | 'giggled' | 'trumpeted' | 'saw' | 'shot'\n\"\"\"\n\n\ndef load_grammar(grammar):\n G = {}\n cfg = CFG.fromstring(grammar)\n for p in cfg.productions():\n p = p.__str__().split()\n for i in range(len(p)):\n p[i] = p[i].strip(\"'\")\n G.setdefault(p[0], [])\n right = p[2:]\n right.reverse()\n G[p[0]].append(right)\n return G\n\n\n# main procedure:\ndef parse(G, tokens):\n # G: dict with list of reversed rhs's for each non-terminal\n # tokens: list of input tokens\n\n if trace: print(\"parsing \", tokens, \"...\")\n\n # initialize data structures:\n stack = ['S']\n inbuffer = tokens\n seq = []\n agenda = []\n solutions = []\n\n # main loop:\n while True:\n if trace: print(' {:<40}{:>40}'.format(str(stack), str(inbuffer)))\n\n # expand\n if stack != [] and inbuffer != [] and stack[-1] in G:\n replace = stack[-1]\n if [inbuffer[0]] in G[replace]:\n if trace: print(\" >expand: \", stack[-1], \" -R-> \", G[stack[-1]][0])\n right = G[replace][G[replace].index([inbuffer[0]])]\n seq.append((stack[-1], len(right)))\n del stack[-1]\n stack += right\n else:\n for production in G[replace]:\n new_seq = copy.deepcopy(seq)\n new_stack = copy.deepcopy(stack)\n new_inbuffer = copy.deepcopy(inbuffer)\n new_seq.append((new_stack[-1], len(production)))\n del new_stack[-1]\n new_stack += production\n last = production\n agenda.append((new_stack, new_inbuffer, new_seq))\n stack = agenda[-1][0]\n inbuffer = agenda[-1][1]\n seq = agenda[-1][2]\n del agenda[-1]\n if trace: print(\" >expand: \", replace, \" -R-> \", last)\n\n\n # match\n elif stack != [] and inbuffer != [] and stack[-1] == inbuffer[0]:\n if trace: print(\" >match: \", stack[-1], \" -R-> \", inbuffer[0])\n seq.append((stack[-1], 0))\n del stack[-1]\n del inbuffer[0]\n\n\n # termination\n elif stack == inbuffer == []:\n if trace: print(' {:<40}{:>40}'.format(str(stack), str(inbuffer)))\n solutions.append(seq)\n print(\"found one solution!\\n\")\n if agenda != []:\n print(\"searching for more solutions...\\n\")\n stack = agenda[-1][0]\n inbuffer = agenda[-1][1]\n seq = agenda[-1][2]\n del agenda[-1]\n else:\n if solutions != []:\n print(\"failure!\\n\\n\\n\\n\\n\\n\\n\")\n else:\n print(\"success!\\n\\n\\n\\n\\n\\n\\n\")\n return solutions\n else:\n if trace: print(\" >dead end!\")\n if agenda != []:\n print(\"searching for more solutions...\\n\")\n stack = agenda[-1][0]\n inbuffer = agenda[-1][1]\n seq = agenda[-1][2]\n del agenda[-1]\n else:\n if solutions == []:\n print(\"failure!\\n\\n\\n\\n\\n\\n\\n\")\n else:\n print(\"success!\\n\\n\\n\\n\\n\\n\\n\")\n return solutions\n\n\ndef build_tree(seq):\n if seq == []:\n return []\n else:\n sub = seq[0]\n del seq[0]\n subtrees = []\n for i in range(sub[1]):\n subtree = build_tree(seq)\n subtrees.append(subtree[0])\n return(Tree(sub[0], 
subtrees), seq)\n\n\ndef demo():\n    G = load_grammar(grammar)\n    if trace: print(\"Internal grammar representation:\\n\", grammar)\n\n    if interactive:\n        while True:\n            # interactive way of running the parser on user input:\n\n            sentence = input('Type sentence or type \"q\" to exit: ')  # user can input the string to be parsed\n            if sentence != \"q\":\n                tokens = sentence.split()  # split up the string into tokens (using the default separator, i.e. space)\n                solutions = parse(G, tokens)\n                for sequence in solutions:\n                    parsetree = build_tree(sequence)\n                    parsetree[0].draw()\n            else:\n                exit()\n    else:\n        tokens = \"the elephant saw the mouse\".split()\n        solutions = parse(G, tokens)\n        for sequence in solutions:\n            parsetree = build_tree(sequence)\n            parsetree[0].draw()\n        tokens = \"I shot the elephant shot my pajamas\".split()\n        solutions = parse(G, tokens)\n        for sequence in solutions:\n            parsetree = build_tree(sequence)\n            parsetree[0].draw()\n        tokens = \"I shot the elephant in my pajamas\".split()\n        solutions = parse(G, tokens)\n        for sequence in solutions:\n            parsetree = build_tree(sequence)\n            parsetree[0].draw()\n\n\ndemo()\n","repo_name":"Pavlos-96/Parsing_Project","sub_path":"5_2.py","file_name":"5_2.py","file_ext":"py","file_size_in_byte":5766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"72151799284","text":"ERROR_MESSAGES = {\n\t'sz_err':'Invalid file format: size does not match number of instances.',\n\t'sz_instance':'Invalid file format: number of parameters mismatched.',\n\t'max_range':'Invalid file format: MAX_RANGE exceeded.',\n\t'sz_not_int':'Invalid file format: first line must be an integer.',\n\t'param_not_int':'Invalid file format: integer not recognized.',\n\t'file_not_allowed':'File format not allowed, must be .txt.',\n}\n\n# Config variables\nFILE_REL_FOLDER = 'files/'\nINPUT_FILE = 'input.txt'\nOUTPUT_FILE = 'output.txt'\nALLOWED_EXTENSIONS = set(['txt'])\n\n# Divisibility configs\nMAX_RANGE = 10000\nINSTANCE_SZ = 3\n\n# Test configurations\nTEST_INPUT_DIR = 'input_files/'\nTEST_SUCCESS_EXP = TEST_INPUT_DIR + 'input1_success_0.txt'\nTEST_FAIL_EXP = TEST_INPUT_DIR + 'input1_fail_3.txt'\n\n","repo_name":"LeviVasconcelos/anchorloans","sub_path":"anchorapp/configurations.py","file_name":"configurations.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"42771241368","text":"import random\nimport requests\n\nimport time\n\nstarttime = time.time()\n\nwhile True:\n print(\"tick\")\n\n soundVal = random.randint(0, 2000)\n motionVal = random.randint(0, 60)\n\n response = requests.post(\"http://127.0.0.1:5000/api/sound\",\n headers={\"Content-Type\": \"application/json\"},\n json={'value': soundVal,\n 'sensorId': 'SoundTest'})\n\n print(response.text)\n\n response2 = requests.post(\"http://127.0.0.1:5000/api/motion\",\n headers={\"Content-Type\": \"application/json\"},\n json={'value': motionVal,\n 'sensorId': 'MotionTest'})\n\n print(response2.text)\n\n time.sleep(60.0 - ((time.time() - starttime) % 60.0))\n","repo_name":"DuncanBH/IoT-Security-Backend","sub_path":"DummyDataCreator.py","file_name":"DummyDataCreator.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"38484290587","text":"import random\nprint(\" ============================= \")\nprint(\"           THE GAME            \")\nprint(\" ============================= \")\n\nplayer = input(\"Enter the player's name: \")\nprint(f\"Hi {player}, I have picked a number between 1 and 100; you have 8 tries to guess it\")\n\nintentos = 0\nnumero = 0\nnum_random = random.randint(1,100)\n\nwhile intentos < 8:\n    numero = int(input(\"Enter a number: \"))\n    intentos += 1\n\n    if numero not in range(1,101):\n        print(\"The number you entered is not in the stated range, 1 to 100\")\n    elif numero < num_random:\n        print(\"Wrong, you picked a number lower than the secret number\")\n    elif numero > num_random:\n        print(\"Wrong, you picked a number higher than the secret number\")\n    elif numero == num_random:\n        print(f\"Congratulations, you win! You guessed it in {intentos} tries\")\n        break\n\nif numero != num_random:\n    print(f\"Sorry, you are out of tries. The secret number was {num_random}\")\n\n\n","repo_name":"nahuRo/Python","sub_path":"day 4/proyecto.py","file_name":"proyecto.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"73368417205","text":"# nome = input('What is your name? ')\n# print('Hello, {:.^20}. It is a pleasure to meet you!'.format(nome))\n\nn1 = int(input('Type a number: '))\nn2 = int(input('Type another number: '))\ns = n1+n2\nm = n1*n2\nd = n1/n2\ni = n1//n2\ne = n1**n2\n\nprint('The sum is {}, the product is {}, \\n the division is {:.3f} \\n'.format(s, m, d), end='=>>>')\nprint('The integer division is {}, the exponentiation is {}'.format(i, e))\n","repo_name":"DaltonBorges/Curso-de-Python","sub_path":"pythonTeste/aula07a.py","file_name":"aula07a.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"29003772881","text":"# -*- coding: utf-8 -*-\nimport re\n\nimport scrapy\n\nfrom freelance.items import TaskItem, PRICE_PERIOD_HOURLY, PRICE_PERIOD_PROJECT\n\n\nre_task_id = re.compile(r'\\/(\\d+)\\/')\n\n\nclass FlSpider(scrapy.Spider):\n name = 'fl'\n allowed_domains = ['fl.ru']\n start_urls = ['https://www.fl.ru/projects/']\n\n def parse(self, response):\n for dom_item in response.css('.b-post'):\n task_url = response.urljoin(dom_item.css('.b-post__link::attr(href)').extract_first())\n task_title = dom_item.css('.b-post__link::text').extract_first()\n\n item = TaskItem(\n url=task_url,\n title=task_title,\n )\n\n yield scrapy.Request(url=task_url, callback=self.parse_detail, meta={\"item\": item})\n\n def parse_detail(self, response):\n task_id = self.get_id(response)\n item = response.meta.get('item')\n\n item['description'] = self.get_description(response, task_id)\n\n yield item\n\n\n def get_description(self, response, id):\n try:\n return response.css('#projectp' + str(id)).extract_first().strip()\n except (AttributeError) as e:\n self.logger.warn('get description error (%s) %s' % (response.url, e))\n return ''\n\n @staticmethod\n def get_id(response):\n return re.search(r'/(\\d+)/', response.url).group(1)","repo_name":"power-freelance/examples","sub_path":"python/freelance-parser/freelance/spiders/fl.py","file_name":"fl.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"37087287577","text":"from discord.ext import commands\nfrom discord import Member\n\nfrom managers import mongo_manager\nfrom config import SERVER_COL_NAME\nimport config\n\nclass UtilityCog(commands.Cog):\n\n @commands.command(name=\"ping\", description=\"Returns the bot's latency\")\n async def ping(self, ctx:commands.Context):\n await ctx.send(f\"Bot's Latency : **{round(ctx.bot.latency * 1000, 2)} ms**\")\n\n @commands.command(name=\"prefix\", description=\"Sets the prefix of the bot\")\n async def prefix(self, ctx:commands.Context, prefix:str=None):\n\n if prefix is None:\n return await ctx.reply(f\"Current Prefix is {config.PREFIX}\")\n\n try:\n if not ctx.author.guild_permissions.administrator:\n return await ctx.reply(\"You need administrator privilages to update the guild prefix.\")\n\n updated_data = {\"prefix\" : prefix}\n\n mongo_manager.manager.update_all_data(SERVER_COL_NAME, {}, updated_data)\n config.modify_prefix_timer_max(prefix=prefix, timer=None, max=None)\n\n return await ctx.reply(f\"Prefix changed to **{prefix}**\")\n except Exception as e:\n return await ctx.reply(f\"Error occured while changing the prefix. \\n```{e}```\")\n\n @commands.command(name=\"max\", description=\"Sets the max collection size limit\")\n async def max(self, ctx:commands.Context, max:int=None):\n \n if max is None:\n return await ctx.reply(f\"Maximum Collection Size is **{config.MAX}**\")\n\n try:\n if not ctx.author.guild_permissions.administrator:\n return await ctx.reply(\"You need administrator privilages to update collection max.\")\n\n updated_data = {\"max\" : max}\n\n mongo_manager.manager.update_all_data(SERVER_COL_NAME, {}, updated_data)\n config.modify_prefix_timer_max(prefix=None, timer=None, max=int(max))\n\n return await ctx.reply(f\"New Collection Max is set to **{max}**\")\n except Exception as e:\n await ctx.reply(f\"Error occured while trying to update the collection max. \\n```{e}```\")\n\ndef setup(bot:commands.Bot):\n bot.add_cog(UtilityCog())","repo_name":"Devanshu19/PokeCol","sub_path":"cogs/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
+{"seq_id":"10255220697","text":"#!/usr/bin/env python3\n\nimport sys\nimport argparse\nfrom argparse import RawDescriptionHelpFormatter, _HelpAction\nimport xml.etree.ElementTree as ET\nfrom interpret_src.Instruction import Instruction\nfrom interpret_src.Argument import Argument\nimport re\nimport interpret_src.errorTypes as errorTypes\nimport copy\n\nclass Nil:\t\n\t\"\"\"This class just simulate Nil type\"\"\"\n\n\tdef __init__(self):\n\t\tpass\n\tdef __str__(self):\n\t\treturn \"nil\"\n\tdef __repr__(self):\n\t\treturn \"nil\"\n\nNIL = Nil()\n\n\ndef parseArguments():\n\t\"\"\"Parse arguments with slightly modifed argparse\"\"\"\n\n\tclass ModifiedArgumentParser(argparse.ArgumentParser):\n\t\t\"\"\"Just override error exit status code in argparse to be correct\"\"\"\n\t\tdef error(self, message):\n\t\t\texitWithError(errorTypes.wrongParameters, message)\n\n\tclass Modified_HelpAction(_HelpAction):\n\t\t\"\"\"\n\t\tJust override __call__ so if --help argument is combined with others \n\t\tit exits with propper error\n\t\t\"\"\"\n\n\t\tdef __init__(self,\n\t\t\t\toption_strings,\n\t\t\t\tdest='==SUPPRESS==',\n\t\t\t\tdefault='==SUPPRESS==',\n\t\t\t\thelp=None):\n\t\t super(_HelpAction, self).__init__(\n\t\t\t\toption_strings=option_strings,\n\t\t\t\tdest=dest,\n\t\t\t\tdefault=default,\n\t\t\t\tnargs=0,\n\t\t\t\thelp=help)\n\n\t\tdef __call__(self, parser, namespace, values, option_string=None):\n\t\t\ttry:\n\t\t\t\tsourceArg = namespace._get_kwargs()[1][1]\n\t\t\t\tinputArg = namespace._get_kwargs()[2][1]\n\t\t\texcept:\n\t\t\t\tparser.exit(99, '%s: error: %s\\n' % (parser.prog,\"Internal error\"))\n\n\t\t\tif sourceArg or inputArg:\n\t\t\t\tparser.error(\"--help argumnet cannot be combined with others\")\t\t\n\t\t\telse:\n\t\t\t\tparser.print_help()\n\t\t\t\tparser.exit()\n\t \n\targparse._HelpAction = Modified_HelpAction\n\n\tp = ModifiedArgumentParser(formatter_class=RawDescriptionHelpFormatter, description=\"Interpret for IPPcode19. Source code must be in XML representation, for example generated by parse.php. If one of the arguments --input and --source is missing interpret loads particular data from stdin.\", epilog=\"\"\"\t\"\"\", add_help=False)\n\tp.add_argument('--help', action='help', help='show this help message and exit')\n\tp.add_argument(\"--source\", help = \"file with XML representation of source code. If not given, --input is required.\", metavar=('FILE'))\n\tp.add_argument(\"--input\", help = \"input file which will be used at interpretation. 
If not given, --source is required.\", metavar=('FILE'))\n\n\targs = p.parse_args()\n\n\tif not (args.source or args.input):\n\t\tp.error('Give me at least one of the arguments --input and --source')\n\n\treturn args.source, args.input\n\ndef getLinesFromFile(file):\n\t\"\"\"\n\tGet list of lines from file without line endings\n\n\tParameters:\n\tfile (str): file you want lines from\n\n\tReturns:\n\tlist of str: lines from file without line endings\n\t\"\"\"\n\n\tif file:\n\t\ttry:\n\t\t\twith open(file, \"r\") as f:\n\t\t\t\tdata = f.readlines()\n\t\texcept:\n\t\t\texitWithError(errorTypes.cannotOpenSourceFiles, f\"File: {file}\")\n\telse:\n\t\treturn None\n\n\treturn [line.strip() for line in data]\n\n\ndef exitWithError(errorType, additionalMessage = None):\n\t\"\"\"\n\tExit with specific error type, proper exit code and friendly text\n\n\tParameters:\n\terrorType (Error): Custom error object\n\tadditionalMessage (str): Text that will be printed to stderr\n\t\"\"\"\n\n\tprint(errorType, file=sys.stderr)\n\n\tif additionalMessage:\n\t\tprint(additionalMessage, file=sys.stderr)\n\n\tsys.exit(errorType.code)\n\ndef customAssert(result, errorType):\n\t\"\"\"\n\tCustom assert that can exit with specified error type\n\n\tParameters:\n\tresult (Bool): Result from condition that needs to be true\n\t\"\"\"\n\n\tif not result:\n\t\texitWithError(errorType)\n\n\ndef checkXmlHeader(sourceLines):\n\t\"\"\"\n\tCheck if xml header in source lines is correct\n\n\tParameters:\n\tsourceLines (list of str): source code that contains xml header at the beginning\n\t\"\"\"\n\n\tfor line in sourceLines:\n\t\tif line.strip() != \"\":\n\t\t\tfirstLineOfXml = line\n\t\t\tcustomAssert(firstLineOfXml == r'<?xml version=\"1.0\" encoding=\"UTF-8\"?>', errorTypes.xmlNotWellFormated)\n\t\t\tbreak\n\ndef getInstructionsFromXml(root):\n\t\"\"\"\n\tGet list of instructions from xml code\n\n\tParameters:\n\troot : root of xml code from ElementTree\n\n\tReturns:\n\tinstructionsList (list of Instruction): list of instructions\n\t\"\"\"\n\n\tinstructionsList = []\n\n\tfor child in root:\n\t\tcustomAssert(child.tag == \"instruction\", errorTypes.xmlStructureSyntaxLex)\n\n\t\tfor instrAttrib in child.attrib:\n\t\t\tcustomAssert(instrAttrib in (\"order\", \"opcode\"), errorTypes.xmlStructureSyntaxLex)\n\n\t\targuments = []\n\n\t\tfor arg in child:\n\t\t\tfor atrib in arg.attrib:\n\t\t\t\tcustomAssert(atrib == \"type\", errorTypes.xmlStructureSyntaxLex)\n\n\t\t\tresult = re.search(\"^arg([1-3])$\", arg.tag)\n\n\t\t\tcustomAssert(result, errorTypes.xmlStructureSyntaxLex)\n\n\t\t\torder = result.group(1)\n\t\t\targument = Argument(arg.get(\"type\"), arg.text, order)\n\t\t\targuments.append(argument)\n\n\t\targuments.sort(key=lambda argument: argument.order)\n\n\t\t# Assert if in instruction there is for example just arg2, but arg1 is missing, also duplicates\n\t\tfor index, argument in enumerate(arguments, start=1):\n\t\t\tcustomAssert(argument.order == index, errorTypes.xmlStructureSyntaxLex)\n\n\t\tinstruction = Instruction(child.get(\"opcode\"), arguments, child.get(\"order\"))\n\t\tinstructionsList.append(instruction)\n\n\tinstructionsList.sort(key=lambda instruction: instruction.order)\n\n\t# Assert if for example there is an instruction with order 2 but the instruction with order 1 is missing,\n\t# also when there are order duplicates\n\tfor index, instruction in enumerate(instructionsList, start=1):\n\t\tcustomAssert(instruction.order == index, errorTypes.xmlStructureSyntaxLex)\n\n\treturn instructionsList\n\n\ndef 
makeInstructionRule(name, arg_1=None, arg_2=None, arg_3=None):\n\t\"\"\" \n\tMake one Instruction from instruction and arguments names. E.g. \n\t\n\tParameters: \n\tname (str) : Name of instruction\n\targ_1 (str) : Name of 1. argument\n\targ_2 (str) : Name of 2. argument\n\targ_3 (str) : Name of 3. argument\n\t\n\tReturns: \n\tInstruction : new instruction according to parameters\n\t\"\"\"\n\n\targuments = []\n\n\tif arg_1:\n\t\targuments.append(Argument(arg_1))\n\tif arg_2:\n\t\targuments.append(Argument(arg_2))\n\tif arg_3:\n\t\targuments.append(Argument(arg_3))\n\n\treturn Instruction(name, arguments)\n\ndef getInstrutionsRules():\n\t\"\"\" \n\tGet instruction rules, this defines IPPcode19 syntax\n\t\n\tReturns: \n\tinstructionRules (list of Instruction) : filled instruction rules\n\t\"\"\"\t\n\n\tinstructionRules = []\n\n\tinstructionRules.append(makeInstructionRule(\"MOVE\", \"var\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"CREATEFRAME\"))\n\tinstructionRules.append(makeInstructionRule(\"PUSHFRAME\"))\n\tinstructionRules.append(makeInstructionRule(\"POPFRAME\"))\n\tinstructionRules.append(makeInstructionRule(\"DEFVAR\", \"var\"))\n\tinstructionRules.append(makeInstructionRule(\"CALL\", \"label\"))\n\tinstructionRules.append(makeInstructionRule(\"RETURN\"))\n\tinstructionRules.append(makeInstructionRule(\"PUSHS\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"POPS\", \"var\"))\t\n\tinstructionRules.append(makeInstructionRule(\"ADD\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"SUB\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"MUL\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"IDIV\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"LT\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"GT\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"EQ\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"AND\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"OR\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"NOT\", \"var\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"INT2CHAR\", \"var\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"STRI2INT\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"READ\", \"var\", \"type\"))\n\tinstructionRules.append(makeInstructionRule(\"WRITE\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"CONCAT\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"STRLEN\", \"var\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"GETCHAR\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"SETCHAR\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"TYPE\", \"var\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"LABEL\", \"label\"))\n\tinstructionRules.append(makeInstructionRule(\"JUMP\", \"label\"))\n\tinstructionRules.append(makeInstructionRule(\"JUMPIFEQ\", \"label\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"JUMPIFNEQ\", \"label\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"EXIT\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"DPRINT\", 
\"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"BREAK\"))\n\n\treturn instructionRules\n\n\ndef checkOperandLexems(instructionsList):\n\t\"\"\" \n\tChceck if lexems in operands are correct\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\t\"\"\"\n\n\tfor instruction in instructionsList:\n\t\tfor argument in instruction.arguments:\n\t\t\tif argument.type == \"var\":\n\t\t\t\tresult = re.search(r'^(GF|LF|TF)@([a-z]|[A-Z]|[\\_\\-\\$\\&\\%\\*\\?\\!])(\\w|[\\_\\-\\$\\&\\%\\*\\?\\!])*$', argument.name)\n\t\t\telif argument.type == \"string\":\n\t\t\t\tresult = re.search(r'^([^\\s\\#\\\\]|\\\\[0-9]{3})*$', argument.name)\t\n\t\t\telif argument.type == \"int\":\n\t\t\t\tresult = re.search(r'^[-\\+]?[0-9]+$', argument.name)\t\n\t\t\telif argument.type == \"bool\":\n\t\t\t\tresult = re.search(r'^(false|true)$', argument.name)\t\n\t\t\telif argument.type == \"nil\":\n\t\t\t\tresult = re.search(r'^nil$', argument.name)\t\t\t\t\n\t\t\telif argument.type == \"label\":\n\t\t\t\tresult = re.search(r'^([a-z]|[A-Z]|[\\_\\-\\$\\&\\%\\*\\?\\!])(\\w|[\\_\\-\\$\\&\\%\\*\\?\\!])*$', argument.name)\n\t\t\telif argument.type == \"type\":\n\t\t\t\tresult = re.search(r'^(string|int|bool)$', argument.name)\t\t\t\t\n\t\t\telse:\n\t\t\t\tresult = False\n\n\t\t\tcustomAssert(result, errorTypes.xmlStructureSyntaxLex)\n\ndef checkSyntax(instructionsList):\n\t\"\"\" \n\tCheck syntax in source coce\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\t\"\"\"\t\n\n\tinstructionRules = getInstrutionsRules();\n\n\tfor instruction in instructionsList:\n\t\tcustomAssert(instruction in instructionRules, errorTypes.xmlStructureSyntaxLex)\n\ndef checkLabelsSematics(instructionsList):\n\t\"\"\" \n\tCheck labels semantics, i.e. if there is redefinition or using not defined label\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\t\"\"\"\n\n\tdefinedLabels = []\n\n\t# Find labels, check if there is no attempt of redefinition\n\tfor instruction in instructionsList:\n\t\tif instruction.name == \"LABEL\":\n\t\t\tlabelName = instruction.arguments[0].name\n\t\t\tcustomAssert(labelName not in definedLabels, errorTypes.semantics)\n\n\t\t\tdefinedLabels.append(labelName)\n\n\t# Check if label not defined\n\tfor instruction in instructionsList:\n\t\tif instruction.name != \"LABEL\":\n\t\t\tfor argument in instruction.arguments:\n\t\t\t\tif argument.type == \"label\":\n\t\t\t\t\tcustomAssert(argument.name in definedLabels, errorTypes.semantics)\n\ndef replaceEscapeSequences(instructionsList):\n\t\"\"\" \n\tReplace escape sequences in string types in source code. E.g. a\\032a => a a\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\t\n\tReturns: \n\tinstructionsList (list of Instruction) : list of instructions with replaced escape sequences\n\t\"\"\"\n\n\tfor instruction in instructionsList:\n\t\tfor argument in instruction.arguments:\n\t\t\tif argument.type == \"string\":\n\t\t\t\tescapedUnicodesList = re.findall(r'(\\\\[0-9]{3})+', argument.name)\n\n\t\t\t\tfor escapedUnicode in escapedUnicodesList:\n\t\t\t\t\tunicodeAsChar = chr(int(escapedUnicode[1:]))\n\t\t\t\t\targument.name = argument.name.replace(escapedUnicode, unicodeAsChar)\n\n\treturn instructionsList\n\ndef readInput(inputLines):\n\t\"\"\" \n\tGeneralizes reading from input. 
\n\tIf there is file with input text take data from there, otherwise read from stdin\n\t\n\tParameters: \n\tinputLines (list of str) : from file that user added as cli argument\n\t\n\tReturns: \n\tString : text from input\n\t\"\"\"\t\n\n\tif inputLines != None:\n\t\treturn inputLines.pop(0)\n\telse:\n\t\treturn input()\n\n\ndef\tgetLablesIndexes(instructionsList):\n\t\"\"\" \n\tGet labels name from source code with indexes, so it will be easier to do jumps to labels\n\tE.g.: {\"foo\" : 5, \"boo\" : 9}\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\t\n\tReturns: \n\tlabelsIndexDict (Dictionary) : labels name from source code with indexes \n\t\"\"\"\t\n\n\tlabelsIndexDict = {}\n\n\tfor i, instruction in enumerate(instructionsList):\n\t \tif instruction.name == \"LABEL\":\n\t \t\tlabelName = instruction.arguments[0].name\n\t \t\tlabelsIndexDict[labelName] = i\n\n\treturn labelsIndexDict\n\n\ndef interpretCode(instructionsList, inputLines):\n\t\"\"\" \n\tInterpert code\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\tinputLines (list of str) : from file that user added as cli argument \n\t\"\"\"\t\n\n\t# Position of labels in instructionsList E.g.: {\"foo\" : 5, \"boo\" : 9}\n\t# So it will be easier to jumping to these labes\n\tlabelsIndexDict = getLablesIndexes(instructionsList)\n\n\tGF = {}\n\tLF = []\n\tTF = None\n\n\tstackPushsPops = []\n\tstackReturnToCaller = []\n\n\tdataToDestination = None\n\tdestinationData = None\n\tsourceDataFirst = None\n\tsourceDataSecond = None\n\tlabelToJump = None\n\n\tcurrentInstructionIndex = 0\n\tprocessedInstructionsCount = 0\n\n\twhile currentInstructionIndex < len(instructionsList):\n\n\t\tinstruction = instructionsList[currentInstructionIndex]\n\t\tcurrentInstructionIndex += 1\n\t\tprocessedInstructionsCount += 1\n\n\t\t# Semantics assert and get data from instruction source and destination argmuents\n\t\tfor argument in instruction.arguments:\n\n\t\t\t\t# For classic: INSTRUCTION dest [source] [source]\n\t\t\t\tif argument.type == \"var\" and argument.order == 1 and instruction.name not in (\"PUSHS\", \"WRITE\", \"EXIT\", \"DPRINT\"):\n\t\t\t\t\tframe = argument.name.split(\"@\", 1)[0]\n\t\t\t\t\tvarName = argument.name.split(\"@\", 1)[1]\n\n\t\t\t\t\tif frame == \"GF\":\t\t\t\n\t\t\t\t\t\tif instruction.name != \"DEFVAR\":\n\t\t\t\t\t\t\tcustomAssert(varName in GF, errorTypes.variableNotDefined)\n\t\t\t\t\t\t\tdestinationData = GF[varName]\t\t\t\t\n\t\t\t\t\telif frame == \"LF\":\n\t\t\t\t\t\tcustomAssert(len(LF) != 0, errorTypes.frameNotExists)\n\t\t\t\t\t\tif instruction.name != \"DEFVAR\":\n\t\t\t\t\t\t\tcustomAssert(varName in LF[-1], errorTypes.variableNotDefined)\t\t\t\t\t\n\t\t\t\t\t\t\tdestinationData = LF[-1][varName]\t\t\t\t\n\t\t\t\t\telif frame == \"TF\":\n\t\t\t\t\t\tcustomAssert(TF != None, errorTypes.frameNotExists)\n\t\t\t\t\t\tif instruction.name != \"DEFVAR\":\n\t\t\t\t\t\t\tcustomAssert(varName in TF, errorTypes.variableNotDefined)\t\t\t\t\t\n\t\t\t\t\t\t\tdestinationData = TF[varName]\t\t\t\t\n\n\t\t\t\t# For classic: INSTRUCTION dest source [source] E.g.: ADD var symb symb\n\t\t\t\t# Or: INSTRUCTION source E.g.: WRITE symb\n\t\t\t\tif argument.order in (2, 3) or instruction.name in (\"PUSHS\", \"WRITE\", \"EXIT\", \"DPRINT\"):\n\t\t\t\t\tif argument.type == \"var\":\n\t\t\t\t\t\tframe = argument.name.split(\"@\", 1)[0]\n\t\t\t\t\t\tvarName = argument.name.split(\"@\", 1)[1]\n\n\t\t\t\t\t\tif frame == 
\"GF\":\n\t\t\t\t\t\t\tcustomAssert(varName in GF, errorTypes.variableNotDefined)\n\t\t\t\t\t\t\tsourceData = GF[varName]\n\t\t\t\t\t\telif frame == \"LF\":\n\t\t\t\t\t\t\tcustomAssert(len(LF) != 0, errorTypes.frameNotExists)\n\t\t\t\t\t\t\tcustomAssert(varName in LF[-1], errorTypes.variableNotDefined)\n\t\t\t\t\t\t\tsourceData = LF[-1][varName]\n\t\t\t\t\t\telif frame == \"TF\":\n\t\t\t\t\t\t\tcustomAssert(TF != None, errorTypes.frameNotExists)\n\t\t\t\t\t\t\tcustomAssert(varName in TF, errorTypes.variableNotDefined)\n\t\t\t\t\t\t\tsourceData = TF[varName]\n\n\t\t\t\t\t\t# Because second argument in TYPE dont have to be initialized\n\t\t\t\t\t\tcustomAssert(sourceData != None or instruction.name == \"TYPE\", errorTypes.missingValue)\n\n\t\t\t\t\telif argument.type in (\"string\", \"type\"):\n\t\t\t\t\t\tsourceData = argument.name\n\t\t\t\t\telif argument.type == \"int\":\n\t\t\t\t\t\tsourceData = int(argument.name)\n\t\t\t\t\telif argument.type == \"bool\":\n\t\t\t\t\t\tsourceData = argument.name == \"true\"\n\t\t\t\t\telif argument.type == \"nil\":\n\t\t\t\t\t\tsourceData = NIL\t\t\t\n\n\t\t\t\t\tif argument.order in (1, 2):\n\t\t\t\t\t\tsourceDataFirst = sourceData\n\t\t\t\t\telif argument.order == 3:\n\t\t\t\t\t\tsourceDataSecond = sourceData\t\t\t\t\n\n\t\t\t\telif argument.type == \"label\":\n\t\t\t\t\tlabelToJump = argument.name\n\n\n\t\tif instruction.name == \"DEFVAR\":\n\n\t\t\tdataToDestination = None\n\n\t\telif instruction.name == \"MOVE\":\n\n\t\t\tdataToDestination = sourceDataFirst\n\n\t\telif instruction.name == \"CREATEFRAME\":\n\n\t\t\tTF = {}\n\n\t\telif instruction.name == \"PUSHFRAME\":\n\t\t\tcustomAssert(TF != None, errorTypes.frameNotExists)\n\t\t\tLF.append(TF)\t\n\t\t\tTF = None\n\n\t\telif instruction.name == \"POPFRAME\":\n\t\t\tcustomAssert(len(LF) != 0, errorTypes.frameNotExists)\n\t\t\tTF = LF.pop()\n\n\t\telif instruction.name == \"WRITE\":\n\t\t\tif type(sourceDataFirst) != Nil:\n\t\t\t\tif type(sourceDataFirst) == bool:\n\t\t\t\t\tprint(str(sourceDataFirst).lower(), end=\"\")\n\t\t\t\telse:\n\t\t\t\t\tprint(sourceDataFirst, end=\"\")\n\n\t\telif instruction.name == \"READ\":\n\t\t\texpectedType = sourceDataFirst\n\n\t\t\ttry:\n\t\t\t\treadedData = readInput(inputLines)\n\t\t\texcept:\n\t\t\t\treadedData = 0\n\t\t\t\tif expectedType == \"string\":\n\t\t\t\t\treadedData = \"\"\n\n\t\t\tif expectedType == \"string\":\n\t\t\t\treadedData = str(readedData)\n\t\t\telif expectedType == \"int\":\n\t\t\t\ttry:\n\t\t\t\t\treadedData = int(readedData)\n\t\t\t\texcept:\n\t\t\t\t\treadedData = 0\n\t\t\telif expectedType == \"bool\":\t\t\t\t\n\t\t\t\treadedData = str(readedData).lower() == \"true\"\n\n\t\t\tdataToDestination = readedData\n\n\t\telif instruction.name == \"ADD\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst + sourceDataSecond\n\n\t\telif instruction.name == \"SUB\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst - sourceDataSecond\n\n\t\telif instruction.name == \"MUL\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst * sourceDataSecond\n\n\t\telif instruction.name == 
\"IDIV\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(sourceDataSecond != 0, errorTypes.wrongOperandValue)\n\t\t\tdataToDestination = sourceDataFirst // sourceDataSecond\t\t\t\t\t\t\t\t\t\n\n\t\telif instruction.name == \"LT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == type(sourceDataSecond), errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst < sourceDataSecond\n\t\t\n\t\telif instruction.name == \"GT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == type(sourceDataSecond), errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst > sourceDataSecond\n\n\t\telif instruction.name == \"EQ\":\n\t\t\tcustomAssert(type(sourceDataFirst) == type(sourceDataSecond) \n\t\t\t\t\t\t\tor type(sourceDataFirst) == Nil \n\t\t\t\t\t\t\tor type(sourceDataSecond) == Nil, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst == sourceDataSecond\n\n\t\telif instruction.name == \"AND\":\n\t\t\tcustomAssert(type(sourceDataFirst) == bool, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == bool, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst and sourceDataSecond\n\n\t\telif instruction.name == \"OR\":\n\t\t\tcustomAssert(type(sourceDataFirst) == bool, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == bool, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst or sourceDataSecond\n\n\t\telif instruction.name == \"NOT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == bool, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = not sourceDataFirst\n\n\t\telif instruction.name == \"PUSHS\":\n\n\t\t\tstackPushsPops.append(sourceDataFirst)\n\n\t\telif instruction.name == \"POPS\":\n\t\t\tcustomAssert(len(stackPushsPops) != 0, errorTypes.missingValue)\t\n\t\t\tdataToDestination = stackPushsPops.pop()\n\n\t\telif instruction.name == \"JUMP\":\n\n\t\t\tcurrentInstructionIndex = labelsIndexDict[labelToJump]\n\n\t\telif instruction.name == \"JUMPIFEQ\":\n\t\t\tcustomAssert(type(sourceDataFirst) == type(sourceDataSecond), errorTypes.wrongOperandType)\n\t\t\tif sourceDataFirst == sourceDataSecond:\n\t\t\t\tcurrentInstructionIndex = labelsIndexDict[labelToJump]\n\n\t\telif instruction.name == \"JUMPIFNEQ\":\n\t\t\tcustomAssert(type(sourceDataFirst) == type(sourceDataSecond), errorTypes.wrongOperandType)\n\t\t\tif sourceDataFirst != sourceDataSecond:\n\t\t\t\tcurrentInstructionIndex = labelsIndexDict[labelToJump]\n\n\t\telif instruction.name == \"CALL\":\n\t\t\tstackReturnToCaller.append(currentInstructionIndex)\n\t\t\tcurrentInstructionIndex = labelsIndexDict[labelToJump]\t\t\n\n\t\telif instruction.name == \"RETURN\":\n\t\t\tcustomAssert(len(stackReturnToCaller) != 0, errorTypes.missingValue)\n\t\t\tcurrentInstructionIndex = stackReturnToCaller.pop()\t\t\t\t\t\n\n\t\telif instruction.name == \"INT2CHAR\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\ttry:\n\t\t\t\tdataToDestination = chr(sourceDataFirst)\n\t\t\texcept:\n\t\t\t\texitWithError(errorTypes.wrongStringManipulation)\n\n\t\telif instruction.name == \"STRI2INT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == str, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\ttry:\n\t\t\t\tdataToDestination = 
ord(sourceDataFirst[sourceDataSecond])\n\t\t\texcept:\n\t\t\t\texitWithError(errorTypes.wrongStringManipulation)\n\n\t\telif instruction.name == \"CONCAT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == str, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == str, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst + sourceDataSecond\n\n\t\telif instruction.name == \"STRLEN\":\n\t\t\tcustomAssert(type(sourceDataFirst) == str, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = len(sourceDataFirst)\n\n\t\telif instruction.name == \"GETCHAR\":\n\t\t\tcustomAssert(type(sourceDataFirst) == str, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\ttry:\n\t\t\t\tdataToDestination = sourceDataFirst[sourceDataSecond]\n\t\t\texcept:\n\t\t\t\texitWithError(errorTypes.wrongStringManipulation)\n\n\t\telif instruction.name == \"SETCHAR\":\n\t\t\tcustomAssert(type(destinationData) == str, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == str, errorTypes.wrongOperandType)\n\n\t\t\tdestinationData = list(destinationData)\n\n\t\t\ttry:\n\t\t\t\tdestinationData[sourceDataFirst] = sourceDataSecond[0]\n\t\t\t\tdataToDestination = \"\".join(destinationData)\n\t\t\texcept:\n\t\t\t\texitWithError(errorTypes.wrongStringManipulation)\t\t\t\t\n\n\t\telif instruction.name == \"TYPE\":\n\t\t\t# Non initialized variable\n\t\t\tif sourceDataFirst == None:\n\t\t\t\tdataToDestination = \"\"\n\t\t\telif type(sourceDataFirst) == Nil:\n\t\t\t\tdataToDestination = \"nil\"\n\t\t\telif type(sourceDataFirst) == int:\n\t\t\t\tdataToDestination = \"int\"\n\t\t\telif type(sourceDataFirst) == str:\n\t\t\t\tdataToDestination = \"string\"\n\t\t\telif type(sourceDataFirst) == bool:\n\t\t\t\tdataToDestination = \"bool\"\n\n\t\telif instruction.name == \"EXIT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(0 <= sourceDataFirst <= 49, errorTypes.wrongOperandValue)\n\t\t\tsys.exit(sourceDataFirst)\n\n\t\telif instruction.name == \"DPRINT\":\n\t\t\tif type(sourceDataFirst) != Nil:\n\t\t\t\tif type(sourceDataFirst) == bool:\n\t\t\t\t\tprint(str(sourceDataFirst).lower(), file=sys.stderr, end=\"\")\n\t\t\t\telse:\t\n \t\t\t\t\tprint(sourceDataFirst, file=sys.stderr, end=\"\")\n\n\t\telif instruction.name == \"BREAK\":\n\t\t\tprint(\"Current instruction index:\", currentInstructionIndex - 1, file=sys.stderr)\n\t\t\tprint(\"Number of procesed instructions:\", processedInstructionsCount, \"(including this BREAK)\", file=sys.stderr)\n\t\t\tprint(\"GF:\", GF, file=sys.stderr)\n\t\t\tprint(\"TF:\", TF, file=sys.stderr)\n\t\t\tprint(\"LF:\", LF, file=sys.stderr)\n\n\n\t\t# Write dataToDestination to destination variable in frame\n\t\tif len(instruction.arguments) != 0:\n\t\t\tif instruction.arguments[0].type == \"var\" and instruction.name not in (\"PUSHS\", \"WRITE\", \"EXIT\", \"DPRINT\"):\n\t\t\t\tframe = instruction.arguments[0].name.split(\"@\", 1)[0]\n\t\t\t\tvarName = instruction.arguments[0].name.split(\"@\", 1)[1]\n\n\t\t\t\tif frame == \"GF\":\t\t\t\t\t\n\t\t\t\t\tGF[varName] = dataToDestination\n\t\t\t\telif frame == \"LF\":\n\t\t\t\t\tLF[-1][varName] = dataToDestination\n\t\t\t\telif frame == \"TF\":\n\t\t\t\t\tTF[varName] = dataToDestination\n\n\t\t\ndef main():\n\tsourceFile, inputFile = parseArguments()\n\n\tif sourceFile:\n\t\tsourceLines = 
getLinesFromFile(sourceFile)\n\telse:\n\t\tsourceLines = [line.strip() for line in sys.stdin]\t \n\t\n\tinputLines = getLinesFromFile(inputFile)\n\n\ttry:\n\t\troot = ET.fromstringlist(sourceLines)\n\texcept:\n\t\texitWithError(errorTypes.xmlNotWellFormated)\n\n\tcustomAssert(root.tag == \"program\", errorTypes.xmlStructureSyntaxLex)\n\tcustomAssert(root.get(\"language\") == \"IPPcode19\", errorTypes.xmlStructureSyntaxLex)\n\tfor atrib in root.attrib:\n\t\tcustomAssert(atrib in (\"language\", \"name\" , \"description\"), errorTypes.xmlStructureSyntaxLex)\n\tcheckXmlHeader(sourceLines)\n\n\tinstructionsList = getInstructionsFromXml(root)\n\n\tcheckOperandLexems(instructionsList)\n\tcheckSyntax(instructionsList)\n\n\tcheckLabelsSematics(instructionsList)\n\n\tinstructionsList = replaceEscapeSequences(instructionsList)\n\n\tinterpretCode(instructionsList, inputLines)\n\n\nif __name__== \"__main__\":\n\tmain()\n","repo_name":"dvagala/VUT-FIT-IPP-Code-interpreter","sub_path":"interpret.py","file_name":"interpret.py","file_ext":"py","file_size_in_byte":25149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"10255220697","text":"#!/usr/bin/env python\n# Written by Miguel Brown, 2016-Apr-27. Takes table with gene metrics and selects for high variability\n'''\nWritten by Miguel Brown, 2016-Apr-27. Bins and normalizes genes for last step in drop-seq training set creation\nPrints to stdout, specify > output_file at end!!!\nUsage: ./3_bin_norm_sel.py <table> <score>\n\nArguments:\n    <table>  table with gene metrics mean, variance and dispersion\n    <score>  Z score cutoff for selection\n\nOptions:\n-h\n'''\nimport sys\nfrom docopt import docopt\nimport numpy\nfrom scipy import stats\n\nargs = docopt(__doc__)\n\ntable = open(args['<table>'], 'r')\nscore = float(args['<score>'])\nnbins = 20\n\nhead = next(table)\n# get range of dataset to bin\ngenes = []\nmeans = []\ndm = []\nfor line in table:\n    data = line.rstrip('\\n').split('\\t')\n    genes.append(data[0])\n    means.append(float(data[1]))\n    dm.append(float(data[-1]))\ntable.close()\n# binning done by means\n(hist, bins) = numpy.histogram(means, nbins)\npos = numpy.digitize(means, bins)\nsys.stderr.write('Bin edges:\\n')\nfor i in bins:\n    sys.stderr.write('\\t' + str(i) + '\\n')\n# \"Validation\" done by dispersion metric!\nsys.stdout.write('bin\\tgene\\tdm\\tzscore\\n')\nfor i in xrange(0, len(bins), 1):\n    cur = []\n    ind = []\n    for j in xrange(0, len(pos), 1):\n        if pos[j] == i:\n            ind.append(j)\n            cur.append(dm[j])\n    if len(cur) > 1:\n        zcur = stats.zscore(cur)\n    else:\n        sys.stderr.write('Nothing fit into bin ' + str(i) + '\\n')\n        continue\n    flag = 0\n    for j in xrange(0, len(cur), 1):\n        if zcur[j] >= score:\n            sys.stdout.write(str(i) + '\\t' + '\\t'.join((genes[ind[j]], str(dm[ind[j]]), str(zcur[j]))) + '\\n')\n            flag = 1\n    if flag == 0:\n        sys.stderr.write('Nothing variable enough in bin ' + str(i) + '\\n')","repo_name":"WhiteLab/dropseq","sub_path":"3_bin_norm_sel.py","file_name":"3_bin_norm_sel.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
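The script above implements the standard highly-variable-gene trick: bin genes by mean expression, then z-score the dispersion within each bin, so a gene is selected relative to peers of similar abundance rather than against the whole dataset. A toy illustration of the same idea with made-up numbers (not pipeline output):

```python
import numpy as np
from scipy import stats

means = np.array([0.1, 0.2, 0.25, 5.0, 5.5, 6.0])  # per-gene mean expression
dm = np.array([1.0, 1.1, 3.0, 0.5, 0.6, 2.5])      # per-gene dispersion
hist, edges = np.histogram(means, bins=2)
pos = np.digitize(means, edges)                    # bin index per gene
for b in np.unique(pos):
    idx = np.where(pos == b)[0]
    if len(idx) > 1:
        picked = idx[stats.zscore(dm[idx]) >= 1.2]  # variable within its own bin
        print('bin', b, '->', picked)               # only gene 2 stands out here
```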
+{"seq_id":"39669117297","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/7/19 13:45\n# @Author : Yize Wang\n# @File : load_cases.py\n# @Software: AutoBladed\n\nimport numpy as np\nimport pandas as pd\nfrom PyQt5.QtCore import QThread, pyqtSignal\n\nfrom exceptions import SheetNameError\n\n\nclass CasesConfig(QThread):\n\t\"\"\"\n\twhen the cases configuration is large, the parse time would be so long\n\thence, here I use thread to encapsulate this calculations\n\t\"\"\"\n\n\thead = \"head\"\n\titems = \"items\"\n\n\tfinish_signal = pyqtSignal(bool, str)\n\n\tdef __init__(self, filename: str):\n\t\tsuper(CasesConfig, self).__init__()\n\n\t\t# record input\n\t\tself.filename = filename\n\n\t\t# use dictionary to store the cases configurations\n\t\tself.majors = []\t\t\t# major cases\n\t\tself.minors = []\t\t\t# minor cases\n\t\tself.cases_config = []\t\t# their configurations\n\n\t\treturn\n\n\tdef run(self):\n\t\traw_data = pd.read_excel(self.filename, sheet_name=None, engine=\"openpyxl\")\n\n\t\t# parse majors of the cases\n\t\tself.parse_majors(raw_data)\n\n\t\ttry:\n\t\t\t# parse minors of the cases\n\t\t\tself.parse_minors(raw_data)\n\t\texcept Exception as exc:\n\t\t\tself.finish_signal.emit(False, str(exc))\n\t\telse:\n\t\t\t# parse configurations\n\t\t\tself.parse_configs(raw_data)\n\n\t\t\tself.finish_signal.emit(True, \"Successful\")\n\n\t\treturn\n\n\tdef parse_majors(self, raw_data: dict):\n\t\t# all the sheets\n\t\tsheets = raw_data.keys()\n\t\t# at the left of dot\n\t\tself.majors = list(set([sheet.split(\".\")[0].strip() for sheet in sheets]))\n\t\t# sort them\n\t\tself.majors.sort()\n\n\t\treturn\n\n\tdef parse_minors(self, raw_data: dict):\n\t\tnum_majors = len(self.majors)\n\t\t# initialize the minors array\n\t\tself.minors = [[] for i in range(num_majors)]\n\n\t\tsheets = raw_data.keys()\n\t\tfor sheet in sheets:\n\t\t\tmajor_and_minor = sheet.split(\".\")\n\t\t\tif len(major_and_minor) != 2:\n\t\t\t\traise SheetNameError(self.filename)\n\n\t\t\t# loop for each sheet\n\t\t\tidx = self.majors.index(major_and_minor[0].strip())\n\t\t\t# append this sheet\n\t\t\tself.minors[idx].append(major_and_minor[1].strip())\n\n\t\tfor i in range(num_majors):\n\t\t\t# sort them\n\t\t\tself.minors[i].sort()\n\n\t\treturn\n\n\tdef parse_configs(self, raw_data: dict):\n\t\t# initialize the empty configuration list\n\t\tnum_major = len(self.majors)\n\t\tfor i in range(num_major):\n\t\t\ttemp = [1 for j in range(len(self.minors[i]))]\n\t\t\tself.cases_config.append(temp)\n\n\t\tfor sheet, value in raw_data.items():\n\t\t\tmajor, minor = sheet.split(\".\")\n\t\t\tidx_major = self.majors.index(major.strip())\n\t\t\tidx_minor = self.minors[idx_major].index(minor.strip())\n\n\t\t\thead = value.columns\n\t\t\tvalue = np.array(value).tolist()\n\t\t\tself.cases_config[idx_major][idx_minor] = {self.head: head, self.items: value}\n\n\t\treturn\n\n\nif __name__ == '__main__':\n\timport sys\n\tfrom PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton\n\n\t# initialize an application instance\n\tapp = QApplication(sys.argv)\n\n\twindow = QMainWindow()\n\twindow.setFixedSize(800, 300)\n\tbtn = QPushButton(window)\n\n\twork = CasesConfig(\"../../data/LoadCases.xlsx\")\n\tbtn.clicked.connect(work.start)\n\n\twindow.show()\n\n\tsys.exit(app.exec_())","repo_name":"wangyize0125/AutoBladed","sub_path":"src/kernels/load_cases.py","file_name":"load_cases.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"71268020087","text":"import pygame as pg\nfrom random import uniform\nfrom vehicle import Vehicle, mass_to_size_constant, circle_width\n\nboid_uid = 0\n\nclass Boid(Vehicle):\n\n # CONFIG\n debug = False\n min_speed = .001\n max_speed = .02\n max_force = 1\n max_turn = 5\n perception = 60\n # crowding = 15\n low_crowding = 1\n high_crowding = 15\n can_wrap = True\n edge_distance_pct = 5\n ###############\n\n def __init__(self):\n global boid_uid\n self.uid = boid_uid\n boid_uid += 1\n Boid.set_boundary(Boid.edge_distance_pct)\n\n # Randomize starting position and velocity\n start_position = pg.math.Vector2(\n uniform(0, Boid.max_x),\n uniform(0, Boid.max_y))\n start_velocity = pg.math.Vector2(\n uniform(-1, 1) * Boid.max_speed,\n uniform(-1, 1) * Boid.max_speed)\n\n super().__init__(start_position, start_velocity,\n Boid.min_speed, Boid.max_speed,\n Boid.max_force, Boid.can_wrap)\n\n self.rect = self.image.get_rect(center=self.position)\n\n self.debug = Boid.debug\n # self.debug = True\n\n def separation(self, boids):\n steering = pg.Vector2()\n for boid in boids:\n dist = self.position.distance_to(boid.position)\n # Lower crowding based on mass - after the threshold\n if dist < self.high_crowding:\n steering -= boid.position - self.position\n steering = self.clamp_force(steering)\n return steering\n\n def alignment(self, boids):\n steering = pg.Vector2()\n for boid in boids:\n steering += boid.velocity\n steering /= len(boids)\n steering -= self.velocity\n steering = self.clamp_force(steering)\n return steering / 8\n\n def cohesion(self, boids):\n steering = pg.Vector2()\n for boid in boids:\n steering += boid.position\n steering /= len(boids)\n steering -= self.position\n steering = self.clamp_force(steering)\n return steering / 100\n\n def update(self, dt, boids):\n steering = pg.Vector2()\n\n if not self.can_wrap:\n steering += self.avoid_edge()\n\n neighbors = self.get_neighbors(boids)\n if neighbors:\n\n separation = self.separation(neighbors)\n alignment = self.alignment(neighbors)\n cohesion = self.cohesion(neighbors)\n\n # DEBUG\n # separation *= 0\n # alignment *= 0\n # cohesion *= 0\n\n steering += separation + alignment + cohesion\n\n # steering = self.clamp_force(steering)\n\n super().update(dt, steering)\n\n def get_neighbors(self, boids):\n neighbors = []\n for boid in boids:\n if boid != self:\n dist = self.position.distance_to(boid.position)\n # We see in a circle\n if dist < self.perception:\n neighbors.append(boid)\n if dist < self.radius and self.mass > boid.mass:\n # print(f\"boid {self.uid} eating {boid.uid}\")\n self.eat_boid(boid)\n # Eat!\n # pass\n # print(f\"We would eat boid {boid.uid}!\")\n return neighbors\n\n def eat_boid(self, target):\n print(f\"Boid {self.uid} mass {self.mass} eating {target.uid} mass {target.mass}\")\n self.mass += target.mass\n target.mass = 0.\n target.kill()\n # target.delete = True\n # del target\n old_radius = self.radius\n self.radius = pow(self.mass, 0.5) * mass_to_size_constant\n center_offset = (self.radius - old_radius) / 2\n self.position += pg.Vector2(center_offset, center_offset)\n del self.image\n self.image = pg.Surface((self.radius * 2, self.radius * 2), pg.SRCALPHA)\n pg.draw.circle(\n surface = self.image,\n color = pg.Color(\"White\"),\n center = (int(self.radius), int(self.radius)),\n radius = self.radius,\n width = 
circle_width)\n","repo_name":"belarm/boids","sub_path":"boid.py","file_name":"boid.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"35296409317","text":"#INÍCIO HEAP E SUAS OPERAÇÕES\n\n#ATENDE AO ITEM 06 IMPLEMENTAR HEAP E SUAS OPERAÇÕES\n#Liste de prioridade(exemplo)\nPRIORITY_LIST = [30, 27, 24, 20, 25, 19, 22, 10, 15, 40, 18, 14, 11, 17, 21, 8, 1, 3, 4, 9, 7, 6]\n#30 -> 12\n#15 -> 45\n#Vetor que será utilizado para armazenar os elementos da fila de prioridades\nH = []\n\n#Vetor utilizar na Construção do Heap\nBUILD_HEAP = [4, 7, 3, 10, 2, 15, 1, 9, 6, 23, 42, 34, 65, 20]\nBH = []\n#Classe que que cria um elemento da Heap\n#Possui 2 atrubutos: prioridade e indice, cada objeto desse será adicionando ao vetor H\nclass HeapElements:\n def __init__(self, priority, index):\n self.priority = priority\n self.index = index\n #Define como esse objeto será impresso\n def __repr__(self):\n return \"[prior: \"+str(self.priority)+\" -> index: \"+str(self.index)+\"]\"\n\n\n#Método para imprimir a lista de prioridades, vetor H\ndef printH(text, H):\n print(text)\n for i in H:\n print(i)\n\n#Método CORRIGE DESCENDO. \ndef rectifyDown(H, i):\n #Pega a posição que foi modificada e de onde a correção iniciará\n greater = i\n #Avalia se o filho esquerdo da posição [i] ([2i]) é menor que a última posição do array\n #Avalia se a prioridade do Filho é maior que a do Pai\n if (2*i <= len(H)) and (H[(2*i)].priority > H[greater].priority):\n greater = 2*i\n \n #Igual a avaliação anterior, só que para o filho direito\n if ((2*i) + 1 <= len(H)) and (H[(2*i) + 1].priority > H[greater].priority):\n greater = (2*i) + 1\n \n #Caso a posição do elemento de maior prioridade seja diferente da posição da entreda da função\n #SIgnofica que um dos filhos tem prioridade maior que a do seu pai\n #Então esses elementos precisam trocar de lugar para manter a condição de Heap\n if greater != i:\n #armazena temporariamente o objeto que será trocado \n temporary = H[i].priority\n #troca a posição do pai com seu filho de maior prioridade\n H[i].priority = H[greater].priority\n H[greater].priority = temporary\n\n #Faz uma chamada recursiva a partir da nova posição para continuar corrigindo o Heap\n return rectifyDown(H, greater)\n\n#Método CORRIGE SUBINDO. 
\ndef rectifyUp(H, i):\n #pega o elemnto Pai do elemento que será corrigido\n parent = int(i/2)\n\n #Avalia quem tem maior prioridade e faz a troca\n if i >= 1 and H[i].priority > H[parent].priority:\n temporary = H[i].priority\n H[i].priority = H[parent].priority\n H[parent].priority = temporary\n\n #faz uma chamada recursvia para o próxim Pai e segue corrigindo até que chegue na \"raiz\"\n return rectifyUp(H, parent)\n\n#Funão que constroi um heap a patir de qualquer vetor \ndef buildHeap(build_heap):\n #pega o primeiro elemento que possui filhos e aplica o algoritmo de corrigir desecendo\n leng = int((len(build_heap)/2)-1)\n while leng >= 0:\n #a partir do primeiro elemento que possui filhos\n #aplica o corrige descendo até o início do vetor \n rectifyDown(BH, leng)\n #decrementa até chegar na posição 0 do vetor \n leng = leng -1\n\n#Função insere elementos em um heap\ndef insert(H, value):\n #pega o tamanho do vetor para se o índice do novo elemento\n tam = len(H)\n #adiciona o elemento no final, primeira posição disponível\n H.append(HeapElements(value, tam))\n #a partir a últim a posição corrige o vetor subindo até a raiz\n return rectifyUp(H, tam)\n\n\n#Função para remover um elemento do heap\ndef remove(H):\n #cria o elemento que vai ser retornado\n x = None\n leng = len(H)\n #faz a troca do primeir com o último elemento\n #no final corrige o vetor descendo\n if leng >= 1:\n x = H[0]\n H[0] = H[leng -1]\n H[0].index = 0\n H.pop(leng-1)\n rectifyUp(H, 0)\n #retorno o elemento removido\n return x\n#Função que altera a prioridade de um elemento na heap\ndef changeHeap(H, i, k):\n aux = H[i].priority\n #seta a nova prioridade no elemento H[i] \n H[i].priority = k\n\n #se a prioridade anterior do elemento for menor, corrige subindo\n if aux < k: return rectifyUp(H, i)\n\n #se a prioridade anterior do elemento for maior, corrige descendo\n if aux > k: return rectifyDown(H, i)\n\n#Simples função para criar uma fila de prioridades com o vetor de prioridade e os objetos da classe HeapElements\ndef createPriorityQueue(queue, vector):\n index = 0\n for priority in queue:\n element = HeapElements(priority, index)\n vector.append(element)\n index=index+1\n\ncreatePriorityQueue(PRIORITY_LIST, H)\n\ncreatePriorityQueue(BUILD_HEAP, BH)\n\n#Imprime a fila de prioridades\nprintH(\"Lista Inicial\", H)\n\n\n#Corrige descendo a fila de porioridades com a alteração do elemento\n#na posição 0. 
Era 30 na H[0], foi substituido pra 12.\n#Agora a posição H[0] = 12\n#rectifyDown(H, 0)\n\n#Imprime a fila corrigida.\n#O elemento 12 agora está na posição H[9]\nprint(\"__________________\")\n#printH(\"Corrige Descendo\", H)\n\n#Corrige subindo a fila de porioridades com a alteração do elemento\n#O elemento 45 foi adicionando na posição 8\n#Após a correção subndo o elemnento 45 vai para sua posição corre H[0]\n#rectifyUp(H, 8)\n\n#Imprime lista após a correção\n#printH(\"Corrige Descendo\", H)\n\n#Lista que será convertida em heap\n#printH(\"Lista para construir o Heap\", BH)\n\n#Função que transforma o vetor em Heap\n#buildHeap(BH)\n#Lista após a transformação em heap\n#printH(\"Construindo Heap\", BH)\n\n#Inserindo um elemento novo no heap\n#insert(H, 79);\n\n#Imprimindo a heap após a inserção, \n#o elemento novo, 79, foi para a pósição H[0], posição com maior prioridade\n#printH(\"Inserindo elemento no Heap\", H)\n\n#Remove o elemento de maior prioridade\n#print(remove(H))\n\n#printH(\"Removendo um elemento do heap\", H)\n\n#Alterando a prioridade do elemento na posição H[0] de 30 para 2\n#changeHeap(H, 0, 2)\n#Imprimindo a lista após a alteração\n#printH(\"Alterando um elemento da Heap\", H)\n\n#FIM HEAP E SUAS OPERAÇÕES","repo_name":"gleydsonbrito/projeto-analise-algoritmo","sub_path":"AtividadePAA/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"32365358416","text":"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'trucks.views.home', name='home'),\n url(r'^about/$', 'trucks.views.about', name='about'),\n)\n","repo_name":"colin2328/ginger","sub_path":"trucks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"72839197686","text":"from uuid import UUID\nfrom fastapi import APIRouter, Depends, Body\nfrom sqlalchemy.orm.attributes import flag_modified\nfrom ert_storage.database import Session, get_db\nfrom ert_storage import database_schema as ds, json_schema as js\nfrom ert_storage.json_schema.prior import (\n PriorConst,\n PriorTrig,\n PriorNormal,\n PriorLogNormal,\n PriorErtTruncNormal,\n PriorStdNormal,\n PriorUniform,\n PriorErtDUniform,\n PriorLogUniform,\n PriorErtErf,\n PriorErtDErf,\n)\nfrom typing import Any, Mapping, List, Type\n\n\nrouter = APIRouter(tags=[\"experiment\"])\n\n\n@router.get(\"/experiments\", response_model=List[js.ExperimentOut])\ndef get_experiments(\n *,\n db: Session = Depends(get_db),\n) -> List[js.ExperimentOut]:\n experiments = db.query(ds.Experiment).all()\n return [_experiment_from_db(exp) for exp in experiments]\n\n\n@router.get(\"/experiments/{experiment_id}\", response_model=js.ExperimentOut)\ndef get_experiment_by_id(\n *, db: Session = Depends(get_db), experiment_id: UUID\n) -> js.ExperimentOut:\n experiment = db.query(ds.Experiment).filter_by(id=experiment_id).one()\n return _experiment_from_db(experiment)\n\n\n@router.post(\"/experiments\", response_model=js.ExperimentOut)\ndef post_experiments(\n *,\n db: Session = Depends(get_db),\n ens_in: js.ExperimentIn,\n) -> js.ExperimentOut:\n experiment = ds.Experiment(name=ens_in.name)\n\n if ens_in.priors:\n db.add_all(\n ds.Prior(\n function=ds.PriorFunction.__members__[prior.function],\n experiment=experiment,\n name=name,\n argument_names=[x[0] for x in prior if isinstance(x[1], (float, int))],\n argument_values=[x[1] for x in prior if isinstance(x[1], (float, int))],\n )\n for name, prior in ens_in.priors.items()\n )\n\n db.add(experiment)\n db.commit()\n return _experiment_from_db(experiment)\n\n\n@router.get(\n \"/experiments/{experiment_id}/ensembles\", response_model=List[js.EnsembleOut]\n)\ndef get_experiment_ensembles(\n *, db: Session = Depends(get_db), experiment_id: UUID\n) -> List[ds.Ensemble]:\n return db.query(ds.Ensemble).join(ds.Experiment).filter_by(id=experiment_id).all()\n\n\n@router.put(\"/experiments/{experiment_id}/userdata\")\nasync def replace_experiment_userdata(\n *,\n db: Session = Depends(get_db),\n experiment_id: UUID,\n body: Any = Body(...),\n) -> None:\n \"\"\"\n Assign new userdata json\n \"\"\"\n experiment = db.query(ds.Experiment).filter_by(id=experiment_id).one()\n experiment.userdata = body\n db.commit()\n\n\n@router.patch(\"/experiments/{experiment_id}/userdata\")\nasync def patch_experiment_userdata(\n *,\n db: Session = Depends(get_db),\n experiment_id: UUID,\n body: Any = Body(...),\n) -> None:\n \"\"\"\n Update userdata json\n \"\"\"\n experiment = db.query(ds.Experiment).filter_by(id=experiment_id).one()\n experiment.userdata.update(body)\n flag_modified(experiment, \"userdata\")\n db.commit()\n\n\n@router.get(\"/experiments/{experiment_id}/userdata\", response_model=Mapping[str, Any])\nasync def get_experiment_userdata(\n *,\n db: Session = Depends(get_db),\n experiment_id: UUID,\n) -> Mapping[str, Any]:\n \"\"\"\n Get userdata json\n \"\"\"\n experiment = db.query(ds.Experiment).filter_by(id=experiment_id).one()\n return experiment.userdata\n\n\n@router.delete(\"/experiments/{experiment_id}\")\ndef delete_experiment(*, db: Session = Depends(get_db), experiment_id: UUID) -> None:\n experiment = db.query(ds.Experiment).filter_by(id=experiment_id).one()\n db.delete(experiment)\n db.commit()\n\n\nPRIOR_FUNCTION_TO_PYDANTIC: Mapping[ds.PriorFunction, 
Type[js.Prior]] = {\n ds.PriorFunction.const: PriorConst,\n ds.PriorFunction.trig: PriorTrig,\n ds.PriorFunction.normal: PriorNormal,\n ds.PriorFunction.lognormal: PriorLogNormal,\n ds.PriorFunction.ert_truncnormal: PriorErtTruncNormal,\n ds.PriorFunction.stdnormal: PriorStdNormal,\n ds.PriorFunction.uniform: PriorUniform,\n ds.PriorFunction.ert_duniform: PriorErtDUniform,\n ds.PriorFunction.loguniform: PriorLogUniform,\n ds.PriorFunction.ert_erf: PriorErtErf,\n ds.PriorFunction.ert_derf: PriorErtDErf,\n}\n\n\ndef prior_to_dict(prior: ds.Prior) -> dict:\n return (\n PRIOR_FUNCTION_TO_PYDANTIC[prior.function]\n .parse_obj(\n {key: val for key, val in zip(prior.argument_names, prior.argument_values)}\n )\n .dict()\n )\n\n\ndef experiment_priors_to_dict(experiment: ds.Experiment) -> Mapping[str, dict]:\n return {p.name: prior_to_dict(p) for p in experiment.priors}\n\n\ndef _experiment_from_db(exp: ds.Experiment) -> js.ExperimentOut:\n return js.ExperimentOut(\n id=exp.id,\n name=exp.name,\n ensemble_ids=exp.ensemble_ids,\n priors=experiment_priors_to_dict(exp),\n userdata=exp.userdata,\n )\n","repo_name":"equinor/ert-storage","sub_path":"src/ert_storage/endpoints/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":4847,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
+{"seq_id":"30265622366","text":"n=int(input())\r\r\na=list(map(int,input().split()))\r\r\nans=0\r\r\n\r\r\nfor i in range(n):\r\r\n for j in range(i,n):\r\r\n ans=max(ans,a[i:j+1].count(0)+a[:i].count(1)+a[j+1:].count(1))\r\r\n # print(\"ans\", ans ,\"i\",i,\"j\",j)\r\r\n\r\r\nprint(ans)\r\r\n","repo_name":"utkarsh-dubey/Codeforces","sub_path":"327A.py","file_name":"327A.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"38189022740","text":"## Imports\nfrom __future__ import print_function\nimport sys\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SparkSession\n\nimport math\n \n## Module Constants\nAPP_NAME = \"My Spark Application\"\n \n## Closure Functions\ndef stdDev(sumX, sumSquared, n):\n mean = sumX / n\n stdDeviation = math.sqrt((sumSquared - n*mean*mean) / n)\n return(mean, stdDeviation)\n\n## Main functionalitya\n \ndef main(sc):\n pass\n \nif __name__ == \"__main__\":\n \t# create an instance of a SparkSession as spark\n spark = SparkSession.builder.appName(\"wordcount\").getOrCreate()\n\n # create SparkContext as sc\n sc = spark.sparkContext\n\n data = [(\"A\", 2.), (\"A\", 4.), (\"A\", 9.), (\"B\", 10.), (\"B\", 20.), (\"Z\", 3.), (\"Z\", 5.), (\"Z\", 8.), (\"Z\", 12.)]\n\n rdd = sc.parallelize(data)\n print(rdd.collect())\n print(rdd.count())\n\n # mean and standard deviation\n sumCount = rdd.combineByKey(lambda value: (value, value*value, 1),\n lambda x, value: (x[0] + value, x[1] + value*value, x[2]+1),\n lambda x, y: (x[0] + y[0], x[1] + y[1], x[2]+y[2])\n )\n print(sumCount.collect())\n\n meanAndStdDev = sumCount.mapValues(lambda x: stdDev(x[0], x[1], x[2]))\n print(meanAndStdDev.collect())\n\n # done!\n spark.stop()\n","repo_name":"laylalaisy/LearningNote_Spark","sub_path":"9_combine_by_key/combineByKey2.py","file_name":"combineByKey2.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"10520656450","text":"import turtle as t\nimport random\n\n\n#Random walk\n\ntorti=t.Turtle()\nt.colormode(255)\ntorti.shape(\"turtle\")\n\ndef colorAleatorio():\n r=random.randint(0,255)\n g=random.randint(0,255)\n b=random.randint(0,255)\n color=(r,g,b)\n return color\n\ndirecciones=[0,90,180,270]\ntorti.pensize(10)\ntorti.speed(\"fastest\")\nfor i in range(150):\n torti.color(colorAleatorio())\n torti.forward(30)\n torti.setheading(random.choice(direcciones))\n\npantalla=t.Screen()\npantalla.exitonclick()","repo_name":"UlisesJuarez/Juegos_con_turtle","sub_path":"Inicios turtle graphics/ejercicios/reto2.py","file_name":"reto2.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"2624957686","text":"\"\"\"\nContains views for DawgHouse\n\"\"\"\n\n# pylint: disable=no-member\n# pylint: disable=undefined-variable\n\nimport json\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom django.db.models import Q\nfrom Levenshtein import distance\nfrom .models import Bark, DawgHouseUser, SniffRequest, Comment\nfrom .forms import LoginForm, CustomUserCreationForm\n\n\ndef home_view(request):\n \"\"\"Shows login/signup if not authenticated otherwise timeline\"\"\"\n if request.user.is_authenticated:\n user = request.user\n friends = user.friends.all()\n barks = Bark.objects.filter(Q(user=user) | Q(user__in=friends)).order_by(\n \"-timestamp\"\n )\n\n context = {\n \"barks\": barks,\n }\n\n return render(request, \"main_page.html\", context)\n\n return render(request, \"homepage.html\")\n\n\ndef login_view(request):\n \"\"\"Handles loging form POST data\"\"\"\n form = LoginForm()\n if request.method == \"POST\":\n form = LoginForm(request.POST)\n if form.is_valid():\n user = authenticate(\n username=form.cleaned_data[\"username\"],\n password=form.cleaned_data[\"password\"],\n )\n if user is not None:\n login(request, user)\n return redirect(\"/main/\")\n \n message = \"Unrecognized Dawgtag or Password\"\n\n return render(request, \"login.html\", {\"form\": form, \"message\": message})\n\n return render(request, \"login.html\", {\"form\": form})\n\n\ndef signup_view(request):\n \"\"\"Handles signup form POST data\"\"\"\n if request.method == \"POST\":\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n messages.success(request, \"Account created successfully\")\n login(request, user)\n return redirect(\"/main/\")\n \n for field in form:\n for error in field.errors:\n messages.error(request, f\"{field.label}: {error}\")\n return redirect(\"/signup/\")\n \n\n form = CustomUserCreationForm()\n\n return render(request, \"signup.html\", {\"form\": form})\n\n\ndef logout_view(request):\n \"\"\"Logs user out\"\"\"\n logout(request)\n return redirect(\"/\")\n\n\n@login_required\ndef send_sniff_request(request, user_ID):\n \"\"\"Creates new sniff request in DB\"\"\"\n from_user = request.user\n to_user = DawgHouseUser.objects.get(id=user_ID)\n sniff_request, created = SniffRequest.objects.get_or_create(\n from_user=from_user, to_user=to_user\n )\n if created:\n messages.success(request, \"Sniff request sent successfully.\")\n return redirect(\"profile\", username=to_user.username)\n\n messages.warning(request, \"Sniff request was already sent.\")\n return redirect(\"profile\", username=to_user.username)\n\n\n@login_required\ndef accept_sniff_request(request, request_ID):\n \"\"\"Moves sniff request to friends table and deltes from sniff table\"\"\"\n sniff_request = SniffRequest.objects.get(id=request_ID)\n if sniff_request.to_user == request.user:\n sniff_request.to_user.friends.add(sniff_request.from_user)\n sniff_request.from_user.friends.add(sniff_request.to_user)\n sniff_request.delete()\n return redirect(\"home_view\")\n\n return redirect(\"home_view\")\n\n@login_required\ndef decline_sniff_request(request, request_id):\n \"\"\"Deletes sniff from table\"\"\"\n sniff_request = 
SniffRequest.objects.get(id=request_id)\n if sniff_request.to_user == request.user:\n sniff_request.delete()\n return redirect(\"home_view\")\n \n return redirect(\"home_view\")\n\n@login_required\ndef send_example_view(request):\n \"\"\"Sends information to DB to make new sniff request\"\"\"\n allusers = DawgHouseUser.objects.all()\n all_sniff_requests = SniffRequest.objects.all()\n\n context = {\n \"allusers\": allusers,\n \"all_sniff_requests\": all_sniff_requests,\n }\n return render(request, \"sniff_example.html\", context)\n\n\n@login_required\ndef accept_example_view(request):\n \"\"\"Renders sniff request template\"\"\"\n all_sniff_requests = SniffRequest.objects.all()\n\n context = {\n \"all_sniff_requests\": all_sniff_requests,\n }\n return render(request, \"accept_sniffs_example.html\", context)\n\n\ndef profile_view(request, username):\n \"\"\"Determines which profile template to render and renders it\"\"\"\n logged_in_user = request.user\n user = get_object_or_404(DawgHouseUser, username=username)\n friends_list = user.friends.all()\n barks = Bark.objects.filter(user=user).order_by(\"-timestamp\")\n context = {\n \"user\": user,\n \"barks\": barks,\n \"friends_list\": friends_list,\n \"logged_in_user\": logged_in_user\n }\n if user == request.user:\n return render(request, \"user_profile.html\", context)\n if request.user in user.friends.all():\n return render(request, \"friend_view.html\", context)\n\n return render(request, \"non_friend_view.html\", context)\n\n\n@login_required\ndef post_bark(request):\n \"\"\"Logic for posting a new bark\"\"\"\n if request.method == \"POST\":\n bark_content = request.POST.get(\"bark_content\")\n\n new_bark = Bark(user=request.user, content=bark_content)\n new_bark.save()\n\n return redirect(f\"/profile/{request.user.username}/\")\n\n return redirect(\"/\")\n\n@login_required\ndef home_post_bark(request):\n \"\"\"Logic for posting a bark from homepage\"\"\"\n if request.method == \"POST\":\n bark_content = request.POST.get(\"bark_content\")\n\n new_bark = Bark(user=request.user, content=bark_content)\n new_bark.save()\n\n return redirect(\"/main/\")\n\n return redirect(\"/\")\n\n@login_required\ndef delete_bark(request, id):\n \"\"\"Logic for deleting a bark\"\"\"\n post = get_object_or_404(Bark, pk=id)\n\n if request.method == \"DELETE\":\n # Check if the user has permission to delete the post\n if request.user == post.user:\n post.delete() # Delete the post\n return JsonResponse({\"success\": True})\n\n return JsonResponse({\"success\": False, \"error\": \"Permission denied\"})\n return JsonResponse({\"success\": False, \"error\": \"Invalid request method\"})\n\n\n@csrf_exempt\n@login_required\ndef repost_post(request, bark_id):\n \"\"\"Logic for reposting\"\"\"\n if request.method == \"POST\":\n # Get the original bark\n original_bark = get_object_or_404(Bark, id=bark_id)\n existing_repost = Bark.objects.filter(\n original_bark=original_bark,\n user=request.user,\n is_repost=True\n ).first()\n\n if existing_repost:\n existing_repost.delete()\n original_bark.num_howls -= 1\n original_bark.save()\n return JsonResponse({\"success\": True, \"is_repost\": False})\n # Create a new Bark instance\n new_bark = Bark(\n content=original_bark.content,\n user=request.user, # Use the currently logged-in user as the author\n is_repost=True,\n original_bark=original_bark,\n )\n original_bark.num_howls += 1\n new_bark.save()\n original_bark.save()\n\n return JsonResponse({\"success\": True, \"is_repost\":True})\n return JsonResponse({\"success\": 
False})\n\n\n@csrf_exempt\n@login_required\ndef edit_bark_ajax(request):\n \"\"\"Ajax logic for editing a post\"\"\"\n if request.method == \"POST\":\n data = json.loads(request.body)\n post_id = data.get(\"post_id\")\n new_content = data.get(\"new_content\")\n post = Bark.objects.filter(id=post_id).first()\n\n if (\n post and request.user == post.user\n ): # Check if the post exists and the user is the owner of the bark\n post.content = new_content\n post.save()\n return JsonResponse({\"success\": True})\n return JsonResponse({\"success\": False})\n\n\n@login_required\ndef add_comment(request, bark_id): # include bark_id here\n \"\"\"Logic for adding a comment to a post\"\"\"\n if request.method == \"POST\":\n comment_text = request.POST.get(\"comment_text\")\n user = request.user\n\n if comment_text:\n bark = Bark.objects.get(id=bark_id) # now bark_id is defined\n comment = Comment(bark=bark, name=user, body=comment_text)\n bark.num_yips = Comment.objects.filter(bark=bark).count() + 1\n comment.save()\n bark.save()\n\n return JsonResponse({\"user\": user.username, \"text\": comment_text})\n\n return JsonResponse({\"error\": \"Comment text is empty\"}, status=400)\n\n return JsonResponse({}, status=400)\n\n\n@login_required\ndef delete_comment(request, comment_id):\n \"\"\"Logic for deleting a comment\"\"\"\n try:\n comment = Comment.objects.get(id=comment_id)\n\n # Check if the user is the owner of the comment or the owner of the Bark.\n if request.user in (comment.name, comment.bark.user):\n comment.delete()\n\n bark = comment.bark\n bark.num_yips = Comment.objects.filter(bark=bark).count()\n bark.save()\n\n return JsonResponse({\"success\": True})\n\n return JsonResponse({\"success\": False, \"error\": \"Permission denied\"})\n except Comment.DoesNotExist:\n return JsonResponse({\"success\": False, \"error\": \"Comment not found\"})\n\n\n@login_required\ndef give_treat(request, bark_id, user_which, return_to):\n \"\"\"Logic for liking a post\"\"\"\n bark = get_object_or_404(Bark, id=bark_id)\n user = request.user\n\n if user in bark.treated_by.all():\n bark.num_likes -= 1\n bark.treated_by.remove(user)\n else:\n bark.num_likes += 1\n bark.treated_by.add(user)\n\n bark.save()\n\n if return_to == \"main_timeline\":\n return redirect(\"main_timeline\")\n if return_to == \"profile\":\n return redirect(f\"/profile/{user_which}/\")\n \n return redirect(f\"/profile/{return_to}/\")\n\n\n@method_decorator(csrf_exempt, name=\"dispatch\")\ndef edit_bio_ajax(request):\n \"\"\"Ajax logic for editing a bio\"\"\"\n if request.method == \"POST\":\n data = json.loads(request.body)\n new_bio = data.get(\"bio\")\n request.user.bio = new_bio\n request.user.save()\n return JsonResponse({\"success\": True})\n return JsonResponse({\"success\": False})\n\n\ndef search_users(request):\n \"\"\"Fuzzy username matching alogorithm\"\"\"\n if request.method == \"POST\":\n username = request.POST.get(\"username\", None)\n if username:\n users = DawgHouseUser.objects.all()\n similar_users = []\n for user in users:\n dist = distance(username, user.username)\n username_length = len(username)\n similarity_ratio = (\n 1 - dist / max(username_length, len(user.username))\n ) * 100\n if similarity_ratio >= 60:\n similar_users.append(user)\n return render(\n request, \"search_results.html\", {\"similar_users\": similar_users}\n )\n return render(request, \"search_users.html\")\n\n\n@login_required\ndef main_timeline(request):\n \"\"\"Retrieves all posts to display and renders homepage template\"\"\"\n user = request.user\n 
friends = user.friends.all()\n barks = Bark.objects.filter(Q(user=user) | Q(user__in=friends)).order_by(\n \"-timestamp\"\n )\n\n for friend in friends:\n print(friend.username)\n\n context = {\n \"barks\": barks,\n }\n return render(request, \"main_page.html\", context)\n\n@login_required\ndef change_profile_picture(request, picture_path):\n \"\"\"Sets 'profile_picture' in DB to static image path\"\"\"\n request.user.profile_picture = picture_path\n request.user.save()\n \n return redirect(f\"/profile/{request.user.username}\")\n","repo_name":"Ragnarok9401/intro_software_eng_jf1774","sub_path":"DawgHouse/homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"18136794272","text":"from game.players import BasePokerPlayer\nfrom agents.model import DQN, ConvNet, DDPG\nfrom .utils import *\nimport numpy as np\nimport torch\nimport random as rand\nimport copy\n\nround_map = {\"preflop\": 0, \"flop\": 1, \"turn\": 2, \"river\": 3}\nsuits = list(Card.SUIT_MAP.keys())\nranks = list(Card.RANK_MAP.keys())\n\ndef gen_cards_im(cards):\n a = torch.zeros(4, 13)\n for i, card in enumerate(cards):\n s = suits.index(card.suit)\n r = ranks.index(card.rank)\n a[s][r] = 1\n return torch.nn.functional.pad(a, (2, 2, 6, 7))\n\nclass DDPGPlayer(BasePokerPlayer):\n def __init__(self, do_train=True, model_path=\"./DDPG\", batch_size=128, capacity=5000, device=\"cuda:4\"):\n self.do_train = do_train\n self.model_path = model_path\n self.cache = []\n if self.do_train:\n self.model = DDPG(self.model_path, batch_size, capacity, c=device)\n else:\n self.model = torch.load(self.model_path, map_location=torch.device('cpu'))\n self.model.actor = self.model.actor.to(device)\n self.model.act_target = self.model.act_target.to(device)\n self.model.critic = self.model.critic.to(device)\n self.model.cri_target = self.model.cri_target.to(device)\n self.model.c = device\n \n self.watch = ConvNet().to(\"cpu\")\n self.watch.load_state_dict(torch.load(\"./embedding/model-999.pt\"))\n self.step = 0\n self.update_step = 5000\n self.last_image = None\n self.last_features = None\n self.last_action = None\n \n self.CHE = 0\n \n #self.model = torch.load(model_path, map_location=lambda storage, loc: storage)\n\n def declare_action(self, valid_actions, hole_card, round_state):\n #print(self.step)\n community_card = round_state[\"community_card\"]\n hole_card = gen_cards(hole_card)\n community_card = gen_cards(community_card)\n hc = gen_cards_im(hole_card)\n cc = gen_cards_im(community_card)\n un = hc + cc\n img = torch.stack([hc, cc, un])\n with torch.no_grad():\n wr = self.watch(img)\n #wr = estimate_hole_card_win_rate(nb_simulation=5000, nb_player=2, hole_card=hole_card, community_card=community_card)\n #print(wr)\n features = [round_state['pot']['main']['amount'], round_state['small_blind_pos'], round_state['big_blind_pos'], round_state['dealer_btn'], round_state['next_player'], round_state['round_count'], wr]\n features.extend([s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid])\n features.extend([s['stack'] for s in round_state['seats'] if s['uuid'] != self.uuid])\n features.extend([0 if i != round_map[round_state['street']] else 1 for i in range(4)])\n features.append([s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid][0] - self.street_stack)\n features.append([s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid][0] - self.round_stack)\n features.append(round_state['pot']['main']['amount'] - [s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid][0] + self.round_stack)\n features = torch.Tensor(features)\n \n \n money = float(self.model.choose_action(img, features, valid_actions))\n \n if valid_actions[2][\"amount\"][\"max\"] == -1:\n if money < -10:\n action = valid_actions[0][\"action\"]\n amount = valid_actions[0][\"amount\"]\n else:\n action, amount = valid_actions[1]['action'], valid_actions[1]['amount']\n \n elif money < -10:\n action = valid_actions[0][\"action\"]\n amount = valid_actions[0][\"amount\"]\n elif money <= valid_actions[1][\"amount\"]:\n action = valid_actions[1][\"action\"]\n amount = valid_actions[1][\"amount\"]\n else:\n action = valid_actions[2][\"action\"]\n amount = min(max(money, 
valid_actions[2][\"amount\"][\"min\"]), valid_actions[2][\"amount\"][\"max\"])\n \n if self.do_train:\n if self.last_image != None:\n self.cache.append([self.last_image, self.last_features, self.last_action, img, features])\n self.last_image = copy.deepcopy(img)\n self.last_features = copy.deepcopy(features)\n self.last_action = copy.deepcopy(amount) if money > 0 else copy.deepcopy(money)\n #print(action, amount, money)\n \n if self.step > self.update_step:\n if self.CHE == 0:\n print(\"-----START_LEARNING-------\")\n self.CHE = 1\n self.model.learn()\n \n self.step += 1\n \n return action, amount\n\n def receive_game_start_message(self, game_info):\n self.start_stack = game_info[\"rule\"][\"initial_stack\"]\n\n def receive_round_start_message(self, round_count, hole_card, seats):\n self.round_stack = [s['stack'] for s in seats if s['uuid'] == self.uuid][0]\n\n def receive_street_start_message(self, street, round_state):\n self.street_stack = [s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid][0]\n\n def receive_game_update_message(self, new_action, round_state):\n pass\n\n def receive_round_result_message(self, winners, hand_info, round_state):\n# print(\"WWW\")\n if self.do_train:\n if self.last_image != None:\n reward = [s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid][0] - self.round_stack\n# print(reward)\n# print(winners)\n# print(hand_info)\n reward = (2 * (reward >= 0) - 1) * np.log(1 + abs(reward))\n #print(reward)\n for C in self.cache:\n self.model.store_memory(C[0], C[1], C[2], reward, C[3], C[4])\n self.model.store_memory(self.last_image, self.last_features, self.last_action, reward, self.last_image, self.last_features)\n self.cache = []\n self.last_image = None\n self.last_features = None\n self.last_action = None\n\n\ndef setup_ai():\n return DDPGPlayer()\n\n","repo_name":"yahcreepers/FAI-Final_Project","sub_path":"agents/DDPG.py","file_name":"DDPG.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"39862057484","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"size_effect_normalization\",\n version=\"1.0\",\n author=\"Mathias Gotsmy\",\n author_email=\"mathias.gotsmy@univie.ac.at\",\n description=\"A python package for size effect normalization in time series metabolome data sets.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Gotsmy/sweat_normalization\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNUv3 License\",\n \"Operating System :: OS Independent\",\n ],\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(where=\"src\"),\n python_requires=\">=3.7,<3.8\",\n include_package_data=True,\n package_data={'': ['data/*.csv']},\n install_requires=['importlib-metadata==3.7.3',\n 'matplotlib==3.3.4',\n 'numpy==1.20.1',\n 'pandas==1.3.0',\n 'pickleshare==0.7.5',\n 'scipy==1.7.0',\n 'tqdm==4.50.0',]\n)\n","repo_name":"Gotsmy/sweat_normalization","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"20140645210","text":"from datetime import datetime as dt\n\nfrom common.logger import get_logger\nfrom orchestrator.config import ORDER_EXPIRATION_THRESHOLD_IN_MINUTES\nfrom orchestrator.order_status import OrderStatus\n\nlogger = get_logger(__name__)\n\n\nclass TransactionHistoryDAO:\n def __init__(self, repo):\n self.__repo = repo\n\n def insert_transaction_history(self, obj_transaction_history):\n transaction_history = obj_transaction_history.get_transaction_history()\n query_response = self.__repo.execute(\n \"INSERT INTO transaction_history (username, order_id, order_type, status, payment_id, payment_method, \"\n \"raw_payment_data, transaction_hash, row_created, row_updated)\"\n \"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) \"\n \"ON DUPLICATE KEY UPDATE payment_id = %s, payment_method = %s, raw_payment_data = %s, transaction_hash = %s, row_updated = %s\",\n [\n transaction_history[\"username\"],\n transaction_history[\"order_id\"],\n transaction_history[\"order_type\"],\n transaction_history[\"status\"],\n transaction_history[\"payment_id\"],\n transaction_history[\"payment_method\"],\n transaction_history[\"raw_payment_data\"],\n transaction_history[\"transaction_hash\"],\n dt.utcnow(),\n dt.utcnow(),\n transaction_history[\"payment_id\"],\n transaction_history[\"payment_method\"],\n transaction_history[\"raw_payment_data\"],\n transaction_history[\"transaction_hash\"],\n dt.utcnow()\n ]\n )\n if query_response[0] == 1:\n return True\n return False\n\n def get_order_id_for_expired_transaction(self):\n params = [OrderStatus.PAYMENT_INITIATED.value, OrderStatus.PAYMENT_INITIATION_FAILED.value,\n OrderStatus.PAYMENT_EXECUTION_FAILED.value, ORDER_EXPIRATION_THRESHOLD_IN_MINUTES]\n order_id_raw_data = self.__repo.execute(\n \"SELECT order_id FROM transaction_history WHERE status IN (%s, %s, %s) AND \"\n \"TIMESTAMPDIFF(MINUTE, row_created, NOW()) > %s \",\n [OrderStatus.PAYMENT_INITIATED.value, OrderStatus.PAYMENT_INITIATION_FAILED.value,\n OrderStatus.PAYMENT_EXECUTION_FAILED.value, ORDER_EXPIRATION_THRESHOLD_IN_MINUTES])\n list_of_order_id = [rec[\"order_id\"] for rec in order_id_raw_data]\n return list_of_order_id\n\n def update_transaction_status(self, list_of_order_id, status):\n if len(list_of_order_id) == 0:\n return \"No order id found\"\n temp_holder = (\"%s, \" * len(list_of_order_id))[:-2]\n params = [status] + list_of_order_id + [OrderStatus.PAYMENT_INITIATED.value,\n OrderStatus.PAYMENT_INITIATION_FAILED.value,\n OrderStatus.PAYMENT_EXECUTION_FAILED.value]\n update_transaction_status_response = self.__repo.execute(\n \"UPDATE transaction_history SET status = %s WHERE order_id IN (\" + temp_holder + \") AND status IN (%s, %s, %s)\",\n params)\n logger.info(f\"update_transaction_status: {update_transaction_status_response}\")\n return update_transaction_status_response\n\n def get_transaction_details_for_given_order_id(self, order_id):\n transaction_data = self.__repo.execute(\n \"SELECT username, order_id, order_type, status, payment_id, payment_type, payment_method, raw_payment_data, \"\n \"transaction_hash FROM transaction_history WHERE order_id = %s\", [order_id])\n if len(transaction_data) == 0:\n raise Exception(\"Order Id does not exist.\")\n return transaction_data[0]\n","repo_name":"singnet/snet-marketplace-service","sub_path":"orchestrator/dao/transaction_history_dao.py","file_name":"transaction_history_dao.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"76"}
+{"seq_id":"14207869115","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import filedialog\n\nglobal root\nglobal scrnwparam\nglobal scrnhparam\nscrnwparam = 185\nscrnhparam = 150\n\nimport os, sys, re, shutil\nfrom pathlib import Path\nfrom threading import Thread\nimport time\nimport datetime as datetime2\n\nfrom pdfminer.layout import LAParams, LTTextBox, LTText, LTChar, LTAnno\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\nfrom pdfminer.converter import PDFPageAggregator\n\n\nglobal root\n\n\nglobal PlaceInputDir\nglobal PlaceFilesArray\nglobal PlaceInputDirSel\nglobal PlaceOutputDirSel\nglobal PlaceWorking\nglobal PlaceStartTime\n\n\n\n\n\n\ndef main():\n global root\n\n root = Tk()\n root.resizable(False, False)\n \n scrnw = (root.winfo_screenwidth()//2) - scrnwparam\n scrnh = (root.winfo_screenheight()//2) - scrnhparam\n root.geometry('375x250+{}+{}'.format(scrnw, scrnh))\n \n app = GUI(root)\n root.mainloop()\n\n\n\n\nclass GUI(Frame):\n def __init__(self, parent):\n Frame.__init__(self, parent, background=\"white\") \n self.parent = parent\n self.parent.title(\"\")\n self.pack(fill=BOTH, expand=1)\n self.initUI()\n \n def initUI(self):\n global PlaceInputDirLbl\n PlaceInputDirLbl = Label(text=\"Выберите файлы на сортировку:\", background=\"white\", font=(\"Arial\", 10))\n \n global PlaceInputDirEntry\n PlaceInputDirEntry = Entry(fg=\"black\", bg=\"white\", width=20)\n PlaceInputDirEntry.configure(state = DISABLED)\n \n global PlaceInputDirBtn\n PlaceInputDirBtn = Button(text='Выбор', command=PlaceInputDirChoose)\n \n global PlaceInputFilesCountLbl\n PlaceInputFilesCountLbl = Label(text=\"\", background=\"white\")\n \n \n global PlaceOutputDirLbl\n PlaceOutputDirLbl = Label(text=\"Выберите папку для отсортированных:\", background=\"white\", font=(\"Arial\", 10))\n \n global PlaceOutputDirEntry\n PlaceOutputDirEntry = Entry(fg=\"black\", bg=\"white\", width=20)\n PlaceOutputDirEntry.configure(state = DISABLED)\n \n global PlaceOutputDirBtn\n PlaceOutputDirBtn = Button(text='Выбор', command=PlaceOutputDirChoose)\n \n \n global PlaceStartBtn\n PlaceStartBtn = Button(text='Запуск', command=PlaceStart)\n PlaceStartBtn.configure(state = DISABLED)\n \n global PlaceStatusLbl\n PlaceStatusLbl = Label(text=\"\", background=\"white\")\n \n global PlaceTimeLbl\n PlaceTimeLbl = Label(text=\"\", background=\"white\")\n\n\n PlaceInputDirLbl.place (x=16, y=7+40)\n PlaceInputDirEntry.place (x=20, y=30+40, width=275)\n PlaceInputDirBtn.place (x=305, y=29+40, height=20)\n PlaceInputFilesCountLbl.place (x=16, y=49+40)\n PlaceOutputDirLbl.place (x=17, y=87+35)\n PlaceOutputDirEntry.place (x=75, y=111+35, width=275)\n PlaceOutputDirBtn.place (x=20, y=110+35, height=20)\n PlaceStartBtn.place (x=20, y=190, width=85)\n PlaceStatusLbl.place (x=120, y=183)\n PlaceTimeLbl.place (x=120, y=200)\n \n \n global PlaceInputDir\n global PlaceFilesArray\n global PlaceInputDirSel\n global PlaceOutputDirSel\n global PlaceWorking\n global PlaceStartTime\n \n PlaceInputDir = \"\"\n PlaceFilesArray = []\n \n PlaceInputDirSel = False\n PlaceOutputDirSel = False\n \n PlaceWorking = False\n PlaceStartTime = \"\"\n\n\n\n\n\ndef PlaceInputDirChoose():\n global PlaceInputDir\n\n PlaceInputDir = filedialog.askdirectory(title=\"Выберите папку на сортировку\")\n \n if PlaceInputDir:\n PlaceInputDirEntry.configure(state = NORMAL)\n PlaceInputDirEntry.delete(0,END)\n PlaceInputDirEntry.insert(0,str(Path(PlaceInputDir).name))\n 
PlaceInputDirEntry.configure(state = DISABLED)\n \n print('PlaceInputDir:', PlaceInputDir)\n \n PlaceInputDirCheck()\n else:\n print('PlaceInputDir is NOT defined')\n \n\ndef PlaceInputDirCheck():\n global PlaceInputDir\n global PlaceFilesArray\n global PlaceInputDirSel\n global PlaceOutputDirSel\n\n PlaceFilesArray.clear()\n for file in os.listdir(PlaceInputDir):\n if file.endswith(\".pdf\"):\n PlaceFilesArray.append(os.path.join(PlaceInputDir, file))\n \n if len(PlaceFilesArray) == 0:\n PlaceInputFilesCountLbl.config(text = 'Нет файлов PDF !')\n print('no pdf in folder !')\n PlaceInputDirSel = False\n else:\n PlaceInputFilesCountLbl.config(text = 'Количество файлов PDF: ' + str(len(PlaceFilesArray)))\n print('number of valid files -', str(len(PlaceFilesArray)))\n PlaceInputDirSel = True\n\n if PlaceInputDirSel and PlaceOutputDirSel:\n PlaceStartBtn.configure(state = NORMAL)\n else:\n PlaceStartBtn.configure(state = DISABLED)\n\n\ndef PlaceOutputDirChoose():\n global PlaceOutputDir\n global PlaceInputDirSel\n global PlaceOutputDirSel\n \n PlaceOutputDir = filedialog.askdirectory(title=\"Выберите папку для отсортированных\")\n if PlaceInputDir:\n PlaceOutputDirEntry.configure(state = NORMAL)\n PlaceOutputDirEntry.delete(0,END)\n PlaceOutputDirEntry.insert(0,str(Path(PlaceOutputDir).name))\n PlaceOutputDirEntry.configure(state = DISABLED)\n \n PlaceOutputDirSel = True\n print('PlaceOutputDir:', PlaceOutputDir)\n else:\n print('PlaceInputDir is NOT defined')\n \n \n if PlaceInputDirSel and PlaceOutputDirSel:\n PlaceStartBtn.configure(state = NORMAL)\n else:\n PlaceStartBtn.configure(state = DISABLED)\n\n\ndef PlaceStart():\n\n PlaceMainThreadthread = Thread(target=PlaceMainThread)\n PlaceMainThreadthread.start()\n timethread = Thread(target=PlaceTimeUpdater)\n timethread.start()\n\n\ndef PlaceMainThread():\n global PlaceWorking\n global PlaceStartTime\n\n global PlaceInputDir\n global PlaceOutputDir\n global PlaceFilesArray\n \n PlaceStartTime = time.time()\n PlaceWorking = True\n PlaceBlockGUI(True)\n \n for f in range(len(PlaceFilesArray)):\n print(\"_________________________\")\n print('Документ №{0}: {1}'.format(f+1, Path(PlaceFilesArray[f]).name))\n \n invoicedata = PlaceFileTextSearch(PlaceFilesArray[f])\n if isinstance(invoicedata, list):\n statustext = \"Обработка {0} из {1}\".format(f, len(PlaceFilesArray))\n PlaceStatusLbl.config(text = str(statustext))\n print('ИНН, КПП документа: {0}'.format(invoicedata))\n \n fileoutputdir = Path(PlaceOutputDir, invoicedata[1])\n fileoutputdirexist = os.path.exists(fileoutputdir)\n print('fileoutputdir: {0}, exists: {1}'.format(fileoutputdir, fileoutputdirexist))\n \n if not fileoutputdirexist:\n os.makedirs(fileoutputdir)\n \n fileoutputpath = Path(fileoutputdir, Path(PlaceFilesArray[f]).name).as_posix()\n shutil.move(PlaceFilesArray[f], fileoutputpath)\n \n \n else:\n print('Не найдено ИНН, КПП !')\n msgbxlbl = ['В документе не найдено ИНН, КПП !', '{0}'.format(PlaceFilesArray[f])]\n messagebox.showerror(\"\", \"\\n\".join(msgbxlbl))\n \n \n PlaceStatusLbl.config(text = \"Обработка завершена !\")\n PlaceWorking = False\n PlaceBlockGUI(False)\n PlaceInputDirCheck()\n\n\ndef PlaceFileTextSearch(file):\n\n with open(file, 'rb') as pdftomine:\n manager = PDFResourceManager()\n laparams = LAParams()\n dev = PDFPageAggregator(manager, laparams=laparams)\n interpreter = PDFPageInterpreter(manager, dev)\n pages = PDFPage.get_pages(pdftomine)\n\n for pagenumber, page in enumerate(pages):\n if pagenumber == 0:\n interpreter.process_page(page)\n layout = 
dev.get_result()\n \n for textbox in layout:\n if isinstance(textbox, LTText):\n for line in textbox:\n text = line.get_text().replace('\\n', '')\n if len(text) == 22 or len(text) == 20:\n invoiceinn = re.sub(\"[^0-9]\", \"\", (text.partition(\"/\")[0]))\n invoicekpp = re.sub(\"[^0-9]\", \"\", (text.partition(\"/\")[2]))\n if invoiceinn.isnumeric() and invoicekpp.isnumeric():\n if len(invoiceinn)==10 and len(invoicekpp)==9:\n #print(\"_________________________\")\n #print('ИНН документа: {0}'.format(invoiceinn))\n #print('КПП документа: {0}'.format(invoicekpp))\n invoicedata = [invoiceinn, invoicekpp]\n return invoicedata\n break\n return \"NONE\"\n\n\ndef PlaceBlockGUI(yes):\n if yes:\n PlaceInputDirBtn.configure(state = DISABLED)\n PlaceOutputDirBtn.configure(state = DISABLED)\n PlaceStartBtn.configure(state = DISABLED)\n else:\n PlaceInputDirBtn.configure(state = NORMAL)\n PlaceOutputDirBtn.configure(state = NORMAL)\n PlaceStartBtn.configure(state = NORMAL)\n\n\ndef PlaceTimeUpdater():\n global PlaceWorking\n global PlaceStartTime\n\n time.sleep(0.01)\n while PlaceWorking:\n CreateDocTime = time.time()\n result = CreateDocTime - PlaceStartTime\n result = datetime2.timedelta(seconds=round(result))\n PlaceTimeLbl.config(text = str(result))\n time.sleep(0.01)\n\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"albertkovach/Python","sub_path":"templates/pdf/storesort.py","file_name":"storesort.py","file_ext":"py","file_size_in_byte":10149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"73123422004","text":"from typing import List\nfrom project.animal import Animal\nfrom project.worker import Worker\n\n\nclass Zoo:\n def __init__(self, name: str, budget: int, animal_capacity: int, workers_capacity: int):\n self.name = name\n self.budget = budget\n self.animal_capacity = animal_capacity\n self.workers_capacity = workers_capacity\n self.animals = []\n self.workers = []\n\n @property\n def budget(self):\n return self.__budget\n \n @budget.setter\n def budget(self, value):\n self.__budget = value\n \n @property\n def animal_capacity(self):\n return self.__animal_capacity\n \n @animal_capacity.setter\n def animal_capacity(self, value):\n self.__animal_capacity = value\n \n @property\n def workers_capacity(self):\n return self.__workers_capacity\n \n @workers_capacity.setter\n def workers_capacity(self, value):\n self.__workers_capacity = value\n\n def add_animal(self, animal: Animal, price: [int, float]):\n if self.__budget >= price and self.__animal_capacity > len(self.animals):\n self.animals.append(animal)\n self.__budget -= price\n return f\"{animal.name} the {animal.__class__.__name__} added to the zoo\"\n if self.__budget < price and self.__animal_capacity > len(self.animals):\n return \"Not enough budget\"\n if self.__budget >= price and self.__animal_capacity == len(self.animals):\n return \"Not enough space for animal\"\n\n def hire_worker(self, worker: Worker):\n if self.__workers_capacity > len(self.workers):\n self.workers.append(worker)\n return f\"{worker.name} the {worker.__class__.__name__} hired successfully\"\n return \"Not enough space for worker\"\n\n def fire_worker(self, worker: str):\n for w in self.workers:\n if w.name == worker:\n self.workers.remove(w)\n return f\"{worker} fired successfully\"\n return f\"There is no {worker} in the zoo\"\n\n def pay_workers(self):\n if sum([w.salary for w in self.workers]) <= self.__budget:\n self.__budget -= sum([w.salary for w in self.workers])\n return f\"You payed your workers. They are happy. Budget left: {self.__budget}\"\n return \"You have no budget to pay your workers. They are unhappy\"\n\n def tend_animals(self):\n total_money_needed = 0\n for animal in self.animals:\n total_money_needed += animal.money_for_care\n if self.__budget >= total_money_needed:\n self.__budget -= total_money_needed\n return f\"You tended all the animals. They are happy. Budget left: {self.__budget}\"\n return \"You have no budget to tend the animals. 
They are unhappy.\"\n\n def profit(self, amount):\n self.__budget += amount\n\n def animals_status(self):\n lions = list(filter(lambda a: a.__class__.__name__ == \"Lion\", self.animals))\n tigers = list(filter(lambda a: a.__class__.__name__ == \"Tiger\", self.animals))\n cheetahs = list(filter(lambda a: a.__class__.__name__ == \"Cheetah\", self.animals))\n\n result = [\n f\"You have {len(self.animals)} animals\",\n f\"----- {len(lions)} Lions:\",\n ]\n result.extend(lions)\n\n result.append(f\"----- {len(tigers)} Tigers:\")\n result.extend(tigers)\n\n result.append(f\"----- {len(cheetahs)} Cheetahs:\")\n result.extend(cheetahs)\n\n return \"\\n\".join(str(x) for x in result)\n\n def workers_status(self):\n info = {\"Keeper\": [], \"Caretaker\": [], \"Vet\": []}\n [info[w.__class__.__name__].append(str(w)) for w in self.workers]\n\n result = [\n f\"You have {len(self.workers)} workers\",\n f\"----- {len(info['Keeper'])} Keepers:\",\n *info['Keeper'],\n f\"----- {len(info['Caretaker'])} Caretakers:\",\n *info['Caretaker'],\n f\"----- {len(info['Vet'])} Vets:\",\n *info['Vet'],\n ]\n\n return \"\\n\".join(result)\n","repo_name":"ldmario/SoftUni-Courses","sub_path":"04. OOP with Python February 2023/Encapsulation/Exercise/01. wild_cat_zoo/project/zoo.py","file_name":"zoo.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"16243965084","text":"with open(\"day7.txt\") as f:\n lines = f.readlines()\n\nmine = \"shiny gold\"\nbag_rules = {}\nfor line in lines:\n words = line.split(\" \")\n outer = \" \".join(words[:2])\n bag_rules[outer] = []\n if \"no other bags\" not in line:\n segments = \" \".join(words[4:])\n bag_rules[outer] = []\n for segment in segments.split(\",\"):\n inner = \" \".join(segment.strip(\" \").split(\" \")[1:3])\n bag_rules[outer].append(inner)\n\ndef allowed(outer, inner):\n print(outer, inner)\n if len(bag_rules[outer]) == 0:\n return False\n elif inner in bag_rules[outer]:\n return True\n else:\n return any([allowed(key, inner) for key in bag_rules[outer]])\n\ncount = 0\nfor key in bag_rules:\n if allowed(key, mine):\n count += 1\n\nprint(count)\n","repo_name":"wplohrmann/projects","sub_path":"aoc_2020/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"20077166727","text":"from cgi import test\nfrom selenium import webdriver\nfrom time import sleep\nclass TestEven:\n def test(self):\n self.driver = webdriver.Chrome()\n url = 'http://www.baidu.com'\n self.driver.get(url)\n self.driver.maximize_window()\n self.driver.implicitly_wait(30)\n input_x = self.driver.find_element_by_id('su')\n input_x.send_keys('测试---软件测试')\n sleep(2)\n even_x = self.driver.find_element_by_id('kw')\n even_x.click()\n sleep(2)\n print(input_x)\n print(even_x)\n self.driver.quit()\n return input_x,even_x\n \n \n# if __name__ == '__main__':\n # test()\n\n# test1 = TestEven()\n","repo_name":"Elt-wlj/learn","sub_path":"LogTest/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
+{"seq_id":"41054781890","text":"import base64\nimport json\nimport sys\nimport time\n\nimport serial\n\n\nclass SerialConnection(object):\n def __init__(\n self,\n in_port,\n in_baud,\n in_parity,\n in_data_bits,\n in_stop_bits,\n in_hw_ctrl,\n in_sw_ctrl,\n in_timeout=60,\n termination=\"\\r\\n\",\n ):\n super().__init__()\n self.port = in_port\n self.baud = in_baud\n self.parity = get_parity_value(in_parity)\n self.data_bits = in_data_bits\n self.stop_bits = in_stop_bits\n self.timeout = in_timeout\n self.rtscts = in_hw_ctrl\n self.xonxoff = in_sw_ctrl\n self.termination = termination\n self.cmd_read_wait = 0.1\n self.cmd_timeout = 1\n self.child = None\n self.logs = []\n\n def connect(self):\n result_dict = {\"error\": 0, \"message\": \"\"}\n try:\n self.child = serial.Serial(\n port=self.port,\n baudrate=self.baud,\n bytesize=self.data_bits,\n parity=self.parity,\n stopbits=self.stop_bits,\n timeout=self.timeout,\n xonxoff=self.xonxoff,\n rtscts=self.rtscts,\n )\n if self.child:\n result_dict[\"child\"] = self\n return_value = self.child.isOpen()\n if return_value:\n self.child.flushInput()\n self.child.flushOutput()\n\n else:\n result_dict[\"error\"] = 1\n result_dict[\"message\"] = \"child is None\"\n self.logs.append(\"Cannot open the serial connection to the device\")\n except Exception as e:\n result_dict[\"error\"] = 1\n result_dict[\"message\"] = e\n self.logs.append(str(e))\n return result_dict\n\n def disconnect(self):\n if self.child:\n self.child.close()\n\n def send_command_device(self, cmd):\n result_dict = {\"error\": 0}\n try:\n if self.child and self.child.isOpen():\n cmd_write = cmd.encode(\"ascii\") + self.termination.encode(\"ascii\")\n self.child.write(cmd_write)\n # Read the output and send it back\n msg = self.read()\n else:\n result_dict[\"error\"] = 1\n msg = \"The port is not open\"\n self.logs.append(\"Failed to open the port\")\n except Exception as e:\n result_dict[\"error\"] = 1\n msg = \"An Exception occurred while writing to port \" + self.port\n print(str(e))\n self.logs.append(str(e))\n\n result_dict[\"message\"] = msg\n\n return result_dict\n\n def send_command(self, cmd, timeout=30):\n self.cmd_timeout = timeout\n result_dict = self.connect()\n if result_dict[\"error\"] == 0:\n result_dict = self.send_command_device(cmd)\n self.disconnect()\n\n return result_dict\n\n def read(self):\n \"\"\"\n read data from serial port\n :return: read data\n \"\"\"\n time.sleep(self.cmd_read_wait) # Need to wait before reading\n output = []\n self.child.timeout = 1\n length = 1\n # Timeout\n time_spent_so_far = 0.0\n start_time = time.time()\n while length != 0 and time_spent_so_far <= self.cmd_timeout:\n msg = self.child.readline().decode(\"ascii\").strip()\n output.append(msg)\n length = len(msg)\n delta = time.time() - start_time\n time_spent_so_far = delta\n return output\n\n\n# Static methods\ndef get_parity_value(in_parity):\n parity = None\n if in_parity == 0:\n parity = serial.PARITY_NONE\n elif in_parity == 1:\n parity = serial.PARITY_ODD\n elif in_parity == 2:\n parity = serial.PARITY_EVEN\n return parity\n\n\nif __name__ == \"__main__\":\n arg_jsn_string = sys.argv[1]\n base64_bytes = arg_jsn_string.encode(\"ascii\")\n message_bytes = base64.b64decode(base64_bytes)\n message = message_bytes.decode(\"ascii\")\n arg_dict = json.loads(message)\n\n hex_string_termination = arg_dict[\"termination\"]\n in_pck_termination = \"\"\n i = 0\n while (len(hex_string_termination)) > i:\n hex_data = hex_string_termination[i : i + 2]\n hex_data = int(hex_data, 16)\n 
hex_data = chr(hex_data)\n in_pck_termination += hex_data\n i += 2\n # cmd\n conn = SerialConnection(\n in_port=arg_dict[\"port\"],\n in_baud=arg_dict[\"baud\"],\n in_parity=arg_dict[\"parity\"],\n in_data_bits=arg_dict[\"data_bits\"],\n in_stop_bits=arg_dict[\"stop_bits\"],\n in_hw_ctrl=arg_dict[\"rtscts\"],\n in_sw_ctrl=arg_dict[\"xonxoff\"],\n termination=in_pck_termination,\n )\n result = conn.send_command(arg_dict[\"cmd\"], arg_dict[\"timeout\"])\n print(result[\"message\"])\n","repo_name":"terragraph/terragraph-ctf","sub_path":"ctf/common/connections/serial_jumphost_api/serial_api_v1/SerialConnection_api.py","file_name":"SerialConnection_api.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
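The __main__ block above expects sys.argv[1] to be a base64-encoded JSON dict. A small sketch of building that argument from the caller's side (key names are taken from the arg_dict lookups in the script; the values shown are illustrative):

import base64
import json

args = {
    "port": "/dev/ttyUSB0",   # serial device to open
    "baud": 115200,
    "parity": 0,              # 0=NONE, 1=ODD, 2=EVEN, per get_parity_value above
    "data_bits": 8,
    "stop_bits": 1,
    "rtscts": False,          # hardware flow control
    "xonxoff": False,         # software flow control
    "termination": "0D0A",    # hex digits for "\r\n", decoded two chars at a time
    "cmd": "uname -a",
    "timeout": 30,
}
payload = base64.b64encode(json.dumps(args).encode("ascii")).decode("ascii")
print("python SerialConnection_api.py", payload)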
+{"seq_id":"22700406307","text":"import numpy as np\n# import structlog\nfrom eliot import start_action, Message, to_file\nfrom pathlib import Path\nfrom pyqtgraph import ViewBox\nfrom PySide2.QtWidgets import QMainWindow, QFileDialog, QMessageBox\nfrom PySide2.QtCore import QObject, QThread, Signal, Slot, QMutex\nfrom . import common\nfrom .common import PlotData, UiSettings\nfrom .comp_worker import ComputationWorker\nfrom .exp_worker import ExperimentWorker\nfrom .generated_ui import Ui_MainWindow\n\n\n# logger = structlog.get_logger()\nto_file(open(\"log.txt\", \"w\"))\n\n\nclass MainWindowSignals(QObject):\n measure = Signal()\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.signals = MainWindowSignals()\n self.save_data_dir = None\n self.current_measurement = 0\n self.max_measurements = 0\n self.collecting = False\n self.time_axis = None\n self.mutex = QMutex()\n self.comp_thread = QThread()\n self.exp_thread = QThread()\n self._connect_components()\n self._set_initial_widget_states()\n self._store_line_objects()\n self._set_plot_mouse_mode()\n\n def _set_initial_widget_states(self):\n with start_action(action_type=\"_set_initial_widget_states\"):\n self.update_max_measurements(self.ui.measurements.value())\n self.ui.stop_btn.setDisabled(True)\n self.ui.reset_avg_btn.setDisabled(True)\n self.ui.save_loc.setDisabled(True)\n self.ui.save_loc_browse_btn.setDisabled(True)\n\n def _connect_components(self):\n \"\"\"Connect widgets and events to make the UI respond to user interaction.\n \"\"\"\n # Start/Stop Buttons\n self.ui.start_btn.clicked.connect(self.start_collecting)\n self.ui.stop_btn.clicked.connect(self.stop_collecting)\n # Measurement Counter\n self.ui.measurements.valueChanged.connect(self.update_max_measurements)\n # Save data controls\n self.ui.save_data_checkbox.stateChanged.connect(self.save_loc_set_state)\n self.ui.save_loc_browse_btn.clicked.connect(self.get_save_location)\n # Start/Stop point\n self.ui.stop_pt_checkbox.stateChanged.connect(self.stop_pt_set_state)\n # Dark current\n self.ui.dark_curr_checkbox.stateChanged.connect(self.dark_curr_set_state)\n\n def closeEvent(self, event):\n \"\"\"Clean up worker threads if the window is closed while collecting data.\n\n Notes\n -----\n This overrides the default closeEvent method of QMainWindow.\n \"\"\"\n with start_action(action_type=\"close\"):\n if self.collecting:\n with start_action(action_type=\"quit_threads\"):\n self.comp_thread.quit()\n self.exp_thread.quit()\n with start_action(action_type=\"wait_comp_thread\"):\n self.comp_thread.wait()\n with start_action(action_type=\"wait_exp_thread\"):\n self.exp_thread.wait()\n event.accept()\n\n def _store_line_objects(self):\n \"\"\"Store references to the lines so the data can be updated later.\n \"\"\"\n starting_data = (np.arange(100), np.zeros(100))\n self.live_par_line = self.ui.live_par_graph.plot(*starting_data)\n self.live_perp_line = self.ui.live_perp_graph.plot(*starting_data)\n self.live_ref_line = self.ui.live_ref_graph.plot(*starting_data)\n self.live_da_par_line = self.ui.live_da_par_graph.plot(*starting_data)\n self.live_da_perp_line = self.ui.live_da_perp_graph.plot(*starting_data)\n self.live_da_cd_line = self.ui.live_da_cd_graph.plot(*starting_data)\n self.avg_da_par_line = self.ui.avg_da_par_graph.plot(*starting_data)\n self.avg_da_perp_line = self.ui.avg_da_perp_graph.plot(*starting_data)\n self.avg_da_cd_line = 
self.ui.avg_da_cd_graph.plot(*starting_data)\n\n def _set_plot_mouse_mode(self):\n # Use rectangle-select mouse mode on every plot in the Live/Average tabs.\n graphs = [\n self.ui.live_par_graph,\n self.ui.live_perp_graph,\n self.ui.live_ref_graph,\n self.ui.live_da_par_graph,\n self.ui.live_da_perp_graph,\n self.ui.live_da_cd_graph,\n self.ui.avg_da_par_graph,\n self.ui.avg_da_perp_graph,\n self.ui.avg_da_cd_graph,\n ]\n for graph in graphs:\n graph.getPlotItem().getViewBox().setMouseMode(ViewBox.RectMode)\n\n @Slot(np.ndarray)\n def set_time_axis(self, values):\n with start_action(action_type=\"set_time_axis\"):\n self.time_axis = values * 1e6\n\n @Slot(int)\n def update_max_measurements(self, x):\n with start_action(action_type=\"update_max_measurements\", new_max=x):\n self.max_measurements = x\n self.ui.measurement_counter_label.setText(\n f\"{self.current_measurement}/{self.max_measurements}\"\n )\n\n @Slot(int)\n def update_current_measurement(self, x):\n with start_action(action_type=\"update_current_measurement\", new_meas=x):\n self.current_measurement = x\n self.ui.measurement_counter_label.setText(\n f\"{self.current_measurement}/{self.max_measurements}\"\n )\n\n @Slot()\n def start_collecting(self):\n \"\"\"Begins collecting data when the \"Start\" button is pressed.\n \"\"\"\n with start_action(action_type=\"start_collecting\"):\n settings, should_quit = self._collect_settings()\n if should_quit:\n Message.log(should_quit=should_quit)\n return\n with start_action(action_type=\"create_workers\"):\n self.comp_worker = ComputationWorker(self.mutex, settings)\n self.exp_worker = ExperimentWorker(self.mutex, settings)\n self._connect_worker_signals()\n self.comp_worker.moveToThread(self.comp_thread)\n self.exp_worker.moveToThread(self.exp_thread)\n with start_action(action_type=\"start_threads\"):\n self.comp_thread.start()\n self.exp_thread.start()\n self.signals.measure.emit()\n Message.log(signal=\"measure\")\n self._disable_acq_controls()\n\n def _collect_settings(self):\n \"\"\"Collect all the settings from the UI.\n \"\"\"\n with start_action(action_type=\"collect_settings\"):\n settings = UiSettings()\n settings, should_quit = self._collect_meas_settings(settings)\n if should_quit:\n return settings, should_quit\n settings, should_quit = self._collect_instr_settings(settings)\n if should_quit:\n return settings, should_quit\n settings, should_quit = self._collect_save_settings(settings)\n if should_quit:\n return settings, should_quit\n settings, should_quit = self._collect_start_stop_settings(settings)\n if should_quit:\n return settings, should_quit\n settings, should_quit = self._collect_dark_curr_settings(settings)\n return settings, should_quit\n\n def _collect_dark_curr_settings(self, settings):\n with start_action(action_type=\"dark_current_settings\"):\n quit = False\n use_dark_curr = self.ui.dark_curr_checkbox.isChecked()\n Message.log(checked=use_dark_curr)\n if not use_dark_curr:\n Message.log(quit=quit)\n return settings, quit\n try:\n dark_curr_par = float(self.ui.dark_curr_par.text())\n dark_curr_perp = float(self.ui.dark_curr_perp.text())\n dark_curr_ref = float(self.ui.dark_curr_ref.text())\n 
settings.dark_curr_par = dark_curr_par\n settings.dark_curr_perp = dark_curr_perp\n settings.dark_curr_ref = dark_curr_ref\n except ValueError:\n quit = True\n Message.log(quit=quit)\n return settings, quit\n\n def _collect_meas_settings(self, settings):\n \"\"\"Collect the number of measurements from the UI.\n \"\"\"\n with start_action(action_type=\"measurement_settings\"):\n quit = False\n settings.num_measurements = self.ui.measurements.value()\n Message.log(quit=quit)\n return settings, quit\n\n def _collect_instr_settings(self, settings):\n \"\"\"Collect the settings from the UI related to the instrument.\n \"\"\"\n with start_action(action_type=\"instrument_settings\"):\n quit = False\n instr_name = self.ui.instr_name.text()\n Message.log(instrument_name=instr_name)\n if instr_name == \"\":\n Message.log(quit=quit)\n quit = True\n settings.instr_name = instr_name\n Message.log(quit=quit)\n return settings, quit\n\n def _collect_save_settings(self, settings):\n \"\"\"Collect the settings from the UI related to saving data.\n \"\"\"\n with start_action(action_type=\"save_data_settings\"):\n quit = False\n should_save = self.ui.save_data_checkbox.isChecked()\n Message.log(checked=should_save)\n if should_save and not self._saving_should_proceed():\n Message.log(quit=quit)\n quit = True\n settings.save = should_save\n settings.save_loc = self.ui.save_loc.text()\n Message.log(dir=settings.save_loc)\n Message.log(quit=quit)\n return settings, quit\n\n def _collect_start_stop_settings(self, settings):\n \"\"\"Collect the settings from the UI related to the Start/Stop points.\n \"\"\"\n with start_action(action_type=\"start_stop_settings\"):\n quit = False\n start_pt = self.ui.start_pt.value()\n settings.start_pt = start_pt\n Message.log(start=start_pt)\n if not self.ui.stop_pt_checkbox.isChecked():\n stop_pt = self.ui.stop_pt.value()\n settings.stop_pt = stop_pt\n if start_pt >= stop_pt:\n self._tell_start_greater_than_stop()\n quit = True\n Message.log(stop=settings.stop_pt)\n Message.log(quit=quit)\n return settings, quit\n\n def _connect_worker_signals(self):\n \"\"\"Connect signals for communication between workers and the main window.\n \"\"\"\n # Produced by the experiment worker\n self.exp_worker.signals.preamble.connect(self.comp_worker.store_preamble)\n self.exp_worker.signals.new_data.connect(self.comp_worker.compute_signals)\n self.exp_worker.signals.done.connect(self.cleanup_when_done)\n # Produced by the computation worker\n self.comp_worker.signals.time_axis.connect(self.set_time_axis)\n self.comp_worker.signals.new_data.connect(self.update_plots)\n self.comp_worker.signals.meas_num.connect(self.update_current_measurement)\n # Produced by the main window\n self.signals.measure.connect(self.exp_worker.measure)\n self.ui.reset_avg_btn.clicked.connect(self.comp_worker.reset_averages)\n\n def _disable_acq_controls(self):\n \"\"\"Disable certain controls while collecting data.\n \"\"\"\n # Disabled\n self.ui.start_btn.setDisabled(True)\n self.ui.instr_name.setDisabled(True)\n self.ui.measurements.setDisabled(True)\n self.ui.save_data_checkbox.setDisabled(True)\n self.ui.save_loc.setDisabled(True)\n self.ui.save_loc_browse_btn.setDisabled(True)\n self.ui.start_pt.setDisabled(True)\n self.ui.stop_pt.setDisabled(True)\n self.ui.stop_pt_checkbox.setDisabled(True)\n # Enabled\n self.ui.stop_btn.setEnabled(True)\n self.ui.reset_avg_btn.setEnabled(True)\n\n def _enable_acq_controls(self):\n \"\"\"Enable certain controls after data collection is complete.\n \"\"\"\n # Enabled\n 
self.ui.start_btn.setEnabled(True)\n self.ui.instr_name.setEnabled(True)\n self.ui.measurements.setEnabled(True)\n self.ui.save_data_checkbox.setEnabled(True)\n if self.ui.save_data_checkbox.isChecked():\n self.ui.save_loc.setEnabled(True)\n self.ui.save_loc_browse_btn.setEnabled(True)\n self.ui.start_pt.setEnabled(True)\n self.ui.stop_pt_checkbox.setEnabled(True)\n if not self.ui.stop_pt_checkbox.isChecked():\n self.ui.stop_pt.setEnabled(True)\n # Disabled\n self.ui.stop_btn.setDisabled(True)\n self.ui.reset_avg_btn.setDisabled(True)\n\n @Slot()\n def stop_collecting(self):\n \"\"\"Stops collecting data when the \"Stop\" button is pressed.\n \"\"\"\n with start_action(action_type=\"stop_collecting\"):\n with start_action(action_type=\"mutex\"):\n self.mutex.lock()\n common.SHOULD_STOP = True\n self.mutex.unlock()\n with start_action(action_type=\"quit_threads\"):\n self.comp_thread.quit()\n self.exp_thread.quit()\n with start_action(action_type=\"wait_comp_thread\"):\n self.comp_thread.wait()\n with start_action(action_type=\"wait_exp_thread\"):\n self.exp_thread.wait()\n self._enable_acq_controls()\n self.current_measurement = 0\n\n @Slot()\n def cleanup_when_done(self):\n \"\"\"Clean up workers and threads after data collection is complete.\n \"\"\"\n with start_action(action_type=\"done_collecting\"):\n with start_action(action_type=\"quit_threads\"):\n self.comp_thread.quit()\n self.exp_thread.quit()\n with start_action(action_type=\"wait_comp_thread\"):\n self.comp_thread.wait()\n with start_action(action_type=\"wait_exp_thread\"):\n self.exp_thread.wait()\n with start_action(action_type=\"mutex\"):\n self.mutex.lock()\n common.SHOULD_STOP = False\n self.mutex.unlock()\n self._enable_acq_controls()\n self.current_measurement = 0\n with start_action(action_type=\"dialog\"):\n QMessageBox.information(\n self, \"Done\", \"The experiment has finished.\", QMessageBox.StandardButton.Ok\n )\n\n @Slot(PlotData)\n def update_plots(self, data):\n \"\"\"Update the plots in the Live/Average tabs when new data is available.\n\n Parameters\n ----------\n data : PlotData\n Three live data channels and the signals computed from them.\n \"\"\"\n with start_action(action_type=\"update_plots\"):\n self.live_par_line.setData(self.time_axis, data.par)\n self.live_perp_line.setData(self.time_axis, data.perp)\n self.live_ref_line.setData(self.time_axis, data.ref)\n if data.da_par is not None:\n with start_action(action_type=\"update_da_plots\"):\n self.live_da_par_line.setData(self.time_axis, data.da_par)\n self.live_da_perp_line.setData(self.time_axis, data.da_perp)\n self.live_da_cd_line.setData(self.time_axis, data.da_cd)\n self.avg_da_par_line.setData(self.time_axis, data.avg_da_par)\n self.avg_da_perp_line.setData(self.time_axis, data.avg_da_perp)\n self.avg_da_cd_line.setData(self.time_axis, data.avg_da_cd)\n\n @Slot(int)\n def save_loc_set_state(self, state):\n \"\"\"Enable or disable the save location controls in response to the checkbox.\n\n Parameters\n ----------\n state : int\n An integer representing the state of the checkbox.\n\n Notes\n -----\n 0 - unchecked\n 2 - checked\n \"\"\"\n with start_action(action_type=\"save_loc_state\", state=state):\n if state == 0:\n self.ui.save_loc.setDisabled(True)\n self.ui.save_loc_browse_btn.setDisabled(True)\n Message.log(save=\"disabled\")\n elif state == 2:\n self.ui.save_loc.setEnabled(True)\n self.ui.save_loc_browse_btn.setEnabled(True)\n Message.log(save=\"enabled\")\n\n @Slot()\n def get_save_location(self):\n \"\"\"Get an existing directory in 
which to store the collected data.\n \"\"\"\n with start_action(action_type=\"get_save_location\"):\n self.save_data_dir = QFileDialog.getExistingDirectory()\n self.ui.save_loc.setText(self.save_data_dir)\n Message.log(dir=self.save_data_dir)\n\n def _save_loc_still_valid(self):\n \"\"\"Ensure that the path to the directory still exists before saving data to it.\n \"\"\"\n save_dir = Path(self.save_data_dir)\n return save_dir.exists()\n\n def _tell_save_loc_is_invalid(self):\n \"\"\"Tell the user that the current save location isn't valid or doesn't exist.\n \"\"\"\n with start_action(action_type=\"dialog\"):\n QMessageBox.critical(\n self,\n \"Invalid Save Location\",\n \"The current save data location is invalid or doesn't exist. Please choose a new location.\",\n QMessageBox.StandardButton.Ok,\n )\n\n def _save_would_overwrite(self):\n \"\"\"Returns True if the save directory contains *anything*.\n \"\"\"\n for item in Path(self.save_data_dir).iterdir():\n return True\n return False\n\n def _should_overwrite(self):\n \"\"\"Asks the user whether data in the save directory should be overwritten.\n \"\"\"\n with start_action(action_type=\"dialog\") as action:\n reply = QMessageBox.warning(\n self,\n \"Overwrite?\",\n \"The current directory contents will be erased. Continue?\",\n QMessageBox.StandardButton.Ok | QMessageBox.StandardButton.Cancel,\n )\n should_overwrite = reply == QMessageBox.StandardButton.Ok\n action.add_success_fields(overwrite=should_overwrite)\n return should_overwrite\n\n def _saving_should_proceed(self):\n \"\"\"Determine whether valid settings have been entered for saving data.\n \"\"\"\n with start_action(action_type=\"saving_should_proceed\"):\n try:\n loc_is_valid = self._save_loc_still_valid()\n except TypeError:\n loc_is_valid = False\n if not loc_is_valid:\n self._tell_save_loc_is_invalid()\n return False\n would_overwrite = self._save_would_overwrite()\n if would_overwrite and (not self._should_overwrite()):\n return False\n return True\n\n @Slot(int)\n def stop_pt_set_state(self, state):\n \"\"\"Enable or disable the \"Stop Point\" controls in response to the checkbox.\n\n Parameters\n ----------\n state : int\n An integer representing the state of the checkbox.\n\n Notes\n -----\n 0 - unchecked\n 2 - checked\n \"\"\"\n with start_action(action_type=\"stop_pt_state\", state=state):\n if state == 2:\n self.ui.stop_pt.setDisabled(True)\n Message.log(stop_pt=\"disabled\")\n elif state == 0:\n self.ui.stop_pt.setEnabled(True)\n Message.log(stop_pt=\"enabled\")\n\n def _tell_start_greater_than_stop(self):\n \"\"\"Tell the user that the Start point must be less than the Stop point.\n \"\"\"\n QMessageBox.critical(\n self,\n \"Invalid Start/Stop Points\",\n \"The Start point must be less than the Stop point.\",\n QMessageBox.StandardButton.Ok,\n )\n\n @Slot(int)\n def dark_curr_set_state(self, state):\n \"\"\"Enable or disable the dark current controls in response to the checkbox.\n\n Parameters\n ----------\n state : int\n An integer representing the state of the checkbox.\n\n Notes\n -----\n 0 - unchecked\n 2 - checked\n \"\"\"\n with start_action(action_type=\"dark_curr_state\", state=state):\n if state == 0:\n self.ui.dark_curr_par.setDisabled(True)\n self.ui.dark_curr_perp.setDisabled(True)\n self.ui.dark_curr_ref.setDisabled(True)\n Message.log(dark_curr=\"disabled\")\n elif state == 2:\n self.ui.dark_curr_par.setEnabled(True)\n self.ui.dark_curr_perp.setEnabled(True)\n self.ui.dark_curr_ref.setEnabled(True)\n 
Message.log(dark_curr=\"enabled\")\n","repo_name":"zmitchell/ns_trcd","sub_path":"ns_trcd/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":21263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
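Nearly every handler in the window above is wrapped in an eliot action. A minimal standalone sketch of that structured-logging pattern (the destination filename is illustrative):

from eliot import start_action, Message, to_file

to_file(open("log.txt", "w"))  # route the structured log to a file, as the module above does

with start_action(action_type="start_collecting") as action:
    Message.log(checked=True)                    # key/value message recorded inside the action
    action.add_success_fields(overwrite=False)   # fields attached when the action finishes cleanly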
+{"seq_id":"38218631636","text":"# file_name:html_parse.py\n# 解析方法一\nfrom bs4 import BeautifulSoup\nimport urllib3\nimport json\nfrom get_html import get_html\nfrom Paper_class import security_Paper\nfrom utils import validateTitle,save_to_file\nimport pandas as pd\nimport warnings\nimport requests\nimport io\nimport os\n\nwarnings.filterwarnings('ignore')\nroot_dir = os.getcwd()\n\ndef list2csv(columns = [\"paper_title\",\"author\",\"paper_link\",\"pdf_link\",\"slides_link\",\"abstract\"],list = None,name=None):\n pd_DataFrame = pd.DataFrame(columns=columns, data=list)\n pd_DataFrame.to_csv(root_dir+name+\".csv\",encoding='utf-8')\n\n\ndef download_content(url):\n \"\"\"\n 第一个函数,用来下载网页,返回网页内容\n 参数 url 代表所要下载的网页网址。\n 整体代码和之前类似\n \"\"\"\n http = urllib3.PoolManager()\n response = http.request(\"GET\", url)\n response_data = response.data\n html_content = response_data.decode()\n return html_content\n\n# 输入参数为要分析的 html 文件名,返回值为对应的 BeautifulSoup 对象\ndef create_doc_from_html(html_content):\n doc = BeautifulSoup(html_content)\n return doc\n\ndef create_doc_from_filename(filename):\n with open(root_dir + \"/\"+ filename, \"r\", encoding='utf-8') as f:\n html_content = f.read()\n doc = BeautifulSoup(html_content)\n return doc\n\ndef security_list_parse(doc,tag=\"h2\",class_=\"node-title\"):\n link_list = doc.body.find_all(tag,class_=class_)\n link_paper = []\n for link in link_list:\n temp = link.find_all(\"a\")\n if len(temp)!=0:\n link_paper.append((paper_dict[\"security\"]+temp[0]['href'],temp[0].text))\n\n return link_paper\n\ndef security_single_parse(link):\n temp_content = create_doc_from_html(download_content(link))\n temp_content = create_doc_from_html(str(temp_content.find_all(\"section\", id=\"content\")))\n title = temp_content.find_all(\"h1\", id=\"page-title\")\n author = temp_content.find_all(\"div\", class_=\"field-name-field-paper-people-text\")\n abstract = temp_content.find_all(\"div\", class_=\"field-name-field-paper-description\")\n pdf_link = temp_content.find_all(\"div\", class_=\"field-name-field-presentation-pdf\")\n slide_link = temp_content.find_all(\"div\", class_=\"field-name-field-paper-slides-file\")\n\n return security_Paper(paper_title=title,abstract=abstract,pdf_link=pdf_link,author=author,slides_link=slide_link,paper_link=link)\n\n\ndef download_pdf(save_path,pdf_name,pdf_url):\n send_headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\",\n \"Connection\": \"keep-alive\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"Accept-Language\": \"zh-CN,zh;q=0.8\"}\n response = requests.get(pdf_url, headers=send_headers)\n bytes_io = io.BytesIO(response.content)\n os.chdir(save_path)\n if not os.path.exists(\"%s.PDF\" % pdf_name):\n with open(\"%s.PDF\" % pdf_name, mode='wb') as f:\n f.write(bytes_io.getvalue())\n else:\n print(\"已存在\")\n os.chdir(root_dir)\n print('%s.PDF,下载成功!' 
% (pdf_name))\n\ndef new_file(root_dir,name):\n os.chdir(root_dir)\n if not os.path.exists(name):\n os.mkdir(name)\n os.chdir(name)\n path = os.getcwd()\n os.chdir(root_dir)\n return path\n\n\n# Past USENIX Security proceedings by year: https://www.usenix.org/conferences/byname/108\n\npaper_dict = {\"security\":\"https://www.usenix.org/\"}\nconference_url = {\"security22_fall\":\"https://www.usenix.org/conference/usenixsecurity22/fall-accepted-papers\",\n \"security22_summer\":\"https://www.usenix.org/conference/usenixsecurity22/summer-accepted-papers\",\n \"security21_fall\":\"https://www.usenix.org/conference/usenixsecurity21/fall-accepted-papers\",\n \"security21_summer\":\"https://www.usenix.org/conference/usenixsecurity21/summer-accepted-papers\",\n \"security20_fall\":\"https://www.usenix.org/conference/usenixsecurity20/fall-accepted-papers\",\n \"security20_summer\":\"https://www.usenix.org/conference/usenixsecurity20/summer-accepted-papers\",\n \"security20_spring\":\"https://www.usenix.org/conference/usenixsecurity20/spring-accepted-papers\",\n \"security19_fall\":\"https://www.usenix.org/conference/usenixsecurity19/fall-accepted-papers\",\n }\n\nif __name__ == '__main__':\n use_history = True\n name = \"security21_fall\"  # must be a key of conference_url\n pdf_save_path = new_file(root_dir=root_dir+\"/pdf_info\",name=name)\n url = conference_url[name]\n\n if not use_history:\n get_html(name=name, url=url)\n result = download_content(url)\n save_to_file(root_dir + \"/history_file/\" + name + \".html\", result)\n doc = create_doc_from_html(result)\n else:\n doc = create_doc_from_filename(\"history_file/\"+name+\".html\")\n\n link_paper = security_list_parse(doc)\n paper_info_list = []\n pdf_link_list = []\n for link, paper_name in link_paper:\n print(paper_name)\n paper = security_single_parse(link)\n if paper.paper_title !=\"\":\n paper_info_list.append([paper.paper_title,paper.author,\n paper.paper_link,paper.pdf_link,\n paper.slides_link,paper.abstract])\n if paper.pdf_link is not None:\n download_pdf(pdf_save_path, validateTitle(paper.paper_title), paper.pdf_link)\n\n # list2csv(list=paper_info_list,name=\"/paper_info/\"+name)\n # for title, link in pdf_link_list:\n # download_pdf(pdf_save_path,title,link)\n\n","repo_name":"xaddwell/paper_crawler","sub_path":"html_parse.py","file_name":"html_parse.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
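A minimal sketch of the find_all pattern security_list_parse relies on, run against an inline HTML fragment instead of a downloaded page:

from bs4 import BeautifulSoup

html = '<body><h2 class="node-title"><a href="/paper-x">Paper X</a></h2></body>'
doc = BeautifulSoup(html, "html.parser")
for h2 in doc.body.find_all("h2", class_="node-title"):
    links = h2.find_all("a")
    if links:
        print(links[0]["href"], links[0].text)  # -> /paper-x Paper X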
+{"seq_id":"24882625627","text":"import collections\n\nclass Trump:\n def __init__(self, mark, num):\n self.mark = mark\n self.num = num\n\nclass Check_flag:\n def is_flush(self, cards):\n return all( i[\"mark\"] == cards[0][\"mark\"] for i in cards)\n\n def is_straight(self, cards):\n values = [i[\"num\"] for i in cards]\n straight = (values == list(range(values[0], values[0]-5, -1) ) or values == [14, 5, 4, 3, 2])\n return straight \n\nclass Porker:\n\n def ans_print(self, card_list, role):\n ans = \"\"\n for i in card_list:\n if 0 is i[\"mark\"]:\n ans = ans + \"S\"\n elif 1 is i[\"mark\"]:\n ans = ans + \"C\"\n elif 2 is i[\"mark\"]:\n ans = ans + \"D\"\n elif 3 is i[\"mark\"]:\n ans = ans + \"H\"\n\n if 14 is i[\"num\"]:\n ans = ans + \"A\"\n elif 13 is i[\"num\"]:\n ans = ans + \"K\"\n elif 12 is i[\"num\"]:\n ans = ans + \"Q\"\n elif 11 is i[\"num\"]:\n ans = ans + \"J\"\n else:\n ans = ans + str(i[\"num\"])\n ans = ans + \" \"\n print(ans)\n print(role)\n exit()\n\n def judgment_role(self, card_list_input):\n card_list = sorted(card_list_input, key = lambda x : x[\"num\"], reverse= True)\n\n straight = Check_flag().is_straight(card_list)\n flush = Check_flag().is_flush(card_list)\n\n if flush and \\\n card_list[0][\"num\"] is 14 and \\\n card_list[1][\"num\"] is 13 and \\\n card_list[2][\"num\"] is 12 and \\\n card_list[3][\"num\"] is 11 and \\\n card_list[4][\"num\"] is 10 :\n self.ans_print(card_list_input, \"ロイヤルストレートフラッシュ\")\n if straight and flush:\n self.ans_print(card_list_input, \"ストレートフラッシュ\")\n\n c = collections.Counter( [i[\"num\"] for i in card_list] )\n \n if c.most_common()[0][1] is 4:\n self.ans_print(card_list_input, \"フォーカード\")\n elif c.most_common()[0][1] is 3 and \\\n c.most_common()[1][1] is 2 : \n self.ans_print(card_list_input, \"フルハウス\")\n\n if flush :\n self.ans_print(card_list_input, \"フラッシュ\")\n\n if straight :\n self.ans_print(card_list_input, \"ストレート\")\n\n if c.most_common()[0][1] is 3 :\n self.ans_print(card_list_input, \"スリーカード\")\n\n if c.most_common()[0][1] is 2 and \\\n c.most_common()[1][1] is 2 : \n self.ans_print(card_list_input, \"ツーペア\")\n\n if c.most_common()[0][1] is 2 :\n self.ans_print(card_list_input, \"ワンペア\")\n\n self.ans_print(card_list_input, \"ハイカード\")\n","repo_name":"sharknasuhorse/isc-kadai","sub_path":"kadai2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"34297136193","text":"import os\n\nreplicate_to_pos_locus_to_sample_to_genotype_dict = {}\n\nms_command = ''\n\nwith open('/home/flavia/Desktop/MstoGfa/out2pop.ms') as f:\n\tms_command = f.readline().strip()\n\n\t# Skit the following 1 row(s)\n\tfor _ in range(1):\n\t\tf.readline()\n\n\t# Fast implementation (to improve for huge simulations)\n\tfor num_rep, replicate in enumerate(f.read().split('//')[1:]):\n\t\tprint('Replicate num.', num_rep)\n\n\t\treplicate_list = replicate.strip().split('\\n')\n\n\t\t# Params\n\t\tparam_dict = {}\n\t\tfor num_row, row in enumerate(replicate_list):\n\t\t\tif ':' not in row:\n\t\t\t\tbreak\n\t\t\tparam_name, param_info = row.split(': ')\n\t\t\tif param_name in row:\n\t\t\t\tparam_dict[param_name] = param_info\n\n\n\t\tif 'positions' not in param_dict.keys():\n\t\t\tprint('ERROR: no positions found for the replicate num. {}'.format(num_rep))\n\t\t\tcontinue\n\t\tparam_dict['positions'] = param_dict['positions'].strip().split(' ')\n\n\t\tif 'segsites' not in param_dict.keys():\n\t\t\tprint('ERROR: no segsites found for the replicate num. {}'.format(num_rep))\n\t\t\tcontinue\n\t\telse:\n\t\t\tparam_dict['segsites'] = int(param_dict['segsites'])\n\n\t\tif 'prob' not in param_dict.keys():\n\t\t\tprint('WARNING: no prob found for the replicate num. {}'.format(num_rep))\n\t\telse:\n\t\t\tparam_dict['prob'] = float(param_dict['prob'])\n\n\t\t\n\n\t\tsteps_to_write = ''\n\t\tpaths_to_write = ''\n\t\tlinks_to_write = ''\n\n\t\t# Diploid organisms\n\t\tfor step_id in range(0, int(param_dict['segsites']) * 2):\n\t\t\t#print('S\\t'+ str(step_id + 1) + '\\t' + ('0' if step_id < int(param_dict['segsites']) else '1'))\n\t\t\tsteps_to_write += 'S\\t'+ str(step_id + 1) + '\\t' + ('0' if step_id < int(param_dict['segsites']) else '1') + '\\n'\n\n\t\t#print('P\\treference_haplotype\\t' + ','.join([str(step_id) + '+' for step_id in range(0, int(param_dict['segsites']))]))\n\t\tpaths_to_write = 'P\\treference_haplotype\\t' + ','.join([str(step_id) + '+' for step_id in range(1, int(param_dict['segsites']) + 1)]) + '\\n'\n\t\t\t\n\t\tnew_dict = {}\n\t\tfor pos in range(0, int(param_dict['segsites'])):\n\t\t\tnew_dict[pos] = [str(pos + 1), str(pos + 1 + 8)]\n\n\t\t#for pos, nodeIdREF_nodeIdALT_list in new_dict.items():\n\t\t#\tprint(pos, '-->', nodeIdREF_nodeIdALT_list)\n\n\t\tstuff_already_seen = set()\n\t\tfor num_haplo, haplotype in enumerate(replicate_list[num_row:]):\n\t\t\txxx_path_list = []\n\t\t\tfor pos in range(0, len(haplotype)):\n\t\t\t\tnode_in_pos = new_dict[pos][int(haplotype[pos])]\n\t\t\t\txxx_path_list.append(node_in_pos + '+')\n\n\t\t\t#print('P\\t' + 'individual_' + str(int(num_haplo/2) + 1) + '_haplotype_' + str(int(num_haplo%2) + 1) + '\\t' + ','.join(xxx_path_list))\n\t\t\tpaths_to_write += 'P\\t' + 'individual_' + str(int(num_haplo/2) + 1) + '_haplotype_' + str(int(num_haplo%2) + 1) + '\\t' + ','.join(xxx_path_list) + '\\n'\n\n\t\t\tif haplotype not in stuff_already_seen: # if not already done, do it\n\t\t\t\tstuff_already_seen.add(haplotype)\n\t\t\t\t#print(haplotype)\n\n\t\t\t\tfor pos in range(0, len(haplotype)-1):\n\t\t\t\t\tnodo_1 = new_dict[pos][int(haplotype[pos])]\n\t\t\t\t\tnodo_2 = new_dict[pos+1][int(haplotype[pos+1])]\n\n\t\t\t\t\tnodo_12 = nodo_1 + '_' + nodo_2\n\t\t\t\t\tif nodo_12 not in stuff_already_seen:\n\t\t\t\t\t\tstuff_already_seen.add(nodo_12) \n\n\t\t\t\t\t\t#print('L\\t' + str(nodo_1) + '\\t+\\t' + str(nodo_2) + '\\t+\\t0M')\n\t\t\t\t\t\tlinks_to_write += 'L\\t' + str(nodo_1) + '\\t+\\t' + str(nodo_2) + 
'\\t+\\t0M\\n'\n\n\n\n\t\tpath_gfa = 'rep_' + str(num_rep) + '.gfa'\n\t\twith open(path_gfa, 'w') as fw:\n\t\t\tfw.write(steps_to_write + paths_to_write + links_to_write)\n\t\t\n\t\tprint(path_gfa + ' written')\n\n\n","repo_name":"Flavia95/Thesis","sub_path":"MstoGfa.py","file_name":"MstoGfa.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
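For orientation, a hedged sketch of the GFA records the writer above emits, assuming a replicate with exactly 8 segregating sites (the only case where the hard-coded +8 offset in new_dict, alternate id = pos + 1 + 8, lines up with the S-line numbering):

S	1	0	(segments 1..8 carry label '0' for reference alleles, 9..16 label '1' for alternates)
P	reference_haplotype	1+,2+,3+,4+,5+,6+,7+,8+
P	individual_1_haplotype_1	1+,10+,...	(one P line per sampled haplotype, two per diploid individual)
L	1	+	10	+	0M	(zero-overlap link between consecutive visited nodes)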
+{"seq_id":"73600944245","text":"import discord\nimport scraper\nimport re\n\nclass MyClient(discord.Client):\n async def on_ready(self):\n print(\"Rastley is online!\")\n \n # check for rick roll in a message\n async def on_message(self, message):\n # return if message author is a bot\n if message.author.bot: return\n\n # check if there's a link in the message using regex and store all links in urls\n urls = re.findall(\"(?Phttps?://[^\\s]+)\", message.content)\n\n # check each url in urls for a rick roll\n for url in urls:\n if scraper.searchForRick(url):\n await message.channel.send(\"<@\" + str(message.author.id) + \"> https://youtu.be/Ux0YNqhaw0I\")\n\n\n\ntoken = input(\"What is the bot token? \")\nclient = MyClient()\nclient.run(token)","repo_name":"QuoteNat/rastley.py","sub_path":"rastley.py","file_name":"rastley.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"34255728875","text":"import logging\nfrom pathlib import Path\n\nimport sys\nfrom fastapi import FastAPI\nfrom loguru import logger\n\nfrom helpers.path_helpers import LOGS_PATH\n\n\ndef init_logging(app: FastAPI):\n formatter = '{level: <8} {time:YYYY-MM-DD HH:mm:ss.SSS} ' \\\n '- {name}:{function} - {message}'\n logger = CustomizeLogger.make_logger(formatter)\n app.logger = logger\n\n\n# See https://medium.com/1mgofficial/how-to-override-uvicorn-logger-in-fastapi-using-loguru-124133cdcd4e\nclass InterceptHandler(logging.Handler):\n loglevel_mapping = {\n 50: 'CRITICAL',\n 40: 'ERROR',\n 30: 'WARNING',\n 20: 'INFO',\n 10: 'DEBUG',\n 0: 'NOTSET',\n }\n\n def emit(self, record):\n try:\n level = logger.level(record.levelname).name\n except AttributeError:\n level = self.loglevel_mapping[record.levelno]\n\n frame, depth = logging.currentframe(), 2\n while frame.f_code.co_filename == logging.__file__:\n frame = frame.f_back\n depth += 1\n\n log = logger.bind(request_id='app')\n log.opt(\n depth=depth,\n exception=record.exc_info\n ).log(level, record.getMessage())\n\n\nclass CustomizeLogger:\n\n @classmethod\n def make_logger(cls, formatter):\n logger = cls.customize_logging(\n filepath=Path(LOGS_PATH, 'app_log.txt'),\n level='INFO',\n retention='1 months',\n rotation='20 days',\n format=formatter\n )\n return logger\n\n @classmethod\n def customize_logging(cls,\n filepath: Path,\n level: str,\n rotation: str,\n retention: str,\n format: str\n ):\n logger.remove()\n logger.add(\n sys.stdout,\n enqueue=True,\n backtrace=True,\n level=level,\n format=format\n )\n logger.add(\n str(filepath),\n rotation=rotation,\n retention=retention,\n enqueue=True,\n backtrace=True,\n level=level,\n format=format\n )\n logging.basicConfig(handlers=[InterceptHandler()], level=0)\n logging.getLogger(\"uvicorn.access\").handlers = [InterceptHandler()]\n for _log in ['uvicorn',\n 'uvicorn.error',\n 'fastapi'\n ]:\n _logger = logging.getLogger(_log)\n _logger.handlers = [InterceptHandler()]\n\n return logger.bind(request_id=None, method=None)\n","repo_name":"NobisIndustries/GitInsight","sub_path":"backend/helpers/logging_helpers.py","file_name":"logging_helpers.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"}
+{"seq_id":"39731821134","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"ipetrash\"\n\n\nfrom pathlib import Path\n\n\nDIR: Path = Path(__file__).resolve().parent\n\nDIR_LOGS: Path = DIR / \"logs\"\nDIR_LOGS.mkdir(parents=True, exist_ok=True)\n\nPORT_WEB: int = 12000\n","repo_name":"gil9red/get_metal_rates","sub_path":"app_web_server/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"12644952140","text":"\"\"\"\nTests split.py\n \n\"\"\"\nimport unittest\nimport numpy as np\nfrom IoTPy.core.agent import Agent, InList\nfrom IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue, run\nfrom IoTPy.agent_types.sink import sink_window\nfrom IoTPy.agent_types.basics import split_e, split_w, fsplit_2e, fsplit_2w\nfrom IoTPy.agent_types.check_agent_parameter_types import *\nfrom IoTPy.helper_functions.recent_values import recent_values\nfrom IoTPy.agent_types.split import *\n\n#------------------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------------------\n# TEST SPLIT\n#------------------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------------------\n\n\nclass test_split_agents(unittest.TestCase):\n \n def test_split_agents(self):\n \n scheduler = Stream.scheduler\n \n s = Stream('s')\n \n u = Stream('u')\n v = Stream('v')\n w = Stream('w')\n \n y = Stream('y')\n z = Stream('z')\n\n\n # Test split\n # func operates on a single element of the single input stream and\n # return a list of elements, one for each output stream.\n def h(element):\n return [element+1, element*2]\n def h_args(element, addend, multiplier):\n return [element+addend, element*multiplier]\n\n in_stream_split = Stream('in_stream_split')\n r = Stream('r')\n t = Stream('t')\n e = split_element(func=h, in_stream=in_stream_split,\n out_streams=[r, t], name='e')\n r_split, t_split = split_element_f(function=h, in_stream=in_stream_split,\n num_out_streams=2, )\n r_args, t_args = split_element_f(\n h_args, in_stream_split, 2, addend=1, multiplier=2)\n\n scheduler.step()\n assert recent_values(r) == []\n assert recent_values(t) == []\n assert recent_values(r_split) == recent_values(r)\n assert recent_values(t_split) == recent_values(t)\n assert recent_values(r_args) == recent_values(r)\n assert recent_values(t_args) == recent_values(t)\n\n in_stream_split.extend(list(range(5)))\n scheduler.step()\n assert recent_values(r) == [1, 2, 3, 4, 5]\n assert recent_values(t) == [0, 2, 4, 6, 8]\n assert recent_values(r_split) == recent_values(r)\n assert recent_values(t_split) == recent_values(t)\n assert recent_values(r_args) == recent_values(r)\n assert recent_values(t_args) == recent_values(t)\n\n in_stream_split.append(10)\n scheduler.step()\n assert recent_values(r) == [1, 2, 3, 4, 5, 11]\n assert recent_values(t) == [0, 2, 4, 6, 8, 20]\n\n in_stream_split.extend([20, 100])\n scheduler.step()\n assert recent_values(r) == [1, 2, 3, 4, 5, 11, 21, 101]\n assert recent_values(t) == [0, 2, 4, 6, 8, 20, 40, 200]\n assert recent_values(r_split) == recent_values(r)\n assert recent_values(t_split) == recent_values(t)\n assert recent_values(r_args) == recent_values(r)\n assert recent_values(t_args) == recent_values(t)\n\n # Test split with kwargs\n def f_list(element, list_of_functions):\n return [f(element) for f in list_of_functions]\n\n def f_0(element):\n return element*2\n def f_1(element):\n return element+10\n\n x = Stream('x')\n rr = Stream('rr')\n tt = Stream('tt')\n ee = split_element(func=f_list, in_stream=x, out_streams=[rr, tt], name='ee',\n list_of_functions=[f_0, f_1])\n x.extend(list(range(5)))\n scheduler.step()\n assert recent_values(rr) == [0, 2, 4, 6, 8]\n assert recent_values(tt) == [10, 11, 12, 13, 14]\n\n # ------------------------------------\n # Test 
split with state\n # func operates on an element of the single input stream and state.\n # func returns a list with one element for each output stream.\n def h_state(element, state):\n return ([element+state, element*state], state+1)\n r_state = Stream(name='r_state')\n t_state = Stream(name='t_state')\n in_stream_split_state = Stream('in_stream_split_state')\n \n e_state = split_element(\n func=h_state, in_stream=in_stream_split_state,\n out_streams=[r_state, t_state], name='e', state=0)\n\n scheduler.step()\n assert recent_values(r_state) == []\n assert recent_values(t_state) == []\n\n in_stream_split_state.extend(list(range(5)))\n scheduler.step()\n assert recent_values(r_state) == [0, 2, 4, 6, 8]\n assert recent_values(t_state) == [0, 1, 4, 9, 16]\n\n in_stream_split_state.append(20)\n scheduler.step()\n assert recent_values(r_state) == [0, 2, 4, 6, 8, 25]\n assert recent_values(t_state) == [0, 1, 4, 9, 16, 100]\n\n in_stream_split_state.extend([44, 93])\n scheduler.step()\n assert recent_values(r_state) == [0, 2, 4, 6, 8, 25, 50, 100]\n assert recent_values(t_state) == [0, 1, 4, 9, 16, 100, 264, 651]\n\n # ------------------------------------\n # Test split with state and args\n \n def hh_state(element, state, increment):\n return ([element+state, element*state], state+increment)\n \n rr_state = Stream(name='rr_state')\n tt_state = Stream(name='tt_state')\n in_stream_split_state_funcargs = Stream('in_stream_split_state_funcargs')\n\n ee_state_agent = split_element(\n func=hh_state,\n in_stream=in_stream_split_state_funcargs,\n out_streams=[rr_state, tt_state],\n name='ee_state_agent', state=0, increment=10)\n\n scheduler.step()\n assert recent_values(rr_state) == []\n assert recent_values(tt_state) == []\n\n in_stream_split_state_funcargs.extend(list(range(5)))\n scheduler.step() \n assert recent_values(rr_state) == [0, 11, 22, 33, 44]\n assert recent_values(tt_state) == [0, 10, 40, 90, 160]\n\n #------------------------------------------------------------------------------------------------\n # UNZIP AGENT TESTS\n #------------------------------------------------------------------------------------------------\n\n s_unzip = Stream('s_unzip')\n u_unzip = Stream('u_unzip')\n x_unzip = Stream('x_unzip')\n \n # ------------------------------------\n # Test unzip\n unzip(in_stream=s_unzip, out_streams=[x_unzip, u_unzip])\n d_unzip_fn = unzip_f(s_unzip, 2) \n \n \n s_unzip.extend([(1,10), (2,15), (3,18)])\n scheduler.step()\n assert recent_values(x_unzip) == [1, 2, 3]\n assert recent_values(u_unzip) == [10, 15, 18]\n assert recent_values(d_unzip_fn[0]) == x_unzip.recent[:3]\n assert recent_values(d_unzip_fn[1]) == u_unzip.recent[:3]\n \n s_unzip.extend([(37,96)])\n scheduler.step()\n assert recent_values(x_unzip) == [1, 2, 3, 37]\n assert recent_values(u_unzip) == [10, 15, 18, 96]\n assert recent_values(d_unzip_fn[0]) == x_unzip.recent[:4]\n assert recent_values(d_unzip_fn[1]) == u_unzip.recent[:4]\n\n\n #------------------------------------------------------------------------------------------------\n # SEPARATE AGENT TESTS\n #------------------------------------------------------------------------------------------------\n s_separate = Stream('s separate')\n u_separate = Stream('u separate')\n x_separate = Stream('x separate')\n\n d_separate = separate(\n in_stream=s_separate, out_streams=[x_separate,u_separate],\n name='d separate')\n x_sep_func, u_sep_func = separate_f(s_separate, 2)\n\n s_separate.extend([(0,10), (1,15), (0,20)])\n scheduler.step()\n assert recent_values(x_separate) 
== [10, 20]\n assert recent_values(u_separate) == [15]\n assert x_sep_func.recent == x_separate.recent\n assert u_sep_func.recent == u_separate.recent\n\n s_separate.extend([(1,96)])\n scheduler.step()\n assert recent_values(x_separate) == [10, 20]\n assert recent_values(u_separate) == [15, 96]\n assert recent_values(x_sep_func) == recent_values(x_separate)\n assert recent_values(u_sep_func) == recent_values(u_separate)\n\n #------------------------------------------------------------------------------------------------\n # TIMED_UNZIP TESTS\n #------------------------------------------------------------------------------------------------\n # timed_unzip tests\n t_unzip = Stream()\n a_unzip = Stream('a_unzip')\n b_unzip = Stream('b_unzip')\n\n timed_unzip(t_unzip, [a_unzip, b_unzip])\n t_unzip_0, t_unzip_1 = timed_unzip_f(in_stream=t_unzip, num_out_streams=2)\n\n t_unzip.extend(\n [(1, [\"A\", None]), (5, [\"B\", \"a\"]), (7, [None, \"b\"]),\n (9, [\"C\", \"c\"]), (10, [None, \"d\"])])\n\n \n scheduler.step()\n assert recent_values(t_unzip_0) == [(1, 'A'), (5, 'B'), (9, 'C')]\n assert recent_values(t_unzip_1) == [(5, 'a'), (7, 'b'), (9, 'c'), (10, 'd')]\n assert recent_values(a_unzip) == recent_values(t_unzip_0)\n assert recent_values(b_unzip) == recent_values(t_unzip_1)\n\n\n #------------------------------------------------------------------------------------------------\n # TEST SPLIT WITH STREAM_ARRAY\n #------------------------------------------------------------------------------------------------\n # Test split_element with StreamArray\n x = StreamArray('x')\n y = StreamArray('y')\n z = StreamArray('z')\n\n def h_args(element, addend, multiplier):\n return [element+addend, element*multiplier]\n\n this_agent = split_element(func=h_args, in_stream=x, out_streams=[y,z],\n addend=1.0 , multiplier=2.0, name='this_agent')\n\n add_to_x = np.linspace(0.0, 4.0, 5)\n x.extend(add_to_x)\n scheduler.step()\n assert np.array_equal(recent_values(y), add_to_x+1.0)\n assert np.array_equal(recent_values(z), add_to_x*2.0)\n\n # Test separate with StreamArray\n x = StreamArray('x', dimension=2)\n y = StreamArray('y')\n z = StreamArray('z')\n\n separate(x, [y,z])\n x.append(np.array([1.0, 10.0]))\n scheduler.step()\n assert np.array_equal(recent_values(z), np.array([10.0]))\n assert np.array_equal(recent_values(y), np.array([]))\n\n x.extend(np.array([[0.0, 2.0], [1.0, 20.0], [0.0, 4.0]]))\n scheduler.step()\n assert np.array_equal(recent_values(z), np.array([10.0, 20.0]))\n assert np.array_equal(recent_values(y), np.array([2.0, 4.0]))\n\n # ------------------------------------------------------\n # TEST split_list\n # ------------------------------------------------------\n x = Stream('x')\n y = Stream('y')\n z = Stream('z')\n\n def f(lst):\n return [v*2 for v in lst], [v*10 for v in lst]\n\n split_list(f, x, [y, z])\n\n x.extend(list(range(3)))\n scheduler.step()\n assert recent_values(y) == [v*2 for v in recent_values(x)]\n assert recent_values(z) == [v*10 for v in recent_values(x)]\n\n x.append(100)\n scheduler.step()\n assert recent_values(y) == [v*2 for v in recent_values(x)]\n assert recent_values(z) == [v*10 for v in recent_values(x)]\n \n\n # ------------------------------------------------------\n # TEST split_window\n # ------------------------------------------------------\n def f(window):\n return max(window), min(window)\n\n x = Stream('x')\n y = Stream('y')\n z = Stream('z')\n \n split_window(\n func=f, in_stream=x, out_streams=[y, z], window_size=2, step_size=2)\n\n 
x.extend(list(range(7)))\n scheduler.step()\n assert recent_values(y) == [1, 3, 5]\n assert recent_values(z) == [0, 2, 4]\n\n \n def f(window):\n return max(window), min(window)\n\n x = Stream('x')\n y = Stream('y')\n z = Stream('z')\n \n split_window(\n func=f, in_stream=x, out_streams=[y, z], window_size=3, step_size=3)\n\n x.extend(list(range(12)))\n scheduler.step()\n assert recent_values(y) == [2, 5, 8, 11]\n assert recent_values(z) == [0, 3, 6, 9]\n\n # ------------------------------------------------------\n # TEST split_tuple\n # ------------------------------------------------------\n x = Stream('x')\n y = Stream('y')\n z = Stream('z')\n split_tuple(in_stream=x, out_streams=[y, z])\n x.append((0, 'A'))\n x.extend([(1, 'B'), (2, 'C')])\n scheduler.step()\n assert recent_values(y) == [0, 1, 2]\n assert recent_values(z) == ['A', 'B', 'C']\n \n\n def f(window):\n return max(window), min(window)\n\n x = Stream('x')\n y = Stream('y')\n z = Stream('z')\n \n split_window(\n func=f, in_stream=x, out_streams=[y, z], window_size=3, step_size=3)\n\n x.extend(list(range(12)))\n scheduler.step()\n assert recent_values(y) == [2, 5, 8, 11]\n assert recent_values(z) == [0, 3, 6, 9]\n\n def test_split_with_basics(self):\n #----------------------------------------------\n # EXAMPLE: SIMPLE SPLIT\n # Split a stream into a list of streams. In this\n # example, a stream (s) is split into two streams:\n # u and v.\n # Decorate a conventional function to get a\n # stream function. This function returns a list\n # of two values corresponding to the two output\n # streams.\n @split_e\n def h(x):\n return [2*x, x+1000]\n\n # Create streams.\n s = Stream()\n u = Stream()\n v = Stream()\n\n # Create agents by calling the decorated function.\n h(s, [u,v])\n\n # Put data into input streams.\n DATA = list(range(5))\n s.extend(DATA)\n\n # Run the agents.\n run()\n\n # Check values of output streams.\n assert recent_values(u) == [2*x for x in DATA]\n assert recent_values(v) == [x+1000 for x in DATA]\n\n #----------------------------------------------\n # EXAMPLE: SPLIT WITH KEYWORD ARGUMENT\n # Split a stream into a list of streams. Use\n # a keyword argument in the splitting function.\n # Decorate a conventional function to get a\n # stream function. This function returns a list\n # of two values corresponding to the two output\n # streams. addend is a keyword argument in the\n # function that creates agents.\n @split_e\n def h(x, addend):\n return [x+addend, x+1000+addend]\n\n # Create streams.\n s = Stream()\n u = Stream()\n v = Stream()\n # Call decorated function.\n ADDEND=10\n h(s, [u,v], addend=ADDEND)\n # Put data into input streams.\n s.extend(DATA)\n # Run the decorated function.\n run()\n # Check values of output streams.\n assert recent_values(u) == [x+ADDEND for x in DATA]\n assert recent_values(v) == [x+1000+ADDEND for x in DATA]\n\n #----------------------------------------------\n # EXAMPLE: SPLIT WITH KEYWORD ARGUMENT AND STATE\n # Split a stream into a list of streams, with\n # a keyword argument and state.\n # Decorate a conventional function to get a\n # stream function. addend and multiplicand are\n # keyword arguments used in the call to create\n # agents. 
The function h returns 2 values:\n # (1) a list of two numbers corresponding to the\n # two output streams and\n # (2) the next state.\n @split_e\n def h(v, state, addend, multiplicand):\n next_state = state + 2\n return ([v+addend+state, v*multiplicand+state],\n next_state)\n\n # Create streams.\n s = Stream()\n u = Stream()\n v = Stream()\n\n # Call decorated function to create agents. The initial state\n # is 0. Include keyword arguments in the call.\n ADDEND = 10\n MULTIPLICAND = 2\n h(s, [u,v], state=0, addend=ADDEND, multiplicand=MULTIPLICAND)\n\n # Put data into input streams.\n s.extend(DATA)\n\n # Run the agent.\n run()\n\n # Check values of output streams.\n assert recent_values(u) == [10, 13, 16, 19, 22]\n assert recent_values(v) == [0, 4, 8, 12, 16]\n\n #----------------------------------------------\n # EXAMPLE: SPLIT WITH STATE AND NO KEYWORD ARGUMENTS\n # Split a stream into a list of streams, with\n # a state.\n # Decorate a conventional function to get a\n # stream function. This function returns two values:\n # a list and the next state, where the list has two\n # values with one value for each output stream.\n @split_e\n def h(v, state):\n next_state = state + 1\n return [v+state, v+1000+state], next_state\n\n # Create streams.\n s = Stream()\n u = Stream()\n v = Stream()\n\n # Call decorated function to create agents.\n h(in_stream=s, out_streams=[u,v], state=0)\n # Put data into input streams.\n s.extend(DATA)\n\n # Run the decorated function.\n run()\n\n # Check values of output streams.\n assert recent_values(u) == [0, 2, 4, 6, 8]\n assert recent_values(v) == [1000, 1002, 1004, 1006, 1008]\n\n #----------------------------------------------\n # EXAMPLE: SPLIT USING FUNCTIONAL FORM FOR\n # SPLITTING A STREAM INTO 2 STREAMS.\n # Split a stream into exactly two streams.\n # This is in functional form, i.e. it creates\n # and returns two streams.\n # Decorate a conventional function to get a\n # stream function.\n @fsplit_2e\n def h(v):\n return [v, v+1000]\n\n # Create streams.\n s = Stream()\n\n # Call decorated function to create agents\n # Note that h creates streams u, v. It creates\n # 2 streams because the decorator is fsplit_2e \n u, v = h(s)\n # Put data into input streams.\n s.extend(DATA)\n # Run the decorated function.\n run()\n # Check values of output streams.\n assert recent_values(u) == DATA\n assert recent_values(v) == [1000+x for x in DATA]\n\n #----------------------------------------------\n # EXAMPLE: SPLIT USING FUNCTIONAL FORM FOR\n # SPLITTING A STREAM INTO 2 STREAMS.\n # Split a stream into exactly two streams, with a\n # keyword argument. This is in functional \n # form, i.e. it creates and returns two streams.\n # Decorate a conventional function to get a\n # stream function.\n @fsplit_2e\n def h(v, addend):\n return [v+addend, v+1000+addend]\n\n # Create streams.\n s = Stream()\n\n # Call decorated function to create agents. Note\n # functional form.\n u, v = h(s, addend=10)\n\n # Put data into input streams.\n s.extend(DATA)\n\n # Run the agents.\n run()\n\n # Check values of output streams.\n assert recent_values(u) == [10, 11, 12, 13, 14]\n assert recent_values(v) == [1010, 1011, 1012, 1013, 1014]\n\n #----------------------------------------------\n # EXAMPLE: FUNCTIONAL FORM\n # Split a stream into exactly two streams, with a\n # state and keyword argument. This is in functional \n # form, i.e. 
it creates and returns two streams.\n # Decorate a conventional function to get a\n # stream function.\n @fsplit_2e\n def h(v, state, addend):\n next_state = state + 1\n return ([v+addend+state, v+1000+addend+state],\n next_state)\n # Create streams.\n s = Stream()\n # Call decorated function.\n u, v = h(s, state=0, addend=10)\n # Put data into input streams.\n s.extend(list(range(5)))\n # Run the decorated function.\n run()\n # Check values of output streams.\n assert recent_values(u) == [10, 12, 14, 16, 18]\n assert recent_values(v) == [1010, 1012, 1014, 1016, 1018]\n\n #----------------------------------------------\n # Split a stream into exactly two streams, with a\n # state. This is in functional form,\n # i.e. it creates and returns two streams.\n # Decorate a conventional function to get a\n # stream function.\n @fsplit_2e\n def hk(v, state):\n next_state = state + 1\n return [v+state, v+1000+state], next_state\n # Create streams.\n s = Stream()\n # Call decorated function.\n u, v = h(s, state=0, addend=10)\n u, v = hk(s, state=0)\n # Put data into input streams.\n s.extend(list(range(5)))\n # Run the decorated function.\n run()\n # Check values of output streams.\n assert recent_values(u) == [0, 2, 4, 6, 8]\n assert recent_values(v) == [1000, 1002, 1004, 1006, 1008]\n\n #----------------------------------------------\n # Split a stream into a list of streams.\n # Window operation\n # Decorate a conventional function to get a\n # stream function.\n @split_w\n def h(window):\n return [sum(window), max(window)]\n # Create streams.\n s = Stream()\n u = Stream()\n v = Stream()\n # Call decorated function.\n h(s, [u,v], window_size=3, step_size=2)\n # Put data into input streams.\n s.extend(list(range(12)))\n # Run the decorated function.\n run()\n # Check values of output streams.\n assert recent_values(u) == [3, 9, 15, 21, 27]\n assert recent_values(v) == [2, 4, 6, 8, 10]\n\n #----------------------------------------------\n # Split a stream into a list of streams with\n # keyword argument. Window operation\n # Decorate a conventional function to get a\n # stream function.\n @split_w\n def h(window, addend):\n return [sum(window)+addend, max(window)+addend]\n\n # Create streams.\n s = Stream()\n u = Stream()\n v = Stream()\n\n # Call decorated function to create agents.\n h(s, [u,v], window_size=3, step_size=2, addend=1000)\n\n # Put data into input streams.\n s.extend(list(range(12)))\n\n # Run the agents.\n run()\n\n # Check values of output streams.\n assert recent_values(u) == [1003, 1009, 1015, 1021, 1027]\n assert recent_values(v) == [1002, 1004, 1006, 1008, 1010]\n\n #----------------------------------------------\n # Split a stream into a list of streams with state and\n # keyword argument. 
Window operation\n # Decorate a conventional function to get a\n # stream function.\n @split_w\n def h(window, state, addend):\n next_state = state + 1\n return ([sum(window)+addend+state,\n max(window)+addend+state], next_state)\n # Create streams.\n s = Stream()\n u = Stream()\n v = Stream()\n # Call decorated function.\n h(s, [u,v], window_size=3, step_size=2, state=0, addend=1000)\n # Put data into input streams.\n s.extend(list(range(12)))\n # Run the decorated function.\n run()\n # Check values of output streams.\n assert recent_values(u) == [1003, 1010, 1017, 1024, 1031]\n assert recent_values(v) == [1002, 1005, 1008, 1011, 1014]\n\n #----------------------------------------------\n # SPLITTING WITH WINDOWS\n #----------------------------------------------\n # EXAMPLE\n # Split a stream into a list of streams with state.\n # Window operation\n # Decorate a conventional function to get a\n # stream function. The first argument of the function\n # is a list, i.e., the window. The function returns\n # two values: a list and the next state where the list\n # has one item for each output stream.\n @split_w\n def h(window, state):\n next_state = state + 1\n return [sum(window)+state, max(window)+state], next_state\n\n # Create streams.\n s = Stream()\n u = Stream()\n v = Stream()\n\n # Call decorated function to create agents.\n h(s, [u,v], window_size=3, step_size=2, state=0)\n\n # Put data into input streams.\n s.extend(list(range(12)))\n\n # Run the agents.\n run()\n\n # Check values of output streams.\n assert recent_values(u) == [3, 10, 17, 24, 31]\n assert recent_values(v) == [2, 5, 8, 11, 14]\n\n #----------------------------------------------\n # Split a stream into exactly TWO streams.\n # WINDOW operation\n # Decorate a conventional function to get a\n # stream function. This is in functional form,\n # i.e. it creates and returns a list of streams.\n @fsplit_2w\n def h(window):\n return sum(window), max(window)\n # Create streams.\n s = Stream()\n # Call decorated function. This function\n # creates and returns two streams.\n u, v = h(s, window_size=3, step_size=2)\n # Put data into input streams.\n s.extend(list(range(12)))\n # Run the decorated function.\n run()\n # Check values of output streams.\n assert recent_values(u) == [3, 9, 15, 21, 27]\n assert recent_values(v) == [2, 4, 6, 8, 10]\n\n #----------------------------------------------\n # Split a stream into exactly two streams with \n # keyword argument. Window operation\n # Decorate a conventional function to get a\n # stream function. This is in functional form,\n # i.e. it creates and returns two streams.\n @fsplit_2w\n def h(window, addend):\n return sum(window)+addend, max(window)+addend*2\n # Create streams.\n s = Stream()\n # Call decorated function. This function\n # creates and returns two streams.\n u, v = h(s, window_size=3, step_size=2, addend=1000)\n # Put data into input streams.\n s.extend(list(range(12)))\n # Run the decorated function.\n run()\n # Check values of output streams.\n assert recent_values(u) == [1003, 1009, 1015, 1021, 1027]\n assert recent_values(v) == [2002, 2004, 2006, 2008, 2010]\n\n\n #----------------------------------------------\n # Split a stream into exactly two streams with \n # state and keyword argument. Window operation\n # Decorate a conventional function to get a\n # stream function. This is in functional form,\n # i.e. 
it creates and returns two streams.\n @fsplit_2w\n def h(window, state, addend):\n next_state = state + 1\n return ([sum(window)+addend+state,\n max(window)+addend*2-state], next_state)\n # Create streams.\n s = Stream()\n # Call decorated function. This function\n # creates and returns two streams.\n u, v = h(s, window_size=3, step_size=2,\n state=0, addend=1000)\n # Put data into input streams.\n s.extend(list(range(12)))\n # Run the decorated function.\n run()\n # Check values of output streams.\n assert recent_values(u) == [1003, 1010, 1017, 1024, 1031]\n assert recent_values(v) == [2002, 2003, 2004, 2005, 2006]\n\n #----------------------------------------------\n # Split a stream into exactly two streams with \n # state. Window operation\n # Decorate a conventional function to get a\n # stream function. This is in functional form,\n # i.e. it creates and returns two streams.\n @fsplit_2w\n def h(window, state):\n next_state = state + 1\n return [sum(window)+state, max(window)-state], next_state\n # Create streams.\n s = Stream()\n # Call decorated function. This function\n # creates and returns two streams.\n u, v = h(s, window_size=3, step_size=2, state=0)\n # Put data into input streams.\n s.extend(list(range(12)))\n # Run the decorated function.\n run()\n # Check values of output streams.\n assert recent_values(u) == [3, 10, 17, 24, 31]\n assert recent_values(v) == [2, 3, 4, 5, 6]\n print ('success')\n\n\n \n\nif __name__ == '__main__':\n unittest.main() \n \n \n \n \n \n \n\n","repo_name":"AssembleSoftware/IoTPy","sub_path":"tests/agent_types/test_split.py","file_name":"test_split.py","file_ext":"py","file_size_in_byte":29306,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"76"}
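# A hedged conceptual model of the window decorators exercised in the tests
# above (an assumption inferred from the assertions, not IoTPy's actual
# implementation): split_w / fsplit_2w slide a window over the input stream
# and append h's i-th return value to the i-th output stream.
def sliding_split(values, h, window_size, step_size, n_out, **kwargs):
    outs = [[] for _ in range(n_out)]
    for i in range(0, len(values) - window_size + 1, step_size):
        results = h(values[i:i + window_size], **kwargs)
        for out, r in zip(outs, results):
            out.append(r)
    return outs

# Reproduces the plain window test above:
u, v = sliding_split(list(range(12)), lambda w: [sum(w), max(w)],
                     window_size=3, step_size=2, n_out=2)
assert u == [3, 9, 15, 21, 27] and v == [2, 4, 6, 8, 10]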
+{"seq_id":"30330289891","text":"import itertools\nimport numpy as np\nfrom GRABIM import LadderCircuitAnalysis\n\n# REFERENCES\n# [1] Thomas R. Cuthbert. Broadband Direct-Coupled and Matching RF Networks. TRCPEP, 1999.\n# [2] Wideband Circuit Design. Herbert J Carlin\n\n# Generating matrix for the grid search as specified in [1], Eq. 6.3.5\ndef generating_matrix(dim):\n lst = list(itertools.product([0, 1], repeat=dim))\n arr = np.array(lst)\n\n ncols = arr.shape[0]\n for i in range(0, ncols):\n s = arr[i, :];\n s = [-1 if x==0 else x for x in s]\n arr[i, :] = s;\n\n return arr;\n\n# Scale the values of the matching network previously normalized to fmax and 1 Ohm\n# Reference: [2] Eq. 6.3.3 \ndef scaleValues_wrt_R_f(x, code, R, f):\n \n for i in range(len(x)):\n if ((code[i] == 'CS') or (code[i] == 'CP')):\n x[i] = x[i]/(R*f)\n elif ((code[i] == 'LS') or (code[i] == 'LP')):\n x[i] = R*x[i]/(f)\n \n return x\n\n\n# GRID SEARCH ALGORITHM\n# Reference [1]: Table 5.5.3. The GRABIM Grid Search Algorithm Without Details.\n# INPUTS:\n# ZS: Source impedance (without normalization)\n# ZL: Load impedance (withour normalization)\n# freq: Range of frequencies where the optimization is desired\n# m: Number of frequency points where optimization must be done\n# code: Candidate topology\n#\n# OUTPUT:\n# v_scaled: Values of the network components (scaled)\ndef GridSearch(ZS, ZL, freq, m, code, verbose=0, delta_X=10):\n ####### Impedance and frequency normalization #######\n # Normalize impedance to 1 Ohm\n max_ZS = np.max(np.abs(ZS))\n max_ZL = np.max(np.abs(ZL))\n max_Z = np.max([max_ZS, max_ZL])\n\n ZS_norm = ZS/max_Z;\n ZL_norm = ZL/max_Z;\n\n # Normalize frequency to 1 rad/s\n max_f = freq[-1]\n f_norm = freq/(max_f*2*np.pi);\n #####################################################\n\n # Log file\n if (verbose):\n f = open(\"GridSearch.log\", \"w\")\n \n dim = len(code);\n\n # Grid building\n base_point = np.ones(dim); # Base point\n C = generating_matrix(dim);\n X = delta_X*base_point*C; # Space for data search\n\n rho_max = 1; # Maximum reflection coefficient\n x_best = base_point; # Best point\n\n n_searches = X.shape[0]; # Number of combinations\n\n while delta_X >= 0.025:\n found_better_x = 0;\n for k in range(0, n_searches):\n sk = delta_X*C[k];\n xk = base_point + sk;\n\n vk = np.exp(xk); # Convert the grid vector into a search space vector\n\n # Calculate the reflection coefficient over the whole frequency span\n rho_k = LadderCircuitAnalysis.get_Input_Reflection_Coeff(ZS_norm, ZL_norm, code, vk, f_norm);\n max_rho_k = np.max(np.abs(rho_k));# Get the maximum\n\n if (verbose):\n print(\"Testing: x=(\", xk, '), v=(', vk, ') -> rho = ', max_rho_k, file = f )\n\n\n if (max_rho_k < rho_max): # A better combination was found\n rho_max = max_rho_k;\n x_best = xk;\n if (verbose):\n print(\"A better point was found x=(\", xk, \"), v=\", vk, ') -> rho =', rho_max, file = f)\n found_better_x = 1; # Then, recenter the grid and examine the search space again (same refinement factor)\n\n base_point = x_best;\n if (found_better_x == 0):\n # After examining the whole search space, shrink the search space around the best point by 1/4\n if (verbose):\n print(\"Shrinking grid (delta_x = deltax/4)\", file = f)\n delta_X *= 0.25;\n else:\n if (verbose):\n print(\"Centering grid around: x=(\", x_best, \"), v=(\", np.exp(x_best), ')', file = f)\n\n # Get reflection coefficient and VSWR of the best point \n RL = LadderCircuitAnalysis.get_ReturnLoss_from_ReflectionCoefficient(rho_max);\n VSWR = 
LadderCircuitAnalysis.get_VSWR_from_ReflectionCoefficient(rho_max);\n if (verbose):\n print(\"Best point found:\", x_best, file = f)\n print(\"Best rho:\", rho_max, \" RL = \", RL, \" VSWR = \", VSWR, file = f)\n\n\n # Transform the grid point into the search space point\n v_best = np.exp(x_best);\n # Scale the result according to the previous normalization\n v_scaled = scaleValues_wrt_R_f(v_best, code, max_Z, max_f*2*np.pi);\n \n rho_max = LadderCircuitAnalysis.get_Input_Reflection_Coeff(ZS, ZL, code, v_scaled, freq);\n rho_max = np.max(np.abs(rho_max))\n RL = LadderCircuitAnalysis.get_ReturnLoss_from_ReflectionCoefficient(rho_max);\n\n if (verbose):\n print(\"Result (scaled)\", v_scaled, file = f)\n f.close()\n return v_scaled, RL\n\n\ndef RemoveIrrelevantComponents(code, v_best, freq, ZS, ZL):\n \n max_ZS = np.max(np.abs(ZS))\n max_ZL = np.max(np.abs(ZL))\n max_Z = np.max([max_ZS, max_ZL])\n \n min_ZS = np.min(np.abs(ZS))\n min_ZL = np.min(np.abs(ZL))\n min_Z = np.min([min_ZS, min_ZL])\n \n k = 0\n index_to_remove = [];\n for comp in code:\n if (comp == 'LS'):\n w = 2*np.pi*freq[-1]; # Impedance at the highest frequency\n X = w*v_best[k];\n print('X(LS) = ', X*min_Z)\n if (X < 0.5*min_Z):\n # Remove component\n index_to_remove = np.append(index_to_remove, k)\n elif (comp == 'LP'):\n w = 2*np.pi*freq[0]; # Impedance at the lowest frequency\n X = w*v_best[k];\n print('X(LP) = ', X*max_Z)\n if (X > 5*max_Z):\n # Remove component\n index_to_remove = np.append(index_to_remove, k)\n elif (comp == 'CS'):\n w = 2*np.pi*freq[0]; # Impedance at the lowest frequency\n X = 1/(w*v_best[k]);\n print('X(CS) = ', X*min_Z)\n if (X < 0.33*max_Z):\n # Remove component\n index_to_remove = np.append(index_to_remove, k)\n elif (comp == 'CP'):\n w = 2*np.pi*freq[-1]; # Impedance at the highest frequency\n X = 1/(w*v_best[k]);\n print('X(CP) = ', X*max_Z)\n if (X > 5*max_Z):\n # Remove component\n index_to_remove = np.append(index_to_remove, k)\n k += 1\n \n # Remove irrelevant components\n print('To remove: ', index_to_remove)\n code = np.delete(code, index_to_remove)\n v_best = np.delete(v_best, index_to_remove)\n \n return [code, v_best]","repo_name":"andresmmera/GRABIM","sub_path":"GRABIM/GridSearch.py","file_name":"GridSearch.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
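# A minimal usage sketch for the GridSearch above. The topology strings in
# 'code' ('LS', 'CP', ...) follow the component codes used by
# scaleValues_wrt_R_f; the call itself is left commented out because it
# depends on the local LadderCircuitAnalysis module (assumptions throughout,
# not a verified example).
import numpy as np

freq = np.linspace(10e6, 100e6, 30)      # 10-100 MHz sweep
ZS = np.full(freq.shape, 50 + 0j)        # 50-ohm source
ZL = 20 + 1j * 2 * np.pi * freq * 5e-9   # resistive load with series inductance
# v_scaled, RL = GridSearch(ZS, ZL, freq, m=len(freq), code=['LS', 'CP'])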
+{"seq_id":"18166150314","text":"# n : 정점의 개수, m : 간선의 개수\nn, m = tuple(map(int, input().split()))\n# graph : n개의 각 정점과 간선으로 이루어진 다른 정점을 연결 리스트로 표현하기 위한 1차원 배열\ngraph = [[] for _ in range(n + 1)]\n# visited : n개의 각 정점에 방문한 여부 확인하기 위한 1차원 배열\nvisited = [False for _ in range(n + 1)]\ncnt = 0 # cnt : 1ㅓㄴ 정점 제외하고 간선 따라 이동시 도달할 수 있는 정점의 총 개수\n\n# m개의 줄에 걸쳐 연결된 두 정점 (x,y) 입력 받아\n# 연결 리스트 graph에 추가\nfor _ in range(m):\n start, end = tuple(map(int, input().split()))\n graph[start].append(end)\n graph[end].append(start)\n\n# 현재 위치한 숫자 curr_num의 다음 이동 숫자 고르기\ndef dfs(curr_num):\n global cnt\n\n # 이동한 정점 curr_num과 연결된 모든 정점(number) 조회\n for number in graph[curr_num]:\n # number가 이미 방문한 정점인 경우 제외\n if not visited[number]:\n cnt += 1\n visited[number] = True\n dfs(number) # 해당 정점 number으로 이동\n\nvisited[1] = True # 1번 정점에 방문했음을 표시\ndfs(1) # 1번 정점에서 시작하여 DFS 탐색 시작\nprint(cnt) # 도달한 총 정점의 개수 출력","repo_name":"sujinjwa/algorithm","sub_path":"DFS/graph-traversal2.py","file_name":"graph-traversal2.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"24273512932","text":"def main():\n\tN=int(input())\n\t\n\ts = input().split(' ')\n\ts2 = input().split(' ')\n\t\n\tvaccine = [0]*N\n\tpatients= [0]*N\n\t\n\tfor x in range(0,N):\n\t\tvaccine[x] = int(s[x])\n\t\tpatients[x] = int(s2[x])\n\t\n\tvaccine.sort()\n\tpatients.sort()\n\t\n\tcount=0\n\tfor x in range(0,N):\n\t\tif vaccine[x] > patients[x]:\n\t\t\tcount+=1\n\t\t\n\tif count != N:\n\t\tprint('No')\n\telse:\n\t\tprint('Yes')\n\t\nmain()\n","repo_name":"jack-x/HackerEarthCode","sub_path":"E_SavePatients.py","file_name":"E_SavePatients.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"23857415515","text":"## getting some errors\nimport cv2\nprint(cv2.__version__)\n\ngoFlag = 0\n\ndef mouse_click(event, x, y, flags, params):\n global x1, y1, x2, y2\n global goFlag \n if event == cv2.EVENT_LBUTTONDOWN:\n x1 = x\n y1 = y\n goFlag = 0\n if event == cv2.EVENT_LBUTTONUP:\n x2 = x\n y2 = y\n goFlag = 1\n\ncv2.namedWindow('piCam')\ncv2.setMouseCallback('piCam', mouse_click)\n\n## want to keep this aspect ratio\n## display width/height\ndispW=1280\ndispH=960\n\n## if not 4 camera will be upside down, or horizontally flipped \nflip = 4\n\n## laucnhes g streamer nvarguscamerasrc\n## Don't want to run at full full fps as camera can't handle it\n## BGR is blue green red\ncamSet='nvarguscamerasrc ! video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'\n\n## camera is now ready to run\ncam = cv2.VideoCapture(camSet)\n\nwhile True:\n ## ret allows creating the var\n # #frame will get the last picture from the camera\n ret, frame=cam.read()\n ## Grabbing a frame and then showing the frame\n cv2.imshow('piCam', frame)\n\n if goFlag == 1:\n frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (255,0,0), 3)\n roi = frame[y1:y2, x1:x2]\n cv2.imshow('ros', roi)\n cv2.moveWindow('ros', 1400, 0)\n\n cv2.moveWindow('nanoCam', 0, 0)\n ## checks every ms to see if key is pressed\n if cv2.waitKey(1) ==ord('q'):\n break\n## need to release camera otherwise will still run\ncam.release()\ncv2.destroyAllWindows()","repo_name":"hakbar0/py-projects","sub_path":"openCV/openCV11-ROI_mouse.py","file_name":"openCV11-ROI_mouse.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"44728714026","text":"from math import sin, pi\nfrom Figure import Figure, Polygon\nfrom Figure2D import point_2d\nfrom MyMatrix import Matrix\n\n\nclass TitanArum(Figure):\n \"\"\" ショクダイオオコンニャクの花に似てる \"\"\"\n\n def get_iter(self):\n if self.n == 0:\n return iter((self.poly),)\n\n center = sum([p for p in self.poly.points], point_2d(0, 0)).scaled(1 / 4)\n mid_points = [(self.poly.points[i - 1] + self.poly.points[i]).scaled(1 / 2) for i in range(4)]\n points = [self.poly.points[3], mid_points[3], self.poly.points[2],\n mid_points[0], center, mid_points[2],\n self.poly.points[0], mid_points[1], self.poly.points[1]]\n\n f = lambda x, y: (x * 0.9 + 0.1 * x * sin(4 * pi * y),\n y * 0.9 + 0.1 * y * sin(4 * pi * x))\n for i, p in enumerate(points):\n points[i] = point_2d(*f(p[0], p[1]))\n return iter((TitanArum(Polygon([points[6], points[7], points[4], points[3]]), self.n - 1),\n TitanArum(Polygon([points[7], points[8], points[5], points[4]]), self.n - 1),\n TitanArum(Polygon([points[4], points[5], points[2], points[1]]), self.n - 1),\n TitanArum(Polygon([points[3], points[4], points[1], points[0]]), self.n - 1)))\n\n def __init__(self, poly, n):\n super().__init__(3)\n self.n = n\n self.poly = poly\n\ns = 1.0\npoints = [point_2d(0.0, 0.0), point_2d(s, 0.0), point_2d(s, s), point_2d(0.0, s)]\nfigure = TitanArum(Polygon(points), 6).transform(Matrix.affine2D(trans=[0.025, 0.025], scale=[1.2, 1.2]))\n","repo_name":"yuki67/Figure","sub_path":"Gallery2D/TitanArum.py","file_name":"TitanArum.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"71399401846","text":"from nio.modules.settings import Settings\nfrom nio.testing import NIOTestCase\n\n\nclass TestSetSettings(NIOTestCase):\n\n \"\"\" Tests that set_settings is called in the right spot (after initializing\n settings and before initializing any other module, and settings are\n available within tests . \"\"\"\n\n def set_settings(self):\n\n Settings.set(\"test_section\", \"test_option\", \"test_value\")\n\n # assert only Settings module has been initialized\n self.assertEqual(len(self._module_initializer._initialized_modules), 1)\n settings_module = self.get_module(\"settings\")\n self.assertEqual(\n self._module_initializer._initialized_modules[0].__class__,\n settings_module.__class__)\n\n def test_set_settings(self):\n \"\"\" Makes sure settings are available within test \"\"\"\n\n self.assertEqual(Settings.get(\"test_section\", \"test_option\"),\n \"test_value\")\n\n # assert that at this point all modules have been initialized\n self.assertEqual(len(self._module_initializer._initialized_modules),\n len(self.get_test_modules()))\n","repo_name":"niolabs/nio","sub_path":"nio/testing/tests/test_set_settings.py","file_name":"test_set_settings.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"}
+{"seq_id":"13344333382","text":"from kivy.lang import Builder\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.camera import Camera\r\nfrom kivymd.app import MDApp\r\nfrom kivymd.uix.label import MDLabel\r\nimport cv2\r\nimport numpy as np\r\nfrom keras.applications import ResNet50\r\nfrom keras.applications.resnet50 import preprocess_input\r\n\r\n# Charger un modèle pré-entraîné\r\nmodel = ResNet50(weights='imagenet')\r\n\r\n# Fonction pour prédire si l'image contient un chien\r\ndef dog_detector(img):\r\n img = cv2.resize(img, (224, 224)) # Redimensionner l'image\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Convertir l'image en RGB\r\n img = np.expand_dims(img, axis=0)\r\n img = preprocess_input(img)\r\n prediction = model.predict(img)\r\n return (np.argmax(prediction) <= 268) and (np.argmax(prediction) >= 151)\r\n\r\n# Fonction pour la détection en temps réel\r\ndef live_dog_detection(camera, label):\r\n # Access the camera texture and convert it to a numpy array\r\n frame = np.frombuffer(camera.texture.pixels, dtype='uint8')\r\n frame = frame.reshape((camera.texture.height, camera.texture.width, 4))\r\n\r\n # Détection de chien en temps réel\r\n if dog_detector(frame):\r\n text = \"Chien détecté\"\r\n else:\r\n text = \"Pas de chien détecté\"\r\n\r\n label.text = text\r\n\r\nKV = '''\r\nBoxLayout:\r\n orientation: 'vertical'\r\n\r\n Camera:\r\n id: camera\r\n resolution: (640, 480)\r\n play: True\r\n\r\n MDLabel:\r\n id: detection_label\r\n text: \"Attente de détection...\"\r\n halign: 'center'\r\n'''\r\n\r\nclass MonApplication(MDApp):\r\n\r\n def build(self):\r\n return Builder.load_string(KV)\r\n\r\n def on_start(self):\r\n camera = self.root.ids.camera\r\n label = self.root.ids.detection_label\r\n # Use the 'on_texture' event to trigger the live_dog_detection function\r\n camera.bind(on_texture=lambda instance: self.live_dog_detection(instance, label))\r\n\r\nif __name__ == \"__main__\":\r\n MonApplication().run()\r\n","repo_name":"marilyneyapo/First-IA_PROJECT","sub_path":"kivy_apk.py","file_name":"kivy_apk.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"25836628886","text":"import RPi.GPIO as GPIO\nfrom hx711 import HX711\nfrom raspapp.models import Pin, Value\n\n\nclass SensorInstance:\n\n def __init__(self):\n\n self.FOOD_SENSOR_DATA_PIN = Pin.objects.get(name='food_data').number\n self.FOOD_SENSOR_CLOCK_PIN = Pin.objects.get(name='food_clock').number\n self.WATER_SENSOR_DATA_PIN = Pin.objects.get(name='water_data').number\n self.WATER_SENSOR_CLOCK_PIN = Pin.objects.get(name='water_clock').number\n\n self.FOOD_TARE = Value.objects.get(name='food_tare').value\n self.WATER_TARE = Value.objects.get(name='water_tare').value\n\n def clean_GPIOs(self):\n\n GPIO.cleanup()\n\n def setup_scale(self, data_pin, clock_pin):\n\n # Setting up the GPIO pins and the required constants for load sensor\n\n scale = HX711(data_pin, clock_pin)\n scale.set_reading_format(\"LSB\", \"MSB\")\n scale.set_reference_unit(92)\n\n return scale\n\n def food_scale(self, num_of_measurements=15):\n\n data_pin = self.FOOD_SENSOR_DATA_PIN\n clock_pin = self.FOOD_SENSOR_CLOCK_PIN\n tare = self.FOOD_TARE\n\n return self.do_scale(data_pin, clock_pin, num_of_measurements, tare)\n\n def water_scale(self, num_of_measurements=15):\n\n data_pin = self.WATER_SENSOR_DATA_PIN\n clock_pin = self.WATER_SENSOR_CLOCK_PIN\n tare = self.WATER_TARE\n\n return self.do_scale(data_pin, clock_pin, num_of_measurements, tare)\n\n def do_scale(self, data_pin, clock_pin, num_of_measurements, tare):\n\n # Declaration of fundamental variables.\n\n scale = self.setup_scale(data_pin, clock_pin)\n measurements = num_of_measurements\n estimated_exceptions = int(num_of_measurements / 3)\n exceptions = 0\n values = []\n\n # Capturing weight information in appropriate format.\n\n for i in range(measurements):\n scale.power_down()\n scale.power_up()\n val = scale.get_weight(1)\n meaning_val = int(-val / 5) + tare\n values.append(meaning_val)\n\n self.clean_GPIOs()\n\n # Removing exceptions if any exist.\n\n for i in range(estimated_exceptions):\n if min(values):\n values.remove(min(values))\n exceptions += 1\n if max(values):\n values.remove(max(values))\n exceptions += 1\n\n # Calculating the mean for the rest of values.\n\n result = sum(values) / (measurements - exceptions)\n\n return result\n","repo_name":"UgurAsaner/raspserver-Django","sub_path":"raspapp/libraries/sensor/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"3788646314","text":"from skimage import io\nimport numpy as np\nimport os, sys\nfrom matplotlib import pyplot as plt\nfrom scipy.linalg import norm\nfrom scipy.ndimage import rotate\n\n\ndef load_processed_images(base_path):\n ic = io.imread_collection(base_path+'*.gif')\n return ic\ndef extract_grey(image):\n return image == 127\ndef extract_white(image):\n return image == 191\ndef extract_csf(image):\n return image == 63\n\ndef process(base_path):\n ic = load_processed_images(base_path)\n features = []\n for i, img in enumerate(ic):\n g_img = extract_grey(img)\n w_img = extract_white(img)\n c_img = extract_csf(img)\n g_mean = mean(g_img)\n w_mean = mean(w_img)\n c_mean = mean(c_img)\n g_s_ud = symmetry_upTdown(g_img)\n g_s_lt = symmetry_lefTright(g_img)\n w_s_ud = symmetry_upTdown(w_img)\n w_s_lt = symmetry_lefTright(w_img)\n c_s_ud = symmetry_upTdown(c_img)\n c_s_lt = symmetry_lefTright(c_img)\n features.append([g_mean, w_mean, c_mean, g_s_ud, g_s_lt, w_s_lt, w_s_ud, c_s_lt, c_s_ud])\n\n return features\n\ndef mean(image):\n return np.mean(image)\n\ndef plot_preprocessed_img(image):\n f, axes = plt.subplots(nrows=2, ncols=3, figsize=(10, 5))\n grey_img = extract_grey(image)\n white_img = extract_white(image)\n csf_img = extract_csf(image)\n axes[0,0].imshow(grey_img, cmap=\"gray\")\n axes[0,0].axis('off')\n axes[0,1].imshow(white_img, cmap=\"gray\")\n axes[0,1].axis('off')\n axes[0,2].imshow(csf_img, cmap=\"gray\")\n axes[0,2].axis('off')\n plt.show()\n\ndef symmetry_upTdown(img):\n r_img = rotate(img, 90, reshape=False)\n fliplrimg = np.fliplr(r_img)\n arry = img - fliplrimg\n #flatten the array & sum the values\n return norm(arry.ravel(), 0)\n\ndef symmetry_lefTright(image):\n fliplrimg = np.fliplr(image)\n arry = image - fliplrimg\n return norm(arry.ravel(), 0)\n\n#dementia\ndementia_base_path = sys.path[0]+ os.sep + 'dementia'+os.sep +'subj' + os.sep\nnon_dementia_base_path = sys.path[0]+ os.sep + 'Non_dementia'+os.sep +'subj' + os.sep\nprint(process(dementia_base_path))\nprint(process(non_dementia_base_path))\n","repo_name":"Niteshsuresh/MRI","sub_path":"data/raw/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"29689096246","text":"from django.urls import path\r\nfrom . import views\r\n\r\n\r\nurlpatterns = [\r\n\r\n path('', views.myhtml, name='myhtml'),\r\n\r\n path('', views.welcome, name='welcome'),\r\n path('', views.home, name='home'),\r\n path('', views.topcats, name='topcats'),\r\n path('', views.inputshow, name='inputshow'),\r\n\r\n path('', views.login_user, name='login'),\r\n path('', views.logout_user, name='logout'),\r\n path('', views.register_user, name='register'),\r\n\r\n path('/', views.detail, name='detail'),\r\n path('/results/', views.results, name='results'),\r\n path('/vote/', views.vote, name='vote'),\r\n]","repo_name":"Kaewkamphon62/Cats10TH","sub_path":"myweb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"72083884406","text":"from HTMLReader.Read import *\nfrom HTMLReader.strUtils import *\n\n\nclass Element:\n def __init__(self, raw_tag: str, content: str):\n '''\n A HTML element converted into python class\n '''\n\n self.content = content\n\n # Get tag type (eg. 'div' or 'h1')\n self.type = raw_tag.split(' ')[0].replace('<', '')\n\n # Get prop fields (eg. 'src', 'class' or / and 'id')\n props_fields = RemoveBetweenChar(ReplaceMultStr(raw_tag, [f'<{self.type} ', '=', '>'], ''), ['\"', \"'\"]).split()\n self.props = {}\n\n # Get props\n for field in props_fields:\n # Get start & end index of prop values\n prop_start_i = TryFind(raw_tag, [f\"{field}='\", f'{field}=\"'], 0)[0] + len(f'{field}=\"')\n prop_end_i = TryFind(raw_tag[prop_start_i:], [\"'\", '\"'], 0)[0] + prop_start_i\n\n # Split all prop values into list\n self.props[field] = raw_tag[prop_start_i:prop_end_i].split()\n\n @staticmethod\n def GetWithTag(raw_html: str, tag_type: str):\n '''\n Get all elements with the given tag\n '''\n \n elements = []\n\n while raw_html.count(tag_type) > 0:\n\n # Calculate opening tag index & get opening tag content (eg. '')\n opening_tag_start_i = raw_html.find(tag_type) - 1\n opening_tag_end_i = raw_html[opening_tag_start_i:].find('>') + opening_tag_start_i + 1\n opening_tag_content = raw_html[opening_tag_start_i:opening_tag_end_i]\n\n if raw_html[opening_tag_start_i] != '<':\n raw_html = raw_html.replace(tag_type, '█'*len(tag_type), 1)\n continue\n\n # Get closing tag index with GetClosingTag() function\n closing_tag_i = GetClosingTag(raw_html, opening_tag_content)\n \n # Get the content between the opening and closing tag\n content = raw_html[opening_tag_end_i:closing_tag_i]\n \n # Return new HTML element object\n elements.append(Element(opening_tag_content, content))\n\n raw_html = raw_html.replace(tag_type, '█'*len(tag_type), 1)\n\n return elements\n\n @staticmethod\n def Get(raw_html: str, identification: str):\n '''\n Get all elements with the given class\n '''\n \n elements = []\n\n while raw_html.count(identification) > 0:\n\n identification_i = raw_html.find(identification)\n\n # Calculate opening tag index & get opening tag content (eg. '
')\n opening_tag_start_i = FindIndexReverse(raw_html, '<', identification_i)\n opening_tag_end_i = raw_html[opening_tag_start_i:].find('>') + opening_tag_start_i + 1\n opening_tag_content = raw_html[opening_tag_start_i:opening_tag_end_i]\n\n # Get closing tag index with GetClosingTag() function\n closing_tag_i = GetClosingTag(raw_html, opening_tag_content)\n \n # Get the content between the opening and closing tag\n content = raw_html[opening_tag_end_i:closing_tag_i]\n \n # Return new HTML element object\n elements.append(Element(opening_tag_content, content))\n\n raw_html = raw_html.replace(identification, '█'*len(identification), 1)\n\n return elements","repo_name":"Monkvy/HTML-Reader","sub_path":"HTMLReader/Element.py","file_name":"Element.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"7851229645","text":"from controller import Controller\r\nfrom controller import cols_to_col\r\nimport numpy as np\r\nimport pygame, sys, colorsys\r\n\r\npygame.init()\r\n\r\ncon = Controller()\r\n\r\ndisplay = pygame.display.set_mode((256, 256))\r\n\r\nbasecol = [0, 1, 1]\r\ncol = basecol\r\nupdate = True\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n elif event.type == pygame.MOUSEMOTION:\r\n if event.buttons[0] == 1:\r\n pos = event.pos\r\n col[1] = (256-pos[0])/256\r\n col[2] = (256-pos[1])/256\r\n elif event.type == pygame.MOUSEWHEEL:\r\n col[0] += event.y/360\r\n if col[0] > 1: col[0] = 0\r\n elif col[0] < 0: col[0] = 1\r\n update = True\r\n \r\n n_rgb = colorsys.hsv_to_rgb(*col)\r\n rgb = [round(n_rgb[0]*255), round(n_rgb[1]*255), round(n_rgb[2]*255)]\r\n cin = cols_to_col(*rgb)\r\n print(col, rgb)\r\n con.send(cin, wait=(1/60)*1000, log=False)\r\n\r\n if update:\r\n update = False\r\n display.fill((0, 0, 0))\r\n for x in range(256):\r\n for y in range(256):\r\n n_rgb = colorsys.hsv_to_rgb(col[0], (256-x)/256, (256-y)/256)\r\n rgb = [round(n_rgb[0]*255), round(n_rgb[1]*255), round(n_rgb[2]*255)]\r\n display.set_at((x, y), rgb)\r\n pygame.display.flip()","repo_name":"jazzyocean/rgbtq","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"29647105347","text":"\"\"\"\nThe keys for the Flood Fill are:\n - Get valid neighbors: [up, down , left, right] -> within the matrix dimensions\n - Color of the source pixel\n - If source pixel color and color are the same: return image\n\n1. Iterate over each of the neighbors\n2. If the color matches the source pixel, update color\n2. Call recursively for the neighbors of the neighbors, pass updated image matrix\n\"\"\"\nfrom typing import List\n\nclass Solution:\n moves = [\n [-1, 0],\n [1, 0],\n [0, 1],\n [0, -1],\n ]\n\n def fill(self, image, sr, sc, color, def_color, m, n):\n for move in self.moves:\n r = sr+move[0]\n c = sc+move[1]\n if 0 <= r < m and 0 <= c < n:\n if image[r][c] == def_color:\n image[r][c] = color\n self.fill(image, r, c, color, def_color, m, n)\n\n return image\n\n\n def floodFill(self, image: List[List[int]], sr: int, sc: int, color: int) -> List[List[int]]:\n m = len(image)\n n = len(image[0])\n def_color = image[sr][sc]\n if color == def_color:\n return image\n image[sr][sc] = color\n result = self.fill(image, sr, sc, color, def_color, m, n)\n\n return result\n\nif __name__ == \"__main__\":\n sol = Solution()\n image1 = [\n [1,1,1],\n [1,1,0],\n [1,0,1],\n ]\n sr, sc = 1, 1\n color = 2\n result1 = sol.floodFill(image1, sr, sc, color)\n print(\"Result 1: \", result1)\n image2 = [\n [0,0,0],\n [0,0,0],\n ]\n sr, sc = 0, 0\n color = 0\n result2 = sol.floodFill(image2, sr, sc, color)\n print(\"Result 2: \", result2)\n","repo_name":"salasberryfin/leetcode-challenges","sub_path":"python/733-flood-fill/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"33542733691","text":"import scipy.spatial.kdtree as kd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass _spatial_smoother_2d(object):\n \"\"\"\n Stores domain parameters for all spatially adaptive kernel density estimators.\n\n Arguments:\n\n 1. num_neighbors: The number of neighbors to use for the adaptive smoothing parameter.\n 2. domx_params: A tuple specifying the parameters for the x-axis. Elements containg lower bound; upper bound; number of steps.\n 3. domy_params: A tuple specifying the parameters for the y-axis. Elements containg lower bound; upper bound; number of steps.\n\n \"\"\"\n def __init__(self,\n num_neighbors,\n domx_params,\n domy_params):\n\n self.k = num_neighbors\n self.domx_params = domx_params\n self.domy_params = domy_params\n\n def _construct_domain(self):\n # construct domain over x-axis\n domx = np.linspace( self.domx_params[0],\n self.domx_params[1],\n self.domx_params[2])\n # domain over the y-axis\n domy = np.linspace( self.domy_params[0],\n self.domy_params[1],\n self.domy_params[2])\n\n # return grid coordinates\n return(np.meshgrid(domx, domy))\n\n def plot_kernel(self,\n f,\n x = None,\n title = \"\",\n data_alpha = 1.0,\n filename = None):\n\n \"\"\"\n Plotting tools for spatially adaptive kernel smoothing functions.\n\n Arguments:\n\n 1. f: Matrix containing the kernel smoothed function.\n 2. x: Data used by the kernel smoothing the function. (optional)\n 3. title: Plot title. (optional)\n\n Returns:\n\n 1. Plots kernel smoothed data.\n\n \"\"\"\n domx, domy = self._construct_domain()\n\n plt.figure()\n ctf = plt.contourf(domx, domy, f, cmap = \"Blues\", levels = np.linspace(np.min(f), np.max(f), 31))\n plt.colorbar(ctf, shrink=0.9, format = '%.01e')\n plt.title(title)\n\n # plot the data if provided\n if x is not None:\n plt.scatter(x[:, 0], x[:, 1], facecolor = 'red', edgecolor = \"black\", zorder = 1, alpha = data_alpha)\n\n if filename is not None:\n plt.savefig(filename, dpi = 300)\n else:\n plt.show()\n\n\nclass knn_gaussian_2d(_spatial_smoother_2d):\n\n def __init__(self, \\\n num_neighbors, \\\n domx_params, \\\n domy_params):\n\n \"\"\"\n Parameters and functions for k nearest neighbor adaptive density estimator\n with a Gaussian kernel.\n\n Arguments:\n\n 1. domx_params: A tuple specifying the parameters for the x-axis. Elements containg lower bound; upper bound; number of steps.\n 2. domy_params: A tuple specifying the parameters for the y-axis. Elements containg lower bound; upper bound; number of steps.\n\n Example:\n\n domx = (0, 10, 100) # start at 0; end at 10; 100 steps\n domy = (-10, 10, 100) # start at -10; end at 10; 100 steps\n\n # initialize smoother with using 10 neighbors\n gaussian_smoother = knn_gaussian_2d(num_neighbors = 10, domx_params = domx, domy_params = domy)\n \"\"\"\n\n _spatial_smoother_2d.__init__(self, num_neighbors, domx_params, domy_params)\n\n def smooth(self, x):\n \"\"\"\n Apply K Nearest Neighbors adaptive Gaussian smoother to a provided 2D dataset.\n\n Arguments:\n\n 1. x: 2D dataset to be smoothed. It is assumed that the rows of\n the data matrix are the sample points.\n\n Returns:\n\n 1. 
Smoothed function over the specified domain.\n\n Example:\n\n TODO: Write sample code...\n \"\"\"\n domx, domy = self._construct_domain()\n dom = np.vstack((domx.ravel(), domy.ravel())).T\n\n # construct KD tree on data\n tree = kd.KDTree(x)\n\n # get k nearest neighbors\n dist = tree.query(dom, k = self.k)[0]\n tp_knn = dist[:, self.k - 1].reshape(-1, 1)\n\n ### ADAPTIVE KERNEL SMOOTHING\n # pairwise subtraction between grid points and each data point\n # reshape from tensor to matrix (K x 2)\n Fxy = np.subtract(dom[:, np.newaxis, :], x[np.newaxis, :, :]).reshape(-1, 2)\n Fxy = np.square(np.linalg.norm(Fxy, axis = 1)).reshape(dom.shape[0],-1)\n Fxy = np.divide(Fxy, -2 * tp_knn ** 2)\n Fxy = np.divide(np.exp(Fxy), 2 * np.pi * tp_knn ** 2)\n Fxy = Fxy.mean(axis = 1)\n\n return(Fxy.reshape(self.domy_params[2], self.domx_params[2]))\n\nclass knn_density_estimator(_spatial_smoother_2d):\n\n def __init__(self, num_neighbors, domx_params, domy_params):\n\n \"\"\"\n Parameters and functions for k-nearest neighbor adaptive density estimator.\n\n Arguments:\n\n 1. num_neighbors: The number of neighbors (k) used to estimate the density.\n 1. domx_params: A tuple specifying the parameters for the x-axis. Elements containg lower bound; upper bound; number of steps.\n 2. domy_params: A tuple specifying the parameters for the y-axis. Elements containg lower bound; upper bound; number of steps.\n\n Example:\n\n k = 10 # Number of neighbors to consider.\n domx = (0, 10, 100) # start at 0; end at 10; 100 steps.\n domy = (-10, 10, 100) # start at -10; end at 10; 100 steps.\n\n # initialize smoother with using 10 neighbors\n gaussian_smoother = knn_gaussian_2d(num_neighbors = 10, domx_params = domx, domy_params = domy)\n \"\"\"\n\n _spatial_smoother_2d.__init__(self, num_neighbors, domx_params, domy_params)\n\n def smooth(self, x):\n \"\"\"\n Apply K Nearest Neighbors density estimator over a grid.\n\n Arguments:\n\n 1. x: 2D dataset to be smoothed. It is assumed that the rows of\n the data matrix are the sample points.\n\n Returns:\n\n 1. 
Smoothed function over the specified domain.\n\n Example:\n\n TODO: Write sample code...\n \"\"\"\n domx, domy = self._construct_domain()\n dom = np.vstack((domx.ravel(), domy.ravel())).T\n\n # construct KD tree on data\n tree = kd.KDTree(x)\n\n # get k^{th} nearest neighbors to each point in the domain\n dist = tree.query(dom, k = self.k, p = 2)[0]\n dist_knn = dist[:, self.k - 1].reshape(self.domy_params[2], self.domx_params[2])\n dist_knn = np.divide(self.k / (x.shape[0] * np.pi), dist_knn ** 2)\n\n # KNN density estimator\n return(dist_knn)\n\nclass distance_to_measure(_spatial_smoother_2d):\n def __init__(self, domx_params, domy_params, num_neighbors = None, tau = None):\n \"\"\"Description here\"\"\"\n assert 0 < tau < 1 or tau is None, \\\n \"Parameter tau must be a numerical value in (0, 1).\"\n\n assert len(domx_params) == 3 and len(domy_params) == 3, \\\n \"Domain parameter tuples must contain three elements.\"\n\n assert isinstance(domx_params, tuple) and isinstance(domy_params, tuple), \\\n \"Domain parameters must be of type 'tuple'.\"\n\n\n if tau is None:\n _spatial_smoother_2d.__init__(self,\n num_neighbors = num_neighbors,\n domx_params = domx_params,\n domy_params = domy_params)\n else:\n _spatial_smoother_2d.__init__(self,\n num_neighbors = None,\n domx_params = domx_params,\n domy_params = domy_params)\n\n self.tau = tau\n\n def smooth(self, x):\n k = np.ceil(self.tau * x.shape[0]) if self.tau is not None else self.k\n k = int(k)\n domx, domy = self._construct_domain()\n dom = np.vstack((domx.ravel(), domy.ravel())).T\n\n tree = kd.KDTree(x)\n knn = tree.query(dom, k = k, p = 2)[1]\n\n #\n Fxy = np.subtract(dom[:, np.newaxis, :], x[knn, :]).reshape(-1, 2)\n Fxy = np.linalg.norm(Fxy, axis = 1).reshape(-1, k)\n Fxy = np.sqrt(Fxy.mean(axis = 1))\n\n return(Fxy.reshape(self.domy_params[2], self.domx_params[2]))\n\n\n\n\n# class knn_uniform_2d(_spatial_smoother_2d):\n# \"\"\"\n# Parameters and functions for k nearest neighbor adaptive density estimator\n# with a uniform kernel.\n# Arguments:\n# 1. domx_params: A tuple specifying the parameters for the x-axis. Elements containg lower bound; upper bound; number of steps.\n# 2. domy_params: A tuple specifying the parameters for the y-axis. Elements containg lower bound; upper bound; number of steps.\n# Example:\n# TODO: Write example codeblock\n# \"\"\"\n#\n# def __init__(self, num_neighbors, domx_params, domy_params):\n# _spatial_smoother_2d.__init__(self, num_neighbors, domx_params, domy_params)\n#\n# def smooth(self, x):\n# \"\"\"\n# Apply K Nearest Neighbors adaptive smoother with a uniform kernel to a\n# given 2D dataset.\n# Arguments:\n# 1. x: 2D dataset to be smoothed. It is assumed that the rows of\n# the data matrix are the sample points.\n# Returns:\n# 1. Smoothed function over the specified domain.\n# Example:\n# TODO: Write sample code...\n# \"\"\"\n#\n# domx, domy = self._construct_domain()\n# dom = np.vstack((domx.ravel(), domy.ravel())).T\n#\n# # construct KD tree on data\n# tree = kd.KDTree(x)\n#\n# # get k+1 nearest neighbors\n# #! 
the point itself is always the first nearest neighbor\n# dist = tree.query(x, k = self.k)[0]\n# tp_knn = dist[:, self.k - 1].reshape(-1, 1)\n# tp_knn = np.hstack([tp_knn, tp_knn])\n#\n# # ADAPTIVE KERNEL SMOOTHING\n# # pairwise subtraction between each grid point and each point in the data\n# Fxy = np.subtract(dom[:, np.newaxis, :], x[np.newaxis, :, :])\n\n ## divide each data point by its relative weight\n #tp_knn_big = np.tile(tp_knn, (dom.shape[0], 1)).reshape(dom.shape[0], -1, 2)\n #Fxy = np.divide(Fxy, tp_knn_big)\n\n ## find where values satisfy kernel condition\n #Fxy = (np.abs(Fxy) < 1.0) * 1.0\n\n ## get columns where both x, y coordinates are satisfied\n #Fxy = np.prod(Fxy, axis = 2)\n #Fxy = np.divide(Fxy.T, 4.0 * np.prod(tp_knn, 1).reshape(-1,1)).T\n #Fxy = np.mean(Fxy, axis = 1)\n\n #return(Fxy.reshape(domx.shape[0], domy.shape[0]))\n\nif __name__ == \"__main__\":\n \"\"\"For testing and illustrative purposes only\"\"\"\n import tdaw.examples.annulus_data as ad\n\n x = ad.sample_paired_annuli(R1 = 60,\n r1 = 40,\n R2 = 40,\n r2 = 20,\n center_modifier = 50,\n samples_from_shape = 500)\n\n domx_params = (np.min(x[:, 0]) - 10, np.max(x[:, 0]) + 10, 100)\n domy_params = (np.min(x[:, 1]) - 10, np.max(x[:, 1]) + 10, 100)\n\n dtm = distance_to_measure( tau = 0.10,\n domx_params = domx_params,\n domy_params = domy_params)\n dtm.plot_kernel(f = dtm.smooth(x), x = x)\n\n #gaussian_de = knn_gaussian_2d(num_neighbors = 8,\n # domx_params = ,\n # domy_params = )\n\n #gaussian_de.plot_kernel(f = gaussian_de.smooth(x), x = x, title = \"{} KNN with Gaussian Kernel\".format(gaussian_de.k))\n\n #from mpl_toolkits.mplot3d import Axes3D\n #fig = plt.figure()\n #ax = fig.gca(projection='3d')\n #X, Y = dtm._construct_domain()\n #surf = ax.plot_surface(X, Y, dtm.smooth(x), cmap = \"Blues\", linewidth=0, antialiased=False)\n #fig.colorbar(surf, shrink=0.25, aspect=5)\n #plt.show()\n\n # knn_de = knn_density_2d(num_neighbors = 8,\n # domx_params = (np.min(x[:, 0]) - 10, np.max(x[:, 0]) + 10, 50),\n # domy_params = (np.min(x[:, 1]) - 10, np.max(x[:, 1]) + 10, 100))\n\n #print knn_de.smooth(x).shape\n #knn_de.plot_kernel(f = knn_de.smooth(x), x = x, title = \"{} KNN Density Estimator\".format(knn_de.k))\n\n #adaptive_knn.plot_kernel(f = adaptive_knn.smooth(x), x = x, title = \"{} KNN with Gaussian Kernel\".format(adaptive_knn.k))\n","repo_name":"patricksmedina/tdatools","sub_path":"data_smoothing_kernels.py","file_name":"data_smoothing_kernels.py","file_ext":"py","file_size_in_byte":12605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
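# A quick numerical sanity check of the k-NN density estimate used in
# knn_density_estimator above, f(x) = k / (n * pi * d_k(x)^2): for points
# uniform on the unit disk the true density is 1/pi ~ 0.318, and the estimate
# at the origin should land near that value.
import numpy as np

rng = np.random.default_rng(0)
pts = rng.uniform(-1, 1, size=(8000, 2))
pts = pts[np.hypot(pts[:, 0], pts[:, 1]) <= 1.0][:1000]  # uniform on the disk
k = 25
d_k = np.sort(np.hypot(pts[:, 0], pts[:, 1]))[k - 1]     # k-th neighbor of the origin
print(k / (len(pts) * np.pi * d_k ** 2))                 # close to 1/pi ~ 0.318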
+{"seq_id":"73336514165","text":"from django.db.models import Q\nfrom django.contrib import admin\nfrom django.contrib.admin.views.main import ChangeList\n\nclass InputFilter(admin.SimpleListFilter):\n template = 'admin/input_filter.html'\n\n def lookups(self, request, model_admin):\n # Dummy, required to show the filter.\n return ((),)\n\n def choices(self, changelist):\n # Grab only the \"all\" option.\n all_choice = next(super().choices(changelist))\n all_choice['query_parts'] = (\n (k, v)\n for k, v in changelist.get_filters_params().items()\n if k != self.parameter_name\n )\n yield all_choice\n\nclass ReferenceFilter(InputFilter):\n parameter_name = 'invoice'\n title = 'invoice number' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(reference__icontains=bit) |\n Q(supplier_invoice__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass POFilter(InputFilter):\n parameter_name = 'po'\n title = 'po' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(po__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass PartFilter(InputFilter):\n parameter_name = 'part'\n title = 'part' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(purchased_parts__part__icontains=bit) |\n Q(purchased_parts__internal__part_number__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass PricePartFilter(InputFilter):\n parameter_name = 'part'\n title = 'brand and part' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(part__part__icontains=bit) |\n Q(part__internal__part_number__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass InvoiceFilter(InputFilter):\n parameter_name = 'invoice'\n title = 'invoice'\n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return\n elif Q(term__icontains='none'):\n any_name = Q(supplier_invoice=None)\n return queryset.filter(any_name).distinct()\n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(supplier_invoice__supplier_invoice__icontains=bit) |\n Q(supplier_invoice__reference__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass PricePOFilter(InputFilter):\n parameter_name = 'po'\n title = 'po' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(supplier_invoice__po__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()","repo_name":"reinali07/autoshop-manager","sub_path":"sup_invoices/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"7180262944","text":"#####################\n# 34.2 속성 사용하기 ##\n##########################################\n# __init__ 메서드 안에서 self.속성에 값을 할당합니다\n# __init__ 메서드는 인스턴스(객체)를 초기화합니다.\n# 매직 메서드 ( 스페셜 메서드 )\n# -> 밑줄 두개(__)가 붙은 메소드\n# -> 파이썬이 자동으로 호출해주는 메서드\n##########################################\nprint('34.2')\nclass Person:\n def __init__(self):\n self.hello = '안녕하세요.'\n\ndef greeting(self):\n print(self.hello)\n\njames = Person()\njames.greeting() # 안녕하세요.\n\n########################\n# 34.2.1 self의 의미 ##\n##########################################\n# self는 인스턴스 자기 자신을 의미합니다.\n# 즉 위의 예제에서 self = Person() 자기 자신을 의미\n# james = Person() 호출 시\n# __init__(self)의 매개변수 self에 Person() 들어감\n#\n#\n# 34.2.2 인스턴스를 만들 때 값 받기\n# class 클래스이름:\n# def __init__(self, 매개변수1, 매개변수2):\n# self.속성1 = 매개변수1\n# self.속성2 = 매개변수2\n\n\n\n#####################################################################\n# 참고 | 특정 속성 사용 제한하기 : __slots__ = ['속성이름', '속성이름'..] ##\n####################################################################################\nprint('__slots__ = [ 속성 ] 사용하여 속성 비활성화 시키기')\nclass Person:\n __slots__ = ['name', 'age']\n # name, age 속성만 사용. # address 속성은 Person의 속성으로 인식 안됨\n\n def __init__(self, name='셀프', age=0, address='한'): # 키워드 인수\n self.name = name\n self.age = age\n self.address = address\n\nmaria = Person()\nmaria.name = '마리아'\nmaria.age = 30\n# maria.address = '서울'\nprint('name : {0}, age : {1}, address : {2}'.format(maria.name, maria.age))\n\n\n########################################\n# 참고 | 클래스의 위치 인수, 키워드 인수 사용 ##\n############################################\n# 위치인수와 리스트 언패킹 사용\nclass Person:\n def __init__(self, *args):\n self.name = args[0]\n self.age = args[1]\n self.address = args[2]\n\nmaria = Person(*['마리아', 20, '서울시 서초구 반포동'])\n\n# 키워드 인수와 딕셔너리 언패킹 사용\nclass Person:\n def __init__(self, **kwargs): # 키워드 인수\n self.name = kwargs['name']\n self.age = kwargs['age']\n self.address = kwargs['address']\n\nmaria1 = Person(name='마리아', age=20, address='서울시 서초구 반포동')\nmaria2 = Person(**{'name': '마리아', 'age': 20, 'address': '서울시 서초구 반포동'})","repo_name":"skyla15/HireMeProject-","sub_path":"0_Learning_Python/파이썬문법/34_Class/34_2_Method_Attribute.py","file_name":"34_2_Method_Attribute.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"35967018184","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport setuptools\n\nREQUIRED_PACKAGES = [\"absl-py\", \"numpy\", \"scipy\", \"jax\", \"jaxlib\", \"tensorflow\",\n \"flax\"]\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"jax-influence\",\n version=\"0.1\",\n description=\"Jax Influence.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/google-research/jax-influence\",\n author=\"Google Inc.\",\n packages=setuptools.find_packages(),\n license=\"Apache 2.0\",\n install_requires=REQUIRED_PACKAGES,\n)\n","repo_name":"google-research/jax-influence","sub_path":"pip_package/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"76"}
+{"seq_id":"31980322435","text":"from django.contrib.messages import success\nfrom django.core.checks import messages\nfrom django.shortcuts import redirect, render,get_object_or_404\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView\nfrom .models import Images, Profile, Comment\nfrom .forms import EditProfileForm, ImageForm, CommentForm, ProfileUpdateForm\nfrom django.views import generic\nfrom django.urls import reverse_lazy, reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import UserChangeForm\n# Create your views here.\n\n@login_required(login_url='/accounts/login/')\ndef index(request):\n name = 'instagram app'\n images = Images.objects.all()\n comments = Comment.objects.all()\n \n return render(request, 'instagram/index.html', {\"name\":name, \"images\":images, \"comments\":comments})\n\ndef image_detail(request, image_id):\n try:\n image = Images.objects.get(id = image_id)\n image_likes = image.like.count()\n \n except Images.DoesNotExist:\n raise Http404()\n\n return render(request,\"instagram/image.html\", {\"image\":image, \"image_likes\":image_likes})\n\n\n@login_required(login_url='/accounts/login/')\ndef like_image(request, image_id):\n image = Images.objects.get(id =image_id)\n image.like.add(request.user.profile)\n image.save()\n return HttpResponseRedirect(reverse('image_detail', args=[str(image_id)]))\n\n\n@login_required(login_url='/accounts/login/')\ndef new_image(request):\n current_user =request.user\n if request.method == 'POST':\n form = ImageForm(request.POST,request.FILES)\n if form.is_valid():\n image = form.save(commit = False)\n image.profile = current_user\n image.save()\n return redirect(\"index\")\n\n else:\n form = ImageForm()\n return render (request, 'new_image.html', {\"form\":form})\n\n@login_required(login_url='/accounts/login/')\ndef delete_image(request, image_id):\n item = Images.objects.get(id =image_id)\n if request.method =='POST':\n item.delete()\n return redirect('/')\n return render(request, 'instagram/delete.html', {\"item\":item})\n \n@login_required(login_url='/accounts/login/')\ndef update_image(request, image_id):\n image = Images.objects.get(id=image_id)\n update_form = ImageForm(instance=image)\n context = {\"update_form\": update_form}\n if request.method ==\"POST\":\n update_form = ImageForm(request.POST, instance = image)\n if update_form.is_valid():\n update_form.save()\n return redirect(\"/\")\n\n return render (request, 'instagram/update_image.html', context)\n \n@login_required(login_url='/accounts/login/')\ndef search(request):\n if 'user' in request.GET and request.GET['user']:\n search_term = request.GET.get('user')\n searched_users = Profile.search_profile(search_term)\n return render(request, 'instagram/search.html', {'users':searched_users})\n\n else: \n return render(request, 'instagram/search.html')\n \n\nclass UserEditView(generic.UpdateView):\n form_class = EditProfileForm\n template_name='django_registration/edit_profile.html'\n success_url =reverse_lazy('index')\n\n def get_object(self):\n return self.request.user\n\n@login_required(login_url='/accounts/login/')\ndef add_comment(request, image_id):\n image = get_object_or_404(Images, id=image_id)\n\n if request.method == 'POST':\n comment_form = CommentForm(request.POST, request.FILES, instance=image)\n if comment_form.is_valid():\n comments = comment_form.save(commit=False)\n comments.image = image\n 
comments.user = request.user\n \n return redirect('index')\n else:\n comment_form = CommentForm()\n \n return render(request, 'instagram/add_comment.html',{\"comment_form\":comment_form, \"image\":image})\n\n@login_required(login_url='/accounts/login/')\ndef profile(request):\n if request.method == 'POST':\n user_form=EditProfileForm(request.POST, instance =request.user)\n profile_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n return redirect('profile')\n else:\n user_form=EditProfileForm(instance =request.user)\n profile_form = ProfileUpdateForm(instance=request.user.profile)\n messages.success(request, f'Your profile was updated successfuly')\n context = {\"user_form\":user_form, \"profile_form\":profile_form}\n return render(request, 'django_registration/user_profile.html', context)\n","repo_name":"ian-otieno/Django-Instagram","sub_path":"Instagram/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
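# A hedged sketch of a toggle variant of like_image above (assumes the same
# Images.like many-to-many relation to Profile; not part of the original views):
#
#     @login_required(login_url='/accounts/login/')
#     def toggle_like(request, image_id):
#         image = Images.objects.get(id=image_id)
#         profile = request.user.profile
#         if image.like.filter(pk=profile.pk).exists():
#             image.like.remove(profile)
#         else:
#             image.like.add(profile)
#         return HttpResponseRedirect(reverse('image_detail', args=[str(image_id)]))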
+{"seq_id":"11472671664","text":"import os\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox \r\nfrom datetime import datetime\r\n\r\n\r\ngui = Tk()\r\ngui.geometry(\"350x100\")\r\ngui.title(\"VLC OneClick Screen Capture !\")\r\n\r\n\r\ndef getFolderPath():\r\n folder_selected = filedialog.asksaveasfilename(initialdir = \"/\",title = \"Save file as...\",defaultextension='.mp4',filetype=[('*.mp4', 'MP4 Files')])\r\n folderPath.set(folder_selected)\r\n\r\ndef GO():\r\n start_time = datetime.now()\r\n folder = folderPath.get()\r\n \r\n if folder == '':\r\n messagebox.showerror(title=\"Error...!\", message=\"Empty Path, Please check your Path.\")\r\n \r\n else:\r\n os.system('cmd /c \"\"C:/Program Files/VideoLAN/VLC/vlc.exe\" screen:// --qt-start-minimized :screen-fps=25 :run-time=9999 :quiet :sout=#transcode{vcodec=h264,vb072}:standard{access=file,mux=mp4,dst='+folder+'}\"')\r\n end_time = datetime.now()\r\n messagebox.showinfo(title=\"Done\", message=('Duration: {}'.format(end_time - start_time)))\r\ndef ABOUT():\r\n messagebox.showinfo(\"ALU DEV TEAM @ 2022\", \"by nikkpap (nikkpap@gmail.com)\")\r\n\r\nfolderPath = StringVar()\r\n\r\nlbl1 = Label(gui ,text=\"Save as capture...\").grid(row=0,column = 0)\r\nentry1 = Entry(gui,textvariable=folderPath, state=DISABLED).grid(row=0,column=1)\r\n\r\nbtnBrowse = ttk.Button(gui, text=\"Browse\",command=getFolderPath).grid(row=0,column=2)\r\nbtnGO = ttk.Button(gui ,text=\"Go\", command=GO).grid(row=4,column=0)\r\nbtnAbout = ttk.Button(gui ,text=\"About\", command=ABOUT).grid(row=4,column=1)\r\n\r\ngui.mainloop()\r\n","repo_name":"nikkpap/VLC-OneClick-Screen-Capture","sub_path":"VLC OneClick Screen Capture.pyw","file_name":"VLC OneClick Screen Capture.pyw","file_ext":"pyw","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"72976186484","text":"import os\nimport shutil\nimport subprocess\nimport csv\nimport bert_score\nimport sentence_transformers\nimport numpy as np\nfrom scipy.spatial.distance import cosine\n\n# TextDiversity pkgs\nfrom transformers import AutoModel, AutoTokenizer\nfrom sentence_transformers import SentenceTransformer\nfrom sklearn.decomposition import PCA\nfrom scipy.spatial import distance\nimport torch\nimport numpy as np\nimport nltk\nfrom nltk import word_tokenize, sent_tokenize\nfrom nltk.corpus import stopwords\nimport os\nimport itertools\nfrom multiprocessing import Pool\nimport spacy\n\n# locals\nimport metric\nfrom utils import *\n\nclass TokenSemanticDiversity(metric.TextDiversity):\n\n default_config = {\n # TextDiversity configs\n 'q': 1,\n 'normalize': False,\n 'distance_fn': distance.chebyshev, \n 'dim_reducer': PCA,\n 'remove_stopwords': False, \n 'scale_dist': \"exp\", \n 'sq_reg': False, \n 'mean_adj': True,\n 'verbose': False,\n # TokenSemanticDiversity configs\n 'MODEL_NAME':\"bert-large-uncased\",\n 'batch_size': 16,\n 'use_gpu': False,\n 'n_components': 'auto' \n }\n\n def __init__(self, config={}):\n config = {**self.default_config, **config} \n super().__init__(config)\n self.model = AutoModel.from_pretrained(config['MODEL_NAME'])\n self.tokenizer = AutoTokenizer.from_pretrained(config['MODEL_NAME'])\n self.undesirable_tokens = [\n self.tokenizer.pad_token_id, \n self.tokenizer.cls_token_id, \n self.tokenizer.sep_token_id\n ]\n self.batch_size = config['batch_size']\n self.device = torch.device('cuda' if config['use_gpu'] and torch.cuda.is_available() else 'cpu')\n self.verbose = config['verbose']\n\n # move model to device\n if isinstance(self.model, torch.nn.Module):\n self.model.to(self.device)\n\n def encode(self, input_ids, attention_mask):\n self.model.eval()\n with torch.no_grad():\n out = self.model(input_ids, attention_mask=attention_mask)\n emb = out[0]\n return emb\n\n def get_embeddings(self, corpus):\n inputs = self.tokenizer(corpus, return_tensors='pt', padding=True, truncation=True)\n batches = zip(chunker(inputs.input_ids, self.batch_size), \n chunker(inputs.attention_mask, self.batch_size))\n if self.verbose:\n print('getting token embeddings...')\n batches = tqdm(batches, total=int(len(inputs.input_ids)/self.batch_size))\n\n outputs = []\n for input_ids, attention_mask in batches:\n emb = self.encode(input_ids.to(self.device), \n attention_mask.to(self.device))\n outputs.append(emb)\n embeddings = torch.cat(outputs)\n\n # remove undesirable tokens\n idx = np.isin(inputs['input_ids'], self.undesirable_tokens, assume_unique=True, invert=True).reshape(-1)\n tok = np.array(self.tokenizer.convert_ids_to_tokens(inputs.input_ids.view(-1)))[idx]\n boe = embeddings.view(-1, embeddings.shape[-1])[idx].detach().cpu()\n\n # remove stopwords\n if self.config['remove_stopwords']:\n idx = np.isin(tok, stopwords.words('english'), invert=True)\n tok = tok[idx]\n boe = boe[idx]\n\n # compress embedding to speed up similarity matrix computation\n if self.config['n_components'] == \"auto\":\n n_components = min(max(2, len(boe) // 10), boe.shape[-1])\n if self.verbose:\n print('Using n_components={}'.format(str(n_components)))\n\n if type(n_components) == int and n_components > 0 and len(boe) > 1:\n boe = self.config['dim_reducer'](n_components=n_components).fit_transform(boe)\n\n if len(np.flatnonzero(np.core.defchararray.find(tok,'##')!=-1)) > 0:\n tok, boe = merge_bpe(tok, boe)\n\n return boe, tok\n\n def __call__(self, response_set): \n return 
super().__call__(response_set)\n\n\nclass SentenceSemanticDiversity(metric.TextDiversity):\n\n    default_config = {\n        # TextDiversity configs\n        'q': 1,\n        'normalize': False,\n        'distance_fn': distance.chebyshev,\n        'dim_reducer': PCA,\n        'remove_stopwords': False,\n        'scale_dist': \"exp\",\n        'sq_reg': False,\n        'mean_adj': True,\n        'verbose': False,\n        # SentenceSemanticDiversity configs\n        'MODEL_NAME': \"stsb-roberta-large\",\n        'use_gpu': False,\n        'n_components': 'auto'\n    }\n\n    def __init__(self, config={}):\n        config = {**self.default_config, **config}\n        super().__init__(config)\n        self.device = torch.device('cuda' if config['use_gpu'] and torch.cuda.is_available() else 'cpu')\n        self.model = SentenceTransformer(config['MODEL_NAME'], device=self.device)\n        self.verbose = config['verbose']\n\n    def get_embeddings(self, corpus):\n\n        boe = np.stack(self.model.encode(corpus))\n\n        # compress embedding to speed up similarity matrix computation\n        if self.config['n_components'] == \"auto\":\n            n_components = min(max(2, len(boe) // 10), boe.shape[-1])\n            if self.verbose:\n                print('Using n_components={}'.format(str(n_components)))\n        else:\n            # n_components was previously unbound on this branch, raising NameError\n            n_components = self.config['n_components']\n\n        if isinstance(n_components, int) and n_components > 0 and len(boe) > 1:\n            boe = self.config['dim_reducer'](n_components=n_components).fit_transform(boe)\n\n        return boe, corpus\n\n    def __call__(self, response_set):\n        return super().__call__(response_set)\n\n\nclass SyntacticDiversity(metric.TextDiversity):\n\n    default_config = {\n        # TextDiversity configs\n        'q': 1,\n        'normalize': False,\n        'dim_reducer': PCA,\n        'remove_stopwords': False,\n        'sq_reg': False,\n        'mean_adj': False,\n        'verbose': False,\n        # SyntacticDiversity configs\n        'MODEL_NAME': \"en_core_web_trf\",\n        'distance_fn': distance.hamming,\n        'scale_dist': \"invert\",\n        'part': 'pos_',\n        'part2int': True\n    }\n\n    def __init__(self, config={}):\n        config = {**self.default_config, **config}\n        super().__init__(config)\n        self.model = spacy.load(config['MODEL_NAME'])\n        self.verbose = config['verbose']\n\n    def get_embeddings(self, corpus):\n\n        # convert to spacy docs to get parts\n        doc_parts = []\n        for doc in corpus:\n            for sent in sent_tokenize(doc):\n                sent_ = []\n                for w in self.model(sent):\n                    if self.config['remove_stopwords'] and w.text in stopwords.words('english'):\n                        continue\n                    part_ = getattr(w, self.config['part'])\n                    sent_.append(part_)\n                doc_parts.append(sent_)\n\n        # keep the unpadded sentences as the 'species' returned to the metric\n        species = doc_parts\n\n        # pad to max sentence doc length\n        pad_to = find_max_list(doc_parts)\n        doc_parts = np.array([s + ['NULL']*(pad_to-len(s)) for s in doc_parts])\n\n        # convert doc parts to int\n        if self.config['part2int']:\n            # build dict of unique doc parts\n            part_map = set(itertools.chain(*doc_parts))\n            part_map = {tag: i for i, tag in enumerate(part_map)}\n            # convert to int for distance comparison\n            part2int_fn = np.vectorize(part_map.get)\n            doc_parts = part2int_fn(doc_parts)\n\n        return doc_parts, species\n\n    def __call__(self, response_set):\n        return super().__call__(response_set)\n\n\nif __name__ == '__main__':\n\n    def print_metric(metric, resp_set):\n        print('{0}: {1:0.3f}'.format(type(metric).__name__, metric(resp_set)))\n\n    # TEST\n    response_set = ['i am going', 'i am going', 'lets go i i']\n\n    config = {'normalize': False}\n    print_metric(TokenSemanticDiversity(config), response_set)\n    print_metric(SentenceSemanticDiversity(config), response_set)\n    print_metric(SyntacticDiversity(config), response_set)\n\n    config = {'normalize': True}\n    print_metric(TokenSemanticDiversity(config), response_set)\n    print_metric(SentenceSemanticDiversity(config), response_set)\n    print_metric(SyntacticDiversity(config), response_set)","repo_name":"asakhala921/Sibyl_eval","sub_path":"text_diversity.py","file_name":"text_diversity.py","file_ext":"py","file_size_in_byte":8177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
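The three *Diversity classes above share one pipeline: embed the corpus, optionally compress the embeddings with the configured dim_reducer, then score diversity from a pairwise-distance matrix. A minimal sketch of that shape using only numpy/scipy/sklearn; the chebyshev metric and exp scaling mirror two of the config values above, but the final score here is a crude illustrative stand-in, not the package's actual TextDiversity math:

import numpy as np
from scipy.spatial import distance
from sklearn.decomposition import PCA

def diversity_score(embeddings, n_components=2):
    # compress embeddings, as the get_embeddings() methods above do
    if len(embeddings) > 1 and n_components < embeddings.shape[-1]:
        embeddings = PCA(n_components=n_components).fit_transform(embeddings)
    # pairwise distances -> similarities via exp(-d) (the "exp" scale_dist option)
    d = distance.squareform(distance.pdist(embeddings, metric='chebyshev'))
    z = np.exp(-d)
    # illustrative score only: more mutual similarity means less diversity
    return 1.0 / z.mean()

rng = np.random.default_rng(0)
print(diversity_score(rng.normal(size=(5, 16))))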
+{"seq_id":"3608359817","text":"from os.path import dirname, abspath, join\nimport sys\n\nTHIS_DIR = dirname(__file__)\nWORDS_DIR = abspath(join(THIS_DIR, '..', 'cn_stopwords.txt'))\n\nfs = open(WORDS_DIR, encoding=\"utf-8\")\nstopwords = fs.read()\nstopwords = stopwords.split(\"\\n\")\n\nclass Node:\n def __init__(self,start):\n self.start = start\n self.children = []\n self.parent = Node\n self.text = ''\n self.type = ''\n self.semantic_role = ''\n self.raw_type = ''\n\nclass ParserTree:\n def __init__(self, list_root_node = False):\n self.list_root_node = list_root_node\n self.root = None\n self.leaves = []\n self.keywords = set()\n\n def list_of_leaves(self, node):\n result = []\n for child in node.children:\n result.extend(self.list_of_leaves(child))\n if not result:\n return [node]\n\n return result\n\n def get_keywords(self, leaf:list):\n phaselist = ['NP','VP']\n for node in leaf:\n self.inorder(node, phaselist)\n if not self.keywords and self.list_root_node:\n if leaf[0].type != None and leaf[0].type in phaselist:\n self.keywords.append(leaf[0].text)\n return self.keywords\n\n def inorder(self, node, phaselist):\n parent = node.parent\n if not parent == self.root and parent.type in phaselist:\n phase = ''\n for i in parent.children:\n phase = phase + i.text\n if len(phase)>1 and phase not in stopwords:\n self.keywords.add(phase)\n elif not parent == self.root:\n self.inorder(node=parent, phaselist=phaselist)\n\n def clear(self):\n self.root = None\n self.leaves = []\n self.keywords = set()\n\n\n","repo_name":"IASLLab/Csharp2Python","sub_path":"Csharp2Python/utils_parser/tree/parser_tree.py","file_name":"parser_tree.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"1991196518","text":"from .argument import Argument\nfrom .ihex80 import IHEX80\n\n\nclass LabeledDevice:\n _labels: list[str]\n\n def __init__(self):\n self._labels = list()\n\n def bits(self, *bits):\n self._labels = [\n str(label)\n for labels in [\n value if isinstance(value, list) or isinstance(value, Argument) else [value]\n for value in bits\n ]\n for label in labels\n ]\n\n return self\n\nclass Device(LabeledDevice):\n _name: str\n _data: dict[int, str]\n \n def __init__(self, name: str):\n self._name = name\n self._data = dict()\n \n super().__init__()\n \n def __getitem__(self, item):\n return self._data.get(item, [None for _ in self._labels])\n\n def feed(self, idx, **kwargs):\n self._data[idx] = [\n kwargs.get(label, None) if prev is None else prev\n for label, prev in zip(self._labels, self[idx])\n ]\n \n def finalize(self, unsafe: bool = False):\n for idx, bits in self._data.items():\n if unsafe:\n self._data[idx] = ['0' if bit is None else bit for bit in bits]\n else:\n assert not any(bit is None for bit in bits), f\"Undefined bit on {idx}: {bits}\"\n\n IHEX80.save(\n f\"{self._name}.hex\", {\n idx: f\"{int(''.join(bits[::-1]), 2):02x}\"\n for idx, bits in self._data.items()\n }\n )\n\n\nclass NestedDevice(LabeledDevice):\n _host: Device\n\n def __init__(self, device: Device):\n self._host = device\n \n super().__init__()\n \n def __getitem__(self, item):\n return self._host[item]\n\n def feed(self, idx, **kwargs):\n self._host._data[idx] = data = [\n kwargs.get(label, None)\n for label in self._labels\n ]\n\n def finalize(self, *args, **kwargs):\n return\n","repo_name":"mocurin/asvt-hw-3-encoder","sub_path":"src/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"9121275484","text":"#!usr/bin/env python3\n\nimport os\n\nfrom random import randint \n\nbase_filename = os.path.basename(__file__).split('.')[0]\n\nwords = ['All', 'work', 'and', 'no', 'play', 'makes', 'Jack', 'a', 'dull', 'boy']\n\ndef get_output():\n output = ''\n for line_number in range(0,7):\n if line_number == 6:\n output += \" \".join(words) + \"\\n\"\n else:\n output += \"All work\\n\"\n return output\n\n\nfor file_number in range(1,2):\n output_path = f'output/{base_filename}-{file_number}.txt'\n output_data = get_output()\n with open(output_path, 'w') as _file:\n _file.write(output_data)\n print(output_data)\n","repo_name":"alanwsmith/jacktorrance-scripts","sub_path":"06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"16472122740","text":"import csv\r\n\r\n'''\r\nimport glob\r\nimport os\r\nimport pandas as pd\r\n\r\ncombined_csv = pd.concat( [ pd.read_csv(f) for f in filenames ] )\r\ncombined_csv.to_csv( \"combined_csv.csv\", index=False )\r\n\r\nfor f in glob.glob(\"data*.txt\"):\r\n os.system(\"cat \"+f+\" >> OutFile.txt\")\r\n\r\nfor f in glob.glob(\"capture*.csv\"):\r\n os.system(\"cat \"+f+\" >> captureFile.csv\")\r\n'''\r\n\r\nprint (\"insert time stamps file name\")\r\n\r\ntimeFile = str(input())\r\n\r\nprint(\"insert capture file name\")\r\n\r\nwiresharkFile = str(input())\r\n\r\n\r\nwith open(timeFile,\"r\") as fileout:\r\n\twith open(wiresharkFile, mode = 'r') as csv_file:\r\n\t\tcsv_reader = csv.DictReader(csv_file)\r\n\r\n\t\tnum = int(fileout.readline())\r\n\t\t# num == number of readings\r\n\r\n\t\tfor i in range(num):\r\n\t\t\tsafetyCount = 0\r\n\t\t\tstart = fileout.readline()\r\n\t\t\tend = fileout.readline()\r\n\r\n\t\t\tcurrentRow = csv_reader.next()\r\n\t\t\ttime = str(currentRow['Time'])[:8]\r\n\r\n\t\t\twhile time != start and time != end:\r\n\r\n\t\t\t\tcurrentRow = csv_reader.next()\r\n\t\t\t\ttime = str(currentRow['Time'])[:8]\r\n\r\n\t\t\tif time == start:\r\n\t\t\t\t# start writing into csv file\r\n\t\t\t\t# if file exists create new file\r\n\t\t\t\tfiles = fnmatch.filter((f for f in os.listdir('./Users/Nouha Tiyal/Desktop/QSUIRP/parse')), 'output*.csv')\r\n\t\t\t\tif not files: # is empty\r\n\t\t\t\t mun = ''\r\n\t\t\t\telif len(files) == 1:\r\n\t\t\t\t mun = '(1)'\r\n\t\t\t\telse:\r\n\t\t\t\t # files is supposed to contain 'somefile.txt'\r\n\t\t\t\t files.remove('output.csv')\r\n\t\t\t\t mun = '(%i)' % (int(re.search(r'\\(([0-9]+)\\)', max(files)).group(1))+1)\r\n\t\t\t\t####\r\n\t\t\t\twith open(\"output%s.csv\" % mun, mode = 'w') as output:\r\n\t\t\t\t\tfieldnames = ['TimeStamps','Protocol','RSSI']\r\n\t\t\t\t\twriter = csv.DictWriter(output, fieldnames = fieldnames)\r\n\t\t\t\t\twriter.writeheader()\r\n\t\t\t\t\twriter.writerow({'TimeStamps': time, 'Protocol': currentRow['Protocol'], 'RSSI': currentRow['RSSI Value']})\r\n\r\n\t\t\t\t\twhile time != end and safetyCount < 100:\r\n\r\n\t\t\t\t\t\tcurrentRow = csv_reader.next()\r\n\t\t\t\t\t\ttime = str(currentRow['Time'])[:8]\r\n\t\t\t\t\t\twriter.writerow({'TimeStamps': time, 'Protocol': currentRow['Protocol'], 'RSSI': currentRow['RSSI Value']})\r\n\t\t\t\t\t\tsafetyCount += 1\r\n\r\n","repo_name":"YaqoobAnsari/Deep-Learning-of-Radio-Link-Quality-in-Wireless-Networks-","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"30010825488","text":"\n# return true if second is one char away\ndef OneAway(first, second):\n print(\"\\n {} -> {}\".format(first, second))\n diffs = 0\n iter1 = 0\n iter2 = 0\n while True:\n spot1 = None\n spot2 = None\n if iter1 >= len(first):\n spot1 = -1\n else:\n spot1 = first[iter1]\n \n if iter2 >= len(second):\n spot2 = -1\n else: \n spot2 = second[iter2]\n \n if (spot1 != spot2):\n diffs+=1\n #account for deletes/insertions\n if(len(first) > len(second)):\n iter1+=1\n elif(len(second) > len(first)):\n iter2+=1\n #no deletes/grows just different\n else:\n iter1+=1\n iter2+=1\n else:\n iter1+=1\n iter2+=1\n \n if diffs > 1:\n return False\n # we having nothing left to look at\n if spot1 == spot2 == -1:\n return diffs <= 1\n\n# what constitutes unique A->a does whitespace matter? etc\ndef UniqueChar(words):\n print(\"\\nis {} all unique\".format(words))\n dict = {}\n for i in words:\n if i in dict:\n dict[i] += 1\n print(\"Found Dup in {} it is the {} copy\".format(i, dict[i]))\n return False\n else:\n dict[i] = 1\n print(\"No dup found\")\n return True\n \ndef UniqueCharNoDict(words):\n print(\"\\nis {} all unique\".format(words))\n bools = 256*[False]\n for i in words:\n if bools[ord(i)]:\n print(\"Found Dup: \\\"{}\\\"\".format(i))\n return False\n else:\n bools[ord(i)] = True\n print(\"No dup found\")\n return True\n\n#TBD string concatenation is slow so you should use a \"\".join\ndef compression(words):\n if (words == None or type(words) is not str):\n print (\"\\nWords not defined properly\")\n return\n print(\"\\nCompressing {}\".format(words))\n compressed = \"\"\n i = 0\n while(i < len(words)):\n count = 0\n if (words[i].isdigit()):\n compressed += \"+\" \n compressed += words[i]\n # this should always happen at least once since words[i] will always equal the last char we set\n while(i < len(words) and words[i] == compressed[-1]):\n count+=1\n i+=1\n if (compressed[-1].isdigit()):\n if count > 2:\n compressed += \"_{}\".format(str(count))\n elif (count == 2):\n compressed += compressed[-1]*(count-1) # we already have 1 hence - 1\n elif count > 1:\n compressed += str(count)\n print(\"compressed: {}\".format(compressed))\n return compressed\n\nif __name__ == \"__main__\":\n print(OneAway(\"pale\", \"ple\"))\n print(OneAway(\"\", \"\"))\n print(OneAway(\"pale\", \"bake\"))\n print(OneAway(\"pale\", \"bale\"))\n \n \n UniqueChar(\"Alex\")\n UniqueChar(\" Alex \")\n UniqueChar(\"Aalex\")\n UniqueChar(\"AalexA\")\n \n UniqueCharNoDict(\"Alex\")\n UniqueCharNoDict(\" Alex \")\n UniqueCharNoDict(\"Aalex\")\n UniqueCharNoDict(\"AalexA\")\n \n compression(\"AAAAAABBBBBBCDEEEEEeeeee\")\n compression(1234)\n compression(None)\n compression(\"AAAAAABBBBBBCDEEEEEeeeee11111111114445alex\")\n ","repo_name":"halfpeaw/CodeProblems","sub_path":"round2/stringQues.py","file_name":"stringQues.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"9951815738","text":"import os\nimport fnmatch\nimport io\nimport sys\nfrom optparse import OptionParser, Values\nfrom typing import List\n\nfrom littledarwin import JavaParse\nfrom tqdm import tqdm\nfrom chaosmeter import License\nfrom .metrics import *\nfrom .writers import *\n\nchaosMeterVersion = '0.1.7'\n\n\ndef main(mockArgs: list = None):\n \"\"\"\n Main ChaosMeter Function\n \"\"\"\n print(\"\"\"\n\n▄█▄ ▄ █ ██ ████▄ ▄▄▄▄▄ █▀▄▀█ ▄███▄ ▄▄▄▄▀ ▄███▄ █▄▄▄▄\n█▀ ▀▄ █ █ █ █ █ █ █ ▀▄ █ █ █ █▀ ▀ ▀▀▀ █ █▀ ▀ █ ▄▀\n█ ▀ ██▀▀█ █▄▄█ █ █ ▄ ▀▀▀▀▄ █ ▄ █ ██▄▄ █ ██▄▄ █▀▀▌\n█▄ ▄▀ █ █ █ █ ▀████ ▀▄▄▄▄▀ █ █ █▄ ▄▀ █ █▄ ▄▀ █ █\n▀███▀ █ █ █ ▀███▀ ▀ ▀███▀ █\n ▀ █ ▀ ▀\n ▀\n\n ChaosMeter version {} Copyright (C) 2020 Ali Parsai\n\n ChaosMeter comes with ABSOLUTELY NO WARRANTY.\n This is free software, and you are welcome to redistribute it\n under certain conditions; run ChaosMeter --license for details.\n\n\n \"\"\".format(chaosMeterVersion))\n\n optionParser = OptionParser(prog=\"chaosmeter\")\n options = parseCmdArgs(optionParser, mockArgs)\n\n sourcePath = os.path.abspath(options.sourcePath)\n targetPath = os.path.abspath(options.targetPath)\n\n # Find all metrics\n metricList = getAllMetrics()\n if len(metricList) == 0:\n print(\"No metrics found!\")\n sys.exit(3)\n\n for MetricClass in metricList:\n print(\"Found metric: \\\"\" + MetricClass.name + \"\\\"\")\n print(\"Found {} metrics.\\n\".format(len(metricList)))\n\n # We need to instatiate once in single-process mode. To be redesigned for mutli-process mode.\n javaParseInstance = JavaParse()\n metricInstanceList = instantiatePlugins(metricList, javaParseInstance)\n\n # Find all writers\n writerList = getAllWriters()\n if len(writerList) == 0:\n print(\"No writers found!\")\n sys.exit(4)\n\n for WriterClass in writerList:\n print(\"Found writer: \\\"\" + WriterClass.name + \"\\\"\")\n print(\"Found {} writers.\\n\".format(len(writerList)))\n\n writerInstanceList = instantiatePlugins(writerList, targetPath)\n\n fileList = findJavaFiles(sourcePath)\n\n print(os.linesep)\n print(\"Source Path: \", sourcePath)\n print(\"Target Path: \", targetPath)\n print(os.linesep)\n\n # Main loop\n fileCounter = 0\n completeResults = dict()\n completeResultsPath = os.path.join(targetPath, \"FinalReport\")\n wroteSkipMessage = False\n for srcFile in tqdm(fileList, dynamic_ncols=True, unit='files'):\n fileCounter += 1\n\n # Set paths\n fileRelativePath = os.path.relpath(srcFile, sourcePath)\n srcFileRoot, srcFileName = os.path.split(srcFile)\n targetDir = os.path.join(targetPath, os.path.relpath(srcFileRoot, sourcePath))\n targetFilePath = os.path.splitext(os.path.join(targetDir, srcFileName))[0]\n\n if options.isContinue:\n allExists = True\n for writerInstance in writerInstanceList:\n allExists = allExists and os.path.isfile(targetFilePath + writerInstance.extension)\n if allExists:\n if not wroteSkipMessage:\n tqdm.write(\"Skipping existing results...\")\n wroteSkipMessage = True\n continue\n try:\n tqdm.write(\"({:,}/{:,}) {}\".format(fileCounter, len(fileList), fileRelativePath))\n wroteSkipMessage = False\n except UnicodeError as e:\n tqdm.write(str(e) + os.linesep)\n tqdm.write(\"Non-unicode filename detected. 
Not showing in terminal.\")\n\n metricResults = calculateMetrics(srcFile, metricList, javaParseInstance, metricInstanceList)\n if metricResults is None:\n tqdm.write(\"Error in parsing Java code, skipping the file.\")\n continue\n\n metricResultsAggregate, metricLabels = aggregateMetrics(**metricResults)\n\n # Prepare the result file\n completeResults[fileRelativePath] = metricResultsAggregate\n\n if not os.path.exists(targetDir):\n os.makedirs(targetDir)\n\n for writerInstance in writerInstanceList:\n fileContent = writerInstance.createTargetFormat(metricResultsAggregate, metricLabels)\n writerInstance.write(targetFilePath, fileContent)\n\n if not options.isContinue:\n completeResultsLabels = [\"File\"]\n completeResultsLabels.extend(metricLabels)\n completeResultsAggregate = [completeResultsLabels]\n\n for cuName in sorted(completeResults.keys()):\n for methodName in sorted(completeResults[cuName].keys()):\n cellList = [cuName, methodName]\n cellList.extend(completeResults[cuName][methodName])\n completeResultsAggregate.append(cellList)\n\n for writerInstance in writerInstanceList:\n completeFileContent = writerInstance.createFinalReportTargetFormat(completeResultsAggregate)\n writerInstance.write(completeResultsPath, completeFileContent)\n\n print(os.linesep)\n\n return 0\n\n\ndef calculateMetrics(srcFile: str, metricList: List[Metric],\n javaParseExistingInstance: JavaParse = None,\n metricExistingInstanceList: List[Metric] = None):\n javaParseInstance = JavaParse() if javaParseExistingInstance is None else javaParseExistingInstance\n metricInstanceList = instantiatePlugins(metricList, javaParseInstance) \\\n if metricExistingInstanceList is None else metricExistingInstanceList\n\n try:\n # Parse source file\n sourceCode = getFileContent(srcFile)\n tree = javaParseInstance.parse(sourceCode)\n except Exception as e:\n return None\n\n # Calculate metrics\n metricResults = dict()\n for metricInstance in metricInstanceList:\n metricResults[metricInstance.name] = metricInstance.calculate(tree, sourceCode)\n del metricInstance\n del javaParseInstance\n\n return metricResults\n\n\ndef instantiatePlugins(classList, *args):\n instanceList = list()\n for pluginClass in classList:\n instance = pluginClass(*args)\n instanceList.append(instance)\n return instanceList\n\n\ndef findJavaFiles(sourcePath: str) -> List[str]:\n # Get the file list\n if not os.path.isdir(sourcePath):\n print(\"Source path must be a directory.\")\n sys.exit(5)\n\n fileList = list()\n print(\"Searching for Java files... \", end=\"\\r\")\n for root, dirnames, filenames in os.walk(sourcePath):\n for filename in fnmatch.filter(filenames, \"*.java\"):\n fileList.append(os.path.join(root, filename))\n print(\"Searching for Java files... 
{} found.\".format(len(fileList)), end=\"\\r\")\n\n if len(fileList) == 0:\n print(\"No Java files found in provided source path.\")\n sys.exit(6)\n\n return fileList\n\n\ndef getFileContent(filePath: str) -> str:\n with io.open(filePath, mode='r', errors='replace') as contentFile:\n file_data = contentFile.read()\n return str(file_data)\n\n\ndef parseCmdArgs(optionParser: OptionParser, mockArgs: list = None) -> Values:\n \"\"\"\n\n :param mockArgs:\n :type mockArgs:\n :param optionParser:\n :type optionParser:\n :return:\n :rtype:\n \"\"\"\n #\n # numberOfCPUs = os.cpu_count()\n # numberOfCPUs = numberOfCPUs if numberOfCPUs is not None else 1\n\n # parsing input options\n optionParser.add_option(\"-p\", \"--path\", action=\"store\", dest=\"sourcePath\",\n default=None, help=\"Path to Java source files\")\n optionParser.add_option(\"-t\", \"--target\", action=\"store\", dest=\"targetPath\",\n default=os.path.dirname(os.path.realpath(__file__)),\n help=\"Path to store results\")\n # optionParser.add_option(\"--workers\", action=\"store\", type=\"int\", dest=\"workers\",\n # default=numberOfCPUs, help=\"Number of workers to spawn\")\n optionParser.add_option(\"-c\", \"--continue\", action=\"store_true\", dest=\"isContinue\",\n default=False, help=\"Skips previously analyzed files\")\n optionParser.add_option(\"--license\", action=\"store_true\", dest=\"isLicenseActive\",\n default=False, help=\"Outputs the license and exit\")\n\n if mockArgs is None:\n (options, args) = optionParser.parse_args()\n else:\n (options, args) = optionParser.parse_args(args=mockArgs)\n\n if options.isLicenseActive:\n License.outputLicense()\n sys.exit(0)\n\n if options.sourcePath is None:\n optionParser.print_help()\n print(\"\\nYou need to specify at least the path to the source files.\\n\")\n print(\"\\nExample:\\n\\t ChaosMeter -p ./src/main -t ./target \\n\\n\")\n sys.exit(1)\n\n if not os.path.isdir(options.sourcePath):\n print(\"Source path must be a directory.\")\n sys.exit(2)\n\n return options\n","repo_name":"aliparsai/ChaosMeter","sub_path":"chaosmeter/ChaosMeter.py","file_name":"ChaosMeter.py","file_ext":"py","file_size_in_byte":9195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
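ChaosMeter's plugin handling boils down to: collect classes, construct each with shared arguments, then call a common method on every instance. The pattern in miniature, with hypothetical stand-in metric classes rather than the package's real ones:

class JavaParseStandIn:
    """Hypothetical stand-in for the shared JavaParse instance."""
    pass

class LineCount:
    name = 'Line Count'
    def __init__(self, parser):
        self.parser = parser
    def calculate(self, source):
        return source.count('\n') + 1

class CharCount:
    name = 'Char Count'
    def __init__(self, parser):
        self.parser = parser
    def calculate(self, source):
        return len(source)

def instantiate_plugins(class_list, *args):
    # same shape as ChaosMeter's instantiatePlugins above
    return [cls(*args) for cls in class_list]

shared = JavaParseStandIn()
for m in instantiate_plugins([LineCount, CharCount], shared):
    print(m.name, m.calculate('class A {\n}'))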
+{"seq_id":"911484176","text":"import os\nimport cv2\nimport numpy as np\nimport rclpy\nimport pathlib\nimport math\nfrom sensor_msgs.msg import Image\nfrom ackermann_msgs.msg import AckermannDrive\nfrom rclpy.qos import qos_profile_sensor_data, QoSReliabilityPolicy\nfrom rclpy.node import Node\nfrom rclpy.qos import qos_profile_sensor_data, QoSReliabilityPolicy\nfrom .log_server import set_transmission\nfrom .log_server import set_steering\nfrom nav_msgs.msg import Odometry\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch.substitutions.path_join_substitution import PathJoinSubstitution\nimport traceback\n\n\n\nCONTROL_COEFFICIENT = 0.0007\nCONTROL_COEFFICIENT_DETOUR = 0.3183\nANGLE_GAP = 0.435808714\n\nclass LaneFollower(Node):\n def __init__(self):\n try:\n super().__init__('field_follower')\n\n # ROS interface\n self.__ackermann_publisher = self.create_publisher(AckermannDrive, 'cmd_ackermann', 1)\n qos = qos_profile_sensor_data\n qos.reliability = QoSReliabilityPolicy.RELIABLE\n self.create_subscription(Odometry, '/odom', self.__on_odom, qos)\n\n qos_camera_data = qos_profile_sensor_data\n # In case ROS_DISTRO is not foxy the QoSReliabilityPolicy is strict.\n if 'ROS_DISTRO' in os.environ and os.environ['ROS_DISTRO'] != 'foxy':\n qos_camera_data.reliability = QoSReliabilityPolicy.RELIABLE\n #self.create_subscription(Image, 'vehicle/camera', self.__on_camera_image, qos_camera_data)\n self._logger.info('Field path follower initialized')\n package_dir = get_package_share_directory('webots_ros2_suv')\n points_path = f'{package_dir}/worlds/ulstu_field_points.txt'\n points = open(points_path, 'r')\n self.current_x = 210.23121027069885\n self.current_y = 77.42130129912289\n self.current_angle = 0\n self.x_coordinates = []\n self.y_coordinates = []\n x_flag = 1\n self.index_next_point = 1\n with open(points_path, 'r') as file:\n for line in file:\n if x_flag == 1:\n self.x_coordinates.append(float(line.strip()))\n x_flag = 0\n else:\n self.y_coordinates.append(float(line.strip()))\n x_flag = 1\n\n self._logger.info(points.read(500))\n except Exception as ex:\n self._logger.error(''.join(traceback.TracebackException.from_exception(ex).format()))\n\n #def wheel_control(self):\n #try:\n #infinity = 1\n #while infinity:\n # self._logger.info(str(self.current_angle))\n\n #else:\n # end = True\n #except Exception as ex:\n # self._logger.error(''.join(traceback.TracebackException.from_exception(ex).format()))\n\n\n def euler_from_quaternion(self, x, y, z, w):\n \"\"\"\n Convert a quaternion into euler angles (roll, pitch, yaw)\n roll is rotation around x in radians (counterclockwise)\n pitch is rotation around y in radians (counterclockwise)\n yaw is rotation around z in radians (counterclockwise)\n \"\"\"\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians\n\n def __on_odom(self, message):\n try:\n #self._logger.info(f'odom x: {message.pose.pose.position.x} y: {message.pose.pose.position.y} z: {message.pose.pose.position.z}')\n (roll, pitch, yaw) = self.euler_from_quaternion(message.pose.pose.orientation.x,\n message.pose.pose.orientation.y,\n message.pose.pose.orientation.z,\n message.pose.pose.orientation.w)\n # self._logger.info(f'yaw: {yaw}')\n 
self.current_angle = yaw - ANGLE_GAP\n            self.current_x = float(message.pose.pose.position.x)\n            self.current_y = float(message.pose.pose.position.y)\n            if len(self.x_coordinates) != self.index_next_point:\n                delta = self.calculate_distance(self.current_x, self.current_y,\n                                                self.x_coordinates[self.index_next_point],\n                                                self.y_coordinates[self.index_next_point])\n\n                if (delta <= 3):\n                    self.index_next_point += 1\n                    self._logger.info(str(self.index_next_point))\n\n\n            command_message = AckermannDrive()\n            command_message.speed = 2.0\n            command_message.steering_angle = 0.0\n            calc_angle = self.calculate_angle(self.current_x, self.current_y,\n                                              self.x_coordinates[self.index_next_point],\n                                              self.y_coordinates[self.index_next_point])\n            error = self.current_angle - calc_angle\n            # wrap the heading error into [-pi, pi]; '6,28319' was a tuple typo for 6.28319\n            if error > 3.14159:\n                error -= 6.28319\n            elif error < -3.14159:\n                error += 6.28319\n            #self._logger.info(f'yaw: {yaw:.5f} curangle\" {self.current_angle:.5f} calc angle: {calc_angle:.5f}')\n            command_message.steering_angle = error * CONTROL_COEFFICIENT_DETOUR\n            set_transmission(command_message.speed / 25 + 1)\n            set_steering(command_message.steering_angle)\n\n            self.__ackermann_publisher.publish(command_message)\n        except Exception as ex:\n            self._logger.error(''.join(traceback.TracebackException.from_exception(ex).format()))\n\n    def calculate_angle(self, x1, y1, x2, y2):\n        dx = x2 - x1\n        dy = y2 - y1\n        angle = math.atan2(dy, dx) # Calculate the angle in radians\n        #angle_deg = math.degrees(angle) # Convert the angle to degrees\n        return angle\n\n    def calculate_distance(self, x1, y1, x2, y2):\n        distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n        return distance\n\n    def __on_camera_image(self, message):\n        try:\n            img = message.data\n            img = np.frombuffer(img, dtype=np.uint8).reshape((message.height, message.width, 4))\n            img = img[120:240, :]\n\n            # Segment the image by color in HSV color space\n            img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)\n            #img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n            #mask = cv2.inRange(img, np.array([50, 110, 150]), np.array([120, 255, 255]))\n            mask = cv2.inRange(img, np.array([220, 220, 220]), np.array([255, 255, 255]))\n\n            # Find the largest segmented contour\n            contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n            command_message = AckermannDrive()\n            command_message.speed = 2.0\n            command_message.steering_angle = 0.0\n\n            if contours:\n                largest_contour = max(contours, key=cv2.contourArea)\n                largest_contour_center = cv2.moments(largest_contour)\n\n                cv2.drawContours(img, largest_contour, -1, (0,255,0), 3)\n                cv2.imshow(\"img\", img)\n                if cv2.waitKey(25) & 0xFF == ord('q'):\n                    return\n\n                if largest_contour_center['m00'] != 0:\n                    center_x = int(largest_contour_center['m10'] / largest_contour_center['m00'])\n                    error = 190 - center_x\n                    command_message.steering_angle = error * CONTROL_COEFFICIENT\n\n            set_transmission(command_message.speed / 25 + 1)\n            set_steering(command_message.steering_angle)\n\n            self.__ackermann_publisher.publish(command_message)\n        except Exception as ex:\n            self._logger.error(''.join(traceback.TracebackException.from_exception(ex).format()))\n\n\ndef main(args=None):\n    try:\n        rclpy.init(args=args)\n        follower = LaneFollower()\n        #follower.wheel_control()\n        rclpy.spin(follower)\n        rclpy.shutdown()\n    except KeyboardInterrupt:\n        pass\n    except Exception:\n        print('node_gps stopped')\n    finally:\n        rclpy.shutdown()\n\n\nif __name__ == '__main__':\n
main()\n","repo_name":"ulstu/cad-self-driving","sub_path":"simulation/webots_ros2_suv/webots_ros2_suv/field_follower.py","file_name":"field_follower.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"}
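The wrap-around correction fixed in __on_odom above is usually written branch-free; a small self-check of the common modulo idiom that maps any angle error into [-pi, pi):

import math

def normalize_angle(a):
    # one expression, no if/elif needed
    return (a + math.pi) % (2 * math.pi) - math.pi

print(normalize_angle(3 * math.pi / 2))   # -1.570... (i.e. -pi/2)
print(normalize_angle(-3 * math.pi / 2))  #  1.570... (i.e.  pi/2)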
+{"seq_id":"13471059211","text":"import dwf\nimport numpy as np\nimport time\n\n\n #constants\nHZ_ACQ = 1e8\nN_SAMPLES = 8000\ntime_step = HZ_ACQ/N_SAMPLES\nfrequency = 2e6\nf1 = open(\"Buffer.csv\", \"w\")\nf2 = open(\"Spectrum.csv\", \"w\")\nf3 = open(\"Amplitude.csv\", \"w\")\n\n\nhdwf = dwf.Dwf()\n\ndwf_ao = dwf.DwfAnalogOut(hdwf)\ndwf_ao.nodeEnableSet(0, dwf_ao.NODE.CARRIER, True)\ndwf_ao.nodeFunctionSet(0, dwf_ao.NODE.CARRIER, dwf_ao.FUNC.SINE)\ndwf_ao.nodeFrequencySet(0, dwf_ao.NODE.CARRIER, frequency)\ndwf_ao.nodeAmplitudeSet(0, dwf_ao.NODE.CARRIER, 1)\ndwf_ao.configure(0, True)\n\n #set up acquisition\ndwf_ai = dwf.DwfAnalogIn(hdwf)\ndwf_ai.channelEnableSet(0, True)\ndwf_ai.channelRangeSet(0, 5.0)\ndwf_ai.acquisitionModeSet(dwf_ai.ACQMODE.SCAN_SHIFT)\ndwf_ai.frequencySet(HZ_ACQ)\ndwf_ai.bufferSizeSet(N_SAMPLES)\n\n #begin acquisition\ndwf_ai.configure(False, True)\n\n\n# HarmonicVector = np.fft.rfftfreq(N_SAMPLES, 1 / HZ_ACQ)\nstart = time.time()\nTIME = 0\nwhile TIME < 60:\n sts = dwf_ai.status(True)\n\n cValid = dwf_ai.statusSamplesValid()\n rgdSamples = dwf_ai.statusData(0, cValid)\n\n Spectrum = 2*np.abs(np.fft.rfft(rgdSamples)) / N_SAMPLES \n Harmonic = int(frequency/HZ_ACQ * N_SAMPLES)\n SignalAmplitude = Spectrum[Harmonic]\n \n print(SignalAmplitude)\n noise = np.delete(Spectrum, Harmonic)\n noiseRMS = np.sqrt(np.mean)\n SNR = 20 * np.log(SignalAmplitude/noiseRMS)\n \n f1.writelines([rgdSamples])\n f2.writelines([Spectrum])\n f3.write(SignalAmplitude + SNR)\n\n time.sleep(0.1)\n TIME = TIME + 0.1\n\n\n print(SNR)\nend = time.time()\n\nExecutionTime = end - start\nprint(ExecutionTime)\nf1.close()\nf2.close()\nf3.close()\n\n","repo_name":"mVladislavs/BCC_AD2","sub_path":"Breathing_experiment.py","file_name":"Breathing_experiment.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"941609564","text":"# --- Part One ---\ndef get_jolt_dist(data):\n data.append(0)\n data.append(max(data) + 3)\n data.sort()\n diff_1_count = 0\n diff_3_count = 0\n for i, dev in enumerate(data[:-1]):\n diff = data[i + 1] - dev\n if diff == 1:\n diff_1_count += 1\n if diff == 3:\n diff_3_count += 1\n\n return diff_1_count * diff_3_count\n\n\ntest_input = \"\"\"16\n10\n15\n5\n1\n11\n7\n19\n6\n12\n4\"\"\"\n\ntest_data = [int(l) for l in test_input.split('\\n')]\n\ntest_sol = get_jolt_dist(test_data)\n\nassert 7 * 5 == test_sol\n\nfile = open(\"input\", 'r')\ntxt_data = [int(l) for l in file.readlines()]\nfile.close()\n\nsolution_1 = get_jolt_dist(txt_data)\n\nprint(\"Part 1 solution: {}\".format(solution_1))\n","repo_name":"Aportillog/advent-of-code","sub_path":"aoc_2020/day_10/day_10.py","file_name":"day_10.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"39818495686","text":"import sys\r\ninput = lambda: sys.stdin.readline().rstrip('\\n')\r\n\r\n\r\ng = lambda: [*map(int, input().split())]\r\n\r\nfor _ in range(int(input())):\r\n a, b = g()\r\n r, q = divmod(b, a)\r\n if r > 1 and q == 0:\r\n print(1)\r\n else:\r\n print(0)","repo_name":"juwkim/boj","sub_path":"백준/Silver/25375. 아주 간단한 문제/아주 간단한 문제.py","file_name":"아주 간단한 문제.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"44993329386","text":"import gdb\n\nfrom constants import *\nfrom utils import *\nfrom wamr_types import *\n\nclass WASMFunction:\n def __init__(self, ref):\n self.ref = ref\n value = ref.cast(\n gdb.lookup_type('WASMFunctionInstance').pointer()\n )\n self.func = value.dereference()\n\n self.is_import_func = bool(self.func['is_import_func'])\n self.param_count = int(self.func['param_count'])\n self.local_count = int(self.func['local_count'])\n self.param_cell_num = int(self.func['param_cell_num'])\n self.ret_cell_num = int(self.func['ret_cell_num'])\n self.local_cell_num = int(self.func['local_cell_num'])\n self.local_offsets = self.func['local_offsets']\n self.param_types = self.func['param_types']\n self.local_types = self.func['local_types']\n\n if (self.is_import_func):\n self.import_func = self.func['u']['func_import']\n self.import_module = self.import_func['module_name']\n self.import_field = self.import_func['field_name']\n self.func_type = WASMFuncType(self.import_func['func_type'])\n else:\n self.func_type = WASMFuncType(self.func['u']['func']['func_type'])\n\n self.func_fields = [field.name for field in self.func.type.fields()]\n\n def get_max_stack_cell_num(self):\n max_local_slot = self.param_cell_num + self.local_cell_num\n if (self.is_import_func):\n return max_local_slot\n else:\n return max_local_slot + int(self.func['u']['func']['max_stack_cell_num'])\n\n def __str__(self):\n if (self.func.address == 0):\n return f'{GREEN}WASMFunctionInstance: null\\n{ENDC}'\n\n import_name = f'{self.import_module}|{self.import_field}' if self.is_import_func else \"no\"\n return f'{GREEN}WASMFunctionInstance: {hex(self.ref)}\\n' + \\\n f' * import func: {import_name}\\n' + \\\n f' * local_count: {self.local_count}\\n' + \\\n f' * ret_cell_num: {self.ret_cell_num}\\n' + \\\n f'{ENDC}\\n' + \\\n str(self.func_type)\n\nclass WASMInterpFrame:\n def __init__(self, ref):\n self.ref = ref\n value = ref.cast(\n gdb.lookup_type('WASMInterpFrame').pointer()\n )\n self.interp_frame = value.dereference()\n\n self.fast_interp = False\n\n # mode detection\n self.interp_frame_fields = [field.name for field in self.interp_frame.type.fields()]\n\n if ('ret_offset' in self.interp_frame_fields):\n self.fast_interp = True\n\n def _get_feature_description(self):\n features = ''\n features += f' * fast_interp: {\"on\" if self.fast_interp else \"off\"}\\n'\n\n return features\n\n def get_prev_frame(self):\n ref = self.interp_frame['prev_frame']\n if (ref != 0):\n return WASMInterpFrame(ref)\n\n return None\n\n def _get_operand_stack(self):\n return self.interp_frame['lp']\n\n def get_stack_addr(self, index):\n return self._get_operand_stack()[index].address\n\n def get_stack_i32(self, index):\n return self._get_operand_stack()[index]\n\n def get_stack_i64(self, index):\n addr = self.get_stack_addr(index)\n return gdb.Value(addr).cast(gdb.lookup_type('int64_t').pointer()).dereference()\n\n def get_stack_f32(self, index):\n addr = self.get_stack_addr(index)\n return gdb.Value(addr).cast(gdb.lookup_type('float').pointer()).dereference()\n\n def get_stack_f64(self, index):\n addr = self.get_stack_addr(index)\n return gdb.Value(addr).cast(gdb.lookup_type('double').pointer()).dereference()\n\n def get_stack_ref(self, index):\n addr = self.get_stack_addr(index)\n return gdb.Value(addr).cast(gdb.lookup_type('void').pointer().pointer()).dereference()\n\n def _get_conditional_info(self):\n info = ''\n if ('ret_offset' in self.interp_frame_fields):\n info += f' * ret_offset: 
{int(self.interp_frame[\"ret_offset\"])}\\n'\n if ('lp' in self.interp_frame_fields):\n info += f' * lp: {self.interp_frame[\"lp\"]}\\n'\n if ('operand' in self.interp_frame_fields):\n info += f' * operand: {self.interp_frame[\"operand\"]}\\n'\n if ('sp_bottom' in self.interp_frame_fields):\n info += f' * sp_bottom: {hex(int(self.interp_frame[\"sp_bottom\"]))}\\n'\n if ('sp_boundary' in self.interp_frame_fields):\n info += f' * sp_boundary: {hex(int(self.interp_frame[\"sp_boundary\"]))}\\n'\n if ('sp' in self.interp_frame_fields):\n info += f' * sp: {hex(int(self.interp_frame[\"sp\"]))}\\n'\n if ('csp_bottom' in self.interp_frame_fields):\n info += f' * csp_bottom: {hex(int(self.interp_frame[\"csp_bottom\"]))}\\n'\n if ('csp_boundary' in self.interp_frame_fields):\n info += f' * csp_boundary: {hex(int(self.interp_frame[\"csp_boundary\"]))}\\n'\n if ('csp' in self.interp_frame_fields):\n info += f' * csp: {hex(int(self.interp_frame[\"csp\"]))}\\n'\n\n if ('frame_ref' in self.interp_frame_fields):\n info += f' * frame_ref: {hex(int(self.interp_frame[\"frame_ref\"]))}\\n'\n\n if (int(self.interp_frame[\"function\"]) != 0):\n func = WASMFunction(self.interp_frame[\"function\"])\n\n max_cell_num = func.get_max_stack_cell_num()\n param_cell_num = func.param_cell_num\n local_cell_num = func.local_cell_num\n\n # dump operand stack content\n frame_ref_array = None\n lp_array = None\n ## Check if GC enabled\n if ('frame_ref' in self.interp_frame_fields):\n # fast interpreter\n frame_ref_array = self.interp_frame['frame_ref'].cast(\n gdb.lookup_type('uint8_t').pointer()\n )\n else:\n # classic interpreter\n try:\n func = gdb.parse_and_eval('get_frame_ref')\n if func.type.code == gdb.TYPE_CODE_FUNC:\n res = func(self.ref)\n frame_ref_array = res.cast(\n gdb.lookup_type('uint8_t').pointer()\n )\n except gdb.error:\n pass\n\n if ('operand' in self.interp_frame_fields):\n lp_array = self.interp_frame['lp'].cast(\n gdb.lookup_type('uint32_t').pointer()\n )\n else:\n lp_array = self.interp_frame['lp']\n\n if (max_cell_num > 0):\n if (frame_ref_array or lp_array):\n data = []\n value_len = 15\n header = [f'slot', f'{\"value\":^{value_len}}']\n row_seperator = f'\\t+{\"—\" * 6}+—{\"—\" * value_len}—+\\n'\n if (frame_ref_array):\n header.append('ref')\n row_seperator = f'\\t+{\"—\" * 6}+—{\"—\" * value_len}—+{\"—\" * 5}+\\n'\n\n for i in range(max_cell_num):\n row = [f'{i:^4}']\n if lp_array:\n row.append(f'{int(lp_array[i]):^{value_len}}')\n if (frame_ref_array):\n row.append(f'{int(frame_ref_array[i]):^3}')\n data.append(row)\n\n info += f' * operand stack:\\n'\n info += row_seperator\n info += f'\\t| {\" | \".join(header)} |\\n'\n info += row_seperator\n for i in range(len(data)):\n row = data[i]\n info += f'\\t| {\" | \".join([str(x) for x in row])} |'\n if (i == param_cell_num - 1):\n info += ' <--- param end'\n if (i == param_cell_num + local_cell_num - 1):\n info += ' <--- local end'\n if (i == param_cell_num + local_cell_num):\n info += ' <--- dynamic space start'\n if ('sp' in self.interp_frame_fields and i == int(self.interp_frame['sp'])):\n info += f'{PURPLE} <--- sp{GREEN}'\n if ('ret_offset' in self.interp_frame_fields and i == int(self.interp_frame['ret_offset'])):\n info += f'{PURPLE} <--- ret_offset{GREEN}'\n info += '\\n'\n info += row_seperator\n\n return info\n\n def __str__(self) -> str:\n return f'{GREEN}WASMInterpFrame: {hex(self.ref)}\\n' + \\\n self._get_feature_description() + \\\n f' * prev_frame: {hex(self.interp_frame[\"prev_frame\"])}\\n' + \\\n f' * function: 
{hex(self.interp_frame[\"function\"])}\\n' + \\\n f' * ip: {hex(self.interp_frame[\"ip\"])}\\n' + \\\n self._get_conditional_info() + \\\n f'{ENDC}\\n'\n\nclass WASMExecEnv:\n def __init__(self, ref):\n self.ref = ref\n self.exec_env = ref.cast(\n gdb.lookup_type('wasm_exec_env_t')\n ).dereference()\n\n self.prev = self.exec_env['prev']\n self.next = self.exec_env['next']\n\n self.interp_mode = 'classic'\n self.hw_bound_check = False\n self.gc_enabled = False\n self.fastjit_enabled = False\n self.thread_mgr = False\n self.aot_enabled = False\n self.source_debugger = False\n self.wasm_stack_size = int(self.exec_env['wasm_stack_size'])\n self.thread = hex(int(self.exec_env['handle']))\n self.suspend_flags = hex(int(self.exec_env['suspend_flags']['flags']))\n\n cur_frame_val = self.exec_env['cur_frame']\n if (cur_frame_val != 0):\n self.current_frame = WASMInterpFrame(cur_frame_val)\n else:\n self.current_frame = None\n\n # mode detection\n exec_env_fields = [field.name for field in self.exec_env.type.fields()]\n\n if ('block_addr_cache' not in exec_env_fields):\n self.interp_mode = 'fast'\n if ('jmpbuf_stack_top' in exec_env_fields):\n self.hw_bound_check = True\n if ('cur_local_object_ref' in exec_env_fields):\n self.gc_enabled = True\n if ('jit_cache' in exec_env_fields):\n self.fastjit_enabled = True\n if ('cluster' in exec_env_fields):\n self.thread_mgr = True\n if ('argv_buf' in exec_env_fields):\n self.aot_enabled = True\n if ('current_status' in exec_env_fields):\n self.source_debugger = True\n\n def get_cur_frame(self):\n return self.current_frame\n\n def _get_dynamic_info(self):\n frame_num = 0\n info = ''\n\n cur_frame = self.current_frame\n while (cur_frame):\n frame_num += 1\n cur_frame = cur_frame.get_prev_frame()\n\n info += f' * frame_count: {frame_num}\\n'\n\n return info\n\n def _get_feature_description(self):\n features = ' * features:\\n'\n features += f' - interp_mode: {self.interp_mode}\\n'\n features += f' - hw_bound_check: {\"on\" if self.hw_bound_check else \"off\"}\\n'\n features += f' - gc: {\"on\" if self.gc_enabled else \"off\"}\\n'\n features += f' - fastjit: {\"on\" if self.fastjit_enabled else \"off\"}\\n'\n features += f' - thread_mgr: {\"on\" if self.thread_mgr else \"off\"}\\n'\n features += f' - aot: {\"on\" if self.aot_enabled else \"off\"}\\n'\n features += f' - source_debugger: {\"on\" if self.source_debugger else \"off\"}\\n'\n\n return features\n\n def __str__(self) -> str:\n return f'{GREEN}WASMExecEnv: {hex(self.ref)}\\n' + \\\n f' * prev: {hex(self.prev)}\\n' + \\\n f' * next: {hex(self.next)}\\n' + \\\n f' * wasm_stack_size: {self.wasm_stack_size}\\n' + \\\n f' * thread: {self.thread}\\n' + \\\n f' * suspend_flags: {self.suspend_flags}\\n' + \\\n f' * current_frame: {self.current_frame.ref if self.current_frame else \"null\"}\\n' + \\\n self._get_dynamic_info() + \\\n self._get_feature_description() + \\\n f'{ENDC}\\n'\n","repo_name":"intel/Wasmnizer-ts","sub_path":"scripts/gdb/wamr_exec_env.py","file_name":"wamr_exec_env.py","file_ext":"py","file_size_in_byte":12165,"program_lang":"python","lang":"en","doc_type":"code","stars":180,"dataset":"github-code","pt":"76"}
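Every class in this file leans on the same gdb Python API move: take a raw address, cast it to a typed pointer, dereference, and index struct fields by name. Isolated below (this only runs inside a gdb session; 'WASMExecEnv' and 'cur_frame' are the type and field names assumed from the code above):

import gdb

def read_cur_frame(addr):
    # address -> typed pointer -> struct value, then field access by name
    exec_env_t = gdb.lookup_type('WASMExecEnv').pointer()
    exec_env = gdb.Value(addr).cast(exec_env_t).dereference()
    return exec_env['cur_frame']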
+{"seq_id":"8031013170","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\npage = requests.get('http://www.cdiscount.com/search/10/acer+aspire.html#_his_')\n\nsoup = BeautifulSoup(page.text)\n\nprdtBloc = soup.find_all(\"div\", class_='prdtBloc')\n\n\nprdtDATA = {}\nfor prdt in prdtBloc:\n prdt = {}\n prdt['name'] = prdt.find(\"div\", class_='prdtBTit').get_ext()\n prdt['url'] = prdt.find(\"a\").get('href')\n","repo_name":"rachidalili/MS-BGD2015","sub_path":"Maxime_Kubryk/lesson3/exo_cc_lesson3.py","file_name":"exo_cc_lesson3.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"}
+{"seq_id":"44307193642","text":"import network\nimport mnist\n\n( training_data,validation_data,test_data,training_size, validation_size, test_size )= mnist.load_data_wrapper()\nroy_net = network.neuralnet([784,50,10])\n\n\n#print(test_data)\nroy_net.SGD(training_data,30,15,1)\n\n\nval = roy_net.evaluate(validation_data)\nprint(\"Result of validation : {0} pictures identified correctly out of {1} \".format(val,validation_size))\ntest = roy_net.evaluate(test_data)\nprint(\"Result of testing : {0} pictures identified correctly out of {1} \".format(test,test_size))","repo_name":"tathagatoroy/ML-algorithms","sub_path":"feedforwardnn/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"70790141687","text":"from flask import Flask, render_template, request, redirect, url_for, flash, Response, session, abort\nfrom flask_mysqldb import MySQL\nfrom flask_wtf.csrf import CSRFProtect # Para el token de protección\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\nfrom flask_wtf import FlaskForm\nfrom wtforms import *\nfrom wtforms.validators import *\n#libreria para crear random en StringAleatorio\nfrom random import sample\nimport openpyxl\nfrom werkzeug.utils import secure_filename\nimport cv2\nimport datetime, time\nimport os\nimport numpy as np\nfrom threading import Thread\nimport mediapipe as mp\nimport pandas as pd\nimport pickle\n\nfrom base64 import b64encode\n\n# Modelos\nfrom models.ModelUser import ModelUser\n\n# Entidades\nfrom models.entities.User import User\n\nglobal capture,rec_frame, grey, switch, neg, face, rec, out \ncapture=0\ngrey=0\nneg=0\nface=0\nswitch=1\nrec=0\n\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\nmp_drawing_styles = mp.solutions.drawing_styles\n\napp = Flask(__name__)\n\napp.secret_key = 'B!1w8NAt1T^%kvhUI*S^'\ncsrf = CSRFProtect(app)\n\ndb = MySQL(app)\nlogin_manager_app = LoginManager(app)\n\napp.config['DEBUG'] = True\napp.config['MYSQL_HOST'] = 'lenguajeparatodoos.mariadb.database.azure.com'\napp.config['MYSQL_USER'] = 'administrador@lenguajeparatodoos'\napp.config['MYSQL_PASSWORD'] = 'Lenguaje123'\napp.config['MYSQL_DB'] = 'lenguajeparatodos'\napp.config['MYSQL_PORT'] = 3306\n\n# app.config['DEBUG'] = True\n# app.config['MYSQL_HOST'] = 'localhost'\n# app.config['MYSQL_USER'] = 'root'\n# app.config['MYSQL_PASSWORD'] = 'clave'\n# app.config['MYSQL_DB'] = 'lenguajeparatodos'\n# app.config['MYSQL_PORT'] = 3306\n\n \n\ndef image_processed(hand_img):\n #BGR to RGB\n img_rgb = cv2.cvtColor(hand_img, cv2.COLOR_BGR2RGB)\n\n img_flip = cv2.flip(img_rgb, 1)\n\n\n hands = mp_hands.Hands(static_image_mode=True,\n max_num_hands=1,\n min_detection_confidence=0.7) \n\n output = hands.process(img_flip)\n\n hands.close()\n\n try:\n data = output.multi_hand_landmarks[0]\n data = str(data)\n\n data = data.strip().split('\\n')\n\n garbage = ['landmark {', ' visibility: 0.0', ' presence: 0.0', '}']\n\n without_garbage = []\n\n for i in data:\n if i not in garbage:\n without_garbage.append(i)\n clean = []\n\n for i in without_garbage:\n i = i.strip()\n clean.append(i[2:])\n\n for i in range(0, len(clean)):\n clean[i] = float(clean[i])\n return (clean)\n except:\n return(np.zeros([1,63], dtype=int)[0])\n\ndef gen_frames(): # generate frame by frame from camera\n camera = cv2.VideoCapture(0)\n with open('model.pkl', 'rb') as f:\n svm = pickle.load(f)\n global out, capture,rec_frame\n while True:\n success, frame = camera.read() \n data = image_processed(frame)\n data = np.array(data)\n y_pred = svm.predict(data.reshape(-1, 63))\n print(y_pred)\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n org = (50, 100)\n\n fontScale = 3\n\n color = (255, 0, 0)\n\n thickness = 5\n\n frame = cv2.putText(frame, str(y_pred[0]),\n org, font, fontScale, color, thickness, cv2.LINE_AA)\n\n if success:\n if(capture):\n capture=0\n now = datetime.datetime.now()\n p = os.path.sep.join(['shots', \"shot{}.png\".format(str(now).replace(\":\",''))])\n cv2.imwrite(p, frame)\n try:\n ret, buffer = cv2.imencode('.jpg', frame)\n frame = buffer.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n except Exception as e:\n pass\n else:\n pass\n\n\nclass LoginForm(FlaskForm):\n 
username = StringField('username', validators=[InputRequired(), Length(min=1, max=30)])\n password = PasswordField('password', validators=[InputRequired(), Length(min=1, max=30)])\n\nclass LoginRegisterForm(FlaskForm):\n rut = StringField('rut', validators=[InputRequired()])\n username = StringField('username', validators=[InputRequired()])\n password = PasswordField('password', validators=[InputRequired()])\n comuna = StringField('comuna', validators=[InputRequired()])\n nombre = StringField('nombre', validators=[InputRequired()])\n apellido = StringField('apellidos', validators=[InputRequired()])\n tipoUsuario = StringField('tipoUsuario', validators=[InputRequired()])\n telefono = StringField('telefono', validators=[InputRequired()])\n direccion = StringField('direccion', validators=[InputRequired()])\n correo = StringField('correo', validators=[InputRequired()])\n tiposexo = StringField('tiposexo', validators=[InputRequired()])\n###\nclass UserForm(FlaskForm):\n nombre = StringField('Nombre', validators=[InputRequired(), Length(min=3, max=25)])\n apellidos = StringField('Apellido', validators=[InputRequired(), Length(min=3, max=25)])\n username = StringField('Username', validators=[InputRequired(), Length(min=3, max=25)])\n password = PasswordField('Contraseña', validators=[InputRequired(),])\n correo = EmailField('Correo', validators=[InputRequired()])\n imagen = FileField('Sube tu foto de perfil', validators=[InputRequired()])\n\n\n#Página tareas\n@app.route('/tareas', methods=['POST', 'GET'])\n@login_required\ndef tareas():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n abort(401)\n elif tipoUsuario == 2:\n if request.method == 'POST':\n contenido = request.form['content']\n creado_por = current_user.id\n print(contenido, creado_por)\n try:\n cur = db.connection.cursor()\n cur.execute(\"INSERT INTO tabla_tareas (contenido, creado_por) VALUES (%s, %s)\", [contenido, creado_por])\n db.connection.commit()\n return redirect('/tareas')\n except:\n return 'No se ha podido agregar la tarea'\n else:\n cur = db.connection.cursor()\n cur.execute('SELECT * FROM tabla_tareas WHERE creado_por = {}'.format(current_user.id))\n # SELECT * FROM tabla_tareas WHERE creado_por = 19;\n # WHERE id = {}'.format(id)\n tasks = cur.fetchall()\n return render_template('tarea.html', tasks=tasks)\n elif tipoUsuario == 3:\n abort(401)\n \n\n#Eliminar Tareas\n@app.route('/delete/', methods=['POST', 'GET'])\n@login_required\ndef eliminar_tarea(id):\n try:\n cur = db.connection.cursor()\n cur.execute(\"CALL EliminarTarea(%s)\", (id,))\n db.connection.commit()\n return redirect('/tareas')\n except:\n return 'No se ha podido eliminar la tarea'\n \n#Redirigir a página perfil\n@app.route('/perfil', methods=['GET'])\n@login_required\ndef perfil():\n return render_template('perfil.html')\n\n#Método para crear nombre aleatorio de la imagen\ndef stringAleatorio():\n #Generando string aleatorio\n string_aleatorio = \"0123456789abcdefghijklmnopqrstuvwxyz_\"\n longitud = 20\n secuencia = string_aleatorio.upper()\n resultado_aleatorio = sample(secuencia, longitud)\n string_aleatorio = \"\".join(resultado_aleatorio)\n return string_aleatorio\n\n#Actualizar Perfil unitario\n@app.route('/perfil/update/', methods=['GET', 'POST'])\n@login_required\ndef updatePerfil(id):\n cur = db.connection.cursor()\n form = UserForm()\n cur.execute('SELECT * FROM usuario WHERE id = {}'.format(id))\n form_update = cur.fetchall()\n if request.method == 'POST' and form.validate_on_submit:\n nombre = 
form.nombre.data\n apellidos = form.apellidos.data\n username = form.username.data\n password = form.password.data\n correo = form.correo.data\n comuna = request.form['comuna']\n file = form.imagen.data\n basepath = os.path.dirname (__file__) #La ruta donde se encuentra el archivo actual\n filename = secure_filename(file.filename) #Nombre original del archivo\n #capturando extensión del archivo ejemplo: (.png, .jpg, .pdf ...etc)\n extension = os.path.splitext(filename)[1]\n nuevoNombreFile = stringAleatorio() + extension\n\n #Guardar Archivo en la carpeta img_perfiles que se encuentra en static\n upload_path = os.path.join (basepath, 'static/img_perfiles', nuevoNombreFile) \n file.save(upload_path)\n print('Registro: ' + nombre, apellidos, username, password, correo, comuna, nuevoNombreFile)\n try:\n cur.execute(\"\"\"\n UPDATE usuario\n SET nombre = %s,\n apellidos = %s,\n username = %s,\n password = %s,\n correo = %s,\n comuna = %s,\n imagen = %s\n WHERE id = %s\n \"\"\", (nombre, apellidos, username, password, correo, comuna, nuevoNombreFile, id))\n db.connection.commit()\n flash(\"Info actualizada correctamente\")\n return redirect(url_for('perfil'))\n except:\n flash(\"Error, datos no han podido ser modificados\")\n return render_template(\"perfil.html\", form=form, )\n else:\n return render_template('update.html', form=form, form_update=form_update[0], )\n\n@login_manager_app.user_loader\ndef load_user(id):\n return ModelUser.get_by_id(db, id)\n\n@app.route('/')\ndef pindex():\n return render_template('index.html')\n\n@app.route('/index')\ndef index():\n return render_template('index.html')\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if request.method == 'POST':\n user = User(0, 1, form.username.data,\n form.password.data, 4, 5, 6, 7, 8, 9, 10, 11, 12)\n logged_user = ModelUser.login(db, user)\n if logged_user != None:\n if logged_user.password:\n session['tipoUsuario'] = logged_user.tipoUsuario\n login_user(logged_user)\n\n if session['tipoUsuario'] == 1:\n return redirect(url_for('menuAdministrador'))\n elif session['tipoUsuario'] == 2:\n return redirect(url_for('menuDocente'))\n elif session['tipoUsuario'] == 3:\n return redirect(url_for('menuEstudiante'))\n else:\n flash(\"Clave Incorrecta...\")\n return render_template('auth/login.html', form=form)\n else:\n #print(\"Usuario no encontrado\")\n flash(\"Usuario no encontrado...\")\n return render_template('auth/login.html', form=form)\n else:\n return render_template('auth/login.html', form=form) \n\n@app.route('/aprende')\n@login_required\ndef aprende():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n return \" No tiene acceso a este modulo
\"\n elif tipoUsuario == 2:\n return render_template('aprende.html')\n elif tipoUsuario == 3:\n return render_template('aprende.html')\n # return render_template('error401', 400)\n\n#Base de la cámara\n@app.route('/video_feed')\ndef video_feed():\n return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')\n\n#Prender y Apagar cámara\n@app.route('/requests', methods=['POST','GET'])\ndef tasks():\n global switch, camera\n if request.method == 'POST':\n if request.form.get('stop') == 'Stop/Start':\n if(switch==1):\n switch=0\n camera.release()\n cv2.destroyAllWindows()\n #flash(\"Cámara Apagada...\")\n else:\n camera = cv2.VideoCapture(0)\n switch=1\n elif request.method == 'GET':\n return redirect(url_for('aprende'))\n return redirect(url_for('aprende'))\n\n#Página visualizar usuarios\n@app.route('/edit/')\n@login_required\ndef Edit():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n cur = db.connection.cursor()\n cur.execute(\n 'SELECT U.id, U.rut, U.nombre, U.apellidos, C.nombre, t.tipoUsuario FROM usuario U INNER JOIN comunas C ON U.comuna = C.codCom INNER JOIN tipousuario t ON U.tipousuario = t.codtipoUsuario;')\n data = cur.fetchall()\n print(type(data))\n return render_template('edit.html', usuarios=data)\n elif tipoUsuario == 2:\n return redirect(url_for('menuDocente'))\n elif tipoUsuario == 3:\n return redirect(url_for('menuEstudiante'))\n \n\n \n \n#Agregar Usuarios\n@app.route('/agregarUsuario', methods=['GET', 'POST'])\n@login_required\ndef agregarUsuario():\n\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM tipousuario\")\n tipoUsuario = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM comunas\")\n comunas = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM tiposexo\")\n tipoSexo = cur.fetchall()\n\n if request.method == 'POST':\n rut = request.form['rut']\n username = request.form['username']\n password = request.form['password']\n comuna = request.form['comuna']\n nombre = request.form['nombre']\n apellidos = request.form['apellido']\n tipoUsuario = request.form['tipoUsuario']\n telefono = request.form['telefono']\n direccion = request.form['direccion']\n correo = request.form['correo']\n tipoDeSexo = request.form['tipoSexo']\n imagen = \"imagen.png\"\n print('Registro' + rut, username,\n password, comuna, nombre, apellidos, tipoUsuario, telefono, direccion, correo, tipoDeSexo, imagen)\n try:\n cur = db.connection.cursor()\n cur.execute(\"CALL AgregarUsuarioI(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (rut, username,\n password, comuna, nombre, apellidos, tipoUsuario, telefono, direccion, correo, tipoDeSexo, imagen))\n db.connection.commit()\n flash('Usuario agregado')\n return redirect('/agregarUsuario')\n except:\n return 'No se ha podido agregar el usuario'\n else: \n return render_template('agregarUsuario.html', tipoUsuario= tipoUsuario, comunas = comunas, tipoSexo = tipoSexo)\n elif tipoUsuario == 2:\n return redirect(url_for('menuDocente'))\n elif tipoUsuario == 3:\n return redirect(url_for('menuEstudiante'))\n\n#Agregar Usuario simple (Excel)\n@app.route('/agregarUsuarioFacil', methods=['GET', 'POST'])\n@login_required\ndef agregarUsuarioFacil():\n print('Registro: ')\n\n if request.method == 'POST':\n tipoDeUsuario = request.form['tipoUsuario']\n # Script para archivo\n file = request.files['archivo']\n # La ruta donde se encuentra el archivo 
actual\n basepath = os.path.dirname(__file__)\n # Nombre original del archivo\n filename = secure_filename(file.filename)\n\n # capturando extensión del archivo ejemplo: (.png, .jpg, .pdf ...etc)\n extension = os.path.splitext(filename)[1]\n print(extension)\n nuevoNombreFile = stringAleatorio() + extension\n print(nuevoNombreFile)\n\n upload_path = os.path.join(\n basepath, 'static/archivos', nuevoNombreFile)\n file.save(upload_path)\n\n df = pd.read_excel(upload_path)\n\n for row, datos in df.iterrows():\n rut = str(datos['Rut'])\n nombre = str(datos['Nombre'])\n apellidos = str(datos['Apellido'])\n username = str(datos['Username'])\n password = str(datos['Password'])\n comuna = int(datos['Comuna'])\n tipoUsuario = tipoDeUsuario\n telefono = str(datos['Telefono'])\n direccion = str(datos['Direccion'])\n correo = str(datos['Correo'])\n tipoSexo = int(datos['Sexo'])\n imagen = str(datos['Imagen'])\n cur = db.connection.cursor()\n cur.execute(\"CALL AgregarUsuarioI(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (rut, username,\n password, comuna, nombre, apellidos, tipoUsuario, telefono, direccion, correo, tipoSexo, imagen))\n db.connection.commit()\n print(\"Usuario agregado\")\n #os.remove(\"static/archivos/nuevoNombreFile\")\n flash(\"Usuario Agregado\")\n return redirect(url_for('agregarUsuario'),)\n\n#Borrar usuario\n@app.route('/delete/')\n@login_required\ndef delete_user(id):\n # flash(id)\n cur = db.connection.cursor()\n cur.execute(\"SELECT tipoUsuario FROM usuario where id=(%s)\", (id,))\n data = cur.fetchall()\n tipoUsuario = data[0]\n\n if (tipoUsuario == (1,)):\n print(\"Administrador\")\n flash(\"Se Eliminó un administrador\")\n cur.execute(\"CALL EliminarUsuarioA_U(%s)\", (id,))\n db.connection.commit()\n elif (tipoUsuario == (2,)):\n flash(\"Se Eliminó un profesor\")\n cur.execute(\"CALL EliminarUsuarioP_U(%s)\", (id,))\n db.connection.commit()\n else:\n flash(\"Otro Usuario\")\n cur.execute(\"SELECT tipoUsuario FROM usuario WHERE id=(%s)\", (id,))\n db.connection.commit()\n return redirect(url_for('Edit'))\n\n\n@app.route('/menuAdministrador')\n@login_required\ndef menuAdministrador():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n return render_template('menuAdministrador.html')\n elif tipoUsuario == 2:\n return render_template('menuDocente.html')\n elif tipoUsuario == 3:\n return render_template('menuEstudiante.html')\n \n\n@app.route('/menuDocente')\n@login_required\ndef menuDocente():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n return render_template('menuAdministrador.html')\n elif tipoUsuario == 2:\n return render_template('menuDocente.html')\n elif tipoUsuario == 3:\n return render_template('menuEstudiante.html')\n\n@app.route('/menuEstudiante')\n@login_required\ndef menuEstudiante():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n return render_template('menuAdministrador.html')\n elif tipoUsuario == 2:\n return render_template('menuDocente.html')\n elif tipoUsuario == 3:\n return render_template('menuEstudiante.html')\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return render_template('index.html')\n\n@app.route('/faq')\ndef faq():\n return render_template('preguntas.html') \n\n@app.route('/nosotros')\ndef nosotros():\n return render_template('nosotros.html')\n\n\n#######\n#Registro básico (que esta en inicio)\n@app.route('/registro', methods = ['GET', 'POST'])\ndef registro():\n form=LoginRegisterForm()\n\n cur = 
db.connection.cursor()\n cur.execute(\"SELECT * FROM tipousuario WHERE codTipoUsuario > 1 AND codTipoUsuario <= 3\")\n tipoUsuario = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM comunas\")\n comunas = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM tiposexo\")\n tipoSexo = cur.fetchall()\n \n # if request.method == 'POST':\n if request.method == 'POST':\n rut = form.rut.data\n username = form.username.data\n password = form.password.data\n comuna = request.form['comuna']\n nombre = form.nombre.data\n apellidos = form.apellido.data\n tipoUsuario = request.form['tipoUsuario']\n telefono = form.telefono.data\n direccion = form.direccion.data\n correo = form.correo.data\n tipoSexo = request.form['tipoSexo']\n\n try:\n cur = db.connection.cursor()\n cur.execute(\"CALL AgregarUsuarioI(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (rut, username, password, comuna, nombre, apellidos, tipoUsuario, telefono, direccion, correo, tipoSexo, \"imagen.png\"))\n db.connection.commit()\n print(\"Usuario agregado\")\n flash(\"Usuario Agregado\")\n return redirect(url_for('registro'),)\n except:\n return ' no se ha podido agregar el usuario '\n return render_template('registro.html', form=form, tipoUsuario=tipoUsuario, comunas=comunas, tipoSexo=tipoSexo)\n\n#Envia Id de el usuario a actualizar\n@app.route('/edit/', methods=['GET', 'POST'])\n@login_required\ndef edit_select(id):\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM tipousuario\")\n tipoUsuario = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM comunas\")\n comunas = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM tiposexo\")\n tipoSexo = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute('SELECT * FROM usuario WHERE id = {}'.format(id))\n data = cur.fetchall()\n \n if request.method == 'POST':\n # rut = request.form['rut']\n username = request.form['username']\n password = request.form['password']\n comuna = request.form['comuna']\n nombre = request.form['nombre']\n apellidos = request.form['apellido']\n tipoUsuario = request.form['tipoUsuario']\n telefono = request.form['telefono']\n direccion = request.form['direccion']\n correo = request.form['correo']\n tipoSexo = request.form['tipoSexo']\n print('Registro'+ username, password, comuna, nombre, apellidos, tipoUsuario,\n telefono, direccion, correo, tipoSexo)\n try:\n cur = db.connection.cursor()\n cur.execute(\"\"\"\n UPDATE usuario\n SET username = %s,\n password = %s,\n comuna = %s,\n nombre = %s,\n apellidos = %s,\n tipoUsuario = %s,\n telefono = %s,\n direccion = %s,\n correo = %s,\n tipoSexo = %s\n WHERE id = %s\n \"\"\", (username, password, comuna, nombre, apellidos, tipoUsuario, telefono, direccion, correo, tipoSexo, id))\n db.connection.commit()\n flash('Usuario actualizado con exito')\n return redirect(url_for('Edit'))\n except:\n flash('El usuario no se ha podido actualizar')\n return redirect(url_for('Edit'),)\n return render_template('edit-contact.html', usuarios = data[0], tipoUsuario=tipoUsuario, comunas=comunas, tipoSexo=tipoSexo)\n\n#Página de Actualizar Perfil\n# @app.route('/update/', methods=['POST'])\n# @login_required\n# def update(id):\n# if request.method == 'POST':\n# username = request.form['username']\n# password = request.form['password']\n# comuna = request.form['comuna']\n# cur = db.connection.cursor()\n# cur.execute(\"\"\"\n# UPDATE usuario\n# SET username = %s,\n# password = %s,\n# comuna = %s\n# WHERE id = %s\n# \"\"\", 
(username, password, comuna, id))\n# db.connection.commit()\n# return redirect(url_for('Edit'))\n\n#Ruta Diccionario\n@app.route('/diccionario')\n@login_required\ndef diccionario():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n return render_template('menuAdministrador.html')\n elif tipoUsuario == 2:\n cur = db.connection.cursor()\n cur.execute(\n 'SELECT * FROM diccionario;')\n data = cur.fetchall()\n print(type(data))\n return render_template('diccionario.html', usuarios=data)\n elif tipoUsuario == 3:\n return render_template('menuEstudiante.html')\n \n\n#Ruta Agregar Diccionario\n@app.route('/agregarDiccionario', methods=['POST'])\n@login_required\ndef agregarDiccionario():\n if request.method == 'POST':\n gesto = request.form['gesto']\n definicion = request.form['definicion']\n fuente = request.form['fuente']\n frase = request.form['frase']\n file = request.files['imagen']\n # # La ruta donde se encuentra el archivo actual\n basepath = os.path.dirname(__file__)\n #Nombre original del archivo\n filename = secure_filename(file.filename)\n # capturando extensión del archivo ejemplo: (.png, .jpg, .pdf ...etc)\n extension = os.path.splitext(filename)[1]\n nuevoNombreFile = stringAleatorio() + extension\n\n # Guardar Archivo en la carpeta img_perfiles que se encuentra en static\n upload_path = os.path.join(basepath, 'static/img_diccionario', nuevoNombreFile)\n file.save(upload_path)\n usuario = request.form['submit']\n print(gesto,definicion,fuente, frase,nuevoNombreFile, usuario)\n cur = db.connection.cursor()\n cur.execute(\"CALL AgregarGestoI(%s,%s,%s,%s,%s,%s)\", (gesto,nuevoNombreFile,definicion,frase,fuente,usuario))\n db.connection.commit()\n #flash\n return redirect(url_for('diccionario'))\n else:\n #flash\n return redirect(url_for('diccionario'))\n\n#Visualizar Contenido\n@app.route('/diccionario/', methods=['POST', 'GET'])\n@login_required\ndef show_content(id):\n cur = db.connection.cursor()\n cur.execute('SELECT d.idDiccionario,d.palabra,d.imagen,d.descripcion,d.frase,t.fuente,d.creadoPor FROM diccionario d JOIN tipoFuente t ON d.tipoFuente = t.idFuente WHERE idDiccionario= %s',(id,))\n data = cur.fetchall()\n return render_template('show_content.html', palabras=data[0])\n\n#Errores\n#Error 404, página no existente\n@app.errorhandler(404)\ndef page_not_found(err):\n return render_template(\"page_not_found.html\"), 404\n\n#Error 401, Unauthorized\n@app.errorhandler(401)\ndef unauthorized(err):\n return render_template(\"unauthorized.html\"), 401\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"SebastianOrtegaCL/lenguajeparatodospy","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":26424,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
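Two details in the app.py record above are worth a sketch. The routes that take an id (`/delete/`, `/edit/`, `/diccionario/`) declare `def ...(id)` but their decorators have lost the angle-bracket URL converter, almost certainly stripped during extraction, and `edit_select` builds one statement with `'SELECT * FROM usuario WHERE id = {}'.format(id)`, an injection vector the rest of the file avoids. A hedged sketch of the intended shape, reusing the file's `app`, `db`, templates, and `login_required`; the route path, function name, and imports here are illustrative assumptions:

from flask import abort, render_template
from flask_login import login_required  # assumed source of the decorator used above

@app.route('/usuario/<int:user_id>')  # <int:...> converter restored so Flask parses the path segment
@login_required
def ver_usuario(user_id):  # hypothetical route, mirrors edit_select's lookup
    cur = db.connection.cursor()
    # %s placeholders let the driver escape the value; never splice request
    # data into the SQL string with str.format().
    cur.execute("SELECT * FROM usuario WHERE id = %s", (user_id,))
    data = cur.fetchall()
    cur.close()
    if not data:
        abort(404)
    return render_template('edit-contact.html', usuarios=data[0])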
+{"seq_id":"28160640799","text":"from sdPy.functionDefinitions import make2d\nfrom UI.objectsTable import Ui_MainWindow as objectsTableUI\nfrom PySide2.QtWidgets import QGraphicsItem, QMainWindow,QHeaderView, QTableWidgetItem,QComboBox\nfrom numpy import array\nimport re\nfrom sdPy.segmentMethods import segPlotData2\nfrom sdPy.loadMethods import loadPlotData2, loadPlotData4\nfrom sdPy.supportMethods import supportPlotData2\nfrom PySide2.QtGui import QPainterPath, QPen,QColor\nfrom PySide2.QtCore import Qt,QPointF\nsegmentColumns={\n 0:'Name',\n 1:'P1',\n 2:'P3',\n 3:'P2',\n 4:'youngsModulus',\n 5:'shearModulus',\n 6:'area',\n 7:'I',\n 8:'shapeFactor',\n 9:'density',\n 10:'alpha',\n 11:'type'}\nloadColumns={\n 0:'Name',\n 1:'degree',\n 2:'peak',\n 3:'parentSegment',\n 4:'P1',\n 5:'P3',\n 6:'normal',\n }\nsupportColumns={\n 0:'Name',\n 1:'type',\n 2:'location',\n 3:'normal',\n 4:'settlement'\n }\ncolumnTypes={0:segmentColumns,1:loadColumns,2:supportColumns}\ntableClass={0:'segment',1:'load',2:'support'}\neditableitems=['P1','P2','P3','peak','normal','location','degree','type','parentSegment']\n\ndef editObjectsTable(self):\n pass\n\ndef addDataToTable(self,tableIndex,name,values):\n table=self.objectTables[tableIndex]\n rows = table.rowCount()\n columns = table.columnCount()\n table.insertRow(rows)\n table.setItem(rows,0,QTableWidgetItem(name))\n columnHeaders=columnTypes[tableIndex]\n for i in range(1,columns):\n table.setItem(rows,i,QTableWidgetItem(str(values[columnHeaders[i]])))\n if tableIndex==1:\n combo=QComboBox()\n combo.addItems(reversed(list(self.df[(self.df['Class']=='segment')&(self.df['Flag']==True)]['Name'])))\n combo.currentIndexChanged.connect(lambda:editObjectsTable(self))\n combo.setCurrentText(self.df[(self.df['Graphitem']==self.parent)]['Name'].iloc[0])\n table.setCellWidget(rows,3,combo) \n\n\ndef editMembers(self,tab): \n try:\n currentItem=self.objectTables[tab].currentItem()\n self.graphicsView.setFocus()\n if currentItem:\n row=currentItem.row()\n column=currentItem.column()\n currentText=currentItem.text()\n columnName=columnTypes[tab][column]\n data=self.df[(self.df['Class']==tableClass[tab]) & (self.df['Flag']==True)].iloc[row,2]\n index= self.df[(self.df['Class']==tableClass[tab]) & (self.df['Flag']==True)].index[0]\n\n if columnName not in['Name','parentSegment']:\n if columnName in ['P1','P2','P3','normal','location','settlement']: \n currentText=re.sub('[\\[\\]]','',currentText)\n currentText=re.split(',| ',currentText)\n while '' in currentText:currentText.remove('')\n if len(currentText)==2:\n currentText=array(currentText,dtype=float) \n else: \n if columnName=='normal':\n currentText=array([0]+[currentText[0]],dtype=float)\n else:\n currentText=array([currentText[0]]+[0],dtype=float) \n\n elif columnName=='type':\n currentText=currentText.capitalize()\n else:\n currentText=float(currentText)\n else:\n if currentText not in list(self.df['Name']):\n self.df.iat[index,0]=currentText\n else:\n raise NameError \n if columnName=='parentSegment':\n combo=self.ee.loadsTable.cellWidget(row,column) \n currentText=combo.currentText()\n data[columnName]=self.df[self.df['Name']==currentText].iloc[0]['Robject']\n data=make2d(list(data.values()))\n self.ee.loadsTable.setItem(row,4,QTableWidgetItem(data['P1']))\n self.ee.loadsTable.setItem(row,5,QTableWidgetItem(data['P3']))\n self.ee.loadsTable.setItem(row,6,QTableWidgetItem(data['normal']))\n self.ee.loadsTable.setItem(row,2,QTableWidgetItem(data['peak']))\n else:\n # prevData=data[columnName]\n 
data[columnName]=currentText\n data=make2d(list(data.values()))\n self.df.iat[index,2]=data\n\n print(row,column)\n if str(self.prevData)==str(data[columnName]):\n return\n self.prevData=data[columnName]\n \n self.objectTables[tab].setItem(\n row,column,QTableWidgetItem(str(currentText))\n )\n if columnName in editableitems:\n if tab==0:\n Rsegment = data \n data=segPlotData2(Rsegment['type'],self.rrts(Rsegment['P1']),\n self.rrts(Rsegment['P3']),scale=1,P2=self.rrts(Rsegment['P2']),no=self.NoOfPointsInCurvedSegments)\n pen=self.pen\n\n elif tab==2:\n Rsupport=data\n data=supportPlotData2(Rsupport['type'] , self.rrts(Rsupport['location']),1/(self.scale*50),Rsupport['normal'])\n color=self.supportColors[Rsupport['type']]\n self.supportPen=QPen(QColor(*color),1.5)\n pen=self.supportPen\n\n else:\n Rload=data\n ps=data['parentSegment']\n pen=self.loadPen\n\n if Rload['degree']> -3:\n data=loadPlotData2(self.rrts(Rload['P1']),self.rrts(Rload['P3']),self.rrts(ps['P1']),self.rrts(ps['P3']),\n self.rrts(ps['P2']), Rload['degree'],self.rrts(Rload['peak']),Rload['normal'],ps['type'],self.scale,self.loadLogScale)\n else:\n data=loadPlotData4(ps,Rload['degree'],self.rrts(Rload['peak']))\n\n\n\n rect = QPainterPath(QPointF(data[0][0],data[0][1]))\n for i in range(1,len(data),1):\n rect.lineTo(QPointF(data[i][0],data[i][1]))\n rect=self.scene.addPath(rect,pen) \n rect.setFlag(QGraphicsItem.ItemIsSelectable)\n\n self.df.iloc[index,3].hide()\n self.df.iat[index,3]=self.scene.items()[0] \n\n except NameError:\n self.statusbar.showMessage('Enter a unique name',5000)\n except:\n import traceback\n traceback.print_exc()\n self.statusbar.showMessage('Enter a valid data',5000)\ndef editLoads(self):\n pass\n\ndef editSupports(self):\n pass\n\n\n\ndef objectsTable(self):\n self.eE=QMainWindow(parent=self.MainWindow)\n self.ee=objectsTableUI()\n self.ee.setupUi(self.eE)\n self.prevData=None\n self.objectTables={\n 0:self.ee.segmentsTable,\n 1:self.ee.loadsTable,\n 2:self.ee.supportsTable\n }\n [self.objectTables[i].horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) for i in [0,1,2]]\n self.ee.segmentsTable.itemChanged.connect(lambda:editMembers(self,0))\n self.ee.loadsTable.itemChanged.connect(lambda:editMembers(self,1))\n self.ee.supportsTable.itemChanged.connect(lambda:editMembers(self,2))\n self.eE.show()","repo_name":"samrachana/Samrachana-Araniko","sub_path":"src/structure2d/objectsTable.py","file_name":"objectsTable.py","file_ext":"py","file_size_in_byte":7311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
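The heart of objectsTable.py above is the setCellWidget/currentIndexChanged wiring that puts a live QComboBox inside the loads table. Stripped of the DataFrame bookkeeping, the pattern is only a few lines; a self-contained PySide2 sketch with illustrative names:

import sys
from PySide2.QtWidgets import (QApplication, QComboBox, QTableWidget,
                               QTableWidgetItem)

app = QApplication(sys.argv)
table = QTableWidget(1, 3)
table.setHorizontalHeaderLabels(["Name", "peak", "parentSegment"])
table.setItem(0, 0, QTableWidgetItem("load-1"))
table.setItem(0, 1, QTableWidgetItem("3.5"))

combo = QComboBox()
combo.addItems(["segment-1", "segment-2"])
# The combo replaces the plain item in that cell; its signal plays the
# role that editObjectsTable plays in the module above.
combo.currentIndexChanged.connect(
    lambda idx: print("parent changed to", combo.itemText(idx)))
table.setCellWidget(0, 2, combo)

table.show()
sys.exit(app.exec_())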
+{"seq_id":"3394278862","text":"#!/usr/bin/python\n\nimport multiprocessing\nimport containerstats\nimport etcd\nimport platform\nimport docker\nimport time\nimport os\nimport requests\n\ndockerconnection = docker.Client(base_url='unix://var/run/docker.sock', timeout=2)\ndockerconnection.close()\n\ndef getstats(obj):\n etcd.CreateDir(DDS_ETCD_URL, platform.node() + '/' + obj.containername, DDS_CONTAINER_TTL)\n etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/cpuusage',\n obj.getcontainercpuusage(dockerconnection)['cpuusage'])\n etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/memusage',\n obj.getcontainermemusage(dockerconnection)['memusage'])\n etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/memusagepercent',\n obj.getcontainermemusage(dockerconnection)['memusagepercent'])\n etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/netrx',\n obj.getcontainernetusage(dockerconnection)['netrx'])\n etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/nettx',\n obj.getcontainernetusage(dockerconnection)['nettx'])\n return True\n\n\nif __name__ == '__main__':\n\n if 'DDS_ETCD_URL' in os.environ:\n DDS_ETCD_URL = os.environ['DDS_ETCD_URL']\n else:\n DDS_ETCD_URL = 'http://127.0.0.1:4001/v2/keys/'\n\n if 'DDS_CONCURRENCY_LEVEL' in os.environ:\n DDS_CONCURRENCY_LEVEL = os.environ['DDS_CONCURRENCY_LEVEL']\n else:\n DDS_CONCURRENCY_LEVEL = 8\n\n # start values\n DDS_HOST_TTL = 120\n DDS_CONTAINER_TTL = 30\n\n\n while True:\n newpool = multiprocessing.Pool(processes=DDS_CONCURRENCY_LEVEL)\n etcd.CreateDir(DDS_ETCD_URL, platform.node(), ttl=DDS_HOST_TTL)\n containerlist = containerstats.getrunningcontainers(dockerconnection)\n objlist = []\n for container in containerlist:\n objlist.append(containerstats.ContainerStats(container))\n gatherstart = time.time()\n # when i.e. container stop during data gathering timeout generated\n try:\n newpool.map(getstats, objlist)\n except requests.packages.urllib3.exceptions.ReadTimeoutError:\n pass\n newpool.close()\n gatherstop = time.time()\n gatherduration = int(gatherstop - gatherstart)\n DDS_HOST_TTL = gatherduration * 5\n DDS_CONTAINER_TTL = gatherduration * 3\n time.sleep(gatherduration)\n","repo_name":"witalisoft/dds","sub_path":"app/dds.py","file_name":"dds.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"127324968","text":"from datetime import datetime, timezone\nfrom typing import Any, Dict, Set\n\nimport pytest\nfrom boto3.dynamodb.types import TypeDeserializer, TypeSerializer\nfrom fideslang.models import Dataset\n\nfrom fides.api.graph.config import (\n CollectionAddress,\n FieldAddress,\n FieldPath,\n ObjectField,\n ScalarField,\n)\nfrom fides.api.graph.graph import DatasetGraph, Edge\nfrom fides.api.graph.traversal import Traversal, TraversalNode\nfrom fides.api.models.datasetconfig import convert_dataset_to_graph\nfrom fides.api.models.privacy_request import PrivacyRequest\nfrom fides.api.schemas.masking.masking_configuration import HashMaskingConfiguration\nfrom fides.api.schemas.masking.masking_secrets import MaskingSecretCache, SecretType\nfrom fides.api.service.connectors.query_config import (\n DynamoDBQueryConfig,\n MongoQueryConfig,\n SQLQueryConfig,\n)\nfrom fides.api.service.masking.strategy.masking_strategy_hash import HashMaskingStrategy\nfrom fides.api.util.data_category import DataCategory\n\nfrom ...task.traversal_data import combined_mongo_postgresql_graph, integration_db_graph\nfrom ...test_helpers.cache_secrets_helper import cache_secret, clear_cache_secrets\n\n# customers -> address, order\n# orders -> address, payment card\n# payment card -> address\n# address\n\n# identities: customer.email\n\ngraph: DatasetGraph = integration_db_graph(\"postgres_example\")\ntraversal = Traversal(graph, {\"email\": \"X\"})\ntraversal_nodes: Dict[CollectionAddress, TraversalNode] = traversal.traversal_node_dict\npayment_card_node = traversal_nodes[\n CollectionAddress(\"postgres_example\", \"payment_card\")\n]\nuser_node = traversal_nodes[CollectionAddress(\"postgres_example\", \"payment_card\")]\nprivacy_request = PrivacyRequest(id=\"234544\")\n\n\nclass TestSQLQueryConfig:\n def test_extract_query_components(self):\n def found_query_keys(node: TraversalNode, values: Dict[str, Any]) -> Set[str]:\n return set(node.typed_filtered_values(values).keys())\n\n config = SQLQueryConfig(payment_card_node)\n assert config.field_map().keys() == {\n FieldPath(s)\n for s in [\n \"id\",\n \"name\",\n \"ccn\",\n \"customer_id\",\n \"billing_address_id\",\n ]\n }\n assert payment_card_node.query_field_paths == {\n FieldPath(\"id\"),\n FieldPath(\"customer_id\"),\n }\n\n # values exist for all query keys\n assert found_query_keys(\n payment_card_node,\n {\n \"id\": [\"A\"],\n \"customer_id\": [\"V\"],\n \"ignore_me\": [\"X\"],\n },\n ) == {\"id\", \"customer_id\"}\n # with no values OR an empty set, these are omitted\n assert found_query_keys(\n payment_card_node,\n {\n \"id\": [\"A\"],\n \"customer_id\": [],\n \"ignore_me\": [\"X\"],\n },\n ) == {\"id\"}\n assert found_query_keys(\n payment_card_node, {\"id\": [\"A\"], \"ignore_me\": [\"X\"]}\n ) == {\"id\"}\n assert found_query_keys(payment_card_node, {\"ignore_me\": [\"X\"]}) == set()\n assert found_query_keys(payment_card_node, {}) == set()\n\n def test_typed_filtered_values(self):\n assert payment_card_node.typed_filtered_values(\n {\n \"id\": [\"A\"],\n \"customer_id\": [\"V\"],\n \"ignore_me\": [\"X\"],\n }\n ) == {\"id\": [\"A\"], \"customer_id\": [\"V\"]}\n\n assert payment_card_node.typed_filtered_values(\n {\n \"id\": [\"A\"],\n \"customer_id\": [],\n \"ignore_me\": [\"X\"],\n }\n ) == {\"id\": [\"A\"]}\n\n assert payment_card_node.typed_filtered_values(\n {\"id\": [\"A\"], \"ignore_me\": [\"X\"]}\n ) == {\"id\": [\"A\"]}\n\n assert payment_card_node.typed_filtered_values(\n {\"id\": [], \"customer_id\": [\"V\"]}\n ) == 
{\"customer_id\": [\"V\"]}\n # test for type casting: id has type \"string\":\n assert payment_card_node.typed_filtered_values({\"id\": [1]}) == {\"id\": [\"1\"]}\n assert payment_card_node.typed_filtered_values({\"id\": [1, 2]}) == {\n \"id\": [\"1\", \"2\"]\n }\n\n def test_generated_sql_query(self):\n \"\"\"Test that the generated query depends on the input set\"\"\"\n assert (\n str(\n SQLQueryConfig(payment_card_node).generate_query(\n {\n \"id\": [\"A\"],\n \"customer_id\": [\"V\"],\n \"ignore_me\": [\"X\"],\n }\n )\n )\n == \"SELECT id,name,ccn,customer_id,billing_address_id FROM payment_card WHERE id = :id OR customer_id = :customer_id\"\n )\n\n assert (\n str(\n SQLQueryConfig(payment_card_node).generate_query(\n {\n \"id\": [\"A\"],\n \"customer_id\": [],\n \"ignore_me\": [\"X\"],\n }\n )\n )\n == \"SELECT id,name,ccn,customer_id,billing_address_id FROM payment_card WHERE id = :id\"\n )\n\n assert (\n str(\n SQLQueryConfig(payment_card_node).generate_query(\n {\"id\": [\"A\"], \"ignore_me\": [\"X\"]}\n )\n )\n == \"SELECT id,name,ccn,customer_id,billing_address_id FROM payment_card WHERE id = :id\"\n )\n\n assert (\n str(\n SQLQueryConfig(payment_card_node).generate_query(\n {\"id\": [], \"customer_id\": [\"V\"]}\n )\n )\n == \"SELECT id,name,ccn,customer_id,billing_address_id FROM payment_card WHERE customer_id = :customer_id\"\n )\n\n def test_update_rule_target_fields(\n self, erasure_policy, example_datasets, connection_config\n ):\n dataset = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset, connection_config.key)\n dataset_graph = DatasetGraph(*[graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n\n customer_node = traversal.traversal_node_dict[\n CollectionAddress(\"postgres_example_test_dataset\", \"customer\")\n ]\n\n rule = erasure_policy.rules[0]\n config = SQLQueryConfig(customer_node)\n assert config.build_rule_target_field_paths(erasure_policy) == {\n rule: [FieldPath(\"name\")]\n }\n\n # Make target more broad\n target = rule.targets[0]\n target.data_category = DataCategory(\"user\").value\n assert config.build_rule_target_field_paths(erasure_policy) == {\n rule: [FieldPath(\"email\"), FieldPath(\"id\"), FieldPath(\"name\")]\n }\n\n # Check different collection\n address_node = traversal.traversal_node_dict[\n CollectionAddress(\"postgres_example_test_dataset\", \"address\")\n ]\n config = SQLQueryConfig(address_node)\n assert config.build_rule_target_field_paths(erasure_policy) == {\n rule: [FieldPath(x) for x in [\"city\", \"house\", \"street\", \"state\", \"zip\"]]\n }\n\n def test_generate_update_stmt_one_field(\n self, erasure_policy, example_datasets, connection_config\n ):\n dataset = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset, connection_config.key)\n dataset_graph = DatasetGraph(*[graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n\n customer_node = traversal.traversal_node_dict[\n CollectionAddress(\"postgres_example_test_dataset\", \"customer\")\n ]\n\n config = SQLQueryConfig(customer_node)\n row = {\n \"email\": \"customer-1@example.com\",\n \"name\": \"John Customer\",\n \"address_id\": 1,\n \"id\": 1,\n }\n text_clause = config.generate_update_stmt(row, erasure_policy, privacy_request)\n assert text_clause.text == \"\"\"UPDATE customer SET name = :name WHERE id = :id\"\"\"\n assert text_clause._bindparams[\"name\"].key == \"name\"\n assert text_clause._bindparams[\"name\"].value is None # Null masking strategy\n\n def 
test_generate_update_stmt_length_truncation(\n self,\n erasure_policy_string_rewrite_long,\n example_datasets,\n connection_config,\n ):\n dataset = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset, connection_config.key)\n dataset_graph = DatasetGraph(*[graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n\n customer_node = traversal.traversal_node_dict[\n CollectionAddress(\"postgres_example_test_dataset\", \"customer\")\n ]\n\n config = SQLQueryConfig(customer_node)\n row = {\n \"email\": \"customer-1@example.com\",\n \"name\": \"John Customer\",\n \"address_id\": 1,\n \"id\": 1,\n }\n\n text_clause = config.generate_update_stmt(\n row, erasure_policy_string_rewrite_long, privacy_request\n )\n assert text_clause.text == \"\"\"UPDATE customer SET name = :name WHERE id = :id\"\"\"\n assert text_clause._bindparams[\"name\"].key == \"name\"\n # length truncation on name field\n assert (\n text_clause._bindparams[\"name\"].value\n == \"some rewrite value that is very long and\"\n )\n\n def test_generate_update_stmt_multiple_fields_same_rule(\n self, erasure_policy, example_datasets, connection_config\n ):\n dataset = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset, connection_config.key)\n dataset_graph = DatasetGraph(*[graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n\n customer_node = traversal.traversal_node_dict[\n CollectionAddress(\"postgres_example_test_dataset\", \"customer\")\n ]\n\n config = SQLQueryConfig(customer_node)\n row = {\n \"email\": \"customer-1@example.com\",\n \"name\": \"John Customer\",\n \"address_id\": 1,\n \"id\": 1,\n }\n\n # Make target more broad\n rule = erasure_policy.rules[0]\n target = rule.targets[0]\n target.data_category = DataCategory(\"user\").value\n\n # Update rule masking strategy\n rule.masking_strategy = {\n \"strategy\": \"hash\",\n \"configuration\": {\"algorithm\": \"SHA-512\"},\n }\n # cache secrets for hash strategy\n secret = MaskingSecretCache[str](\n secret=\"adobo\",\n masking_strategy=HashMaskingStrategy.name,\n secret_type=SecretType.salt,\n )\n cache_secret(secret, privacy_request.id)\n\n text_clause = config.generate_update_stmt(row, erasure_policy, privacy_request)\n assert (\n text_clause.text\n == \"UPDATE customer SET email = :email,name = :name WHERE id = :id\"\n )\n assert text_clause._bindparams[\"name\"].key == \"name\"\n # since length is set to 40 in dataset.yml, we expect only first 40 chars of masked val\n assert (\n text_clause._bindparams[\"name\"].value\n == HashMaskingStrategy(HashMaskingConfiguration(algorithm=\"SHA-512\")).mask(\n [\"John Customer\"], request_id=privacy_request.id\n )[0][0:40]\n )\n assert (\n text_clause._bindparams[\"email\"].value\n == HashMaskingStrategy(HashMaskingConfiguration(algorithm=\"SHA-512\")).mask(\n [\"customer-1@example.com\"], request_id=privacy_request.id\n )[0]\n )\n clear_cache_secrets(privacy_request.id)\n\n def test_generate_update_stmts_from_multiple_rules(\n self, erasure_policy_two_rules, example_datasets, connection_config\n ):\n dataset = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset, connection_config.key)\n dataset_graph = DatasetGraph(*[graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n row = {\n \"email\": \"customer-1@example.com\",\n \"name\": \"John Customer\",\n \"address_id\": 1,\n \"id\": 1,\n }\n\n customer_node = traversal.traversal_node_dict[\n 
CollectionAddress(\"postgres_example_test_dataset\", \"customer\")\n ]\n\n config = SQLQueryConfig(customer_node)\n\n text_clause = config.generate_update_stmt(\n row, erasure_policy_two_rules, privacy_request\n )\n\n assert (\n text_clause.text\n == \"UPDATE customer SET email = :email,name = :name WHERE id = :id\"\n )\n # Two different masking strategies used for name and email\n assert text_clause._bindparams[\"name\"].value is None # Null masking strategy\n assert (\n text_clause._bindparams[\"email\"].value == \"*****\"\n ) # String rewrite masking strategy\n\n\nclass TestMongoQueryConfig:\n @pytest.fixture(scope=\"function\")\n def combined_traversal(self, connection_config, integration_mongodb_config):\n mongo_dataset, postgres_dataset = combined_mongo_postgresql_graph(\n connection_config, integration_mongodb_config\n )\n combined_dataset_graph = DatasetGraph(mongo_dataset, postgres_dataset)\n combined_traversal = Traversal(\n combined_dataset_graph,\n {\"email\": \"customer-1@examplecom\"},\n )\n return combined_traversal\n\n @pytest.fixture(scope=\"function\")\n def customer_details_node(self, combined_traversal):\n return combined_traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_details\")\n ]\n\n @pytest.fixture(scope=\"function\")\n def customer_feedback_node(self, combined_traversal):\n return combined_traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_feedback\")\n ]\n\n def test_field_map_nested(self, customer_details_node):\n config = MongoQueryConfig(customer_details_node)\n\n field_map = config.field_map()\n assert isinstance(field_map[FieldPath(\"workplace_info\")], ObjectField)\n assert isinstance(\n field_map[FieldPath(\"workplace_info\", \"employer\")], ScalarField\n )\n\n def test_primary_key_field_paths(self, customer_details_node):\n config = MongoQueryConfig(customer_details_node)\n assert list(config.primary_key_field_paths.keys()) == [FieldPath(\"_id\")]\n assert isinstance(config.primary_key_field_paths[FieldPath(\"_id\")], ScalarField)\n\n def test_nested_query_field_paths(\n self, customer_details_node, customer_feedback_node\n ):\n assert customer_details_node.query_field_paths == {\n FieldPath(\"customer_id\"),\n }\n\n assert customer_feedback_node.query_field_paths == {\n FieldPath(\"customer_information\", \"email\")\n }\n\n def test_nested_typed_filtered_values(self, customer_feedback_node):\n \"\"\"Identity data is located on a nested object\"\"\"\n input_data = {\n \"customer_information.email\": [\"test@example.com\"],\n \"ignore\": [\"abcde\"],\n }\n assert customer_feedback_node.typed_filtered_values(input_data) == {\n \"customer_information.email\": [\"test@example.com\"]\n }\n\n def test_generate_query(\n self,\n policy,\n example_datasets,\n integration_mongodb_config,\n connection_config,\n ):\n dataset_postgres = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset_postgres, connection_config.key)\n dataset_mongo = Dataset(**example_datasets[1])\n mongo_graph = convert_dataset_to_graph(\n dataset_mongo, integration_mongodb_config.key\n )\n dataset_graph = DatasetGraph(*[graph, mongo_graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n # Edge created from Root to nested customer_information.email field\n assert (\n Edge(\n FieldAddress(\"__ROOT__\", \"__ROOT__\", \"email\"),\n FieldAddress(\n \"mongo_test\", \"customer_feedback\", \"customer_information\", \"email\"\n ),\n )\n in traversal.edges\n )\n\n # Test query on nested 
field\n customer_feedback = traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_feedback\")\n ]\n config = MongoQueryConfig(customer_feedback)\n input_data = {\"customer_information.email\": [\"customer-1@example.com\"]}\n # Tuple of query, projection - Searching for documents with nested\n # customer_information.email = customer-1@example.com\n assert config.generate_query(input_data, policy) == (\n {\"customer_information.email\": \"customer-1@example.com\"},\n {\"_id\": 1, \"customer_information\": 1, \"date\": 1, \"message\": 1, \"rating\": 1},\n )\n\n # Test query nested data\n customer_details = traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_details\")\n ]\n config = MongoQueryConfig(customer_details)\n input_data = {\"customer_id\": [1]}\n # Tuple of query, projection - Projection is specifying fields at the top-level. Nested data will\n # be filtered later.\n assert config.generate_query(input_data, policy) == (\n {\"customer_id\": 1},\n {\n \"_id\": 1,\n \"birthday\": 1,\n \"comments\": 1,\n \"customer_id\": 1,\n \"emergency_contacts\": 1,\n \"children\": 1,\n \"gender\": 1,\n \"travel_identifiers\": 1,\n \"workplace_info\": 1,\n },\n )\n\n def test_generate_update_stmt_multiple_fields(\n self,\n erasure_policy,\n example_datasets,\n integration_mongodb_config,\n connection_config,\n ):\n dataset_postgres = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset_postgres, connection_config.key)\n dataset_mongo = Dataset(**example_datasets[1])\n mongo_graph = convert_dataset_to_graph(\n dataset_mongo, integration_mongodb_config.key\n )\n dataset_graph = DatasetGraph(*[graph, mongo_graph])\n\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n customer_details = traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_details\")\n ]\n config = MongoQueryConfig(customer_details)\n row = {\n \"birthday\": \"1988-01-10\",\n \"gender\": \"male\",\n \"customer_id\": 1,\n \"_id\": 1,\n \"workplace_info\": {\n \"position\": \"Chief Strategist\",\n \"direct_reports\": [\"Robbie Margo\", \"Sully Hunter\"],\n },\n \"emergency_contacts\": [{\"name\": \"June Customer\", \"phone\": \"444-444-4444\"}],\n \"children\": [\"Christopher Customer\", \"Courtney Customer\"],\n }\n\n # Make target more broad\n rule = erasure_policy.rules[0]\n target = rule.targets[0]\n target.data_category = DataCategory(\"user\").value\n\n mongo_statement = config.generate_update_stmt(\n row, erasure_policy, privacy_request\n )\n\n expected_result_0 = {\"_id\": 1}\n expected_result_1 = {\n \"$set\": {\n \"birthday\": None,\n \"children.0\": None,\n \"children.1\": None,\n \"customer_id\": None,\n \"emergency_contacts.0.name\": None,\n \"workplace_info.direct_reports.0\": None, # Both direct reports are masked.\n \"workplace_info.direct_reports.1\": None,\n \"emergency_contacts.0.phone\": None,\n \"gender\": None,\n \"workplace_info.position\": None,\n }\n }\n\n print(mongo_statement[1])\n print(expected_result_1)\n assert mongo_statement[0] == expected_result_0\n assert mongo_statement[1] == expected_result_1\n\n def test_generate_update_stmt_multiple_rules(\n self,\n erasure_policy_two_rules,\n example_datasets,\n integration_mongodb_config,\n connection_config,\n ):\n dataset_postgres = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset_postgres, connection_config.key)\n dataset_mongo = Dataset(**example_datasets[1])\n mongo_graph = convert_dataset_to_graph(\n 
dataset_mongo, integration_mongodb_config.key\n )\n dataset_graph = DatasetGraph(*[graph, mongo_graph])\n\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n\n customer_details = traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_details\")\n ]\n\n config = MongoQueryConfig(customer_details)\n row = {\n \"birthday\": \"1988-01-10\",\n \"gender\": \"male\",\n \"customer_id\": 1,\n \"_id\": 1,\n \"workplace_info\": {\n \"position\": \"Chief Strategist\",\n \"direct_reports\": [\"Robbie Margo\", \"Sully Hunter\"],\n },\n \"emergency_contacts\": [{\"name\": \"June Customer\", \"phone\": \"444-444-4444\"}],\n \"children\": [\"Christopher Customer\", \"Courtney Customer\"],\n }\n\n rule = erasure_policy_two_rules.rules[0]\n rule.masking_strategy = {\n \"strategy\": \"hash\",\n \"configuration\": {\"algorithm\": \"SHA-512\"},\n }\n target = rule.targets[0]\n target.data_category = DataCategory(\"user.demographic.date_of_birth\").value\n\n rule_two = erasure_policy_two_rules.rules[1]\n rule_two.masking_strategy = {\n \"strategy\": \"random_string_rewrite\",\n \"configuration\": {\"length\": 30},\n }\n target = rule_two.targets[0]\n target.data_category = DataCategory(\"user.demographic.gender\").value\n # cache secrets for hash strategy\n secret = MaskingSecretCache[str](\n secret=\"adobo\",\n masking_strategy=HashMaskingStrategy.name,\n secret_type=SecretType.salt,\n )\n cache_secret(secret, privacy_request.id)\n\n mongo_statement = config.generate_update_stmt(\n row, erasure_policy_two_rules, privacy_request\n )\n assert mongo_statement[0] == {\"_id\": 1}\n assert len(mongo_statement[1][\"$set\"][\"gender\"]) == 30\n assert (\n mongo_statement[1][\"$set\"][\"birthday\"]\n == HashMaskingStrategy(HashMaskingConfiguration(algorithm=\"SHA-512\")).mask(\n [\"1988-01-10\"], request_id=privacy_request.id\n )[0]\n )\n\n\nclass TestDynamoDBQueryConfig:\n @pytest.fixture(scope=\"function\")\n def identity(self):\n identity = {\"email\": \"customer-test_uuid@example.com\"}\n return identity\n\n @pytest.fixture(scope=\"function\")\n def dataset_graph(self, integration_dynamodb_config, example_datasets):\n dataset = Dataset(**example_datasets[11])\n dataset_graph = convert_dataset_to_graph(\n dataset, integration_dynamodb_config.key\n )\n\n return DatasetGraph(*[dataset_graph])\n\n @pytest.fixture(scope=\"function\")\n def traversal(self, identity, dataset_graph):\n dynamo_traversal = Traversal(dataset_graph, identity)\n return dynamo_traversal\n\n @pytest.fixture(scope=\"function\")\n def customer_node(self, traversal):\n return traversal.traversal_node_dict[\n CollectionAddress(\"dynamodb_example_test_dataset\", \"customer\")\n ]\n\n @pytest.fixture(scope=\"function\")\n def customer_identifier_node(self, traversal):\n return traversal.traversal_node_dict[\n CollectionAddress(\"dynamodb_example_test_dataset\", \"customer_identifier\")\n ]\n\n @pytest.fixture(scope=\"function\")\n def customer_row(self):\n row = {\n \"customer_email\": {\"S\": \"customer-1@example.com\"},\n \"name\": {\"S\": \"John Customer\"},\n \"address_id\": {\"L\": [{\"S\": \"1\"}, {\"S\": \"2\"}]},\n \"personal_info\": {\"M\": {\"gender\": {\"S\": \"male\"}, \"age\": {\"S\": \"99\"}}},\n \"id\": {\"S\": \"1\"},\n }\n return row\n\n @pytest.fixture(scope=\"function\")\n def deserialized_customer_row(self, customer_row):\n deserialized_customer_row = {}\n deserializer = TypeDeserializer()\n for key, value in customer_row.items():\n deserialized_customer_row[key] = 
deserializer.deserialize(value)\n return deserialized_customer_row\n\n @pytest.fixture(scope=\"function\")\n def customer_identifier_row(self):\n row = {\n \"customer_id\": {\"S\": \"customer-1@example.com\"},\n \"email\": {\"S\": \"customer-1@example.com\"},\n \"name\": {\"S\": \"Customer 1\"},\n \"created\": {\"S\": datetime.now(timezone.utc).isoformat()},\n }\n return row\n\n @pytest.fixture(scope=\"function\")\n def deserialized_customer_identifier_row(self, customer_identifier_row):\n deserialized_customer_identifier_row = {}\n deserializer = TypeDeserializer()\n for key, value in customer_identifier_row.items():\n deserialized_customer_identifier_row[key] = deserializer.deserialize(value)\n return deserialized_customer_identifier_row\n\n def test_get_query_param_formatting_single_key(\n self,\n resources_dict,\n customer_node,\n ) -> None:\n input_data = {\n \"fidesops_grouped_inputs\": [],\n \"email\": [\"customer-test_uuid@example.com\"],\n }\n attribute_definitions = [{\"AttributeName\": \"email\", \"AttributeType\": \"S\"}]\n query_config = DynamoDBQueryConfig(customer_node, attribute_definitions)\n item = query_config.generate_query(\n input_data=input_data, policy=resources_dict[\"policy\"]\n )\n assert item[\"ExpressionAttributeValues\"] == {\n \":value\": {\"S\": \"customer-test_uuid@example.com\"}\n }\n assert item[\"KeyConditionExpression\"] == \"email = :value\"\n\n def test_put_query_param_formatting_single_key(\n self,\n erasure_policy,\n customer_node,\n deserialized_customer_row,\n ) -> None:\n input_data = {\n \"fidesops_grouped_inputs\": [],\n \"email\": [\"customer-test_uuid@example.com\"],\n }\n attribute_definitions = [{\"AttributeName\": \"email\", \"AttributeType\": \"S\"}]\n query_config = DynamoDBQueryConfig(customer_node, attribute_definitions)\n update_item = query_config.generate_update_stmt(\n deserialized_customer_row, erasure_policy, privacy_request\n )\n\n assert update_item == {\n \"customer_email\": {\"S\": \"customer-1@example.com\"},\n \"name\": {\"NULL\": True},\n \"address_id\": {\"S\": \"1\"},\n \"address_id\": {\"L\": [{\"S\": \"1\"}, {\"S\": \"2\"}]},\n \"personal_info\": {\"M\": {\"gender\": {\"S\": \"male\"}, \"age\": {\"S\": \"99\"}}},\n \"id\": {\"S\": \"1\"},\n }\n","repo_name":"ethyca/fides","sub_path":"tests/ops/service/connectors/test_queryconfig.py","file_name":"test_queryconfig.py","file_ext":"py","file_size_in_byte":27186,"program_lang":"python","lang":"en","doc_type":"code","stars":302,"dataset":"github-code","pt":"76"}
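The DynamoDB fixtures in test_queryconfig.py hand-write attribute-value maps such as {"S": ...} and decode them with boto3's TypeDeserializer. TypeSerializer is the exact inverse, so a fixture can round-trip from a plain dict; a small sketch that needs only boto3 installed, no AWS credentials:

from boto3.dynamodb.types import TypeDeserializer, TypeSerializer

serializer = TypeSerializer()
deserializer = TypeDeserializer()

plain = {
    "customer_email": "customer-1@example.com",
    "address_id": ["1", "2"],
    "personal_info": {"gender": "male", "age": "99"},
}

# Plain Python -> DynamoDB wire format ({"S": ...}, {"L": ...}, {"M": ...})
wire = {key: serializer.serialize(value) for key, value in plain.items()}
assert wire["customer_email"] == {"S": "customer-1@example.com"}

# ...and back, which is exactly what the deserialized_customer_row fixture does.
roundtrip = {key: deserializer.deserialize(value) for key, value in wire.items()}
assert roundtrip == plain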
+{"seq_id":"22396029976","text":"# Tests numba.analysis functions\nfrom __future__ import print_function, absolute_import, division\n\nimport numpy as np\nfrom numba.compiler import compile_isolated, run_frontend\nfrom numba import types, rewrites, ir, jit, ir_utils\nfrom .support import TestCase, MemoryLeakMixin\n\n\nfrom numba.analysis import dead_branch_prune\n\n\ndef compile_to_ir(func):\n func_ir = run_frontend(func)\n\n class MockPipeline(object):\n def __init__(self, func_ir):\n self.typingctx = None\n self.targetctx = None\n self.args = None\n self.func_ir = func_ir\n self.typemap = None\n self.return_type = None\n self.calltypes = None\n # call this to get print etc rewrites\n rewrites.rewrite_registry.apply('before-inference', MockPipeline(func_ir),\n func_ir)\n return func_ir\n\n\nclass TestBranchPrune(MemoryLeakMixin, TestCase):\n \"\"\"\n Tests branch pruning\n \"\"\"\n _DEBUG = False\n\n # find *all* branches\n def find_branches(self, the_ir):\n branches = []\n for blk in the_ir.blocks.values():\n tmp = [_ for _ in blk.find_insts(cls=ir.Branch)]\n branches.extend(tmp)\n return branches\n\n def assert_prune(self, func, args_tys, prune, *args):\n # This checks that the expected pruned branches have indeed been pruned.\n # func is a python function to assess\n # args_tys is the numba types arguments tuple\n # prune arg is a list, one entry per branch. The value in the entry is\n # encoded as follows:\n # True: using constant inference only, the True branch will be pruned\n # False: using constant inference only, the False branch will be pruned\n # None: under no circumstances should this branch be pruned\n # *args: the argument instances to pass to the function to check\n # execution is still valid post transform\n\n func_ir = compile_to_ir(func)\n before = func_ir.copy()\n if self._DEBUG:\n print(\"=\" * 80)\n print(\"before prune\")\n func_ir.dump()\n\n dead_branch_prune(func_ir, args_tys)\n\n after = func_ir\n if self._DEBUG:\n print(\"after prune\")\n func_ir.dump()\n\n before_branches = self.find_branches(before)\n self.assertEqual(len(before_branches), len(prune))\n\n # what is expected to be pruned\n expect_removed = []\n for idx, prune in enumerate(prune):\n branch = before_branches[idx]\n if prune is True:\n expect_removed.append(branch.truebr)\n elif prune is False:\n expect_removed.append(branch.falsebr)\n elif prune is None:\n pass # nothing should be removed!\n else:\n assert 0, \"unreachable\"\n\n # compare labels\n original_labels = set([_ for _ in before.blocks.keys()])\n new_labels = set([_ for _ in after.blocks.keys()])\n # assert that the new labels are precisely the original less the\n # expected pruned labels\n try:\n self.assertEqual(new_labels, original_labels - set(expect_removed))\n except AssertionError as e:\n print(\"new_labels\", new_labels)\n print(\"original_labels\", original_labels)\n print(\"expect_removed\", expect_removed)\n raise e\n\n cres = compile_isolated(func, args_tys)\n res = cres.entry_point(*args)\n expected = func(*args)\n self.assertEqual(res, expected)\n\n def test_single_if(self):\n\n def impl(x):\n if 1 == 0:\n return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [True], None)\n\n def impl(x):\n if 1 == 1:\n return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [False], None)\n\n def impl(x):\n if x is None:\n return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [False], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10)\n\n def impl(x):\n if x == 10:\n 
return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [True], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)\n\n def impl(x):\n if x == 10:\n z = 3.14159 # noqa: F841 # no effect\n\n self.assert_prune(impl, (types.NoneType('none'),), [True], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)\n\n # TODO: cannot handle this without const prop\n # def impl(x):\n # z = None\n # y = z\n # if x == y:\n # print(\"x is 10\")\n\n # self.assert_prune(impl, (types.NoneType('none'),), [None], None)\n # self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)\n\n def test_single_if_else(self):\n\n def impl(x):\n if x is None:\n return 3.14159\n else:\n return 1.61803\n\n self.assert_prune(impl, (types.NoneType('none'),), [False], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10)\n\n def test_single_if_const_val(self):\n\n def impl(x):\n if x == 100:\n return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [True], None)\n self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)\n\n def impl(x):\n # switch the condition order\n if 100 == x:\n return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [True], None)\n self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)\n\n def test_single_if_else_two_const_val(self):\n\n def impl(x, y):\n if x == y:\n return 3.14159\n else:\n return 1.61803\n\n self.assert_prune(impl, (types.IntegerLiteral(100),) * 2, [None], 100,\n 100)\n self.assert_prune(impl, (types.NoneType('none'),) * 2, [False], None,\n None)\n self.assert_prune(impl, (types.IntegerLiteral(100),\n types.NoneType('none'),), [True], 100, None)\n self.assert_prune(impl, (types.IntegerLiteral(100),\n types.IntegerLiteral(1000)), [None], 100, 1000)\n\n def test_single_if_else_w_following_undetermined(self):\n\n def impl(x):\n x_is_none_work = False\n if x is None:\n x_is_none_work = True\n else:\n dead = 7 # noqa: F841 # no effect\n\n if x_is_none_work:\n y = 10\n else:\n y = -3\n return y\n\n self.assert_prune(impl, (types.NoneType('none'),), [False, None], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)\n\n def impl(x):\n x_is_none_work = False\n if x is None:\n x_is_none_work = True\n else:\n pass # force the True branch exit to be on backbone\n\n if x_is_none_work:\n y = 10\n else:\n y = -3\n return y\n\n self.assert_prune(impl, (types.NoneType('none'),), [None, None], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)\n\n def test_double_if_else_rt_const(self):\n\n def impl(x):\n one_hundred = 100\n x_is_none_work = 4\n if x is None:\n x_is_none_work = 100\n else:\n dead = 7 # noqa: F841 # no effect\n\n if x_is_none_work == one_hundred:\n y = 10\n else:\n y = -3\n\n return y, x_is_none_work\n\n self.assert_prune(impl, (types.NoneType('none'),), [False, None], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)\n\n def test_double_if_else_non_literal_const(self):\n\n def impl(x):\n one_hundred = 100\n if x == one_hundred:\n y = 3.14159\n else:\n y = 1.61803\n return y\n\n # no prune as compilation specialization on literal value not permitted\n self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)\n self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)\n\n def test_single_two_branches_same_cond(self):\n\n def impl(x):\n if x is None:\n y = 10\n else:\n y = 40\n\n if x is not None:\n z = 100\n else:\n z = 400\n\n return z, y\n\n 
self.assert_prune(impl, (types.NoneType('none'),), [False, True], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True, False], 10)\n\n def test_cond_is_kwarg_none(self):\n\n def impl(x=None):\n if x is None:\n y = 10\n else:\n y = 40\n\n if x is not None:\n z = 100\n else:\n z = 400\n\n return z, y\n\n self.assert_prune(impl, (types.Omitted(None),),\n [False, True], None)\n self.assert_prune(impl, (types.NoneType('none'),), [False, True], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True, False], 10)\n\n def test_cond_is_kwarg_value(self):\n\n def impl(x=1000):\n if x == 1000:\n y = 10\n else:\n y = 40\n\n if x != 1000:\n z = 100\n else:\n z = 400\n\n return z, y\n\n self.assert_prune(impl, (types.Omitted(1000),), [None, None], 1000)\n self.assert_prune(impl, (types.IntegerLiteral(1000),), [None, None],\n 1000)\n self.assert_prune(impl, (types.IntegerLiteral(0),), [None, None], 0)\n self.assert_prune(impl, (types.NoneType('none'),), [True, False], None)\n\n def test_cond_rewrite_is_correct(self):\n # this checks that when a condition is replaced, it is replace by a\n # true/false bit that correctly represents the evaluated condition\n def fn(x):\n if x is None:\n return 10\n return 12\n\n def check(func, arg_tys, bit_val):\n func_ir = compile_to_ir(func)\n\n # check there is 1 branch\n before_branches = self.find_branches(func_ir)\n self.assertEqual(len(before_branches), 1)\n\n # check the condition in the branch is a binop\n condition_var = before_branches[0].cond\n condition_defn = ir_utils.get_definition(func_ir, condition_var)\n self.assertEqual(condition_defn.op, 'binop')\n\n # do the prune, this should kill the dead branch and rewrite the\n #'condition to a true/false const bit\n if self._DEBUG:\n print(\"=\" * 80)\n print(\"before prune\")\n func_ir.dump()\n dead_branch_prune(func_ir, arg_tys)\n if self._DEBUG:\n print(\"=\" * 80)\n print(\"after prune\")\n func_ir.dump()\n\n # after mutation, the condition should be a const value `bit_val`\n new_condition_defn = ir_utils.get_definition(func_ir, condition_var)\n self.assertTrue(isinstance(new_condition_defn, ir.Const))\n self.assertEqual(new_condition_defn.value, bit_val)\n\n check(fn, (types.NoneType('none'),), 1)\n check(fn, (types.IntegerLiteral(10),), 0)\n\n def test_obj_mode_fallback(self):\n # see issue #3879, this checks that object mode fall back doesn't suffer\n # from the IR mutation\n\n @jit\n def bug(a,b):\n if a.ndim == 1:\n if b is None:\n return 10\n return 12\n return []\n\n self.assertEqual(bug(np.arange(10), 4), 12)\n self.assertEqual(bug(np.arange(10), None), 10)\n self.assertEqual(bug(np.arange(10).reshape((2, 5)), 10), [])\n self.assertEqual(bug(np.arange(10).reshape((2, 5)), None), [])\n self.assertFalse(bug.nopython_signatures)\n","repo_name":"nesliiiimmm/Web-Scraper-Python","sub_path":"scraping/Lib/site-packages/numba/tests/test_analysis.py","file_name":"test_analysis.py","file_ext":"py","file_size_in_byte":12326,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"}
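The pruning tests above drive dead_branch_prune directly with literal argument types such as types.Omitted(None). From the user side, the same specialization happens whenever a jitted function branches on an optional argument; a minimal sketch (requires numba):

from numba import njit

@njit
def scale(x, factor=None):
    # With the default omitted, numba types `factor` as Omitted(None), the
    # `is None` test becomes a compile-time constant, and the dead arm is
    # pruned -- which is what lets `x * factor` type-check at all.
    if factor is None:
        return x * 2
    return x * factor

print(scale(10))     # 20: specialization where the else-branch was pruned
print(scale(10, 3))  # 30: specialization where the None-branch was pruned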
+{"seq_id":"29778256229","text":"#Leonard Carcaramo 08/28/19\r\n#Python Tic-Tac-Toe\r\n\r\nimport Modules #All Modules used are located here\r\n\r\nx = \"X\" #String to represent player X on the board\r\no = \"O\" #String to represent player O on the board\r\n\r\nxWin = False #control var to check if player X wins or not\r\noWin = False #control var to check if player O wins or not\r\nmoveCount = 0 #keeps track or number of moves (tie if moveCount == 9)\r\n\r\nboard = [[\" \", \" \", \" \"], [\" \", \" \", \" \"], [\" \", \" \", \" \"]] #moves are stored in this 2 dimensional array.\r\n\r\ncontinueLoop = True #controls while loop below\r\n\r\nwhile continueLoop:\r\n while xWin == False and oWin == False and moveCount < 9: #while no wins and number of moves is less than 9\r\n board = Modules.playerTurn(x, board) #player X takes turn\r\n moveCount += 1\r\n xWin = Modules.checkWin(x, board) #check if player X wins\r\n if xWin == True: #if player X wins\r\n Modules.displayBoard(board)\r\n print(\"You Win!\")\r\n if moveCount == 9 and xWin == False and oWin == False: #check tie\r\n Modules.displayBoard(board)\r\n print(\"Tie.\")\r\n \r\n if xWin == False and moveCount < 9: #make sure player X did not win, and number of moves have not reached 9\r\n if moveCount == 1: #make player O's move random for the first turn\r\n board = Modules.computerTurnRandom(o, board)\r\n else: #use AI for player O's turn\r\n board = Modules.computerTurnWithAI(o, board) \r\n moveCount += 1\r\n oWin = Modules.checkWin(o, board) #check if player O wins\r\n if oWin == True: # if player O wins\r\n Modules.displayBoard(board)\r\n print(\"You lose.\")\r\n \r\n optionValid = False #controls while loop below\r\n \r\n while optionValid == False: #while user input is not valid\r\n playAgain = input(\"Would you like to play again 'yes/no': \")\r\n if playAgain == \"yes\": #if player wants to play again\r\n optionValid = True\r\n xWin = False\r\n oWin = False\r\n moveCount = 0\r\n board = [[\" \", \" \", \" \"], [\" \", \" \", \" \"], [\" \", \" \", \" \"]]\r\n elif playAgain == \"no\": #if player does not want to play again\r\n optionValid = True\r\n continueLoop = False\r\n else:\r\n print(\"Invalid Input! Valid options are 'yes' and 'no' only.\")","repo_name":"leonard112/Python-Tic-Tac-Toe-With-AI","sub_path":"tictactoe/TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"39310583196","text":"from tkinter import *\nfrom tkinter import ttk, filedialog, scrolledtext\nimport Second_Frame\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.impute import SimpleImputer\n\n\nmain_form = Tk()\nmain_form.geometry(\"700x600\")\nmain_form.title(\"Machine Learning\")\nmain_form.config(background=\"white\")\n\n\ndef browseFiles():\n # browse file \n global data, data_info\n filename = filedialog.askopenfilename(initialdir=\"/Machine Learning\",\n title=\"Select a File\",\n filetypes=((\"CSV files\", \"*.csv*\"), (\"all files\", \"*.*\")))\n data = pd.read_csv(filename)\n data_info = str(data.describe())\n file.configure(text=\"File Opened: \"+filename)\n\n\nfile = Button(main_form, text=\"Browse Files\",\n command=browseFiles, font=\"none 10 bold\")\nfile.pack()\n\n\ntree_data = ttk.Treeview(main_form)\n# scrollbar = Scrollbar(main_form)\n# scrollbar.pack(side=RIGHT, fill=Y)\ntext = Text(main_form,width=70, height=15)\n\n\ndef show_data(data):\n # display data\n tree_data[\"columns\"] = list(data.columns)\n for header in data.columns:\n tree_data.column(header, width=10)\n tree_data.heading(header, text=header.title())\n\n # Add the data to the Treeview\n for index, row in data.iterrows():\n tree_data.insert(\"\", END, values=list(row))\n # delet id column\n tree_data.column(\"#0\", width=0, stretch=NO)\n\n\ndef show_data_info(data_info):\n # display data describtion\n text.delete('1.0',END)\n text.insert(END, data_info)\n text.config(state=DISABLED)\n\n\nbtn = Button(main_form, text='show data', command=lambda: show_data(data))\nbtn.pack()\ntree_data.pack()\n\n\nbtn2 = Button(main_form, text='show data info',\n command=lambda: show_data_info(data_info))\nbtn2.pack()\ntext.pack()\n\n\n# to go to second frame and display algorithm \n\n\nbtn3 = Button(main_form, text='show details', command=lambda:Second_Frame.second.display(data))\nbtn3.pack(anchor='sw')\n\n\nmain_form.mainloop()\n","repo_name":"Andrewnazeh/machine_learning_preprocessing_task","sub_path":"task_first.py","file_name":"task_first.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"7197091648","text":"from tkinter import *\r\nfrom tkinter import ttk\r\n\r\ntela = Tk()\r\ntela.title(\"Calculadora\")\r\ntela.geometry(\"235x310+550+200\")\r\ntela.resizable(0, 0)\r\ntela.config(background=\"white\")\r\n\r\ndisplay = Frame(tela, width=235, height=50, background=\"#1e1f1e\")\r\ndisplay.grid(row=0, column=0)\r\n\r\nbotoes = Frame(tela, width=235, height=268)\r\nbotoes.grid(row=1, column=0)\r\n\r\nTodosValores = ''\r\n\r\ndef expressao(event):\r\n global TodosValores\r\n TodosValores = TodosValores + str(event)\r\n texto.set(TodosValores)\r\n\r\ndef calcular():\r\n global TodosValores\r\n result = eval(TodosValores)\r\n texto.set(result)\r\n\r\ndef limpar():\r\n global TodosValores\r\n TodosValores = \"\"\r\n texto.set(\"\")\r\n\r\n\r\ntexto = StringVar()\r\n\r\nvisor = Label(display, textvariable=texto, width=16, height=2, padx=7, relief=FLAT, anchor=\"e\", justify=RIGHT, font=(\"Ivy 18\"), fg=\"white\", background=\"#1e1f1e\")\r\nvisor.place(x=0, y=0)\r\n\r\nb1 = Button(botoes,command=lambda: limpar(), text=\"C\", width=11, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb1.place(x=0, y=0)\r\nb2 = Button(botoes, command=lambda: expressao('%'), text=\"%\", width=5, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb2.place(x=120, y=0)\r\nb3 = Button(botoes, command=lambda: expressao('/'), text=\"/\", width=5, height=2, background=\"#FFA500\", fg=\"white\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb3.place(x=180, y=0)\r\n\r\n\r\nb4 = Button(botoes, command=lambda: expressao('7'), text=\"7\", width=5, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb4.place(x=0, y=52)\r\nb5 = Button(botoes, command=lambda: expressao('8'),text=\"8\", width=5, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb5.place(x=60, y=52)\r\nb6 = Button(botoes, command=lambda: expressao('9'),text=\"9\", width=5, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb6.place(x=120, y=52)\r\nb7 = Button(botoes, command=lambda: expressao('*'),text=\"*\", width=5, height=2, background=\"#FFA500\", fg=\"white\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb7.place(x=180, y=52)\r\n\r\n\r\nb8 = Button(botoes, command=lambda: expressao('4'),text=\"4\", width=5, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb8.place(x=0, y=104)\r\nb9 = Button(botoes, command=lambda: expressao('5'),text=\"5\", width=5, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb9.place(x=60, y=104)\r\nb10 = Button(botoes, command=lambda: expressao('6'),text=\"6\", width=5, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb10.place(x=120, y=104)\r\nb11 = Button(botoes, command=lambda: expressao('-'),text=\"-\", width=5, height=2, background=\"#FFA500\", fg=\"white\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb11.place(x=180, y=104)\r\n\r\n\r\nb8 = Button(botoes, command=lambda: expressao('1'),text=\"1\", width=5, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb8.place(x=0, y=156)\r\nb9 = Button(botoes, command=lambda: expressao('2'),text=\"2\", width=5, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb9.place(x=60, y=156)\r\nb10 = 
Button(botoes, command=lambda: expressao('3'),text=\"3\", width=5, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb10.place(x=120, y=156)\r\nb11 = Button(botoes, command=lambda: expressao('+'),text=\"+\", width=5, height=2, background=\"#FFA500\", fg=\"white\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb11.place(x=180, y=156)\r\n\r\n\r\nb12 = Button(botoes, command=lambda: expressao('0'),text=\"0\", width=11, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb12.place(x=0, y=208)\r\nb13 = Button(botoes, command=lambda: expressao('.'), text=\".\", width=5, height=2, background=\"#808080\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb13.place(x=120, y=208)\r\nb14 = Button(botoes, command=lambda: calcular(), text=\"=\", width=5, height=2, background=\"#FFA500\", fg=\"white\", font=(\"Ivy 13 bold\"), relief=RAISED, overrelief=RIDGE)\r\nb14.place(x=180, y=208)\r\n\r\n\r\ntela.mainloop()\r\n","repo_name":"Wellsrodrigues/Calculadora-Python-com-GUI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
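The calculator record above funnels every button press into a bare eval, which crashes on an empty or malformed entry (for example pressing "=" first) and would execute arbitrary Python if the expression string were ever attacker-controlled. A minimal sketch of an arithmetic-only evaluator built on the standard ast module; the name evaluar_seguro is hypothetical and not part of the record:

import ast
import operator

# whitelist of the arithmetic operators the calculator's buttons can produce
_OPS = {
    ast.Add: operator.add, ast.Sub: operator.sub,
    ast.Mult: operator.mul, ast.Div: operator.truediv,
    ast.Mod: operator.mod, ast.USub: operator.neg,
}

def evaluar_seguro(expr):
    """Evaluate a pure-arithmetic expression; raise ValueError for anything else."""
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.operand))
        raise ValueError("expression not allowed")
    try:
        return _eval(ast.parse(expr, mode="eval"))
    except (SyntaxError, ZeroDivisionError) as exc:
        raise ValueError(str(exc))

calcular() could then catch ValueError and show an error in the display instead of crashing the GUI.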
+{"seq_id":"10743227187","text":"from flask import Flask, render_template\nimport requests\nfrom flask_humanize import Humanize\n\n# I set it up this way (along with a plethora of different ways) when I was following some tutorials\n# on how to setup a Flask project so it will deploy to Heroku. I have a Flask project successfully deployed\n# to Heroku, but I couldn't find and difference between this project and the previous one. If you uncomment the code,\n# it would pull the correct info and run it through the string format in the commented out request below.\n# I have done similar things with Flask, Django, and React, but for some reason I'm hitting a wall. I figured this\n# API can't really do any damage, so the key and id are hardcoded so that Heroku will work. Make it work, make it right, make it fast.\n# try:\n# from juicykey import app_id, app_key\n# except:\n # app_id = process.env.key_one\n # app_key = process.env.key_two\n\napp = Flask(__name__)\nhumanize = Humanize(app)\n\n\n@app.route(\"/\")\ndef index():\n result_span = []\n unique_ingredient_list = []\n unique_ingredient_index = {}\n total_of_each_items_cal_per_oz = 0\n lower_search_result = 0\n higher_search_result = 50\n total_hits = 1\n while len(result_span) != total_hits:\n # I don't normally hardcode api keys. Read the above comment if you haven't already\n # This is how I had the call setup prior to Heroku deployment issues\n # nutrionix_data = requests.get(\"https://api.nutritionix.com/v1_1/search/?brand_id=51db37d0176fe9790a899db2&results={}:{}&fields=*&appId={}&appKey={}\".format(lower_search_result, higher_search_result, app_id, app_key))\n nutrionix_data = requests.get(\n \"https://api.nutritionix.com/v1_1/search/?brand_id=51db37d0176fe9790a899db2&results={}:{}&fields=*&appId=f611e1fd&appKey=c63a3b9c90c5586828562a2cb5e93211\".format(\n lower_search_result, higher_search_result))\n nutrionix_data_json = nutrionix_data.json()\n nutrionix_hits = nutrionix_data_json['hits']\n\n lower_search_result += 50\n higher_search_result += 50\n total_hits = nutrionix_data_json['total_hits']\n\n for item in nutrionix_hits:\n fields = item['fields']\n result_span.append(fields)\n\n for result in result_span:\n if result['nf_ingredient_statement'] is None:\n pass\n else:\n\n first_del_pos = result['nf_ingredient_statement'].find(\"(\") # get the position of [\n second_del_pos = result['nf_ingredient_statement'].find(\"),\") # get the position of ]\n string_after_replace = result['nf_ingredient_statement'].replace(result['nf_ingredient_statement']\n [first_del_pos - 1:second_del_pos + 1], \"\").replace(\".\", \"\").title().replace(\"And \", \"\")\n\n ingredient_list = string_after_replace.split(\", \")\n\n for ingredient in ingredient_list:\n if ingredient not in unique_ingredient_list:\n print(\"ingredient, dude\", ingredient)\n unique_ingredient_list.append(ingredient)\n unique_ingredient_index.setdefault(ingredient, []).append(result['item_name'])\n else:\n unique_ingredient_index.setdefault(ingredient, []).append(result['item_name'])\n\n if result['nf_serving_size_unit'] == \"fl oz\":\n divided_up = result['nf_calories'] / 8\n total_of_each_items_cal_per_oz += divided_up\n elif result['nf_serving_size_unit'] == 'box':\n # assuming that all pouches are the 4.23 oz packages due to them being in packages of 8\n divided_up = result['nf_calories'] / 4.23\n total_of_each_items_cal_per_oz += divided_up\n elif result['nf_serving_size_unit'] == 'bottle':\n # Assuming that the bottle is for the 10 oz packages only because on the juicy 
juice nutrition facts,\n #it starts going by fl oz on the 48 oz bottles. https://juicyjuice.com/products/juicy-juice-fruit-juice/apple\n divided_up = result['nf_calories'] / 10\n total_of_each_items_cal_per_oz += divided_up\n else:\n # assuming that all pouches are 6 oz\n divided_up = result['nf_calories'] / 6\n total_of_each_items_cal_per_oz += divided_up\n\n ctx = {\n 'total': nutrionix_data_json['total_hits'],\n 'fields': result_span,\n 'ingredients': unique_ingredient_list,\n 'calories_per_oz': round(total_of_each_items_cal_per_oz / len(result_span)),\n 'index': unique_ingredient_index\n }\n print(\"index\", unique_ingredient_index)\n return render_template('index.html', **ctx)\n\n # ctx = {\n # 'index': {1: [1, 2, 3], 2: [1, 2, 3], 3: [2, 4, 5], 4: ['skdje', 'dfnkw'], 5: [5, 3, 2533], 6: [1, 2, 3], 7: [1, 2, 3], 8: [1, 2, 3], 9: [11, 22, 33, 44], 10: ['afs', 'fjknel', 'fasd;'], 11: ['fad', 'sad', 'dad']}\n # }\n # return render_template('index.html', **ctx)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"SethCWilliams/juicy-juice-analytics","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
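The view in the record above pages through the Nutritionix results with while len(result_span) != total_hits, which loops forever if the API ever returns fewer rows than total_hits promises (items deleted, or the index changing between calls). A sketch of a more defensive pagination loop; fetch_page is a hypothetical stand-in for the requests.get(...) call in the record:

def collect_all_hits(fetch_page, page_size=50, max_pages=200):
    """Accumulate paged API results, stopping on a short or empty page."""
    results = []
    for page in range(max_pages):                 # hard upper bound on requests
        lo = page * page_size
        hits = fetch_page(lo, lo + page_size)     # -> list of result dicts
        results.extend(hits)
        if len(hits) < page_size:                 # short page: no more data
            break
    return results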
+{"seq_id":"7723430435","text":"from src.pipeline.modes.OCRBase import OCRBase\nfrom src.pipeline.dl_homograhpy.homographyDL import HomographyDL\nimport numpy as np\nimport cv2\nimport os\nfrom src.pipeline.dl_homograhpy.dl_homography_utils import *\n\nclass OCRMode1(OCRBase):\n \"\"\" OCR Mode 1: DL Homography + tesseract\"\"\"\n\n def __init__(self, ocr, intput, smartDoc, homography_model, grayscale):\n OCRBase.__init__(self, ocr, intput, smartDoc)\n self.homography_model = homography_model\n self.grayscale = grayscale\n\n # network spatial input shape\n input_shape = (384, 256)\n\n # create empty instance\n homography_dl = HomographyDL(input=None, output=None, architecture=None, model_fn=None, grayscale=None)\n\n def run(self, imgs):\n for img_nm in sorted(imgs):\n print('Processing image {} ...'.format(img_nm))\n\n # load image\n input_img = self.input + img_nm\n if self.grayscale:\n img = cv2.imread(input_img, 0)\n else:\n img = cv2.imread(input_img)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n # manually rotate (should be automated)\n if self.smartDoc:\n img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)\n\n # save original size to compute scaling factor\n if self.grayscale:\n org_y, org_x = img.shape\n else:\n org_y, org_x, _ = img.shape\n\n fac_y, fac_x = org_y/self.input_shape[0], org_x/self.input_shape[1]\n\n # resize (just for recovering homography)\n img_homography = cv2.resize(img, (self.input_shape[1], self.input_shape[0]))\n\n # adjust dimension for network\n if self.grayscale:\n img_homography_net = np.reshape(img_homography, (1, self.input_shape[0], self.input_shape[1], 1))\n else:\n img_homography_net = np.reshape(img_homography, (1, self.input_shape[0], self.input_shape[1], 3))\n\n # normalize\n img_homography_norm = img_homography_net/255.0\n\n # estimate corner positions\n corners = self.homography_dl.predict_corners(self.homography_model, img_homography_norm)\n\n # unwarp imgage (original size)\n pts_src = np.reshape(corners, (4, 2))\n pts_src = self.scale_estim_corners(pts_src, fac_x, fac_y)\n pts_dst = np.array([[0, 0], [org_x, 0], [org_x, org_y], [0, org_y]], dtype = 'float32')\n\n dewarped_image = warp_image(img, pts_src, pts_dst, self.grayscale)\n\n # tesseract\n self.ocr.run_image_to_text_save(dewarped_image, os.path.splitext(img_nm)[0])\n\n def scale_estim_corners(self, corners, scale_x, scale_y):\n \"\"\"\n scale estimated corners to original image size\n\n :param corners:\n :param scale_x:\n :param scale_y:\n :return:\n \"\"\"\n erg = np.zeros((4,2))\n\n for idx, corner_tuple in enumerate(corners):\n erg[idx] = corner_tuple[0]*scale_x,corner_tuple[1]*scale_y\n\n return erg","repo_name":"Nikolai10/mobile-ocr","sub_path":"src/pipeline/modes/OCRMode1.py","file_name":"OCRMode1.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"76"}
+{"seq_id":"6242539770","text":"from datetime import datetime\nimport traceback\n\nfrom django.db.models.query import QuerySet\nfrom django.shortcuts import render, redirect\nfrom django_currentuser.middleware import get_current_authenticated_user\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\nfrom petApp.models import PetModel\nfrom petApp.forms import PetForm\nfrom petCommentApp.forms import PetCommentForm\n\n\n@login_required\ndef index(request):\n list = PetModel.objects.all()\n list = list.order_by(\"created_at\").reverse()\n return render(request, \"pet/index.html\", {\"list\": list})\n\n\n@login_required\ndef new(request):\n if request.method == \"POST\":\n form = PetForm(request.POST, request.FILES)\n if not form.is_valid():\n # clean_post_cord()でバリデーションにかかった場合\n if \"post_cord\" in form.errors:\n messages.error(request, form.errors[\"post_cord\"][0])\n else:\n messages.error(request, \"ご入力の際にエラーが発生しました。管理者にご確認ください\")\n return redirect(\"/pet/new/\")\n\n pet = form.save(commit=False)\n\n now = datetime.now()\n today = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n pet.created_at = today\n pet.updated_at = today\n pet.owner = get_current_authenticated_user()\n\n pet.save()\n messages.success(request, \"ペットの登録が完了しました\")\n return redirect(\"/pet/index\")\n\n form = PetForm()\n return render(request, \"pet/new.html\", context={\"form\": form})\n\n\n@login_required\ndef show(request, id):\n pet = get_one_pet(id)\n if pet is None:\n return render(\n request,\n \"pet/index.html\",\n {\"error_message\": \"予期せぬエラーが発生しました\\n管理者にご確認ください\"},\n )\n\n pet_comment_list = pet.petcommentmodel_set.all() # type: ignore\n pet_comment_list = pet_comment_list.order_by(\"created_at\").reverse()\n form = PetCommentForm()\n return render(\n request,\n \"pet/show.html\",\n context={\"pet\": pet, \"pet_comment_list\": pet_comment_list, \"form\": form},\n )\n\n\n@login_required\ndef edit(request, id):\n pet = get_one_pet(id)\n if pet is None:\n return render(\n request,\n \"pet/index.html\",\n {\"error_message\": \"予期せぬエラーが発生しました\\n管理者にご確認ください\"},\n )\n\n if request.method == \"POST\":\n form = PetForm(request.POST, request.FILES)\n if not form.is_valid():\n # clean_post_cord()でバリデーションにかかった場合\n if \"post_cord\" in form.errors:\n messages.error(request, form.errors[\"post_cord\"][0])\n else:\n messages.error(request, \"ご入力の際にエラーが発生しました。管理者にご確認ください\")\n return redirect(\"edit\", id=id)\n\n edit_pet = form.save(commit=False)\n\n # 写真が未選択の場合、更新前の写真を設定\n if len(request.FILES) == 0:\n edit_pet.image = pet.image\n\n now = datetime.now()\n today = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n edit_pet.created_at = pet.created_at\n edit_pet.updated_at = today\n edit_pet.owner = get_current_authenticated_user()\n\n edit_pet.save()\n pet.delete() # save()完了後、更新前のデータは削除\n messages.success(request, \"ペットの更新が完了しました\")\n return redirect(\"/pet/index\")\n\n form = PetForm(instance=pet)\n pet_id = pet.id # type: ignore\n # nameから画像添付の有無を確認\n if pet.image.name:\n pet_image_url = pet.image.url\n else:\n pet_image_url = \"/static/image/no-image.png\"\n return render(\n request,\n \"pet/edit.html\",\n context={\"form\": form, \"pet_id\": pet_id, \"pet_image_url\": pet_image_url},\n )\n\n\n@login_required\ndef delete(request, id):\n pet = get_one_pet(id)\n if pet is None:\n return render(\n request,\n \"pet/index.html\",\n {\"error_message\": \"予期せぬエラーが発生しました\\n管理者にご確認ください\"},\n )\n\n try:\n pet.delete()\n return redirect(\"/pet/index\")\n except Exception as e:\n 
print(traceback.format_exc())\n return render(\n request,\n \"pet/index.html\",\n {\"error_message\": \"予期せぬエラーが発生しました\\n管理者にご確認ください\"},\n )\n\n\n@login_required\ndef search(request):\n if request.method == \"POST\":\n name = request.POST.get(\"name\")\n age = request.POST.get(\"age\")\n sex = request.POST.get(\"sex\")\n charm_point = request.POST.get(\"charm_point\")\n post_cord = request.POST.get(\"post_cord\")\n address = request.POST.get(\"address\")\n owner = request.POST.get(\"owner\")\n\n # 検索実行\n list = PetModel.objects\n if name:\n list = list.filter(name__icontains=name)\n if age:\n list = list.filter(age=age)\n if sex:\n if sex == \"true\":\n list = list.filter(sex=True)\n elif sex == \"false\":\n list = list.filter(sex=False)\n if charm_point:\n list = list.filter(charm_point__icontains=charm_point)\n if post_cord:\n list = list.filter(post_cord__icontains=post_cord)\n if address:\n list = list.filter(address__icontains=address)\n if owner:\n list = list.filter(owner__username__icontains=owner)\n\n # 未入力で検索ボタン押下\n if hasattr(list, \"name\") and list.name == \"objects\":\n return render(request, \"pet/index.html\", {\"search_message\": \"検索結果は0件でした\"})\n\n if list.exists():\n return render(request, \"pet/index.html\", {\"list\": list})\n else:\n return render(request, \"pet/index.html\", {\"search_message\": \"検索結果は0件でした\"})\n\n return render(\n request, \"pet/index.html\", {\"search_message\": \"予期せぬエラーが発生しました\\n管理者にご確認ください\"}\n )\n\n\ndef get_one_pet(id):\n try:\n return PetModel.objects.get(pk=id)\n except PetModel.DoesNotExist:\n print(traceback.format_exc())\n return None\n\n\ndef get_media_or_empty(request, name):\n if (name, \"\") in request.POST.items():\n return \"\"\n else:\n return request.FILES[name]\n","repo_name":"otinu/Pertch-Django","sub_path":"petApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"27567893921","text":"## 字母异位词分组\n## 利用dict,依次将含有相同字母的str放入sorted之后的key中\n\n\nclass Solution(object):\n def groupAnagrams(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: List[List[str]]\n \"\"\"\n dict = {}\n for i in strs:\n if str(sorted(i)) not in dict.keys():\n dict[str(sorted(i))] = []\n dict.get(str(sorted(i))).append(i)\n else:\n dict.get(str(sorted(i))).append(i)\n # pdb.set_trace()\n return [i for i in dict.values()]\n","repo_name":"zhouliuling/Leetcode_Task","sub_path":"49.py","file_name":"49.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"72056488886","text":"n = input()\ndata = list(n)\none, zero = data.count('1'), data.count('0')\none = one//2\nzero = zero//2\nfor _ in range(one):\n data.pop(data.index('1'))\nfor _ in range(zero):\n temp = data[::-1].index('0')\n data.pop(len(data)- temp - 1)\nprint(''.join(data))\n# 재구성이 아니라 원래 순서에서 제거했을 때 사전 순으로 빠른 것을 구해야 함","repo_name":"Algorithm-Study/Algorithm","sub_path":"greedy/B20310_윤상준.py","file_name":"B20310_윤상준.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"}
+{"seq_id":"6857185944","text":"'''Finds out the number is a perfect number or not'''\n\ndef perfect_num(number):\n\n ''' The abouve function will return True if the number is Perfect number'''\n\n if not isinstance(number, int) or isinstance(number, bool):\n raise TypeError(\"unsupported format\")\n\n sum_divisors = 0\n\n for i in range(1, number):\n\n if number%i == 0:\n sum_divisors += i\n\n return bool((sum_divisors == number) and (sum_divisors != 1))\n","repo_name":"lnarasim/250_problems","sub_path":"pyproblems/perfect_number.py","file_name":"perfect_number.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"32324709766","text":"import random\nfrom re import A\ndef sacar_carta():\n carta = random.randint(1,10)\n return carta\n\ncantidad_cartas_crupier = []\ncantidad_cartas_usuario = []\ndinero = 500\n\nprint (\"Bienvenid@ a la mesa de Easy 21\")\nprint(\"\")\nprint (\"Empieza la partida\")\n\ncarta1 = sacar_carta()\ncantidad_cartas_crupier = carta1\nprint (f\"El crupier saca un {carta1} su total es [{cantidad_cartas_crupier}]\")\n\nquiere_apostar = input (\"¿Quiere apostar? (s/n) \")\n\nwhile quiere_apostar == \"s\":\n cantidad_dinero = int(input (\"¿Cuanto quiere apostar? \"))\n if cantidad_dinero <= dinero:\n print (\"---\")\n elif cantidad_dinero > dinero:\n print (\"No tienes esa cantidad de dinero\")\n print (\"----\")\n quiere_apostar == \"s\"\n break\n\nif quiere_apostar == \"n\":\n print (\"----\")\n\ncarta_usuario1 = sacar_carta()\nprint (f\"Usted saca un {carta_usuario1}, su total es {carta_usuario1}\")\nprint (f\"Por el momento sacó las cartas [{carta_usuario1}]\")\n\nseguir = input(\"¿Quiere otra carta? (s/n) \")\ncarta_usuario2 = sacar_carta()\ncantidad_cartas_usuario = carta_usuario1 + carta_usuario2\nwhile seguir == \"s\":\n print (f\"Usted saca un {carta_usuario2}, su total es {cantidad_cartas_usuario}\")\n print (f\"Por el momento sacó las cartas [{carta_usuario1},{carta_usuario2}]\")\n if seguir == \"s\":\n break\n if (cantidad_cartas_usuario)>= 21:\n print (\"La suma de sus cartas supero 21. Usted pierde\")\nif seguir == \"n\":\n print (\"----\")\n \ncarta_usuario3 = sacar_carta()\nsuma_de_usuario2 = cantidad_cartas_usuario + carta_usuario3\nseguir2 = input(\"¿Quiere otra carta? (s/n) \")\n \nif (cantidad_cartas_usuario) >= 21:\n print (\"La suma de sus cartas supero 21. Usted pierde\")\nelif (cantidad_cartas_usuario) <= 21:\n while seguir2 == \"s\":\n print (f\"Usted saca un {carta_usuario3}, su total es {suma_de_usuario2}\")\n print (f\"Por el momento sacó las cartas [{carta_usuario1},{carta_usuario2},{carta_usuario3}]\")\n if seguir == \"s\":\n break\nif seguir2 == \"n\":\n print (\"----\")\n\nif (suma_de_usuario2) >= 21:\n print (\"La suma de sus cartas supero 21. Usted pierde\")","repo_name":"sguntin/trabajo1ipc","sub_path":"tp1ipc.py","file_name":"tp1ipc.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"5462862034","text":"def main(): \r\n #Data Loading \r\n routes = {}\r\n transistions = 0\r\n with open(\"input.txt\",\"r\") as file:\r\n \r\n file.readline() #skips the comment\r\n startGoal = file.readline().strip()\r\n\r\n endGoal = file.readline().split(\" \")\r\n endGoal[len(endGoal)-1] = endGoal[len(endGoal)-1].strip()\r\n \r\n lines = file.readlines() #lines contains all remaining lines\r\n for a in lines:\r\n l = a.split(\" \")\r\n start = l[0].split(\":\")[0] \r\n for i in range(1,len(l)):\r\n temp = l[i].split(\",\")\r\n place = temp[0]\r\n cost = float(temp[1])\r\n #appends all connections with place and cost\r\n #increases each transistion\r\n if start in routes:\r\n routes[start].append((place,cost))\r\n transistions += 1\r\n else:\r\n routes[start] = [(place,cost)]\r\n transistions += 1\r\n #printing the result of data loading\r\n print(\"Data Loading\")\r\n print(\"Start State:\" + startGoal)\r\n print(\"End State(s):\" , end = \"\")\r\n print(endGoal)\r\n print(\"State Space:\", len(routes))\r\n print(\"Total transitions:\", transistions)\r\n print()\r\n\r\n #breadth first search\r\n def bfs(start,end, path=[]):\r\n path = path + [start]\r\n if start == end:\r\n return path\r\n if start not in routes:\r\n return None\r\n shortest_path = None\r\n for node in routes[start]:\r\n if node[0] not in path: #node[0] = place\r\n sp = bfs(node[0], end, path)\r\n if sp:\r\n if shortest_path is None or len(sp) < len(shortest_path):\r\n shortest_path = sp\r\n return shortest_path\r\n \r\n #printing the results of breadth first search\r\n print(\"Breadth First Search\")\r\n for end in endGoal:\r\n solution = bfs(startGoal,end)\r\n print(\"States visited:\", end=\" \")\r\n for i in range(1, len(solution) - 1):\r\n print(solution[i])\r\n print(\"Found Path Length:\", len(solution))\r\n for i in range(len(solution)):\r\n if i == len(solution) - 1:\r\n print(solution[i])\r\n else:\r\n print(solution[i] + \" ==>\", end=\" \")\r\n print()\r\n\r\n #uniform Search Cost:\r\n def uniformCostSearch(start, end, path = [],cost = 0):\r\n path = path + [start]\r\n if start == end:\r\n return (path,cost)\r\n if start not in routes:\r\n return None\r\n cheapPath = None\r\n for node in routes[start]:\r\n if node[0] not in path:\r\n cp = uniformCostSearch(node[0],end,path,cost + node[1])\r\n if cp:\r\n if cheapPath is None or cheapPath[1] > cp[1]:\r\n cheapPath = cp\r\n return cheapPath\r\n\r\n\r\n #Printing the result of uniform cost search\r\n print(\"Uniform Cost Search\")\r\n for end in endGoal:\r\n solution = uniformCostSearch(startGoal,end)\r\n print(\"States visited:\", end=\" \")\r\n for i in range(1, len(solution[0]) - 1):\r\n print(solution[0][i])\r\n print(\"Found Path Length:\", len(solution[0]), \"with total cost of\",solution[1])\r\n for i in range(len(solution[0])):\r\n if i == len(solution[0]) - 1:\r\n print(solution[0][i])\r\n else:\r\n print(solution[0][i] + \" ==>\", end=\" \")\r\n print()\r\n \r\nif __name__ == '__main__':\r\n main()\r\n ","repo_name":"joel662/TravelingProblem","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"2614426484","text":"import datetime\nstart_time = datetime.datetime.now()\n\nimport sys\nimport hashlib\n\n\ndef cycle_through_numbers(puzzle_input, starting_i, num_zeroes):\n i =starting_i\n while True:\n new_input = puzzle_input+str(i)\n md5 = hashlib.md5(new_input).hexdigest()\n zeroes = len(md5) - len(md5.lstrip('0'))\n if zeroes == num_zeroes:\n break\n else:\n i +=1\n return i\n\n\ndef main(puzzle_input): \n part_a = cycle_through_numbers(puzzle_input,0, 5)\n print('Answer to part a is {}'.format(part_a))\n processing_time = (datetime.datetime.now() - start_time).total_seconds() * 1000\n print(\"Time taken to get answer: {:.3f} ms\".format(processing_time))\n \n \n part_b = cycle_through_numbers(puzzle_input,part_a, 6)\n print('\\nAnswer to part b is {}'.format(part_b))\n processing_time = (datetime.datetime.now() - start_time).total_seconds() * 1000\n print(\"Time taken to get answer: {:.3f} ms\".format(processing_time))\n\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"USAGE: python [script.py] [input]\")\n else:\n main(sys.argv[1])\n","repo_name":"lwalsh8/Advent_of_Code","sub_path":"2015/day_04.py","file_name":"day_04.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"38980555171","text":"import matplotlib.pylab as plt\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nMODEL_PATH =\"/notebooks/repo/part2/output/new_model\"\nLABELS_PATH =\"/notebooks/repo/part2/class_labels.txt\"\nIMAGE_SHAPE = (224, 224)\n\nclassifier = tf.keras.Sequential([\n hub.KerasLayer(MODEL_PATH, input_shape=IMAGE_SHAPE+(3,))\n])\n\nimagenet_labels = np.array(open(LABELS_PATH).read().splitlines())\n\ndata_root = tf.keras.utils.get_file(\n 'flower_photos','https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',\n untar=True)\n\nimage_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1/255)\nimage_data = image_generator.flow_from_directory(str(data_root), target_size=IMAGE_SHAPE)\n\nimage_batch, _ = next(image_data)\npredicted = classifier.predict(image_batch)\n\ntop3_pred = np.argsort(predicted[0])[:-4:-1]\n\nprint(\n \"First - class: \",\n imagenet_labels[top3_pred[0]],\n \", probability: \",\n np.round(100*predicted[0][top3_pred[0]],2),\n \"%\",\n sep='')\n\nprint(\n \"Second - class: \",\n imagenet_labels[top3_pred[1]],\n \", probability: \",\n np.round(100*predicted[0][top3_pred[1]],2),\n \"%\",\n sep='')\n\nprint(\n \"Third - class: \",\n imagenet_labels[top3_pred[2]],\n \", probability: \",\n np.round(100*predicted[0][top3_pred[2]],2),\n \"%\",\n sep='')\n\n\n","repo_name":"csancini/w251","sub_path":"hw05/homework5.py","file_name":"homework5.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"39817169516","text":"R, C, N = map(int, input().split())\r\ncmd = input()\r\nMap = [[*input()] for _ in range(R)]\r\n\r\nx, y = None, None\r\nfor i in range(R):\r\n for j in range(C):\r\n if Map[i][j] == 'O':\r\n x, y = i, j\r\n break\r\n\r\ndx = {'>': 0, '<': 0, 'v': 1, '^': -1}\r\ndy = {'>': 1, '<': -1, 'v': 0, '^': 0}\r\n\r\nans = 1\r\nfor c in cmd:\r\n while True:\r\n p, q = x + dx[c], y + dy[c]\r\n if Map[p][q] == '#':\r\n break\r\n if Map[p][q] == '.':\r\n ans += 1\r\n Map[p][q] = '!'\r\n x, y = p, q\r\nprint(ans)","repo_name":"juwkim/boj","sub_path":"백준/Silver/21361. Robotdammsugaren/Robotdammsugaren.py","file_name":"Robotdammsugaren.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"70849105205","text":"\"\"\"Fridge objects AML components based training pipeline.\"\"\"\nfrom typing import Optional\nimport os\nimport time\nimport logging\nimport argparse\nfrom azure.ai.ml.dsl import pipeline\nfrom azure.ai.ml import load_component\nfrom common.mlops.get_compute import get_compute\nfrom common.mlops.get_environment import get_environment\nfrom common.mlops.get_aml_client import get_aml_client\n\n\ngl_pipeline_components = []\n\n\n@pipeline()\ndef fridge_objects_automl_train(\n subscription_id: str,\n resource_group_name: str,\n workspace_name: str,\n automl_model_name: str,\n automl_experiment_name: str,\n automl_compute_cluster_name: str,\n build_reference_id: str,\n model_name: str,\n model_description: str,\n deploy_environment: str\n) -> None:\n \"\"\"Compose the fridge objects AutoML training pipeline.\n\n Adds steps for data preparation (creating train, val, test MLTables) and then launches\n an AutoML object detection training job.\n\n Args:\n subscription_id (str): AML subscription ID.\n resource_group_name (str): AML resource group name.\n workspace_name (str): AML workspace name.\n automl_model_name (str): the AutoML object detection model variant.\n automl_experiment_name (str): the AutoML experiment name.\n automl_compute_cluster_name (str): the compute cluster name to use\n for the AutoML job.\n build_reference_id (str): the DevOps build reference ID executing the pipeline.\n model_name (str): name of model shown at registration.\n model_description (str): description of model shown at registration.\n deploy_environment (str): the environment to use for the AutoML job.\n\n Returns:\n None\n \"\"\"\n tenant_id = os.getenv(\"AZURE_TENANT_ID\")\n client_id = os.getenv(\"AZURE_CLIENT_ID\")\n client_secret = os.getenv(\"AZURE_CLIENT_SECRET\")\n\n if tenant_id is None or client_id is None or client_secret is None:\n raise ValueError(\"Env variables not set, unable to create client\")\n\n train_mltable_name = \"fride_obj_det_mltable_train_\" + deploy_environment\n val_mltable_name = \"fride_obj_det_mltable_val_\" + deploy_environment\n test_mltable_name = \"fride_obj_det_mltable_test_\" + deploy_environment\n\n prepare_fridge_obj_data = gl_pipeline_components[0](\n client_id=client_id,\n client_secret=client_secret,\n tenant_id=tenant_id,\n subscription_id=subscription_id,\n resource_group_name=resource_group_name,\n workspace_name=workspace_name,\n train_mltable_name=train_mltable_name,\n val_mltable_name=val_mltable_name,\n test_mltable_name=test_mltable_name,\n )\n\n train_automl_model = gl_pipeline_components[1](\n client_id=client_id,\n client_secret=client_secret,\n tenant_id=tenant_id,\n subscription_id=subscription_id,\n resource_group_name=resource_group_name,\n workspace_name=workspace_name,\n training_mltable_path=prepare_fridge_obj_data.outputs.train_mltable,\n validation_mltable_path=prepare_fridge_obj_data.outputs.val_mltable,\n automl_obj_det_model_name=automl_model_name,\n automl_experiment_name=automl_experiment_name,\n automl_compute_cluster_name=automl_compute_cluster_name,\n )\n\n gl_pipeline_components[2](\n fp32_input_dir=train_automl_model.outputs.model_artifacts_dir\n )\n\n score_fp32_model = gl_pipeline_components[3](\n model_folder_path=train_automl_model.outputs.model_artifacts_dir,\n mltable_folder=prepare_fridge_obj_data.outputs.test_mltable\n )\n\n score_fp16_model = gl_pipeline_components[4](\n model_folder_path=train_automl_model.outputs.model_artifacts_dir,\n mltable_folder=prepare_fridge_obj_data.outputs.test_mltable\n 
)\n # TODO: change model input to convert_onnx_model.outputs.fp16_output_dir\n\n compare_map_scores = gl_pipeline_components[5](\n map_before=score_fp32_model.outputs.results_file,\n map_after=score_fp16_model.outputs.results_file\n )\n\n gl_pipeline_components[6](\n client_id=client_id,\n client_secret=client_secret,\n tenant_id=tenant_id,\n subscription_id=subscription_id,\n resource_group_name=resource_group_name,\n workspace_name=workspace_name,\n onnx_model_artifacts_folder=train_automl_model.outputs.model_artifacts_dir,\n registered_model_name=model_name,\n registered_model_description=model_description,\n build_reference_id=build_reference_id,\n metrics_json_file=compare_map_scores.outputs.metrics_json_file\n )\n\n\ndef construct_pipeline(\n subscription_id: str,\n resource_group_name: str,\n workspace_name: str,\n cluster_name: str,\n environment_name: str,\n model_name: str,\n model_description: str,\n display_name: str,\n deploy_environment: str,\n build_reference: str,\n automl_model_name: str,\n automl_experiment_name: str,\n automl_compute_cluster_name: str\n) -> None:\n \"\"\"Construct the AML components based pipeline.\n\n Args:\n subscription_id (str): AML subscription ID.\n resource_group_name (str): AML resource group name.\n workspace_name (str): AML workspace name.\n cluster_name (str): the AML cluster name used to run the pipeline steps.\n environment_name (str): the AML environment name used to run the pipeline steps.\n model_name (str): name of model shown at registration.\n model_description (str): description of model shown at registration.\n display_name (str): the display name of the pipeline run.\n deploy_environment (str): the stage of deployment (eg. dev, prod).\n build_reference (str): the DevOps build reference ID executing the pipeline.\n automl_model_name (str): the AutoML object detection model variant.\n automl_experiment_name (str): the AutoML experiment name.\n automl_compute_cluster_name (str): the AML compute cluster name to use to run the\n AutoML job.\n\n Returns:\n None\n \"\"\"\n parent_dir = os.path.join(\n os.getcwd(), \"fridge_obj_det/mlops/components\"\n )\n\n prepare_data = load_component(source=parent_dir + \"/prep.yml\")\n train_model = load_component(source=parent_dir + \"/train.yml\")\n convert_model = load_component(source=parent_dir + \"/convert.yml\")\n score_fp32 = load_component(source=parent_dir + \"/score.yml\")\n score_fp16 = load_component(source=parent_dir + \"/score.yml\")\n compare_map = load_component(source=parent_dir + \"/compare_map.yml\")\n register_model = load_component(source=parent_dir + \"/register.yml\")\n\n # Set the environment name to custom environment using name and version number\n prepare_data.environment = environment_name\n train_model.environment = environment_name\n convert_model.environment = environment_name\n score_fp32.environment = environment_name\n score_fp16.environment = environment_name\n compare_map.environment = environment_name\n register_model.environment = environment_name\n\n gl_pipeline_components.append(prepare_data)\n gl_pipeline_components.append(train_model)\n gl_pipeline_components.append(convert_model)\n gl_pipeline_components.append(score_fp32)\n gl_pipeline_components.append(score_fp16)\n gl_pipeline_components.append(compare_map)\n gl_pipeline_components.append(register_model)\n\n pipeline_job = fridge_objects_automl_train(\n subscription_id,\n resource_group_name,\n workspace_name,\n automl_model_name,\n automl_experiment_name,\n automl_compute_cluster_name,\n 
build_reference,\n model_name,\n model_description,\n deploy_environment\n )\n pipeline_job.display_name = display_name\n pipeline_job.tags = {\n \"environment\": deploy_environment,\n \"build_reference\": build_reference,\n }\n\n # set pipeline level compute\n pipeline_job.settings.default_compute = cluster_name\n pipeline_job.settings.force_rerun = False\n # set pipeline level datastore\n pipeline_job.settings.default_datastore = \"workspaceblobstore\"\n\n return pipeline_job\n\n\ndef execute_pipeline(\n subscription_id: str,\n resource_group_name: str,\n workspace_name: str,\n experiment_name: str,\n pipeline_job: pipeline,\n wait_for_completion: bool,\n output_file: Optional[str],\n):\n \"\"\"Execute the AML components based pipeline.\n\n Args:\n subscription_id (str): AML subscription ID.\n resource_group_name (str): AML resource group name.\n workspace_name (str): AML workspace name.\n experiment_name (str): AML pipeline experiment name.\n pipeline_job (pipeline): the AML pipeline to execute.\n wait_for_completion (bool): True if the function should wait for the\n pipeline to complete.\n output_file (Optional[str]): _description_\n\n Raises:\n Exception: _description_\n \"\"\"\n try:\n tenant_id = os.getenv(\"AZURE_TENANT_ID\")\n client_id = os.getenv(\"AZURE_CLIENT_ID\")\n client_secret = os.getenv(\"AZURE_CLIENT_SECRET\")\n\n if tenant_id is None or client_id is None or client_secret is None:\n raise ValueError(\"Env variables not set, unable to create client\")\n\n ml_client = get_aml_client(\n client_id=client_id,\n client_secret=client_secret,\n tenant_id=tenant_id,\n subscription_id=subscription_id,\n resource_group_name=resource_group_name,\n workspace_name=workspace_name,\n )\n\n pipeline_job = ml_client.jobs.create_or_update(\n pipeline_job, experiment_name=experiment_name\n )\n\n logging.info(f\"The job {pipeline_job.name} has been submitted!\")\n if output_file is not None:\n with open(output_file, \"w\") as out_file:\n out_file.write(pipeline_job.name)\n\n if wait_for_completion is True:\n total_wait_time = 3600\n current_wait_time = 0\n job_status = [\n \"NotStarted\",\n \"Queued\",\n \"Starting\",\n \"Preparing\",\n \"Running\",\n \"Finalizing\",\n \"Provisioning\",\n \"CancelRequested\",\n \"Failed\",\n \"Canceled\",\n \"NotResponding\",\n ]\n\n while pipeline_job.status in job_status:\n if current_wait_time <= total_wait_time:\n time.sleep(20)\n pipeline_job = ml_client.jobs.get(pipeline_job.name)\n\n current_wait_time = current_wait_time + 15\n\n if (\n pipeline_job.status == \"Failed\"\n or pipeline_job.status == \"NotResponding\"\n or pipeline_job.status == \"CancelRequested\"\n or pipeline_job.status == \"Canceled\"\n ):\n break\n else:\n break\n\n if pipeline_job.status == \"Completed\" or pipeline_job.status == \"Finished\":\n logging.info(\"job completed\")\n else:\n raise Exception(\"Sorry, exiting job with failure..\")\n except Exception as ex:\n print(f\"Exception raised in execute_pipeline {ex}\")\n raise\n\n\ndef prepare_and_execute(\n subscription_id: str,\n resource_group_name: str,\n workspace_name: str,\n cluster_name: str,\n cluster_size: str,\n cluster_region: str,\n min_instances: int,\n max_instances: int,\n idle_time_before_scale_down: int,\n env_base_image_name: str,\n conda_path: str,\n environment_name: str,\n env_description: str,\n wait_for_completion: bool,\n model_name: str,\n model_description: str,\n display_name: str,\n experiment_name: str,\n deploy_environment: str,\n build_reference: str,\n automl_model_name: str,\n 
automl_experiment_name: str,\n automl_compute_cluster_name: str,\n automl_cluster_size: str,\n automl_cluster_region: str,\n automl_min_instances: int,\n automl_max_instances: int,\n automl_idle_time_before_scale_down: int,\n output_file: Optional[str],\n):\n \"\"\"Prepare the pipeline and execute it.\n\n Checks all resource requirements for the pipleine and creates them if they do not exist. Then\n creates the pipeline and executes it.\n \"\"\"\n compute = get_compute(\n subscription_id,\n resource_group_name,\n workspace_name,\n cluster_name,\n cluster_size,\n cluster_region,\n min_instances,\n max_instances,\n idle_time_before_scale_down,\n )\n\n automl_compute = get_compute(\n subscription_id,\n resource_group_name,\n workspace_name,\n automl_compute_cluster_name,\n automl_cluster_size,\n automl_cluster_region,\n automl_min_instances,\n automl_max_instances,\n automl_idle_time_before_scale_down,\n )\n\n environment = get_environment(\n subscription_id,\n resource_group_name,\n workspace_name,\n env_base_image_name,\n conda_path,\n environment_name,\n env_description,\n )\n print(f\"Environment: {environment.name}, version: {environment.version}\")\n\n pipeline_job = construct_pipeline(\n subscription_id,\n resource_group_name,\n workspace_name,\n compute.name,\n f\"azureml:{environment.name}:{environment.version}\",\n model_name,\n model_description,\n display_name,\n deploy_environment,\n build_reference,\n automl_model_name,\n automl_experiment_name,\n automl_compute.name,\n )\n\n execute_pipeline(\n subscription_id,\n resource_group_name,\n workspace_name,\n experiment_name,\n pipeline_job,\n wait_for_completion,\n output_file,\n )\n\n\ndef main():\n \"\"\"Parse all args and execute the pipeline.\"\"\"\n parser = argparse.ArgumentParser(\"build_environment\")\n parser.add_argument(\"--subscription_id\", type=str, help=\"Azure subscription id\")\n parser.add_argument(\n \"--resource_group_name\", type=str, help=\"Azure Machine learning resource group\"\n )\n parser.add_argument(\n \"--workspace_name\", type=str, help=\"Azure Machine learning Workspace name\"\n )\n parser.add_argument(\n \"--cluster_name\", type=str, help=\"Azure Machine learning cluster name\"\n )\n parser.add_argument(\n \"--cluster_size\", type=str, help=\"Azure Machine learning cluster size\"\n )\n parser.add_argument(\n \"--cluster_region\",\n type=str,\n help=\"Azure Machine learning cluster region\",\n default=\"eastus2\",\n )\n parser.add_argument(\"--min_instances\", type=int, default=0)\n parser.add_argument(\"--max_instances\", type=int, default=4)\n parser.add_argument(\"--idle_time_before_scale_down\", type=int, default=120)\n parser.add_argument(\n \"--build_reference\",\n type=str,\n help=\"Unique identifier for Azure DevOps pipeline run\",\n )\n parser.add_argument(\n \"--deploy_environment\",\n type=str,\n help=\"execution and deployment environment. e.g. 
dev, prod, test\",\n )\n parser.add_argument(\n \"--experiment_name\", type=str, help=\"Job execution experiment name\"\n )\n parser.add_argument(\"--display_name\", type=str, help=\"Job execution run name\")\n parser.add_argument(\n \"--wait_for_completion\",\n type=bool,\n help=\"Set to True to wait for pipeline job completion\",\n )\n parser.add_argument(\n \"--environment_name\",\n type=str,\n help=\"Azure Machine Learning Environment name for job execution\",\n default=\"conda-based-devenv-py38-cpu\",\n )\n parser.add_argument(\n \"--env_base_image_name\",\n type=str,\n help=\"Environment custom base image name\",\n default=\"mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04\",\n )\n parser.add_argument(\n \"--conda_path\",\n type=str,\n help=\"path to conda requirements file\",\n default=\"model_factory/fridge_obj_det/mlops/environment/conda.yml\",\n )\n parser.add_argument(\n \"--env_description\", type=str, default=\"Environment created using Conda.\"\n )\n parser.add_argument(\n \"--model_name\",\n type=str,\n default=\"fridge-objects-automl-onnx\",\n help=\"The name of the registered model.\",\n )\n parser.add_argument(\n \"--model-description\",\n type=str,\n default=\"Best AutoML Object Detection ONNX model for fridge objects dataset.\",\n help=\"The description of the registered model.\",\n )\n parser.add_argument(\n \"--automl_model_name\",\n type=str,\n default=\"fasterrcnn_resnet18_fpn\"\n )\n parser.add_argument(\n \"--automl_experiment_name\",\n type=str,\n default=\"automl-fridge-objects-detection-experiment\"\n )\n parser.add_argument(\n \"--automl_compute_cluster_name\",\n type=str,\n help=\"The AML cluster name for running AutoML training experiments.\",\n default=\"gpu-cluster-v100\"\n )\n parser.add_argument(\n \"--automl_cluster_size\",\n type=str,\n help=\"AML cluster size for AutoML jobs.\",\n default=\"STANDARD_NC6S_V3\"\n )\n parser.add_argument(\n \"--automl_cluster_region\",\n type=str,\n help=\"AML cluster region for AutoML jobs.\",\n default=\"eastus2\",\n )\n parser.add_argument(\"--automl_cluster_min_instances\", type=int, default=0)\n parser.add_argument(\"--automl_cluster_max_instances\", type=int, default=4)\n parser.add_argument(\"--automl_cluster_idle_time_before_scale_down\", type=int, default=120)\n parser.add_argument(\n \"--output_file\", type=str, required=False, help=\"A file to save run id\"\n )\n\n args = parser.parse_args()\n\n prepare_and_execute(\n args.subscription_id,\n args.resource_group_name,\n args.workspace_name,\n args.cluster_name,\n args.cluster_size,\n args.cluster_region,\n args.min_instances,\n args.max_instances,\n args.idle_time_before_scale_down,\n args.env_base_image_name,\n args.conda_path,\n args.environment_name,\n args.env_description,\n args.wait_for_completion,\n args.model_name,\n args.model_description,\n args.display_name,\n args.experiment_name,\n args.deploy_environment,\n args.build_reference,\n args.automl_model_name,\n args.automl_experiment_name,\n args.automl_compute_cluster_name,\n args.automl_cluster_size,\n args.automl_cluster_region,\n args.automl_cluster_min_instances,\n args.automl_cluster_max_instances,\n args.automl_cluster_idle_time_before_scale_down,\n args.output_file,\n )\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"microsoft/mlops-model-factory-accelerator","sub_path":"telco_case_study_implementation/fridge_object_detection/model_factory/fridge_obj_det/mlops/src/mlops_pipeline.py","file_name":"mlops_pipeline.py","file_ext":"py","file_size_in_byte":18841,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"}
+{"seq_id":"25093387131","text":"def maioremenor(numeros):\n maior = 0\n menor = 999\n while True:\n num = int(input('Valor: '))\n numeros.append(num)\n stop = str(input('Continuar?'))\n if stop in 'Nn':\n break\n for i, v in enumerate(numeros):\n if i == 0:\n maior = v\n menor = v\n if v > maior:\n maior = v\n if v < menor:\n menor = v\n print(f'{numeros}')\n print(f'O maior número da lista é {maior} e o menor é {menor}')\n\nnum = list()\nmaioremenor(num)","repo_name":"danpinheiro97/indices-bioestatistica","sub_path":"pythonProject/ex99.py","file_name":"ex99.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"18151051342","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 20 16:31:48 2018\r\n\r\n@author: James\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport itertools as it\r\nimport copy\r\n\r\nrfile = np.loadtxt(\"data3.data\")\r\n#rfile = np.loadtxt(\"data_mate.txt\")\r\n#print(rfile.shape)\r\n\r\n#rfile = [[1,1,1],[1,1,2],[1,1,3],[2,2,2],[2,2,4],[3,3,2],[3,3,5],[4,4,1],[4,4,2],[4,4,4],[5,5,1],[5,5,5],[6,6,2],[6,6,5],[7,7,1],[7,7,5],[8,8,1],[8,8,2],[8,8,3],[8,8,5],[9,9,1],[9,9,2],[9,9,5]]\r\n#rfile = np.array(rfile, dtype=float)\r\n#%%\r\nclass FPTree(object):\r\n def __init__(self):\r\n self.data = None\r\n self.parent = None\r\n self.children = []\r\n self.prefix = []\r\n self.count = 1\r\n\r\nclass TableNode(object):\r\n def __init__(self):\r\n self.key = None\r\n self.value = None\r\n self.nodes = []\r\n \r\nn_rfile = rfile.shape[0]\r\nn_trans = 1000\r\nn_item = 1000\r\nminsup = 80\r\nminconf = 0.5\r\nlevel = []\r\n\r\n#data preprocessing\r\nstart =1\r\ntrans_list = []\r\ntrans_tmp = []\r\ncount_arr = np.zeros(n_item+1)\r\n\r\nfor i in range(n_rfile):\r\n if(start == rfile[i,0]):\r\n trans_tmp.append(rfile[i,2])\r\n else:\r\n if trans_tmp:\r\n trans_list.append(list(trans_tmp))\r\n trans_tmp = []\r\n start = start+1\r\n trans_tmp.append(rfile[i,2])\r\n count_arr[int(rfile[i,2])] = count_arr[int(rfile[i,2])] +1\r\n\r\ntrans_list.append(list(trans_tmp))\r\n\r\n\r\nfp_list = copy.deepcopy(trans_list)\r\n\r\n#check support, if lower, remove\r\n### considered to be function\r\ndef powerset(iterable, item):\r\n \"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\"\r\n s = list(iterable)\r\n return it.chain.from_iterable(it.combinations(s, r) for r in range(1, item+1))\r\n\r\n\r\n#%%\r\ndict_l = []\r\ndict_r = []\r\nfor i in range(n_item+1):\r\n if count_arr[i] >= minsup:\r\n dict_l.append(i)\r\n dict_r.append(count_arr[i])\r\n\r\ntable = dict(zip(dict_l, dict_r))\r\ntable = sorted(table.items(), key=lambda x: x[1], reverse=True)\r\n\r\n#%%\r\n\r\nmap_table = copy.deepcopy(table)\r\n#to negative, in order to rid of non frequent set\r\nindexneg = 0\r\nfor i in range(len(table)):\r\n indexneg = indexneg-1\r\n a = []\r\n c = []\r\n a.append(indexneg)\r\n c.append(map_table[i][0])\r\n map_table[i] = tuple(c) + tuple(a)\r\n\r\nfor i in range(n_trans):\r\n for j in range(len(fp_list[i])):\r\n for check in range(len(table)):\r\n if(fp_list[i][j] == map_table[check][0]):\r\n fp_list[i][j] = map_table[check][1]\r\n break\r\n#%%\r\nfor i in range(n_trans):\r\n fp_list[i] = [item for item in fp_list[i] if item < 0]\r\n fp_list[i].sort(reverse=True)\r\n \r\nfor i in range(n_trans):\r\n for j in range(len(fp_list[i])):\r\n for check in range(len(table)):\r\n if(fp_list[i][j] == map_table[check][1]):\r\n fp_list[i][j] = map_table[check][0]\r\n break\r\n\r\n\r\n#%%\r\n#BUILD TREE LA\r\n\r\n#bulid table\r\nroot_table = TableNode()\r\nfor i in range(len(table)):\r\n newnode = TableNode()\r\n newnode.key = table[i][0]\r\n newnode.value = table[i][1]\r\n root_table.nodes.append(newnode)\r\n\r\n#%%\r\n#build fp tree\r\n#aaa = root_table.nodes\r\ndebuglist = []\r\n\r\nroot_fp = FPTree()\r\nfor i in range(len(fp_list)):\r\n nodept = root_fp\r\n debuglist.append(\"G\")\r\n for items in fp_list[i]:\r\n found = False\r\n for nodes_inchild in nodept.children:\r\n if nodes_inchild.data == items:\r\n nodes_inchild.count = nodes_inchild.count + 1\r\n found = True\r\n nodept = nodes_inchild\r\n debuglist.append(nodept.data)\r\n break\r\n if found == False:\r\n 
newnode = FPTree()\r\n newnode.data = items\r\n newnode.parent = nodept\r\n list1 = nodept.prefix\r\n list2 = [nodept.data]\r\n newnode.prefix = list(list1+list2)\r\n nodept.children.append(newnode)\r\n debuglist.append(newnode.data)\r\n for tablept in root_table.nodes:\r\n if newnode.data == tablept.key:\r\n tablept.nodes.append(newnode)\r\n break\r\n nodept = newnode\r\n \r\nprint(\"BUILD FINISH\") \r\n#%%\r\naaa = root_fp.children #debug use\r\nttt = root_table.nodes\r\n#%%\r\ndef recursive_find(firstind, tablept, level, levelnum, realprefix):\r\n tmpset = []\r\n pdset = []\r\n pdcount = []\r\n weight = []\r\n addset = []\r\n reverseprefix = []\r\n runnext = False\r\n\r\n for leaffunc in tablept.nodes:\r\n single_count = leaffunc.count\r\n runnext = False\r\n reverseprefix = list(reversed(realprefix))\r\n reverseprefix = reverseprefix[1:]\r\n for findind in reverseprefix:\r\n while leaffunc.data != findind:\r\n leaffunc = leaffunc.parent\r\n if leaffunc.data == None:\r\n runnext = True\r\n break\r\n if runnext == True:\r\n break\r\n if runnext == True:\r\n continue\r\n \r\n prefix = leaffunc.prefix\r\n prefix = list(filter(None.__ne__, prefix))\r\n tmpset.append(prefix)\r\n weight.append(single_count)\r\n \r\n pdset = np.zeros((len(tmpset), n_item+1))\r\n for i in range(len(tmpset)):\r\n pdset[i, tmpset[i]] = pdset[i, tmpset[i]] + weight[i]\r\n pdset = pd.DataFrame(pdset)\r\n pdcount = pdset.sum()\r\n pdcount = pdcount[pdcount >= minsup]\r\n\r\n if pdcount.empty:\r\n return level\r\n \r\n #recursive starts here\r\n for ind in pdcount.index:\r\n addset = list(realprefix)\r\n level[levelnum].append(list(list([ind])+addset))\r\n level = recursive_find(ind, tablept, level, levelnum+1, list(list([ind])+addset))\r\n \r\n return level\r\n\r\n#%%\r\n#generate frequent dataset\r\nroot_table.nodes = list(reversed(root_table.nodes))\r\nttt = root_table.nodes #debug\r\n\r\n\r\ntmpset = []\r\npdset = []\r\npdcount = []\r\nweight = []\r\nfor i in range(15):\r\n level.append([])\r\n\r\n\r\nfor x in table:\r\n level[0].append(x[0])\r\n \r\n \r\nfor tablept in root_table.nodes:\r\n tmpset = []\r\n pdcount = []\r\n weight = []\r\n for leaf in tablept.nodes:\r\n single_count = leaf.count\r\n prefix = leaf.prefix\r\n prefix = list(filter(None.__ne__, prefix))\r\n tmpset.append(prefix)\r\n weight.append(single_count)\r\n \r\n pdset = np.zeros((len(tmpset), n_item+1))\r\n for i in range(len(tmpset)):\r\n pdset[i, tmpset[i]] = pdset[i, tmpset[i]] + weight[i]\r\n pdset = pd.DataFrame(pdset)\r\n pdcount = pdset.sum()\r\n\r\n pdcount = pdcount[pdcount >= minsup]\r\n #recursive starts here?\r\n for ind in pdcount.index:\r\n addset = [tablept.key]\r\n level[1].append(list(list([ind])+addset))\r\n level = recursive_find(ind, tablept, level, 2, list(list([ind])+addset))\r\n \r\n \r\n\r\n#%%\r\nimport csv\r\n\r\n\r\nwith open('sup80_0.csv','w') as out:\r\n csv_out=csv.writer(out)\r\n csv_out.writerow(level[0])\r\nwith open('sup80_1.csv','w') as out:\r\n csv_out=csv.writer(out)\r\n for row in level[1]:\r\n csv_out.writerow(row)\r\nwith open('sup80_2.csv','w') as out:\r\n csv_out=csv.writer(out)\r\n for row in level[2]:\r\n csv_out.writerow(row) \r\n#%%\r\nlastlevel = 0\r\ndef search_conf(left, landr):\r\n countl = 0\r\n countall = 0\r\n for j in range(len(trans_list)):\r\n if set(left).issubset(trans_list[j]):\r\n countl = countl +1\r\n if set(landr).issubset(trans_list[j]):\r\n countall = countall +1\r\n\r\n confi = countall/countl\r\n return confi\r\n \r\nresult = []\r\n\r\nfor i in range(15):\r\n if 
len(level[i])==0:\r\n lastlevel = i\r\n break\r\n \r\n#calculate confidence\r\n\r\nfor i in reversed(range(1,lastlevel)):\r\n for items in level[i]:\r\n newlist = list(set(items))\r\n combin_items = list(it.chain.from_iterable(it.combinations(newlist, r) for r in range(1,i+1)))\r\n for left in combin_items:\r\n confi = search_conf(left, newlist)\r\n print(left, newlist, confi)\r\n if confi >= minconf:\r\n right = tuple(set(newlist) - set(left))\r\n result.append([left, \"->\", right, confi])\r\n \r\nwith open('result800.5.csv','w') as out:\r\n csv_out=csv.writer(out)\r\n for row in result:\r\n csv_out.writerow(row)\r\n","repo_name":"C14036227/DM_project1","sub_path":"fpgrowth.py","file_name":"fpgrowth.py","file_ext":"py","file_size_in_byte":8325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
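The rule-generation pass in the FP-growth record above splits each frequent itemset into every non-empty proper left-hand side via itertools.combinations and scores it with conf(L -> R) = support(L u R) / support(L). A self-contained miniature of that split and formula (the toy transactions are mine, not the record's data):

from itertools import chain, combinations

transactions = [{1, 2, 3}, {1, 2}, {2, 3}, {1, 2, 3}]

def support(itemset):
    return sum(1 for t in transactions if set(itemset) <= t)

itemset = [1, 2, 3]
lefts = chain.from_iterable(
    combinations(itemset, r) for r in range(1, len(itemset)))
for left in lefts:
    right = tuple(set(itemset) - set(left))
    conf = support(itemset) / support(left)
    print(left, "->", right, round(conf, 2))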
+{"seq_id":"34092453953","text":"import glob\nimport time\nimport os\n\ndef getCheckpoint(folder_path):\n \n files_D = glob.glob(folder_path.replace(\"*.pth\", \"*D*.pth\"))\n files_G = glob.glob(folder_path.replace(\"*.pth\", \"*G*.pth\"))\n file_times_D = list(map(lambda x: time.ctime(os.path.getctime(x)), files_D))\n file_times_G = list(map(lambda x: time.ctime(os.path.getctime(x)), files_G))\n files_D[sorted(range(len(file_times_D)), key=lambda x: file_times_D[x])[-1]]\n files_G[sorted(range(len(file_times_G)), key=lambda x: file_times_G[x])[-1]]\n\n return [files_D[0], files_G[0]]","repo_name":"VasilisGks/PanoDR-WebApp","sub_path":"models/PanoDR/Unet/utils/chkpnt_manage.py","file_name":"chkpnt_manage.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"}
+{"seq_id":"37360836303","text":"import os, sys\nimport tensorflow as tf\nimport numpy as np\nimport random, cv2, operator, os\n\n# configure flags\n\nFLAGS = {}\n\nFLAGS['method'] = 'WGAN-v24-cycleganD2'\nFLAGS['mode_use_debug'] = False\nFLAGS['num_exp'] = 736\nFLAGS['num_gpu'] = '4'\nFLAGS['sys_use_unix'] = True\nFLAGS['sys_is_dgx'] = True\n\nFLAGS['netD_init_method'] = 'var_scale' #var_scale, rand_uniform, rand_normal, truncated_normal\nFLAGS['netD_init_weight'] = 1e-3\nFLAGS['netD_base_learning_rate'] = 1e-5\nFLAGS['netD_base_learning_decay'] = 75\nFLAGS['netD_base_learning_decay_epoch'] = 75\nFLAGS['netD_regularization_weight'] = 0\nFLAGS['netD_times'] = 50\nFLAGS['netD_times_grow'] = 1\nFLAGS['netD_buffer_times'] = 50 #it depends on batch size\nFLAGS['netD_init_times'] = 0\nFLAGS['netG_init_method'] = 'var_scale' #var_scale, rand_uniform, rand_normal, truncated_normal\nFLAGS['netG_init_weight'] = 1e-3\nFLAGS['netG_base_learning_rate'] = 1e-5\nFLAGS['netG_base_learning_decay'] = 75\nFLAGS['netG_base_learning_decay_epoch'] = 75\nFLAGS['netG_regularization_weight'] = 0\nFLAGS['loss_source_data_term'] = 'l2' # l1, l2, PR, GD\nFLAGS['loss_source_data_term_weight'] = 1e3\nFLAGS['loss_constant_term'] = 'l2' # l1, l2, PR, GD\nFLAGS['loss_constant_term_weight'] = 1e4\nFLAGS['loss_photorealism_is_our'] = True\nFLAGS['loss_wgan_lambda'] = 10\nFLAGS['loss_wgan_lambda_grow'] = 2.0\nFLAGS['loss_wgan_lambda_ignore'] = 1\nFLAGS['loss_wgan_use_g_to_one'] = False\nFLAGS['loss_wgan_gp_times'] = 1\nFLAGS['loss_wgan_gp_use_all'] = False\nFLAGS['loss_wgan_gp_bound'] = 5e-2\nFLAGS['loss_wgan_gp_mv_decay'] = 0.99\n\nFLAGS['loss_data_term_use_local_weight'] = False\nFLAGS['loss_constant_term_use_local_weight'] = False\nFLAGS['data_csr_buffer_size'] = 1500\nFLAGS['sys_use_all_gpu_memory'] = True\nFLAGS['loss_pr'] = (FLAGS['loss_constant_term'] == 'PR' and FLAGS['loss_constant_term_weight'] > 0) or (FLAGS['loss_source_data_term'] == 'PR' and FLAGS['loss_source_data_term_weight'] > 0)\nFLAGS['loss_heavy'] = (FLAGS['loss_constant_term_weight'] > 0)\n\nFLAGS['data_augmentation_size'] = 8\nFLAGS['data_use_random_pad'] = False\nFLAGS['data_train_batch_size'] = 3\nFLAGS['load_previous_exp'] = 0\nFLAGS['load_previous_epoch'] = 0\n\nFLAGS['process_run_first_testing_epoch'] = True\nFLAGS['process_write_test_img_count'] = 498\nFLAGS['process_train_log_interval_epoch'] = 20\nFLAGS['process_test_log_interval_epoch'] = 2\nFLAGS['process_max_epoch'] = 150\n\nFLAGS['format_log_step'] = '%.3f'\nFLAGS['format_log_value'] = '{:6.4f}'\nif FLAGS['sys_use_unix']:\n FLAGS['path_char'] = '/'\n if FLAGS['sys_is_dgx']:\n FLAGS['path_data'] = '/dataset/LPGAN'\n FLAGS['path_result_root'] = '/dataset/LPGAN-Result/%03d-DGX-LPGAN'\n else:\n FLAGS['path_data'] = '/tmp3/nothinglo/dataset/LPGAN'\n FLAGS['path_result_root'] = '/tmp3/nothinglo/dataset/LPGAN-Result/%03d-DGX-LPGAN'\nelse:\n FLAGS['path_char'] = '\\\\'\n FLAGS['path_data'] = 'D:\\\\G\\\\LPGAN'\n FLAGS['path_result_root'] = 'D:\\\\LPGAN\\\\%03d-DGX-LPGAN'\n\nFLAGS['path_result'] = FLAGS['path_result_root'] % FLAGS['num_exp']\nFLAGS['load_path'] = FLAGS['path_result_root'] % FLAGS['load_previous_exp'] + FLAGS['path_char']\nFLAGS['load_model_path'] = FLAGS['load_path'] + 'model' + FLAGS['path_char'] + '%s.ckpt' % (FLAGS['format_log_step'] % FLAGS['load_previous_epoch'])\nFLAGS['load_train_loss_path'] = FLAGS['load_path'] + 'train_netG_loss' + FLAGS['path_char'] + '%s.txt' % (FLAGS['format_log_step'] % FLAGS['load_previous_epoch'])\nFLAGS['load_train_indices_input_path'] = 
FLAGS['load_path'] + 'train_ind_input' + FLAGS['path_char'] + '%s.txt' % (FLAGS['format_log_step'] % FLAGS['load_previous_epoch'])\nFLAGS['load_train_indices_label_path'] = FLAGS['load_path'] + 'train_ind_label' + FLAGS['path_char'] + '%s.txt' % (FLAGS['format_log_step'] % FLAGS['load_previous_epoch'])\n\nFLAGS['load_model_need'] = FLAGS['load_previous_exp'] > 0\nFLAGS['process_epoch'] = 0\nFLAGS['process_train_drop_summary_step'] = 5\nFLAGS['process_test_drop_summary_step'] = 1\nFLAGS['process_train_data_loader_count'] = (8 if FLAGS['sys_use_unix'] else 4) if FLAGS['loss_pr'] else 2\n\n# data\nFLAGS['data_input_ext'] = '.tif'\nFLAGS['data_input_dtype'] = np.uint8\nFLAGS['data_label_dtype'] = np.uint8\nFLAGS['data_compute_dtype'] = np.float32\nFLAGS['data_image_size'] = 512\nFLAGS['data_image_channel'] = 3\nFLAGS['process_random_seed'] = 2\nFLAGS['process_load_test_batch_capacity'] = (8 if FLAGS['sys_use_unix'] else 4) if FLAGS['loss_pr'] else 32\nFLAGS['process_load_train_batch_capacity'] = (16 if FLAGS['sys_use_unix'] else 8) if FLAGS['loss_pr'] else 64\n\n# net\nFLAGS['net_gradient_clip_value'] = 1e8\n\n# input\nFLAGS['folder_input'] = FLAGS['path_data'] + FLAGS['path_char'] + 'input' + FLAGS['path_char']\nFLAGS['folder_label'] = FLAGS['path_data'] + FLAGS['path_char'] + 'label' + FLAGS['path_char']\nFLAGS['folder_label_HDR'] = FLAGS['path_data'] + FLAGS['path_char'] + 'label_HDR' + FLAGS['path_char']\n\nFLAGS['folder_csrs'] = FLAGS['path_data'] + FLAGS['path_char'] + 'csrs' + FLAGS['path_char']\nFLAGS['folder_csrs_rgb'] = FLAGS['path_data'] + FLAGS['path_char'] + 'csrs_rgb' + FLAGS['path_char']\nFLAGS['txt_test'] = FLAGS['path_data'] + FLAGS['path_char'] + 'test.txt'\nFLAGS['txt_train_input'] = FLAGS['path_data'] + FLAGS['path_char'] + 'train_input.txt'\nFLAGS['txt_train_label'] = FLAGS['path_data'] + FLAGS['path_char'] + 'train_label.txt'\nif FLAGS['sys_use_unix']:\n FLAGS['folder_test_csrs'] = FLAGS['folder_csrs']\nelse:\n FLAGS['folder_test_csrs'] = FLAGS['path_data'] + FLAGS['path_char'] + 'test_csrs' + FLAGS['path_char']\n\n# output\nFLAGS['folder_model'] = FLAGS['path_result'] + FLAGS['path_char'] + 'model' + FLAGS['path_char']\nFLAGS['folder_log'] = FLAGS['path_result'] + FLAGS['path_char'] + 'log' + FLAGS['path_char']\nFLAGS['folder_weight'] = FLAGS['path_result'] + FLAGS['path_char'] + 'weight' + FLAGS['path_char']\nFLAGS['folder_test_img'] = FLAGS['path_result'] + FLAGS['path_char'] + 'test_img' + FLAGS['path_char']\nFLAGS['folder_train_ind_input'] = FLAGS['path_result'] + FLAGS['path_char'] + 'train_ind_input' + FLAGS['path_char']\nFLAGS['folder_train_ind_label'] = FLAGS['path_result'] + FLAGS['path_char'] + 'train_ind_label' + FLAGS['path_char']\n\nFLAGS['folder_test_netG_loss'] = FLAGS['path_result'] + FLAGS['path_char'] + 'test_netG_loss' + FLAGS['path_char']\nFLAGS['folder_test_netG_psnr1'] = FLAGS['path_result'] + FLAGS['path_char'] + 'test_netG_psnr1' + FLAGS['path_char']\nFLAGS['folder_test_netG_psnr2'] = FLAGS['path_result'] + FLAGS['path_char'] + 'test_netG_psnr2' + FLAGS['path_char']\nFLAGS['folder_train_netG_loss'] = FLAGS['path_result'] + FLAGS['path_char'] + 'train_netG_loss' + FLAGS['path_char']\n\nFLAGS['netG_mat'] = FLAGS['path_result'] + FLAGS['path_char'] + '%03d-netG.mat' % FLAGS['num_exp']\nFLAGS['netD_mat'] = FLAGS['path_result'] + FLAGS['path_char'] + '%03d-netD.mat' % FLAGS['num_exp']\nFLAGS['txt_log'] = FLAGS['path_result'] + FLAGS['path_char'] + '%03d-log.txt' % FLAGS['num_exp']\n\n# Loss 및 다양한 측정 지표들을 정의한 함수들\n\n# Generator의 loss 측정에 사용되는 
photorealism loss\n\ndef tf_photorealism_loss(img, df, i, is_our):\n rec_t = df.rect[i]\n img_t = img[i, rec_t[0]:rec_t[1], rec_t[2]:rec_t[3], :]\n img_t = tf.image.rot90(img_t, 4 - tf.floordiv(df.rot[i], 2))\n img_t = tf.cond(tf.equal(tf.mod(df.rot[i], 2), 0), lambda: img_t, lambda: tf.image.flip_left_right(img_t))\n img_t = tf.transpose(img_t, [1, 0, 2])\n img_r = tf.reshape(img_t, [-1, 3])\n h = rec_t[1] - rec_t[0]\n w = rec_t[3] - rec_t[2]\n k = tf.cast((h - 2) * (w - 2), tf.float32)\n if is_our:\n epsilon1 = 1\n e = tf.constant(np.sqrt(epsilon1), dtype=tf.float32, shape=[1, 3])\n img_r = tf.concat(0, [img_r, e])\n mat_t_r = df.csr_mat_r[i]\n mat_t_g = df.csr_mat_g[i]\n mat_t_b = df.csr_mat_b[i]\n img_r_b, img_r_g, img_r_r = tf.split(1, 3, img_r)\n d_mat_r = tf.sparse_tensor_dense_matmul(mat_t_r, img_r_r)\n d_mat_g = tf.sparse_tensor_dense_matmul(mat_t_g, img_r_g)\n d_mat_b = tf.sparse_tensor_dense_matmul(mat_t_b, img_r_b)\n result_r = tf.reduce_sum(img_r_r * d_mat_r)\n result_g = tf.reduce_sum(img_r_g * d_mat_g)\n result_b = tf.reduce_sum(img_r_b * d_mat_b)\n result = tf.reduce_mean(tf.pack([result_r, result_b, result_g])) / k\n else:\n mat_t = df.csr_mat[i]\n d_mat = tf.sparse_tensor_dense_matmul(mat_t, img_r)\n result = tf.reduce_sum(img_r * d_mat) / (k * 3)\n return result\n\ndef tf_imgradient(tensor):\n B, G, R = tf.unpack(tensor, axis=-1)\n tensor = tf.pack([R, G, B], axis=-1)\n tensor = tf.image.rgb_to_grayscale(tensor)\n #tensor = tensor * 255;\n sobel_x = tf.constant([[1, 0, -1], [2, 0, -2], [1, 0, -1]], tf.float32)\n sobel_x_filter = tf.reshape(sobel_x, [3, 3, 1, 1])\n sobel_y_filter = tf.transpose(sobel_x_filter, [1, 0, 2, 3])\n #tensor = tf.pad(tensor, [[0, 0], [1, 1], [1, 1], [0, 0]], 'SYMMETRIC')\n fx = tf.nn.conv2d(tensor, sobel_x_filter, strides=[1,1,1,1], padding='VALID')\n fy = tf.nn.conv2d(tensor, sobel_y_filter, strides=[1,1,1,1], padding='VALID')\n g = tf.sqrt(tf.square(fx) + tf.square(fy))\n return g\n\ndef img_L2_loss(img1, img2, use_local_weight):\n if use_local_weight:\n w = -tf.log(tf.cast(img2, tf.float64) + tf.exp(tf.constant(-99, dtype=tf.float64))) + 1\n w = tf.cast(w * w, tf.float32)\n return tf.reduce_mean(w * tf.square(tf.sub(img1, img2)))\n else:\n return tf.reduce_mean(tf.square(tf.sub(img1, img2)))\n\ndef img_L1_loss(img1, img2):\n return tf.reduce_mean(tf.abs(tf.sub(img1, img2)))\n\ndef img_GD_loss(img1, img2):\n img1 = tf_imgradient(tf.pack([img1]))\n img2 = tf_imgradient(tf.pack([img2]))\n return tf.reduce_mean(tf.square(tf.sub(img1, img2)))\n\ndef regularization_cost(net_info):\n cost = 0\n for w, p in zip(net_info.weights, net_info.parameter_names):\n if p[-2:] == \"_w\":\n cost = cost + (tf.nn.l2_loss(w))\n return cost\n\ndef flatten_list(xs):\n result = []\n if isinstance(xs, (list, tuple)):\n for x in xs:\n result.extend(flatten_list(x))\n else:\n result.append(xs)\n return result","repo_name":"rinha7/DPE_test","sub_path":"dpe_sumin/data_init.py","file_name":"data_init.py","file_ext":"py","file_size_in_byte":10152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"33484029746","text":"\"\"\"\n Singly Linked list data structure\n\"\"\"\n\nclass Node:\n \n def __init__(self, value = None, next = None) -> None:\n self.value = value\n self.next = next\n\nclass LinkedList:\n\n def __init__(self) -> None:\n self.head = None\n self.length = 0\n\n def append(self, value: int) -> None:\n if not self.head:\n self.head = Node(value)\n self.length += 1\n return\n \n temp: Node = self.head\n while temp.next != None:\n temp = temp.next\n temp.next = Node(value)\n self.length += 1\n \n def prepend(self, value: int) -> None:\n if not self.head:\n self.head = Node(value)\n self.length += 1\n return\n self.head = Node(value, next=self.head)\n self.length += 1\n \n def insert(self, index: int, value: int) -> None:\n if index < 0 or index > self.length:\n raise \"index out of range\"\n if not self.head:\n self.head = Node(value)\n self.length += 1\n return\n if not index:\n self.prepend(value)\n return\n if index == self.length - 1:\n self.append(value)\n return\n temp: Node = self.head\n count: int = 0\n while count < index - 1:\n temp = temp.next\n count += 1\n temp.next = Node(value, next=temp.next)\n self.length += 1\n\n def pop(self, index = None) -> None:\n if not self.head:\n raise \"linked list is empty\"\n if index and (index < 0 or index > self.length):\n raise \"index out of range\"\n if self.length == 1:\n temp = self.head\n self.head = None\n self.length -= 1 \n return\n if index == 0:\n self.head = self.head.next\n self.length -= 1\n return\n if not index:\n temp: Node = self.head\n while temp.next != None:\n prev = temp\n temp = temp.next\n prev.next = None\n self.length -= 1\n return\n count: int = 0\n temp: Node = self.head\n while count < index - 1:\n temp = temp.next\n count += 1\n temp.next = temp.next.next\n self.length -= 1\n \n def traverse(self) -> None:\n temp: Node = self.head\n print(\"HEAD -> \", end='')\n while temp.next != None:\n print(temp.value, end =\" -> \")\n temp = temp.next\n print(temp.value)\n\n\nl = LinkedList()\nl.append(1)\nl.append(2)\nl.append(3)\nl.prepend(4)\nl.insert(0, 10)\nl.insert(4, 20)\nl.insert(1, 30)\nl.insert(5, 40)\nl.pop()\nl.pop(0)\nl.pop(5)\nl.pop(2)\nprint(l.length)\nl.traverse() # HEAD -> 30 -> 4 -> 2 -> 40","repo_name":"KISHOREGOWTHAM123/DSA","sub_path":"linked_list/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"3640852405","text":"# 445. Add Two Numbers II (Bloomberg, Microsoft)\n# You are given two linked lists representing two non-negative numbers. The most significant digit comes first and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.\n# \n# You may assume the two numbers do not contain any leading zero, except the number 0 itself.\n# \n# Follow up:\n# What if you cannot modify the input lists? In other words, reversing the lists is not allowed.\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n \n # store the references of each node into array\n arr1 = []\n arr2 = []\n cursor = l1\n while(cursor):\n arr1.append(cursor.val)\n cursor = cursor.next\n cursor = l2\n while(cursor):\n arr2.append(cursor.val)\n cursor = cursor.next\n \n result = None\n carry = 0\n while arr1 or arr2:\n sum = carry\n if arr1: sum += arr1.pop()\n if arr2: sum += arr2.pop()\n carry, val = divmod(sum,10)\n # print carry, val\n digit = ListNode(val)\n digit.next = result\n result = digit\n\n if carry>0:\n digit = ListNode(1)\n digit.next = result\n result = digit\n \n return result","repo_name":"PhiphyZhou/Coding-Interview-Practice-Python","sub_path":"LeetCode/p445-add2Nums.py","file_name":"p445-add2Nums.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"15259895836","text":"# -*- coding: utf-8 -*-\n\"\"\"Calculate the annual energy production\n\"\"\"\nimport re\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom _loads_utils import load_stats\n\n\nstat_dir = 'C:/Users/Mathieu Pellé/Documents/GitHub/LAC_RotorDesign/Loads/res_turb/' # results directory with statistics files !!! END WITH SLASH !!!\nv_ref = 37.5 # reference wind speed based on wind class (I=50, 2=42.5, 3=37.5)\ni_wind = 15 # channel number with the wind speed\ni_pow = 100 # channel number for electrical power\n\n# dictionary to map .sel index to ylabel for the plot\nylabels = {4: 'Pitch angle [deg]',\n 10: 'Rotor speed [rad/s]',\n 13: 'Thrust [kN]',\n 15: 'Wind speed [m/s]',\n 17: 'Tower-base FA [kNm]',\n 18: 'Tower-base SS [kNm]',\n 20: 'Yaw-bearing pitch [kNm]',\n 22: 'Yaw-bearing roll [kNm]',\n 25: 'Shaft torsion [kNm]',\n 26: 'OoP BRM [kNm]',\n 27: 'IP BRM [kNm]',\n 70: 'Generator torque [Nm]',\n 100: 'Electrical power [W]',\n 108: 'Tower clearance [m]'}\n\n# load the mean statistics for wind speed and power\nstat_file = stat_dir + 'stats_mean.txt'\nfiles, idxs, data = load_stats(stat_file)\nwind = data[:, idxs == i_wind].squeeze()\npower = data[:, idxs == i_pow].squeeze()\n\n# extract the set wind speed value from the filename using regex tricks\nwsps = [float(re.findall('[0-9]{1,2}[.][0-9]', f)[0]) for f in files]\n\n# calculate the average power in a wind speed bin\nwsp_unique = np.unique(wsps)\ndelta_v = wsp_unique[1] - wsp_unique[0]\npows = np.empty(wsp_unique.size) # mean power at each wind speed\nfor j, vj in enumerate(wsp_unique):\n # isolate the dels from each simulation\n wsp_pows = power[np.isclose(wsps, vj)] # powers for that wind speed\n p = 1/wsp_pows.size # probability of each simulation in the wsp bin is equal 1/nsim\n pows[j] = sum(p * wsp_pows) # this is actually just a mean, really\n\n\n# calculate the annual energy production\nv_ave = 0.2*v_ref # v_ave=0.2*vref\nhrs_per_year = 365 * 24 # hours per year\ndvj = wsp_unique[1] - wsp_unique[0] # assuming even bins!\nprobs = (np.exp(-np.pi*((wsp_unique - dvj/2) / (2*v_ave))**2)\n - np.exp(-np.pi*((wsp_unique + dvj/2) / (2*v_ave))**2)) # prob of wind in each bin\naep = hrs_per_year * sum(probs * pows) # sum weighted power and convert to AEP (Wh)\nprint(f'The AEP is: {aep/(1e6):.1f} MWh')\n\n# make the plot\nfig, ax1 = plt.subplots(1, 1, num=1, figsize=(7, 3), clear=True)\nplt.plot(wind, power, 'o', zorder=10) # 10-min means\nplt.plot(wsp_unique, pows, 'or', mec='0.2', ms=7, alpha=0.9, zorder=11) # bin-average\nplt.grid('on')\nplt.xlabel('Wind speed [m/s]')\nplt.ylabel(ylabels[i_pow])\n# bar plot with probabilities\nax2 = ax1.twinx() # new axis with shared x\nax2.bar(wsp_unique, probs, facecolor='0.8', edgecolor='0.4', alpha=0.7, zorder=-2)\nax2.set_yticks([])\nax1.set_zorder(1) # magic to put bars under power\nax1.patch.set_visible(False) # prevent ax1 from hiding ax2\nplt.tight_layout()\n","repo_name":"GonMazzini/LAC_RotorDesign","sub_path":"Loads/calculate_aep.py","file_name":"calculate_aep.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"27147366220","text":"import logging\nimport json\nimport sys\nimport os\nimport time , hmac, hashlib, base64\nfrom websocket import create_connection\nfrom requests.auth import AuthBase\nimport requests\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nfrom core.libraries.pub_sub import Publisher, Subscriber\nfrom core.libraries.websocket_thread import ConnectThread\nfrom core.libraries.channels import channels as ch\nfrom core.libraries.gdax_auth import Authentication\n\n\nclass GDAXWebSocketClient():\n\n def __init__(self,data,channels=[]):\n self.url = \"wss://ws-feed.gdax.com\"\n\n self.params = json.dumps(data)\n self.pub = Publisher(channels)\n self.stop = False\n\n for c in channels:\n channel = Subscriber(c)\n channel.pub = Publisher(events=['incoming_data'])\n self.pub.register(c, channel)\n\n def on_message(self, message):\n if message['type'] == \"match\":\n self.pub.dispatch(message['product_id'], message)\n\n def on_open(self):\n print(\"--Subscribed--\")\n\n def on_error(self, err):\n\n self.stop = True\n print('{}'.format(err))\n\n def connect(self):\n self.on_open()\n self.ws = create_connection(self.url)\n self.ws.send(self.params)\n self.listen()\n\n def listen(self):\n while not self.stop:\n try:\n if int(time.time()%30) == 0:\n self.ws.ping(\"alive\")\n\n msg = json.loads(self.ws.recv())\n self.on_message(msg)\n except ValueError as e:\n self.on_error(e)\n except Exception as e:\n self.on_error(e)\n\ndef main():\n pairs=[\"ETH-USD\",\"BTC-USD\"]\n\n API_KEY = \"\"\n API_SECRET = \"\"\n API_PASS = \"\"\n\n auth=Authentication(API_KEY, API_SECRET, API_PASS)\n request = {\"type\": \"subscribe\",\n \"channels\": [{\"name\": \"full\", \"product_ids\": pairs }]}\n res = requests.get('https://api.gdax.com/'+ 'accounts', auth=auth)\n #test page example\n print(res.json())\n\n\n ws=GDAXWebSocketClient(request,pairs)\n ws.connect()\nif __name__==\"__main__\":\n main()\n","repo_name":"pauljherrera/cryptotrader","sub_path":"core/GDAX_data_feeder.py","file_name":"GDAX_data_feeder.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"}
+{"seq_id":"35979163597","text":"from plyer import notification\nimport requests\nfrom datetime import datetime\n# from bs4 import BeautifulSoup\nimport json\nfrom datetime import date\nimport time \n\ndef notifyme(title,message):\n notification.notify(\n title = title,\n message = message,\n app_icon = \"C:\\\\Users\\\\yashk\\\\Videos\\\\work\\\\CoronaVisrus Notification System\\\\icon.ico\",\n timeout = 20\n )\n\ndef getData(url):\n r = requests.get(url)\n return r.json()\n\nif __name__ == \"__main__\":\n while True:\n myJSONdata = getData(\"https://api.covid19india.org/states_daily.json\")\n datetime = datetime.now()\n today = date.today()\n today= str(today)\n print(type(today))\n for i in myJSONdata.get(\"states_daily\")[-3:]:\n # print(i.get('date'))\n # print(\"\\ntoday date is : \",today) \n # print('\\n')\n # print('\\n')\n # if i.get('date')==\"07-Aug-20\":\n date = i.get('date')\n if i.get('status')==\"Recovered\":\n Recovered = i.get('gj')\n if i.get('status')==\"Confirmed\":\n Confirmed = i.get('gj')\n if i.get('status')==\"Deceased\":\n Deceased = i.get('gj')\n final_string = f\"Confirmed : {Confirmed}\\nRecovered : {Recovered}\\nDeceased : {Deceased}\\nDate : {today}\"\n\n notifyme(\"State : Gujrat\",f\"Last Update : {date}\\n\"+final_string)\n time.sleep(3600)","repo_name":"Yash-Patel01/Coronavirus-Cases-Notification-System","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"33156178002","text":"from skimage import io\nimport numpy as np\n\ndef read_3d_points(rgbpath, depthpath, Rtilt, K):\n \"\"\"\n a python implementation of SUNRGBDTOOL read3dPoints.m\n Rtilt: (3, 3)\n K: (3, 3)\n \"\"\"\n depth_vis = io.imread(depthpath)\n valid = (depth_vis != 0).ravel()\n\n depth = (depth_vis >> 3) | (depth_vis << 13)\n depth = depth.astype(np.float32) / 1000\n depth[depthpath > 8] = 8\n height = depth.shape[0]\n width = depth.shape[1]\n\n cx, cy = K[0, 2], K[1, 2]\n fx, fy = K[0, 0], K[1, 1]\n\n x, y = np.meshgrid(np.arange(width), np.arange(height))\n x3 = (x - cx) * depth / fx\n y3 = (y - cy) * depth / fy\n z3 = depth\n\n points = np.stack([x3.ravel(), z3.ravel(), -y3.ravel()], 1)\n points = points[valid]\n\n rgb = io.imread(rgbpath)\n rgb = rgb.astype(np.float32).reshape(-1, 3)[valid] / 255\n points = np.matmul(Rtilt, points.T).T\n\n points_rgb = np.concatenate([points, rgb], 1)\n\n return points_rgb\n","repo_name":"Gorilla-Lab-SCUT/frustum-convnet","sub_path":"sunrgbd/read_3d_points.py","file_name":"read_3d_points.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":236,"dataset":"github-code","pt":"76"}
+{"seq_id":"36542844966","text":"#+ =====================================================================================================================\n#+\n#+ 8 kyu - Thinkful - Number Drills: Pixelart planning [ ID: 58630e2ae88af44d2b0000ea ] (thinkful-number-drills-pixelart-planning)\n#+ URL: https://www.codewars.com/kata/58630e2ae88af44d2b0000ea\n#+ Category: REFERENCE | Tags: FUNDAMENTALS\n#+\n#+ =====================================================================================================================\n\nimport codewars_test as test\nfrom thinkful_number_drills_pixelart_planning import is_divisible\n\n\ntest.describe(\"Basic tests\")\ntest.assert_equals(is_divisible(4050, 27), True)\ntest.assert_equals(is_divisible(4066, 27), False)\ntest.assert_equals(is_divisible(10000, 20), True)\ntest.assert_equals(is_divisible(10005, 20), False)\n\ntest.describe(\"Random tests\")\n\nimport random\n\ndef my_is_divisible(wall_length, pixel_size):\n return wall_length % pixel_size == 0\n \nfor x in range(150):\n wall_length = random.randint(500, 11000)\n pixel_size = random.randint(2, 29)\n test.assert_equals(\n is_divisible(wall_length, pixel_size),\n my_is_divisible(wall_length, pixel_size)\n ) \n","repo_name":"jdold07/codewars-solutions","sub_path":"kata-8-kyu/thinkful-number-drills-pixelart-planning/python/thinkful_number_drills_pixelart_planning_test.py","file_name":"thinkful_number_drills_pixelart_planning_test.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}